\begin{document}
\title[Improving an inequality for the divisor function]{Improving an inequality for the divisor function}
\author[J. Lay]{Jeffrey P.S. Lay}
\address{Mathematical Sciences Institute, The Australian National University}
\email{[email protected]}
\date{}
\begin{abstract}
We improve, using elementary means, an explicit bound on the divisor function due to Friedlander and Iwaniec. Consequently, we modestly improve a result regarding a sieving inequality for Gaussian sequences.
\end{abstract}
\subjclass[2010]{11N36, 11N37, 11N56, 11N64}
\keywords{divisor function, small divisors, Gaussian sequences, sieve estimates}
\maketitle
\section{Introduction}
Let $\tau(n)$ be the number of divisors of $n$. While asymptotic estimates for weighted sums $\sum \tau(n)a_n$ are generally difficult to obtain, explicit bounds often suffice in applications.
We shall consider the relationship between $\tau(n)$ and averages of $\tau(d)$ for small divisors $d$ of $n$. Landreau~\cite{Landreau1988} showed that for any integer $k\geq 2$ there exists a constant $C_k >0$ such that
\begin{equation} \label{e81}
\tau(n) \leq C_k \sum_{\substack{d|n \\ d\leq n^{1/k}}} \big( 2^{\omega(d)} \tau(d) \big)^k, \qquad n\geq 1,
\end{equation}
where $\omega(n)$ counts the number of distinct primes dividing $n$. We should like to make the constants $C_k$ effective. Friedlander and Iwaniec~\cite{OperadeCribro} considered, {\em inter alia}, a weakened version of~\eqref{e81} for $k=4$, making use of the trivial bound $2^{\omega(n)} \leq \tau(n)$. They showed that
\begin{equation} \label{e82}
\tau(n) \leq C \sum_{\substack{d|n \\ d\leq n^{1/4}}} \tau(d)^8, \qquad n\geq 1,
\end{equation}
holds for $C=256$. Numerical evidence suggests this constant is far from optimal. In fact, it can be easily verified that~\eqref{e82} holds with $C=8$ for all $1 \leq n \leq 10^8$. Moreover, equality is attained for $733,133$ values of $n$ within this interval, these being the square-free numbers $n=p_1p_2p_3$ satisfying $n^{1/4}<\min(p_1,p_2,p_3)$. So for small $n$ it is certainly the case that $C=8$ is the best possible constant, with evidence suggesting this trend should continue as $n\to \infty$. Our aim is to investigate whether $C\leq 8$ is admissible for all $n$ sufficiently large, as well as whether the sum can be made sharper.
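The numerical evidence above is straightforward to reproduce. The following Python sketch (added purely for illustration; the range \texttt{N} is arbitrary and much smaller than $10^8$) checks the inequality of~\eqref{e82} with $C=8$ by a divisor sieve, and counts the equality cases in the tested range.
\begin{verbatim}
N = 10**5                      # range of n to test (the text reports checks up to 10^8)

tau = [0] * (N + 1)            # divisor-counting sieve: tau[n] = number of divisors of n
for d in range(1, N + 1):
    for m in range(d, N + 1, d):
        tau[m] += 1

worst, equalities = 0.0, 0
for n in range(1, N + 1):
    limit = int(n ** 0.25)     # largest integer with limit^4 <= n, guarding float error
    while limit ** 4 > n:
        limit -= 1
    while (limit + 1) ** 4 <= n:
        limit += 1
    s = sum(tau[d] ** 8 for d in range(1, limit + 1) if n % d == 0)
    worst = max(worst, tau[n] / s)
    if tau[n] == 8 * s:
        equalities += 1

print(worst, equalities)       # the worst ratio tau(n)/sum stays at most 8
\end{verbatim}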
In this article we show that~\eqref{e82} indeed holds for $C=8$. In addition we improve on the exponent of $\tau(d)$ in the sum, which~\eqref{e81} suggests should be much smaller than $8$, at least for non-square-free $n$. Our main result towards this goal is the following.
\begin{thm} \label{t1}
Let $n \geq 1$. Then there exists $d\leq n^{1/4}$ with $d|n$ such that $\tau(n) \leq 8 \tau(d)^7$.
\end{thm}
We shall also show that the constant $C$ in~\eqref{e82} must satisfy $C\geq 8$. Thus we deduce
\begin{thm} \label{t2}
We have
\begin{equation*}
\tau(n) \leq 8 \sum_{\substack{d|n \\ d\leq n^{1/4}}} \tau(d)^7, \qquad n\geq 1,
\end{equation*}
the constant $8$ being best possible for all $n$.
\end{thm}
The consideration of~\eqref{e82} by Friedlander and Iwaniec in~\cite{OperadeCribro} led to the study of sieving inequalities for Gaussian sequences. We shall see in \S{6} how Theorem~\ref{t2} may be used to modestly improve one of their results~\cite{FriedlanderIwaniec2007}.
\section{A lower bound}
Our first result describes a natural lower bound for the constant $C$ in~\eqref{e82}. This bound arises from the consideration of a particular set of square-free numbers. In fact, the result extends to the general case~\eqref{e81}.
\begin{prop} \label{p21}
Fix an integer $k\geq 2$. Then for any multiplicative function $f:\mathbb N \to \mathbb R$ we have
\begin{equation*}
\limsup_{n\to \infty} \, \tau(n) \Bigg( \sum_{\substack{d|n \\ d\leq n^{1/k}}} f(d) \Bigg)^{-1} \geq 2^{k-1}.
\end{equation*}
\end{prop}
\begin{proof}
Take a prime $p_1> 2^{(k-1)(k-2)/2}$ and choose, using Bertrand's postulate, primes $p_2<p_3<\dots <p_{k-1}$ such that $p_1<p_2<2 p_1$ and $p_i<2^{i-1} p_1$ for $3\leq i \leq k-1$. Then
\begin{equation*}
p_1^{k-1}>2^{(k-1)(k-2) \over 2} \times p_1^{k-2}=\prod_{i=2}^{k-1} 2^{i-1} p_1 >p_2p_3\dotsm p_{k-1}.
\end{equation*}
Consider now $n=p_1p_2\dotsm p_{k-1}$. We see that $p_1 > n^{1/k}$, whence there are no non-trivial divisors $d$ of $n$ with $d\leq n^{1/k}$. So for such an $n$ we have $\tau(n)=2^{k-1}$ and
\begin{equation*}
\sum_{\substack{d|n \\ d\leq n^{1/k}}} f(d) = f(1) = 1.
\end{equation*}
Since there are infinitely many such $n$, the result follows.
\end{proof}
\section{Some upper bounds}
We now turn our attention to proving Theorem~\ref{t1}. The aim is to choose for any $n$ a divisor $d\leq n^{1/4}$ for which $\tau(d)$ is as large as possible. In this section we demonstrate this procedure for $n$ with certain prime factorisations.
We shall make use of the following elementary inequalities. We write $[x]$ for the integer part of $x$.
\begin{lem} \label{l9}
For all integers $t \geq 4$ we have $7 [t/4] \geq t$ and $([t/4]+1)^4 \geq 2(t+1)$.
\end{lem}
\begin{proof}
Let $i\geq 1$ be the unique integer such that $4i \leq t \leq 4i+3$. For the first inequality we simply see that $7[t/4]=7i \geq 4i+3 \geq t$. For the second we have $([t/4]+1)^4 = (i+1)^4 \geq 8(i+1) = 2(4i+3)+2 \geq 2t+2$.
\end{proof}
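The two inequalities can also be confirmed over a finite range by a quick brute-force check (an illustrative Python snippet; the range is arbitrary):
\begin{verbatim}
# Quick check (range arbitrary) of the two inequalities of the lemma above:
# 7*[t/4] >= t and ([t/4]+1)^4 >= 2(t+1) for integers t >= 4.
assert all(7 * (t // 4) >= t and (t // 4 + 1) ** 4 >= 2 * (t + 1)
           for t in range(4, 10001))
print("both inequalities hold for 4 <= t <= 10000")
\end{verbatim}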
We consider the various cases pertaining to how prime powers appear in the prime factorisation of $n$. Our first lemma deals with the case when all exponents are at least~$4$.
\begin{lem} \label{l70}
Suppose $n=p_1^{a_1}p_2^{a_2} \dotsm p_t^{a_t}$ with $a_i \geq 4$ for all $1\leq i \leq t$. Then there exists $d\leq n^{1/4}$ with $d|n$ such that $\tau(n)\leq 2^{-t} \tau(d)^4$.
\end{lem}
\begin{proof}
We let $d = \prod_{i=1}^t p_i^{[a_i/4]}$. Then $d \leq n^{1/4}$ and we have by Lemma~\ref{l9}
\begin{equation*}
\tau(d)^4 = \prod_{i=1}^t \bigg( \bigg[{a_i \over 4} \bigg] + 1 \bigg)^4 \geq 2^t \prod_{i=1}^t (a_i+1) = 2^t \tau(n).
\end{equation*}
\end{proof}
We now consider the cases when all prime powers appearing in the prime factorisation of $n$ occur with exponent $k$ for $k\in \{1,2,3\}$.
\begin{lem} \label{l71}
Suppose $n=p_1p_2 \dotsm p_t$ with $p_1<p_2<\dots <p_t$. Then there exists $d \leq n^{1/4}$ with $d|n$ such that
\begin{equation*}
\tau(n) \leq
\begin{cases}
2^t \tau(d) & \text{if } t \in \{1,2,3\}, \\
\tau(d)^7 & \text{if } t \geq 4.
\end{cases}
\end{equation*}
\end{lem}
\begin{proof}
Firstly let $t \in \{1,2,3\}$ be fixed. In each of these cases we let $d=1$. Then $2^t \tau(d) = \tau(n)$.
On the other hand, if $t\geq 4$ we take $d=p_1p_2\dots p_{[t/4]}$. Then $d\leq n^{1/4}$ and we have, by Lemma~\ref{l9}, $\tau(d)^7 = 2^{7\times {[t/4]}} \geq 2^t = \tau(n)$.
\end{proof}
\begin{lem} \label{l72}
Suppose $n=p_1^2 p_2^2 \dots p_t^2$ with $p_1<p_2<\dots <p_t$. Then there exists $d\leq n^{1/4}$ with $d|n$ such that
\begin{equation*}
\tau(n) \leq
\begin{cases}
3\tau(d) & \text{if } t=1, \\
2^{-2} \tau(d)^7 & \text{if } t \in \{2,3\}, \\
\tau(d)^7 & \text{if } t \geq 4.
\end{cases}
\end{equation*}
\end{lem}
\begin{proof}
If $t=1$ we let $d=1$. Then $3 \tau(d) = \tau(p_1^2)=\tau(n)$.
Next suppose $t \in \{2,3\}$. In these cases take $d=p_1$, whence $\tau(d)^7 = 2^7 > 2^2 \times 3^3 \geq 2^2 \tau(n)$.
Finally suppose $t\geq 4$. Take $d=p_1^2p_2^2 \dotsm p_{[t/4]}^2$. Then $\tau(d)^7 = 3^{7 \times [t/4]} \geq 3^t = \tau(n)$.
\end{proof}
\begin{lem} \label{l73}
Suppose $n=p_1^3 p_2^3 \dotsm p_t^3$ with $p_1<p_2<\dots<p_t$. Then there exists $d\leq n^{1/4}$ with $d|n$ such that
\begin{equation*}
\tau(n) \leq
\begin{cases}
4 \tau(d) & \text{if } t=1, \\
2^{-3} \tau(d)^7 & \text{if } t=2, \\
2^{-5} \tau(d)^7 & \text{if } t=3, \\
\tau(d)^7 & \text{if } t\geq 4.
\end{cases}
\end{equation*}
\end{lem}
\begin{proof}
As before, if $t=1$ let $d=1$, whence $4 \tau(d) = \tau(n)$.
If $t=2$ we take $d=p_1$, which gives $\tau(d)^7 = 2^7 = 2^3 \tau(n)$.
Next, if $t=3$ let $d=p_1^2$. Then $\tau(d)^7 = 3^7 > 2^5 \times 4^3 = 2^5 \tau(n)$.
Finally suppose $t\geq 4$. Take $d=p_1^3p_2^3 \dotsm p_{[t/4]}^3$, whence $\tau(d)^7 = 4^{7\times [t/4]} \geq 4^t = \tau(n)$.
\end{proof}
We are now ready to combine these estimates to prove Theorem~\ref{t1}.
\section{Proof of Theorem~\ref{t1}}
Let $n\geq 1$ and consider the unique prime factorisation of $n$. We group the prime powers according to their exponents: for each $i \in \{1,2,3\}$ let $m_i$ be the product of those occurring with exponent $i$ and let $l$ be the product of those with exponent at least $4$. Henceforth the relations $m_i=1$ and $l=1$ will be understood to mean that no primes of the corresponding form divide $n$.
Write $n=m_1m_2m_3l$. First observe by Lemma~\ref{l70} that there exists a divisor $d_l$ of $l$ with $d_l\leq l^{1/4}$ for which
\begin{equation} \label{e678}
\tau(n) = \tau(m_1m_2m_3) \tau(l) \leq \tau(m_1m_2m_3) \tau(d_l)^7.
\end{equation}
Thus to prove our theorem it suffices to consider those $n$ whose prime factorisations consist solely of prime powers with exponents strictly less than $4$. That is, if for each such $n=m_1m_2m_3$ we can find a divisor $d\leq n^{1/4}$ with $\tau(n) \leq 8 \tau(d)^7$ then by~\eqref{e678} we are done.
In each of the following cases the numbers $d_1,d_2,d_3$ are chosen as per Lemmas~\ref{l71},~\ref{l72}, and ~\ref{l73}. Note that these satisfy $d_i | m_i$ and $d_i\leq m_i^{1/4}$. Moreover, if $m_i=1$ we may choose $d_i=1$.
\begin{enumerate}[(I)]
\item Let $m_1\geq 1$.
\begin{enumerate}[(i)]
\item If $\omega(m_2) \in \{2,3\}$ then $\tau(n) \leq 8 \tau(d_1)^7 \times 2^{-2} \tau(d_2)^7 \times 4 \tau(d_3)^7 \leq 8 \tau(d_1d_2d_3)^7$.
\item If $\omega(m_3) \in \{2,3\}$ then $\tau(n) \leq 8 \tau(d_1)^7 \times 3\tau(d_2)^7 \times 2^{-3} \tau(d_3)^7 \leq 3 \tau(d_1d_2d_3)^7$.
\end{enumerate}
Henceforth we need only consider the cases in which, for $i\in\{2,3\}$, either $m_i=1$ or $\omega(m_i) \in \mathbb N \setminus \{2,3\}$.
\item Suppose $m_1=1$ or $\omega(m_1)\geq 4$.
\begin{enumerate}[(i)]
\item If at least one of $\omega(m_2)\geq 4$ or $\omega(m_3)\geq 4$ holds then $\tau(n) \leq \tau(d_1)^7 \times 4 \tau(d_2)^7 \times \tau(d_3)^7 = 4 \tau(d_1d_2d_3)^7$.
\item On the other hand, suppose $\omega(m_2)=\omega(m_3)=1$. In this case write $n=m_1p_1^2p_2^3$. Let $d'=\min(p_1,p_2) \leq (p_1^2p_2^3)^{1/4}$. Then $\tau(d')^7=2^7 > \tau(p_1^2p_2^3)$ and so $\tau(n) < \tau(d_1)^7 \times \tau(d')^7 \leq \tau(d_1d')^7$.
\end{enumerate}
\item Suppose $\omega(m_1)=1$.
\begin{enumerate}[(i)]
\item If at least one of $\omega(m_2)\geq 4$ or $\omega(m_3)\geq 4$ holds then $\tau(n) \leq 2\tau(d_1)^7 \times 4\tau(d_2)^7 \times \tau(d_3)^7 \leq 8\tau(d_1d_2d_3)^7$.
\item On the other hand, suppose $\omega(m_2)=\omega(m_3)=1$. Write $n=m_1 p_1^2p_2^3$. Let $d'=\min(p_1,p_2) \leq (p_1^2p_2^3)^{1/4}$. Then $\tau(d')^7 > \tau(p_1^2p_2^3)$ and so $\tau(n) < 2\tau(d_1) \times \tau(d')^7 \leq 2 \tau(d_1d')^7$.
\end{enumerate}
\item Suppose $\omega(m_1)=2$.
\begin{enumerate}[(i)]
\item If $\omega(m_2)\geq 4$ and $\omega(m_3)\geq 4$ then $\tau(n) \leq 4 \tau(d_1) \times \tau(d_2)^7 \times \tau(d_3)^7 \leq 4 \tau(d_1d_2d_3)^7$.
\item If $\omega(m_2)=1$ and $\omega(m_3)\geq 4$ write $n=p_1p_2p_3^2 m_3$. Let $d'=\min(p_1,p_2,p_3)\leq (p_1p_2p_3^2)^{1/4}$. Then $\tau(d')^7 > \tau(p_1p_2p_3^2)$ and so $\tau(n) < \tau(d')^7 \times \tau(d_3)^7 = \tau(d'd_3)^7$.
\item If $\omega(m_2)\geq 4$ and $\omega(m_3)=1$ write $n=p_1p_2 p_3^3 m_2$. Let $d'=\min(p_1,p_2,p_3)\leq (p_1p_2p_3^3)^{1/4}$. Then $\tau(d')^7 > \tau(p_1p_2p_3^3)$ and so $\tau(n) < \tau(d')^7 \times \tau(d_2)^7 = \tau(d'd_2)^7$.
\item Suppose $\omega(m_2)=\omega(m_3)=1$. Write $n=m_1p_1^2p_2^3$. Let $d'=\min(p_1,p_2)\leq (p_1^2p_2^3)^{1/4}$. Then $\tau(d')^7 > \tau(p_1^2p_2^3)$ and so $\tau(n) < 4\tau(d_1) \times \tau(d')^7 \leq 4\tau(d_1d')^7$.
\end{enumerate}
\item Suppose $\omega(m_1)=3$.
\begin{enumerate}[(i)]
\item If $\omega(m_2)\geq 4$ and $\omega(m_3)\geq 4$ then $\tau(n) \leq 8\tau(d_1) \times \tau(d_2)^7 \times \tau(d_3)^7 \leq 8\tau(d_1d_2d_3)^7$.
\item If $\omega(m_2)=1$ and $\omega(m_3)\geq 4$ write $n=p_1p_2p_3p_4^2m_3$. Let $d'=\min(\{p_i\})\leq (p_1p_2p_3p_4^2)^{1/4}$. Then $\tau(d')^7 > \tau(p_1p_2p_3p_4^2)$ and so $\tau(n) < \tau(d')^7 \times \tau(d_3)^7 = \tau(d'd_3)^7$.
\item If $\omega(m_2)\geq 4$ and $\omega(m_3)=1$ write $n=p_1p_2p_3p_4^3m_2$. Let $d' = \min(\{p_i\}) \leq (p_1p_2p_3p_4^3)^{1/4}$. Then $\tau(d')^7 > \tau(p_1p_2p_3p_4^3)$ and so $\tau(n) < \tau(d')^7 \times \tau(d_2)^7 = \tau(d'd_2)^7$.
\item If $\omega(m_2)=\omega(m_3)=1$ write $n=m_1 p_1^2p_2^3$. Let $d'=\min(p_1,p_2)\leq (p_1^2p_2^3)^{1/4}$. Then $\tau(d')^7 > \tau(p_1^2p_2^3)$ and so $\tau(n) < 8\tau(d_1) \times \tau(d')^7 \leq 8 \tau(d_1d')^7$.
\end{enumerate}
\end{enumerate}
In every case we have exhibited a divisor $d\leq n^{1/4}$ of $n$ satisfying $\tau(n) \leq 8\tau(d)^7$, which completes the proof of Theorem~\ref{t1}.
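Theorem~\ref{t1} itself can be tested directly by brute force. The following Python sketch (an illustration only, with an arbitrary search range) looks, for each $n$, for a single divisor $d\leq n^{1/4}$ with $\tau(n)\leq 8\tau(d)^7$.
\begin{verbatim}
def tau(n):
    # number of divisors of n, by trial division
    count, d = 0, 1
    while d * d <= n:
        if n % d == 0:
            count += 1 if d * d == n else 2
        d += 1
    return count

def theorem_holds(n):
    # search for a divisor d <= n^(1/4) with tau(n) <= 8 * tau(d)^7
    limit = int(n ** 0.25)
    while limit ** 4 > n:
        limit -= 1
    while (limit + 1) ** 4 <= n:
        limit += 1
    t = tau(n)
    return any(n % d == 0 and t <= 8 * tau(d) ** 7 for d in range(1, limit + 1))

assert all(theorem_holds(n) for n in range(1, 20001))
print("verified for 1 <= n <= 20000")
\end{verbatim}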
\section{Further speculation}
Returning to~\eqref{e81} one may consider for any $k\geq 2$ and $\eta \geq 1$ the generalised inequality
\begin{equation} \label{e91}
\tau(n) \leq C_{k,\eta} \sum_{\substack{d|n \\ d\leq n^{1/k}}} \tau(d)^\eta.
\end{equation}
Clearly if~\eqref{e91} holds then it must also be true for any $\eta'>\eta$, in which case we may choose $C_{k,\eta'}=C_{k,\eta}$. Thus for fixed $k$ and $C_k=C_{k,\eta}$ we would like to know the smallest $\eta$ for which~\eqref{e91} holds.
A natural question to consider in our case is whether Theorem~\ref{t1} can be improved to show that for all $n\geq 1$ there exists a divisor $d \leq n^{1/4}$ such that $\tau(n) \leq 8 \tau(d)^6$. It appears, however, that the purely elementary methods presented in this paper cannot achieve this in any practical sense. To see why consider a number $n=p_1^2 p_2^2 \dotsm p_{t_1}^2 q_1^3 q_2^3 \dotsm q_{t_2}^3$ with $t_1 \geq 4$ and $t_2\geq 4$. Suppose $p_1<p_2<\dots <p_{t_1}$ and $q_1<q_2<\dots <q_{t_2}$. Without additional assumptions on $n$ the best choice of divisor $d\leq n^{1/4}$ for which $\tau(d)$ is as large as possible is $d=p_1^2 p_2^2 \dotsm p_{[t_1/4]}^2 q_1^3 q_2^3 \dotsm q_{[t_2/4]}^3$. But then (cf.\ Lemma~\ref{l9})
\begin{equation*}
\tau(d)^6 = 3^{6 \times \left[{t_1 \over 4}\right]} \times 4^{6 \times \left[{t_2 \over 4} \right]} \geq 3^{t_1-1} \times 4^{t_2-1} = 12^{-1} \tau(n).
\end{equation*}
Thus the best estimate we can produce unconditionally is $\tau(n) \leq 12 \tau(d)^6$. One may enumerate each of the various cases in regards to the relative sizes of the $p_i,q_j$ to produce a divisor $d$ with $\tau(d)$ large enough; this seems a formidable task in general.
In any case it remains an open problem to determine the smallest $\eta>0$ such that
\begin{equation} \label{e909}
\tau(n) \ll_\eta \sum_{\substack{d|n \\ d \leq n^{1/4}}} \tau(d)^\eta.
\end{equation}
At least in the square-free case this problem has been solved. Iwaniec and Munshi~\cite{IwaniecMunshi2010} showed that~\eqref{e909} holds for square-free $n$ with any $\eta>3 \log 3/\log 2 - 4=0.75488\dots$, this lower bound being best possible.
\section{An application to Gaussian sequences}
Of significant interest in sieve theory is the detection of primes in Gaussian sequences, viz.\ sequences supported on integers which can be expressed as the sum of two squares.
Here we consider a generalised Gaussian sequence $\mathcal A=(a_n)$ defined by
\begin{equation} \label{e321}
a_n = \sum_{\substack{l^2+m^2=n \\ (l,m)=1}} \gamma_l,
\end{equation}
where $l,m$ run over positive integers and $\gamma_l$ are any complex numbers with $|\gamma_l| \leq 1$. We further suppose that the $\gamma_l$ are supported on $r$-th powers, i.e., $\gamma_l=0$ unless $l = k^r$ for some positive integer $k$.
In the process of sieving $\mathcal A$ one requires good estimates for
\begin{equation} \label{e322}
A_d(x) = \sum_{\substack{n\leq x \\ d|n}} a_n.
\end{equation}
It can be shown (see equations~(6) and~(7) in~\cite{FriedlanderIwaniec2007}) that
\begin{equation*}
\sum_{n\leq x} a_n = \sum_{l < \sqrt x} \gamma_l {\varphi(l) \over l} \sqrt{x-l^2} + O(x^{1 \over 2r} \log x ),
\end{equation*}
so for $d$ not too large we expect $A_d(x)$ to be uniformly well approximated by
\begin{equation*}
M_d(x) = {\rho(d) \over d} \sum_{\substack{l<\sqrt x \\ (l,d)=1}} \gamma_l {\varphi(l) \over l} \sqrt{x-l^2},
\end{equation*}
where $\rho(d)$ is the number of solutions to the congruence $\nu^2+1 \equiv 0 \mod d$.
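For concreteness, $\rho(d)$ can be computed by direct enumeration; the following small Python sketch (not part of the argument) does so for the first few moduli. Recall that $\rho$ is multiplicative and vanishes whenever $4\mid d$ or $d$ has a prime factor $p\equiv 3 \pmod 4$.
\begin{verbatim}
def rho(d):
    # number of solutions of nu^2 + 1 = 0 (mod d), by direct enumeration
    return sum(1 for nu in range(d) if (nu * nu + 1) % d == 0)

print([rho(d) for d in range(1, 16)])
# -> [1, 1, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 2, 0, 0]
\end{verbatim}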
To estimate~\eqref{e322} we may consider instead the smoothed sum
\begin{equation*}
A_d(f) = \sum_{n \equiv 0 \bmod d} a_n f(n),
\end{equation*}
where $f \in C^\infty([0,\infty))$ is such that $f(t)=1$ if $0\leq t \leq (1-\kappa)x$ and $f(t)=0$ if $t\geq x$. Here $x^{-1/4r} \leq \kappa \leq 1$ is some parameter to be optimised later.
\begin{prop} \label{p99}
Suppose $\sqrt x \leq D \leq x^{(r+1)/(2r)}$. Then
\begin{equation*}
\sum_{d\leq D} |A_d(x) - A_d(f)| \ll \kappa x^{r+1 \over 2r} (\log x)^{128}.
\end{equation*}
\end{prop}
\begin{proof}
A rearrangement of the sum gives
\begin{equation*}
\begin{aligned}
\sum_{d\leq D} |A_d(x)-A_d(f)| &=
\sum_{d\leq D} \Bigg| \sum_{\substack{(1-\kappa)x<n\leq x \\ d|n}} \big( 1-f(n) \big) \sum_{\substack{l^2+m^2=n \\ (l,m)=1}} \gamma_l \Bigg| \\
&\ll \sum_{\substack{(1-\kappa)x<l^2+m^2\leq x \\ (l,m)=1}} |\gamma_l| \sum_{d|(l^2+m^2)} 1 \\
&\ll \sideset{}{'}\sum_{\substack{(1-\kappa)x<l^2+m^2\leq x \\ (l,m)=1}} |\gamma_l| \tau(l^2+m^2) + \sqrt x \log x,
\end{aligned}
\end{equation*}
where $\sum{}^{'}$ indicates that the terms with $l$ nearest to $\sqrt x$ are omitted. We deduce from Theorem~\ref{t2} that
\begin{equation*}
\sideset{}{'}\sum_{\substack{(1-\kappa)x<l^2+m^2\leq x \\ (l,m)=1}} |\gamma_l| \tau(l^2+m^2) \ll \sideset{}{'}\sum_{l<\sqrt x} |\gamma_l| \sum_{\substack{d \leq x^{1/4} \\ (d,l)=1}} \tau(d)^7 \sum_{\substack{(1-\kappa)x < l^2+m^2 \leq x \\ l^2+m^2 \equiv 0 \bmod d}} 1.
\end{equation*}
Now split the range of $m$ into residue classes $m \equiv \nu l \mod d$, where $\nu^2+1 \equiv 0 \mod d$. This, combined with the observation that $m$ runs over an interval of length $O(\kappa x/\sqrt{x-l^2})$, allows us to estimate the above by
\begin{equation*}
\begin{aligned}
&\ll \kappa x \Bigg( \sum_{d\leq x^{1/4}} \tau(d)^7 {\rho(d) \over d} \Bigg) \Bigg( \sideset{}{'}\sum_{l<\sqrt x} {|\gamma_l| \over \sqrt{x-l^2}} \Bigg) + x^{{1 \over 4}+{1 \over 2r}} (\log x)^{128} \\
&\ll \kappa x \times (\log x)^{128} \times x^{1-r \over 2r} + x^{{1 \over 4}+{1 \over 2r}} (\log x)^{128} \\
&\ll \kappa x^{r+1 \over 2r} (\log x)^{128}.
\end{aligned}
\end{equation*}
\end{proof}
We now use Proposition~\ref{p99} to improve the error term in the main theorem of~\cite{FriedlanderIwaniec2007} by a factor of $O\big( (\log x)^{64.75} \big)$.
\begin{thm}
Let $a_n$ and $A_d(x)$ be as in~\eqref{e321} and~\eqref{e322}, respectively. Suppose $\sqrt x \leq D \leq x^{(r+1)/(2r)}$. Then
\begin{equation*}
\sum_{d\leq D} | A_d(x) - M_d(x) | \ll D^{1 \over 4} x^{3(r+1) \over 8r} (\log x)^{65.25}.
\end{equation*}
\end{thm}
\begin{proof}
We combine equations~(19) and (35) from~\cite{FriedlanderIwaniec2007} with Proposition~\ref{p99} above to obtain the estimate
\begin{equation*}
\begin{aligned}
\sum_{d\leq D} | A_d(x) - M_d(x) | &\ll \sum_{d\leq D} |A_d(x)- A_d(f)| + \kappa^{-1} D^{1 \over 2} x^{r+1 \over 4r} (\log x)^{5 \over 2} + \kappa x^{r+1 \over 2r} \log x \\
&\ll \kappa x^{r+1 \over 2r} (\log x)^{128} + \kappa^{-1} D^{1 \over 2} x^{r+1 \over 4r} (\log x)^{5 \over 2}.
\end{aligned}
\end{equation*}
Choosing
\begin{equation*}
\kappa = D^{1 \over 4} x^{-{r+1 \over 8r}} (\log x)^{{5 \over 4}-64}
\end{equation*}
yields the desired result.
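For completeness we record the routine verification (not in the original source) that this choice of $\kappa$ balances the two terms in the previous display:
\begin{equation*}
\kappa x^{r+1 \over 2r} (\log x)^{128} = \kappa^{-1} D^{1 \over 2} x^{r+1 \over 4r} (\log x)^{5 \over 2} = D^{1 \over 4} x^{3(r+1) \over 8r} (\log x)^{65.25}.
\end{equation*}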
\end{proof}
\end{document}
\begin{document}
\title{Spatial dependent spontaneous emission of an atom in a semi-infinite
waveguide of rectangular cross section}
\author{Hai-Xi Song}
\affiliation{Key Laboratory of Low-Dimensional Quantum Structures and Quantum Control of
Ministry of Education, Department of Physics and Synergetic Innovation
Center of Quantum Effects and Applications, Hunan Normal University,
Changsha 410081, China}
\author{Xiao-Qi Sun}
\affiliation{Key Laboratory of Low-Dimensional Quantum Structures and Quantum Control of
Ministry of Education, Department of Physics and Synergetic Innovation
Center of Quantum Effects and Applications, Hunan Normal University,
Changsha 410081, China}
\author{Jing Lu}
\affiliation{Key Laboratory of Low-Dimensional Quantum Structures and Quantum Control of
Ministry of Education, Department of Physics and Synergetic Innovation
Center of Quantum Effects and Applications, Hunan Normal University,
Changsha 410081, China}
\author{Lan Zhou}
\thanks{Corresponding author. Fax No.: +86-731-8887-3055}
\email{[email protected]}
\affiliation{Key Laboratory of Low-Dimensional Quantum Structures and Quantum Control of
Ministry of Education, Department of Physics and Synergetic Innovation
Center of Quantum Effects and Applications, Hunan Normal University,
Changsha 410081, China}
\date{\today }
\begin{abstract}
We study a quantum electrodynamics (QED) system consisting of a two-level atom
and a semi-infinite rectangular waveguide, whose closed end behaves as a perfect
mirror. The spatial dependence of the atomic spontaneous emission is contained
in the coupling strengths to the eigenmodes of the waveguide. The role of
retardation is studied for an atomic transition frequency far away from the
cutoff frequencies. The atom-mirror distance introduces different phases and
retardation times into the dynamics of the atom interacting resonantly with the
corresponding transverse modes. It is found that the upper-state population
decreases from its initial value as long as the atom-mirror distance does not
vanish, and is lowered further as more transverse modes become resonant with
the atom. For short retardation times the atomic spontaneous emission can be
either suppressed or enhanced by adjusting the atomic location. For long
retardation times there are partial revivals and collapses due to photons
being reabsorbed and re-emitted by the atom.
\end{abstract}
\keywords{quantum optics, waveguide, spontaneous emission, retardation}
\maketitle
\narrowtext
\section{Introduction}
Any quantum system inevitably interacts with its surroundings which possess
a huge amount of uncontrollable degrees of freedom. Such interaction causes
the rapid destruction of quantum coherence, which is an essential
requirement for quantum information processing to fully exploit the new
possibilities opened by quantum mechanics. For example, the information
stored in two-level systems (we refer to atoms hereafter) can be destroyed
by their surrounding electromagnetic field. Spontaneous emission (SE) is one
of the most prominent effects in the interaction of atoms with the vacuum: an
atom initially in an excited state relaxes to its ground state and emits a
quantum of energy into the surrounding vacuum electromagnetic (EM) field, which
carries away the difference in energy between the two levels.
SE is not only useless but can even be harmful to quantum information processing.
However, recent studies have shown that it is useful for building devices in
a quantum network for controlling single photons by a local atom, e.g. the
atomic radiation leads to the total reflection of the single-photon
propagating in one quantum channel~\cite{Fans,ZLPRL08}, the frequency
converter for single photons~\cite{ZLfconvert}, and the transfer of single
photons from one quantum channel to the other~\cite{ZLQrouter}. Nowadays,
great interest has been paid on the use of atoms to act as a quantum node in
extended communication networks and scalable computational devices~\cite
{zlZeno,Zheng,Roy,ZLMZI,GongPRA78,LawPRA78,SPT13,ZLQrouter2,PRL116,attenuator,MTcheng,Longo,Alexanian,TShiSun}
. As the SE rate of a single atom can be modified by a succession of short
and strong pulses applied to, or measurements performed on, the quantum system, a dynamical
quantum-Zeno-effect (QZE) switch for single photons has been proposed~\cite{zlZeno}
. The quantum interference between multiple transition pathways of atomic
internal states has been exploited to modify the transport property of the
single photons in a quantum channel~\cite{GongPRA78}. With the well-known
result that the atomic SE depends on the electromagnetic vacuum environment
that the atom is subjected to, a boundary has been used to increase the
efficiency of the quantum router~\cite{ZLQrouter2}.
Actually, the SE rate of a single atom is one of the basic topics of quantum
electrodynamics; numerous studies of SE rates~\cite{PR219-263} have been
carried out for atoms in free space~(e.g.\cite{PRA50-1755}), in a
cavity~(e.g. \cite{PRL47-233}), near a metallic mirror~(e.g. \cite
{PRA35-5081}) or a dielectric interface~(e.g. \cite{PRD3-280}), between two
mirrors~(e.g. \cite{PRA43-5795}) or two dielectric interfaces~(e.g. \cite
{PRA57-3913}). However, photons used to transmit information or distribute
entanglement along a network are confined in a one-dimensional (1D)
waveguide. Similar to cavities, a 1D waveguide has a well-defined mode
spectrum and a relatively loss-free environment. Unlike cavities, however,
a waveguide provides propagating modes for photons. The geometric
constraint not only confines the propagation direction of the photon, but also
gives rise to enhanced interference effects. Since photons do
not interact with each other, atoms embedded in the waveguide are necessary to
mediate the photon-photon interaction or to redirect the possible propagation
directions. The coupling strength of atoms to the waveguide is enhanced by
decreasing the mode volume. Consequently, atomic radiation in a 1D
waveguide plays an important role in controlling photons in quantum
networks, and the study of atoms in 1D waveguides is now referred to as
``waveguide quantum electrodynamics (QED).''
Since the radiative properties of an atom in a confined space differ
fundamentally from those in free space, considerable interest has been paid
to atoms in a semi-infinite or infinite 1D waveguide. However, most works
focus on 1D waveguides without a cross section~\cite
{DongPRA79,NJP14-043,PRA87-013820,PRA87-063830,PRA91-053845}. In this paper,
we study the radiative properties of an atom in a semi-infinite waveguide of
rectangular cross section, which is a typical 1D QED system. The termination
of the waveguide is regarded as a perfect mirror, which reflects emitted
photons back to the atom. We analyze the interaction of an initially excited
two-level atom with the waveguide in vacuum. The Markovian approximation is
first used to analyze the dependence of the SE rate on the density of states
and the spatial profile of the waveguide. To find the influence of the time
that the one-photon wave packet requires to bounce back and forth between the
atom and the mirror, we perform the linear expansion of the dispersion
relation for the atomic transition frequency far away from the cutoff
frequencies, and obtain a delay-differential equation. Then the atomic SE
dynamics is studied by varying the cross section of the waveguide as well as
the atomic location.
This paper is organized as follows. In Sec.~\ref{Sec:2}, we introduce the
system we studied. In Sec.~\ref{Sec:3}, we derive the relevant equations
describing the dynamics of the system in single-excitation subspace. In Sec.~
\ref{Sec:4}, we do the Markovian approximation to study the effect of the
mode profile on the spontaneous rate. In Sec.~\ref{Sec:5}, the atomic
dynamics is studied by linearly expanding the dispersion relation around the
transition frequency, which is valid far from the branch thresholds and
introduces the delay time. Finally, we conclude this work in the last section.
\section{\label{Sec:2}An atom inside a rectangular pipe waveguide}
The system we studied is shown in Fig.~\ref{fig:1}. A waveguide made of
ideal perfect conducting walls is formed from surfaces at $x=0$, $x=a$, $y=0$
, $y=b$, $z=0$, and is placed along the $z$ axis.
\begin{figure}
\caption{(Color online) Schematic illustration of (a) the two-level atom
embedded in a semi-infinite waveguide of rectangular cross section, and (b)
the dispersion relation of the guiding modes which interact with the atom at
$\vec{r}=\left( a/2,b/2,z_{0}\right) $.}
\label{fig:1}
\end{figure}
The waveguide is semi-infinite, extending to infinity along the positive $z$ direction. The
boundary condition restricts photons to travel without loss of power in two
independent guiding modes \cite{liqongPRA,HuangPRA}: TE modes whose electric
field has no longitudinal component, and TM modes whose magnetic field has
no longitudinal component. Let $\vec{k}=\left( k_{x},k_{y},k\right) $ be the
wave vector. The relations $k_{x}=m\pi /a$ and $k_{y}=n\pi /b$ with positive
integers $m,n$ are imposed by the condition that the tangential
components of the electric field vanish at all the conducting walls; however,
there is no constraint on $k$. Therefore, the waveguide allows a continuous
range of frequencies described by the dispersion relation
\begin{equation}
\omega _{mnk}=\sqrt{c^{2}k^{2}+\Omega _{mn}^{2}}, \label{A-1}
\end{equation}
where $c$ is the speed of light in vacuum, $\Omega _{mn}=\pi c\sqrt{
m^{2}/a^{2}+n^{2}/b^{2}}$ is the minimum frequency for a traveling wave. We
note that $m$ and $n$ cannot both be zero. If $a>b$, TE$_{10}$ is the lowest
guiding mode for the waveguide \cite{EMtextb}, and the lowest TM modes occur
for $m=1$, $n=1$. Obviously, the waveguide modes form a one-dimensional
continuum. Each guiding mode provides a quantum channel for photons to
travel from one location to the other.
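As a concrete illustration (the numerical values below are arbitrary and not taken from the paper), the cutoff frequencies $\Omega _{mn}$ and the TM modes lying below a given transition frequency can be tabulated with a short Python script:
\begin{verbatim}
import math

c = 3.0e8                      # speed of light (m/s)
a, b = 2.0e-2, 1.0e-2          # cross section with a = 2b (arbitrary example values)

def Omega(m, n):
    # cutoff frequency Omega_mn = pi*c*sqrt(m^2/a^2 + n^2/b^2)
    return math.pi * c * math.sqrt((m / a) ** 2 + (n / b) ** 2)

omega_A = 2.5 * Omega(1, 1)    # an arbitrary transition frequency above the lowest cutoff
open_TM = [(m, n) for m in range(1, 6) for n in range(1, 6) if Omega(m, n) < omega_A]
print(sorted(open_TM, key=lambda mn: Omega(*mn)))
\end{verbatim}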
An atom with transition frequency $\omega _{A}$ between upper level
$\left\vert e\right\rangle $ and lower level $\left\vert g\right\rangle $,
initially excited, is located at $\vec{r}=\left( x_{0},y_{0},z_{0}\right) $
inside the hollow waveguide; $z_{0}$ is the distance from the
atom to the wall (or the mirror) at $z=0$. The free Hamiltonian for the atom
is described by
\begin{equation}
H_{s}=\omega _{A}\sigma _{+}\sigma _{-}, \label{A-2}
\end{equation}
where $\sigma _{+}\equiv \left\vert e\right\rangle \left\langle g\right\vert
$ ($\sigma _{-}\equiv \left\vert g\right\rangle \left\langle e\right\vert $)
is the raising (lowering) atomic operator. For simplicity, the
electric dipole of the stationary atom is assumed to be oriented along the $z
$ direction, which means that the atom only interacts with the TM$_{mn}$
modes. Since the number $\left( m,n,k\right) $ specifies the mode function
of this air-filled metal pipe waveguide, we label the annihilation operator
for each TM guiding mode by $a_{mnk}$. The free Hamiltonian for the
waveguide is described by
\begin{equation}
H_{f}=\sum_{mn}\int_{-\infty }^{\infty }dk\text{ }\omega _{mnk}a_{mnk}^{\dag
}a_{mnk} \label{A-3}
\end{equation}
The interaction between the atom and field via the dipole coupling in the
rotating-wave approximation reads
\begin{equation}
H_{I}=\sum_{mn}\int_{-\infty }^{\infty }dkg_{mnk}(\sigma _{-}a_{mnk}^{\dag
}-\sigma _{+}a_{mnk}) \label{A-4}
\end{equation}
where the coupling strength
\begin{equation}
g_{mnk}=\frac{2id\Omega _{mn}}{\sqrt{\pi \epsilon _{0}A\omega _{mnk}}}\sin
\frac{m\pi x_{0}}{a}\sin \frac{n\pi y_{0}}{b}\cos (kz_{0}) \label{A-5}
\end{equation}
Here, $\epsilon _{0}$ is the permittivity of free space, $d$ is the magnitude of
the transition dipole moment of the atom (assumed to be real), and $A=ab$ is the
area of the rectangular cross section. Using the dispersion relation, the
coupling strength can be rewritten as
\begin{eqnarray}
g_{mn\omega } &=&\frac{2id\Omega _{mn}}{\sqrt{\pi \epsilon _{0}A\omega }}
\sin \frac{m\pi x_{0}}{a}\sin \frac{n\pi y_{0}}{b} \label{A-6} \\
&&\times \cos \left( \sqrt{\omega ^{2}-\Omega _{mn}^{2}}\frac{z_{0}}{c}
\right) . \notag
\end{eqnarray}
The cosine function in Eqs. (\ref{A-5}) and (\ref{A-6}) occurs due to the
termination of the waveguide, and marks the difference from the
infinite waveguide. Obviously, an atom located at $x_{0}=a/2$ and $y_{0}=b/2$
decouples from the TM$_{mn}$ guiding modes with even integer $m$ or $n$. The
total system is described by the Hamiltonian $H=H_{s}+H_{f}+H_{I}$.
\section{\label{Sec:3}Evolution of the atom-vacuum system}
In this section we study the dynamics of the system when the atom is
initially in the excited state $\left\vert e\right\rangle $ and the field is
in the vacuum state $\left\vert 0\right\rangle $. Since the number of quanta
is conserved in this system, we can write the wavefunction of the system as:
\begin{equation}
\left\vert \psi \left( t\right) \right\rangle =\varepsilon \left( t\right)
\left\vert e0\right\rangle +\sum_{mn}\int dk\varphi _{mn}\left( k,t\right)
a_{mnk}^{\dag }\left\vert g0\right\rangle \label{B-1}
\end{equation}
in the single-excitation subspace. The first term in Eq.(\ref{B-1}) describes the atom
in the excited state with no photons in the field, $\varepsilon \left(
t\right) $ is the corresponding amplitude, whereas the second term in Eq.(
\ref{B-1}) describes the atom in the ground state with a photon emitted at a
mode $k$ of the TM$_{mn}$ guiding mode, $\varphi _{mn}\left( k,t\right) $ is
the corresponding amplitude. The initial state of the system is denoted by
the amplitudes $\varepsilon \left( 0\right) =1$, $\varphi _{mn}\left(
k,0\right) =0$. The Schr\"{o}dinger equation results in the following
coupled equations for the amplitudes
\begin{subequations}
\label{B-2}
\begin{eqnarray}
\dot{\varepsilon}\left( t\right) &=&-i\omega _{A}\varepsilon \left( t\right)
+i\sum_{mn}\int_{-\infty }^{\infty }dkg_{mnk}\varphi _{mn} , \\
\dot{\varphi}_{mn}&=&-i\omega _{mnk}\varphi _{mn}\left( k,t\right)
-ig_{mnk}\varepsilon (t),
\end{eqnarray}
where the overdot indicates the derivative with respect to time. The
population of the atomic excited state is usually analyzed by eliminating
the field variables and focusing on the dynamics of the radiating system. We
start by removing the high frequency term in Eq.(\ref{B-2}) via the
transformation
\end{subequations}
\begin{equation}
\varepsilon \left( t\right) =\tilde{\varepsilon}\left( t\right) e^{-i\omega
_{A}t},\varphi _{mn}\left( k,t\right) =\tilde{\varphi}_{mn}\left( k,t\right)
e^{-i\omega _{mnk}t}. \label{B-3}
\end{equation}
Then we formally integrate the equation for $\tilde{\varphi}_{mn}\left(
k,t\right) $ and insert the result into the equation for $\tilde{
\varepsilon}\left( t\right) $. The probability amplitude for the excited
atomic state is determined by the following integro-differential equation
\begin{equation}
\partial _{t}\tilde{\varepsilon}\left( t\right) =\sum_{mn}\int_{-\infty
}^{\infty }dk\int_{0}^{t}d\tau g_{mnk}^{2}\tilde{\varepsilon}\left( \tau
\right) e^{i\left( \omega _{A}-\omega _{mnk}\right) \left( t-\tau \right) }.
\label{B-4}
\end{equation}
Eq.(\ref{B-4}) shows that the value of $\partial _{t}\tilde{\varepsilon}
\left( t\right) $ depends on the values of $\tilde{\varepsilon}\left(
t\right) $ at all earlier times.
\section{\label{Sec:4}Spatial dependence of the spontaneous rate}
To see how the mode distribution of the quantum vacuum fluctuation modifies
the atomic spontaneous rate, we set $\tilde{\varepsilon}\left( \tau \right)
\approx \tilde{\varepsilon}\left( 0\right) =1$ on the right-hand side of
equation (\ref{B-4}), and the amplitude of level $\left\vert e\right\rangle $
reads
\begin{equation}
\varepsilon \left( t\right) =e^{-i\omega _{A}t}\left[ 1-\int_{0}^{t}d\tau
\left( t-\tau \right) G\left( \tau \right) e^{i\omega _{A}\tau }\right] ,
\label{B-5}
\end{equation}
where the reservoir response (memory) function
\begin{equation}
G\left( \tau \right) =-\int_{-\infty }^{\infty
}dk\sum_{mn}g_{mnk}^{2}e^{-i\omega _{mnk}\tau } \label{B-6}
\end{equation}
characterizes the spectrum of the rectangular hollow waveguide. Its Fourier
transformation yields the coupling spectrum~\cite{N405-546,PRL87-270}
\begin{equation}
G\left( \omega \right) =-\sum_{mn}g_{mn\omega }^{2}\rho \left( \omega \right)
\label{B-7}
\end{equation}
which is the density of states
\begin{equation}
\rho \left( \omega \right) =\frac{\omega }{c\sqrt{\omega ^{2}-\Omega
_{mn}^{2}}} \label{B-8}
\end{equation}
weighted by the strength of the coupling to the continuum. Since the
dispersion relation of the semi-infinite waveguide is the same as that of
the infinite rectangular waveguide, $\rho \left( \omega \right) $ in Eq.(\ref
{B-8}) is also the density of state of the infinite rectangular waveguide.
For weak atom-field coupling, the amplitude of level $\left\vert
e\right\rangle $ decays exponentially
\begin{equation*}
\varepsilon \left( t\right) \approx \exp \left( -i\omega _{A}t-\frac{1}{2}
Rt\right)
\end{equation*}
Accordingly, the SE rate, the key ingredient in the SE dynamics, reads
\begin{equation}
R=2\pi \int_{-\infty }^{+\infty }d\omega f\left( \omega \right) G\left(
\omega \right) \label{B-9}
\end{equation}
which is the overlap of the coupling spectrum $G\left( \omega \right) $ and
the modulation spectrum~\cite{N405-546,PRL87-270}
\begin{equation}
f\left( \omega \right) =\frac{t}{2\pi }\text{sinc}^{2}\frac{\left( \omega
-\omega _{A}\right) t}{2}. \label{B-10}
\end{equation}
Here, $\mathrm{sinc}\left( x\right) =\sin x/x$. The modulation spectrum is
symmetrically centered on $\omega _{A}$ and decays in amplitude as $t^{-1}$.
Function $f\left( \omega \right) $ is the Fourier transform of the function
\begin{equation}
f\left( \tau \right) =\left( 1-\frac{\tau }{t}\right) e^{i\omega _{A}\tau
}\Theta \left( t-\tau \right) , \label{B-11}
\end{equation}
where $\Theta \left( x\right) $ is the Heaviside unit step function, i.e., $
\Theta \left( x\right) =1$ for $x\geq 0$, and $\Theta \left( x\right) =0$
for $x<0$. Taking the limit $t\rightarrow +\infty $, the modulation spectrum
$f\left( \omega \right) \rightarrow \delta \left( \omega -\omega _{A}\right)
$, then we obtain the golden rule value
\begin{equation}
R=2\pi G\left( \omega _{A}\right) . \label{B-12}
\end{equation}
The modal profile affects the decay rate via the location of the atom. If the
atom is located at $x_{0}=a/2$ and $y_{0}=b/2$, no photons are radiated into
the TM$_{mn}$ guiding mode with even integer $m$ or $n$ since the guiding
modes are standing waves in the transverse direction. The cutoff frequencies
also affect the decay rate via the local density of states. In Fig.~\ref
{fig:1}b, we have given a schematic diagram of the dispersion relation of
the guiding modes which interact with the atom at $\vec{r}=\left(
a/2,b/2,z_{0}\right) $ for $a=2b$. If the transition frequency $\omega
_{A}<\Omega _{11}$, SE cannot occur since $\rho \left( \omega \right) =0$.
Since $\rho \left( \omega \right) $ tends to infinity when $\omega
_{A}\rightarrow \Omega _{mn}$, the excited state population decays very
rapidly. When $\omega _{A}$ is located in the frequency band between $\Omega
_{11}$ and $\Omega _{31}$, only the TM$_{11}$ guiding mode contributes to the
spontaneous rate. However, there is an enhancement or inhibition of
spontaneous decay depending on the factor $\cos ^{2}\left( 2\pi z_{0}/\lambda
_{1A}\right) $, where the wavelength
\begin{equation}
\lambda _{1A}=\frac{2\pi c}{\sqrt{\omega _{A}^{2}-\Omega _{11}^{2}}}.
\label{B-13}
\end{equation}
In Fig.~\ref{fig:2}(a), we have plotted the probability of finding the atom
in its excited state as a function of $\Gamma t$ for three different values
of $z_{0}=0,\lambda _{1A}/8,\lambda _{1A}/4$, where
\begin{equation}
\Gamma =\frac{4d^{2}\Omega _{11}^{2}}{A\epsilon _{0}c\sqrt{\omega
_{A}^{2}-\Omega _{11}^{2}}}. \label{B-14}
\end{equation}
It can be seen that in the interval $z_{0}\in \left[ n\lambda _{1A},n\lambda
_{1A}+\lambda _{1A}/4\right] $ with integer $n$, the SE rate decreases as
the atom-mirror separation increases. However in the interval $z_{0}\in
\left[ n\lambda _{1A}+\lambda _{1A}/4,n\lambda _{1A}+\lambda _{1A}/2\right] $
, its SE rate increases as $z_{0}$ increases. Since $\cos x$ is a periodic
function of the argument $x$, the figure is only plotted in $z_{0}\in \left[
0,\lambda _{1A}/4\right] $. It can be seen that at $z_{0}=n\lambda
_{1A}+\lambda _{1A}/4$, the SE is completely suppressed. Since we have set
that $a=2b$, TM$_{51}$ and TM$_{13}$ are the third and fourth guiding modes
which might interact with the atom. When $\Omega _{31}<\omega _{A}<\Omega
_{51}$, the atom interacts with the continua of two guiding modes TM$_{11}$
and TM$_{31}$. The spontaneous rate increases although it still depends on $
z_{0}$. As $\omega _{A}$ increases, more and more guiding modes are included
to increase the spontaneous rate.
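As a rough illustration of the $z_{0}$ dependence discussed above (a sketch in arbitrary units, not the parameters of Fig.~\ref{fig:2}), the single-mode Markovian SE rate relative to its value at $z_{0}=0$ follows the $\cos ^{2}\left( 2\pi z_{0}/\lambda _{1A}\right)$ law:
\begin{verbatim}
import math

lam = 1.0                         # lambda_1A in arbitrary units

def relative_rate(z0):
    # single-mode SE rate divided by its value at z0 = 0
    return math.cos(2.0 * math.pi * z0 / lam) ** 2

for z0 in (0.0, lam / 8, lam / 4):
    print(z0, relative_rate(z0))  # -> 1.0, 0.5, ~0: suppression at z0 = lambda_1A/4
\end{verbatim}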
\begin{figure}
\caption{(Color online) Atomic excitation probability as a function of $\Gamma
t$ with $a=2b$: (a) $z_{0}=0,\lambda _{1A}/8,\lambda _{1A}/4$ with the
transition frequency $\omega _{A}$ between $\Omega _{11}$ and $\Omega _{31}$;
(b) $z_{0}=\lambda _{1A}/4$ and $\omega _{A}\approx \left( \Omega _{31}+\Omega
_{51}\right) /2$.}
\label{fig:2}
\end{figure}
In Fig.~\ref{fig:2}(b), we have plotted the atomic excitation probability
with $z_{0}=\lambda _{1A}/4$ and $\omega _{A}\approx \left( \Omega
_{31}+\Omega _{51}\right) /2$. In this case, the TM$_{11}$ mode does not
contribute to the SE (see the red dashed line); however, the SE is still
enhanced. This enhancement is due to the atomic coupling to the continuum of
the TM$_{31}$ mode (see the blue solid line). One can understand from Eq.(
\ref{B-12}) that the SE rate of the atom caused by the TM$_{31}$ mode
also depends on the factor $\cos ^{2}\left( 2\pi z_{0}/\lambda _{2A}\right) $,
where $\lambda _{2A}=2\pi c/\sqrt{\omega _{A}^{2}-\Omega _{31}^{2}}$. Since
the wavelength of the radiation emitted by the atom into the continuum is
different for different guiding modes, the SE is generally increased when
more guiding modes interact with the atom.
One can also obtain the spontaneous rate in Eq.(\ref{B-12}) by replacing $
\tilde{\varepsilon}\left( \tau \right) $ with $\tilde{\varepsilon}\left(
t\right) $ in Eq.(\ref{B-4}) and letting the upper limit of the integral tend to
infinity. Hence, the Markovian approximation yields the same phenomena as
those described below Eq.(\ref{B-12}), which means that retardation effects are
neglected.
\section{\label{Sec:5}The atomic population of the excited state}
An excited atom relaxes to its ground state accompanied by the release of a
photon to the EM vacuum. In this hollow waveguide, the emitted photon
propagates along the positive and negative $z$ directions. Since the
termination of the waveguide imposes a hard-wall boundary condition on the
field which behaves as a perfect mirror, the photon traveling along the
negative $z$ axis is retroreflected to the atom, and re-excites the atom,
which leads to a non-Markovian type dynamics of the system.
In this section, we study the spontaneous emission dynamics involving the
retardation time for an atom located at $\vec{r}=\left( a/2,b/2,z_{0}\right)$
with $a=2b$. For the convenience of discussion, we denote the transversally
confined propagating modes which couple to the atom as TM$_{j}$ with $
j=\left( m,n\right) $ according to the ascending order of the cutoff
frequencies. By assuming that the transition frequency $\omega _{A}$ is far
away from the cutoff frequencies $\Omega _{j}$, we can expand the frequency $
\omega _{jk}$ around the transition frequency $\omega _{A}$ up to the linear
term
\begin{equation}
\omega _{jk}=\omega _{A}+v_{j}\left( k-k_{j0}\right) , \label{C-1}
\end{equation}
where $k_{j0}=\sqrt{\omega _{A}^{2}-\Omega _{j}^{2}}/c$ is determined by $
\omega _{jk_{j0}}=\omega _{A}$ and the group velocity
\begin{equation}
v_{j}\equiv \frac{d\omega _{jk}}{dk}|_{k=k_{j0}}=\frac{c\sqrt{\omega
_{A}^{2}-\Omega _{j}^{2}}}{\omega _{A}} \label{C-2}
\end{equation}
is different for different TM$_{j}$ guiding modes. We substitute Eq.(
\ref{C-1}) into integro-differential equation (\ref{B-4}). Integrating over
all wave vectors $k$ gives rise to a linear combination of $\delta \left(
t\pm \tau _{j}-\tau \right) $ and $\delta \left( t-\tau \right) $, where $
\tau _{j}=2z_{0}/v_{j}$ is the time that the emitted photon takes for the round
trip between the atom and the mirror in the given transverse mode $j$. We
approximately obtain a delay-differential equation
\begin{equation}
\partial _{t}\tilde{\varepsilon}\left( t\right) =-\sum_{j}\Gamma _{j}\left[
\tilde{\varepsilon}\left( t\right) +e^{i\varphi _{j}}\tilde{\varepsilon}
\left( t-\tau _{j}\right) \Theta \left( t-\tau _{j}\right) \right]
\label{C-3}
\end{equation}
for the probability amplitude that the atom at time $t$ is in the excited
state, where
\begin{subequations}
\label{C-4}
\begin{eqnarray}
\varphi _{j} &=&2k_{j0}z_{0}=\sqrt{\omega _{A}^{2}-\Omega _{j}^{2}}\frac{
2z_{0}}{c} \\
\Gamma _{j} &=&\frac{4d^{2}\Omega _{j}^{2}}{A\epsilon _{0}\omega _{A}v_{j}}
\sin ^{2}\frac{m\pi }{2}\sin ^{2}\frac{n\pi }{2}.
\end{eqnarray}
The first term on the right hand side of Eq.(\ref{C-3}) leads to the
exponential decay of the atom. The second term involves the time $\tau _{j}$
that the light needs to cover the distance atom-mirror-atom; it represents the
effect on the atom of the radiation that was emitted a time $\tau _{j}$ earlier
into the TM$_{j}$ mode and reflected back to the atom.
\subsection{SE dynamics in single mode}
In the frequency band between $\Omega _{11}$ and $\Omega _{31}$, the
waveguide is said to be single-moded. The atom with the transition frequency
$\omega _{A}\in \left( \Omega _{11},\Omega _{31}\right) $ only interacts
with the TM$_{11}$ ($j=1$) guiding mode, the delay-differential equation
reduces to
\end{subequations}
\begin{equation}
\partial _{t}\tilde{\varepsilon}\left( t\right) =-\Gamma _{1}\left[ \tilde{
\varepsilon}\left( t\right) +e^{i\varphi _{1}}\tilde{\varepsilon}\left(
t-\tau _{1}\right) \Theta \left( t-\tau _{1}\right) \right] . \label{C1-1}
\end{equation}
where $\Gamma _{1}=\Gamma $ is given in Eq.~(\ref{B-14}). For the case that the
retarded argument $\tau _{1}\rightarrow 0$, the memory effects inherent in
the system disappear. The amplitude of state $\left\vert e\right\rangle $
becomes
\begin{equation*}
\tilde{\varepsilon}\left( t\right) =\exp \left[ -\Gamma \left( 1+e^{i\varphi
_{1}}\right) t\right] .
\end{equation*}
The SE rate and the frequency shift are given by the real part $2\Gamma
\left( 1+\cos \varphi _{1}\right) $ and the imaginary part $\Gamma \sin \varphi
_{1}$, respectively. In the limit $\tau _{1}\rightarrow \infty $, the second
term of Eq.(\ref{C1-1}) vanishes. Since the waveguide becomes infinite, the
atomic population decays exponentially and the SE rate $\Gamma $ is
independent of the coordinate $z_{0}$.
\begin{figure}
\caption{(Color online) The norm $\left\vert \tilde{\varepsilon}\left( t\right)
\right\vert $ of the upper-state amplitude versus $\Gamma t$ in the single-mode
case, with delay $\Gamma \tau _{1}=0.1$ (a) and $\Gamma \tau _{1}=1$ (b).}
\label{fig:3}
\end{figure}
It can be seen from Eq.(\ref{C1-1}) that the time axis is divided into
intervals of length $\tau _{1}$. We can formally integrate Eq.(\ref{C1-1})
and change the dummy integration variable, and then substitute the result back
into the integrand. Proceeding indefinitely with this iteration, the time
behavior of the atomic upper-state amplitude reads
\begin{equation}
\tilde{\varepsilon}\left( t\right) =\sum_{l=0}^{\infty }\frac{\left( -\Gamma
e^{i\varphi _{1}}\right) ^{l}}{l!}e^{-\Gamma \left( t-l\tau _{1}\right)
}\left( t-l\tau _{1}\right) ^{l}\Theta \left( t-l\tau _{1}\right) .
\label{C1-2}
\end{equation}
A step character is presented in Eq.(\ref{C1-2}). For $t\in \left[ 0,\tau
_{1}\right] $, the atomic amplitude $\tilde{\varepsilon}\left( t\right)
=\exp \left( -\Gamma t\right) $ decays exponentially, which coincides with
the behavior of an excited atom in an infinite waveguide. The underlying
physics is that the atom requires at least the time $\tau _{1}$ to recognize
the mirror. For $t\in \left[ \tau _{1},2\tau _{1}\right] $, the emitted
radiation is reflected back to the atom and the $l=1$ term is included,
which gives rise to interference in the probability of finding the atom in the excited
state. In Fig.~\ref{fig:3}, we have plotted the norm $\left\vert \tilde{
\varepsilon}\left( t\right) \right\vert $ of the atomic amplitude versus $
\Gamma t$ with delay $\Gamma \tau _{1}=0.1$ (a) and $\Gamma \tau _{1}=1$
(b). The exponential decay of the atom inside an infinite waveguide is
plotted with the black solid line. When $\Gamma \tau _{1}\ll 1$, an
exponential decay law is found; however, the SE can be either increased or
decreased by the phase. The SE is completely suppressed when the phase $\varphi
_{1}=\left( 2n+1\right) \pi $. When $\Gamma \tau _{1}\geq 1$, the atom first
decays exponentially; after the atom recognizes the mirror, it displays a
behavior deviating from the exponential decay law, and a partial revival of
the atomic population can be found. It is the interference between the
emitted wave and the radiation wave reflected back to the atom which gives
the atom-mirror separation $z_{0}$ a significant influence on the atomic
dynamics via the phase. When the distance between the atom and the
termination is much larger (i.e., $\Gamma \tau _{1}\gg 1$), it is found
from Fig.~\ref{fig:5}a that there is also a partial revival of the atomic
population; however, the atom-mirror separation $z_{0}$ no longer plays a
role through the phase. In this case, the atom has already decayed to the ground state by
the time that the photon bounces back to the atom, so there is no emitted
wave to interfere with the wave reflected back to the atom, which
means that the atomic revival is due to the atom being partially re-excited
by the radiation. Since the light emitted in the positive direction has
departed from the atom, the probability that the atom is re-excited becomes
lower and lower.
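The step structure of Eq.(\ref{C1-2}) is easy to explore numerically. The following Python sketch (parameters arbitrary, for illustration only) evaluates the series solution and, as a cross check, integrates Eq.(\ref{C1-1}) directly with a simple Euler scheme that stores the delayed history; the two results should agree closely.
\begin{verbatim}
import cmath, math

Gamma, tau1, phi1 = 1.0, 1.0, math.pi / 3      # Gamma*tau1 = 1, arbitrary phase

def eps_series(t, terms=60):
    # sum_l (-Gamma e^{i phi1})^l / l! * (t - l tau1)^l * exp(-Gamma (t - l tau1))
    total = 0j
    for l in range(terms):
        s = t - l * tau1
        if s < 0:
            break
        total += ((-Gamma * cmath.exp(1j * phi1)) ** l / math.factorial(l)
                  * s ** l * math.exp(-Gamma * s))
    return total

def eps_euler(t, dt=1e-4):
    # direct Euler integration of the delay-differential equation
    steps = int(round(t / dt))
    lag = int(round(tau1 / dt))                # delay expressed in time steps
    hist = [1.0 + 0j]                          # eps(0) = 1
    for k in range(steps):
        cur = hist[-1]
        delayed = hist[k - lag] if k >= lag else 0j
        hist.append(cur - dt * Gamma * (cur + cmath.exp(1j * phi1) * delayed))
    return hist[-1]

print(abs(eps_series(2.5)), abs(eps_euler(2.5)))
\end{verbatim}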
\subsection{SE dynamics in multiple modes}
An excited atom radiates waves into the continua of all modes resonant with
the atom. When the cross-sectional area becomes larger, more modes are included in the
resonance, and the atomic dynamics is affected not only by the time $\tau
_{1}$ that light needs to bounce back and forth between the atom and the
termination in the TM$_{11}$ mode, but also by the other times $\tau _{j}$
required for a photon emitted by the atom to propagate in the TM$_{j}$ mode
and be reabsorbed by the atom. The definition of the delay time $\tau _{j}$ tells us
that $\tau _{j}<\tau _{j+1}$.
\begin{figure}
\caption{(Color online) The norm $\left\vert \tilde{\varepsilon}\left( t\right)
\right\vert $ of the upper-state amplitude as a function of $\Gamma t$ in the
two-mode case, with $\Gamma \tau _{1}=0.1$ (a) and $\Gamma \tau _{1}=1$ (b).}
\label{fig:4}
\end{figure}
In this section, we assume that the atomic transition frequency is in the
regime $\left[ \Omega _{31},\Omega _{51}\right] $, which means that only two
TM modes (i.e.\ TM$_{11}$ and TM$_{31}$) are in resonance with the atom, and the
delay-differential equation reduces to
\begin{eqnarray}
\partial _{t}\tilde{\varepsilon}\left( t\right) &=&-\left( \Gamma +\Gamma
_{2}\right) \tilde{\varepsilon}\left( t\right) -\Gamma e^{i\varphi _{1}}
\tilde{\varepsilon}\left( t-\tau _{1}\right) \Theta \left( t-\tau _{1}\right)
\notag \label{C2-1} \\
&&-\Gamma _{2}e^{i\varphi _{2}}\tilde{\varepsilon}\left( t-\tau _{2}\right)
\Theta \left( t-\tau _{2}\right) .
\end{eqnarray}
The space dependence enters via both the phases $\varphi _{1}$, $\varphi
_{2} $ and the delay times $\tau _{1},\tau _{2}$ of the different modes. If
both arguments $\tau _{1},\tau _{2}\rightarrow 0$, the amplitude of state $
\left\vert e\right\rangle $ becomes
\begin{equation}
\tilde{\varepsilon}\left( t\right) =\exp \left[ -\sum_{j=1}^{2}\Gamma
_{j}\left( 1+e^{i\varphi _{j}}\right) t\right] \label{C2-2}
\end{equation}
The exponent in the above equation consists of two parts, the SE rate $
\sum_{j=1}^{2}2\Gamma _{j}\left( 1+\cos \varphi _{j}\right) $ and the frequency
shift $\sum_{j=1}^{2}\Gamma _{j}\sin \varphi _{j}$. Compared to the single-mode
case, the SE rate is enhanced; however, the frequency shift can be
either increased or decreased due to the space dependence. In the limit $
\tau _{1},\tau _{2}\rightarrow \infty $, the second and third terms of Eq.(
\ref{C2-1}) vanish. The amplitude $\tilde{\varepsilon}\left( t\right)
=\exp \left[ -\left( \Gamma +\Gamma _{2}\right) t\right] $ shows that the
atomic population decays exponentially, $\Gamma +\Gamma _{2}$ is the SE rate
for an atom interacting with the continua of the TM$_{11}$ and TM$_{31}$
modes of an infinite waveguide, which is also independent of the coordinate $
z_{0}$. In the case that $\tau _{1}\rightarrow 0$, the delay-differential
equation reads
\begin{eqnarray}
\partial _{t}\tilde{\varepsilon}\left( t\right) &=&-\left( \Gamma +\Gamma
e^{i\varphi _{1}}+\Gamma _{2}\right) \tilde{\varepsilon}\left( t\right)
\label{C2-3} \\
&&-\Gamma _{2}e^{i\varphi _{2}}\tilde{\varepsilon}\left( t-\tau _{2}\right)
\Theta \left( t-\tau _{2}\right) \notag
\end{eqnarray}
Using the Laplace transformation and a geometric series expansion, the solution
reads
\begin{equation}
\tilde{\varepsilon}\left( t\right) =\sum_{l=0}^{\infty }\frac{\left( -\Gamma
_{2}e^{i\varphi _{2}}\right) ^{l}}{l!}e^{-\left( \Gamma +\Gamma e^{i\varphi
_{1}}+\Gamma _{2}\right) \left( t-l\tau _{2}\right) }\left( t-l\tau
_{2}\right) ^{l}\Theta \left( t-l\tau _{2}\right) . \label{C2-4}
\end{equation}
If the atom is located at $z_{0}$ satisfying $\varphi _{1}=\left(
2n+1\right) \pi $, the SE contributed by the TM$_{11}$ mode is
completely suppressed, and one obtains the SE dynamics of the emitted
photon propagating only via the continuum of the TM$_{31}$ mode. In the case
of finite $\tau _{1}$ and $\tau _{2}\rightarrow \infty $, the equation for the
upper-state amplitude becomes
\begin{equation}
\partial _{t}\tilde{\varepsilon}\left( t\right) =-\left( \Gamma +\Gamma
_{2}\right) \tilde{\varepsilon}\left( t\right) -\Gamma e^{i\varphi _{1}}
\tilde{\varepsilon}\left( t-\tau _{1}\right) \Theta \left( t-\tau
_{1}\right) . \label{C2-5}
\end{equation}
However, an atom inside a realistic waveguide cannot exhibit
the dynamics described by Eq.(\ref{C2-5}).
In Fig.~\ref{fig:4}, we have numerically plotted the amplitude $\left\vert
\tilde{\varepsilon}\left( t\right) \right\vert $ as a function of $\Gamma t$
with $\Gamma \tau _{1}=0.1$ (a) and $1$ (b). It can be seen that in the
interval $\left[ 0,\tau _{1}\right] $, the upper state population of the
atom decays exponentially with a rate $\Gamma +\Gamma _{2}$. After time $\tau
_{1}$, photons emitted by the atom are reflected back to the atom by the
mirror so that the atom-mirror distance has great influence on the SE
dynamics via the phases $\varphi _{j}$ and the delays $\tau _{j}$. The phase $\varphi _{1}$
first gives rise to a deviation from the decay with rate $\Gamma +\Gamma
_{2}$ in the interval $\left[ \tau _{1},\tau _{2}\right] $. As soon as $
t>\tau _{2}$, the wave propagating in the TM$_{31}$ mode is reflected back
to the atom by the mirror, and the phase $\varphi _{2}$ deviates the atomic dynamics
from that of finite $\tau _{1}$ and $\tau _{2}\rightarrow \infty $. In the
weak coupling case (see Fig.~\ref{fig:4}a), the excited state probability
decreases as the time increases. However, in the strong coupling case,
several peaks can be observed in Fig.~\ref{fig:4}b, which present partial
revivals of the atomic population when $\Gamma \tau _{1}\geq 1$. Compared to
the time evolution in Fig.~\ref{fig:3}, the SE is enhanced for a given phase
$\varphi _{1}$. Phase $\varphi _{2}$ and retardation time $\tau _{2}$ shift
the position of the peak and the dip.
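A minimal numerical sketch (arbitrary parameters, for illustration only) of the two-mode dynamics of Eq.(\ref{C2-1}) can be obtained with the same Euler scheme, each resonant TM$_{j}$ mode contributing its own rate, phase and delay:
\begin{verbatim}
import cmath, math

modes = [(1.0, math.pi / 3, 1.0),   # (Gamma_1, phi_1, tau_1) for TM_11, arbitrary values
         (0.6, math.pi / 2, 1.4)]   # (Gamma_2, phi_2, tau_2) for TM_31, tau_2 > tau_1

def evolve(t_max, dt=1e-3):
    steps = int(round(t_max / dt))
    lags = [int(round(tau / dt)) for _, _, tau in modes]
    hist = [1.0 + 0j]                # eps(0) = 1
    for k in range(steps):
        cur = hist[-1]
        deriv = 0j
        for (G, phi, _), lag in zip(modes, lags):
            delayed = hist[k - lag] if k >= lag else 0j
            deriv += -G * (cur + cmath.exp(1j * phi) * delayed)
        hist.append(cur + dt * deriv)
    return hist

print(abs(evolve(5.0)[-1]))          # |eps(t_max)|, the upper-state amplitude
\end{verbatim}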
\begin{figure}
\caption{(Color online) The probability $|\tilde{\varepsilon}\left( t\right)|^{2}$
as a function of $\Gamma t$ with delay $\Gamma \tau _{1}=10$ for (a) the
single-mode and (b) the double-mode case.}
\label{fig:5}
\end{figure}
In Fig.~\ref{fig:5}, we have numerically plotted the probability $|\tilde{
\varepsilon}\left( t\right)|^2$ as a function of $\Gamma t$ with delay $
\Gamma\tau _{1}=10$ for (a) the single-mode and (b) the double-mode case. When
the photon returns to the atom, the atom has already decayed to its
ground state, so it is impossible for interference to occur and the phases $
\varphi_j$ have no effect on the atomic dynamics. The peaks at times $t>\tau
_{1}$ are due to the reflected light being reabsorbed by the atom. By comparing the two
figures in Fig.~\ref{fig:5}, we find that the probability that the atom is
reexcited by the radiation wave is lower in the multiple-mode case than that
in the single-mode case, and more peaks appear in the interval $[m\tau
_{1},(m+1)\tau _{1}]$ for the multiple-mode case. Such observations are easy
to understand because there are more transverse modes to interact with the
atom.
\section{Discussion and conclusion}
We have studied the dynamics of an atom inside a hollow waveguide of
rectangular cross section $A=ab$, made of ideal perfectly conducting walls.
Such a 1D waveguide generally supports both TE and TM waves; an atom with its
dipole along the $z$ direction interacts only with the TM$_{mn}$ transverse
modes, and the coupling strength depends on the atomic location. A two-level
atom fixed at $\left( a/2,b/2,z_{0}\right) $ is considered, which decouples
from the fields of the TM modes with even integer $m$ or $n$. We have first
discussed the dependence of the SE rate on the atom-mirror separation and the
density of states within the Markovian approximation. 1) Since the density of
states vanishes below $\Omega _{11}$, the SE is completely suppressed when
$\omega _{A}<\Omega _{11}$; 2) since the density of states tends to infinity
when $\omega _{A}\rightarrow \Omega _{mn}$, the excited state population
decays very rapidly; 3) away from the cutoff frequencies, the SE rate is
increased when more TM modes are in resonance with the atom, and the
upper-state population can remain unchanged for all time in the single-mode
case with $z_{0}=n\lambda _{1A}+\lambda _{1A}/4$ (i.e., $\varphi _{1}=\left(
2n+1\right) \pi $). However, a distance $z_{0}\neq 0$ gives rise to the
time delay after which radiation emitted by the atom returns to the emitter. To
study this backaction of the ideal perfectly conducting mirror at $z=0$ on the
atom, we perform a linear approximation, valid for $\omega _{A}$ far from
the cutoff frequencies, in which the phases $\varphi _{j}$ and retardation
times $\tau _{j}$ are introduced into the dynamics of the atom via $\omega
_{A}$ and $z_{0}$. The SE dynamics is studied for both the single-mode and the
double-mode case. We find that 1) the upper-state population is less than its
initial value as long as $\tau _{j}\neq 0$; the discussion in the Markov
approximation corresponds to the case with retardation time $\tau
_{j}\rightarrow 0$. 2) The probability of finding the atom in its excited
state is lowered when more transverse modes are resonant with the atom. 3) In
the interval $\left[ 0,\tau _{1}\right] $, the atomic behavior is the same as
that of an excited atom in an infinite waveguide, and the SE rate is
independent of $z_{0}$. 4) For $t>\tau _{1}$, two situations should be
distinguished. For short retardation times, the interference between the
radiation wave and the emitted wave makes the dynamics strongly dependent on
$\varphi _{j}$. For long retardation times, the atom has already decayed to
its ground state when the photon returns to the atom, and the partial revivals
and collapses are due to the photon being reabsorbed and re-emitted by the atom.
\begin{acknowledgments}
This work was supported by NSFC Grants No. 11374095, No. 11422540, No.
11434011, No. 11575058; National Fundamental Research Program of China (the
973 Program) Grant No. 2012CB922103; Hunan Provincial Natural Science
Foundation of China Grants No. 11JJ7001.
\end{acknowledgments}
\end{document}
\begin{document}
\publicationdetails{18}{2016}{3}{3}{1302}
\maketitle
\begin{abstract}
Word $W$ is an \emph{instance} of word $V$ provided there is a homomorphism $\phi$ mapping letters to nonempty words so that $\phi(V) = W$. For example, taking $\phi$ such that $\phi(c)=fr$, $\phi(o)=e$ and $\phi(l)=zer$, we see that ``freezer'' is an instance of ``cool''.
Let $\II_n(V,[q])$ be the probability that a random length-$n$ word on the alphabet $[q] = \{1,2,\ldots,q\}$ is an instance of $V$. Having previously shown that $\lim_{n \rightarrow \infty} \II_n(V,[q])$ exists, we now calculate this limit for two Zimin words, $Z_2 = aba$ and $Z_3 = abacaba$.
\end{abstract}
\section{Introduction}
Our present interest is in words--not the linguistic units with lexical value, but rather strings of symbols or letters.
We are interested in words as abstract discrete structures.
In particular, we are investigating elements of a free monoid.
A monoid is an algebraic structure consisting of a set, an associative binary operation on the set, and an identity element.
A free monoid is defined over some generating set of elements, which we view as an alphabet of letters.
Its binary operation is simply concatenation, its elements--called free words--are all finite strings of letters, and its identity element is the empty word (generally denoted with $\varepsilon$ or $\lambda$).
Often, the operation of a monoid is called multiplication, so it is fitting that a ``subword'' of a free word is called a ``factor.''
For example, in the free monoid over alphabet $\{a,b,c,d,r\}$, the word $cadabra$ is a factor of $abracadabra$ because $abracadabra$ is the product of $abra$ and $cadabra$.
\subsection{Combinatorial Limit Theory}
In an era of massive technological and computational advances, we have large systems for transportation, communication, education, and commerce (to name a few examples).
We also possess massive quantities of information in every part of life.
Therefore, in many applications of discrete mathematics, the useful theory is that which is relevant to arbitrarily large discrete structures.
For example, graphs can be used to model a computer network, with each vertex representing a device and each edge a data connection between devices.
The most well-known computer network, the Internet, consists of billions of devices with constantly changing connections; one cannot simply create a database of all billion-vertex graphs and their properties.
We use the term ``combinatorial limit theory'' in general reference to combinatorial methods which help answer the following question: What happens to discrete structures as they grow large?
In the combinatorial limit theory of graphs, major recent developments include the flag algebras of \textcite{R-07} and the graph limits of Borgs, Chayes, Freedman, Lov\'asz, Schrijver, S\'os, Szegedy, Vesztergombi, etc. (see \cite{L-12}).
Given the fundamental reliance of these methods on graph homomorphisms and graph densities, we strive to apply the same ideas to free words, or henceforth, simply ``words.''
\subsection{Definitions} \label{words}
\begin{defn} \label{defn:word}
For a fixed set $\Sigma$, called an \emph{alphabet}, denote with $\Sigma^*$ the set of all finite words formed by concatenation of elements of $\Sigma$, called \emph{letters}.
Words in $\Sigma^*$ are called \emph{$\Sigma$-words}.
The set of length-$n$ $\Sigma$-words is denoted with $\Sigma^n$.
The \emph{empty word}, denoted $\varepsilon$, consisting of zero letters, is a $\Sigma$-word for any alphabet $\Sigma$.
\end{defn}
The set $\Sigma^*$, together with the associative binary operation of concatenation and the identity element $\varepsilon$, forms a free monoid.
We denote concatenation with juxtaposition.
Generally we use natural numbers or minuscule Roman letters as letters and majuscule Roman letters (especially $T,U,V,W,X,Y,$ and $Z$) to name words.
Majuscule Greek letters (especially $\Gamma$ and $\Sigma$) name alphabets, though for a standard $q$-letter alphabet, we frequently use the set $[q] = \{1, 2, \ldots, q \}$.
\begin{ex}
Alphabet $[3]$ consists of letters 1, 2, and 3.
The set of $[3]$-words is
\[\{1,2,3\}^* = \{\varepsilon, 1, 2, 3, 11, 12, 13, 21, 22, 23, 31, 32, 33, 111, 112, 113, 121, \ldots \}.\]
\end{ex}
\begin{defn} \label{defn:letter}
A word $W$ is formed from the concatenation of finitely many letters.
If letter $x$ is one of the letters concatenated to form $W$, we say $x$ \emph{occurs in} $W$, or $x \in W$.
For natural number $n \in \N$, an $n$-fold concatenation of word $W$ is denoted $W^n$.
The \emph{length} of word $W$, denoted $|W|$, is the number of letters in $W$, counting multiplicity.
$\L(W)$, the \emph{alphabet generated by} $W$, is the set of all letters that occur in $W$.
For $q \in \N$, word $W$ is \emph{$q$-ary} provided $|L(W)| \leq q$.
We use $||W||$ to denote the number of letter recurrences in $W$, so $||W|| = |W| - |L(W)|$.
\end{defn}
\begin{ex}
Let $W = bananas$.
Then $a,b \in W$, but $c \not\in W$.
Also $|W| = 7$, $L(W) = \{a,b,n,s\}$, and $||W|| = 3$.
For the empty word, we have $|\varepsilon| = 0$, $L(\varepsilon) = \emptyset$, and $||\varepsilon|| = 0$.
\end{ex}
\begin{defn} \label{defn:factor}
Word $W$ has $\binom{|W|+1}{2}$ (nonempty) \emph{substrings}, each defined by an integer pair $(i,j)$ with $0\leq i < j \leq |W|$.
Denote with $W[i,j]$ the word in the $(i,j)$-substring, consisting of $j-i$ consecutive letters of $W$, beginning with the $(i+1)$-th.
Word $V$ is a \emph{factor} of $W$, denoted $V \leq W$, provided $V = W[i,j]$ for some integers $i$ and $j$ with $0\leq i < j \leq |W|$; equivalently, $W = SVT$ for some (possibly empty) words $S$ and $T$.
\end{defn}
\begin{ex}
$nana \leq nana \leq bananas$, with $nana = nana[0,4] = bananas[2,6]$.
\end{ex}
\begin{defn}
For alphabets $\Gamma$ and $\Sigma$, every (monoid) homomorphism $\phi: \Gamma^* \rightarrow \Sigma^*$ is uniquely defined by a function $\phi:\Gamma \rightarrow \Sigma^*$.
We call a homomorphism \emph{nonerasing} provided it is defined by $\phi:\Gamma \rightarrow \Sigma^* \setminus \{\varepsilon\}$; that is, no letter maps to $\varepsilon$.
\end{defn}
\begin{ex}
Consider the homomorphism $\phi: \{b,n,s,u\}^* \rightarrow \{m,n,o,p,r,v\}^*$ defined by Table~\ref{table:exFunction}.
Then $\phi(sun) = moon$ and $\phi(bus) = vroom$.
\end{ex}
\begin{table}[ht]
\centering
\caption{Example of a nonerasing function.} \label{table:exFunction}
\begin{tabular}{c | c | c | c | c}
$x$ & $b$ & $n$ & $s$ & $u$\\ \hline
$\phi(x)$ & $vr$ & $n$ & $m$ & $oo$
\end{tabular}
\end{table}
\begin{defn} \label{defn:instance}
$U$ is an \emph{instance of $V$}, or a \emph{$V$-instance}, provided $U = \phi(V)$ for some nonerasing homomorphism $\phi$; equivalently,
\begin{itemize}
\item $V = x_0x_1 \cdots x_{m-1}$ where each $x_i$ is a letter;
\item $U = A_0A_1 \cdots A_{m-1}$ with each word $A_i \neq \varepsilon$ and $A_i = A_j$ whenever $x_i=x_j$.
\end{itemize}
$W$ \emph{encounters} $V$, denoted $V \preceq W$, provided $U \leq W$ for some $V$-instance $U$.
If $W$ fails to encounter $V$, we say $W$ \emph{avoids} $V$.
\end{defn}
To help distinguish the encountered word and the encountering word, ``pattern'' is elsewhere used to refer to $V$ in the encounter relation $V \preceq W$.
Also, an instance of a word is sometimes called a ``substitution instance'' and ``witness'' is sometimes used in place of encounter.
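As a concrete illustration of Definition~\ref{defn:instance}, the following short plain-Python sketch (ours, for illustration only; it assumes the letters are ordinary characters) decides whether a word $W$ is an instance of a word $V$ by translating $V$ into a regular expression with one capture group per distinct letter and a backreference for each recurrence.
\begin{lstlisting}[frame=single]
import re

def is_instance(W, V):
    # First occurrence of a letter of V becomes a nonempty capture
    # group; each later occurrence becomes a backreference.
    groups, pattern = {}, ''
    for x in V:
        if x in groups:
            pattern += '\\' + str(groups[x])
        else:
            groups[x] = len(groups) + 1
            pattern += '(.+?)'
    return re.fullmatch(pattern, W) is not None

assert is_instance('freezer', 'cool')   # phi(c)=fr, phi(o)=e, phi(l)=zer
assert not is_instance('ab', 'aa')      # no nonerasing phi maps 'aa' onto 'ab'
\end{lstlisting}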
\begin{defn} \label{def:unavoidable}
A word $V$ is \emph{unavoidable} provided, for any finite alphabet, there are only finitely many words that avoid $V$.
\end{defn}
The first classification of unavoidable words was by \textcite{BEM-79}. Three years later, Zimin published a fundamentally different classification of unavoidable words (\cite{Z-82} in Russian, \cite{Z-84} in English).
\begin{defn} \label{defn:Zimin}
Define the \emph{$n$-th Zimin word} recursively by $Z_0 := \varepsilon$ and, for $n \in \N$, $Z_{n+1} = Z_nx_nZ_n$. Using the English alphabet rather than indexed letters:
\[Z_1 = \textbf{a}, \quad Z_2 = a\textbf{b}a, \quad Z_3 = aba\textbf{c}aba, \quad Z_4 = abacaba\textbf{d}abacaba, \quad \ldots . \]
\end{defn}
Equivalently, $Z_n$ can be defined over the natural numbers as the word of length $2^n-1$ such that the $i$-th letter, $1 \leq i < 2^n$, is the 2-adic order of $i$.
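Both descriptions are easy to realise computationally; the short plain-Python sketch below (ours, for illustration only) builds $Z_n$ once by the recursion and once from the 2-adic orders.
\begin{lstlisting}[frame=single]
def zimin(n):
    # Z_0 is the empty word; Z_{k+1} = Z_k x_k Z_k with a fresh letter x_k.
    z = ''
    for k in range(n):
        z = z + chr(ord('a') + k) + z
    return z

def zimin_2adic(n):
    # The i-th letter (1 <= i < 2^n) is the 2-adic order of i.
    order = lambda i: (i & -i).bit_length() - 1
    return ''.join(chr(ord('a') + order(i)) for i in range(1, 2 ** n))

assert zimin(3) == zimin_2adic(3) == 'abacaba'
\end{lstlisting}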
\begin{thm}[\cite{Z-84}] \label{thm:Zimin}
A word $V$ with $n$ distinct letters is unavoidable if and only if $Z_n$ encounters $V$.
\end{thm}
With Zimin's concise characterization of unavoidable words, a natural combinatorial question follows: How long must a $q$-ary word be to guarantee that it encounters a given unavoidable word?
Define $\f(n,q)$ to be the smallest integer $M$ such that every $q$-ary word of length $M$ encounters $Z_n$.
In 2014, three preprints by different authors appeared, each independently proving bounds for $\f(n,q)$: \textcite{CR-14}, \textcite{T-14}, and \textcite{RS-15}.
\section{Asymptotic Probability of Being Zimin} \label{ASYMP}
\begin{defn} \label{defn:density}
Let $\mathbb{I}_n(V,q)$ be the probability that a uniformly randomly selected length-$n$ $q$-ary word is an instance of $V$.
That is,
\[
\II_n(V,q) =\frac{|\{W \in [q]^n \mid \phi(V)=W \mbox{ for some nonerasing homomorphism } \phi:{\rm L}(V)^* \rightarrow [q]^*\}|}{ q^{n}}.
\]
Denote $\II(V,q) = \lim_{n\rightarrow \infty}\mathbb{I}_n(V,q)$.
\end{defn}
\textcite{CR-15} prove that $\II(V,q)$ exists for any word $V$.
Moreover, they establish the following dichotomy for $q \geq 2$: $\II(V,q) = 0$ if and only if $V$ is doubled (that is, every letter in $V$ occurs at least twice).
Trivially, if $V$ is composed of $k$ distinct, nonrecurring letters, then $\II_n(V,[q])=1$ for $n\geq k$, so $\II(V,q) = 1$.
But if $V$ contains at least one recurring letter, it becomes a nontrivial task to compute $\II(V,q)$.
We have from previous work the following bounds for the instance probability of Zimin words.
\begin{cor}
For $n,q \in \Z^+$,
\[ q^{-2^n + n + 1} \leq \II(Z_n,q) \leq \prod_{j = 1}^{n-1} \frac{1}{q^{(2^j-1)} - 1}. \]
\end{cor}
\begin{proof}
For the lower bound, note that $||Z_n|| = |Z_n| - |\L(Z_n)| = (2^n-1) - (n)$.
Theorem~3.3 from \textcite{CR-15} tells us that for all $q \in \Z^+$ and nondoubled $V$, $\II(V,q) \geq q^{-||V||}$.
For the upper bound, observe that the $n$ letters occurring in $Z_n$ have the following multiplicities: $\ang{r_j = 2^j : 0 \leq j < n}$.
Since there is exactly one nonrecurring letter in $Z_n$, $r_0 = 2^0 = 1$, Theorem~4.14 from \textcite{R-15} provides an upper bound of $\prod_{j = 1}^{n-1} \frac{1}{q^{(r_j-1)} - 1}$.
\end{proof}
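For concrete parameters the bounds are straightforward to evaluate; a plain-Python sketch (ours) is given below, and its output may be compared with the values of Table~\ref{table:Z2Z3}.
\begin{lstlisting}[frame=single]
def zimin_bounds(n, q):
    # Corollary: q^(-2^n+n+1) <= I(Z_n,q) <= prod_{j=1}^{n-1} 1/(q^(2^j-1)-1).
    lower = float(q) ** (-2 ** n + n + 1)
    upper = 1.0
    for j in range(1, n):
        upper /= q ** (2 ** j - 1) - 1
    return lower, upper

print(zimin_bounds(2, 2))   # (0.5, 1.0),        compare I(Z_2,2) ~ 0.7322
print(zimin_bounds(3, 2))   # (0.0625, ~0.1429), compare I(Z_3,2) ~ 0.1194
\end{lstlisting}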
A nice property of these bounds is that they are asymptotically equivalent as $q \rightarrow \infty$.
For some specific $V$, we can do better.
Presently, we provide infinite series for computing the asymptotic instance probability $\II(V,q)$ for two Zimin words, $V = Z_2 = aba$ (Section~\ref{IZ2}) and $V = Z_3 = abacaba$ (Section~\ref{IZ3}).
Table~\ref{table:Z2Z3} below gives numerical approximations for $2 \leq q \leq 6$.
Our method also provides upper bounds on $\II(Z_n,q)$ for general $n$ (Section~\ref{IZn}).
\begin{table}[ht]
\centering
\caption{Approximate values of $\II(Z_2,q)$ and $\II(Z_3,q)$ for $2 \leq q \leq 6$.} \label{table:Z2Z3}
\begin{tabular}{c}
$\begin{array}{c | c | c | c | c | c | c }
q & 2 & 3 & 4 & 5 & 6 & \cdots \\ \hline
\II(Z_2,q) & 0.7322132 & 0.4430202 & 0.3122520 & 0.2399355 & 0.1944229 & \cdots\\ \hline
\II(Z_3,q) & 0.1194437 & 0.0183514 & 0.0051925 & 0.0019974 & 0.0009253 & \cdots\\
\end{array}$
\end{tabular}
\end{table}
\section{Calculating \texorpdfstring{$\II(Z_2,q)$}{the asymptotic instance probability of Z2}} \label{IZ2}
\begin{defn}
Nonempty word $V$ is a \emph{bifix} of word $W$ provided $W = VA = BV$ for some nonempty words $A$ and $B$; that is, $V$ is both a proper prefix and suffix of $W$.
Moreover, if bifix $V$ is an instance of word $Z$, then $V$ is a \emph{$Z$-bifix} of $W$.
If word $W$ has no bifixes, $W$ is \emph{bifix-free}.
If $W$ has no $Z$-bifix, $W$ is \emph{$Z$-bifix-free}.
\end{defn}
\begin{lem}
If word $W$ has a bifix, then it has a bifix of length at most $\lfloor |W|/2 \rfloor$.
\end{lem}
\begin{proof}
Suppose, for contradiction, that the minimal-length bifix of $W$ has length $k$ with $\lfloor |W|/2 \rfloor < k < |W|$. Then the two occurrences of this bifix overlap, and we can write $W = W_1W_2W_3$ where $W_1W_2 = W_2W_3$ is the bifix and $|W_1W_2| = k = |W_2W_3|$, so that $|W_2| = 2k - |W| > 0$. Since $W_2$ is a prefix of $W_2W_3$ and a suffix of $W_1W_2$, it is a bifix of $W$ with $|W_2| < k$, which contradicts our selection of the shortest bifix of $W$.
\end{proof}
Although some words are neither $Z_2$-instances nor bifix-free, the proportion of such words is asymptotically $0$.
Hence, $1-\II(Z_2,q)$ was previously computed by \textcite{N-73} as the asymptotic probability that a word is bifix-free.
Equivalently, in a paper of \textcite{GO-81} on the period, or overlap, of words, $1-\II(Z_2,q)$ was computed as the proportion of strings with no period.
Rather than restate these results, we reformulate them presently for completeness and as a warm-up for calculating $\II(Z_3,q)$.
Let $a_\ell = a_\ell^{(q)}$ be the number of bifix-free $q$-ary strings of length $\ell$. For $q=2$, this is sequence oeis.org/A003000; for $q=3$, oeis.org/A019308 \parencite{OEIS}.
\begin{lem}[\cite{N-73}, Theorem 1]
$a_\ell = a_\ell^{(q)}$ has the following recursive definition:
\begin{eqnarray*}
a_0 & = & 0;\\
a_1 & = & q;\\
a_{2k} & = & qa_{2k-1} - a_k;\\
a_{2k+1} & = & qa_{2k}.
\end{eqnarray*}
\end{lem}
\begin{proof}
Fix a $q$-letter alphabet.
Let $W = UV$ be a bifix-free word with $|U| = \ceil{\frac{|W|}{2}}$ and $|V| = \floor{\frac{|W|}{2}}$.
Suppose $UaV$ has a bifix for some letter $a$.
Then, by the lemma, $UaV$ has a bifix $B$ of length at most $\lfloor|UaV|/2\rfloor$.
If $|B| \leq |V|$, then $B$ is a prefix of $U$ and a suffix of $V$, hence a bifix of $W$, which is impossible since $W$ is bifix-free.
The only remaining possibility is that $|W|$ is odd and $B = U = aV$, in which case $UaV = (aV)^2$; note that $aV$ is itself bifix-free, since any bifix of $aV = U$ would also be a bifix of $W$.
Therefore, for every bifix-free word of length $2k$ there are $q$ bifix-free words of length $2k+1$, and for every bifix-free word of length $2k-1$ there are $q$ bifix-free words of length $2k$, with the exception of the length-$2k$ words that are squares of bifix-free words of length $k$.
\end{proof}
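The recursion is easily checked against direct enumeration; the following plain-Python sketch (ours, independent of the Sage code in Appendix~\ref{P3code}) does so for small lengths.
\begin{lstlisting}[frame=single]
from itertools import product

def is_bifix_free(w):
    # w has a bifix iff some proper nonempty prefix equals the suffix of the same length
    return not any(w[:k] == w[-k:] for k in range(1, len(w)))

def a_recursive(q, N):
    A = [0, q]
    for n in range(2, N + 1):
        A.append(q * A[-1] - (A[n // 2] if n % 2 == 0 else 0))
    return A

def a_brute(q, n):
    return sum(1 for w in product(range(q), repeat=n) if is_bifix_free(w))

A = a_recursive(2, 10)
assert all(A[n] == a_brute(2, n) for n in range(1, 11))
\end{lstlisting}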
\begin{thm} \label{thm:IZ2}
For $q \geq 2$,
\begin{eqnarray*}
\II(Z_2,q) &=& \sum_{j=0}^{\infty} \frac{(-1)^jq^{\left(1-2^{j+1}\right)}}{\prod_{k=0}^{j} \left(1 - q^{\left(1-2^{k+1}\right)}\right)}.
\end{eqnarray*}
\end{thm}
\begin{proof}
Since $a_\ell = a_\ell^{(q)}$ counts bifix-free words, and every $Z_2$-instance has a unique bifix-free bifix (namely its shortest bifix), the number of $q$-ary words of length $M$ that are $Z_2$-instances is, without double-counting,
\[\sum_{\ell=0}^{\lceil M/2 \rceil -1} a_\ell q^{M - 2\ell},\]
so the proportion of $q$-ary words of length $M$ that are $Z_2$-instances is
\[\frac{1}{q^M} \sum_{\ell=0}^{\lceil M/2 \rceil -1} a_\ell q^{M - 2\ell} = \sum_{\ell=0}^{\lceil M/2 \rceil -1} \frac{a_\ell}{q^{2\ell}}.\]
Therefore $\II(Z_2,q) = f(1/q^2)$, where $f(x) = f^{(q)}(x)$ is the generating function for $\{a_\ell\}_{\ell = 0}^\infty$: \[f(x) = \sum_{\ell=0}^{\infty} a_\ell x^\ell.\]
From the recursive definition of $a_\ell$, we obtain the functional equation
\begin{eqnarray} \label{Z2func}
f(x) = qx + qxf(x) - f(x^2).
\end{eqnarray}
Solving \eqref{Z2func} for $f(x)$ gives \[f(x) = \frac{qx - f(x^2)}{1-qx} = \cdots = \sum_{j=0}^{\infty} \frac{(-1)^jqx^{2^j}}{\prod_{k=0}^{j} (1 - qx^{2^k})}.\]
\end{proof}
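Numerically, the series converges very quickly; the sketch below (plain Python, ours) evaluates a partial sum of the series and, as a cross-check, a truncation of $\sum_\ell a_\ell q^{-2\ell}$, both of which reproduce the value $\II(Z_2,2)\approx 0.73221$ of Table~\ref{table:Z2Z3}.
\begin{lstlisting}[frame=single]
def I_Z2_series(q, terms=10):
    # Partial sum of the series in the theorem.
    total, den = 0.0, 1.0
    for j in range(terms):
        den *= 1 - float(q) ** (1 - 2 ** (j + 1))
        total += (-1) ** j * float(q) ** (1 - 2 ** (j + 1)) / den
    return total

def I_Z2_recursion(q, N=60):
    # Same limit via sum_l a_l q^(-2l), using the recursion for a_l.
    A = [0, q]
    for n in range(2, N + 1):
        A.append(q * A[-1] - (A[n // 2] if n % 2 == 0 else 0))
    return sum(A[l] * float(q) ** (-2 * l) for l in range(N + 1))

print(I_Z2_series(2), I_Z2_recursion(2))   # both ~0.7322132
\end{lstlisting}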
\begin{cor} \label{cor:IZ2}
For $q \geq 2$:
\[\frac{1}{q} < \II(Z_2,q) < \frac{1}{q-1}.\]
Moreover, as $q \rightarrow \infty$,
\[\II(Z_2,q) = \frac{1}{q-1} - \frac{1+o(1)}{q^3}.\]
\end{cor}
\begin{proof}
The lower bound follows from the fact that a word of length $M>2$ is a $Z_2$-instance when the first and last character are the same.
This occurrence has probability $1/q$.
Note that $f^{(q)}(q^{-2})$ is an alternating series.
Moreover, the terms in absolute value are monotonically approaching 0; the routine proof of monotonicity can be found in the appendices (Lemma~\ref{lemF}).
Hence, the partial sums provide successively better upper and lower bounds:
\begin{eqnarray*}
f^{(q)}\left(\frac{1}{q^2}\right) & = & \sum_{j=0}^{\infty} \frac{(-1)^j\left(q^{1-2^{j+1}}\right)}{\prod_{k=0}^{j} \left(1 - \left(q^{1-2^{k+1}}\right)\right)};\\
\\
f^{(q)}\left(\frac{1}{q^2}\right) &>& \sum_{j=0}^{1} \frac{(-1)^j\left(q^{1-2^{j+1}}\right)}{\prod_{k=0}^{j} \left(1 - \left(q^{1-2^{k+1}}\right)\right)}\\
& = & \frac{1/q}{1-1/q} - \frac{1/q^3}{(1 - 1/q)(1 - 1/q^3)}\\
& = & \frac{1}{q-1} - \frac{1+o(1)}{q^3};\\
\\
f^{(q)}\left(\frac{1}{q^2}\right) & < & \sum_{j=0}^{2} \frac{(-1)^jq\left(\frac{1}{q^2}\right)^{2^j}}{\prod_{k=0}^{j} \left(1 - q\left(\frac{1}{q^2}\right)^{2^k}\right)}\\
& = &\frac{1}{q-1} -\frac{1+o(1)}{q^3} + \frac{1/q^5}{(1 - 1/q)(1 - 1/q^3)(1 - 1/q^5)}\\
& = &\frac{1}{q-1} -\frac{1+o(1)}{q^3} +\frac{O(1)}{q^5}.
\end{eqnarray*}
\end{proof}
\begin{table}[ht]
\centering
\caption{Approximate values of $\II(Z_2,q)$ for $2 \leq q \leq 8$.}
\def1.3{1.3}
\begin{tabular}{c | c c c c c c c c c}
$q$ & 2 & 3 & 4 & 5 & 6 & 7 & 8 \\ \hline
$q^{-1}$ & 0.50000 & .33333 & .25000 & .20000 & .16667 & .14286 & .12500\\ \hline
$\II(Z_2,q)$ & 0.73221 & .44302 & .31225 & .23994 & .19442 & .16326 & .14062\\ \hline
$(q-1)^{-1} - q^{-3}$ & 0.87500 & .46296 & .31771 & .24200 & .19537 &.16375 &.14090\\ \hline
$(q-1)^{-1}$ & 1.00000 & .50000 & .33333 & .25000 & .20000 & .16667 & .14286\\
\end{tabular}
\end{table}
\section{Calculating \texorpdfstring{$\II(Z_3,q)$}{the asymptotic instance probability of Z3}} \label{IZ3}
We will use similar methods to compute $\II(Z_3,q)$.
To avoid unnecessary subscripts and superscripts, assume throughout this section that we are using a fixed alphabet with $q>1$ letters, unless explicitly stated otherwise.
Since $Z_2$ has more interesting structure than $Z_1$, there are more cases to consider in developing the necessary recursion.
\begin{lem}
\label{P3cases}
Fix bifix-free word $L$.
Let $W = LAL$ be a $Z_2$-instance with a $Z_2$-bifix. Then $LAL$ can be written in exactly one of the following ways:
\begin{enumerate}[$\<$i$\>$]
\item $LAL = LBLCLBL$ with $LBL$ the shortest $Z_2$-bifix of $W$ and $|C|>0$;
\item $LAL = LBLLBL$ with $LBL$ the shortest $Z_2$-bifix of $W$;
\item $LAL = LBLBL$ with $LBL$ the shortest $Z_2$-bifix of $W$;
\item $LAL = LLFLLFLL$ with $LLFLL$ the shortest $Z_2$-bifix of $W$;
\item $LAL = LLLL$.
\end{enumerate}
\end{lem}
\begin{proof}
With some thought, the reader should recognize that the five listed cases are in fact mutually exclusive.
The proof that these are the only possibilities follows.
Given that $W$ has a $Z_2$-bifix and $L$ is bifix-free, it follows that $W$ has a $Z_2$-bifix $LBL$ for some nonempty $B$.
Let $LBL$ be chosen of minimal length. We break this proof into nine cases depending on the lengths of $L$ and $LBL$ (Figure~\ref{overlap}).
Set $m = |W|$, $\ell = |L|$, and $k = |LBL|$.
\begin{figure}\label{overlap}
\end{figure}
\begin{enumerate}[\text{Case} (1):]
\item $2k < m$. This is $\< i\>$.
\item $2k = m$. This is $\<ii\>$.
\item $m < 2k < m + \ell$. In $LAL$, the first and last occurrences of $LBL$ overlap by a length strictly between $0$ and $\ell$. This is impossible, since $L$ is bifix-free.
\item $2k = m + \ell$. This is $\<iii\>$.
\item $m + \ell < 2k < m + 2\ell$. The first and last occurrences of $LBL$ overlap by a length strictly between $\ell$ and $2\ell$. This is impossible, since $L$ is bifix-free.
\item $m + 2\ell = 2k < 2(m - \ell)$. $LAL = L(DL)(LE)L$ where $DL = B = LE$. Thus $L$ is a bifix of $B$, so $LAL = LLFLLFLL$ where $B = LFL$. If $|F|>0$, this is $\<iv\>$. If $|F|=0$, then $LAL = LLLLLL$. But this contradicts the minimality of $LBL$, since $LLLLLL$ has $Z_2$-bifix $LLL$, which is shorter than $LBL = LLLL$.
\item $m + 2\ell < 2k < 2(m - \ell)$. $LAL = LDLELD'L$ where $DLE = B = ELD'$.
Since $EL$ is a prefix of $B$, $LEL$ is a prefix of $LAL$.
Likewise, since $LE$ is a suffix of $B$, $LEL$ is a suffix of $LAL$.
Therefore, $LEL$ is a bifix of $LAL$ and $|LEL| < |LDLEL| = |LBL|$, contradicting the minimality of $LBL$.
\item $k = m - \ell$. $LAL = LLCLL$ where $LC = B = CL$. If $|C|=0$, this is $\<v\>$. Otherwise, $LCL$ is a bifix of $LAL$, contradicting the minimality of $LBL$.
\item $m - \ell < k < m$. The first and last occurrences of $LBL$ overlap by a length strictly between $k-\ell$ and $k$. This is impossible, since $L$ is bifix-free.
\end{enumerate}
\end{proof}
For a fixed bifix-free word $L$ of length $\ell$, define $b_m^\ell$ to count the $q$-ary $Z_2$-instances of length $m$ that have bifix $L$ and are $Z_2$-bifix-free.
Then
\begin{equation} \label{eqn:IZ3}
\II(Z_3,q) = \sum_{\ell = 1}^{\infty} \left( a_\ell \sum_{m = 1}^\infty b_m^\ell q^{-2m} \right).
\end{equation}
In order to form a recursive definition of $b_n$ as we did for $a_n$, we now introduce two new terms. Let $AB$ be a word of length $n$ with $|A| = \ceil{n/2}$ and $|B| = \floor{n/2}$. Then $AB$ has $q$ length-$(n+1)$ \textit{children} of the form $AxB$, each having $AB$ as its \textit{parent}. In this way every nonempty word has exactly $q$ children and exactly one parent, which establishes the $1{:}q$ ratio of words of length $n$ to words of length $n+1$. The set of a word's children, together with successive generations of progeny, we refer to as that word's \textit{descendants}.
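A minimal plain-Python sketch of the parent/child relation (ours; the example word is chosen for illustration only):
\begin{lstlisting}[frame=single]
def children(word, alphabet):
    # Insert one letter after the first ceil(n/2) letters: A x B with |A| = ceil(n/2).
    mid = (len(word) + 1) // 2
    return [word[:mid] + x + word[mid:] for x in alphabet]

def parent(word):
    # Delete that letter again; in a child of length n+1 it sits at index floor((n+1)/2).
    mid = len(word) // 2
    return word[:mid] + word[mid + 1:]

w = '0110'
assert children(w, '01') == ['01010', '01110']
assert all(parent(c) == w for c in children(w, '01'))
\end{lstlisting}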
\begin{thm} \label{P3}
$b_n^\ell = c_n^\ell + d_n^\ell$ where $c_n=c_n^\ell$ and $d_n=d_n^\ell$ are defined recursively as follows:
\begin{eqnarray*}
\text{For even }\ell:\\
c_1 = \cdots = c_{2\ell} & = & 0,\\
c_{2\ell+1} & = & q,\\
c_{4\ell} & = & qc_{4\ell-1} - (c_{5\ell/2} + 1),\\
c_{5\ell} & = & qc_{5\ell - 1} - (c_{5\ell/2} + c_{3\ell} - 1),\\
c_{5\ell+1} & = & q(c_{5\ell} + c_{3\ell} - 1),\\
c_{6\ell} & = & qc_{6\ell-1} - (c_{3\ell} - 1 + c_{5\ell/2});\\
c_{2k} & = & qc_{2k-1} - (c_k + c_{k + \ell/2}) \text{ for } k>\ell,k \not\in\{2\ell,5\ell/2,3\ell\},\\
c_{2k+1} & = & q(c_{2k} + c_{k + \ell/2}) \text{ for } k>\ell, k \neq 5\ell/2,\\
d_1 = \cdots = d_{4\ell} & = & 0,\\
d_{4\ell+1} & = & q,\\
d_{5\ell} & = & qd_{5\ell-1} - 1,\\
d_{5\ell+1} & = & q(d_{5\ell} + 1),\\
d_{6\ell} & = & qd_{6\ell-1} - 1,\\
d_{2k} & = & qd_{2k-1} - (d_k + d_{k+\ell} + d_{k + \ell/2}) \text{ for } k>2\ell,k \not\in \{5\ell/2,3\ell\},\\
d_{2k+1} & = & q(d_{2k} + d_{k+\ell} + d_{k + \ell/2}) \text{ for } k\geq 2\ell, k \neq 5\ell/2.\\
\text{For odd }\ell>1:\\
c_1 = \cdots = c_{2\ell} & = & 0,\\
c_{2\ell+1} & = & q,\\
c_{4\ell} & = & q\left(c_{4\ell-1} + c_{\floor{\frac{5\ell}{2}}}\right) - (c_{2\ell} +1),\\
c_{5\ell} & = & qc_{5\ell - 1} - (c_{3\ell} - 1),\\
c_{5\ell+1} & = & q(c_{5\ell} + c_{3\ell} - 1) - c_{\ceil{\frac{5\ell}{2}}},\\
c_{6\ell} & = & q\left(c_{6\ell-1} + c_{\floor{\frac{7\ell}{2}}}\right) - (c_{3\ell} -1),\\
c_{2k} & = & q\left(c_{2k-1} + c_{k+\floor{\frac{\ell}{2}}}\right) - c_k; k>\ell,k \not\in \left\{2\ell,\ceil{\frac{5\ell}{2}},3\ell\right\},\\
c_{2k+1} & = & qc_{2k} - c_{k+\ceil{\frac{\ell}{2}}}; k>\ell,k \neq \floor{\frac{5\ell}{2}};\\
\end{eqnarray*}
\\
\begin{eqnarray*}
d_1 = \cdots = d_{4\ell} & = & 0,\\
d_{4\ell+1} & = & q,\\
d_{5\ell} & = & qd_{5\ell-1} - 1,\\
d_{5\ell+1} & = & q(d_{5\ell} + 1),\\
d_{6\ell} & = & qd_{6\ell-1} - 1,\\
d_{2k} & = & q\left(d_{2k-1}+ d_{k + \floor{\frac{\ell}{2}}}\right) - (d_k + d_{k+\ell}); k>2\ell,k\not\in \left\{\ceil{\frac{5\ell}{2}},3\ell\right\},\\
d_{2k+1} & = & q\left(d_{2k} + d_{k+\ell}\right) - d_{k + \ceil{\frac{\ell}{2}}}; k> 2\ell, k \neq \floor{\frac{5\ell}{2}}.\\
\text{For }\ell=1:\\
c_1 = c_2 & = & 0,\\
c_3 & = & q,\\
c_4 & = & qc_3 - 1,\\
c_5 & = & qc_4 - (c_3 - 1),\\
c_{6} & = & q(c_5 + c_3 - 1) - (c_3 - 1),\\
c_{2k} & = & q(c_{2k-1} + c_k) - c_k; k>3,\\
c_{2k+1} & = & qc_{2k} - c_{k+1}; k>2;\\
d_1 = d_2 = d_3 = d_4 & = & 0,\\
d_5 & = & q - 1,\\
d_{6} & = & q(d_5+1) - 1,\\
d_{2k} & = & q(d_{2k-1}+ d_k) - (d_k + d_{k+1}); k>3,\\
d_{2k+1} & = & q(d_{2k} + d_{k+1}) - d_{k + 1}; k> 2.
\end{eqnarray*}
\end{thm}
\begin{proof}
Fix a bifix-free word $L$ of length $\ell$.
The full recursion is too messy to prove all at once, so we build up to it in stages.
Within each stage, $\approx$ indicates an incomplete definition.
Example word trees with small $q$ and short $L$ are found in Appendix~\ref{TREES}.
\textbf{Stage I}\\
Since $L$ is bifix free, any $Z_2$-instance with $L$ as a bifix has to be of greater length than $2\ell.$ Thus we have $b_1 = \cdots = b_{2\ell} = 0$.
The only such words of length $2\ell+1$ are of the form $LxL$ for some letter $x$, therefore, $b_{2\ell+1} = q$.
Every word of length $n>2\ell+1$ has $L$ as a bifix if and only if its parent has $L$ as a bifix.
This is why, for $k>\ell$, the definition of $b_{2k}$ includes the term $qb_{2k-1}$, and the definition of $b_{2k+1}$ includes the term $qb_{2k}$. If $b_n$ were counting $Z_2$-instances with bifix $L$, we would be done.
However, we do not want $b_n$ to count words that have a $Z_2$-bifix.
Thus, we must deal with each of the 5 cases listed in Lemma~\ref{P3cases}.
First, let us deal with case $\ang{ii}$: $LAL = LBLLBL$ with $LBL$ the shortest $Z_2$-bifix of $LAL$.
The number of these of length $2k$, with $k > \ell$, is $b_k$.
Therefore, in the definition of $b_{2k}$, we subtract $b_k$.
Conveniently, the descendants of case-$\ang{ii}$ words are precisely words of case $\ang{i}$.
Therefore, we have accounted for two cases at once.
Next, let us look at case $\ang{iii}$: $LAL = LBLBL$ with $LBL$ the shortest $Z_2$-bifix of $LAL$.
For the moment, assume $|L| = \ell$ is even. Then $|LBLBL|$ is even.
The number of such words of length $2k$, with $k > \ell$, is $b_{k+\ell/2}$.
We want to exclude words of this form, but we do not necessarily want to exclude their children.
Therefore, in the definition of $b_{2k}$ we subtract $b_{k+\ell/2}$, but then we add $qb_{k+\ell/2}$ in the definition of $b_{2k+1}$.
Now we look at when $|L|$ is odd, so $|LBLBL|$ is odd.
The number of such words of length $2k+1$, with $k > \ell$, is $b_{k+\ceil{\ell/2}}$.
Therefore, in the definition of $b_{2k+1}$ we subtract $b_{k+\ceil{\ell/2}}$, but then we add $qb_{(k - 1) +\ceil{\ell/2}} = qb_{k + \floor{\ell/2}}$ in the definition of $b_{(2(k-1)+1) + 1} = b_{2k}$.
Our work so far renders the following tentative definition of $b_n$.
\begin{eqnarray*}
\text{For even }\ell:\\
b_1 = \cdots = b_{2\ell} & = & 0,\\
b_{2\ell+1} & = & q,\\
b_{2k} & \approx & qb_{2k-1} - (b_k + b_{k + \ell/2}) \text{ for } k>\ell,\\
b_{2k+1} & \approx & q(b_{2k} + b_{k + \ell/2}) \text{ for } k>\ell.\\
\text{For odd }\ell:\\
b_1 = \cdots = b_{2\ell} & = & 0,\\
b_{2\ell+1} & = & q,\\
b_{2k} & \approx & q(b_{2k-1} + b_{k+\floor{\ell/2}}) - b_k \text{ for } k>\ell,\\
b_{2k+1} & \approx & qb_{2k} - b_{k+\lceil\ell/2\rceil} \text{ for } k>\ell.
\end{eqnarray*}
We continue with case $\ang{iv}$: $LAL = LLFLLFLL$ with $LLFLL$ the shortest $Z_2$-bifix of $LAL$.
Note that $|LLFLLFLL|$ is even.
It would appear that the number of such words of length $2k$ would be $b_{k - \ell}$ (counting words of the form $LFL$), which we could deal with in the same fashion as we did for case $\ang{iii}$.
However, when counting words of the form $LFL$, we do not want words of the form $LLGLL$, because $LLFLLFLL = LLLGLLLLGLLL$ is already accounted for in case $\ang{i}$.
\textbf{Stage II}\\
To address this issue, we will define two different recursions.
Let $d_n$ count the $Z_2$-instances of the form $LLALL$ that are $Z_2$-bifix free.
Let $c_n$ count all other $Z_2$-instances of the form $LAL$ that are $Z_2$-bifix free. Therefore, $b_n = c_n + d_n$ by definition.
As with $b_n$, we quickly see that $c_n = 0$ for $n\leq 2\ell$ and $c_{2\ell+1} = q$.
Now the shortest words counted by $d_n$ are of the form $LLxLL$ for some letter $x$, so $d_n = 0$ for $n\leq 4\ell$ and $d_{4\ell+1} = q$.
To deal with cases $\ang{i}$ and $\ang{ii}$, we can do the same things as before, but recognizing that $LL$ is a bifix of $LBLLBL$ if and only if $LL$ is a bifix of $LBL$.
Therefore, subtract $c_k$ in the definition of $c_{2k}$ and subtract $d_k$ in the definition of $d_{2k}$ (both for $k>\ell$).
We also deal with case $\ang{iii}$ as before, recognizing that $LL$ is a bifix of $LBLBL$ if and only if $LL$ is a bifix of $LBL$.
For even $\ell$: subtract $c_{k+\ell/2}$ in the definition of $c_{2k}$ and add $qc_{k+\ell/2}$ in the definition of $c_{2k+1}$; subtract $d_{k+\ell/2}$ in the definition of $d_{2k}$ and add $qd_{k+\ell/2}$ in the definition of $d_{2k+1}$.
For odd $\ell$: subtract $c_{k+\ceil{\ell/2}}$ in the definition of $c_{2k+1}$ and add $qc_{k+\floor{\ell/2}}$ in the definition of $c_{2k}$; subtract $d_{k+\ceil{\ell/2}}$ in the definition of $d_{2k+1}$ and add $qd_{k+\floor{\ell/2}}$ in the definition of $d_{2k}$.
Having split $b_n$ into $c_n$ and $d_n$, we can address case $\ang{iv}$: $LAL = LLFLLFLL$ with $LLFLL$ the shortest $Z_2$-bifix of $LAL$.
These words are counted by $d_n$, not by $c_n$, and there are $d_{k+\ell}$ such words of length $2k$.
Therefore, we subtract $d_{k+\ell}$ in the definition of $d_{2k}$ and add $qd_{k+\ell}$ in the definition of $d_{2k+1}$.
This brings us to the following tentative definitions of $c_n$ and $d_n$.
\begin{eqnarray*}
\text{For even }\ell:\\
c_1 = \cdots = c_{2\ell} & = & 0,\\
c_{2\ell+1} & = & q,\\
c_{2k} & \approx & qc_{2k-1} - (c_k + c_{k + \ell/2}),\\
c_{2k+1} & \approx & q(c_{2k} + c_{k + \ell/2});\\
d_1 = \cdots = d_{4\ell} & = & 0,\\
d_{4\ell+1} & = & q,\\
d_{2k} & \approx & qd_{2k-1} - (d_k + d_{k+\ell} + d_{k + \ell/2}),\\
d_{2k+1} & \approx & q(d_{2k} + d_{k+\ell} + d_{k + \ell/2}).\\
\text{For odd }\ell:\\
c_1 = \cdots = c_{2\ell} & = & 0,\\
c_{2\ell+1} & = & q,\\
c_{2k} & \approx & q(c_{2k-1} + c_{k+\floor{\ell/2}}) - c_k,\\
c_{2k+1} & \approx & qc_{2k} - c_{k+\lceil\ell/2\rceil};\\
d_1 = \cdots = d_{4\ell} & = & 0,\\
d_{4\ell+1} & \approx & q,\\
d_{2k} & \approx & q(d_{2k-1}+ d_{k + \lfloor\ell/2\rfloor}) - (d_k + d_{k+\ell}),\\
d_{2k+1} & \approx & q(d_{2k} + d_{k+\ell}) - d_{k + \lceil\ell/2\rceil}.
\end{eqnarray*}
\textbf{Stage III}\\
Next, let us deal with case $\ang{v}$: $LLLL$.
We merely need to subtract 1 in the definition of $c_{4\ell}$.
Since all of the words counted by $d_n$ are descendants of $LLLL$, this is what prevents overlap of the words counted by $c_n$ and $d_n$.
There was a small omission in the previous stage.
When dealing with cases $\ang{i}$ and $\ang{ii}$, we pointed out that $LL$ is a bifix of $LBLLBL$ if and only if $LL$ is a bifix of $LBL$; this was a true and important observation.
The one problem is that $LLL$ has $LL$ as a bifix but is not of the form $LLALL$.
Therefore, $LLLLLL$ was ``removed'' in the definition of $c_{6\ell}$ when it should have been ``removed'' from $d_{6\ell}$.
We must account for this by adding 1 in the definition of $c_{6\ell}$ and subtracting 1 in the definition of $d_{6\ell}$.
Similarly, in dealing with case $\ang{iii}$, we ``removed'' $LLLLL$ in the definition of $c_{5\ell}$ and ``replaced'' its children in the definition of $c_{5\ell+1}$. These should have happened to $d_n$.
Therefore, we add 1 and subtract $q$ in the definitions of $c_{5\ell}$ and $c_{5\ell+1}$, respectively, then subtract 1 and add $q$ in the definitions of $d_{5\ell}$ and $d_{5\ell+1}$, respectively.
Since $LLL$ does not cause any trouble with case $\ang{iv}$, we are done building the recursive definition for even $\ell$ as found in the theorem statement.
\textbf{Stage IV}\\
The recursion for odd $\ell$ has the additional caveat that $\ell \neq 1$. When $\ell=1$, there exist conflicts in the recursive definitions: $4\ell + 1 = 5\ell$ and $5\ell+1 = 6\ell$. After consolidating the ``adjustments'' for these cases, we get the definition for $\ell=1$ as appears in the theorem statement.
\end{proof}
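Before putting the recursion to use, it can be cross-checked against direct enumeration for short bifix-free words $L$; the following plain-Python sketch (ours, feasible only for very small $m$ and $q$) confirms, for example, the values $b_3^1=2$, $b_4^1=3$ and $b_5^1=6$ produced by the $\ell=1$ recursion with $q=2$.
\begin{lstlisting}[frame=single]
from itertools import product

def is_Z2_instance(w):
    # w = XYX with X and Y nonempty, i.e. some bifix of length k satisfies 2k < |w|
    return any(w[:k] == w[-k:] for k in range(1, (len(w) + 1) // 2))

def has_Z2_bifix(w):
    # some proper bifix of w is itself a Z_2-instance
    return any(w[:k] == w[-k:] and is_Z2_instance(w[:k]) for k in range(1, len(w)))

def b_brute(L, q, m):
    # count length-m q-ary Z_2-instances with bifix L that are Z_2-bifix-free
    alphabet = '0123456789'[:q]
    count = 0
    for mid in product(alphabet, repeat=m - 2 * len(L)):
        w = L + ''.join(mid) + L
        if not has_Z2_bifix(w):
            count += 1
    return count

assert [b_brute('0', 2, m) for m in (3, 4, 5)] == [2, 3, 6]
\end{lstlisting}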
With our recursively defined sequences $a_n$ and $b_n$, the latter in terms of $c_n$ and $d_n$, we are now able to formulate Theorem~\ref{thm:IZ2} for $Z_3$.
\begin{thm} \label{thm:IZ3}
For integers $q \geq 2$,
\begin{eqnarray*}
\II(Z_3,q) & = & \sum_{\ell = 1}^{\infty} a_\ell \left(\sum_{i = 0}^{\infty}(G(i) + H(i))\right),
\end{eqnarray*}
where
\begin{eqnarray*}
G(i) = G_\ell^{(q)}(i) &=& \frac{ (-1)^i r\!\left(q^{-2^{i+1}}\right) \prod_{j = 0}^{i-1} s\!\left(q^{-2^{j+1}}\right)}{ \prod_{k = 0}^i \left(1 - q^{1-2^{k+1}}\right)};\\
r(x) = r_\ell^{(q)}(x) & = & qx^{2\ell+1} - x^{4\ell} + x^{5\ell} - qx^{5\ell+1} +x^{6\ell};\\
s(x) = s_\ell^{(q)}(x) & = & 1 - qx^{1-\ell} + x^{-\ell};\\
H(i) = H_\ell^{(q)}(i) &=& \frac{ (-1)^i u\!\left(q^{-2^{i+1}}\right) \prod_{j = 0}^{i-1} v\!\left(q^{-2^{j+1}}\right) }{ \prod_{k = 0}^i \left(1 - q^{1-2^{k+1}}\right)};\\
u(x) = u_\ell^{(q)}(x) & = & qx^{4\ell+1} - x^{5\ell} + qx^{5\ell+1} - x^{6\ell};\\
v(x) = v_\ell^{(q)}(x) & = & 1 - qx^{1-\ell} + x^{-\ell} - qx^{1-2\ell} + x^{-2\ell}.
\end{eqnarray*}
\end{thm}
\begin{proof}
Recalling Equation~\eqref{eqn:IZ3},
\begin{eqnarray*}
\II(Z_3,q) & = & \sum_{\ell = 1}^{\infty} \left( a_\ell \sum_{m = 1}^\infty b_m^\ell q^{-2m} \right) \\
& = & \sum_{\ell = 1}^{\infty} \left( a_\ell \sum_{m = 1}^\infty \left(c_m^\ell + d_m^\ell\right) q^{-2m} \right).
\end{eqnarray*}
Similar to our proof for $\II(Z_2,q)$, let us define generating functions for the sequences $c_n = c_n^\ell$ and $d_n = d_n^\ell$:
\[ g(x) = g_\ell^{(q)}(x) = \sum_{n = 1}^\infty c_nx^n \text{ and } h(x) = h_\ell^{(q)}(x) = \sum_{n = 1}^\infty d_nx^n. \]
Despite having to write the recursive relations three different ways, depending on $\ell$, the underlying recursion is fundamentally the same and results in the following functional equations:
\begin{eqnarray}
g(x) & = & q\left(xg(x) + x^{1-\ell}g(x^2) + x^{2\ell+1} - x^{5\ell+1}\right) \label{eqn:g}\\
\nonumber & & - \left(g(x^2) + x^{-\ell}g(x^2) + x^{4\ell} - x^{5\ell} - x^{6\ell}\right);\\
h(x) & = & q\left(xh(x) + x^{1-2\ell}h(x^2) + x^{1-\ell}h(x^2) + x^{4\ell+1} + x^{5\ell+1}\right) \label{eqn:h} \\
\nonumber & & - \left(h(x^2) + x^{-2\ell}h(x^2) + x^{-\ell}h(x^2) + x^{5\ell} + x^{6\ell}\right).
\end{eqnarray}
Solving \eqref{eqn:g} for $g(x)$, we get
\begin{eqnarray}
g(x) = \frac{r(x) - s(x)g(x^2)}{1-qx}, \label{eqn:g2}
\end{eqnarray}
with $r(x)$ and $s(x)$ as defined in the theorem statement.
Expanding \eqref{eqn:g2} gives
\begin{eqnarray}
\nonumber g(x) & = & \frac{r(x) - s(x)g(x^2)}{1-qx} \\
\nonumber & = & \frac{r(x)}{1-qx}\left(1 - \frac{s(x)}{r(x)}g(x^2)\right)\\
\nonumber & = & \frac{r(x)}{1-qx}\left(1 - \frac{s(x)}{r(x)}\frac{r(x^2) - s(x^2)g(x^4)}{1-qx^2}\right)\\
\nonumber & = & \frac{r(x)}{1-qx}\left(1 - \frac{s(x)}{r(x)}\frac{r(x^2)}{1-qx^2}\left(1 - \frac{s(x^2)}{r(x^2)}g(x^4) \right)\right)\\
\nonumber & \vdots & \\
& = & \sum_{i = 0}^\infty \frac{ (-1)^i r\!\left(x^{2^i}\right) \prod_{j = 0}^{i-1} s\!\left(x^{2^j}\right) }{ \prod_{k = 0}^i \left(1 - qx^{2^k}\right)}.
\end{eqnarray}
Likewise, solving \eqref{eqn:h} for $h(x)$, we get
\begin{eqnarray}
h(x) &=& \frac{u(x) - v(x)h(x^2)}{1-qx}\\
& = & \sum_{i = 0}^\infty \frac{(-1)^i u\!\left(x^{2^i}\right) \prod_{j = 0}^{i-1} v\!\left(x^{2^j}\right) }{ \prod_{k = 0}^i \left(1 - qx^{2^k}\right)},
\end{eqnarray}
with $u(x)$ and $v(x)$ as defined in the theorem statement.
\end{proof}
\begin{cor} \label{P3bounds}
For integers $N \geq 0$ and $M \geq 0$,
\begin{eqnarray*}
\sum_{\ell = 1}^{N} a_\ell \left(\sum_{i = 0}^{2M+1} (G(i) + H(i)) \right) & \leq & \II(Z_3,q);\\
\II(Z_3,q) & \leq & q^{-N} + \sum_{\ell = 1}^{N} a_\ell \left(\sum_{i = 0}^{2M}(G(i) + H(i))\right),
\end{eqnarray*}
with $G(i) = G_\ell^{(q)}(i)$ and $H(i) = H_\ell^{(q)}(i)$ as defined in Theorem~\ref{thm:IZ3}.
\end{cor}
\begin{proof}
For fixed integers $q\geq 2$ and $\ell \geq 1$, $\sum_{i=0}^\infty (G(i) + H(i))$ is an alternating series.
We need to show that the sequence $|G(i)+H(i)|$ is decreasing. Since $(-1)^iG(i)>0$ and $(-1)^iH(i) > 0$ for each $i$, $|G(i) +H(i)| = |G(i)| + |H(i)|$.
Thus it suffices to show that $\left\{|G(i)|\right\}_{i=1}^{\infty}$ and $\left\{|H(i)|\right\}_{i = 1}^\infty$ are both decreasing sequences, the routine proof of which can be found in the appendices (Lemma~\ref{lemGH}).
Now for any integer $M\geq 0$:
\[\sum_{i=0}^{2M+1} \left(G_\ell(i) + H_\ell(i)\right) < \sum_{m = 0}^{\infty} b_m^\ell q^{-2m} < \sum_{i=0}^{2M} \left(G_\ell(i) + H_\ell(i)\right).\]
Moreover, since the $a_\ell$ are nonnegative, the lower bound of the corollary is evident.
For a bifix-free word $L$ of length $\ell$, $\sum_{m = 0}^{\infty} b_m^\ell q^{-2m}$ is the limit, as $M \rightarrow \infty$, of the probability that a word of length $M$ is a $Z_3$-instance of the form $LALBLAL$.
A necessary condition for such a word is that it starts and ends with $L$, which (for $M\geq 2\ell$) has probability $q^{-2\ell}$.
Also $a_\ell$ counts the number of bifix-free words of length $\ell$, so $a_\ell \leq q^\ell$.
Hence for any integer $N \geq 0$:
\begin{eqnarray*}
\II(Z_3,q) & < & \sum_{\ell=1}^{N}a_\ell \sum_{m = 0}^{\infty} b_m^\ell q^{-2m} + \sum_{\ell = N+1}^{\infty}q^\ell \left(q^{-2\ell}\right)\\
& = &\sum_{\ell=1}^{N}a_\ell \sum_{m = 0}^{\infty} b_m^\ell q^{-2m} + \sum_{\ell = N+1}^{\infty} q^{-\ell}\\
& \leq &\sum_{\ell=1}^{N}a_\ell \sum_{m = 0}^{\infty} b_m^\ell q^{-2m} + q^{-N}.
\end{eqnarray*}
\end{proof}
\begin{table}[ht]
\centering
\caption{Approximate values of $\II(Z_3,q)$ for $2 \leq q \leq 6$.} \label{table:IZ3}
\begin{tabular}{c | c | c | c | c | c}
$q$ & 2 & 3 & 4 & 5 & 6 \\ \hline
$\II(Z_3,q)$ & 0.11944370 & 0.01835140 & 0.00519251 & 0.00199739 & 0.00092532
\end{tabular}
\end{table}
The values in Table~\ref{table:IZ3} were generated by the Sage code found in Appendix~\ref{P3code}, which was derived directly from Corollary~\ref{P3bounds} and can be used to compute $\II(Z_3,q)$ to arbitrary precision for any $q\geq 2$.
\section{Bounding \texorpdfstring{$\II(Z_n,q)$ for Arbitrary $n$}{the asymptotic instance probability of Zn}} \label{IZn}
This programme is not practical for $n$ in general.
The number of cases for a generalization of Lemma~\ref{P3cases} is likely to grow with $n$.
Even if that stabilizes somehow, the expression for calculating $\II(Z_n,q)$ requires $n$ nested infinite series.
Nevertheless, ignoring some of the more subtle details, we proceed with this method to obtain computable upper bounds for $\II(Z_n,q)$.
Fix a $Z_{n-2}$-instance $L$ of length $\ell \geq 1$, and let $\hat{b}_m^\ell$ be the number of words of length $m$ of the form $LAL$ with $A \neq \varepsilon$, but not of the form $LBLBL$, $LBLLBL$, or $LBLCLBL$.
This corresponds to Stage~I from the proof of Theorem~\ref{P3}.
As we do not account for the structure of $L$, $\hat{b}$ is an overcount for the number of $Z_{n-1}$-instances of the form $LAL$ that do not have a $Z_{n-1}$-bifix of the form $LAL$.
Then $\hat{b}_m = \hat{b}_m^\ell$ is recursively defined as follows:
\begin{eqnarray*}
\text{For even } \ell: \\
\hat{b}_0 = \cdots = \hat{b}_{2\ell} & = & 0,\\
\hat{b}_{2\ell+1} & = & q,\\
\hat{b}_{2k} & = & q\hat{b}_{2k-1} - (\hat{b}_k + \hat{b}_{k + \ell/2}) \text{ for } k>\ell, \\
\hat{b}_{2k+1} & = & q(\hat{b}_{2k} + \hat{b}_{k+\ell/2}) \text{ for } k>\ell. \\
\text{For odd } \ell: \\
\hat{b}_0 = \cdots = \hat{b}_{2\ell} & = & 0, \\
\hat{b}_{2\ell+1} & = & q, \\
\hat{b}_{2k} & = & q(\hat{b}_{2k-1} + \hat{b}_{k + \floor{\ell/2}}) - \hat{b}_k \text{ for } k>\ell, \\
\hat{b}_{2k+1} & = & q\hat{b}_{2k} - \hat{b}_{k+\ceil{\ell/2}} \text{ for } k>\ell.
\end{eqnarray*}
The associated generating function $\hat{f}_\ell(x) := \hat{f}_\ell^{(q)}(x) = \sum_{m=1}^{\infty} \hat{b}_m^\ell x^m$ satisfies
\[\hat{f}_\ell(x) = q(x^{2\ell+1} + x\hat{f}(x) + x^{1-\ell}\hat{f}(x^2)) - (\hat{f}(x^2) + x^{-\ell}\hat{f}(x^2)).\]
Therefore, setting $t_\ell(x) = t_\ell^{(q)}(x) = 1 - qx^{1-\ell} + x^{-\ell}$,
\begin{eqnarray*}
\hat{f}_\ell(x) & = & \frac{qx^{2\ell+1} - t_\ell(x)\hat{f}(x^2)}{1-qx} \\
& = & q\cdot \sum_{i = 0}^\infty \frac{ (-1)^i x^{(2^i)(2\ell+1)} \prod_{j = 0}^{i-1} t_\ell\!\left(x^{2^j}\right) }{ \prod_{k = 0}^i \left(1 - qx^{2^k}\right)}.
\end{eqnarray*}
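For numerical work it is simpler to evaluate $\hat{f}_\ell(q^{-2})$ by running the recursion for $\hat{b}_m^\ell$ directly and truncating the rapidly converging sum; the following plain-Python sketch (ours, not part of the Sage code in Appendix~\ref{P3code}) does exactly that.
\begin{lstlisting}[frame=single]
def b_hat(q, ell, M):
    # b_hat_0, ..., b_hat_{2 ell} = 0 and b_hat_{2 ell + 1} = q, then the recursion above
    B = [0] * (2 * ell + 1) + [q]
    for m in range(2 * ell + 2, M + 1):
        k = m // 2
        if m % 2 == 0:                               # m = 2k
            if ell % 2 == 0:
                B.append(q * B[m - 1] - (B[k] + B[k + ell // 2]))
            else:
                B.append(q * (B[m - 1] + B[k + ell // 2]) - B[k])
        else:                                        # m = 2k + 1
            if ell % 2 == 0:
                B.append(q * (B[m - 1] + B[k + ell // 2]))
            else:
                B.append(q * B[m - 1] - B[k + (ell + 1) // 2])
    return B

def f_hat(q, ell, M=80):
    # truncated sum_m b_hat_m q^(-2m), one ingredient of the upper bound on I(Z_n, q)
    B = b_hat(q, ell, M)
    return sum(B[m] * float(q) ** (-2 * m) for m in range(len(B)))
\end{lstlisting}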
Now $\hat{f}_\ell(q^{-2})$ gives an upper bound for the limit (as word-length approaches infinity) of the probability that a word is a $Z_n$-instance of the form $LALBLAL$ with $|L| = \ell$.
Taking this one step further, for some $Z_i$-instance $K$ of length $\ell_i$, the asymptotic probability that a word is a $Z_n$-instance constructed with $2^{n-i+1}$ copies of $K$ is at most
\begin{equation*} \label{eq:sum}
\sum_{\ell_{i+1}=1}^\infty \cdots \sum_{\ell_{n-2}=1}^\infty\sum_{m=1}^\infty \hat{b}_{\ell_{i+1}}^{\ell_i} \cdots \hat{b}_{\ell_{n-2}}^{\ell_{n-3}}\hat{b}_{m}^{\ell_{n-2}} q^{-2m}.
\end{equation*}
Consequently,
\begin{eqnarray*}
\II(Z_n,q) & \leq &\sum_{\ell_1=1}^\infty \cdots \sum_{\ell_{n-2}=1}^\infty\sum_{m=1}^\infty a_{\ell_1}\hat{b}_{\ell_2}^{\ell_1} \cdots \hat{b}_{\ell_{n-2}}^{\ell_{n-3}}\hat{b}_{m}^{\ell_{n-2}} q^{-2m} \\
& = &\sum_{\ell_1=1}^\infty \cdots \sum_{\ell_{n-2}=1}^\infty a_{\ell_1}\hat{b}_{\ell_2}^{\ell_1} \cdots \hat{b}_{\ell_{n-2}}^{\ell_{n-3}}\hat{f}_{\ell_{n-2}}(q^{-2}) . \\
\end{eqnarray*}
We need to get control of the tails to turn this into a computable sum.
A trivial upper bound for the asymptotic probability that a word is a $Z_n$-instance constructed with $2^{n-i}$ copies of $K$, and thus starts and ends with $K$, is $q^{-2\ell_i}$.
Since there are at most $q^{\ell_i}$ $Z_i$-instances of length $\ell_i$, the asymptotic probability that a word is a $Z_n$-instance with a $Z_i$-component of length $\ell_i$ is at most $q^{-\ell_i}$.
Therefore, the asymptotic probability that a word is a $Z_n$-instance with a $Z_i$-component of length greater than $N_i$ is at most
\begin{equation*} \label{eq:tail}
\sum_{\ell_i = N_i+1}^\infty q^{-\ell_i} = \frac{q^{-N_i}}{q-1} .
\end{equation*}
Now in the upper bound of $\II(Z_n,q)$, we can replace the partial tail
\[
\sum_{\ell_1=1}^{N_1} \cdots \sum_{\ell_{i-1}=1}^{N_{i-1}} \sum_{\ell_{i}=N_i+1}^\infty \sum_{\ell_{i+1}=1}^\infty \cdots \sum_{\ell_{n-2}=1}^\infty a_{\ell_1}\hat{b}_{\ell_2}^{\ell_1} \cdots \hat{b}_{\ell_{n-2}}^{\ell_{n-3}}\hat{f}_{\ell_{n-2}}(q^{-2})
\]
with
\begin{eqnarray*}
& & \sum_{\ell_1=1}^{N_1} \cdots \sum_{\ell_{i-1}=1}^{N_{i-1}} a_{\ell_1}\hat{b}_{\ell_2}^{\ell_1} \cdots \hat{b}_{\ell_{i-1}}^{\ell_{i-2}} \frac{q^{-N_i}}{q-1} \\
& \leq & \left( \prod_{j = 1}^{i-1} N_j \right) \max_{\substack{\ell_j \leq N_j \\ 1 \leq j < i}}\left(a_{\ell_1}\hat{b}_{\ell_2}^{\ell_1} \cdots \hat{b}_{\ell_{i-1}}^{\ell_{i-2}}\right) \frac{q^{-N_i}}{q-1}\\
& \leq & \left( \prod_{j = 1}^{i-1} N_j \right) q^{N_{i-1}} \frac{q^{-N_i}}{q-1}.
\end{eqnarray*}
Therefore,
\begin{eqnarray*}
\II(Z_n,q) & \leq &\sum_{\ell_1=1}^{N_1} \cdots \sum_{\ell_{n-2}=1}^{N_{n-2}} a_{\ell_1}\hat{b}_{\ell_2}^{\ell_1} \cdots \hat{b}_{\ell_{n-2}}^{\ell_{n-3}}\hat{f}_{\ell_{n-2}}(q^{-2}) + \; \sum_{i = 1}^{n-2} \left( \left( \prod_{j = 1}^{i-1} N_j \right) q^{N_{i-1}} \frac{q^{-N_i}}{q-1} \right) .
\end{eqnarray*}
\appendix
\section{Proofs and Computations for Sections~\ref{IZ2} and~\ref{IZ3}}
\subsection{Proofs of Monotonicity}
\begin{lem} \label{lemF}
For fixed $q \geq 2$, $\left\{|F(i)|\right\}_{i=0}^{\infty}$ is a decreasing sequence, where \[F(i) = F^q(i) = \frac{(-1)^iq^{1-2^{i+1}}}{\prod_{k=0}^{i} (1 - q^{1-2^{k+1}})}\] is the $i$-th term of the series in Theorem~\ref{thm:IZ2}.
\end{lem}
\begin{proof}
For $i>0$:
\begin{eqnarray*}
\frac{|F(i)|}{|F(i-1)|} & = & \frac{q^{1 - 2^{i+1}}}{q^{1 - 2^{i}}\left(1 - q^{1 - 2^{i+1}}\right)}\\
& = & \frac{q^{-2^{i}}}{1 - q^{1 - 2^{i+1}}}\\
& \leq & \frac{2^{-2}}{1 - 2^{-3}}\\
& = & \frac{2}{7}\\
& < & 1,
\end{eqnarray*}
where the inequality holds because $q^{-2^i}$ is maximised and $1 - q^{1-2^{i+1}}$ is minimised at $q=2$ and $i=1$.
\end{proof}
\begin{lem} \label{lemGH}
For fixed $\ell \geq 1$ and $q \geq 2$, $\left\{|G(i)|\right\}_{i=1}^{\infty}$ and $\left\{|H(i)|\right\}_{i = 1}^\infty$ are both decreasing sequences, where
\begin{eqnarray*}
G(i) = G_\ell^q(i) &=& \frac{ (-1)^i r\!\left(q^{-2^{i+1}}\right) \prod_{j = 0}^{i-1} s\!\left(q^{-2^{j+1}}\right)}{ \prod_{k = 0}^i \left(1 - q^{1-2^{k+1}}\right)};\\
r(x) = r_\ell^q(x) & = & qx^{2\ell+1} - x^{4\ell} + x^{5\ell} - qx^{5\ell+1} +x^{6\ell};\\
s(x) = s_\ell^q(x) & = & 1 - qx^{1-\ell} + x^{-\ell};\\
H(i) = H_\ell^q(i) &=& \frac{ (-1)^i u\!\left(q^{-2^{i+1}}\right) \prod_{j = 0}^{i-1} v\!\left(q^{-2^{j+1}}\right) }{ \prod_{k = 0}^i \left(1 - q^{1-2^{k+1}}\right)};\\
u(x) = u_\ell^q(x) & = & qx^{4\ell+1} - x^{5\ell} + qx^{5\ell+1} - x^{6\ell};\\
v(x) = v_\ell^q(x) & = & 1 - qx^{1-\ell} + x^{-\ell} - qx^{1-2\ell} + x^{-2\ell}.
\end{eqnarray*}
\end{lem}
\begin{proof}
For $i>0$:
\begin{eqnarray*}
\frac{|G(i)|}{|G(i-1)|} & = & \frac{r\!\left(q^{-2^{i+1}}\right)}{r\!\left(q^{-2^i}\right)}\cdot \frac{s\!\left(q^{-2^i}\right)}{1 - q^{1-2^{i+1}}}\\
& = & \frac{ q^{1-2^i(4\ell+2)} - q^{-2^i(8\ell)} + q^{-2^i(10\ell)} - q^{1-2^i(10\ell+2)} + q^{-2^i(12\ell)} }{ q^{1-2^i(2\ell+1)} - q^{-2^i(4\ell)} + q^{-2^i(5\ell)} - q^{1-2^i(5\ell+1)} + q^{-2^i(6\ell)} }\\
& &\cdot \frac{ 1 - q^{1+2^i(\ell-1)} + q^{2^i\ell} }{ 1 - q^{1-2^i(2)} }\\
& < & \frac{ q^{1-2^i(4\ell+2)} }{ q^{1-2^i(2\ell+1)} - q^{-2^i(4\ell)} }\cdot \frac{q^{2^i\ell} }{ 1 - q^{1-2^i(2)} }\\
& = & \frac{ q^{1-2^i(3\ell+2)} }{ q^{1-2^i(2\ell+1)} - q^{-2^i(4\ell)} - q^{2-2^i(2\ell+3)} + q^{1-2^i(4\ell+2)}}\cdot \frac{ q^{-1+2^i(2\ell+1)} }{ q^{-1+2^i(2\ell+1)} }\\
& = & \frac{ q^{-2^i(\ell+1)} }{ 1 - q^{-1-2^i(2\ell-1)} - q^{1-2^i(2)} + q^{2^i(2\ell+1)} }\\
& < & \frac{ (2)^{-2^1((1)+1)} }{ 1 - (2)^{-1-2^1(2(1)-1)} - (2)^{1-2^1(2)} + 0 }\\
& = & \frac{ 2^{-4} }{ 1 - 2^{-3} - 2^{-3} } < 1;\\
\\
\frac{|H(i)|}{|H(i-1)|} & = & \frac{u\!\left(q^{-2^{i+1}}\right)}{u\!\left(q^{-2^i}\right)}\cdot \frac{v\!\left(q^{-2^i}\right)}{1 - q^{1-2^{i+1}}}\\
& = & \frac{ q^{1-2^i(8\ell+2)} - q^{-2^i(10\ell)} + q^{1-2^i(10\ell+2)} - q^{-2^i(12\ell)} }{ q^{1-2^i(4\ell+1)} - q^{-2^i(5\ell)} + q^{1-2^i(5\ell+1)} - q^{-2^i(6\ell)} }\\
& &\cdot \frac{ 1 - q^{1+2^i(\ell-1)} + q^{2^i\ell} - q^{1+2^i(2\ell - 1)} + q^{2^i(2\ell)} }{ 1 - q^{1-2^i(2)} }\\
& < & \frac{ q^{1-2^i(8\ell+2)} }{ q^{1-2^i(4\ell+1)} - q^{-2^i(5\ell)} }\cdot \frac{q^{2^i(2\ell)} }{ 1 - q^{1-2^i(2)} }\\
& = & \frac{ q^{1-2^i(6\ell+2)} }{ q^{1-2^i(4\ell+1)} - q^{-2^i(5\ell)} - q^{2-2^i(4\ell+3)} + q^{1-2^i(5\ell+2)}}\cdot \frac{ q^{-1+2^i(4\ell+1)} }{ q^{-1+2^i(4\ell+1)} }\\
& = & \frac{ q^{-2^i(2\ell+1)} }{ 1 - q^{-1-2^i(\ell-1)} - q^{1-2^i(2)} + q^{2^i(\ell+1)} }\\
& < & \frac{ (2)^{-2^1(2(1)+1)} }{ 1 - (2)^{-1-2^1((1)-1)} - (2)^{1-2^1(2)} + 0 }\\
& = & \frac{ 2^{-6} }{ 1 - 2^{-1} - 2^{-3} } < 1.
\end{eqnarray*}
\end{proof}
\subsection{Sage Code for Table~\ref{table:IZ3} of \texorpdfstring{$\II(Z_3,q)$-Values}{Values of the Asymptotic Density of Z3}} \label{P3code}
The following code to generate Table~\ref{table:IZ3} was run with Sage 6.1.1 \parencite{S-14}.
\begin{lstlisting}[frame=single]
# Calculate G(i), term i of expanded g(q^(-2)).
def r(L, q, x):
    X = x^L
    return q*x*X^2 - X^4 + X^5 - q*x*X^5 + X^6
def s(L, q, x):
    return 1 - q*x^(1 - L) + x^(-L)
def G(L, q, i):
    num = prod([s(L, q, q^(-2^(j + 1))) for j in range(i)])
    den = prod([1 - q^(1 - 2^(k + 1)) for k in range(i + 1)])
    return (-1)^i * r(L, q, q^(-2^(i + 1))) * num / den
# Calculate H(i), term i of expanded h(q^(-2)).
def u(L, q, x):
    return q*x^(4*L + 1) - x^(5*L) + q*x^(5*L + 1) - x^(6*L)
def v(L, q, x):
    return 1 - q*x^(1 - L) + x^(-L) - q*x^(1 - 2*L) + x^(-2*L)
def H(L, q, i):
    num = prod([v(L, q, q^(-2^(j + 1))) for j in range(i)])
    den = prod([1 - q^(1 - 2^(k + 1)) for k in range(i+1)])
    return (-1)^i * u(L, q, q^(-2^(i + 1))) * num / den
# Generate the first N terms of {a_n}.
def a(q, N):
    A = [0, q]
    for n in range(2, N + 1):
        A.append(q*A[-1] - ((n + 1) % 2)*A[n//2])
    return A
# Calculate the partial sum of I(Z_3, q).
def I(q, N, M):
    A, partial = a(q, N), 0
    for L in range(1, N + 1):
        terms = [G(L, q, n) + H(L, q, n) for n in range(M + 1)]
        partial += A[L]*sum(terms)
    return partial
# Output bounds on I(Z_3, q) for small values of q.
N = 31 # Level of precision.
for q in range(2, 7):
    print 'q = %d' % q
    L, U = round(I(q, N, 5), N), round(I(q, N, 4) + 2^(-N), N)
    print 'Lower bound with N = %d: %s' % (N, L)
    print 'Upper bound with N = %d: %s' % (N, U)
\end{lstlisting}
\section{Word Trees Illustrating Theorem \ref{P3}} \label{TREES}
From Section \ref{IZ3}: ``For a fixed bifix-free word $L$ of length $\ell$, define $b_m^\ell$ to count the $q$-ary $Z_2$-instances of length $m$ that have bifix $L$ and are $Z_2$-bifix-free.''
In each of the following images, a word is struck through if it is not counted by $b_m$ but its descendants are. It is hashed through if its descendants are also eliminated.
\begin{figure}\label{figure:T21}
\end{figure}
\begin{tikzpicture}[grow'=right,level distance=51pt, sibling distance=-5pt]
\Tree [.010
[.0100
[.01000
[.010000
[.0100000
[.01000000
[.010000000 ]
[.010010000 ]
]
[.01001100
[.\sout{010001000} ]
[.010011000 ]
]
]
[.0101000
[.01010000
[.010100000 ]
[.010110000 ]
]
[.01011000
[.010101000 ]
[.010111000 ]
]
]
]
[.010100
[.\sout{0100100}
[.\xout{01000100} ]
[.01001100
[.010001100 ]
[.010011100 ]
]
]
[.0101100
[.01010100
[.010100100 ]
[.010110100 ]
]
[.01011100
[.010101100 ]
[.010111100 ]
]
]
]
]
[.01100
[.011000
[.0110000
[.01100000
[.011000000 ]
[.011010000 ]
]
[.01101000
[.011001000 ]
[.011011000 ]
]
]
[.0111000
[.01110000
[.011100000 ]
[.011110000 ]
]
[.01111000
[.011101000 ]
[.011111000 ]
]
]
]
[.011100
[.0110100
[.01100100
[.011000100 ]
[.011010100 ]
]
[.01101100
[.\sout{011001100} ]
[.011011100 ]
]
]
[.0111100
[.01110100
[.011100100 ]
[.011110100 ]
]
[.01111100
[.011101100 ]
[.011111100 ]
]
]
]
]
]
[.0110
[.\sout{01010}
[.\xout{010010} ]
[.010110
[.0100110
[.01000110
[.010000110 ]
[.010010110 ]
]
[.01001110
[.010001110 ]
[.010011110 ]
]
]
[.0101110
[.01010110
[.010100110 ]
[.010110110 ]
]
[.01011110
[.010101110 ]
[.010111110 ]
]
]
]
]
[.01110
[.011010
[.0110010
[.01100010
[.011000010 ]
[.011010010 ]
]
[.01101010
[.011001010 ]
[.011011010 ]
]
]
[.0111010
[.01110010
[.011100010 ]
[.011110010 ]
]
[.01111010
[.011101010 ]
[.011111010 ]
]
]
]
[.011110
[.\sout{0110110}
[.\xout{01100110} ]
[.01101110
[.011001110 ]
[.011011110 ]
]
]
[.0111110
[.01110110
[.011100110 ]
[.011110110 ]
]
[.01111110
[.\sout{011101110} ]
[.011111110 ]
]
]
]
]
]
]
\end{tikzpicture}
\begin{figure}\label{figure:T22}
\end{figure}
\begin{figure}\label{figure:T23}
\end{figure}
\begin{figure}\label{figure:T31}
\end{figure}
\printbibliography
\end{document}
\begin{document}
\title{Comparing the states of many quantum systems}
\author{IGOR JEX$^1$, ERIKA ANDERSSON$^2$, and ANTHONY CHEFLES$^3$\\
$^1$Department of Physics, FNSPE, Czech Technical University Prague,\\
B\v rehov\'a 7, 115 19 Praha, Czech Republic,\\
e-mail [email protected]\\
$^2$Department of Physics, University of Strathclyde, Glasgow G4 0NG, UK,\\
tel: +44 141 5483376, fax: +44 141 5522891,\\
e-mail [email protected] \\
$^3$Department of Physical Sciences, University of Hertfordshire,\\
Hatfield AL10 9AB, Hertfordshire, UK, \\
e-mail [email protected]}
\date{\today}
\begin{abstract}
We investigate how to determine whether the states of a set of quantum systems are identical or not.
This paper treats both error-free comparison, and comparison where errors in the result are allowed. Error-free comparison means that we aim to obtain definite answers, which are known to be correct, as often as possible. In general, we will have to accept also inconclusive results, giving no information. To obtain a definite answer that the states of the systems are not identical is always possible, whereas, in the situation considered here, a definite answer that they are identical will not be possible.
The optimal universal error-free comparison strategy is a projection onto the totally symmetric and the different non-symmetric subspaces, invariant under permutations and unitary transformations.
We also show how to construct optimal comparison strategies when allowing for some errors in the result, minimising either the error probability, or the average cost of making an error.
We point out that it is possible to realise universal error-free comparison strategies using only linear elements and particle detectors, albeit with less than ideal efficiency. Also minimum-error and minimum-cost strategies may sometimes be realised in this way. This is of great significance for practical applications of quantum comparison.
\end{abstract}
\pacs{03.67.-a, 03.65.Ta, 42.50.Dv}
\maketitle
\section{Introduction}
When comparing classical systems, a straightforward way to proceed is to measure observables of each system individually, and then compare the measurement results.
To compare the states of quantum systems is less straightforward, since results of quantum measurements are statistical in their nature. In addition, the measurement usually introduces a nonnegligible disturbance of the measured state. In quantum mechanics, simultaneous measurements of non-commuting observables are restricted. If only a single copy of each quantum system is available, we cannot measure all observables of the systems precisely, and thus cannot compare the states of quantum systems in the same way as classical systems. Based on measurements of the individual quantum systems, it is in general only possible to make statistical predictions of their similarities and differences. If an ensemble of identically prepared systems is available for each of the compared quantum states, the result will be more reliable.
Quantum mechanics, however, allows us to perform collective measurements, which sometimes have no classical analogue. An example is the projection onto entangled Bell states, used in classically impossible tasks such as dense coding \cite{ben}. Using collective measurements, it is possible to reliably compare many quantum systems, even when only a single copy of each system is available.
In this paper we will investigate how to determine whether the states of a set of quantum systems are identical or different. We first consider the situation where the obtained results are {\it unambiguous}, meaning that, when an answer is obtained, for example, that the states of the systems are different, it is always true. This forces us to accept that the comparison may sometimes give an inconclusive answer, which means that the attempt to determine whether the states are identical or not has failed. The error-free character of the answers is useful whenever one wants to be absolutely certain that it is correct.
Dropping the requirement that the result of the comparison has to be error-free, we also show how to construct optimal comparison strategies giving the least possible error (or least possible cost of error) in the result. With such a strategy, no matter how many times the comparison is repeated on new copies of the quantum systems, one can only estimate that the probability that the answer is wrong is below a certain limit. To reach the limit, many repetitions may be required, whereas an error-free strategy sometimes gives a definite answer in only one try. On the other hand, the error-free strategy will often have a large probability to fail, and from this point of view we may prefer a minimum-error or minimum-cost comparison strategy.
Quantum comparison of two pure quantum systems has recently been considered in \cite{steve}. It is also possible to compare unitary transforms to each other \cite{ucomp}, a task which is related to state comparison. Quantum comparison may be used in many quantum information applications, such as error correction,
or for checking that copies of `keys' are identical and go into their `locks'. Small effects may be monitored by comparison with a highly reliable reference system. Two recent applications where quantum comparison is needed are quantum fingerprinting \cite{buhr} and quantum digital signatures \cite{chuang}. Because of this, it is important to understand the full possibilities offered, and the absolute limitations of quantum comparison, and to investigate how quantum comparison could be experimentally realised.
The universal error-free comparison strategy is investigated in section \ref{secuni}.
It is optimal when the distribution of possible quantum states is flat, and allows us to unambiguously (without error) detect a difference in the compared quantum systems. It is not possible to unambiguously determine that all the states are identical, unless we have more information about the states we want to compare \cite{tonyprivate}.
It is possible, however, to obtain more information about differences among the $N$ quantum systems, for example, that no $M$ particles, where $M\leq N$, have identical states. This `full universal error-free comparison strategy' will be introduced in section \ref{subspaces}. In section \ref{prob}, we show how to obtain another type of optimal comparison strategy, minimising either the error in the result or the average cost of making an error. The optimal strategies can be constructed for comparing both pure and mixed quantum states, and noise and entanglement with an environment can be taken into account.
We also investigate, in section \ref{realisations}, how comparison strategies may be realised.
We find that universal error-free comparison can be effected using only beam splitters and particle detectors, making it possible to realise using linear optics. This is of great importance for experimental realisations. Also minimum-error and minimum-cost comparison can in principle be realised as a projective measurement in some basis, but the realisation depends very much on the individual situation.
We end with a discussion and conclusions.
\section{Universal error-free comparison strategy}
\label{secuni}
Let us suppose that we have a collection of quantum systems.
Is it possible to compare the states of these quantum systems, that is, to determine whether the states are all identical or not, without necessarily obtaining any information about the individual states?
In this section we will investigate error-free quantum comparison, meaning that whenever one obtains the result `same' or `different', it is also known to be correct. This will force us to accept that we sometimes get an inconclusive result, meaning no result at all. For reasons we will return to later, all the states are, in this section, assumed to be pure. We want to find the optimal comparison procedure when the quantum states are completely unknown, in other words, when the distribution of possible states for all the particles is flat. We refer to this as `universal comparison'. It is important to note that the resulting optimal universal comparison strategy will give correct answers also when there {\it is} prior information about the states, even though it may then not be optimal, in the sense that it may not give unambiguous answers as often as possible.
The optimal way to compare, without error, the states of two unknown,
pure quantum systems is to check whether their combined state is symmetric or antisymmetric \cite{steve}.
If the two quantum systems have been prepared in the same state, then their combined state is necessarily symmetric. From this it follows that finding the systems in an antisymmetric state definitely indicates that the two systems were in different states. On the other hand, finding the two systems in a symmetric state does not necessarily mean that they were identically prepared, but rather gives us no definite information. (We are comparing the internal states, assuming that the quantum systems may be distinguished at least through different spatial positions; their total state is of course symmetric (bosons) or antisymmetric (fermions).)
Also when comparing the states of more than two quantum systems, if they are all identically prepared, then their overall state will be symmetric. This means that finding the $N$-particle system in the non-symmetric part of its total Hilbert space indicates that the subsystems cannot all have been identically prepared.
The symmetric/non-symmetric projection is the optimal comparison strategy when we are asking for an error-free answer. When there is no prior information about the states of the particles, the best comparison strategy must be invariant under permutations of, and unitary transforms on, the compared quantum systems (the same transform on each system). Physically, the permutation invariance comes from the fact that we are not asking which of the individual quantum states are different from each other, and the invariance under unitary transformations arises because we do not know in which basis they are given (in fact we do not know the states at all).
This leads us to consider the corresponding invariant subspaces, which
are the symmetric and non-symmetric subspaces.
In the non-symmetric subspace, the quantum systems are never all in the same internal state. In
the symmetric subspace, the states may be identical or different, corresponding to an
inconclusive outcome. There is no invariant subspace in which the quantum states are always
identical, and therefore we can never obtain an error-free answer that the states were all identical.
To further motivate the optimality, we may think of the comparison as a generalised measurement with three outcomes, `yes, all the states are the same', `no, all the states are not the same', and `no certain answer'. To these outcomes correspond three measurement operators $\Pi_Y, \Pi_N$ and $\Pi_?$, respectively. The probabilities of obtaining the different results are given by
\begin{equation}
p(Y)=\langle\psi|\Pi_Y|\psi\rangle,\quad
p(N)=\langle\psi|\Pi_N|\psi\rangle,\quad
p(?)=\langle\psi|\Pi_?|\psi\rangle,
\end{equation}
where $|\psi\rangle$ is the many-particle state of the system. The measurement operators $\Pi_Y, \Pi_N$ and $\Pi_?$ have to be positive, meaning that they have only nonnegative eigenvalues. This guarantees that all the probabilities $p(Y), p(N)$ and $p(?)$ are nonnegative for any $|\psi\rangle$. In addition, $\Pi_Y+\Pi_N+\Pi_?=\mathbf{1}$ has to hold, corresponding to $p(Y)+p(N)+p(?)=1$. The measurement operators do not have to be pure state projectors. Generalised measurements are usually referred to as POM (probability operator measure) or POVM (positive operator-valued measure) strategies \cite{hel}.
For error-free comparison, we demand that $p(Y)=0$ whenever the states of the systems were not identical, and $p(N)=0$ whenever they were all identical. This means that the expectation value of $\Pi_N$ for any state in the totally symmetric subspace must be zero. In the optimal comparison measurement, to make $p(?)$ as small as possible, the support of the operators $\Pi_Y$ and $\Pi_N$ should include as much of the total Hilbert space as possible. The optimal $\Pi_N$ is therefore seen to be the projector onto all of the non-symmetric subspace.
The invariance of the comparison situation under permutations and unitary transformations is thus seen to correspond to the measurement operators possessing the same invariances.
Since there is no invariant subspace where the states are always identical, $\Pi_Y=0$, and $\Pi_?$ is the projector onto the symmetric subspace.
To summarise, for the optimal comparison strategy we have
\begin{equation}
\Pi_N=P_{nonsym} , \quad\Pi_?=P_{sym}, \quad\Pi_Y=0.
\end{equation}
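For the simplest case of two qubits, this prescription reduces to
\begin{equation}
\Pi_N=|\psi^-\rangle\langle\psi^-| ,\quad \Pi_?=\mathbf{1}-|\psi^-\rangle\langle\psi^-| ,\quad\Pi_Y=0,
\end{equation}
where $|\psi^-\rangle={1\over \sqrt{2}}(|0\rangle_1{\otimes}|1\rangle_2-|1\rangle_1{\otimes}|0\rangle_2)$ is the two-qubit singlet state and $\mathbf{1}-|\psi^-\rangle\langle\psi^-|$ projects onto the three-dimensional symmetric (triplet) subspace; this is precisely the symmetric/antisymmetric test of \cite{steve} described above.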
Let us here also clarify that asking questions about the phases of the individual quantum states or systems is not meaningful, since the total combined quantum system only has an overall phase of its wave function.
The average probability of detecting the states of the systems as different may be understood as the average success rate of the comparison. This probability is given by the average expectation value of $P_{nonsym}$, where the average is taken over all possible states $|\Psi_N\rangle$ of the $N$-particle system. Since there is no prior information about the individual states, their distribution is flat and the integral $\int|\Psi_N\rangle\langle\Psi_N| d\Psi_N$ is proportional to the identity operator in the total $N$-particle Hilbert space, equal to $\mathbf{1}/D_{tot}$ when the distribution is normalised to unit total probability. The average success probability is therefore
\begin{eqnarray}
\label{successintegral}
p_{success}&=&\overline{\langle P_{nonsym}\rangle}=
\int\langle\Psi_N|P_{nonsym}|\Psi_N\rangle d\Psi_N\nonumber\\
&=&Tr\{P_{nonsym}\int|\Psi_N\rangle\langle\Psi_N| d\Psi_N\}
={1\over D_{tot}}Tr\{ P_{nonsym}\}\nonumber\\
&=&{D_{nonsym}\over D_{tot}}=1-\overline{\langle P_{sym}\rangle}=1-{D_{sym}\over D_{tot}},
\end{eqnarray}
where $D_{sym}$ and $D_{nonsym}$ are the dimensions of the symmetric and nonsymmetric subspaces, and $D_{tot}=D^N$ is the dimension of the total Hilbert space (the particles may be distinguished since they are spatially separated; their total quantum state including the spatial part will be symmetric or antisymmetric depending on whether they are bosons or fermions). The dimension of the symmetric subspace for $N$ quantum systems in $D$ dimensions is equal to the number of ways we can distribute $N$ particles in $D$ different boxes, which is
\begin{equation}
D_{sym}={D+N-1\choose D-1}={D+N-1\choose N}.
\end{equation}
For each way of distributing the quantum systems among the $D$ dimensions, there is one symmetric state; for two examples, see equations (\ref{threesym}) and (\ref{foursym}).
We may therefore write the optimal average success probability as
\begin{equation}
\label{success}
p_{success}=1-{{D+N-1\choose N}D^{-N}}.
\end{equation}
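As an illustration, for qubits ($D=2$) equation (\ref{success}) becomes
\begin{equation}
p_{success}=1-(N+1)\,2^{-N},
\end{equation}
which equals $1/4$ for two qubits, $1/2$ for three and $11/16$ for four, and approaches unity as the number of compared systems grows.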
This result holds also if we allow only nonentangled states in $|\Psi_N\rangle$. In this case, $|\Psi_N\rangle=| \phi _1\rangle{\otimes}| \phi _2\rangle{\otimes}...{\otimes}| \phi _N\rangle$, and we obtain
\begin{eqnarray}
\overline{\langle P_{nonsym}\rangle}
&=&Tr\{P_{nonsym}\int|\phi_1\rangle{\otimes}|\phi_2\rangle{\otimes}...{\otimes}|\phi_N\rangle
\langle\phi_1|{\otimes}\langle\phi_2|{\otimes}...\otimes{}\langle\phi_N| d\phi_1 d\phi_2 ... d\phi_N\}\nonumber\\
&=&Tr\{P_{nonsym}\int|\phi_1\rangle\langle\phi_1|d\phi_1\int|\phi_2\rangle\langle\phi_2
|d\phi_2...\int|\phi_N\rangle\langle\phi_N|d\phi_N\}\nonumber\\
&=&Tr\{ P_{nonsym} {\mathbf{1}_1\over D}\otimes {\mathbf{1}_2\over D}\otimes ... \otimes{\mathbf{1}_N\over D}\}={D_{nonsym}\over D_{tot}}
\end{eqnarray}
exactly as before, where each single-particle integral gives $\int|\phi_i\rangle\langle\phi_i|d\phi_i=\mathbf{1}_i/D$, with $\mathbf{1}_i$ denoting the identity operator for the $i$th particle.
There are good reasons to allow the compared pure quantum systems to be entangled with each other. Quantum comparison may be used in quantum information applications, and here one often benefits from entanglement. Also, an adversary may prepare entangled states in order to cheat, and thus we need to consider this possibility. For example, no symmetric entangled state will ever fail the universal comparison test, even if its subsystems are not in the same quantum state.
On the other hand, entanglement with an environment has to be restricted for error-free comparison strategies to be possible. If the quantum systems that are to be compared are in mixed states, it will not always be possible to obtain unambiguous outcomes. This is because a set of identical mixed density matrices does not, in general, lie completely within the symmetric subspace, so that if the particles are found to be in the non-symmetric subspace, this does not necessarily mean that the mixed density matrices of the individual states are different. As a consequence, in the presence of noise, or entanglement with an environment, we will usually be forced to consider comparison strategies which may have errors in the results. Error-free comparison may still be possible, but only if the mixed states are restricted in some way.
\section{Detailed error-free comparison}
\label{subspaces}
When comparing the pure states of more than two quantum systems, we may also ask whether it is possible to obtain more information about the differences between their states. In this section we will show that it is possible to tell, without error, whether `all the $N$ states were not identical', `no $N-1$ states were all identical', and so on. The comparison strategy considered in section \ref{secuni} has only the results `all the $N$ states were not identical' and `no definite information'.
It corresponds to performing a projection onto the symmetric and non-totally symmetric subspaces. The non-totally symmetric subspace can be further decomposed into subspaces, which also are invariant under permutations and unitary transformations. These subspaces are connected with more specified information about differences among the states.
To understand this, let us first consider some examples. When comparing the states of three two-dimensional quantum systems (qubits), the totally symmetric subspace is four-dimensional. If the basis states of qubit $i$, $i=1,2,3$, are denoted by $|0\rangle_i$ and $|1\rangle_i$, a possible choice of symmetric basis states is
\begin{equation}
\label{threesym}
|0\rangle_1{\otimes}|0\rangle_2{\otimes}|0\rangle_3,\quad
|1\rangle_1{\otimes}|1\rangle_2{\otimes}|1\rangle_3,\quad
S(|0\rangle_1{\otimes}|0\rangle_2{\otimes}|1\rangle_3),\quad
S(|0\rangle_1{\otimes}|1\rangle_2{\otimes}|1\rangle_3),
\end{equation}
where $S$ denotes symmetrisation. The non-totally symmetric subspace is also four-dimensional. In this particular case, it does not decompose into further subspaces, but the case of three qubits is nevertheless useful for understanding the general situation. We may construct the non-symmetric states
\begin{equation}
|\psi_{i,j}^-\rangle \otimes|0\rangle_k, \quad|\psi_{i,j}^-\rangle \otimes|1\rangle_k,
\end{equation}
where $|\psi_{i,j}^-\rangle={1\over \sqrt{2}}(|0\rangle_i{\otimes}|1\rangle_j-|1\rangle_i{\otimes}|0\rangle_j)$ and $(i,j,k)=(1,2,3)$, $(2,3,1)$ or $(3,1,2)$. These six states form an overcomplete basis for the non-totally symmetric space. We have chosen this basis, rather than an orthogonal basis, because projecting onto $P_{i,j}$ and $\mathbf{1}-P_{i,j}$, with $P_{i,j}=|\psi_{i,j}^-\rangle\langle\psi_{i,j}^-|\otimes (|0\rangle_{kk}\langle 0|+|1\rangle_{kk}\langle 1|)$,
corresponds to {\it pairwise} comparison of qubits $i$ and $j$. The projector onto the non-totally symmetric subspace may be written
\begin{equation}
P_{nonsym}=\mathbf{1}-P_{sym}={2\over 3}(P_{1,2}+P_{2,3}+P_{3,1}).
\end{equation}
This can be understood to mean `in the non-symmetric subspace, at least one pair of quantum systems is different from each other', or, `at most two states are the same'.
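As a quick consistency check of this decomposition, note that each $P_{i,j}$ has rank two, so that the right-hand side has trace ${2\over 3}(2+2+2)=4$, which indeed equals the dimension $2^3-4=4$ of the non-totally symmetric subspace.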
When comparing four qubits, the symmetric subspace is five-dimensional with the basis states
\begin{eqnarray}
\label{foursym}
&&|0\rangle_1|0\rangle_2|0\rangle_3|0\rangle_4,\quad
|1\rangle_1|1\rangle_2|1\rangle_3|1\rangle_4,\quad\nonumber\\
&&S(|0\rangle_1|0\rangle_2|0\rangle_3|1\rangle_4),\quad
S(|0\rangle_1|0\rangle_2|1\rangle_3|1\rangle_4),\quad
S(|0\rangle_1|1\rangle_2|1\rangle_3|1\rangle_4),
\end{eqnarray}
where the tensor products have been suppressed for brevity.
It is possible to decompose the eleven-dimensional non-totally symmetric subspace into two further subspaces. These can be represented by Young tableaux \cite{youngtab} as shown in figure \ref{youngfig1}, where the two Young tableaux for three qubits are also shown. Young tableaux are a group-theoretic tool for determining the subspaces of a multi-particle system, and the dimensionalities of these subspaces. The number of boxes in a tableau is equal to the number of particles. There can be at most as many rows as there are dimensions for one quantum system, e.g. two rows for qubits. Roughly speaking, a horizontal row with $m$ boxes corresponds to $m$ particles in a symmetric state, and a vertical column with $n$ boxes corresponds to $n$ particles in an antisymmetric state.
For four qubits, the non-symmetric subspace corresponding to figure 1d) is nine-dimensional and contains states of the type
\begin{equation}
\label{1sub}
|\psi_{i,j}^-\rangle{\otimes}|0\rangle_k{\otimes}|0\rangle_l,\quad
|\psi_{i,j}^-\rangle{\otimes}|1\rangle_k{\otimes}|1\rangle_l,\quad
|\psi_{i,j}^-\rangle{\otimes}{1\over\sqrt{2}}(|0\rangle_k{\otimes}|1\rangle_l+
|1\rangle_k{\otimes}|0\rangle_l),
\end{equation}
where $(i,j,k,l)$ is any permutation of (1,2,3,4). The qubits $i$ and $j$ are in an antisymmetric state with respect to each other, and qubits $k$ and $l$ are symmetric with respect to each other.
Subspace 1e) is two-dimensional and contains the states
\begin{equation}
\label{2sub}
|\psi_{i,j}^-\rangle\otimes|\psi_{k,l}^-\rangle,
\end{equation}
where again $(i,j,k,l)$ is any permutation of (1,2,3,4). Here both qubits $i,j$ and $k,l$ are antisymmetric with respect to each other.
The two sets of states (\ref{1sub}) and (\ref{2sub}) span their respective subspaces but are overcomplete. We now see that if the four compared qubits are found in the non-symmetric subspace 1e), no selection of three qubits can have been in exactly the same state. At most two can be identical. Equivalently, all the states where three or four qubits are identical are found in the totally symmetric subspace 1c) and the first non-symmetric subspace, 1d).
In the non-symmetric subspace 1d), no four qubits are the same, but three of them may be identically prepared. In the totally symmetric subspace, we find all sorts of combinations of states, for example, all four particles may be in the same internal state, or the states may all be different. Therefore detection of the four qubits in the symmetric subspace gives no definite information about whether their states are the same or different.
\begin{figure}
\centering
\includegraphics[width=6.5cm]{young1}
\caption{Young tableaux for three and four qubits. a) and b) correspond to three qubits, c), d) and e) to four qubits. a) and c) are the totally symmetric subspaces, b), d) and e) are non-symmetric.}
\label{youngfig1}
\end{figure}
In analogy with the three-qubit case, we may write the projector onto the non-symmetric subspace as
\begin{equation}
P_{nonsym}=\mathbf{1}-P_{sym}={1\over 2}\sum_{ijkl} P_{i,j} (\mathbf{1}-P_{k,l})
+{1\over 3}\sum_{ijkl} P_{i,j}P_{k,l},
\end{equation}
where the sums are taken with $(i,j,k,l)$ equal to permutations of (1,2,3,4) with $i>j$ and $k>l$.
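A similar consistency check applies here: each of the six terms in the first sum has trace $Tr\{P_{i,j}\}-Tr\{P_{i,j}P_{k,l}\}=4-1=3$, and each term in the second sum has trace $1$, so the right-hand side has total trace ${1\over 2}(6\cdot 3)+{1\over 3}(6\cdot 1)=11$, matching the dimension $2^4-5=11$ of the non-totally symmetric subspace.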
Let us then think about comparing the states of three three-dimensional pure quantum systems. We will be able to use what we learned from comparing three qubits, but in addition to a symmetric subspace and a non-symmetric subspace, there is now also a totally antisymmetric subspace corresponding to the Slater determinant $A(|0\rangle_1{\otimes}|1\rangle_2{\otimes}|2\rangle_3)$, where $A$ denotes antisymmetrisation. This state is antisymmetric in any pair of particles, which implies that finding the particles in this state indicates that no two of them were the same (i.e. they were all different). The non-symmetric subspace, on the other hand, corresponds to no three states being identical, which is a weaker statement.
We now know enough to understand the general case of comparing the states of $N$ $D$-dimensional quantum systems. Usually, the non-totally symmetric subspace contains only states with no definite overall symmetry. Totally antisymmetric states will exist only when the dimensionality $D$ is greater than or equal to the number of systems $N$. In a totally antisymmetric state, no two of the quantum systems are in identical states. In this case, there will be non-symmetric subspaces corresponding to `no $N$ states are identical', `no $N-1$ states are identical', `no $N-2$ states are identical', and so on, all the way down to `no two states are identical'. If $D<N$, there will be non-symmetric subspaces ranging from `no $N$ states are identical' only down to `no $\lceil N/D\rceil+1$ states are identical'. Here $\lceil N/D\rceil$ denotes the smallest integer greater than or equal to $N/D$; it is equal to the largest number of particles in any one slot when fitting $N$ particles into $D$ different slots, spreading them out as much as possible.
There may be more than one subspace corresponding to each step; the subspaces are all invariant under permutations and unitary transformations.
The Young tableaux are the key to understanding this structure, and we give an example in figure \ref{youngfig2}. When comparing five five-dimensional quantum systems, there are seven different subspaces, corresponding to 2a), the totally symmetric subspace, 2b), a subspace where at most four particles are the same, 2c) and 2d) where at most three particles are the same, 2e) and 2f) where at most two particles are the same, and, finally, the totally antisymmetric subspace 2g) where no two particles are the same. The difference between subspaces 2c) and 2d) is that in 2c), three of the quantum systems may be identical to each other, and the remaining two in turn identical to each other, whereas in 2d), if three quantum systems are identical, the other two have to be different from each other. In 2e), there may be two identical pairs of states, whereas in 2f), only one identical pair may exist.
This structure holds also for comparing the states of five quantum systems of higher dimension than five. The only difference is that the dimensions of the subspaces will be different, thus affecting the probabilities for the different results. When there are only four dimensions, only subspaces 2a) to 2f) may occur, when there are only three dimensions subspaces 2a) to 2e), and for five qubits, only 2a), 2b) and 2c) occur.
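The five-qubit case also illustrates the counting rule above: with $N=5$ and $D=2$ we have $\lceil N/D\rceil+1=4$, so the non-symmetric subspaces should range from `no five states are identical' down to `no four states are identical', in agreement with only the tableaux 2b) and 2c) occurring alongside the totally symmetric subspace 2a).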
\begin{figure}
\centering
\includegraphics[width=7cm]{young2}
\caption{Young tableaux for five five-dimensional quantum systems. a) is the totally symmetric subspace, b), c), d), e) and f) are non-symmetric, and g) is the totally antisymmetric subspace.}
\label{youngfig2}
\end{figure}
We have seen that it is possible to obtain more detailed information than `no, the quantum states of the particles are not all identical'. The full optimal universal error-free comparison strategy is a projection onto the different invariant subspaces of the $N$-particle system. The probabilities of the different outcomes can be calculated in analogy with equation (\ref{success}), and will be equal to $D_i/D_{tot}$, where $D_i$ is the dimension of the particular subspace considered, and $D_{tot}$ is the dimension of the total $N$-particle Hilbert space. Finding the systems in the symmetric subspace will correspond to an inconclusive result exactly as before.
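As a simple illustration, consider three three-dimensional systems ($N=D=3$) with a flat distribution of pure states. The totally symmetric subspace has dimension ${5\choose 3}=10$ and the totally antisymmetric subspace has dimension $1$, so the remaining mixed-symmetry part has dimension $27-10-1=16$. The probabilities of the outcomes `no three states are identical', `no two states are identical' and `inconclusive' are therefore $16/27$, $1/27$ and $10/27$, respectively.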
\section{Minimum-error and minimum-cost comparison}
\label{prob}
In this section, we will drop the requirement that the comparison has to be error-free, and instead seek to minimise the error, or the cost of making an error. The outcome of the comparison is now either `all the systems were in the same state' (again, only the overall phase of the total state of all the systems is defined) or `the systems were not all in the same state'. There is no inconclusive result, but instead the result may sometimes be wrong. The minimum error probability will be less than if one uses an error-free comparison strategy, and then, if the result is inconclusive, guesses whether the states were identical or not. Comparing the states of the quantum systems is equivalent to distinguishing between two density matrices, $\rho_S$ for when the states are all identical, and $\rho_D$ for when the states were not all identical. The problem of distinguishing between two density matrices with minimum error was already solved by Helstrom in his pioneering work \cite{hel}. We will briefly outline the method here, applied to quantum comparison.
Any prior information, which is available about the quantum systems and their states, is used in the construction of $\rho_S$ and $\rho_D$.
Let us denote their respective prior probabilities by $p_S$ and $p_D$. We then form the operator
\begin{equation}
\hat{O}=p_S\rho_S-p_D\rho_D.
\end{equation}
A way to distinguish between $\rho_S$ and $\rho_D$ with least possible error is then to measure this operator, and whenever a positive eigenvalue $\lambda_i$ is obtained, take the result to be `same' ($S$). Whenever a negative eigenvalue is obtained, the result is `different' ($D$). If a zero eigenvalue occurs, one guesses randomly between `$S$' and `$D$'. In terms of measurement operators, the measurement operator $\Pi_S$ for the result `$S$' is a projection onto all eigenstates with positive eigenvalues, plus one half times the projection onto all eigenstates with zero eigenvalue. The measurement operator $\Pi_D$ for the result `$D$' is a projection onto all eigenstates with negative eigenvalues, plus one half times the projection onto all eigenstates with zero eigenvalue,
\begin{eqnarray}
\Pi_S&=&\sum_{\lambda_i>0}|\lambda_i\rangle\langle\lambda_i|+
\frac{1}{2}\sum_{\lambda_i=0}|\lambda_i\rangle\langle\lambda_i|\nonumber\\
\Pi_D&=&\sum_{\lambda_i<0}|\lambda_i\rangle\langle\lambda_i|+
\frac{1}{2}\sum_{\lambda_i=0}|\lambda_i\rangle\langle\lambda_i|.
\end{eqnarray}
The difference between $p_c$, which is the probability for the result to be correct, and the error probability $p_e$, can be expressed as
\begin{eqnarray}
p_c-p_e&=&p(S,S)+p(D,D)-p(S,D)-p(D,S)\nonumber\\
&=&\text{Tr}\left[(p_S\rho_S-p_D\rho_D)\Pi_S-(p_S\rho_S-p_D\rho_D)\Pi_D\right]\nonumber\\
&=&\sum_{i}|\lambda_i|=\text{Tr}\sqrt{(p_S\rho_S-p_D\rho_D)^2}.
\end{eqnarray}
Here $p(i,j)$ denotes the overall probability that the state is $\rho_j$ and the result obtained is $i$, and we have used the fact that the conditional probability to obtain result $i$ when the state is known to be $\rho_j$ is $\text{Tr}(\rho_j\Pi_i)$, so that $p(i,j)=p_j\text{Tr}(\rho_j\Pi_i)$.
Since $p_e+p_c=1$, the minimum error probability of the comparison is given by
\begin{equation}
p_e=\frac{1}{2}\left[1-\text{Tr}\sqrt{(p_S\rho_S-p_D\rho_D)^2}\right]=\frac{1}{2}(1-\sum_{i}|\lambda_i|).
\end{equation}
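If, purely for illustration, both alternatives happen to be pure, $\rho_S=|\psi_S\rangle\langle\psi_S|$ and $\rho_D=|\psi_D\rangle\langle\psi_D|$, and occur with equal prior probabilities $p_S=p_D=1/2$ (in the comparison setting $\rho_S$ and $\rho_D$ are generally mixed), the nonzero eigenvalues of $p_S\rho_S-p_D\rho_D$ are $\pm{1\over 2}\sqrt{1-|\langle\psi_S|\psi_D\rangle|^2}$ and the minimum error probability reduces to the familiar expression
\begin{equation}
p_e=\frac{1}{2}\left(1-\sqrt{1-|\langle\psi_S|\psi_D\rangle|^2}\right).
\end{equation}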
In practical applications, the cost of making errors of different kinds often varies.
When comparing quantum systems, the cost incurred when obtaining the result `different' when the states really were identical, may be very different from the cost of obtaining the result `same' when the states really were different. For example, the former case may mean throwing away a valid set of quantum systems, presented by honest participants in a quantum protocol, whereas the latter, accepting a set of non-identical states as identical, may mean that an attempt to cheat succeeds. Let us denote the cost of obtaining the result `same' when the states really were different by $C_{SD}$, and the cost of obtaining the result `different' when the states were the same by $C_{DS}$. By $C_{SS}$ and $C_{DD}$ we denote the costs for obtaining the correct results (usually these are less than or equal to zero). Instead of the error, we now want to minimise the Bayes cost
\begin{equation}
C_B=p(S,S)C_{SS}+p(D,D)C_{DD}+p(D,S)C_{DS}+p(S,D)C_{SD}.
\end{equation}
In order to do this, we have to slightly modify the minimum-error comparison procedure and instead form and diagonalise \cite{hel}
\begin{equation}
\hat{O}_B=p_S(C_{DS}-C_{SS})\rho_S-p_D(C_{SD}-C_{DD})\rho_D.
\end{equation}
Exactly as for the minimum-error comparison, positive eigenvalues of this operator correspond to the result `same' and negative eigenvalues correspond to the result `different', while zero eigenvalues correspond to a random guess. The minimum Bayes cost will be
\begin{equation}
C_B=\frac{1}{2}\left[(C_{SS}+C_{DS})p_S+(C_{DD}+C_{SD})p_D-\sum_{i}|\lambda_i|\right],
\end{equation}
where we could also use $\sum|\lambda_i|=\text{Tr}\sqrt{\hat{O}_B^2}$. Note that the choice $C_{SS}=C_{DD}=0$, $C_{SD}=C_{DS}=1$ corresponds to the situation where all errors are equally costly, leading to the minimum-error comparison strategy.
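As a consistency check, with this choice of costs $\hat{O}_B$ reduces to $\hat{O}=p_S\rho_S-p_D\rho_D$, and the minimum Bayes cost becomes
\begin{equation}
C_B=\frac{1}{2}\left(p_S+p_D-\sum_{i}|\lambda_i|\right)=\frac{1}{2}\left(1-\sum_{i}|\lambda_i|\right)=p_e,
\end{equation}
which is exactly the minimum error probability obtained before.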
In this way, the optimal comparison strategy, minimising either the error or the Bayes cost, can be constructed for any comparison situation, numerically if not analytically. Notably, the obtained comparison strategy is optimal also in the presence of noise or entanglement with an environment.
The resulting strategy will be different from the error-free universal comparison strategy considered in section \ref{secuni}. What is the minimum-error comparison strategy when we have no prior information at all about the states, other than that they are pure? The distribution of possible states is flat, and this means that the prior probability for the states to be identical is zero. If minimising the error, it is therefore optimal to always directly guess that the states are different. Since this situation is perhaps not so interesting, we continue the discussion of minimum-error and minimum-cost comparison assuming that there is some kind of prior information available about the states which are compared. This prior information is used in the construction of $\rho_S$ and $\rho_D$ to obtain the optimal comparison strategy. Below we give an example to illustrate the procedure.
\subsection{Prior knowledge about the states of the systems}
\label{prior}
Often there is prior information about the quantum systems which are compared.
For example, it may be known that their states are identical with a certain probability $p$, and not identical with a probability $1-p$. Another example is when each state is known to be a member of a specified set of states $\{|\psi_1\rangle,|\psi_2\rangle,...,|\psi_m\rangle\}$. As an illustration of the method, we will explicitly construct the minimum-error comparison strategy for two quantum systems, when each of them is prepared in one of the so-called trine states
\begin{eqnarray}
|\psi_1\rangle&=&|+\rangle\nonumber\\
|\psi_2\rangle&=&\frac{1}{2}(-|+\rangle -\sqrt{3}|-\rangle)\nonumber\\
|\psi_3\rangle&=&\frac{1}{2}(-|+\rangle +\sqrt{3}|-\rangle),
\end{eqnarray}
with equal probability 1/3 for each of the trine states. Here $|+\rangle$ and $|-\rangle$ are orthogonal basis states.
If the states of the two quantum systems are identical, they can be either both in state $|\psi_1\rangle$, or both in state $|\psi_2\rangle$, or both in state $|\psi_3\rangle$. Each of these possibilities is equally likely, so the density matrix when the states are identical is given by
\begin{equation}
\rho_S=\frac{1}{3}(|\psi_{11}\rangle\langle\psi_{11}|+|\psi_{22}\rangle\langle\psi_{22}|+|\psi_{33}\rangle\langle\psi_{33}|),
\end{equation}
with prior probability $p_S=1/3$, where $|\psi_{ij}\rangle\equiv|\psi_i\rangle\otimes|\psi_j\rangle$. If the states are not identical, the density matrix is given by
\begin{equation}
\rho_D=\frac{1}{6}\left(|\psi_{12}\rangle\langle\psi_{12}|+|\psi_{23}\rangle\langle\psi_{23}|+|\psi_{31}\rangle\langle\psi_{31}|+|\psi_{21}\rangle\langle\psi_{21}|+|\psi_{32}\rangle\langle\psi_{32}|+|\psi_{13}\rangle\langle\psi_{13}|\right),
\end{equation}
with prior probability 2/3. An explicit calculation of $p_S\rho_S-p_D\rho_D$ and diagonalisation of this matrix gives the eigenvalues $\lambda_1=\lambda_2=-1/12$, $\lambda_3=-1/4$ and $\lambda_4=1/12$. We recall that negative eigenvalues correspond to the result `different', and positive eigenvalues correspond to the result `same'. The eigenvector corresponding to the only positive eigenvalue, $\lambda_4$, is $1/\sqrt{2}(|++\rangle+|--\rangle)$, and therefore the best comparison strategy is to make a projective measurement to see whether the two particles are found in this state or not. If they are, the result of the comparison is `identical'; if they are not, the result is `not identical'. The minimum error probability obtained in this way is $p_e=1/2(1-\sum|\lambda_i|)=1/4$. If we instead used the error-free comparison strategy, which is a projection onto the symmetric versus antisymmetric subspaces, combined with a guess whenever we obtain an inconclusive outcome, it can be shown that the least error probability possible to attain is 1/3, which is larger than 1/4. (It turns out to be optimal to always guess that the states are different.)
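The diagonalisation in this example is also easy to reproduce numerically. The following short sketch (ours, not part of the comparison protocol itself; the helper name \texttt{minimum\_error\_comparison} is merely illustrative) builds $\rho_S$ and $\rho_D$ for the trine states, diagonalises $p_S\rho_S-p_D\rho_D$ with NumPy, and reproduces $p_e=1/4$:
\begin{verbatim}
# Minimal numerical sketch of minimum-error comparison of two systems:
# diagonalise O = p_S*rho_S - p_D*rho_D; positive eigenvalues -> "same",
# negative -> "different", zero -> random guess (Helstrom's construction).
import numpy as np

def minimum_error_comparison(rho_S, rho_D, p_S, p_D):
    O = p_S * rho_S - p_D * rho_D
    evals, evecs = np.linalg.eigh(O)        # O is Hermitian
    Pi_S = sum(np.outer(v, v.conj())
               for v, lam in zip(evecs.T, evals) if lam > 1e-12)
    Pi_D = sum(np.outer(v, v.conj())
               for v, lam in zip(evecs.T, evals) if lam < -1e-12)
    Pi_0 = np.eye(O.shape[0]) - Pi_S - Pi_D  # zero-eigenvalue subspace
    Pi_S, Pi_D = Pi_S + Pi_0 / 2, Pi_D + Pi_0 / 2
    p_error = 0.5 * (1.0 - np.abs(evals).sum())
    return Pi_S, Pi_D, p_error

# Trine-state example: each system is prepared in one of the trine states.
psi = [np.array([1.0, 0.0]),
       np.array([-0.5, -np.sqrt(3) / 2]),
       np.array([-0.5, np.sqrt(3) / 2])]
rho_S = sum(np.outer(np.kron(a, a), np.kron(a, a)) for a in psi) / 3
rho_D = sum(np.outer(np.kron(a, b), np.kron(a, b))
            for a in psi for b in psi if a is not b) / 6
print(minimum_error_comparison(rho_S, rho_D, 1/3, 2/3)[2])   # prints 0.25
\end{verbatim}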
We should note, however, that in the special case we are considering, we could combine the minimum-error and error-free comparison strategies, by making a Bell measurement as follows. When the two particles are found in the state $1/\sqrt{2}(|+-\rangle-|-+\rangle)$, we know with certainty that their states must have been different. If the result is $1/\sqrt{2}(|++\rangle+|--\rangle)$, our best guess, not necessarily correct, is that the states were identical. If $1/\sqrt{2}(|++\rangle-|--\rangle)$ or $1/\sqrt{2}(|+-\rangle+|-+\rangle)$ is obtained, the best guess is that the states were different. Strictly speaking we do not have to distinguish between all four Bell states, as we do not have to separate these last two states from each other.
Although the example given here concerns comparison of the pure states of only two quantum systems, the method is generally valid for any number of quantum systems, pure or mixed.
If we only have two outcomes of the comparison, the optimal strategy can always be constructed. These two outcomes do not have to be `all states are identical' and `not all identical'; they could equally well be, for example, `at least $M$ out of $N$ states are identical' and `at most $M-1$ states are identical'. In distinguishing between these two possibilities we follow a procedure analogous to distinguishing between `all identical' and `not all identical'. This may be useful in a situation where we are willing to tolerate differences between the states up to a certain level.
\subsection{Detailed minimum-error and minimum-cost comparison}
As for error-free comparison, we can attempt to construct minimum-error and minimum-cost comparison strategies which will tell us more about similarities and differences among the states of the quantum systems. When comparing $N$ quantum systems, we can, for example, ask whether all states are identical, or $N-1$ states are identical, $N-2$ states identical, and so on, down to all states being different. Distinguishing between these cases is again equivalent to distinguishing between the density matrices corresponding to each of the cases. Unfortunately, the general problem of how to distinguish between more than two density matrices, with either minimum error or minimum Bayes cost, is extremely difficult to solve analytically. Even the problem of distinguishing between pure states has only been solved in a handful of special cases \cite{minerr}.
A set of conditions which the optimal solution has to satisfy is known \cite{hel}, but these conditions are of no great help in obtaining it. Nevertheless, given an explicit comparison situation, the optimal strategy could be obtained numerically by linear optimisation.
\section{Realisations with linear elements}
\label{realisations}
To experimentally realise quantum comparison may often require non-trivial manipulations, such as collective measurements on many quantum systems. An example of a collective measurement is Bell state detection, which is not possible with unit efficiency with only linear components \cite{john}. Bell state detection corresponds to distinguishing between two-particle entangled states. Universal error-free quantum comparison corresponds to projecting onto permutation- and unitary transformation-invariant $N$-particle subspaces, which is essentially the same as distinguishing between groups of entangled $N$-particle states. Therefore realisations of quantum comparison may well be non-trivial, essentially requiring quantum computation.
We shall see, however, that the statistics of indistinguishable quantum systems makes it possible to implement universal quantum comparison using only linear elements and particle detectors, for example with a linear optical network and photon detectors. This has great importance for experimental realisations and practical applications of quantum comparison strategies.
Throughout this paper, we have been concerned with the situation when only one sample each of the $N$ particles is available. Experimentally it is usually easier to prepare many copies of a certain quantum system, for example, many photons in a coherent state in a laser beam, rather than only a single copy. If many, or even an unlimited number of copies of each particle are available, it is possible to estimate the state of each quantum system individually and compare their states using this information.
In a sense, estimating the quantum systems individually resembles a `classical' comparison strategy. Here, however, we are interested in the quantum limits to comparison, and this is why we choose to consider the experimentally more difficult scenario where only one copy of each quantum system is available. We will mainly consider realisations of the error-free comparison strategy treated in section \ref{secuni}, and make some brief remarks about the realisation of minimum-error and minimum-cost comparison strategies.
\subsection{Comparing the states of two quantum systems}
A $2\times 2$ beam splitter may be used to distinguish between symmetric and antisymmetric polarisation states of two photons \cite{zeil}. The two photons will always exit in the same direction if they are in a symmetric state of polarisation, but in different directions if they are in the antisymmetric state of polarisation. In essence, this is a partial Bell state measurement. As a consequence, error-free comparison of two photonic qubits could be effected with a beam splitter, which is a linear optical element, with photon detectors at the outputs \cite{steve}. The outcome `different' will correspond to the photons exiting in different directions (an antisymmetric state), and the inconclusive outcome will correspond to the photons exiting in the same direction. This implements the desired projection with unit probability.
Polarised photons may here be thought of as qubits. Using different states of angular momentum, photons may also be used to represent qudits, with, in principle, arbitrarily many levels per photon \cite{sonja}. We now note that the beam splitter universal comparison strategy will still be possible. This is because the non-polarising beam splitter actually affects the {\it spatial} state of the two photons.
The beam splitter effects the transformation
\begin{eqnarray}
\label{bs2}
\hat{a}^\dagger_{0,out}&=&{1\over{\sqrt{2}}}(\hat{a}^\dagger_{0,in}+
\hat{a}^\dagger_{1,in})\nonumber\\
\hat{a}^\dagger_{1,out}&=&{1\over{\sqrt{2}}}(\hat{a}^\dagger_{0,in}-\hat{a}^\dagger_{1,in}),
\end{eqnarray}
where $\hat{a}^\dagger_{i,in}$ and $\hat{a}^\dagger_{i,out}$ are the creation operators for a particle in the spatial input and output mode $i=0,1$. The overall state of the two photons has to be symmetric, since they are bosons. This means that a totally symmetric polarisation or angular momentum state, $|\psi_{sym}\rangle$, is connected with a totally symmetric spatial state,
$ \hat{a}_1^\dagger\hat{a}_0^\dagger |0\rangle$. Using equation (\ref{bs2}), one sees that this spatial input state always results in the two photons exiting together from the same output port. If the photons leave in different directions, their internal state can therefore not have been symmetric, and consequently the internal states were not identical. This is why the spatial beam splitter can be used for comparing the internal states of the photons, be it polarisation or angular momentum, even if it affects the spatial part of the wave function.
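For completeness, and using that the transformation (\ref{bs2}) is its own inverse, the symmetric spatial input state transforms as
\begin{equation}
\hat{a}^\dagger_{0,in}\hat{a}^\dagger_{1,in}|0\rangle={1\over 2}\left(\hat{a}^\dagger_{0,out}+\hat{a}^\dagger_{1,out}\right)\left(\hat{a}^\dagger_{0,out}-\hat{a}^\dagger_{1,out}\right)|0\rangle={1\over 2}\left(\hat{a}^{\dagger 2}_{0,out}-\hat{a}^{\dagger 2}_{1,out}\right)|0\rangle ,
\end{equation}
so that the two photons indeed always leave through the same output port.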
The beam splitter method of comparing two photons would also work for other types of quantum systems, if a beam splitter is experimentally available. Possible physical systems include ions \cite{iontrap} or neutral atoms in traps \cite{atomtrap}. For fermions, a totally symmetric internal state is connected with a totally {\it anti}symmetric spatial state. If two fermions exit together, leaving the other exit empty, their internal states must clearly have been different --- no two fermions may occupy the same quantum state.
\subsection{Comparing the states of more than two quantum systems}
For two quantum systems, a beam splitter is enough to implement universal error-free quantum comparison with unit efficiency. There is, however, no immediate reason why linear elements would be sufficient for error-free comparison of the states of more than two quantum systems. The fact that we are asking for unambiguous results complicates the situation. A careful investigation, however, shows that linear elements are enough, at least if one is satisfied with a difference detection probability which may be less than ideal, but still nonzero. One now has to use an $N\times N$ beam splitter, which can be implemented with a network of $2\times 2$ beam splitters \cite{igornetwork}. The $N\times N$ beam splitter transform affects only the spatial part of the quantum state and may be written as
\begin{equation}
\label{bstrafo}
\hat{a}^\dagger_{j,out}={1\over{\sqrt{N}}}\sum_{k=0}^{N-1}
\exp(i{{2\pi}\over N}kj)\hat{a}^\dagger_{k,in}
\end{equation}
where $\hat{a}^\dagger_{j,out}$ and $\hat{a}^\dagger_{k,in}$ are the creation operators for a particle in spatial output mode $j$ and input mode $k$, respectively.
The compared particles are fed into the beam splitter, each into one input port. If their internal states are all identical, the input state is given by
\begin{equation}
\label{bsinstate}
|\Psi_{in}\rangle = \hat{a}^\dagger_{0,in} \hat{a}^\dagger_{1,in}...\hat{a}^\dagger_{N-1,in}|0\rangle_{spatial}\otimes
|\psi\rangle_{internal}^N
\end{equation}
where $|\psi\rangle_{internal}$ is the internal quantum state of each of the $N$ particles we wish to compare, and the spatial part of the state is symmetric (bosons) or antisymmetric (fermions). The beam splitter does not affect the internal states of the particles. For fermions in identical internal states, we thus see that both the total spatial input and output states have to be antisymmetric, with no more than one fermion in each beam splitter output port. This implies that as soon as two or more of the fermions exit along the same path, or, equivalently, there is no `click' in some other exit, the fermions cannot all have had identical internal states. A brief consideration shows that, if only $M$ out of the $N$ detectors fire, then at most $M$ of the fermions can have been identical.
For more than two bosons, such as photons, the click patterns indicating differences among the particles will be slightly different.
For three bosons, a calculation using equations (\ref{bstrafo}) and (\ref{bsinstate}) shows that, if their internal states are identical, the output state will be
\begin{equation}
\label{threebos}
{1\over {3\sqrt{3}}}(\hat{a}^{\dagger 3}_{1,out}+\hat{a}^{\dagger 3}_{2,out}+
\hat{a}^{\dagger 3}_{3,out} - 3\hat{a}^\dagger_{1,out}\hat{a}^\dagger_{2,out}\hat{a}^\dagger_{3,out})|0\rangle
\otimes|\psi\rangle ^3_{internal}
\end{equation}
(remember that $\hat{a}^{\dagger n}|0\rangle = \sqrt{n!}|n\rangle$).
All the bosons will either leave from different outputs, or all from the same. Two bosons leaving from the same output, and the third from a different output, is not possible. Therefore, when it occurs, a `two detectors fire, the third does not' click pattern indicates a difference among the internal states of the bosons. For four identical bosons, the output state will be (suppressing the `out' indices)
\begin{eqnarray}
\label{fourbos}
&&{1\over {16}}\left(\hat{a}^{\dagger 4}_{1}+\hat{a}^{\dagger 4}_{2}+\hat{a}^{\dagger 4}_{3}+ \hat{a}^{\dagger 4}_{4}
- 2\hat{a}^{\dagger 2}_{1}\hat{a}^{\dagger 2}_{3} +
2\hat{a}^{\dagger 2}_{2}\hat{a}^{\dagger 2}_{4} \right.\nonumber\\
&&\left.-4\hat{a}^{\dagger 2}_{1}\hat{a}^\dagger_{2}\hat{a}^\dagger_{4}
+4\hat{a}^{\dagger 2}_{2}\hat{a}^\dagger_{1}\hat{a}^\dagger_{3}
-4\hat{a}^{\dagger 2}_{3}\hat{a}^\dagger_{2}\hat{a}^\dagger_{4}
+4\hat{a}^{\dagger 2}_{4}\hat{a}^\dagger_{1}\hat{a}^\dagger_{3}\right)|0\rangle
\otimes|\psi\rangle ^4_{internal}.
\end{eqnarray}
From this expression we conclude, for example, that one of the many click patterns indicating a difference is when all four bosons exit through different outputs, since this is not possible when all internal states are identical. To find out exactly which click patterns indicate differences among the internal states of $N$ bosons is a straightforward but somewhat labour-intensive task. One of these click patterns will always be $N-1$ bosons exiting from the same output port, and the remaining one from a different output port.
Another feasible option for comparing more than two photons, or other bosons, is to use pairwise comparison. When comparing three photons, we may compare only two of them; when comparing four photons, we may compare them as two pairs, and so on. If at least one pair of photons is found to be different, then obviously all the photons cannot have been identical. This method will, however, be less efficient.
If we are only able to count photons at the individual exits, the efficiency of the beam splitter realisation will not, in general, be as good as for a perfect projection onto different subspaces as described in sections \ref{secuni} and \ref{subspaces}. To reach the optimal efficiency, we would have to be able to project onto states like (\ref{threebos}) and (\ref{fourbos}) and the state spaces orthogonal to these. If we were able to measure the relative phases of the detected particles, this would improve the efficiency, but would still not be enough to reach the optimum. Whether the beam splitter realisation is optimal, given that we can only use linear elements and particle detectors, is an open question. It is, however, clear that the optimal realisation must use a balanced multiport, since the optimal realisation should again be invariant under permutations and unitary transformations.
If we are not able to count the photons in each output detector, only tell whether a given detector is firing or not, this will further degrade the efficiency. We will then be unable to distinguish between events of the type `three photons in detector A and one photon in detector B' (indicating, say, a difference) and `two photons each in detector A and B' (say, indicating an inconclusive outcome). We will have to count both these events as `detectors A and B fired', giving an inconclusive result.
Noise, dark counts and missed detections will mean that the experimentally realised universal quantum comparison is not completely error-free anymore. The compared quantum states will also be destroyed unless we can detect in which path the individual particles exit without disturbing them.
But the important and perhaps somewhat surprising conclusion remains, that it is possible to compare the quantum states of arbitrarily many quantum systems, such as photons, ions or neutral atoms, using only a linear network of beam splitters and particle detectors.
\subsection{Realising minimum-error and minimum-cost comparison}
In section \ref{prob}, we saw that the minimum-error and the minimum Bayes cost comparison strategies are always projective measurements in some basis. Thus these comparison strategies can always be realised in principle, but as for the universal error-free comparison, they may require a measurement in a highly entangled basis, making the realisation difficult in practice.
We gave an example of a comparison strategy for determining, with minimum error, whether the states of two quantum systems were identical or not. The state of each of the systems was known to be one of three given quantum states. In the particular case we considered, the quantum comparison can be realised with a beam splitter in a way similar to the universal error-free comparison. We need to distinguish the state $1/\sqrt{2}(|++\rangle+|--\rangle)$ from the other Bell states, as this state corresponds to the outcome `same' and the other Bell states (or linear combinations of these) correspond to the outcome `different'. One way to do this is to apply a rotation of $\pi/2$ to one of the quantum systems, so that $|+\rangle\leftrightarrow |-\rangle$, and $1/\sqrt{2}(|++\rangle+|--\rangle)$ transforms into $1/\sqrt{2}(|+-\rangle+|-+\rangle)$. If two quantum systems in this state are incident on a beam splitter, both systems will exit together since the state is symmetric. But this case can readily be distinguished from the other symmetric states, $1/\sqrt{2}(|++\rangle\pm|--\rangle)$, since it is the only one where the quantum systems have different states when each of them is measured in the basis $\{|+\rangle,|-\rangle\}$. To do this, direct each of the beam splitter outputs onto a polarising beam splitter, separating $|+\rangle$ and $|-\rangle$, and look for clicks at both outputs of one of the polarising beam splitters. Note that since the comparison situation is not invariant under unitary transforms, we need to know in which basis to perform the measurement. Essentially, this method has already been used to individually distinguish two Bell states from each other and from the other two Bell states \cite{zeil}.
The given example is only an illustration and by no means has any general significance.
Optimal minimum-error and minimum-cost comparison strategies are highly dependent on the information which is available about the states and the quantum systems, and therefore also their realisations have to be individually tailored.
\section{Discussion and conclusions}
In this paper we have considered comparison of the states of quantum systems, when only one copy of each quantum system is available. We have treated error-free comparison strategies, which give definite answers, at the expense of sometimes obtaining an inconclusive result, and minimum-error and minimum-cost comparison strategies, which sometimes may give the wrong result. Error-free strategies are useful whenever it is important that the answers obtained are guaranteed to be correct. Here a restriction to pure states is made, because for mixed quantum states, unambiguous answers will in general not be possible. In this case, one can instead opt for a minimum-error or minimum-cost comparison strategy. Error-free comparison of mixed states may be possible, but only if there is prior information about the mixed states.
The optimal universal error-free quantum comparison strategy, treated in sections \ref{secuni} and \ref{subspaces}, was found to be a projection onto different subspaces, invariant under permutations and unitary transformations of the quantum systems. Finding the state of the systems outside the totally symmetric subspace corresponds to the states being not all identical to each other. Finding the systems in the symmetric subspace corresponds to an inconclusive outcome, since in this case we cannot be sure if their states were all identical or not. By further checking in which subspace of the non-symmetric space the state of the systems is found, one can obtain more detailed information about the differences and similarities among the quantum systems. For example, finding the particles in the totally antisymmetric subspace means that no two quantum systems were in the same state. This outcome is possible only if the dimensionality $D$ of the Hilbert space for each quantum particle is greater than or equal to the number of particles $N$.
Sometimes we may have prior information about the quantum systems we want to compare. In this case, it is often possible to improve the success probability of the comparison by using this information in designing the comparison strategy. In particular, it is sometimes possible to obtain also an unambiguous answer that the states of all the systems are identical. If the states of the compared quantum systems are known to belong to a given set of states, this will be possible if and only if there is at least one state in this set which is linearly independent of the other states in the set \cite{tonyprivate}. This requires an error-free comparison strategy different from the universal strategy, depending on the individual situation.
In section \ref{prob}, we explained how to construct the optimal comparison strategies minimising either the error probability, or the Bayes cost, which is the average cost of making an error. The resulting comparison strategy is optimal also when the states of the systems are mixed, when there is noise, or entanglement with an environment. The presented method could be used to obtain the optimal comparison strategy for applications such as quantum fingerprinting \cite{buhr} and quantum digital signatures \cite{chuang}. In these cases, there is prior information about the compared quantum states; essentially we have to compare codeword states to each other. The c-SWAP comparison strategy used in \cite{buhr, chuang} is actually one special realisation of the universal error-free comparison strategy described in section \ref{secuni}, with the difference that the inconclusive result is taken to mean that the states are identical. This way one obtains a comparison strategy with one-sided error. As we have shown, however, one can usually construct a minimum-error comparison strategy with less error than this. It is possible to use the universal comparison strategy, and this strategy may even be good, depending on the situation, but not necessarily optimal. An optimal strategy would of course improve the success rate of the protocols, but is also of importance to consider when designing strategies for cheating.
We found, in section \ref{realisations}, that error-free (and sometimes also minimum-error) quantum comparison can be realised using only beam splitters and particle detectors also for more than two quantum systems, making it possible to implement using linear optics. Quantum comparison could be realised this way also for any other physical systems, as soon as beam splitters and particle detectors are available. This is of great importance for practical applications.
The c-SWAP realisation, in contrast, requires rather non-trivial quantum computation. A recent suggestion for implementing the c-SWAP strategy on two polarised photons with linear optics, besides being more complicated than using a beam splitter, succeeds only with probability 1/8 \cite{grudka}. In contrast, the beam splitter realisation succeeds in implementing the desired projection with unit probability for two photons. The drawback is of course that the photons are absorbed by detectors, whereas the c-SWAP realisation retains the quantum states in the case when they are identical.
When comparing more than two quantum systems, the efficiency of the beam splitter realisation is somewhat lower than ideal. It is an open question whether it is possible to reach a higher efficiency, given that only linear elements and particle detectors are available for the realisation. Relatedly, the optimal efficiency of a Bell state measurement, realised with only linear elements, is 1/2, and this is indeed reached with a beam splitter \cite{john}. To reach higher efficiencies in a Bell state measurement requires the use of nonlinear effects, or additional entanglement in auxiliary degrees of freedom.
We should also note that recent results on measuring the overlap of two states \cite{olomouc} turn out to be closely related to state comparison for two states, in particular through the beam splitter realisation.
There exists a relation between quantum comparison and quantum state discrimination~\cite{hel, tonyrev}, which is currently a widely studied problem. In a state discrimination situation, one is given a quantum system, whose state belongs to some set of given quantum states, with associated prior probabilities. The task is then to determine which one of these states the system is in. The procedure may be optimised with respect to different criteria, for example minimising the error in the result. Finding optimal state discrimination strategies for a given set of possible states is usually a highly non-trivial problem.
Comparing the states of a set of given quantum systems can be rephrased as a state discrimination problem: we have to discriminate the case when the states of the quantum systems are the same from the case when they are different. In fact, this argument is explicitly used in section \ref{prob} to obtain the minimum-error and minimum-cost comparison strategies, and it essentially also underlies the discussion of the error-free comparison strategies in sections \ref{secuni} and \ref{subspaces}.
Also related to quantum comparison is quantum template matching~\cite{sas1, sas2}. Here the task is to determine which one of a set of given template states is closest to an unknown input state.
Quantum comparison may be used in many applications in quantum information, for example in error correction schemes, where it could be used to detect whether the results of many runs of the same quantum calculation agree or not. It could also be used to detect differences between a quantum state and one or many reference states.
To conclude, we have seen that it is possible, given only one copy each of $N$ quantum systems, to compare the states of these quantum systems.
Quantum comparison gives collective information about the states of the compared quantum systems, without obtaining individual information about their states.
\section*{Acknowledgments}
We want to thank Prof. Stephen M. Barnett for valuable discussions.
Financial support for IJ by the Ministry of Education of the Czech Republic (MSMT 210000018) and GACR 202/01/0318, for EA by the EU Marie Curie programme, project number HPMF-CT-2000-00933, and for AC by the Engineering and Physical Sciences Research Council (EPSRC) and the University of Hertfordshire, is gratefully acknowledged.
\begin{figure}
\centering
\includegraphics[width=6.5cm,height=!]{young1}
\caption{Young tableaux for three and four qubits. a) and b) correspond to three qubits, c), d) and e) to four qubits. a) and c) are the totally symmetric subspaces, b), d) and e) are non-symmetric.}
\label{youngfig1}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=7cm,height=!]{young2}
\caption{Young tableaux for five five-dimensional quantum systems. a) is the totally symmetric subspace, b), c), d), e) and f) are non-symmetric, and g) is the totally antisymmetric subspace.}
\label{youngfig2}
\end{figure}
\end{document}
\begin{document}
\title{Index-zero closed geodesics and stable figure-eights in convex hypersurfaces}
\author{Herng Yi Cheng}
\date{}
\maketitle
\begin{abstract}
For each odd $n \geq 3$, we construct a closed convex hypersurface of $\mathbb{R}^{n+1}$ that contains a non-degenerate closed geodesic with Morse index zero. A classical theorem of J.\,L.~Synge would forbid such constructions for even $n$, so in a sense we prove that Synge's theorem is ``sharp''. We also construct stable figure-eights: that is, for each $n \geq 3$ we embed the figure-eight graph in a closed convex hypersurface of $\mathbb{R}^{n+1}$, such that sufficiently small variations of the embedding either preserve its image or increase its length.
These index-zero geodesics and stable figure-eights are mainly derived by constructing explicit billiard trajectories with ``controlled parallel transport'' in convex polytopes.
\end{abstract}
2020 \emph{Mathematics Subject Classification.} Primary 53C22; Secondary 53C42
\section{Introduction}
Can one lasso a convex body? Or, more formally, can a convex hypersurface of Euclidean space contain a non-degenerate closed geodesic with Morse index zero? The answer will depend on the parity of the dimension of the hypersurface. In this paper we construct non-degenerate closed geodesics with Morse index zero in certain convex hypersurfaces, as well as \emph{stable geodesic nets}, which generalize those closed geodesics. Roughly speaking, a stable geodesic net in a Riemannian manifold $M$ is an immersion in $M$ of a connected graph $\Gamma$, so that every edge is immersed as a constant-speed geodesic and every sufficiently small\footnote{``Sufficiently small'' is defined with respect to a certain metric on the space of immersions $\Gamma \to M$ that is formally defined in \cite[Section~2]{Cheng_StableGeodesicNets}.} perturbation of the immersion either preserves its image or increases its length. This means that among the space of immersions of $\Gamma$ in a given manifold, a stable geodesic net is a local minimum under the length functional. Stable geodesic nets are special cases of \emph{stationary geodesic nets}, which are stationary points of the length functional in the space of immersions of a given connected graph $\Gamma$ \cite{Pitts_StationaryVarifolds,AllardAlmgren_StationaryVarifolds,NabutovskyRotman_GeodesicNets,Rotman_Flowers,LiokumovichStaffa_GenericDensity,HassMorgan_GeodesicNetS2}. Stationary geodesic nets and stable geodesic nets are formally defined in \cite[Section~2]{Cheng_StableGeodesicNets}. In this paper we will mainly construct non-degenerate closed geodesics with index zero (which are precisely the stable geodesic nets with a single loop), as well as stable geodesic nets that are immersions of the figure-eight graph, which we call \emph{stable figure-eights}.
We will mostly consider stable geodesic nets that are immersions of ``flower-shaped'' graphs $\bq_k$ comprising $k \geq 1$ loops at a single vertex. Such nets are called \emph{stable geodesic bouquets}.
Index-zero closed geodesics and stable geodesic nets are easier to construct in manifolds $M$ that are not simply-connected or have some negative curvature. If $M$ is not simply-connected, then continuously shortening any closed loop that is not null-homotopic would eventually yield an index-zero closed geodesic. Similarly, if we have $k$ loops based at the same point $p \in M$ that represent non-trivial elements in $\pi_1(M,p)$, then we could construct a stable geodesic bouquet by gluing every loop at the basepoint to give an immersion $G : \bq_k \to M$, and then applying a length-shortening process that preserves the graph structure.\footnote{One formal way to shorten an immersion $\bq_k \to M$ is to ``discretize'' first by perturbing each loop until it is composed of some large but fixed number of tiny geodesic arcs, each arc being shorter than the injectivity radius of $M$. The space of such ``piecewise geodesic'' immersions $\bq_k \to M$ that are bounded in length and have a given number of geodesic arcs is a finite-dimensional manifold. Moving the discretized immersion along this manifold in the direction of fastest length decrease induces a length-shortening flow. A similar process is detailed in \cite[Section~4]{NabutovskyRotman_GeodesicNets}.} Generically, this ensemble of loops should converge to a stable geodesic bouquet.\footnote{It may be possible that the images of some of the loops of the resulting stable geodesic bouquet are the same closed geodesic, but we can avoid that by choosing the initial loops to represent elements $a_1, \dotsc, a_k \in \pi_1(M,p)$ that are ``independent'' in the sense that no $a_i$ is a power of another $a_j$.}
Even when $M$ is simply-connected, if it has a closed geodesic in a region with strictly negative curvature, then that geodesic must have index zero, due to the formula for the second variation of length. Likewise, if a \emph{stationary geodesic net}---an immersion of a fixed connected graph that is a critical point under the length functional---lies in a region of strictly negative curvature then it must also be stable.
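For the reader's convenience, we recall the standard formula behind both observations: if $\gamma$ is a closed geodesic parametrized by arc-length and $V$ is a variation field along $\gamma$ with orthogonal component $V^\perp$, then the second variation of length is
\begin{equation*}
\delta^2 L(V) = \int_0^{\length(\gamma)} \Big( \norm{\nabla_{\gamma'(t)}V^\perp}^2 - \ip{R(V^\perp,\gamma')\gamma', V^\perp} \Big)\,dt,
\end{equation*}
where $R$ is the Riemann curvature tensor. Wherever the sectional curvature is non-positive, the integrand is non-negative, which forces the index of $\gamma$ to be zero.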
On the other hand, there are several negative results that rule out the existence of index-zero closed geodesics and stable geodesic bouquets in certain closed manifolds $M$ that are simply-connected and have positive sectional curvature. Indeed, if $M$ is also even-dimensional and orientable, then a classical theorem of J.\,L.~Synge prevents it from having an index-zero closed geodesic \cite{Synge_GeodesicPosCurv}. Moreover, a well-known conjecture of H.\,B.~Lawson and J.~Simons \cite{LawsonSimons} would imply that if $M$ is closed and simply-connected, and its curvature is $1/4$-pinched, then it cannot contain any stable submanifolds or stable varifolds of any dimension, which includes index-zero closed geodesics and stable geodesic bouquets. This conjecture has been proven under various additional pinching assumptions \cite{Howard_SuffPinched,ShenXu_QuarterPinchedHypersurfaces,HuWei_FifthPinched}. In particular, the conjecture holds for all round spheres, so that none of them contain any index-zero closed geodesic or stable geodesic bouquet. I.~Adelstein and F.\,V.~Pallete also proved that positively curved Riemannian 2-spheres cannot have a stable geodesic net in the shape of a $\theta$ graph \cite{AdelsteinPallete}.
Given these negative results, it is natural to ask which, if any, simply-connected and closed manifolds with positive sectional curvature can contain index-zero closed geodesics or stable geodesic bouquets. Constructions of such manifolds are rarer than the negative results. W.~Ziller constructed a 3-dimensional positively-curved homogeneous space that contains an index-zero closed geodesic \cite[Example~1]{Ziller_HomogSpaceStableClosedGeodesic}. In addition, the author recently constructed, in every dimension $n \geq 2$, a Riemannian $n$-sphere which is isometric to a convex hypersurface and which contains a stable geodesic bouquet. The bouquet has 3 loops if $n = 2$, and has $n$ loops if $n \geq 3$ \cite{Cheng_StableGeodesicNets}. The convex hypersurfaces have strictly positive sectional curvature. Those stable geodesic bouquets are also \emph{irreducible}, in the sense that no pair of tangent vectors at the basepoint are parallel. (This rules out ``degenerate'' stable geodesic bouquets that are merely unions of index-zero closed geodesics. Indeed, many proofs of the existence of stationary geodesic nets that rely on min-max methods are unable to tell whether the result is a union of closed geodesics \cite{NabutovskyRotman_GeodesicNets,Rotman_Flowers,LiokumovichStaffa_GenericDensity}.) To our understanding, that was the first existence result for irreducible stable geodesic nets in simply-connected and closed manifolds with strictly positive sectional curvature. In this paper we prove another existence result along the same lines, except that only two loops are necessary in the stable geodesic bouquet:
\begin{theorem}[Main result]
\label{thm:Stable2LoopPosCurv}
For every integer $n \geq 3$, there exists a closed convex hypersurface $M_n$ of $\R^{n+1}$ with positive sectional curvature that contains a simple, irreducible and stable figure-eight.
Furthermore, for each $n \geq 3$, every Riemannian $n$-sphere whose metric is sufficiently close to that of $M_n$ in the $C^\infty$ topology also contains a simple, irreducible and stable figure-eight.
\end{theorem}
(By \emph{simple} we mean that the stable figure-eight is an injective immersion. Note that no stable figure-eight in a Riemannian surface (i.e. $n = 2$) can be irreducible.)
On the face of it, this result seems stronger than the one in \cite{Cheng_StableGeodesicNets}: when more loops are allowed, it seems easier to ``arrange'' them into a stable geodesic bouquet, so achieving stability with only two loops is more demanding. Using only two geodesic loops $\gamma_1, \gamma_2 : I \to M$ gives us much less freedom, because the condition of being a critical point of the length functional requires that the same line must bisect the angle between $\gamma_1'(0)$ and $-\gamma_1'(1)$ as well as the angle between $\gamma_2'(0)$ and $-\gamma_2'(1)$. Moreover, both of those angles must be equal in magnitude.
Nevertheless, our main result is independent of the one in \cite{Cheng_StableGeodesicNets}. We will explain how using only two loops forces us to control the parallel transport map along the geodesic loops more precisely. The new techniques developed in this paper for such precise control will also yield ``elementary'' constructions of index-zero closed geodesics in convex hypersurfaces, which in some sense proves that the aforementioned theorem of Synge is ``sharp'':
\begin{theorem}
\label{thm:StableClosedGeodesicPosCurv}
For every odd integer $n \geq 3$, there exists a closed convex hypersurface $M_n'$ of $\R^{n+1}$ with positive sectional curvature that contains a simple closed geodesic with index zero.
Furthermore, for each odd $n \geq 3$, every Riemannian $n$-sphere whose metric is sufficiently close to that of $M_n'$ in the $C^\infty$ topology also contains a simple closed geodesic with index zero.
\end{theorem}
(\emph{Simple} means that the closed geodesic does not self-intersect.)
It may be possible that Ziller's construction can be generalized to find index-zero closed geodesics in positively-curved homogeneous spaces of higher odd dimension. However, these spaces could never be isometric to closed convex hypersurfaces of Euclidean space; if that were possible, then they would have to be isometric to a round sphere due to a result by S.~Kobayashi \cite{Kobayashi_HomogHypersurfaces}. The existence of an index-zero closed geodesic in this round sphere would then contradict a special case of the conjecture by Lawson and Simons which has been proven, for instance, by R.~Howard \cite{Howard_SuffPinched}.
We also point out that the index-zero closed geodesics and stable figure-eights that we construct can be considered as ``elementary'' because they essentially lie in manifolds with flat Riemannian metric away from some singularities (which we eventually smooth into convex hypersurfaces), and also because the geodesics involved are derived from certain billiard trajectories in convex polytopes that can be explicitly computed.
\subsection{Motivation for the key ideas}
\subsubsection{Index-zero closed geodesics as core curves of maximally-twisted tubes}
\label{sec:Intro_CoreCurves}
The connection between stability and parallel transport can be appreciated more immediately in the simplest case of a simple closed geodesic $\gamma : I \to M$ based at the point $p = \gamma(0) = \gamma(1)$ in an orientable manifold $M$. Consider the ``component of the parallel transport map along $\gamma$ that is orthogonal to $\gamma'$,'' that is, the map $P^\perp : T_pM^\perp \to T_pM^\perp$ that is the restriction of the parallel transport map to the subspace $T_pM^\perp \subset T_pM$ of vectors orthogonal to $\gamma'(0) = \gamma'(1)$. Synge defined the ``twist'' of $\gamma$ to be the smallest value of $\cos^{-1}\ip{P^\perp u,u}$ for all unit vectors $u \in T_pM^\perp$. He then proved that if $M$ has curvature bounded from below by a constant $K_0 > 0$ and $\gamma$ has index zero, then the twist of $\gamma$ must be at least $\length(\gamma)\sqrt{K_0}$ (see Theorem~II in \cite{Synge_GeodesicPosCurv}).
This suggests a rule of thumb: to construct a simple closed geodesic that is stable, construct it so that its tubular neighbourhood is ``sufficiently twisted.'' This leads us to some concrete examples that will form the basis of our proof of \cref{thm:StableClosedGeodesicPosCurv}, namely the index-zero closed geodesics that are the core curves of ``maximally-twisted tubes.'' More precisely, for some integer $n \geq 2$ and lengths $r, \ell > 0$, consider the space $(\D^{n-1}_r \times [0,\ell])/{\sim}$, where $\D^{n-1}_r$ is the closed disk of radius $r$ centered at the origin in $\R^{n-1}$, and the equivalence relation $\sim$ identifies $(x,0)$ with $(-x,\ell)$ for all $x \in \D^{n-1}_r$. The resulting space is a ``tube'' $T$ with a flat Riemannian metric, which we call an \emph{$n$-dimensional maximally-twisted tube}. (When $n = 2$ this is a M\"obius strip.) $T$ is orientable if and only if $n$ is odd. Parallel transport along the core curve $\gamma : [0,\ell] \to T$ satisfies $P^\perp(v) = -v$, so $\gamma$ has twist equal to the maximum possible value of $\pi$. An elementary argument shows that $\gamma$ attains the minimal length among all smooth closed curves in $T$ freely homotopic to $\gamma$.\footnote{The curve with minimal length must be a geodesic. Otherwise the curve-shortening flow would reduce its length. (The convexity of $T$ implies that curves in $T$ will stay in $T$ under that flow.) However, the only geodesics that stay in $T$ are curves that run parallel to $\gamma$. Being homotopic to $\gamma$ means that the minimizing length curve must close up after exactly one round around $T$, but that can only happen if the curve passes through the origin of some copy of $\D^{n-1}_r$ in $T$. Thus that curve must be $\gamma$.} Alternatively, one can see that $\gamma$ must have index zero because $T$ has a flat Riemannian metric, so the curvature term in the second variation of length vanishes, which forces the second variation to always be non-negative. We will prove that $\gamma$ is non-degenerate later in \cref{Lem:MaxTwistStableGeodesic}.
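To justify the orientability claim above: the gluing map that identifies $\D^{n-1}_r \times \{\ell\}$ with $\D^{n-1}_r \times \{0\}$ acts on the disk factor by $x \mapsto -x$, whose determinant is $(-1)^{n-1}$, while it preserves the direction along $[0,\ell]$. The gluing therefore preserves orientation exactly when $(-1)^{n-1} = +1$, that is, when $n$ is odd, which is why $T$ is orientable if and only if $n$ is odd.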
Maximally-twisted tubes will help us prove \cref{thm:StableClosedGeodesicPosCurv} in the following way. For each odd $n \geq 3$ we will first embed an $n$-dimensional maximally-twisted tube in a ``polyhedral manifold'' obtained by gluing two copies of a convex $n$-dimensional polytope $\X^n \subset \R^n$ (henceforth, \emph{$n$-polytope}) along their boundaries via the identity map. The result, called the \emph{double} of $\X^n$ and denoted by $\dbl\X^n$, is a topological manifold homeomorphic to the $n$-sphere. In fact, $\dbl\X^n$ is a Riemannian manifold with singularities at the image of the $(n-2)$-skeleton of $\X^n$. The \emph{smooth part} of $\dbl\X$ is the complement of its singularities, denoted by $\dbl[sm]\X^n$, which has a flat Riemannian metric induced from the Euclidean metric on $\X^n$. The maximally-twisted tube will be isometrically embedded in $\dbl[sm]\X^n$, and its core curve will be an index-zero closed geodesic $\gamma_n$. The final step will then be to smooth $\dbl\X^n$ into a convex hypersurface in $\R^{n+1}$ while preserving the stability of $\gamma_n$, using arguments from \cite[Appendices~B and C]{Cheng_StableGeodesicNets}. Here it is important that $\gamma_n$ is non-degenerate, to ensure that after sufficiently small changes in the ambient metric from the smoothing, $\gamma_n$ will be close to a new index-zero closed geodesic.
$\X^3$, $\dbl\X^3$ and $\gamma_3$ are illustrated in \cref{fig:MobiusStrip}(a)--(b). To illustrate that some tubular neighbourhood of $\gamma_3$ is in fact a 3-dimensional maximally-twisted tube---and therefore that $\gamma_3$ is stable---note that in a maximally-twisted tube $(\D^2_r \times [0, \ell])/{\sim}$, every line segment $\sigma \subset \D^2_r$ centered at the origin corresponds to a M\"obius strip $(\sigma \times [0, \ell])/{\sim}$. This is the M\"obius strip illustrated in \cref{fig:MobiusStrip}(c)--(d).
\begin{figure}
\caption{Illustration of a polyhedron $\X^3$ whose double $\dbl\X^3$ contains an index-zero closed geodesic $\gamma_3$. The two copies of $\X^3$ in $\dbl\X^3$ are depicted in (a) and (b) respectively. The segments of $\gamma_3$ in each copy are drawn as thick dark line segments in (a) and (b). $\dbl\X^3$ contains a M\"obius strip $M$ whose core curve is $\gamma_3$; the pieces of the M\"obius strip in each copy of $\X^3$ are drawn in green in (c) and (d). (e) depicts the images of both $\gamma_3$ and the M\"obius strip $M$ under the canonical quotient map $\dbl\X^3 \to \X^3$. (f): $M$ is indeed a M\"obius strip because when one attempts to colour it differently on each side of its core curve, the colors will conflict at $p$. The M\"obius strip in (e) resembles the origami M\"obius strip in (g) that can be folded from paper.}
\label{fig:MobiusStrip}
\end{figure}
Let us briefly outline the construction of $\X^3$ and $\gamma_3$ and hint at how it can be generalized to prove all cases of \cref{thm:StableClosedGeodesicPosCurv}. We begin with an equilateral triangle $\X^2$ and a geodesic $\gamma^2 : I \to \dbl\X^2$ that is ``almost a periodic geodesic'' except that it starts and ends on different copies of $\X^2$ in $\dbl\X^2$. More precisely, its endpoints are different but have the same image under the canonical quotient map $q : \dbl\X^2 \to \X^2$ that identifies the two copies of $\X^2$ in $\dbl\X^2$ (see \cref{fig:AntiPeriodicGeodesic}). This also identifies the tangent spaces at the endpoints of $\gamma^2$, and $\gamma^2$ is chosen such that under this identification, $(\gamma^2)'(0)$ and $(\gamma^2)'(1)$ are the same. This allows us to treat its parallel transport map $(T_{\gamma^2(0)}\dbl\X^2)^\perp \to (T_{\gamma^2(1)}\dbl\X^2)^\perp$ on the components of tangent spaces orthogonal to $(\gamma^2)'$ as a linear isomorphism on $(T_{\gamma^2(0)}\dbl\X^2)^\perp$, which is in fact $v \mapsto -v$. (Observe the resemblance to the definition of a maximally-twisted tube.) We then use our original constructions called \emph{beveling} and \emph{origami models}\footnote{The origami model construction derives its name from the resemblance between the projected M\"obius strip in \cref{fig:MobiusStrip}(e) and the M\"obius strip made using the art of paper-folding, or origami. A similar resemblance holds in higher dimensions as well in terms of higher-dimensional origami.} to convert $\X^2$ to $\X^3 \subset \R^3$ and $\gamma^2$ to $\gamma^3$ in such a way that the projection of $\X^3$ onto the first two coordinates is very similar to $\X^2$ (this can be observed to some extent by comparing \cref{fig:MobiusStrip,fig:AntiPeriodicGeodesic}), and the component of the parallel transport map along $\gamma^3$ that is orthogonal to $(\gamma^3)'$ is also $v \mapsto -v$. In other words, the ``maximal twist'' is preserved from $\gamma^2$ to $\gamma^3$, which gives rise to the maximally-twisted tube embedded in $\dbl\X^3$ around $\gamma^3$.
\begin{figure}
\caption{(a) The equilateral triangle $\X^2$ and the geodesic $\gamma^2 : I \to \dbl\X^2$ from which $\X^3$ and $\gamma^3$ in \cref{fig:MobiusStrip} are derived.}
\label{fig:AntiPeriodicGeodesic}
\end{figure}
To prove \cref{thm:StableClosedGeodesicPosCurv}, we will apply beveling and origami models to $\X^3$ and $\gamma^3$ in a similar way to produce a 4-polytope $\X^4$ and geodesic $\gamma^4 : I \to \dbl\X^4$, and iterate in this way to produce a sequence of $\X^n$ and $\gamma^n$ for $n \geq 2$. When $n$ is odd $\gamma^n$ will be a periodic geodesic, but when $n$ is even $\gamma^n$ will be ``almost periodic'' in the same sense as $\gamma^2$. Nevertheless, in any case the component of the parallel transport map along $\gamma^n$ that is orthogonal to $(\gamma^n)'$ will always be $v \mapsto -v$. For odd $n \geq 3$ this will give an $n$-dimensional maximally-twisted tube around $\gamma^n$ in $\dbl\X^n$, which will prove most of \cref{thm:StableClosedGeodesicPosCurv}.\footnote{It is interesting to reflect on how this construction fails to disprove Synge's theorem: an even-dimensional maximally-twisted tube is non-orientable, so it cannot be embedded in the double of any convex polytope of the same dimension.}
\subsubsection{Stable figure-eights from ``incompatibly-twisted'' parallel transport}
\label{sec:Intro_FigureEights}
Our main result in this paper is proven by combining the new techniques of beveling and origami models with earlier tools from \cite{Cheng_StableGeodesicNets}. Our earlier result in \cite{Cheng_StableGeodesicNets} was proven by controlling the parallel transport maps along the geodesics in a stable geodesic bouquet. Essentially, given a stationary geodesic bouquet composed of loops $\gamma_1, \dotsc, \gamma_k$ based at a point $p$, we can associate each $\gamma_i$ with a vector space $\ker\opd{\gamma_i} \subset T_pM$ such that the stationary geodesic bouquet is stable if $\bigcap_{i = 1}^k \ker\opd{\gamma_i} = \{0\}$ and if the sectional curvature along the geodesic bouquet is sufficiently small. Each $\ker\opd{\gamma_i}$ is defined using the parallel transport map along $\gamma_i$, and is called the \emph{parallel defect kernel of $\gamma_i$}. In our construction of stable geodesic bouquets in \cite{Cheng_StableGeodesicNets}, the parallel defect kernels turn out to be hyperplanes of dimension $\dim(M) - 1$. Consequently, as $\dim(M)$ increased, we used more and more loops in our geodesic bouquet to guarantee that the intersection of the associated hyperplanes would be a point.
Our main result in this paper will be proven by using beveling and origami models to control parallel transport even more precisely to produce geodesic loops in $M$ whose parallel defect kernels have dimension at most $\dim(M)/2$. This will eventually allow us to construct stable geodesic bouquets using only two loops by ``twisting their parallel transport maps in incompatible ways,'' resulting in parallel defect kernels that intersect only at the origin. We will carry out the above process for $\dim(M) \in \{3,4,5\}$ in a somewhat ad-hoc fashion in each dimension. Finally, we will combine these low-dimensional examples into examples in all higher dimensions using the following technique.
\subsubsection{Combining low-dimensional constructions into high-dimensional ones}
\label{sec:Intro_Combining}
If $F$ and $G$ are stable geodesic bouquets with $k$ loops in the doubles of an $m$-polytope $\X_1$ and an $n$-polytope $\X_2$ respectively, then given a few additional minor assumptions we can combine them into a stable geodesic bouquet in the double of an $(m+n)$-polytope. The idea behind this is most naturally presented through the lens of a certain correspondence between billiard trajectories $\beta$ in $\X$ and geodesics $\gamma$ in $\dbl\X$. (A billiard trajectory in $\X$ is a path that travels in a straight line until it collides with the interior of a face of $\X$, after which its velocity vector gets reflected about that face and the trajectory proceeds with the new velocity in another straight line, and so on.\footnote{For a formal definition, see \cite[Section~4.1]{Cheng_StableGeodesicNets}.}) For instance, it is apparent from \cref{fig:AntiPeriodicGeodesic} that projecting the geodesic $\gamma^2$ via the canonical quotient map $q : \dbl\X^2 \to \X^2$ yields a billiard trajectory in $\X^2$. In general the correspondence is such that $\gamma$ projects to $\beta$ in the same way, and whenever $\beta$ makes a collision, $\gamma$ passes from one copy of $\X$ in $\dbl\X$ to the other.\footnote{The idea behind this correspondence is commonly used in, for example, the study of billiards in rational polygons (i.e. $n = 2$) via their corresponding geodesics in translation surfaces. It was also used in \cite{Cheng_StableGeodesicNets}.} (Note that each billiard trajectory could correspond to two possible geodesics, depending on which copy of $\X$ in $\dbl\X$ the geodesic begins from.)
If $\gamma_i : [a,b] \to \dbl\X_i$ are geodesics in the doubles of convex polytopes $\X_i$ for $i \in \{1,2\}$, then they correspond to billiard trajectories $\beta_i : [a,b] \to \X_i$. One can verify that the path $\beta(t) = (\beta_1(t),\beta_2(t))$ is a billiard trajectory in $\X_1 \times \X_2$, \emph{as long as $\beta_1$ and $\beta_2$ never collide simultaneously.} Under that assumption, $\beta$ corresponds to some geodesic in $\dbl(\X_1 \times \X_2)$. In this manner we can combine the geodesic loops of $F$ and $G$ into geodesic loops in $\dbl(\X_1 \times \X_2)$, which will form a stationary geodesic bouquet we denote by $F \oplus G$. We will prove that this is stable essentially by proving an inequality that relates the second variations in length of $F$, $G$ and $F \oplus G$.
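To spell out this routine verification, recorded here for convenience: suppose $\beta_1$ collides with a face $F$ of $\X_1$ at a time $t_0$ at which $\beta_2(t_0)$ lies in the interior of $\X_2$. Then $\beta(t_0)$ lies in the relative interior of the face $F \times \X_2$ of $\X_1 \times \X_2$, and reflecting a velocity vector about that face acts as $(u,v) \mapsto (dR_F\,u, v)$, where $dR_F$ denotes the linear part of the reflection about the supporting hyperplane of $F$. Hence the velocity of $\beta$ just after the collision is $(dR_F\,\beta_1'(t_0^-), \beta_2'(t_0))$, which is exactly the pair of post-collision velocities of $\beta_1$ and $\beta_2$; the symmetric argument applies when $\beta_2$ collides alone.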
\subsection{Organization of content}
In \cref{sec:Semigroup} we effectively reduce \cref{thm:Stable2LoopPosCurv} to the cases in dimensions 3, 4, and 5. We accomplish this by explaining how to combine constructions of stable geodesic bouquets in the doubles of low-dimensional polytopes into stable geodesic bouquets in the doubles of polytopes with higher dimension. In particular we prove that stability is preserved in the combination under certain minor conditions.
In \cref{sec:BevelingOrigamiModel} we define our key constructions of beveling and origami models, prove their key properties, and then use them to construct doubles of polytopes that contain maximally-twisted tubes. This will lead to a proof of \cref{thm:StableClosedGeodesicPosCurv}. Proving the properties of bevelings and origami models will be the most technical part of this paper.
In \cref{sec:Stable2Loop}, we construct stable figure-eights in the doubles of polytopes with dimensions 3, 4 and 5 using bevelings, origami models and a slight generalization of some ideas from \cref{sec:Semigroup}. Then we combine these constructions into stable figure-eights in polytopes of all higher dimensions using the techniques of \cref{sec:Semigroup}, and complete our proof of our main result.
\subsection{Notation and Terminology}
We will require that geodesics and billiard trajectories be parametrized at constant speed. We will say that paths $\alpha : [a,b] \to X$ like geodesics or billiard trajectories are \emph{simple} if they are injective except possibly that $\alpha(a) = \alpha(b)$. We may sometimes write $\alpha$ to denote its image. A stable geodesic bouquet, and more generally any stationary geodesic net, is called \emph{simple} if it is injective as an immersion. A billiard trajectory $\beta : [a,b] \to \X$ is called a \emph{billiard loop} if $\beta(a) = \beta(b)$. If it also satisfies $\beta'(a) = \beta'(b)$, then it is called a \emph{periodic billiard trajectory}.
When $V : I \to TM$ is a vector field along a geodesic $\gamma : I \to M$, $V(t)^\perp$ will refer to the component of $V(t)$ that is orthogonal to $\gamma'(t)$. $V^\perp$ will refer to the vector field $V^\perp(t) = V(t)^\perp$.
We will also denote function composition with the symbol $\circ$, but will often omit it when composing linear operators on the same space. If $f : X \to Y$ and $g : X \to Z$ are maps, let $(f,g)$ denote the map $X \to Y \times Z$ that sends $x$ to $(f(x),g(x))$.
\section{Direct Sum of Stationary Geodesic Bouquets}
\label{sec:Semigroup}
To prove \cref{thm:Stable2LoopPosCurv}, we can begin by constructing, for every integer $n \geq 3$, an $n$-polytope $\X^n$ such that $\dbl\X^n$ contains a simple and irreducible stable figure-eight. After that we can apply the smoothing arguments in \cite[Appendices~B and C]{Cheng_StableGeodesicNets} to derive \cref{thm:Stable2LoopPosCurv}. We will reduce the constructions of $\X^n$ to the cases where $n \in \{3,4,5\}$. We will do this by showing how to combine a stable geodesic bouquet in the double of an $n_1$-polytope with another stable geodesic bouquet in the double of an $n_2$-polytope to get a stable geodesic bouquet in the double of an $(n_1 + n_2)$-polytope.
Suppose that $F : \bq_k \to \dbl\X$ and $G : \bq_k \to \dbl\Y$ are stationary geodesic bouquets in the doubles of an $m$-polytope $\X$ and an $n$-polytope $\Y$, respectively. Let the geodesic loops of $F$ and $G$ be $\phi_1, \dotsc, \phi_k : I \to \dbl\X$ and $\gamma_1, \dotsc, \gamma_k : I \to \dbl\Y$ respectively. Then each geodesic $\phi_i$ corresponds to a billiard trajectory $\alpha_i$ in $\X$, and similarly each $\gamma_i$ corresponds to a billiard trajectory $\beta_i$ in $\Y$. As explained in \cref{sec:Intro_Combining}, $(\alpha_i, \beta_i)$ is a billiard trajectory in $\X \times \Y$ as long as $\alpha_i$ and $\beta_i$ never collide simultaneously. When this condition holds for all $1 \leq i \leq k$, we say that \emph{$F$ and $G$ are non-singular} and observe that each $(\alpha_i, \beta_i)$ corresponds to a geodesic loop in $\dbl(\X \times \Y)$. We can choose these $k$ geodesic loops to be based at the same point, and one can verify that they combine into a stationary geodesic bouquet which we denote by $F \oplus G$ and call the \emph{direct sum of $F$ and $G$}.
We aim to prove that whenever $F$ and $G$ are stable, so is $F \oplus G$, provided that $F$ and $G$ are non-singular. To this end, we will consider the \emph{index forms} of these three stationary geodesic bouquets: quadratic forms $Q_F(-)$, $Q_G(-)$ and $Q_{F \oplus G}(-)$ defined in \cite[Equation~(3.2)]{Cheng_StableGeodesicNets}. In particular, we will relate these index forms via an inequality. To prove this inequality it will be convenient to consider an intermediate object: the stationary geodesic bouquet $(F,G)$ in $\dbl\X \times \dbl\Y$. Then $F \oplus G = \Gamma \circ (F,G)$, where $\Gamma$ is a branched cover defined as follows:
\begin{equation}
\label{eq:BranchedCoverDefn}
\begin{gathered}
\Gamma : \dbl\X \times \dbl\Y \to \dbl(\X \times \Y) \\
\Gamma((x,\xi_1),(y,\xi_2)) = ((x,y),\xi_1 + \xi_2 \bmod2).
\end{gathered}
\end{equation}
This is a well-defined continuous map, and in fact is a 2-sheeted branched covering whose branch locus is the $(m+n-2)$-skeleton of $\dbl\X \times \dbl\Y$. ($\X$ has a natural CW structure given by its interior, faces, edges, vertices and so on. This induces a CW structure on $\dbl\X$, and eventually on $\dbl\X \times \dbl\Y$.) Away from the branch locus, $\Gamma$ is a local isometry. As a consequence, $F \oplus G = \Gamma \circ (F,G)$ is stable if and only if $(F,G)$ is stable.
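To indicate why $\Gamma$ is well-defined: if $x \in \partial\X$, then $(x,0)$ and $(x,1)$ represent the same point of $\dbl\X$, and the two candidate images $((x,y),\xi_2)$ and $((x,y),\xi_2 + 1)$ also represent the same point of $\dbl(\X \times \Y)$, because $(x,y) \in \partial(\X \times \Y)$. The same argument applies when $y \in \partial\Y$.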
\begin{lemma}[Superadditivity of index forms]
\label{lem:SuperadditiveIndexForm}
Let $F$ and $G$ be stationary geodesic bouquets with $k$ loops in Riemannian manifolds $M$ and $N$, and based at $p$ and $q$ respectively. For any vector field $W$ on $(F,G)$, let $W = (U,V)$ where $U$ is the component of $W$ in $TM$ and $V$ is the component in $TN$. Then
\begin{equation}
Q_{(F,G)}(W) \geq Q_F(U) + Q_G(V).
\end{equation}
\end{lemma}
\begin{proof}
Let the geodesic loops of $F$ and $G$ be $\phi_1, \dotsc, \phi_k : I \to M$ and $\gamma_1, \dotsc, \gamma_k : I \to N$ respectively. Let $\eta_i = (\phi_i, \gamma_i)$. First we will prove that for any vector field $W_i = (U_i, V_i)$ along $\eta_i$ (that may not agree at the basepoint $(p,q)$), $Q_{\eta_i}(W_i) = Q_{\phi_i}(U_i) + Q_{\gamma_i}(V_i)$.
Let $\nabla^M$ and $\nabla^N$ denote the Riemannian connections on $M$ and $N$ respectively. Then
\begin{align}
Q_{\eta_i}(W_i)
&= \int_0^1 \norm{\nabla_{\eta_i'(t)}W_i}^2 \,dt \\
&= \int_0^1 \norm{\left( \nabla^M_{\phi_i'(t)}U_i, \nabla^N_{\gamma_i'(t)}V_i \right)}^2 \,dt \\
&= \int_0^1 \norm{\nabla^M_{\phi_i'(t)}U_i}^2 \,dt
+ \int_0^1 \norm{\nabla^N_{\gamma_i'(t)}V_i}^2 \,dt \\
&= Q_{\phi_i}(U_i)
+ Q_{\gamma_i}(V_i).
\end{align}
Consider some $1 \leq i \leq k$. Let $m = \dim(M)$, and let $X_1, \dotsc, X_{m-1}, X_m = \phi_i'/\norm{\phi_i'}$ be a parallel orthonormal frame along $\phi_i$. Let $U_i = \sum_{j = 1}^m a_jX_j$ for smooth functions $a_j : I \to \R$. Then
\begin{align}
Q_{\phi_i}(U_i)
&= \int_0^1 \norm{\nabla^M_{\phi_i'(t)}U_i}^2 \,dt \\
&= \int_0^1 \norm{\nabla^M_{\phi_i'(t)} \sum_{j = 1}^m a_j(t)X_j(t)}^2 \,dt \\
&= \int_0^1 \sum_{j = 1}^m a_j'(t)^2 \,dt \\
&\geq \int_0^1 \sum_{j = 1}^{m-1} a_j'(t)^2 \,dt \\
&= Q_{\phi_i}\left( \sum_{j = 1}^{m-1} a_jX_j \right).
\end{align}
Therefore $Q_{\phi_i}(U_i)$ does not increase when its argument is replaced with the component that is orthogonal to $\phi_i$. The same conclusion on $Q_{\gamma_i}(V_i)$ follows from a similar computation involving a parallel orthonormal frame $Y_1, \dotsc, Y_{n-1}, Y_n = \gamma_i'/\norm{\gamma_i'}$, where $n = \dim(N)$.
Let $(\tilde{U}_i, \tilde{V}_i)$ be the component of $W_i$ orthogonal to $\eta_i$. This is obtained by adding or subtracting some multiple of $(\phi_i',\gamma_i') = (\norm{\phi_i'}X_m, \norm{\gamma_i'}Y_n)$ from $W_i$. This implies that $U_i$ and $\tilde{U}_i$ have the same component $U_i^\perp$ that is orthogonal to $\phi_i$, and similarly $V_i$ and $\tilde{V}_i$ have the same component $V_i^\perp$ that is orthogonal to $\gamma_i$. Therefore \begin{equation}
Q_{\eta_i}\Big((\tilde{U}_i,\tilde{V}_i)\Big) = Q_{\phi_i}(\tilde{U}_i) + Q_{\gamma_i}(\tilde{V}_i) \geq Q_{\phi_i}(U_i^\perp) + Q_{\gamma_i}(V_i^\perp).
\end{equation}
In particular, taking each $W_i$ to be the restriction of $W$ to $\eta_i$, we obtain
\begin{equation}
Q_{(F,G)}(W)
= \sum_{i = 1}^k Q_{\eta_i}\Big((\tilde{U}_i,\tilde{V}_i)\Big) \geq \sum_{i = 1}^k\Big(Q_{\phi_i}(U_i^\perp) + Q_{\gamma_i}(V_i^\perp)\Big) = Q_F(U) + Q_G(V).
\end{equation}
\end{proof}
The preceding lemma allows us to verify that if $F : \bq_k \to \dbl\X$ and $G : \bq_k \to \dbl\Y$ are stable geodesic bouquets in the doubles of an $m$-polytope $\X$ and an $n$-polytope $\Y$, then $(F,G)$ is a stable geodesic bouquet in $\dbl\X \times \dbl\Y$.
Thus we can derive the following:
\begin{corollary}\label{cor:StableDirectSum}
If $F$ and $G$ are stable geodesic bouquets in the doubles of convex polytopes and $F$ and $G$ are non-singular, then $F \oplus G$ is also stable.
\end{corollary}
\begin{proof}
It suffices to show that $(F,G)$ is stable, which by \cite[Lemma~3.1]{Cheng_StableGeodesicNets} is equivalent to showing that $Q_{(F,G)}(W) = 0$ only if the vector field $W$ is tangent to $(F,G)$. Indeed, if $Q_{(F,G)}(W) = 0$ then by \cref{lem:SuperadditiveIndexForm}, $Q_F(U) = Q_G(V) = 0$ where $U$ (resp. $V$) is the component of $W$ in $T\dbl[sm]\X$ (resp. $T\dbl[sm]\Y$). Since $F$ is stable, \cite[Lemma~3.1]{Cheng_StableGeodesicNets} implies that $U$ is tangent to $F$. Similarly, $V$ must be tangent to $G$. This implies that $W$ is tangent to $(F,G)$.
\end{proof}
We also have the following condition for the direct sum to be simple:
\begin{lemma}
\label{Lem:DirectSumSimple}
If $q \circ F$ is injective, where $q : \dbl\X \to \X$ is the canonical quotient map, then $F \oplus G$ is simple.
\end{lemma}
\begin{proof}
Let the geodesic loops of $F$ be $\phi_1,\dotsc,\phi_k$ and let those of $G$ be $\gamma_1,\dotsc,\gamma_k$. By the hypothesis of the lemma, the billiard trajectories $\alpha_i$ in $\X$ corresponding to the $\phi_i$ are simple loops that pairwise intersect only at the basepoint. Let each $\gamma_i$ correspond to a billiard trajectory $\beta_i$ in $\Y$. From the definition of the direct sum and the correspondence explained in \cref{sec:Intro_Combining}, the geodesic loops of $F \oplus G$ correspond to the billiard trajectories $(\alpha_i, \beta_i)$ in $\X \times \Y$, for $1 \leq i \leq k$. The injectivity of $q \circ F$ therefore implies that the billiard trajectories $(\alpha_i, \beta_i)$ are simple and intersect each other only at the basepoint. Therefore $F \oplus G$ also has to be simple.
\end{proof}
\begin{remark}
\label{rem:SemigroupIdeal}
Say that two stationary geodesic bouquets $F : \bq_k \to \dbl\X$ and $G : \bq_k \to \dbl\Y$ are \emph{equivalent} if $G$ can be obtained from $F$ by post-composing with an isometry $\dbl\X \to \dbl\Y$. It can easily be verified that $\oplus$ is commutative and associative, up to equivalence. Hence we will slightly abuse notation and treat it as an associative operation. This situation can be summarized by saying that the set $\mathcal{S}_k$ of equivalence classes of stationary geodesic bouquets with $k$ loops in doubles of convex polytopes forms a commutative semigroup under the $\oplus$ operation. Moreover, the stable figure-eights in $\mathcal{S}_k$ form a subsemigroup, and the irreducible and stable figure-eights in $\mathcal{S}_k$ form an ideal. These arguments have been stated for geodesic bouquets for convenience, but they extend to stable geodesic nets that are modeled on any graph.
\end{remark}
As explained at the beginning of this section, this operation nearly allows us to reduce \cref{thm:Stable2LoopPosCurv} to the cases where $n \in \{3,4,5\}$. If there are corresponding $n$-polytopes $\X^n$ and simple, irreducible and stable figure-eights $G^n : \bq_2 \to \dbl\X^n$, for $n \in \{3,4,5\}$, then the stable figure-eights of the form
\begin{equation}
G^{3m + n} = G^n \oplus G^3_1 \oplus \dotsb \oplus G^3_m \text{ for } n \in \{3,4,5\}, m \geq 0,
\end{equation}
are also irreducible, where the $G^3_i$'s are slight modifications of $G^3$ to ensure that the direct summands are pairwise non-singular. In \cref{sec:Stable2Loop} we will construct $G^3$, $G^4$ and $G^5$ such that all of those stable figure-eights $G^{3m+n}$ will also be simple.
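For instance, this produces $G^6 = G^3 \oplus G^3_1$, $G^7 = G^4 \oplus G^3_1$, $G^8 = G^5 \oplus G^3_1$ and $G^9 = G^3 \oplus G^3_1 \oplus G^3_2$; every dimension is covered because every integer $\geq 3$ can be written as $3m + n$ with $n \in \{3,4,5\}$ and $m \geq 0$.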
\section{Index-zero Closed Geodesics from Bevelings and\\Origami Models}
\label{sec:BevelingOrigamiModel}
The main purpose of this section is to introduce our constructions of \emph{bevelings} and \emph{origami models} that allow us to derive, from a billiard trajectory $\beta$ in an $n$-polytope, a billiard trajectory $\hat\beta$ in an $(n+1)$-polytope such that the ``parallel transport along $\hat\beta$'' (to be defined later) is controlled in terms of the ``parallel transport along $\beta$.'' As a result we will also prove \cref{thm:StableClosedGeodesicPosCurv}.
As explained in \cref{sec:Intro_CoreCurves}, to prove \cref{thm:StableClosedGeodesicPosCurv}, for each odd $n \geq 3$, we will find an $n$-polytope $\X^n$ such that $\dbl[sm]\X^n$ contains an isometrically embedded $n$-dimensional maximally twisted tube. It was explained that the core curve $\gamma_n : [a,b] \to \dbl\X^n$ will be an index-zero closed geodesic. Let us prove that it will also be non-degenerate.
\begin{lemma}
\label{Lem:MaxTwistStableGeodesic}
Core curves of maximally-twisted tubes are non-degenerate closed geodesics.
\end{lemma}
\begin{proof}
Let $\gamma : [0,\ell] \to T$ be the core curve, parametrized by arc-length, of an $n$-dimensional maximally-twisted tube $T$. That is, we have to show that any variation along $\gamma$ (a vector field $V(t)$ such that $V(0) = V(\ell)$) gives a vanishing second variation of length only when $V(t)$ is always parallel to $\gamma'(t)$. Let $V(t) = a_1(t)E_1(t) + \dotsb + a_{n-1}(t)E_{n-1}(t) + a_n(t)\gamma'(t)$ for some parallel orthonormal frame $E_1,\dotsc, E_{n-1}, \gamma'$ and smooth functions $a_i$. The definition of a maximally-twisted tube $T = \D_r^{n-1} \times [0,\ell]/{\sim}$ implies that we can choose each $E_i(s)$ to be tangent to the image of $\D_r^{n-1} \times \{s\}$ in $T$ and such that $E_i(s)$ corresponds to the $i^\text{th}$ standard basis vector of $\R^{n-1}$ in each copy of $\D_r^{n-1}$. Thus $E_i(\ell) = -E_i(0)$.
Suppose that the second variation of length along $V$ vanishes; we will show that the functions $a_i$ vanish identically for $1 \leq i \leq n-1$. We apply the formula for the second variation in length, noting that it only involves the component of $V$ orthogonal to $\gamma$, and that the curvature term vanishes because $T$ has a flat metric.
\begin{equation}
0 = \int_0^\ell\norm{\nabla_{\gamma'} \left(\sum_{i = 1}^{n-1} a_i(t)E_i(t)\right)}^2 \,dt = \int_0^\ell\norm{\sum_{i = 1}^{n-1} a_i'(t)E_i(t)}^2 \,dt.
\end{equation}
This implies that the functions $a_1, \dotsc, a_{n-1}$ are all constant. In fact, they must vanish identically, because $V(\ell) = V(0)$ while $E_i(\ell) = -E_i(0)$, which forces $a_i(0) = -a_i(\ell)$ for $1 \leq i \leq n-1$.
\end{proof}
$\gamma_n$ will correspond to some periodic billiard trajectory $\beta_n : [a,b] \to \X^n$, that will be constructed inductively as follows. We will begin with a certain choice of polygon $\X^2 \subset \R^2$ and periodic billiard trajectory $\beta_2 : [a,b] \to \X^2$. Then for each $n \geq 2$, from the pair of $\X^n$ and $\beta_n$ we will construct another $(n+1)$-polytope $\X^{n+1}$ that contains a periodic billiard trajectory $\beta_{n+1}$. For odd $n$, $\beta_n$ will have an even number of collisions and hence correspond to a closed geodesic $\gamma_n$ in $\dbl\X^n$. A possible set of $\X^3$, $\dbl\X^3$ and $\gamma_3$ was illustrated in \cref{fig:MobiusStrip}.
$\X^{n+1}$ will be constructed, based on the geometry of $\X^n$, in such a way that $\beta_{n+1}$ will be a ``folded higher-dimensional version'' of $\beta_n$. This folding ``through the extra dimension'' twists the parallel transport map, which enables, for odd $n$, the isometric embedding of maximally-twisted tubes around $\gamma_n$.
\comment{Formally speaking, we will have $\beta_{n+1}(a) = (\beta_n(a),0)$ and $\beta_{n+1}'(a) = (\beta_n'(a),0)$. Hence, after choosing $\X^n$ for all $n \geq 2$ and $\beta_2$, the initial positions and velocities of every $\beta_n$ are completely determined, so the $\beta_n$'s are also completely determined over the same time interval $[a,b]$ (as long as they are well-defined, i.e. $\beta_n$ does not collide with the $(n-2)$-skeleton of $\X^n$).}
\subsection*{Outline of this section}
\Cref{sec:Bevelings} introduces a procedure called \emph{beveling} for constructing $(n+1)$-polytopes $\X^{n+1}$ out of $n$-polytopes $\X^n$. We will see that certain billiard trajectories $\hat\beta$ in $\X^{n+1}$ called \emph{folded billiard trajectories} resemble billiard trajectories $\beta$ in $\X^n$ when projected onto the first $n$ coordinates. \Cref{sec:Bevelings} will culminate in the proof of a simple relationship between the parallel transport maps of $\hat\beta$ and $\beta$ (\cref{lem:FlatteningParallelTransport}). In \cref{sec:FoldedBilliardTrajProperties} we will prove key properties of folded billiard trajectories. \Cref{sec:OrigamiModels} explains how beveling and folded billiard trajectories can be harnessed to produce the required pair $(\X^{n+1}, \beta_{n+1})$ from $(\X^n, \beta_n)$. \Cref{sec:StableClosedGeodesics} explains how to derive \cref{thm:StableClosedGeodesicPosCurv} from those ideas.
\subsection{Bevelings}
\label{sec:Bevelings}
We begin with some notation. Let $\X$ be a convex polytope. Given a billiard trajectory $\beta : [a,b] \to \X$, for all $a \leq s < t \leq b$ such that $\beta(s), \beta(t) \in \itr\X$, define the \emph{parallel transport map} $P_{st} : T_{\beta(s)}\R^n \to T_{\beta(t)}\R^n$ as $dR_{F_j} \dotsm dR_{F_i}$, where the collisions between times $s$ and $t$ are at the faces $F_i, F_{i+1}, \dotsc, F_j$. Note that this differs from the parallel transport map of a geodesic, but is closely related; see \cite[Lemma~4.2]{Cheng_StableGeodesicNets}. However, in this section we will deal almost exclusively with billiard trajectories instead of geodesics, so, by an abuse of notation, we will write the parallel transport map of billiard trajectories as $P_{st}$.
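As a simple illustration (not needed in the sequel), take $\X = [0,1]^2$ and a billiard trajectory that collides first with the face $\{x_1 = 0\}$ and then with the face $\{x_2 = 0\}$. Its parallel transport map between any interior points before and after these two collisions is
\begin{equation*}
P_{st} = dR_{\{x_2 = 0\}}\, dR_{\{x_1 = 0\}}
= \left[\begin{smallmatrix} 1 & 0 \\ 0 & -1 \end{smallmatrix}\right]
\left[\begin{smallmatrix} -1 & 0 \\ 0 & 1 \end{smallmatrix}\right]
= -\id,
\end{equation*}
so every vector is reversed, illustrating how compositions of reflections can produce the kind of twisting exploited in this section.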
For each integer $n \geq 1$, let $\pr_n : \R^{n+1} \to \R^n$ denote the projection onto the first $n$ coordinates. The subscript will be dropped when the dimensions are clear from the context. Let $\Pi_r = \R^n \times \{r\}$. A subset of $\R^{n+1}$ is called \emph{horizontal} if it lies in $\Pi_r$ for some $r \in \R$. A path is called horizontal if its image is horizontal. Call the last coordinate of a point $x \in \R^{n+1}$ its \emph{height}, denoted by $\height(x)$. If a set is horizontal, its height is the height of any point.
\begin{definition}[Beveling]
Given an $n$-polytope $\X$, a subset $\F$ of its faces and a function $h : \F \to \R$, a \emph{beveling of $\X$ along $\F$ at heights $h$} is an $(n+1)$-polytope $\bvl{\X}{\F}{h}$ defined as the intersection of $(n+1)$-polytopes $\B^h(F)$, one for each face $F$ of $\X$. The $\B^h(F)$ are defined as follows: for each face $F \notin \F$ of $\X$ whose supporting half-space is $\HS$, let $\B^h(F) = \HS \times \R$. For each face $F \in \F$, let $\B^h(F)$ be the $(n+1)$-polytope shaped like a ``wedge'' that contains $\X \times \{h(F)\}$, and that is bounded by the two hyperplanes that intersect $\X \times \{h(F)\}$ at $F \times \{h(F)\}$ and have a dihedral angle of $\pi/4$ with $\R^n \times \{h(F)\}$ (see \cref{fig:Beveling}(f)). Then $\bvl{\X}{\F}{h} = \bigcap_F \B^h(F)$, where the intersection is taken over all faces of $\X$ (see \cref{fig:Beveling}(b)).
To simplify our arguments, we will require that for each face $F$ of $\X$, every face of $\B^h(F)$ contains a face of $\bvl\X\F{h}$. If $\hat{F}$ is a face of $\bvl\X\F{h}$ that is contained in $\partial\B^h(F)$, then we say that $\hat{F}$ is \emph{inherited from $\B^h(F)$}.
\end{definition}
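For a minimal concrete example, too low-dimensional to be used later but convenient for fixing ideas, take $\X = [0,1] \subset \R$, $\F = \{\{0\}\}$ and $h(\{0\}) = 0$. Then $\B^h(\{1\})$ is the half-plane $\{x_1 \leq 1\}$, while $\B^h(\{0\})$ is the wedge $\{x_1 \geq \abs{x_2}\}$, so that
\begin{equation*}
\bvl{\X}{\F}{h} = \{(x_1, x_2) \in \R^2 : \abs{x_2} \leq x_1 \leq 1\},
\end{equation*}
an isosceles right triangle whose two slanted edges are inherited from $\B^h(\{0\})$ and whose vertical edge is inherited from $\B^h(\{1\})$.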
\Cref{fig:Beveling} illustrates an example of a beveling $\hat\X = \bvl\X\F{h}$, where $\X \subset \R^2$ is an equilateral triangle.
\begin{figure}
\caption{An example of a convex 2-polytope (polygon) $\X$ and a beveling $\bvl\X\F{h}$ of it.}
\label{fig:Beveling}
\end{figure}
If a billiard trajectory $\beta : [a,b] \to \X$ has collisions at times $t_1, t_2, \dotsc, t_k$ where $a < t_1 < \dotsb < t_k < b$, then the linear paths $\beta|_{[a,t_1]}, \beta|_{[t_1, t_2]}, \dotsc, \beta|_{[t_{k-1}, t_k]}, \beta|_{[t_k, b]}$ are called the \emph{segments} of $\beta$.
\begin{definition}[Folded billiard trajectories, crease-like segments, simulating colliding, simulating folding]
Given a beveling $\bvl\X\F{h}$ of $\X$, we say that a billiard trajectory $\hat\beta : [a,b] \to \bvl{\X}{\F}{h}$ is \emph{folded} if $\hat\beta(a)$ lies in the interior of $\bvl{\X}{\F}{h}$, the first segment of $\hat\beta$ is horizontal, and whenever $\hat\beta$ collides with a face inherited from $\B^h(F)$ for some $F \in \F$, the next face it collides with---if any---is another face inherited from $\B^h(F)$. If $\hat\beta$ is folded, then we say it \emph{simulates colliding at $F_1, \dotsc, F_k$} if it collides with all of the face(s) inherited from $\B^h(F_1)$, followed by all of the face(s) inherited from $\B^h(F_2)$, and so on until it collides with at least one face inherited from $\B^h(F_k)$.
We will say that a segment of $\hat\beta$ is \emph{crease-like} if it begins at a point in the interior of a face of $\bvl\X\F{h}$ that is inherited from $\B^h(F)$ for some $F \in \F$, and the segment extends into a straight line that intersects the other face of $\B^h(F)$ (possibly at a point outside of $\bvl\X\F{h}$). In this case we say that the segment \emph{simulates folding around $F$.} In the same vein, we will say that $\hat\beta$ \emph{simulates folding around $F_1', F_2', \dotsc, F_j'$} if its crease-like segments simulate folding around those faces in that order.
\end{definition}
In our applications, folded billiard trajectories $\hat\beta$ will be characterized by their behaviour near the boundary of $\bvl\X\F{h}$. In particular, $\hat\beta$ can have two main types of behaviour near the boundary of $\B^h(F)$, depending on whether $F \in \F$, as illustrated in \cref{fig:CollisionTypes}. The projection of $\hat\beta$ via $\pr$ is also illustrated. If $F \in \F$, then near $\partial\B^h(F)$, $\hat\beta$ will contain a crease-like segment $\sigma$ that spans between the two faces of $\partial\B^h(F)$ as depicted in \cref{fig:CollisionTypes}(a). The segments before and after $\sigma$ will be horizontal. On the other hand, if $F \notin \F$, then near $\partial\B^h(F)$, $\hat\beta$ will consist of two horizontal segments as depicted in \cref{fig:CollisionTypes}(b). These statements will be formally proven in \cref{sec:FoldedBilliardTrajProperties}.
\begin{figure}
\caption{The two main types of behaviour that folded billiard trajectories can have near the boundary of a beveling $\bvl\X\F{h}$.}
\label{fig:CollisionTypes}
\end{figure}
(The definition of folded billiard trajectories may allow for behaviour that looks more complicated than shown in \cref{fig:CollisionTypes}, but we will choose $\X$ and the beveling $\bvl\X\F{h}$ carefully to keep the behaviour of folded billiard trajectories within these two simple cases.)
Here we provide some motivation for the preceding definitions. In the upper half of \cref{fig:CollisionTypes}(b), $\hat\beta$ is simulating colliding with $F$; this terminology is justified by the phenomenon that $\pr \circ \hat\beta$ actually looks like a billiard trajectory in $\X$ that collides with $F$, as shown in the lower half of \cref{fig:CollisionTypes}(b). A similar situation occurs in \cref{fig:CollisionTypes}(a) as well, where in the upper half $\hat\beta$ is simulating colliding with $F$. However, this time ${\pr} \circ \hat\beta$ only looks like a billiard trajectory in $\X$ after extending two of the segments along dashed red lines, as shown in the lower half. The resemblance between ${\pr} \circ \hat\beta$ and a billiard trajectory in $\X$ will be formally proven later in \cref{prop:FoldedTrajectoryProjection}. In fact, we will prove in \cref{lem:FlatteningParallelTransport} that if $\hat\beta$ simulates colliding with $F_1, \dotsc, F_k$ and $\beta$ is a billiard trajectory in $\X$ that collides with $F_1, \dotsc, F_k$, then the parallel transport maps of $\beta$ and $\hat\beta$ are closely related. This will help us derive $\beta_{n+1}$ from $\beta_n$ in a way that controls the parallel transport map, as explained at the beginning of this section.
As a concrete example, \cref{fig:FoldedBilliardTrajectory}(a)--(c) illustrates a folded billiard trajectory $\hat\beta : [a,b] \to \hat\X$ that begins at the point $\hat\beta(a)$, then passes through the points $x_1, x_2, \dotsc, x_6$ in that order before returning to $\hat\beta(b) = \hat\beta(a)$. The crease-like segments of $\hat\beta$ are $x_1x_2$, $x_3x_4$ and $x_5x_6$, which simulate folding around $F_1$, $F_2$ and $F_3$ respectively. In this case, $\hat\beta$ happens to be periodic with an even number of reflections, so it corresponds to a closed geodesic $\hat\gamma$ in $\dbl{\hat\X}$.
\begin{figure}
\caption{(a), (b), (c): Three different views of a folded billiard trajectory $\hat\beta$ in the beveling $\bvl\X\F{h}$.}
\label{fig:FoldedBilliardTrajectory}
\end{figure}
In fact, some tubular neighbourhood of $\hat\gamma$ is a maximally twisted tube: at any point $x$ on $\hat\gamma$, the parallel transport map $T_x\dbl\hat\X \to T_x\dbl\hat\X$ must be $(-\id)$ (negation of the identity) when restricted to the orthogonal complement of $\hat\gamma'(x)$. This can be derived from the existence of a flat M\"obius band $M$ isometrically embedded in $\dbl\hat\X$ so that its core curve is $\hat\gamma$. The image of $M$ under the canonical quotient map $q : \dbl\hat\X \to \hat\X$ is illustrated in \cref{fig:FoldedBilliardTrajectory}(d)--(f). Observe that $\hat\beta$ is the core curve of $q(M)$, and that $q(M)$ appears to be ``folded'' around the edges of $\X$, which is the reason for saying that crease-like segments ``simulate folding''. (Let $v \in T_xM$ be orthogonal to $\hat\gamma'(x)$, such that $dq_x(v)$ is the vector labeled $u$ in \cref{fig:FoldedBilliardTrajectory}(d). Then parallel transporting $v$ along $\hat\gamma$ moves it along the blue portion of $M$, bringing it back to $-v$. Parallel transporting a vector orthogonal to $M$ at $x$ must also negate it, because parallel transport preserves orientation in the orientable $\dbl[sm]\hat\X$.)\footnote{In fact, the definition of a beveling was the result of an attempt to construct a polyhedron in $\R^3$ whose double would contain an isometrically embedded M\"obius band whose core curve is a closed geodesic---a necessary condition for the isometric embedding of a maximally twisted tube.} Observe the similarities between \cref{fig:FoldedBilliardTrajectory} and \cref{fig:MobiusStrip}; a similar procedure was used to generate the example illustrated in \cref{fig:MobiusStrip}.
Eventually we will iterate this beveling construction, producing a sequence $\X^2, \X^3, \X^4, \dotsc$ of convex polytopes with dimensions $2, 3, 4$ and so on, such that $\pr(\X^{n+1}) \subset \X^n$. For example, \cref{fig:Beveling}(a) shows a possible choice for $\X^2$, and \cref{fig:Beveling}(d) shows one view of a possible choice for $\X^3$; in this figure it can be seen that $\pr\X^3$ is very similar to $\X^2$, but the former is a strict subset of the latter. This relation is a consequence of the following lemma:
\begin{lemma}[Horizontal cross-sections of bevelings]
\label{lem:BevelingCrossSection}
Let $\X$ be an $n$-polytope, $\F$ be a subset of its faces, and $h : \F \to \R$ be a function. Then $\bvl\X\F{h} \cap \Pi_r$ is $\X' \times \{r\}$, where $\X'$ is the $n$-polytope obtained from $\X$ by, for each $F \in \F$, translating the supporting half-space of $F$ inwards by a distance $\abs{r - h(F)}$. Consequently, $\pr(\bvl\X\F{h}) \subset \X$.
\end{lemma}
\begin{proof}
For each face $F$ of $\X$, if $F \notin \F$ then $\pr(\B^h(F) \cap \Pi_r)$ is simply the supporting half-space of $F$. If $F \in \F$ then $\pr(\B^h(F) \cap \Pi_r)$ is the supporting half-space of $F$ but translated a distance $\abs{r - h(F)}$ along the inward-pointing unit normal vector of $F$. Therefore the result follows.
\end{proof}
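In the triangle example given after the definition of a beveling (with $\X = [0,1]$, $\F = \{\{0\}\}$ and $h(\{0\}) = 0$), the lemma can be checked directly:
\begin{equation*}
\bvl{\X}{\F}{h} \cap \Pi_r = [\abs{r}, 1] \times \{r\} \quad \text{for } \abs{r} \leq 1,
\end{equation*}
which is $\X$ with the supporting half-space of the face $\{0\} \in \F$ translated inwards by $\abs{r - h(\{0\})} = \abs{r}$, while the face $\{1\} \notin \F$ is unaffected.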
The parallel transport maps of folded billiard trajectories in $\X^n$ will be related to those of billiard trajectories in $\X^{n+1}$ because the parallel transport maps will be ``compatible'' with the projections $\pr$, in turn because the reflections about faces of $\X^n$ are related to the reflections about faces of $\X^{n+1}$ via $\pr$. This property, which is essential to let us compute parallel transport maps using reflections, will be proven in the next lemma. Let us define some useful notation for reflections associated with a beveling $\bvl\X\F{h}$ of an $n$-polytope $\X$. For each face $F$ of $\X$, let $R_F : \R^n \to \R^n$ be the reflection about $F$, and let $\rho_F : \R^{n+1} \to \R^{n+1}$ denote the product of the reflections about the supporting hyperplanes of $\B^h(F)$. (There are either one or two supporting hyperplanes, and in the latter case the order of multiplication does not matter.)
\Cref{fig:ProjectUnraveling} illustrates some of this notation in the context of a rectangle $\X \subset \R^2$ and a beveling $\hat\X = \bvl\X{\{F_1,F_3\}}{h}$, where $h(F_1) = +\epsilon$ and $h(F_3) = -\epsilon$ for some $\epsilon > 0$. The figure also hints at the compatibility between the projection ($\pr : \R^3 \to \R^2$ in this case) and the reflections: from the figure it can be seen that $\pr(\rho_{F_1}(\hat\X)) = R_{F_1}(\X) = R_{F_1}(\pr(\hat\X))$. This phenomenon is made rigorous and general in our next lemma.
\begin{figure}
\caption{The relationships between a polygon $\X$, its beveling $\hat\X$, the projection $\pr$ and the transformations $R_{F_i}$ and $\rho_{F_i}$.}
\label{fig:ProjectUnraveling}
\end{figure}
Before we state our next lemma we need some additional notation. If $V$ is a subspace of $\R^n$, let $V^\perp$ denote its orthogonal complement. Suppose that $V, W$ are subspaces of $\R^n$ such that $V + W = \R^n$ and $V \cap W = \{0\}$. Then we write $\R^n = V \oplus W$. We will write $\R^{m+n} = \R^m \oplus \R^n$ to mean $\R^{m+n} = \R^m \times \{0\} \oplus \{0\} \times \R^n$, where $\R^m \times \{0\}$ and $\{0\} \times \R^n$ are subspaces of $\R^{m+n}$. Furthermore, if $C : \R^n \to \R^n$ is a linear operator that restricts to $A : V \to V$ and $B : W \to W$, then we write $C = A \oplus B$. Let $\id_V$ denote the identity map on $V$.
\begin{lemma}[Compatibility with projection]
\label{lem:CompatibilityProjection}
Let $\bvl\X\F{h}$ be a beveling of an $n$-polytope $\X$. Then for each face $F$ of $\X$,
\begin{equation}
\label{eq:ProjectionCommuteReflection}
{\pr} \circ \rho_F = R_F \circ \pr.
\end{equation}
Moreover, $\Pi_0 = \R^n \times \{0\}$ is an invariant subspace of $d\rho_F$ and with respect to the decomposition $\R^{n+1} = \Pi_0 \oplus \Pi_0^\perp \iso \R^n \oplus \R$,
\begin{equation}
\label{eq:BevelingDirectSumDet}
d\rho_F = dR_F \oplus (-\det d\rho_F) \text{, where } \det d\rho_F = \begin{cases}
+1 & F \in \F\\
-1 & F \notin \F.
\end{cases}
\end{equation}
\end{lemma}
\begin{proof}
We will prove \cref{eq:BevelingDirectSumDet} and then derive \cref{eq:ProjectionCommuteReflection} from it. Let $v$ be a normal vector of $F$ in $\X$ and let its span be $V$ (see \cref{fig:ReflectionsCompatibleWithProjection}(a)). Then under the decomposition $\R^n = V^\perp \oplus V \iso \R^{n-1} \oplus \R$, we may write $dR_F$ as $\id_{V^\perp} \oplus (-1)$ because by definition of a reflection, $dR_F(v) = -v$ and $dR_F$ must fix every vector in the hyperplane of reflection. Let $u = (0,\dotsc,0,1) \in \R^{n+1}$, and let $W = \vspan\{(v,0),u\}$.
Now we work with the decomposition $\R^{n+1} = W^\perp \oplus W \iso \R^{n-1} \oplus \R^2$, and use $(v,0),u$ as a basis of $W$.
\begin{itemize}
\item If $F \notin \F$, let $\hat{F}$ be the face of $\bvl\X\F{h}$ that is inherited from $\B^h(F)$. Then $d\rho_F = dR_{\hat F} = \id_{W^\perp} \oplus \left[\begin{smallmatrix}
-1 & 0 \\
0 & 1
\end{smallmatrix}\right]$ because $d\rho_F$ is a reflection that should send $(v,0)$ to $(-v,0)$. Moreover, $d\rho_F$ should fix $u$ and any vector in $W^\perp$ because they all lie in the hyperplane of reflection. \Cref{eq:BevelingDirectSumDet} follows as a result.
\item On the other hand, if $F \in \F$, then let $\hat{F}$ and $\hat{F}'$ be the two faces of $\bvl\X\F{h}$ that are inherited from $\B^h(F)$; order them so that $dR_{\hat F}$ sends $(v,0) \mapsto u$ and vice versa, while $dR_{\hat{F}'}$ sends $(v,0) \mapsto -u$ and $u \mapsto -(v,0)$ (see \cref{fig:ReflectionsCompatibleWithProjection}(b)). Meanwhile, $W^\perp$ lies in the hyperplane of reflection of both $dR_{\hat{F}}$ and $dR_{\hat{F}'}$. Hence, using the decomposition $\R^{n + 1} = W^\perp \oplus W$ and the basis $(v,0),u$ of $W$, we can write $dR_{\hat{F}} = \id_{W^\perp} \oplus \left[\begin{smallmatrix}
0 & 1 \\
1 & 0
\end{smallmatrix}\right]$ and $dR_{\hat{F}'} = \id_{W^\perp} \oplus \left[\begin{smallmatrix}
0 & -1 \\
-1 & 0
\end{smallmatrix}\right]$. Therefore $d\rho_F = dR_{\hat{F}}dR_{\hat{F}'} = \id_{W^\perp} \oplus \left[\begin{smallmatrix}
-1 & 0 \\
0 & -1
\end{smallmatrix}\right]$.
\end{itemize}
From this representation, noting that $\Pi_0 = W^\perp \oplus \vspan\{(v,0)\}$, we can see that $\Pi_0$ is always an invariant subspace of $d\rho_F$. \Cref{eq:BevelingDirectSumDet} then follows, which implies that ${\pr} \circ d\rho_F = dR_F \circ \pr$. Note that since $R_F$ is an affine transformation, $R_F = T_x \circ dR_F \circ T_{-x}$, where $T_x$ denotes translation by a vector $x \in F$. Similarly, $\rho_F = T_{(x,h(F))} \circ d\rho_F \circ T_{(-x,-h(F))}$. Therefore
\begin{align}
{\pr} \circ \rho_F
&= {\pr} \circ T_{(x,h(F))} \circ d\rho_F \circ T_{(-x,-h(F))} \\
&= T_x \circ {\pr} \circ d\rho_F \circ T_{(-x,-h(F))} \\
&= T_x \circ dR_F \circ {\pr} \circ T_{(-x,-h(F))} \\
&= T_x \circ dR_F \circ T_{-x} \circ {\pr} \\
&= R_F \circ {\pr}.
\end{align}
\begin{figure}
\caption{(a) An illustration of some notation from the proof of \cref{lem:CompatibilityProjection}.}
\label{fig:ReflectionsCompatibleWithProjection}
\end{figure}
\end{proof}
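As a sanity check on \cref{eq:BevelingDirectSumDet}, consider a face $F$ of a polygon $\X \subset \R^2$ whose unit normal is $e_1$ (these labels are ours and are not tied to any particular figure). Then $dR_F = \operatorname{diag}(-1,1)$, while $d\rho_F = \operatorname{diag}(-1,1,1)$ if $F \notin \F$ (a single reflection about a vertical plane) and $d\rho_F = \operatorname{diag}(-1,1,-1)$ if $F \in \F$ (the product of the two reflections about the supporting hyperplanes of $\B^h(F)$); in both cases the last diagonal entry equals $-\det d\rho_F$, as the lemma asserts.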
As a consequence of the previous results, we can derive the following relationship between the parallel transport maps of billiard trajectories $\beta$ and $\hat\beta$, where $\hat\beta$ is a folded billiard trajectory in a beveling that simulates colliding at the same faces that $\beta$ collides at.
\begin{lemma}
\label{lem:FlatteningParallelTransport}
Let $\beta$ be a billiard trajectory in $\X$ that collides at faces $F_1, \dotsc, F_k$. Let $\hat\X = \bvl\X\F{h}$ be a beveling of $\X$, and suppose that $\hat\beta$ is a folded billiard trajectory in $\hat\X$ that simulates colliding at $F_1, \dotsc, F_k$. Assume further that $\hat\beta$ collides with every face inherited from $\B^h(F_k)$. Then the parallel transport maps $P : \R^n \to \R^n$ and $\hat{P} : \R^{n+1} \to \R^{n+1}$ of $\beta$ and $\hat\beta$ respectively (where tangent spaces have been canonically identified with ambient Euclidean spaces) are related by
\begin{equation}
\hat{P} = P \oplus (-1)^{\card{\F'}},
\end{equation}
with respect to the decomposition $\R^{n+1} \iso \R^n \oplus \R$, and where $\F'$ is the subset of faces in $\F$ that $\beta$ collides with. The number of collisions of $\hat\beta$ is also $\card{\F'}$ more than that of $\beta$.
\end{lemma}
\begin{proof}
We may assume that both billiard trajectories collide at most once with each face, as the general case follows in the same manner. For the number of collisions, observe from \cref{fig:CollisionTypes} that $\hat\beta$ experiences two collisions with faces inherited from $\B^h(F_i)$ if $F_i \in \F$, otherwise it collides only once with a face inherited from $\B^h(F_i)$. Either way, $\beta$ only collides once with $F_i$.
Note that $P = dR_{F_k} \dotsm dR_{F_1}$ and $\hat{P} = d\rho_{F_k} \dotsm d\rho_{F_1}$, and the rest follows from \cref{eq:BevelingDirectSumDet}.
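In more detail, writing $\epsilon_i = -\det d\rho_{F_i}$ (a shorthand introduced only for this computation), \cref{eq:BevelingDirectSumDet} gives
\begin{equation*}
\hat{P} = \big( dR_{F_k} \oplus \epsilon_k \big) \dotsm \big( dR_{F_1} \oplus \epsilon_1 \big) = \big( dR_{F_k} \dotsm dR_{F_1} \big) \oplus \big( \epsilon_k \dotsm \epsilon_1 \big) = P \oplus (-1)^{\card{\F'}},
\end{equation*}
since $\epsilon_i = -1$ precisely when $F_i \in \F$, and exactly $\card{\F'}$ of the faces $F_1, \dotsc, F_k$ lie in $\F$.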
\end{proof}
\subsection{Properties of folded billiard trajectories}
\label{sec:FoldedBilliardTrajProperties}
The hypotheses of \cref{lem:FlatteningParallelTransport} require that $\hat\beta$ simulates colliding at the same faces that $\beta$ collides at. We will prove \cref{prop:FoldedTrajectoryProjection} which asserts that under certain conditions, this occurs when $\beta$ is derived in a particular manner from ${\pr} \circ \hat\beta$.
To prove that, we will need to study the segments of $\hat\beta$. The example in \cref{fig:FoldedBilliardTrajectory}(a)--(c) of a billiard trajectory in a beveling $\bvl\X\F{h}$ has crease-like segments $x_1x_2$, $x_3x_4$ and $x_5x_6$. Its other segments are horizontal. Hence the billiard trajectory begins with a horizontal segment, collides with a face of $\B^h(F)$ for some $F \in \F$, continues along a crease-like segment, collides with the other face of $\B^h(F)$, proceeds along a horizontal segment, and the same repeats. If, for some other beveling, a horizontal segment of the billiard trajectory collides with the face of $\B^h(F)$ for some $F \notin \F$, then the trajectory would continue along another horizontal segment.\footnote{One could consider the example in \cref{fig:ProjectUnraveling} of a beveling $\bvl\X\F{h}$ where $\F$ does not contain every face of $\X$.} (Compare with \cref{fig:CollisionTypes}.) The behaviour outlined above is confirmed by \cref{lem:FoldlikeOrHorizontal} and \cref{prop:FoldedTrajectoryProjection}.
\begin{lemma}
\label{lem:FoldlikeOrHorizontal}
Let $\bvl\X\F{h}$ be a beveling of an $n$-polytope $\X$ with a folded billiard trajectory $\hat\beta : [a,b] \to \bvl\X\F{h}$. Then each segment of $\hat\beta$ is horizontal if and only if it is not crease-like. Consequently, the first segment of $\hat\beta$ cannot be crease-like. Moreover, the segments before and after a crease-like segment must be horizontal.
\end{lemma}
\begin{proof}
The definition of a folded billiard trajectory requires the first segment of $\hat\beta$ to be horizontal. Let $\hat\beta$ simulate colliding at $F_1, \dotsc, F_k$. Suppose that $F_1 \notin \F$. Then the velocity vector of $\hat\beta$ just after the first collision is obtained by reflecting $\hat\beta'(a)$ about the hyperplane through the origin that is parallel to $\partial\B^h(F_1)$. Therefore the second segment of $\hat\beta$ would also be horizontal. Repeating the same argument shows that if $F_1, \dotsc, F_j \notin \F$ then the first $j+1$ segments of $\hat\beta$ will be horizontal. Now suppose that $F_{j+1} \in \F$. Let $\hat{F}, \hat{F}'$ be the faces of $\bvl\X\F{h}$ inherited from $\B^h(F_{j+1})$, among which $\hat\beta$ collides with $\hat{F}$ first at time $t$ (and may or may not collide with $\hat{F}'$). Just before time $t$, $\hat\beta$ is traveling on a horizontal segment $\eta$, and some elementary geometry will verify that the segment $\sigma$ after time $t$ satisfies the definition of a crease-like segment. If $\sigma$ is not the final segment, then the definition of a folded billiard trajectory requires that $\hat\beta$ collides with $\hat{F}'$ next. Therefore the velocity vector of the next segment is obtained from that of $\eta$ via the transformation $dR_{\hat{F}'}dR_{\hat{F}} = d\rho_{F_{j+1}}$, so by \cref{lem:CompatibilityProjection} the next segment is horizontal again. By continuing to apply the above arguments, we can show that all of the segments of $\hat\beta$ are either horizontal or crease-like.
\end{proof}
\begin{proposition}
\label{prop:FoldedTrajectoryProjection}
Let $\bvl\X\F{h}$ be a beveling of an $n$-polytope $\X$, and $\hat\beta : [a,b] \to \bvl\X\F{h}$ be a folded billiard trajectory that simulates colliding at $F_1, \dotsc, F_{i^*}$. (If $\hat\beta$ has no collisions, i.e. it is a line segment, then $i^* = 0$.) Then
\begin{statement}
\item\label{stmt:CreaselikeSegmentConstDistance} For each crease-like segment $\sigma$ of $\hat\beta$ that simulates folding around $F$, ${\pr} \circ \sigma$ is at constant distance $d_\sigma = \abs{\height(x) - h(F)} > 0$ away from the supporting hyperplane of $F$, where $x$ is the point from which $\sigma$ begins.
\end{statement}
If the final segment of $\hat\beta$ is horizontal then the following statements also hold:
\begin{statement}
\setcounter{statementi}{1}
\item\label{stmt:HorizontalSegmentHeight} Suppose that among the faces that $\hat\beta$ simulates colliding at, $F_{m_1}, \dotsc, F_{m_k}$ are those that lie in $\F$, where $1 \leq m_1 < \dotsb < m_k \leq i^*$. Then
\begin{equation}
\label{eq:HorizontalSegmentHeight}
\height(\hat\beta(b)) = 2\big( h(F_{m_k}) - h(F_{m_{k-1}}) + \dotsb + (-1)^{k-1}h(F_{m_1}) \big) + (-1)^k\height( \hat\beta(a)).
\end{equation}
\item\label{stmt:FlatteningFoldedTrajectory} There are $i^* + 1$ horizontal segments $\eta_0, \eta_1, \dotsc, \eta_{i^*}$, listed in the order they are traversed by $\hat\beta$. For each $1 \leq i \leq i^*$, the lines containing ${\pr} \circ \eta_{i-1}$ and ${\pr} \circ \eta_i$ intersect the supporting hyperplane of $F_i$ at the same point $x_i$. Assume further that each $x_i$ lies in the interior of $F_i$. Then $\pr(\hat\beta(a)), x_1, \dotsc, x_{i^*}, \pr(\hat\beta(b))$ is a billiard trajectory $\beta : [a,b] \to \X$ of the same length as $\hat\beta$. (If $\hat\beta$ has no collisions, then $\pr(\hat\beta(a)), \pr(\hat\beta(b))$ is a billiard trajectory in $\X$ that is also a line segment.)
\item\label{stmt:FlatteningProperties} Assuming that all of \ref{stmt:FlatteningFoldedTrajectory} holds,
\begin{equation}
\label{eq:DeviationFromProjection} \dist{\pr(\hat\beta(t)), \beta(t)} \leq \max_\sigma d_\sigma \qquad\text{for all } t \in [a,b],
\end{equation}
where the maximum is taken over all crease-like segments $\sigma$ of $\hat\beta$. Furthermore, $\pr(\hat\beta(t)) = \beta(t)$ whenever $\hat\beta(t)$ lies on a horizontal segment.
\end{statement}
\end{proposition}
\begin{remark}
\label{rem:FoldedTrajectoryProjection_Illustration}
Before we proceed to the proof, we note that \cref{eq:HorizontalSegmentHeight} is satisfied by the $\hat\beta$ illustrated in \cref{fig:FoldedBilliardTrajectory}: note that in this example, $\R^2 \times \{h(F_1)\}$ lies exactly halfway in height between the horizontal segments $\eta_0 = \hat\beta(a)x_1$ and $\eta_1 = x_2x_3$ (compare \cref{fig:FoldedBilliardTrajectory}(b) with \cref{fig:Beveling}(f)). Hence, $h(F_1) = \frac12(\height(\eta_0) + \height(\eta_1))$. \Cref{stmt:CreaselikeSegmentConstDistance} can also be partially verified in the same example: when the crease-like segment $x_1x_2$ is depicted in \cref{fig:FoldedBilliardTrajectory}(c) using the projection $\pr$, it looks parallel to $F_1$ in \cref{fig:Beveling}(a). (Compare also with \cref{fig:ProjectedCreaselikeSegments}.) \Cref{stmt:FlatteningFoldedTrajectory} is also satisfied by the same example: the billiard trajectory in $\X$ obtained by applying \cref{stmt:FlatteningFoldedTrajectory} to the $\hat\beta$ in \cref{fig:FoldedBilliardTrajectory} is precisely $\beta$ from \cref{fig:Beveling}(a). \Cref{stmt:FlatteningProperties} essentially asserts that ${\pr} \circ \hat\beta$ stays ``close'' to $\beta$; this property can be observed by comparing \cref{fig:Beveling}(a) to \cref{fig:FoldedBilliardTrajectory}(c).
When the assumptions of \cref{stmt:FlatteningFoldedTrajectory} are satisfied, the resulting billiard trajectory in $\X$ (or a slight modification) will play the role of $\beta_n$ from the discussion at the beginning of \cref{sec:BevelingOrigamiModel}, while $\hat\beta$ will play the role of $\beta_{n+1}$.
\end{remark}
\begin{figure}
\caption{The crease-like segments $\sigma_1$, $\sigma_2$ and $\sigma_3$ of a folded billiard trajectory $\hat\beta$ in the beveling $\bvl\X\F{h}$.}
\label{fig:ProjectedCreaselikeSegments}
\end{figure}
\begin{proof}
The elements of our proof will be illustrated in \cref{fig:ComparingTrajWithOrigamiModel,fig:UnravelingExtensionsIntersectPlanes}, in the context of the $\X$ and $\hat\X = \bvl\X\F{h}$ from \cref{fig:ProjectUnraveling}. To simplify notation, we will write $\hat\X_1 = \rho_{F_1}(\hat\X)$. Note that in this situation, the face $F_1$ lies in $\F = \{F_1, F_3\}$.
\ref{stmt:CreaselikeSegmentConstDistance}: Suppose that $\sigma$ is a crease-like segment that simulates folding around the face $F_i$. This segment must start from some point $x$ in a face $\hat{F}$ of $\bvl\X\F{h}$ that is inherited from $\B^h(F_i)$. $\sigma$ must be preceded by a horizontal segment $\eta$ by \cref{lem:FoldlikeOrHorizontal}, and the definition of a billiard trajectory implies that $R_{\hat{F}} \circ \sigma$ and $\eta$ lie on the same horizontal line. (This is illustrated in \cref{fig:ComparingTrajWithOrigamiModel}(a) in the case where $i = 1$; in this scenario, $\eta = \eta_0$ and $R_{\hat{F}} \circ \sigma = \hat\ell_1$.) Let $\Pi$ be the supporting hyperplane of $F_i$. Then for every point $y \in \sigma$, $\dist{\pr(y),\Pi} = \dist{y,\Pi \times \R} = \dist{R_{\hat{F}}(y), R_{\hat{F}}(\Pi \times \R)}$. It can be seen that $R_{\hat{F}}(\Pi \times \R) = \R^n \times \{h(F_i)\}$. Therefore $\dist{R_{\hat{F}}(y), \R^n \times \{h(F_i)\}} = \dist{\eta, \R^n \times \{h(F_i)\}} = \abs{\height(x) - h(F_i)}$. This number is positive because $x$, being a collision point of $\hat\beta$, lies in the interior of the face $\hat{F}$ and therefore not on the $(n-1)$-cell of $\B^h(F_i)$, which is the only part of $\hat{F}$ at height $h(F_i)$.
Henceforth we will assume that the final segment of $\hat\beta$ is horizontal. The proof of \cref{lem:FoldlikeOrHorizontal}---in particular, how $\hat\beta$ switches from horizontal segments to crease-like segments and vice versa---implies that there are $i^* + 1$ horizontal segments, $\eta_0, \eta_1, \dotsc, \eta_{i^*}$.
\ref{stmt:HorizontalSegmentHeight}: We use the standard technique of ``unraveling'' a billiard trajectory into a straight line. Let $\hat\ell : [a,b] \to \R^{n+1}$ be a line segment that extends the first segment of $\hat\beta$ forward at a constant velocity equal to $\hat\beta'(a)$ (see \cref{fig:ComparingTrajWithOrigamiModel}(a)). Since the first segment of $\hat\beta$ is horizontal by the definition of a folded billiard trajectory, $\hat\ell$ must also be horizontal. Suppose that $\hat\beta$ collides with $j^*$ faces $\hat{F}_1,\dotsc,\hat{F}_{j^*}$ of $\hat\X = \bvl\X\F{h}$. Then $\hat\beta$ is the concatenation of several segments $R_{\hat F_j} \dotsm R_{\hat F_1} \circ \hat\ell_j$, where $\hat\ell_j$ is the restriction of $\hat\ell$ to the portion that lies in $R_{\hat{F}_1} \dotsm R_{\hat{F}_j}(\hat\X)$ (see \cref{fig:ComparingTrajWithOrigamiModel}(a)). This implies that for each $1 \leq i \leq i^*$, $\eta_i$ is a restriction of $\rho_{F_i} \dotsm \rho_{F_1} \circ \hat\ell$, where the $R$'s have been grouped into $\rho$'s. (This is possible because $\eta_i$ cannot be a crease-like segment by \cref{lem:FoldlikeOrHorizontal}.) Therefore $\rho_{F_i} \circ \eta_{i-1}$ and $\eta_i$ lie on the same line, and have the same height.
\begin{figure}
\caption{An illustration of the objects and notations from \cref{prop:FoldedTrajectoryProjection}.}
\label{fig:ComparingTrajWithOrigamiModel}
\end{figure}
From the proof of \cref{lem:CompatibilityProjection}, $d\rho_F$ can be expressed in terms of a matrix that depends on whether $F \in \F$ or $F \notin \F$. Whenever $d\rho_F(w) = w'$, if $F \notin \F$ then $\height(w) = \height(w')$, whereas if $F \in \F$ then $\height(w) = -\height(w')$. Now suppose that $\rho_F(x) = y$. From the expression of $\rho_F$ as the conjugate of $d\rho_F$ by a translation (again from the proof of \cref{lem:CompatibilityProjection}), if $F \notin \F$ then $x$ and $y$ have the same height. On the other hand, if $F \in \F$, then $\height(y)$ is the reflection, in $\R$, of $\height(x)$ about $h(F)$. Thus, from the fact that $\rho_{F_i} \circ \eta_{i-1}$ and $\eta_i$ lie on the same line, we deduce that for each $1 \leq i \leq i^*$ the heights of $\eta_{i-1}$ and $\eta_i$ satisfy the recurrence relation
\begin{equation}
\label{eq:SegmentHeightRecurrence}
\height(\eta_i) =
\begin{cases}
\height(\eta_{i-1}) & F_i \notin \F \\
2h(F_i) - \height(\eta_{i-1}) & F_i \in \F.
\end{cases}
\end{equation}
\Cref{eq:HorizontalSegmentHeight} can be derived from this recurrence relation.
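To spell out that derivation: the height is unchanged at collisions with faces outside $\F$, so only the indices $m_1 < \dotsb < m_k$ from \ref{stmt:HorizontalSegmentHeight} contribute (with the convention $m_0 = 0$), and iterating \cref{eq:SegmentHeightRecurrence} gives
\begin{equation*}
\height(\eta_{m_j}) = 2h(F_{m_j}) - \height(\eta_{m_{j-1}}) \qquad (1 \leq j \leq k),
\end{equation*}
so that
\begin{equation*}
\height(\eta_{m_k}) = 2\big( h(F_{m_k}) - h(F_{m_{k-1}}) + \dotsb + (-1)^{k-1}h(F_{m_1}) \big) + (-1)^k \height(\eta_0).
\end{equation*}
Since the final segment is horizontal, $\height(\hat\beta(b)) = \height(\eta_{i^*}) = \height(\eta_{m_k})$ and $\height(\eta_0) = \height(\hat\beta(a))$, which is \cref{eq:HorizontalSegmentHeight}.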
\ref{stmt:FlatteningFoldedTrajectory}: As with our proof of \ref{stmt:HorizontalSegmentHeight}, $\hat\beta$ can be analysed by ``unraveling'' it into a line segment $\hat\ell$ passing through copies of $\bvl\X\F{h}$ that are transformed by a series of reflections. The $\hat\X_1$ we defined at the beginning of the proof is one of them while some others are of the form $\hat\X_i = \rho_{F_1} \dotsm \rho_{F_i}(\bvl\X\F{h})$. Under this ``unravelling'', each $\eta_i$ corresponds to a transformed copy $\eta_i^* = \rho_{F_1} \dotsm \rho_{F_i} \circ \eta_i$ that is the restriction of $\hat\ell$ to the portion that lies inside $\hat\X_i$. (In \cref{fig:ComparingTrajWithOrigamiModel}(a), $\eta_0^*$ is $\hat\ell_0$ and $\eta_1^*$ is $\hat\ell_2$. $\eta_0^*$ and $\eta_1^*$ are also depicted in \cref{fig:UnravelingExtensionsIntersectPlanes}(a).) Consider a similar setup for $\X$: $\ell = {\pr} \circ \hat\ell : [a,b] \to \R^n$ is the line segment that starts from $\pr(\hat\beta(a))$ (which lies in $\X$ due to \cref{lem:BevelingCrossSection}) with constant velocity equal to $\pr(\hat\beta'(a))$ (see \cref{fig:ComparingTrajWithOrigamiModel}(a) and \cref{fig:UnravelingExtensionsIntersectPlanes}(b)). $\ell$ passes through transformed copies of $\X$ of the form $\X_i = R_{F_1} \dotsm R_{F_i}(\X)$. Each line segment ${\pr} \circ \eta_i^*$ is a restriction of $\ell$ into a line segment that lies in $\X_i$ (because of \cref{lem:BevelingCrossSection}) but that may not touch $\partial\X_i$ (see \cref{fig:UnravelingExtensionsIntersectPlanes}(b)). To prove \ref{stmt:FlatteningFoldedTrajectory}, we essentially want to show that if we extend these segments all the way to touch $\partial\X_i$ and then transform the extended segments back into $\X$ via sequences of reflections $R_{F_i}\dotsm R_{F_1}$, then they will form a billiard trajectory.
\begin{figure}
\caption{An illustration of some notation from the proof of \cref{prop:FoldedTrajectoryProjection}.}
\label{fig:UnravelingExtensionsIntersectPlanes}
\end{figure}
\Cref{eq:ProjectionCommuteReflection} implies that
\begin{equation}
\label{eq:ProjectReflectedCopyHorizontalSegment}
{\pr} \circ \eta_i^* = {\pr} \circ \rho_{F_1} \dotsm \rho_{F_i} \circ \eta_i = R_{F_1} \dotsm R_{F_i} \circ {\pr} \circ \eta_i.
\end{equation}
This helps us to translate \cref{stmt:FlatteningFoldedTrajectory} into a statement in terms of the $\eta_i^*$'s. Consider any $1 \leq i \leq i^*$, and let $\Pi$ be the supporting hyperplane of $F_i$. First of all, note that the line containing $\eta_i$ must intersect $\Pi \times \R$ transversally at some point $y$, because of the horizontal nature of $\eta_i$ and the fact that, as part of a billiard trajectory, $\eta_i$ cannot be parallel to any face of $\bvl\X\F{h}$. Similarly, the line containing $\eta_{i-1}$ must intersect $\Pi \times \R$ transversally at some point $y'$. ($\Pi \times \R$, $y$ and $y'$ are depicted in \cref{fig:UnravelingExtensionsIntersectPlanes}(a), in the case where $i = 1$.) Therefore ${\pr} \circ \eta_i$ extends to a line that intersects $\Pi$ at $\pr(y)$, and this is equivalent, by \cref{eq:ProjectReflectedCopyHorizontalSegment}, to the fact that ${\pr} \circ \eta_i^*$ extends to a line that intersects $R_{F_1} \dotsm R_{F_i}(\Pi)$ at $R_{F_1} \dotsm R_{F_i}(\pr(y))$. Similarly, ${\pr} \circ \eta_{i-1}$ extends to a line that intersects $\Pi$ at $\pr(y')$, and that is equivalent to the fact that ${\pr} \circ \eta_{i-1}^*$ extends to a line that intersects $R_{F_1} \dotsm R_{F_{i-1}}(\Pi)$ at $R_{F_1} \dotsm R_{F_{i-1}}(\pr(y'))$. On the other hand, observe that ${\pr} \circ \eta_{i-1}^*$ and ${\pr} \circ \eta_i^*$ must extend into the same line segment $\ell$ (see \cref{fig:UnravelingExtensionsIntersectPlanes}(b)), and $R_{F_1} \dotsm R_{F_i}(\Pi) = R_{F_1} \dotsm R_{F_{i-1}}(\Pi)$ because $R_{F_i}$ must fix $\Pi$. Therefore the two points $R_{F_1} \dotsm R_{F_i}(\pr(y))$ and $R_{F_1} \dotsm R_{F_{i-1}}(\pr(y'))$ must be the same point $x_i^*$. Since $R_{F_i}$ fixes $\pr(y) \in \Pi$, we can conclude that $\pr(y)$ and $\pr(y')$ are the same point $x_i$ that is required in \cref{stmt:FlatteningFoldedTrajectory}. (When $i = 1$, $x_1 = x_1^*$, and this point is depicted in \cref{fig:UnravelingExtensionsIntersectPlanes}(b).)
Now we assume that $x_i$ lies in the interior of $F_i$. Translating this to the ``unraveling'', this means that $x_i^*$ lies in the interior of a face of $\X_i$ (which is also a face of $\X_{i-1}$). However this implies that $\ell$ is partitioned into segments by the points $x_1^*, \dotsc, x_{i^*}^*$, and each of those segments lie in some $\X_i$. We may then ``reverse the unraveling'' by transforming each segment in $\X_i$ back into $\X$ via $R_{F_i}\dotsm R_{F_1}$ to get a billiard trajectory $\pr(\hat\beta(a)), x_1, \dotsc, x_{i^*}, \pr(\hat\beta(b))$. Denote this billiard trajectory by $\beta : [a,b] \to \X$.
The conclusion that $\beta$ has the same length as $\hat\beta$ can be derived from the preservation of length by the process of ``unraveling'' $\hat\beta$ into $\hat\ell$, projecting it via $\pr$ into $\ell$, and ``reversing the unraveling''.
\ref{stmt:FlatteningProperties}: When $\hat\beta(s)$ lies on $\eta_i$, it corresponds to the point $\hat\ell(s) \in \eta_i^* \subset \hat\X_i$ and thus $\ell(s) = \pr(\hat\ell(s)) \in \X_i$. ($\pr(\hat\X_i) \subset \X_i$ because of \cref{eq:ProjectReflectedCopyHorizontalSegment} and \cref{lem:BevelingCrossSection}.) Therefore we may apply \cref{eq:ProjectReflectedCopyHorizontalSegment} again to get $\beta(s) = R_{F_i}\dotsm R_{F_1} \circ \pr(\hat\ell(s)) = {\pr} \circ \rho_{F_i}\dotsm\rho_{F_1}(\hat\ell(s)) = \pr(\hat\beta(s))$.
Therefore it suffices to prove \cref{eq:DeviationFromProjection} for all $s \in [a,b]$ such that $\hat\beta(s)$ lies in the interior of a crease-like segment $\sigma$ or is the ending point of $\sigma$. Let $\sigma$ simulate folding around $F_i$, and let $\sigma$ start from a point on a face $\hat{F}$ of $\hat\X$. For simplicity's sake, let us assume that $i = 1$; the argument will not change much. Let $\B = \B^h(F_1)$. Then in the unraveling, $\sigma$ corresponds to a segment of $\hat\ell$, namely $\hat\ell|_{[u,v]}$, that lies in $R_{\hat{F}}(\hat\X) \subset R_{\hat{F}}(\B)$ (see \cref{fig:DeviationFromProjection}(a)). In the unraveling, $\beta|_{[u,v]}$ corresponds to $\ell|_{[u,v]}$, which we will compare to $\hat\ell|_{[u,v]}$ indirectly by using another line segment $\mu : [u,v] \to \R^{n+1}$ that is obtained by translating $\hat\ell|_{[u,v]}$ ``vertically''---that is, by changing the last coordinate of every point until the line segment passes through $F_i \times \{h(F_i)\}$ (see \cref{fig:DeviationFromProjection}(a)). $\mu$ serves as a good proxy for $\ell|_{[u,v]}$ because ${\pr} \circ \mu = {\pr} \circ \hat\ell|_{[u,v]} = \ell|_{[u,v]}$, $\mu$ lies entirely in $\B \cup \rho_{F_1}(\B)$, $\pr(\mu \cap \B)$ is the portion of $\beta|_{[u,v]}$ before a collision, and $\pr(\rho_{F_1}(\mu \cap \rho_{F_1}(\B)))$ is the portion of $\beta|_{[u,v]}$ after a collision (see \cref{fig:DeviationFromProjection}(b)).
\begin{figure}
\caption{An illustration of the objects and notation used to prove \cref{prop:FoldedTrajectoryProjection}.}
\label{fig:DeviationFromProjection}
\end{figure}
Now note that for each $r \in [u,v]$, the line segment from $\hat\ell(r)$ to $\mu(r)$ (which would look like the blue line segment in \cref{fig:ComparingTrajWithOrigamiModel}(b)) is of length exactly $d_\sigma$. Hence when we ``undo the unraveling'' and transform $R_{\hat{F}}(\B)$ and $\rho_{F_1}(\B)$ back to $\B$ via the transformations $R_{\hat{F}}$ and $\rho_{F_1}$ respectively, these transformations send the line segment between $\hat\ell(r)$ and $\mu(r)$ to a piecewise-linear path---whose length is preserved---from $\hat\beta(r)$ to a point that projects under $\pr$ to $\beta(r)$. Since projecting via $\pr$ cannot increase the length of the piecewise-linear path, we have $\dist{\pr(\hat\beta(r)), \beta(r)} \leq d_\sigma$. (The projection of the piecewise-linear path is drawn in blue in \cref{fig:ComparingTrajWithOrigamiModel}(c).)
\end{proof}
The billiard trajectory $\beta$ in $\X$ derived from $\hat\beta$ in \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningFoldedTrajectory} has a parallel transport map that is very closely related to that of $\hat\beta$, as shown in \cref{lem:FlatteningParallelTransport}. For this reason, given a periodic billiard trajectory $\beta^n$ in $n$-polytope $\X^n$, we will aim to construct a beveling $\X^{n+1}$ of $\X^n$ and a periodic folded billiard trajectory $\beta^{n+1}$ in $\X^{n+1}$ such that $\beta^n$ is the billiard trajectory derived from $\beta^{n+1}$ via \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningFoldedTrajectory}. This approach will let us construct periodic billiard trajectories in convex polytopes of arbitrarily high dimension while controlling their parallel transport maps.
\subsection{Origami models}
\label{sec:OrigamiModels}
As stated earlier, in order to prove \cref{thm:StableClosedGeodesicPosCurv}, we will find a way to take a periodic billiard trajectory $\beta : [a,b] \to \X$ in an $n$-polytope $\X$ and construct some beveling $\hat\X = \bvl\X\F{h}$ and a periodic folded billiard trajectory $\hat\beta : [a,b] \to \hat\X$ such that $\beta$ is the billiard trajectory derived from $\hat\beta$ via \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningFoldedTrajectory}. Our strategy to construct $\hat\beta$ will be to start with the initial conditions $\hat\beta(a) = (\beta(a),0)$ and $\hat\beta'(a) = (\beta'(a),0)$, extend $\hat\beta$ forward at constant velocity until it collides with $\partial\hat\X$, extend it so that it becomes a billiard trajectory, extend forward at constant velocity until the next collision with $\partial\hat\X$, extend so that it remains a billiard trajectory, and so on until $\hat\beta$ has the same length as $\beta$. Of course, this is only possible if $\hat\beta$ only ever collides in the interiors of faces of $\hat\X$. That is, $\hat\beta$ must avoid the $(n-1)$-cells of $\hat\X$. \Cref{fig:ExtendPastCollision} illustrates several scenarios in which we may attempt to extend $\hat\beta : [a,t] \to \hat\X$ past a collision at $\hat\beta(t)$, where $\hat\X$ is the same $\hat\X$ from \cref{fig:ProjectUnraveling}. The extension is possible in the scenario of \cref{fig:ExtendPastCollision}(a) as $\hat\beta(t)$ lies in the interior of a face of $\hat\X$, but not in \cref{fig:ExtendPastCollision}(b)--(d) because $\hat\beta(t)$ lies on an $(n-1)$-cell of $\hat\X$.
\begin{figure}
\caption{Extending a billiard trajectory $\hat\beta : [a,t] \to \hat\X$ past a collision at time $t$, where $\hat\X$ is the same $\hat\X$ from \cref{fig:ProjectUnraveling}.}
\label{fig:ExtendPastCollision}
\end{figure}
As we will see, the height function $h$ can be chosen carefully to avoid the scenarios in \cref{fig:ExtendPastCollision}(b)--(d) and ensure that $\hat\beta$ only ever collides in the interiors of faces of $\hat\X$. Every time $\hat\beta$ is extended past a collision, we wish for it to correspond to a longer and longer portion of $\beta$ under \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningFoldedTrajectory}, so that we can exploit \cref{lem:FlatteningParallelTransport} to maintain control on the parallel transport map of $\hat\beta$. The following definition summarizes a few of the desirable properties mentioned above.
\begin{definition}[Origami model]
Given an $n$-polytope $\X$, a billiard trajectory $\beta : [a,b] \to \X$ and a subset $\F$ of the faces of $\X$, a \emph{partial origami model of $(\X,\beta)$} is a pair $(\hat\X,\hat\beta)$ where $\hat\X = \bvl\X\F{h}$ for some $h : \F \to \R$ and $\hat\beta$ is the unique billiard trajectory $[a,t] \to \hat\X$, for some $t \in [a,b]$, such that the following conditions are satisfied:
\begin{itemize}
\item $\hat\beta(a) = (\beta(a),0)$ and $\hat\beta'(a) = (\beta'(a),0)$.
\item $\hat\beta$ is folded.
\item Let $s \in [a,t]$ be the latest time such that $\hat\beta(s)$ lies on a horizontal segment. Then $\hat\X$ and $\hat\beta|_{[a,s]}$ satisfy the assumptions of \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningFoldedTrajectory}, and the resulting billiard trajectory in $\X$ is $\beta|_{[a,s]}$.
\end{itemize}
We say that this partial origami model is \emph{defined up to time $t$}. When $t = b$, we say that $(\hat\X,\hat\beta)$ is a \emph{complete origami model} of $(\X, \beta)$, or simply an \emph{origami model}.
\end{definition}
Consider the following example: the $(\hat\X, \hat\beta)$ illustrated in \cref{fig:FoldedBilliardTrajectory}(a)--(c) is an origami model of the $(\X,\beta)$ illustrated in \cref{fig:Beveling}(a) (see \cref{rem:FoldedTrajectoryProjection_Illustration}).
The main goal of this subsection is to prove \cref{thm:OrigamiModelConstruction}, which stipulates some conditions under which origami models can always be constructed.
\begin{remark}
\label{rem:OrigamiModelParallelTransport}
If $(\hat\X, \hat\beta)$ is an origami model of $(\X, \beta)$, with $s$ as in the definition, and $\beta|_{[a,s]}$ collides with $F_1, \dotsc, F_k$, then $\hat\beta$ must simulate colliding at $F_1, \dotsc, F_k$ and $\hat\beta$ must collide with every face inherited from $\B^h(F_k)$. In particular, the parallel transport maps of $\beta|_{[a,s]}$ and $\hat\beta$ are related by \cref{lem:FlatteningParallelTransport}.
\end{remark}
In \cref{fig:ExtendPastCollision}(b) we see $\hat\beta$ collide with an $(n-1)$-cell (i.e. edge) of $\B^h(F_1)$ at some point $\hat\beta(t)$. (The edges $F_1, \dotsc, F_4$ of $\X$ are labeled in \cref{fig:ProjectUnraveling}.) However, if $\hat\beta$ is a folded billiard trajectory that simulates colliding at the same faces as $\beta$, then $\height(\hat\beta(t))$ can be predicted using \cref{prop:FoldedTrajectoryProjection}\ref{stmt:HorizontalSegmentHeight}, so to avoid the collision of $\hat\beta$ with the $(n-1)$-cell of any $\B^h(F)$, we simply have to choose the height function $h$ carefully. The following definitions will help us make this choice.
\begin{definition}[Heights in general position, isolated collisions]
\label{def:HeightsGeneralPositionIsolatedCollisions}
Let $\beta$ be a billiard trajectory in an $n$-polytope, and let $\F$ be a subset of the faces of that polytope. Suppose that among the faces in $\F$, $\beta$ collides with $F_1, \dotsc, F_k$ in that order. For each function $h : \F \to \R$, define the sequence $H_0, H_1, \dotsc, H_k$ where $H_0 = 0$ and for $1 \leq i \leq k$, $H_i$ is defined by either of the following equivalent relations:
\begin{equation}
\label{eq:HeightFunctionRecurrence}
h(F_i) = \frac12(H_{i-1} + H_i)
\iff H_i = 2\big( h(F_i) - h(F_{i-1}) + \dotsb + (-1)^{i-1}h(F_1) \big).
\end{equation}
Then we say that $h$ is in \emph{general position with respect to $\beta$} if $h(F_i) \neq H_{i-1}$ (or equivalently, $h(F_i) \neq H_i$) for all $1 \leq i \leq k$. We also say that $h$ is \emph{closed with respect to $\beta$} if $H_k = H_0 = 0$.
Moreover, define the following key parameters:
\begin{align}
\Delta(h,\beta) &= \max_{1 \leq i \leq k} \abs{h(F_i) - H_i} = \max_{1 \leq i \leq k} \abs{h(F_i) - H_{i-1}} \\
\Hmax(h, \beta) &= \max_{0 \leq i \leq k} H_i \\
\Hmin(h, \beta) &= \min_{0 \leq i \leq k} H_i.
\end{align}
These parameters depend on both $h$ and $\beta$, but we will suppress this dependence when the $h$ and $\beta$ in question are clear from the context.
We say that $\beta$ has \emph{isolated collisions with respect to $h$} if every point on $\beta$ is within distance $\Delta + \Hmax - \Hmin$ from at most one supporting hyperplane of $\X$.
\end{definition}
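For instance, suppose that $k = 2$ and $h(F_1) = h(F_2) = c$ for some constant $c > 0$ (a toy choice made only to illustrate the definitions). Then $H_1 = 2c$ and $H_2 = 0$, so $h$ is both in general position and closed with respect to $\beta$, with $\Delta(h,\beta) = c$, $\Hmax(h,\beta) = 2c$ and $\Hmin(h,\beta) = 0$; in particular $\Delta \leq \Hmax - \Hmin$, as \cref{lem:HeightParameterRelations} below asserts in general.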
\begin{remark}[Strategy to prove ``niceness'' of partial origami models]
The conditions of general position and isolated collisions are designed to guarantee the ``niceness'' of $\hat\beta$, a folded billiard trajectory in an origami model $(\hat\X, \hat\beta)$ of $(\X,\beta)$. Many of the subsequent results in this section are proven using some version of the following strategy: assume that $\hat\beta$ comes too close to ``singular features'' of $\hat\X$ (such as some $(\dim\hat\X - 2)$-cell). Previous results bounding the distances between points on $\hat\beta$ and parts of $\partial\X$ or $\beta$ (such as \cref{prop:FoldedTrajectoryProjection}) will be chained together using the triangle inequality to show that some point on $\hat\beta$ will be close to multiple points on $\partial \hat\X$, which will contradict some hypothesis in the result such as isolated collisions.
\end{remark}
\Cref{lem:HeightRangeAllPoints,lem:HeightParameterRelations} lay out basic consequences of the preceding definitions.
\begin{lemma}
\label{lem:HeightRangeAllPoints}
Let $(\bvl\X\F{h}, \hat\beta)$ be a partial origami model of $(\X, \beta)$. Then in the notation of \cref{def:HeightsGeneralPositionIsolatedCollisions}, $H_i$ is the height of the horizontal segments of $\hat\beta$ after a collision with $\B^h(F_i)$ and, if $i \leq k - 1$, before any collision with $\B^h(F_{i+1})$. Furthermore, the height of every point on $\hat\beta$ lies in the interval $[\Hmin, \Hmax]$.
\end{lemma}
\begin{proof}
The claim about $H_i$ can be deduced by computing with \cref{prop:FoldedTrajectoryProjection}\ref{stmt:HorizontalSegmentHeight}; thus the rest of the lemma holds for the horizontal segments of $\hat\beta$. The lemma must also hold over the crease-like segments that are not the final segment, as they lie in the convex hull of the horizontal segments. If the final segment is crease-like and simulates folding around $F_i$, then a similar argument implies that the heights of its points range between $H_{i-1}$ and $H_i$, which also satisfies the lemma.
\end{proof}
\begin{lemma}
\label{lem:HeightParameterRelations}
Let $\beta$ be a billiard trajectory in an $n$-polytope, and let $\F$ be a subset of the faces of that polytope. Let $h : \F \to \R$ be in general position with respect to $\beta$. Then $\im h \subset [\Hmin(h, \beta), \Hmax(h, \beta)]$ and $0 < \Delta(h, \beta) \leq \Hmax(h, \beta) - \Hmin(h, \beta)$.
\end{lemma}
\begin{proof}
The first claim follows from \cref{eq:HeightFunctionRecurrence}. The hypothesis of general position implies that $\Delta > 0$. The remaining inequality follows from the fact that each $\abs{h(F_i) - H_i}$ is the distance between two values lying in the interval $[\Hmin, \Hmax]$.
\end{proof}
The definition of ``isolated collisions'' is meant to help ensure that when extending $\hat\beta$ as a billiard trajectory inside a partial origami model, it will only collide with the interiors of faces of $\hat\X$. Before demonstrating that, we need the following result that controls the position of the collision point $\hat\beta(t)$ in terms of $\beta(t)$.
\begin{lemma}
\label{lem:IsolatedCollisionsDeviationFromProjection}
Let $(\bvl\X\F{h}, \hat\beta)$ be a partial origami model of $(\X, \beta)$ such that $\beta$ has isolated collisions with respect to $h$. Then $0 < d_\sigma \leq \Delta$ for each crease-like segment $\sigma$ of $\hat\beta$, and $\dist{\pr(\hat\beta(s)), \beta(s)} \leq \Delta$ for all times $s$ in the domain of $\hat\beta$.
\end{lemma}
\begin{proof}
Let the partial origami model be defined up to time $t$. The first claim follows from \cref{lem:HeightRangeAllPoints}.
To demonstrate the second claim, it suffices to prove the case where the final segment of $\hat\beta : [a,t] \to \hat\X$ is a crease-like segment $\sigma_0$ that simulates folding around some $F_0 \in \F$, as the rest would follow from \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningProperties}. Let $\sigma_0$ begin at a point $\hat\beta(t_0)$. Consider any $s \in [t_0, t]$. Suppose for now that $\beta$ makes no collision during the time interval $[t_0, s]$, except possibly with $F_0$. Then the proof of \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningProperties} still works to show that $\dist{\pr(\hat\beta(s)), \beta(s)} \leq \max_\sigma d_\sigma \leq \Delta$. Otherwise, suppose that $r \in [t_0, s]$ is the earliest time at which $\beta$ collides with a face other than $F_0$, namely $F_1$. Then like before, the proof of \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningProperties} would still apply to $\hat\beta|_{[a,r]}$ to show that $\dist{\pr(\hat\beta(r)), \beta(r)} \leq \Delta$. But then we may apply \cref{prop:FoldedTrajectoryProjection}\ref{stmt:CreaselikeSegmentConstDistance} to $\sigma_0$ to show that $\pr(\hat\beta(r))$ lies within distance $\Delta$ from the supporting hyperplane of $F_0$. The triangle inequality implies that $\beta(r)$ lies within distance $2\Delta \leq \Delta + \Hmax - \Hmin$ (due to \cref{lem:HeightParameterRelations}) from the supporting hyperplane of $F_0$. However, $\beta(r) \in F_1$, so that would contradict the hypothesis of isolated collisions.
\end{proof}
\begin{lemma}
\label{lem:OrigamiModelWedgeEdge}
Let $\beta : [a,b] \to \X$ be a billiard trajectory in an $n$-polytope $\X$, and suppose that $(\bvl\X\F{h}, \hat\beta)$ is some partial origami model of $(\X, \beta)$, such that $h$ is in general position with respect to $\beta$ and $\beta$ has isolated collisions with respect to $h$. Then $\hat\beta$ cannot end at a point that lies in the $(n-1)$-cell of $\B^h(F)$ for any $F \in \F$.
\end{lemma}
\begin{proof}
Let the partial origami model be defined up to time $t$. Suppose for the sake of contradiction that $\hat\beta(t)$ lies in an $(n-1)$-cell of $\B^h(F)$ for some $F \in \F$. Two key implications are that $\height(\hat\beta(t)) = h(F)$ and $\pr(\hat\beta(t)) \in F$. The latter implication follows from the fact that the $(n-1)$-cell in question projects under $\pr$ to the supporting hyperplane of $F$, together with \cref{lem:BevelingCrossSection}. There are two cases to consider, depending on whether the final segment of $\hat\beta$ is horizontal or crease-like; we will derive a contradiction in both cases.
\begin{itemize}
\item Suppose that the final segment of $\hat\beta$ is horizontal. Then by the definition of an origami model and \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningProperties}, $\beta(t) = \pr(\hat\beta(t)) \in F$. Since $t < b$, this implies that $\beta$ collides with $F$ at time $t$. In the notation of \cref{def:HeightsGeneralPositionIsolatedCollisions}, \cref{lem:HeightRangeAllPoints} implies that $\height(\hat\beta(t)) = H_{i-1}$ where $F_i = F$. However, that would contradict the hypothesis of general position.
\item Suppose that the final segment of $\hat\beta$ is a crease-like segment $\sigma_0$ that simulates folding around some $F_0 \in \F$. Since \cref{prop:FoldedTrajectoryProjection}\ref{stmt:CreaselikeSegmentConstDistance} guarantees that $\pr(\hat\beta(t))$ must lie at positive distance from the supporting hyperplane of $F_0$, but $\pr(\hat\beta(t)) \in F$, we conclude that $F \neq F_0$.
\Cref{lem:IsolatedCollisionsDeviationFromProjection} yields $\dist{\pr(\hat\beta(t)), \beta(t)} \leq \Delta$; thus $\beta(t)$ lies within distance $\Delta$ of the supporting hyperplane of $F$. Moreover, \cref{prop:FoldedTrajectoryProjection}\ref{stmt:CreaselikeSegmentConstDistance} and \cref{lem:IsolatedCollisionsDeviationFromProjection} imply that $\pr(\hat\beta(t))$ lies at distance $d_{\sigma_0} \leq \Delta$ from the supporting hyperplane of $F_0$. By the triangle inequality, $\beta(t)$ must lie within distance $2\Delta \leq \Delta + \Hmax - \Hmin$ (due to \cref{lem:HeightParameterRelations}) of the supporting hyperplanes of both $F_0$ and $F$. However, this would contradict the hypothesis of isolated collisions.
\end{itemize}
\end{proof}
The preceding results can be applied to prove the following proposition, which asserts that billiard trajectories in a partial origami model have a simple geometric structure when we have general position and isolated collisions.
\begin{proposition}
\label{prop:OrigamiModelProperties}
Suppose that $(\hat\X = \bvl\X\F{h}, \hat\beta)$ is a partial origami model of $(\X, \beta)$, where $h$ is in general position with respect to $\beta$, and $\beta$ has isolated collisions with respect to $h$. Then the following properties hold:
\begin{statement}
\item\label{stmt:ProperCollisions} $\hat\beta$ ends either in the interior of $\hat\X$ or in the interior of some face of $\hat\X$.
\item\label{stmt:CreaselikeIsolation} For each crease-like segment $\sigma$ of $\hat\beta$ that simulates folding around $F \in \F$, each point on ${\pr} \circ \sigma$ lies at distance greater than $\Hmax - \Hmin$ from the supporting hyperplane of any other face of $\X$.
\item\label{stmt:CreaselikeStayAtWedge} Each crease-like segment $\sigma$ of $\hat\beta$ that simulates folding around $F \in \F$ can only intersect $\partial\hat\X$ at points in $\partial\B^h(F)$.
\end{statement}
\end{proposition}
\begin{proof}
Let the partial origami model be defined up to time $t$.
\Cref{stmt:ProperCollisions}: This can only be violated if $\hat\beta(t)$ lies in an $(n-1)$-cell of $\hat\X$. \Cref{lem:OrigamiModelWedgeEdge} rules out some of those possibilities, leaving only the case where $\hat\beta(t) \in \partial\B^h(F) \cap \partial\B^h(F')$ for some distinct faces $F$ and $F'$ of $\X$. (For instance, in \cref{fig:ExtendPastCollision}(c), $\hat\beta(t) \in \partial\B^h(F_1) \cap \partial\B^h(F_2)$ and in \cref{fig:ExtendPastCollision}(d), $\hat\beta(t) \in \partial\B^h(F_1) \cap \partial\B^h(F_3)$.)
\Cref{lem:HeightParameterRelations} implies that $\im h \subset [\Hmin, \Hmax]$, so applying \cref{lem:BevelingCrossSection} in conjunction with \cref{lem:HeightRangeAllPoints} tells us that $\pr(\hat\beta(t))$ would have to lie within distance $\Hmax - \Hmin$ from the supporting hyperplanes of $F$ and $F'$. We would also have $\dist{\pr(\hat\beta(t)), \beta(t)} \leq \Delta$, due to \cref{lem:IsolatedCollisionsDeviationFromProjection}. The triangle inequality would then imply that $\beta(t)$ lies within distance $\Delta + \Hmax - \Hmin$ of both of those supporting hyperplanes, contradicting the hypothesis of isolated collisions.
\Cref{stmt:CreaselikeIsolation}: Let $\sigma$ be a crease-like segment of $\hat\beta$ that simulates folding around $F$. Suppose that some point $y = \pr(\hat\beta(s)) \in \pr(\sigma)$ is within distance $\Hmax - \Hmin$ from the supporting hyperplane of some $F'$; we will prove that $F' = F$. By \cref{prop:FoldedTrajectoryProjection}\ref{stmt:CreaselikeSegmentConstDistance} and \cref{lem:IsolatedCollisionsDeviationFromProjection}, $y$ is also within distance $\Delta$ from the supporting hyperplane of $F$. By applying the triangle inequality together with \cref{lem:IsolatedCollisionsDeviationFromProjection}, we find that $\beta(s)$ lies within distance $2\Delta \leq \Delta + \Hmax - \Hmin$ (due to \cref{lem:HeightParameterRelations}) from the supporting hyperplane of $F$ and within distance $\Delta + \Hmax - \Hmin$ from the supporting hyperplane of $F'$. Thus the hypothesis of isolated collisions implies that $F' = F$.
\Cref{stmt:CreaselikeStayAtWedge}: Suppose that a crease-like segment $\sigma$ intersects $\partial\hat\X$ at some point $y$. Then $y \in \partial\B^h(F)$ for some face $F$ of $\X$. As in the proof of \ref{stmt:ProperCollisions}, \cref{lem:BevelingCrossSection} and \cref{lem:HeightRangeAllPoints} would require $\pr(y)$ to lie within distance $\Hmax - \Hmin$ of the supporting hyperplane of $F$. Hence \cref{stmt:CreaselikeIsolation} would require that $\sigma$ simulate folding around $F$.
\end{proof}
The properties of beveling and partial origami models that we have proved so far will help us prove our main technical theorem, which we will use to construct origami models:
\begin{theorem}[Construction of origami model]
\label{thm:OrigamiModelConstruction}
Let $\X$ be an $n$-polytope with a billiard trajectory $\beta : [a,b] \to \X$ that collides with each face at most once. Let $\F$ be a subset of the faces of $\X$, and let $h : \F \to \R$ be in general position and closed with respect to $\beta$, such that $\beta$ also has isolated collisions with respect to $h$. In addition, make the following assumptions: \begin{itemize}
\item $(\beta|_{[a, c]}, 0) \subset \hat\X = \bvl\X\F{h}$ for some $c > a$.
\item $\beta(b)$ lies at a distance greater than $2\Delta$ from the supporting hyperplane of every face that $\beta$ collides with.
\end{itemize}
Then $(\X,\beta)$ has an origami model $(\hat\X, \hat\beta)$ such that $\hat\beta(b) = (\beta(b),0)$ and $\hat\beta'(b) = (\beta'(b),0)$. Moreover, $\hat\beta$ collides with each face of $\hat\X$ at most once.
\end{theorem}
\begin{proof}
Let $\beta$ collide with the faces $F_1, \dotsc, F_k$. For each given value of $t \in [a,b]$, there may exist a billiard trajectory $\hat\beta : [a,t] \to \hat\X$ with $\hat\beta(a) = (\beta(a), 0)$ and $\hat\beta'(a) = (\beta'(a), 0)$. If it exists, then it is unique. In fact, $\hat\beta$ should exist as a billiard trajectory for $t = c$, by a hypothesis of the theorem. Thus $(\hat\X, \hat\beta)$ is a partial origami model of $(\X, \beta)$ that is defined up to time $c$. Intuitively, our strategy to prove the theorem will be to start with this partial origami model and extend $\hat\beta$ to longer and longer billiard trajectories $\hat\beta : [a,t] \to \hat\X$ such that $(\hat\X, \hat\beta)$ continues to be a partial origami model of $(\X, \beta)$, which is defined up to greater and greater values of time until it is defined up to time $b$.
Now suppose that $(\hat\X, \hat\beta)$ is a partial origami model of $(\X, \beta)$ that is defined up to time $t$. Whenever $\hat\beta(t)$ and $\beta(t)$ both lie in the interiors of $\hat\X$ and $\X$ respectively, $\hat\beta$ can be extended along its final segment while preserving the definition of a partial origami model. Thus we only have to check that continued extension is possible when $\hat\beta(t) \in \partial\hat\X$ or $\beta(t) \in \partial\X$.
First suppose that $\beta(t)$ is the point of collision with $F_i$. There are two cases depending on whether $\hat\beta(t)$ lies on a horizontal segment or on a crease-like segment. We will perform the extension in both cases.
\begin{enumerate}
\item Suppose that $\hat\beta(t)$ lies in the interior or at the ending point of a horizontal segment. Then the definition of a partial origami model and \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningProperties} imply that $\pr(\hat\beta(t)) = \beta(t)$. We must have $F_i \notin \F$, otherwise $\hat\beta(t)$ would have to lie on an $(n-1)$-cell of $\B^h(F_i)$, contradicting \cref{lem:OrigamiModelWedgeEdge}. By the definition of the beveling $\bvl\X\F{h}$, $\hat\beta(t) \in \partial\B^h(F_i)$. By \cref{prop:OrigamiModelProperties}\ref{stmt:ProperCollisions}, $\hat\beta(t)$ lies in the interior of a face of $\hat\X$. Therefore $\hat\beta$ can be extended slightly into a uniquely determined billiard trajectory $\alpha : [a, t + \epsilon] \to \hat\X$ such that $(\hat\X, \alpha)$ is a partial origami model of $(\X, \beta)$, defined up to time $t + \epsilon$. (Near the extension, $\hat\beta$ and $\beta$ would look like the $\hat\beta$ and ${\pr} \circ \hat\beta$ depicted in \cref{fig:CollisionTypes}(b).)
\item Suppose that $\hat\beta(t)$ lies on a crease-like segment $\sigma$. It would suffice to prove that $\hat\beta(t)$ lies in the interior of $\hat\X$, as that would allow us to extend $\hat\beta$ slightly along its final crease-like segment, without introducing new collisions, into a billiard trajectory $\alpha : [a, t + \epsilon] \to \hat\X$ for some $0 < \epsilon < b - t$. Moreover, for sufficiently small $\epsilon$, we can verify that $(\hat\X, \alpha)$ is a partial origami model of $(\X, \beta)$, defined up to time $t + \epsilon$.
We may apply \cref{lem:IsolatedCollisionsDeviationFromProjection} to deduce that $\pr(\hat\beta(t))$ lies within distance $\Delta \leq \Hmax - \Hmin$ (due to \cref{lem:HeightParameterRelations}) from $\beta(t) \in F_i$. Consequently, \cref{prop:OrigamiModelProperties}\ref{stmt:CreaselikeIsolation} requires that $\sigma$ simulates folding around $F_i$---that is, $F_i \in \F$. Examining the proof of \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningProperties} reveals that since $\beta(t)$ is a collision point on $F_i \in \F$, $\hat\beta(t)$ must lie in the interior of $\B^h(F_i)$. (For example, in the context of \cref{fig:UnravelingExtensionsIntersectPlanes}, if $\beta(t) = x_1$ then $\hat\beta$ would consist of $\eta_0$ and the first half of $\sigma$, ending at $\hat\beta(t)$ which is the midpoint of $\sigma$.) Together with \cref{prop:OrigamiModelProperties}\ref{stmt:CreaselikeStayAtWedge}, this implies that $\hat\beta(t)$ lies in the interior of $\hat\X$.
\end{enumerate}
Next, suppose that $\beta(t)$ lies in the interior of $\X$ but that $\hat\beta(t) \in \partial\hat\X$. In particular, \cref{prop:OrigamiModelProperties}\ref{stmt:ProperCollisions} guarantees that $\hat\beta(t)$ lies in the interior of a face of $\hat\X$ that is inherited from $\B^h(F)$ for some face $F$ of $\X$. Then we can extend $\hat\beta$ slightly to a billiard trajectory $\alpha : [a, t + \epsilon] \to \hat\X$ for some $\epsilon \in (0, b - t)$. We will prove that for sufficiently small $\epsilon$, $(\hat\X, \alpha)$ is a partial origami model of $(\X, \beta)$ that is defined up to time $t + \epsilon$.
If $F \notin \F$, then $\pr(\hat\beta(t))$ lies in the supporting hyperplane of $F$. The final segment of $\hat\beta$ has to be horizontal, otherwise \cref{prop:OrigamiModelProperties}\ref{stmt:CreaselikeIsolation} would imply that the final segment simulates folding around $F$, but that would contradict our assumption that $F \notin \F$. Then \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningProperties} implies that $\beta(t) = \pr(\hat\beta(t))$. Hence we are in case (1) that has been handled earlier.
Otherwise $F \in \F$. Suppose that the final segment of $\hat\beta$ is horizontal. Then \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningProperties} implies that $\pr(\hat\beta(t)) = \beta(t)$. Therefore $\hat\beta$ is extended to $\alpha$ along a crease-like segment which is disregarded by the definition of a partial origami model. Thus $(\hat\X, \alpha)$ is a partial origami model of $(\X, \beta)$.
On the other hand, suppose that $F \in \F$ and the final segment $\sigma$ of $\hat\beta$ is crease-like. \Cref{prop:OrigamiModelProperties}\ref{stmt:CreaselikeStayAtWedge} implies that $\sigma$ must simulate folding around $F$ and in particular it must start from one of the faces inherited from $\B^h(F)$ and end at the other. Thus if we extend $\hat\beta$ slightly along a new segment $\eta_1$ into a billiard trajectory $\alpha : [a, t + \epsilon] \to \hat\X$ for some $\epsilon \in (0, b - t)$, $\alpha$ remains a folded billiard trajectory.
Now we verify that $\alpha$ satisfies the assumption of \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningFoldedTrajectory}. Let $\eta_0$ be the segment of $\hat\beta$ preceding $\sigma$, and let $\hat\beta(s)$ be the point of collision of $\hat\beta$ between $\eta_0$ and $\sigma$. Applying \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningFoldedTrajectory}--\ref{stmt:FlatteningProperties} to $\hat\beta|_{[a,s']}$ for $s' \leq s$ implies that $\beta(s) = \pr(\hat\beta(s))$ and $\beta'(s) = \lim_{r \to s^-} \pr(\hat\beta'(r))$. It suffices to verify the assumption of \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningFoldedTrajectory} for $\eta_0$ and $\eta_1$. By the first part of \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningFoldedTrajectory}, ${\pr} \circ \eta_0$ and ${\pr} \circ \eta_1$ extend into lines that intersect the supporting hyperplane of $F$ at the same point $x$. To show that $x$ lies in the interior of $F$, it suffices to show that as $\beta$ continues past time $s$, the first face $F_i$ that it collides with must actually be $F$. It must make such a collision and it should occur at a time $t_i \leq t$, otherwise a contradiction would arise from the fact that $\hat\beta$ and $\beta$ travel at the same speed, thus $\beta$ would collide with $F$ first before time $t$. (This can be deduced from the unraveling of the parts of $\hat\beta$ and $\beta$ from $\eta_0$ and ${\pr} \circ \eta_0$ onwards; in \cref{fig:ComparingTrajWithOrigamiModel}, our assertion can be interpreted as saying that $\ell$ must hit $F_1$ before $\hat\ell$ finishes traversing $\hat\ell_1$.) By \cref{lem:IsolatedCollisionsDeviationFromProjection}, $\dist{\pr(\hat\beta(t_i)), \beta(t_i)} \leq \Delta$. In other words, ${\pr} \circ \sigma$ is within distance $\Delta$ from the supporting hyperplane of $F_i$. \Cref{lem:HeightParameterRelations} and \cref{prop:OrigamiModelProperties}\ref{stmt:CreaselikeIsolation} imply that $F_i = F$. Therefore $x$ is actually a point of collision of $\beta$, which must lie in the interior of $F$.
This means that $\beta$ continues past $\beta(s)$ to collide with $F_i$ at $x$. It then reflects as a billiard trajectory should and, for similar reasons as above, it cannot make any more collisions until time $t$. Moreover, recall our assumption that $\beta(t)$ lies in the interior of $\X$. Therefore it can be verified that $(\hat\X, \alpha)$ is a partial origami model of $(\X, \beta)$. (Near the extension, $\hat\beta$ and $\beta$ would look like the $\hat\beta$ and ${\pr} \circ \hat\beta$ depicted in \cref{fig:CollisionTypes}(a).)
Therefore we can eventually extend to get a complete origami model $(\hat\X, \hat\beta)$ of $(\X, \beta)$. Now we must show that $\hat\beta(b) = (\beta(b),0)$ and $\hat\beta'(b) = (\beta'(b),0)$. By \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningProperties}, \cref{lem:HeightRangeAllPoints} and our hypothesis that $h$ is closed with respect to $\beta$, it suffices to show that the final segment of $\hat\beta$ is horizontal. Indeed, \cref{lem:IsolatedCollisionsDeviationFromProjection} implies that $\dist{\pr(\hat\beta(b)), \beta(b)} \leq \Delta$. If the final segment of $\hat\beta$ was crease-like and simulated folding around $F_i$, then \cref{prop:FoldedTrajectoryProjection}\ref{stmt:CreaselikeSegmentConstDistance} and \cref{lem:IsolatedCollisionsDeviationFromProjection} would imply that $\pr(\hat\beta(b))$ lies within distance $\Delta$ of the supporting hyperplane of $F_i$. The triangle inequality would then imply that $\beta(b)$ lies within distance $2\Delta$ of the supporting hyperplane of $F_i$, contradicting one of the assumptions in the theorem statement.
By the definition of a complete origami model and the fact that $\hat\beta$'s final segment is horizontal, $\hat\beta$ must simulate colliding at $F_1, \dotsc, F_k$, which is a non-repeating sequence of faces, so $\hat\beta$ never collides twice with the same face of $\hat\X$.
\end{proof}
The preceding theorem helps us construct origami models $(\hat\X, \hat\beta)$ of $(\X, \beta)$. On top of that, since we are interested in constructing simple closed geodesics and simple stable figure-eights, the following lemma gives some conditions which guarantee that when $\beta$ is simple, $\hat\beta$ will also be simple.
\begin{lemma}
\label{lem:SimpleOrigamiModel}
Suppose that $(\bvl\X\F{h}, \hat\beta)$ is an origami model of $(\X, \beta)$, where $\beta$ is simple and collides with each face of $\X$ at most once. Assume also that $h$ is in general position with respect to $\beta$, and that $\beta$ has isolated collisions with respect to $h$. For each face $F$ of $\X$ that $\beta$ collides with, let $\beta_F$ be the intersection of $\im\beta$ with the neighbourhood of radius $\Delta(h,\beta)$ around the supporting hyperplane of $F$. Assume that each $\beta_F$ only intersects the two segments of $\beta$ that come immediately before and after the collision at $F$. Then $\hat\beta$ is also simple.
\end{lemma}
\begin{proof}
We will assume that $\beta(a) \neq \beta(b)$; the proof will be similar for the other case. Suppose for the sake of contradiction that $\hat\beta$ self-intersects. Then $\hat\beta$ must have two non-adjacent segments that intersect each other, where two segments of $\hat\beta$ are considered to be adjacent if $\hat\beta$ traverses one of them immediately after the other. They cannot both be horizontal segments, otherwise by the definition of an origami model and \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningFoldedTrajectory} they would project to subsets of non-adjacent segments of $\beta$, which cannot intersect since $\beta$ is simple.
They also cannot both be crease-like segments. Otherwise, they would simulate folding around two distinct $F_i, F_j \in \F$---where $\beta$ collides with $F_1, \dotsc, F_k$---because $\beta$ collides with each of those faces exactly once, and $\hat\beta$ simulates colliding at the same faces in the same order. If the crease-like segments intersect at $x$, then \cref{prop:FoldedTrajectoryProjection}\ref{stmt:CreaselikeSegmentConstDistance} and \cref{lem:IsolatedCollisionsDeviationFromProjection} would require $\pr(x)$ to lie within distance $\Delta \leq \Hmax - \Hmin$ (due to \cref{lem:HeightParameterRelations}) of the supporting hyperplanes of both $F_i$ and $F_j$, but that would contradict \cref{prop:OrigamiModelProperties}\ref{stmt:CreaselikeIsolation}.
Hence it remains to rule out the case where some crease-like segment $\sigma$ intersects a non-adjacent horizontal segment $\eta$. Let $\sigma$ simulate folding around $F_i$. If $\sigma$ and $\eta$ intersect at some point $x$, then, as before, $\pr(x)$ would have to lie within distance $\Delta$ of the supporting hyperplane of $F_i$, thus $\pr(x) \in \beta_{F_i}$. However, since $x$ also lies on a horizontal segment, \cref{prop:FoldedTrajectoryProjection}\ref{stmt:FlatteningProperties} guarantees that $\pr(x)$ lies on a segment of $\beta$, which cannot be one of the segments that come immediately before or after the collision at $F_i$, otherwise $\sigma$ would have to be adjacent to $\eta$. This contradicts our hypothesis on $\beta_{F_i}$.
\end{proof}
\subsection{Construction of index-zero closed geodesics in convex hypersurfaces}
\label{sec:StableClosedGeodesics}
Let us apply bevelings and origami models to construct index-zero closed geodesics in convex hypersurfaces. First we derive a corollary of our earlier results:
\begin{corollary}
\label{cor:TwistedBilliardTrajs}
For every integer $n \geq 2$, there exists an $n$-polytope $\X^n$ that has a simple and periodic billiard trajectory $\beta^n : [a,b] \to \X^n$ whose parallel transport map is $1 \oplus (-I_{n-1})$, with respect to the decomposition $\R^n = \vspan\{(\beta^n)'(a)\} \oplus \vspan\{(\beta^n)'(a)\}^\perp$. Moreover, the number of collisions of $\beta^n$ has opposite parity from $n$, and $\beta^n$ collides with each face of $\X^n$ at most once.
\end{corollary}
\begin{proof}
We will construct $\X^n$ and $\beta^n$ inductively using \cref{thm:OrigamiModelConstruction} and prove the necessary properties by induction. For $n = 2$, we can choose $\X^2$ and $\beta^2$ to be the $\X$ and $\beta$ from \cref{fig:Beveling}(a).
For any $n \geq 2$, assume that the statement of the corollary holds for some $\X^n$ and $\beta^n$. Let $\F = \{F_1, F_2, F_3\}$ be any set of 3 faces of $\X^n$ that $\beta^n$ collides with, indexed in the order of collision. For some $\delta > 0$, define the function $h : \F \to \R$ by $h(F_1) = \delta$, $h(F_2) = 0$ and $h(F_3) = -\delta$. With these settings, one can compute $\Delta(h, \beta^n) = 2\delta$, $\Hmax(h, \beta^n) = 2\delta$ and $\Hmin(h, \beta^n) = -2\delta$. Hence $\Delta + \Hmax - \Hmin = 6\delta$. Some geometry shows that we can choose $\delta$ small enough such that the hypotheses of \cref{thm:OrigamiModelConstruction} are satisfied. Since $\beta^n$ is simple and it collides with each face of $\X^n$ at most once, we can also choose $\delta$ small enough to satisfy the hypotheses of \cref{lem:SimpleOrigamiModel}. Consequently we get an origami model $(\X^{n+1} = \bvl{\X^n}\F{h}, \beta^{n+1})$ of $(\X^n,\beta^n)$ such that $\beta^{n+1}$ is periodic and simple, and it collides with each face of $\X^{n+1}$ at most once. (As an example, for $n = 2$, we can choose $\delta = 0.05$ to get \cref{fig:Beveling,fig:FoldedBilliardTrajectory}.)
By applying \cref{lem:FlatteningParallelTransport} along the lines of \cref{rem:OrigamiModelParallelTransport}, the parallel transport map of $\beta^{n+1}$ is $1 \oplus (-I_{n-1}) \oplus (-1)^3 = 1 \oplus (-I_n)$ with respect to the required decomposition of $\R^{n+1}$. Moreover, the number of collisions of $\beta^{n+1}$ will be 3 more than that of $\beta^n$, and will have opposite parity to $n + 1$.
\end{proof}
Now we are ready to prove \cref{thm:StableClosedGeodesicPosCurv}.
\begin{proof}[Proof of \cref{thm:StableClosedGeodesicPosCurv}]
For each odd integer $n \geq 3$, let $X^n$ and $\beta^n$ be the $n$-polytope and periodic billiard trajectory produced in \cref{cor:TwistedBilliardTrajs}. By that corollary, $\beta^n$ has an even number of collisions, and is a simple closed curve. Hence it corresponds to a simple closed geodesic $\gamma$ in $\dbl\X^n$. The parallel transport map is $1 \oplus (-I_{n - 1})$ with respect to the decomposition of $\R^n$ given by a tangent vector of $\beta^n$ and its orthogonal complement, which implies that a tubular neighbourhood of $\gamma$ is in fact an $n$-dimensional maximally-twisted tube $T$. As explained in \cref{sec:Intro_CoreCurves}, the flat metric of $T$ means that $\gamma$ has index zero. $\gamma$ is also non-degenerate by \cref{Lem:MaxTwistStableGeodesic}.
Since $\gamma$ is simple and it is also a stable geodesic bouquet with one loop, we may apply \cite[Propositions~4.7 and 4.8]{Cheng_StableGeodesicNets} to prove the desired result.
\end{proof}
\begin{remark}[Parity sequences for origami models]
\label{rem:ParityControl}
To prove \cref{cor:TwistedBilliardTrajs} we iterated the construction of an origami model, each time folding along 3 faces. Each iteration increases the dimension of the space acted on by the parallel transport map by 1, and folding along an odd number of faces is important as it ensures that the parallel transport map acts by negation on that additional dimension. As a result, in the odd dimensions we have maximally-twisted tubes. Folding by an even number of faces would yield a parallel transport map that fixes some vector orthogonal to the billiard trajectory, potentially giving a parallel vector field along the corresponding closed geodesic. This could give rise to a length-decreasing variation of the corresponding geodesic after smoothing the double of the polytope to a positively-curved manifold, along the lines of the proof of Synge's theorem \cite{Synge_GeodesicPosCurv}. Hence in the proof of \cref{cor:TwistedBilliardTrajs}, the parity of the number of collisions of the initial billiard trajectory $\beta^2$, followed by the parities of the number of faces we fold along to get $\beta^n$, is ``odd, odd$,\dotsc$, odd''---a sequence of $n-1$ odd numbers. The number of collisions in the final billiard trajectory is the sum of these odd numbers, which is even when $n$ is odd but odd otherwise. When the sum is even, the billiard trajectory corresponds to a closed geodesic that helps us to prove \cref{thm:StableClosedGeodesicPosCurv}. This does not occur when $n$ is even, so our construction does not contradict Synge's theorem.
The above discussion suggests that applying the construction of origami models may require careful choices on the parity of the number of collisions of the initial billiard trajectory and the number of faces we fold along. We wish to fold along an odd number of faces whenever possible to ``add twisting'' and obstruct the existence of parallel vector fields along the corresponding closed geodesic. However, we need these numbers to add up to an overall even number of collisions in the final billiard trajectory, so that it corresponds to a geodesic loop in the double. As we apply this construction to construct stable figure-eights in Riemannian spheres of every dimension, the latter constraint may sometimes force us to fold along an even number of faces. In such a situation, we will explain how to mitigate the problem by choosing the right dimensions to fold along an even number of faces.
\end{remark}
\section{Construction of Stable Figure-eights}
\label{sec:Stable2Loop}
From the discussion in \cref{sec:Semigroup}, we can reduce \cref{thm:Stable2LoopPosCurv} to the construction of a 3-polytope, 4-polytope, and 5-polytope whose doubles contain stable figure-eights. We will prove those cases in this section. Our arguments will frequently start with a billiard loop in a convex polygon and then apply the origami model construction iteratively so that the resulting billiard loop corresponds to a geodesic loop in the final stable figure-eight.
As explained in \cref{sec:Intro_FigureEights}, each geodesic loop $\gamma$ in a stationary geodesic bouquet can be associated with a subspace of the tangent space at the basepoint called the \emph{parallel defect kernel}, denoted by $\ker\opd\gamma$ and defined in \cite[Definition~3.2]{Cheng_StableGeodesicNets}. The bouquet is stable if the parallel defect kernels of all of its loops intersect only at the origin and the curvature near the bouquet is sufficiently small. When we derive the loops of the bouquet from origami models, their parallel defect kernels have a very specific and convenient form that can be computed using the following pair of lemmas.
\begin{lemma}
\label{lem:2DBilliardParallelTransportParity}
Consider a convex polygon $\X \subset \R^2$ with a billiard trajectory $\beta : [a,b] \to \X$. Then after canonically identifying tangent spaces with $\R^2$, the parallel transport map $T_{\beta(a)}\X \to T_{\beta(b)}\X$ is the reflection in $O(2)$ that sends $\beta'(a)$ to $\beta'(b)$ if $\beta$ collides an odd number of times, and it is the rotation in $SO(2)$ that sends $\beta'(a)$ to $\beta'(b)$ if $\beta$ collides an even number of times.
\end{lemma}
\begin{proof}
The reflection and rotation stated in the lemma are the only possibilities given that the parallel transport maps are products of reflections in $O(2)$ that send the velocity vector of the trajectory before each collision to the velocity vector right after. The determinant of the parallel transport map is determined by the parity of the number of reflection factors.
\end{proof}
\begin{lemma}
\label{lem:OrigamiModelParallelDefectKernelParity}
Consider a convex polygon $\X^2 \subset \R^2$ with a billiard loop $\beta^2 : [0,1] \to \X^2$ based at $p$ such that $(\beta^2)'(0)$ is linearly independent from $v = (\beta^2)'(1)$. Consider a sequence $(\X^3, \beta^3), (\X^4, \beta^4), \dotsc, (\X^n, \beta^n)$ in which $(\X^{m+1} = \bvl{\X^m}{\F_m}{h_m}, \beta^{m+1})$ is an origami model of $(\X^m, \beta^m)$ for $2 \leq m \leq n - 1$, and suppose that the following conditions hold:
\begin{itemize}
\item $\beta^m(1) = (p, 0, \dotsc, 0)$ and $(\beta^m)'(1) = (v, 0, \dotsc, 0)$ for all $m$.
\item Each $\beta^m$ collides with every face in $\F_m$.
\item Let $c_m$ be the number of collisions of $\beta^m$, and suppose that $c_n$ is even, so that $\beta^n$ corresponds to a geodesic loop $\gamma$ in $\dbl\X^n$.
\end{itemize}
Then under the identification of $T_{\gamma(0)}\dbl\X^n$ with $\R^n$ induced by the quotient map $\dbl\X^n \to \X^n \subset \R^n$,
\begin{equation}
\ker\opd\gamma = V_2 \oplus V_3 \oplus \dotsb \oplus V_n,
\end{equation}
where $V_2 = \vspan\{(\beta^n)'(0) - (-1)^{c_2} (\beta^n)'(1)\}$ and for each $m \geq 3$, $V_m$ is the span of the $m^\text{th}$ standard basis vector of $\R^n$ if $\card{\F_{m-1}}$ is even, and $V_m = \{0\}$ if $\card{\F_{m-1}}$ is odd.
\end{lemma}
\begin{proof}
If $P_m : \R^m \to \R^m$ is the parallel transport map of $\beta^m$ (after canonically identifying tangent spaces with the ambient Euclidean space), then \cref{rem:OrigamiModelParallelTransport} and \cref{lem:FlatteningParallelTransport} imply that
\begin{equation}
P_n = \underbrace{P_2}_{P_n|_W} \oplus \underbrace{(-1)^{\card{\F_2}} \oplus (-1)^{\card{\F_3}} \oplus \dotsb \oplus (-1)^{\card{\F_{n-1}}}}_{P_n|_{W^\perp}},
\end{equation}
where $W = \vspan\{(\beta^n)'(0), (\beta^n)'(1)\} = \R^2 \times \{(0, \dotsc, 0)\}$ is an invariant subspace of $P_n$. The form of $V_2$ comes from the corresponding direct summand in Proposition~3.5 from \cite{Cheng_StableGeodesicNets}, after considering \cref{lem:2DBilliardParallelTransportParity}.
For $m \geq 3$, $V_m$ is obtained by computing the eigenspace of $P_n|_{W^\perp}$ that corresponds to the eigenvalue 1, according to \cite[Proposition~3.5]{Cheng_StableGeodesicNets}.
\end{proof}
\Cref{lem:OrigamiModelParallelDefectKernelParity} shows that if we wish to iterate the origami model construction to produce geodesic loops with parallel defect kernels of low dimension, then it is important to carefully choose the parities of several integers, reminiscent of \cref{rem:ParityControl}. For instance, the parity of $c_2$ controls $V_2$. We would also prefer most origami models to fold along an odd number of faces so that as many of the $V_m$'s are 0-dimensional as possible; but we also need $c_n = c_2 + \card{\F_2} + \dotsb + \card{\F_{n-1}}$ to be even so that the resulting billiard loop $\beta^n$ corresponds to a geodesic loop in $\dbl\X^n$. Hence we should pay attention to the \emph{parity sequence} of a series of origami model constructions, by which we mean the sequence of parities of $c_2, \card{\F_2}, \dotsc, \card{\F_{n-1}}$.
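
To make this parity bookkeeping concrete, the following small computational sketch (ours, purely illustrative; the face counts in it are hypothetical and not taken from any construction in this paper) builds the diagonal block $P_n|_{W^\perp}$ for a given list of values $\card{\F_m}$ and reads off which coordinate directions $V_m$ survive, exactly as \cref{lem:OrigamiModelParallelDefectKernelParity} predicts.
\begin{verbatim}
# Sketch: which V_m are nonzero for a given parity sequence?  (Illustrative only.)
import numpy as np

face_counts = [3, 4, 3, 5]             # hypothetical values of |F_2|, |F_3|, |F_4|, |F_5|
signs = [(-1) ** k for k in face_counts]
P_perp = np.diag(signs).astype(float)  # P_n restricted to W^perp, cf. the lemma's proof

# V_m is nonzero exactly when |F_{m-1}| is even, i.e. the corresponding sign is +1.
nonzero_V = [m for m, s in enumerate(signs, start=3) if s == 1]
print("diagonal of P_n|_{W^perp}:", signs)
print("V_m nonzero for m in:", nonzero_V)

# Cross-check: the +1-eigenspace of P_perp has the same dimension.
assert np.sum(np.isclose(np.linalg.eigvals(P_perp), 1.0)) == len(nonzero_V)
\end{verbatim}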
Let us proceed to perform the main construction for the 3-dimensional case of \cref{thm:Stable2LoopPosCurv}.
\begin{proposition}
\label{prop:StableFigureEightFlat3D}
There exists a convex polyhedron $\X^3 \subset \R^3$ and simple billiard loops $\alpha^3, \beta^3$ in $\X^3$ that correspond to two geodesic loops of a simple and irreducible stable figure-eight in $\dbl\X^3$. Moreover, $\alpha^3$ only intersects $\beta^3$ at their common basepoint.
\end{proposition}
\begin{proof}
We want to find $\alpha^3, \beta^3$ that correspond to geodesic loops $\gamma_1, \gamma_2$. To apply Corollary~3.4 from \cite{Cheng_StableGeodesicNets} to the smooth (flat) part of $\dbl\X^3$, we need $\ker\opd{\gamma_1} \cap \ker\opd{\gamma_2} = \{0\}$. In particular, this requires $\dim\ker\opd{\gamma_i} \leq 1$ for some $i$. We will in fact construct $\X^3$ and $\gamma_1,\gamma_2$ such that $\dim\ker\opd{\gamma_1} = \dim\ker\opd{\gamma_2} = 1$. To obtain each geodesic loop, we will apply the origami model construction once using the parity sequence ``odd, odd''.
Now consider the square $\Y^2 = [-2,0] \times [-1,1] \subset \R^2$, whose ``faces'' (edges) are $F_0$, $F_1$, $F_2$ and $F_3$, listed in counterclockwise order from $F_0$ which contains the origin. Let $\alpha^2 : [0,1] \to \Y^2$ be the billiard trajectory that starts from the origin, collides at the points $(-1,1)$, $(-2,0)$ and $(-1,-1)$ in order, and then returns to the origin (see \cref{fig:StableFigureEightFlat3D}(a)). Let $\F = \{F_1, F_2, F_3\}$ and $h : \F \to \R$ be the function defined by $h(F_1) = \delta$, $h(F_2) = 0$ and $h(F_3) = -\delta$, where $\delta = 0.08$. As in the proof of \cref{cor:TwistedBilliardTrajs}, $\Delta(h, \alpha^2) + \Hmax(h, \alpha^2) - \Hmin(h, \alpha^2) = 6\delta < 0.5$. Thus the hypotheses of \cref{thm:OrigamiModelConstruction} are satisfied, which guarantees the existence of an origami model $(\Y^3 = \bvl{\Y^2}\F{h}, \alpha^3)$ of $(\Y^2, \alpha^2)$. \Cref{fig:StableFigureEightFlat3D}(b)--(d) depicts $\Y^3$ and $\alpha^3$ from three different points of view, but for a larger value of $\delta$ to emphasize the important features. In particular, $\alpha^3(1) = (\alpha^2(1), 0) = \alpha^3(0)$ and $(\alpha^3)'(1) = ((\alpha^2)'(1), 0)$.
Now let $\rho \in SO(3)$ be the rotation by angle $\pi$ about the line spanned by the vector $(0,1,1)$. One can verify that $\X^3 = \Y^3 \cup \rho(\Y^3)$ is a convex polyhedron. As $\alpha^3$ is a billiard loop in $\X^3$ with an even number of collisions, it corresponds to a geodesic loop $\gamma_1$ in $\dbl\X^3$. Similarly, $\beta^3 = \rho \circ \alpha^3$ also corresponds to a geodesic loop $\gamma_2$ in $\dbl\X^3$ that shares the same basepoint $p$ as $\gamma_1$. $\X^3$, $\alpha^3$ and $\beta^3$ are illustrated in \cref{fig:StableFigureEightFlat3D}(f).
It can be verified that $\gamma_1$ and $\gamma_2$ form an irreducible stationary geodesic bouquet $G^3$ in $\dbl\X^3$ that is based at $p$. By \cite[Corollary~3.4]{Cheng_StableGeodesicNets}, it remains to show that $\ker\opd{\gamma_1} \cap \ker\opd{\gamma_2} = \{0\}$. \Cref{lem:OrigamiModelParallelDefectKernelParity} implies that $\ker\opd{\gamma_1}$ is the $y$-axis, under the identification of $T_p\dbl\X^3$ with $\R^3$ induced by the quotient map $\dbl\X^3 \to \X^3 \subset \R^3$.\footnote{This can be independently checked by observing that the parallel transport map is rotation by angle $\pi$ around the $y$-axis. This is consistent with the illustration in \cref{fig:StableFigureEightFlat3D}(e) of the projection of the parallel vector field along $\gamma_1$ that is induced by the quotient map $\dbl\X^3 \to \X^3$.} Due to the rotation by $\rho$, $\ker\opd{\gamma_2}$ is the $z$-axis, which intersects $\ker\opd{\gamma_1}$ only at the origin.
\Cref{lem:SimpleOrigamiModel} implies that $\alpha^3$ and $\beta^3$ are simple. By construction, they intersect only at their common basepoint. As a result, $G^3$ is also simple.
\end{proof}
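
The collision pattern used in the proof above can be checked directly against the reflection law; here is a throwaway numerical sketch (ours) that does so for the points listed there.
\begin{verbatim}
# Sketch: verify the reflection law for the loop
# (0,0) -> (-1,1) -> (-2,0) -> (-1,-1) -> (0,0) inside the square [-2,0] x [-1,1].
import numpy as np

pts = np.array([[0.0, 0.0], [-1.0, 1.0], [-2.0, 0.0], [-1.0, -1.0], [0.0, 0.0]])
normals = [np.array([0.0, 1.0]),   # wall y = 1,  hit at (-1, 1)
           np.array([1.0, 0.0]),   # wall x = -2, hit at (-2, 0)
           np.array([0.0, 1.0])]   # wall y = -1, hit at (-1, -1)

def unit(v):
    return v / np.linalg.norm(v)

for i, n in enumerate(normals):
    d_in = unit(pts[i + 1] - pts[i])          # direction before the collision
    d_out = unit(pts[i + 2] - pts[i + 1])     # direction after the collision
    d_ref = d_in - 2 * np.dot(d_in, n) * n    # reflection across the wall
    assert np.allclose(d_out, d_ref)
print("all three collisions obey the reflection law")
\end{verbatim}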
\begin{figure}
\caption{Various stages from the construction of a stable figure-eight in the proof of \cref{prop:StableFigureEightFlat3D}.}
\label{fig:StableFigureEightFlat3D}
\end{figure}
\begin{remark}
\label{rem:StableFigureEightFlat3D}
In the proof of \cref{prop:StableFigureEightFlat3D}, the rotation $\rho$ has two roles. It rotates $\alpha^3$ to $\beta^3$ while also rotating $\ker\opd{\gamma_1}$ to $\ker\opd{\gamma_2}$, with the result that $\ker\opd{\gamma_1} \cap \ker\opd{\gamma_2} = \{0\}$. The rotation also ensures that the tangent vectors of the resulting stable figure-eight do not all lie in the same plane, so the stable figure-eight is not a closed geodesic. The values of $h$ are chosen such that the face $F$ of $\Y^3$ that lies on the $yz$-plane is the rectangle shown in \cref{fig:StableFigureEightFlat3D}(d), which is mapped back to itself by $\rho$. This helps $\Y^3 \cup \rho(\Y^3)$ to be a convex polyhedron, as does the choice of $\Y^2$ as a square, which makes the dihedral angles in $\Y^3$ between $F$ and its adjacent faces equal to $\pi/2$.
\end{remark}
Next, let us perform the main construction for the 4-dimensional case of \cref{thm:Stable2LoopPosCurv}.
\begin{proposition}
\label{prop:StableFigureEightFlat4D}
There exists a convex 4-polytope $\X^4$ and simple billiard loops $\alpha^4, \beta^4$ in $\X^4$ that correspond to two geodesic loops of a simple and irreducible stable figure-eight in $\dbl\X^4$. Moreover, $\alpha^4$ only intersects $\beta^4$ at their common basepoint.
\end{proposition}
\begin{proof}
The desired geodesic loops will correspond to billiard trajectories $\alpha^4$ and $\beta^4$ in a convex 4-polytope $\X^4$. The loops $\alpha^4$ and $\beta^4$ are each obtained from two applications of the origami model construction, using the respective parity sequences ``odd, even, odd'' and ``even, odd, odd''.
We begin with convex polygons $\Y^2, \ZZ^2 \subset \R^2$ that contain billiard trajectories $\alpha^2$ and $\beta^2$ respectively. We choose both trajectories to be billiard loops based at the origin, so that the same will be true for $\alpha^4$ and $\beta^4$. Since the origami model construction from \cref{thm:OrigamiModelConstruction} does not change the angle of a billiard loop at the basepoint, in order for $\alpha^4$ and $\beta^4$ to correspond to geodesic loops in a stationary geodesic bouquet, we need $\alpha^2$ and $\beta^2$ to already form the same angle at the origin. These trajectories, and their convex polygons, are illustrated in \cref{fig:StableFigureEightFlat4D}(a) and (d). Note the positions of the points $C$ and $E$ in \cref{fig:StableFigureEightFlat4D}(a) are uniquely determined such that $\alpha^2$ is a billiard trajectory in $\Y^2$ with the angles labeled as shown. Similarly, the position of the point $I$ in \cref{fig:StableFigureEightFlat4D}(b) is uniquely determined. In accordance with the respective parity sequences, $\alpha^2$ collides an odd number of times, and $\beta^2$ collides an even number of times.
\begin{figure}
\caption{Various stages from the construction of a stable figure-eight in the proof of \cref{prop:StableFigureEightFlat4D}.}
\label{fig:StableFigureEightFlat4D}
\end{figure}
Let $\delta$ be a small positive number. Apply \cref{thm:OrigamiModelConstruction} to get an origami model $(\Y^3, \alpha^3)$ of $(\Y^2, \alpha^2)$, where $\Y^3 = \bvl{\Y^2}{\{AB, BC, EF, FG\}}g$, $g(AB) = g(EF) = \delta$ and $g(BC) = g(FG) = -\delta$ (``even'' in the parity sequence). (\Cref{fig:StableFigureEightFlat4D}(b)--(c) illustrates $\Y^3$ and $\alpha^3$ but for $\delta = 0.05$, which is large enough for us to see the finer details; $\delta = 0.05$ actually produces results that work for our purposes, but technically that is too large to satisfy the hypotheses of \cref{thm:OrigamiModelConstruction}.\footnote{\Cref{thm:OrigamiModelConstruction} requires $\delta$ to be very small because some of the interior angles of $\Y^2$ are nearly $\pi$.}) In fact, we will choose $\delta$ small enough to also allow us to apply \cref{thm:OrigamiModelConstruction} to get an origami model $(\ZZ^3, \beta^3)$ of $(\ZZ^2, \beta^2)$, where $\ZZ^3 = \bvl{\ZZ^2}{\{GH, HI, JA\}}h$, $h(GH) = -\delta$, $h(HI) = 0$ and $h(JA) = \delta$ (``odd'' in the parity sequence). \Cref{fig:StableFigureEightFlat4D}(e)--(f) depicts $\ZZ^3$ and $\beta^3$, but again for the too-large value of $\delta = 0.05$. As a consequence of the theorem, $\alpha^3$ collides at most once with each face of $\Y^3$, and similarly for $\beta^3$ in $\ZZ^3$.
Next we will prove that we can also choose $\delta$ to be small enough so that $\alpha^3$ and $\beta^3$ will also be simple. Note that they are already simple in \cref{fig:StableFigureEightFlat4D}(g), which uses a larger value of $\delta$, but let us formally justify why that is true for sufficiently small $\delta$. We cannot directly apply \cref{lem:SimpleOrigamiModel} because neither $\alpha^2$ nor $\beta^2$ is simple. On the other hand, the other hypotheses of the lemma are satisfied for sufficiently small $\delta$, and the proof of the lemma only uses the hypothesis that $\alpha^2$ is simple to show that two of the horizontal segments of $\alpha^3$ can intersect only if they share an endpoint. Yet, two of those horizontal segments could possibly intersect in their interior only if the same is true of their projections under $\pr_2$, so they would have to correspond to $p_2p_3$ and $p_7p_8$ in \cref{fig:StableFigureEightFlat4D}(b). This would contradict the fact that $\height(p_2p_3) = 2\delta$ and $\height(p_7p_8) = -2\delta$. Therefore the rest of the proof of \cref{lem:SimpleOrigamiModel} demonstrates that $\alpha^3$ is simple, and similarly for $\beta^3$. Furthermore, from their construction, it is clear that $\alpha^3$ and $\beta^3$ intersect only at their common basepoint.
The same value of $\delta$ is used to construct $\Y^3$ and $\ZZ^3$ so that the rectangle $KLMN$ (see \cref{fig:StableFigureEightFlat4D}(g)) is a face of both $\Y^3$ and $\ZZ^3$. Now let $R \in O(3)$ be the reflection that fixes the $x$ coordinate but interchanges the $y$ and $z$ coordinates. Observe that $R$ maps $KLMN$ back to itself, so that $\X^3 = \Y^3 \cup R(\ZZ^3)$ is a convex polyhedron, and that $R \circ \beta^3$ is a billiard trajectory in $\X^3$. \Cref{fig:StableFigureEightFlat4D}(g) illustrates $\X^3$, $\alpha^3$ and $R \circ \beta^3$. This transformation $R$ is applied to ensure that the tangent vectors of the final billiard trajectories $\alpha^4$ and $\beta^4$ at the origin will not lie in the same plane, so that the corresponding geodesic loops do not form a closed geodesic. Eventually this will lead to the irreducibility of the final stable figure-eight.\footnote{The rotation performed in the proof of \cref{prop:StableFigureEightFlat3D} also fulfills this purpose.}
Next, we will construct $\alpha^4$ and $\beta^4$ from $\alpha^3$ and $R \circ \beta^3$ using origami models again. Three of the faces of $\Y^3$ that $\alpha^3$ collides with are inherited from $\B^h(CE)$ and $\B^h(EF)$; in \cref{fig:StableFigureEightFlat4D}(g), they are the three faces around the vertex $u$. Index those faces $A_1$, $A_2$ and $A_3$ in the order that $\alpha^3$ collides with them. Similarly, three of the faces that $R \circ \beta^3$ collides with are images under $R$ of the faces of $\ZZ^3$ that are inherited from $\B^h(HI)$ and $\B^h(IJ)$; in \cref{fig:StableFigureEightFlat4D}(g), they are the three faces around the vertex $v$. Index those faces $B_1$, $B_2$ and $B_3$ in the order that $\beta^3$ collides with them. Apply \cref{thm:OrigamiModelConstruction} to get origami models $(\X^4, \alpha^4)$ of $(\X^3, \alpha^3)$ and $(\X^4, \beta^4)$ of $(\X^3, R \circ \beta^3)$, where $\X^4 = \bvl{\X^3}\F{f}$, $\F = \{A_1,A_2,A_3,B_1,B_2,B_3\}$ and
\begin{align}
f(A_1) &= f(B_1) = \varepsilon \\
f(A_2) &= f(B_2) = 0 \\
f(A_3) &= f(B_3) = -\varepsilon,
\end{align}
where $\varepsilon$ is chosen to be sufficiently small to satisfy the hypotheses of \cref{thm:OrigamiModelConstruction}. (This gives the final ``odd'' in each parity sequence.)
One can check that $\alpha^4$ and $\beta^4$ each collide an even number of times, so they correspond to geodesic loops $\gamma_1$ and $\gamma_2$ in $\dbl\X^4$ that are based at the same point $p$, giving a stationary figure-eight $G^4$. By \cite[Corollary~3.4]{Cheng_StableGeodesicNets}, it remains to prove that $\ker\opd{\gamma_1} \cap \ker\opd{\gamma_2} = \{0\}$. Given the parity sequence used to construct $\alpha^4$, \cref{lem:OrigamiModelParallelDefectKernelParity} implies that $\ker\opd{\gamma_1} = \vspan\{(0,1,0,0), (0,0,1,0)\}$, under the identification of $T_p\dbl\X^4$ with $\R^4$ induced by the quotient map $\dbl\X^4 \to \X^4 \subset \R^4$. A similar application of the arguments used to prove \cref{lem:OrigamiModelParallelDefectKernelParity} to the parity sequence used to construct $\beta^4$, but taking into account the reflection $R$, implies that $\ker\opd{\gamma_2} = \vspan\{(1,0,0,0)\}$. Therefore $\ker\opd{\gamma_1} \cap \ker\opd{\gamma_2} = \{0\}$.
In addition, we choose $\varepsilon$ small enough to satisfy the hypotheses of \cref{lem:SimpleOrigamiModel}, which guarantees that $\alpha^4$ and $\beta^4$ are simple. They also only intersect at their common basepoint, because $\im({\pr_3} \circ \alpha^4)$ lies in the convex hull of $\im(\alpha^3)$, and similarly for $\beta^4$. Thus $G^4$ is simple.
\end{proof}
Now we will perform the main construction in the proof of the 5-dimensional case of \cref{thm:Stable2LoopPosCurv}.
\begin{proposition}
\label{prop:StableFigureEightFlat5D}
There exists a convex 5-polytope $\X^5$ and simple billiard loops $\alpha^5, \beta^5$ in $\X^5$ that correspond to two geodesic loops of a simple, irreducible and stable figure-eight in $\dbl\X^5$. Moreover, $\alpha^5$ and $\beta^5$ intersect only at their common basepoint.
\end{proposition}
\begin{proof}
We will take the $\X^3$, $\alpha^3$ and $R \circ \beta^3$ from the proof of \cref{prop:StableFigureEightFlat4D} and ``direct sum'' with two billiard loops in a convex polygon to get the desired convex 5-polytope $\X^5$ and the desired billiard loops $\alpha^5$ and $\beta^5$, by mimicking the arguments of \cref{sec:Semigroup}. Let us parametrize $\alpha^3$ and $R \circ \beta^3$ as constant-speed curves over the interval $[0,1]$. Note however that $\alpha^3$ and $R \circ \beta^3$ each have an odd number of collisions, so they correspond to two geodesics $\phi_1$ and $\phi_2$ respectively from $(x,0)$ to $(x,1)$, where $(x,0)$ and $(x,1)$ are distinct points in $\dbl\X^3$---seen as a quotient space of $\X^3 \times \{0,1\}$---that map to the same point $x$ under the quotient map $q : \dbl\X^3 \to \X^3$.
We will also consider a regular decagon $\X^2$ and two periodic pentagonal billiard trajectories in it (see \cref{fig:StableFigureEightFlat5D}), which we also parametrize as constant-speed curves over $[0,1]$. Due to their odd number of collisions, the pentagonal billiard trajectories also correspond to geodesics $\gamma_1$ and $\gamma_2$ in $\dbl\X^2$ from points $(y,0)$ to $(y,1)$ in $\dbl\X^2$ for some $y \in \X^2$. Let $\gamma_1$ correspond to the red pentagonal billiard trajectory in \cref{fig:StableFigureEightFlat5D} and let $\gamma_2$ correspond to the black one.
Observe that the geodesics $(\phi_1, \gamma_1)$ and $(\phi_2, \gamma_2)$ in $\dbl\X^3 \times \dbl\X^2$ project to geodesic loops in $\dbl(\X^3 \times \X^2)$ under the branched cover $\Gamma : \dbl\X^3 \times \dbl\X^2 \to \dbl(\X^3 \times \X^2)$ defined in \cref{eq:BranchedCoverDefn}, \emph{as long as each $(\phi_i,\gamma_i)$ avoids the branch locus.} This is because these geodesic loops will be based at $((x,y), 0 + 0 \bmod2) = ((x,y), 1 + 1 \bmod2)$. These two geodesic loops would then form a stationary geodesic bouquet $G$.
As explained in \cref{sec:Semigroup}, $(\phi_1, \gamma_1)$ will avoid the branch locus if $\alpha^3$ never collides simultaneously with the red pentagonal billiard trajectory. This is true because trigonometry shows that the red trajectory never collides simultaneously with $\alpha^2$ from the proof of \cref{prop:StableFigureEightFlat4D} (parametrized over $[0,1]$), and the collision times of $\alpha^3$ can be made arbitrarily close to those of $\alpha^2$ by choosing $\delta$ from the proof of \cref{prop:StableFigureEightFlat4D} to be sufficiently small. A similar argument shows that $R \circ \beta^3$ never collides simultaneously with the black pentagonal billiard trajectory. Therefore each $(\phi_i,\gamma_i)$ avoids the branch locus, and we have the stationary geodesic bouquet $G$.
We will prove that $G$ is in fact a stable figure-eight. After that, irreducibility can be readily verified. We can also prove that $G$ is simple, because $\alpha^3$ and $R \circ \beta^3$ are simple loops that intersect only at their common basepoint, and the rest follows from adapting the proof of \cref{Lem:DirectSumSimple}.
\begin{figure}
\caption{A regular decagon $\X^2$ with two periodic billiard trajectories, in black and red, starting and ending at a point $y \in \X^2$.}
\label{fig:StableFigureEightFlat5D}
\end{figure}
We can mimic most of the analysis from \cref{sec:Semigroup} by defining analogues of the parallel defect operator, studying their kernels, and adapting our notion of stability. We can define the index forms $Q_{\gamma_i}$ using the same formulas as in \cite[Section~3.1]{Cheng_StableGeodesicNets}; they are non-negative definite because of the flat metric. We can adapt the proof of \cref{lem:SuperadditiveIndexForm} to prove the following. Let $W$ be a vector field along $G$ that pulls back along the branched cover $\Gamma$ to a vector field $U + V$ along $(\phi_1,\gamma_1)$ and $(\phi_2,\gamma_2)$, where $U$ (resp. $V$) is the component in $T\dbl\X^3$ (resp. $T\dbl\X^2$). Let $U_i$ (resp. $V_i$) be the restriction of $U$ (resp. $V$) to $\phi_i$ (resp. $\gamma_i$) for $i \in \{1,2\}$. Then
\begin{equation}
Q_G(W) \geq \Big(Q_{\phi_1}(U_1^\perp) + Q_{\phi_2}(U_2^\perp)\Big) + \Big(Q_{\gamma_1}(V_1^\perp) + Q_{\gamma_2}(V_2^\perp)\Big),
\end{equation}
where $U_i^\perp$ (resp. $V_i^\perp$) is the component of $U_i$ (resp. $V_i$) that is orthogonal to $\phi_i$ (resp. $\gamma_i$).
If $\phi_1$ and $\phi_2$ formed a stationary geodesic bouquet $F$, and $\gamma_1$ and $\gamma_2$ formed a stationary geodesic bouquet $F'$, the strategy from \cref{sec:Semigroup} would be to demonstrate that $F$ and $F'$ are stable, so that $Q_G(W)$ can only vanish if the right-hand side of the above inequality also vanishes, which would imply that $U$ and $V$ are tangent to $F$ and $F'$ respectively, which would mean that $W$ is tangent to $G$. (The tangency of a vector field to a stationary geodesic bouquet is defined in \cite[Section~2.1]{Cheng_StableGeodesicNets}.) Even though we are not in this situation, we will still be able to show that if the right-hand side of the inequality vanishes, then $U_i$ (resp. $V_i$) must be tangent to $\phi_i$ (resp. $\gamma_i$) for $i \in \{1,2\}$. That will complete the proof of this proposition.
First we bound the index forms from below in a manner analogous to \cite[Proposition~3.3]{Cheng_StableGeodesicNets}. In a sense we generalize the theory of parallel defect operators to billiard trajectories. We can define a linear operator $\opd{\phi_1} : T_x\X^3 \to T_x\X^3$ by $\opd{\phi_1} = dR_{F_k} \dotsm dR_{F_1} \bar{\pi}_0 - \bar{\pi}_1$, where $\alpha^3$ collides with the faces $F_1, \dotsc, F_k$, and $\bar{\pi}_t$ is the projection onto the orthogonal complement of $(\alpha^3)'(t)$. Then $\bar{\pi}_t$ is related to the projection $\pi_t : T_{\phi_1(t)}\dbl\X^3 \to T_{\phi_1(t)}\dbl\X^3$ onto the component orthogonal to $\phi_1'$ via $\bar{\pi}_t = dq_{\phi_1(t)} \pi_t dq_{\phi_1(t)}^{-1}$, where $q : \dbl\X^3 \to \X^3$ is the canonical quotient map. Observe that $dq_{(x,0)}U_1(0) = dq_{(x,1)}U_1(1) = u$ for some $u \in T_x\X^3$, and $dq_{(y,0)}V_1(0) = dq_{(y,1)}V_1(1) = v$ for some $v \in T_y\X^2$; it may help to consider the following commutative diagram for all $i = 1,2$ and $t \in \{0,1\}$, whose diagonal maps are obtained from the differentials of quotient maps, and where $\Gamma$ is the branched cover defined in \cref{eq:BranchedCoverDefn}.
\begin{equation}
\begin{tikzcd}[column sep=small]
\overset{U_i(t) + V_i(t) \in}{T_{((x,t),(y,t))}(\dbl\X^3 \times \dbl\X^2)} \arrow[rr,"d\Gamma"] \arrow[rd] & & \overset{W((x,y),0) \in}{T_{((x,y),0)}\dbl(\X^3 \times \X^2)} \arrow[ld] \\
& \underset{u + v \in}{T_{(x,y)}(\X^3 \times \X^2)}
\end{tikzcd}
\end{equation}
If $P : T_{(x,0)}\dbl\X^3 \to T_{(x,1)}\dbl\X^3$ is the parallel transport map of $\phi_1$, then following the proof of \cite[Proposition~3.3]{Cheng_StableGeodesicNets}, we have
\begin{align}
Q_{\phi_1}(U_1^\perp)
&= \int_0^1 \norm{\nabla_{\phi_1'} U_1^\perp}^2 \,dt \\
&\geq \norm{P\pi_0 U_1(0) - \pi_1 U_1(1)}^2 \\
&= \norm{P\pi_0 dq_{(x,0)}^{-1} u - \pi_1 dq_{(x,1)}^{-1} u}^2 \\
&= \norm{dq_{(x,1)}P\pi_0 dq_{(x,0)}^{-1} u - dq_{(x,1)}\pi_1 dq_{(x,1)}^{-1} u}^2 \\
(\text{\cite[Lemma~4.2]{Cheng_StableGeodesicNets}})
&= \norm{dR_{F_k} \dotsm dR_{F_1} dq_{(x,0)} \pi_0 dq_{(x,0)}^{-1} u - dq_{(x,1)}\pi_1 dq_{(x,1)}^{-1} u}^2 \\
&= \norm{dR_{F_k} \dotsm dR_{F_1} \bar\pi_0 u - \bar\pi_1 u}^2 \\
&= \norm{\opd{\phi_1}(u)}^2.
\end{align}
Similarly we can define $\opd{\phi_2}$, $\opd{\gamma_1}$ and $\opd{\gamma_2}$ and we have $Q_{\phi_2}(U_2^\perp) \geq \norm{\opd{\phi_2}(u)}^2$ and similar lower bounds for $Q_{\gamma_1}$ and $Q_{\gamma_2}$.
From now on let us assume that $Q_G(W) = 0$. Then $u \in \ker\opd{\phi_1} \cap \ker\opd{\phi_2}$ and $v \in \ker\opd{\gamma_1} \cap \ker\opd{\gamma_2}$. It suffices to show that $\ker\opd{\phi_1} \cap \ker\opd{\phi_2} = \{0\}$ and $\ker\opd{\gamma_1} \cap \ker\opd{\gamma_2} = \{0\}$, because then we would have $u = 0$ and $v = 0$. We can then follow the proof of \cite[Corollary~3.4]{Cheng_StableGeodesicNets} to show that since $Q_{\phi_1}(U_1^\perp) = 0$, $U_1^\perp$ has to be parallel. But since $u = 0$, $U_1^\perp$ must identically vanish, so $U_1$ must be tangent to $\phi_1$. The same argument shows that $U_2$ must be tangent to $\phi_2$ and each $V_i$ must be tangent to $\gamma_i$.
From the proof of \cref{prop:StableFigureEightFlat4D}, the parallel transport maps of $\alpha^3$ and $\beta^3$ are $\diag(-1,1,1)$ and $M = \left[\begin{smallmatrix}
0 & -1 & 0 \\
1 & 0 & 0 \\
0 & 0 & -1
\end{smallmatrix}\right]$ respectively. Thus the parallel transport map of $R \circ \beta^3$ is $RMR^{-1} = \left[\begin{smallmatrix}
0 & 0 & -1 \\
0 & -1 & 0 \\
1 & 0 & 0
\end{smallmatrix}\right]$. From this it can be computed that $\ker\opd{\phi_1}$ is the $yz$-plane and $\ker\opd{\phi_2}$ is the $x$-axis, so they do intersect only at the origin. For the periodic billiard trajectories in $\X^2$, the parallel transport map is the reflection that fixes the tangent vector at the basepoint by \cref{lem:2DBilliardParallelTransportParity}. In fact this implies that $\ker\opd{\gamma_1}$ and $\ker\opd{\gamma_2}$ are the lines tangent to the periodic billiard trajectories at the basepoint, which intersect only at the origin. Thus we are done.
\end{proof}
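
As a purely numerical sanity check of the conjugation computed at the end of the proof above (a sketch; the matrices are exactly the ones displayed there):
\begin{verbatim}
# Sketch: check that R M R^{-1} equals the matrix stated in the proof.
import numpy as np

R = np.array([[1.0, 0.0, 0.0],    # fixes x, interchanges y and z
              [0.0, 0.0, 1.0],
              [0.0, 1.0, 0.0]])
M = np.array([[0.0, -1.0, 0.0],   # parallel transport map of beta^3
              [1.0,  0.0, 0.0],
              [0.0,  0.0, -1.0]])
stated = np.array([[0.0, 0.0, -1.0],
                   [0.0, -1.0, 0.0],
                   [1.0, 0.0, 0.0]])
assert np.allclose(R @ M @ np.linalg.inv(R), stated)
print("R M R^{-1} agrees with the matrix in the text")
\end{verbatim}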
Finally we can combine all of our previous results into a proof of our main result, \cref{thm:Stable2LoopPosCurv}.
\begin{proof}[Proof of \cref{thm:Stable2LoopPosCurv}]
From \cref{prop:StableFigureEightFlat3D,prop:StableFigureEightFlat4D,prop:StableFigureEightFlat5D} we can derive the convex polytopes $\X^3$, $\X^4$, and $\X^5$ in dimensions 3, 4 and 5 whose doubles contain the stable figure-eights $G^3$, $G^4$ and $G^5$ respectively. For each $n \in \{3,4,5\}$ and $m \geq 0$ we will modify $G^3$ into slightly different versions $G^3_1,\dotsc,G^3_m$ so that together with $G^n$ they are all pairwise non-singular. Once we accomplish that, \cref{cor:StableDirectSum} will guarantee that $G^n \oplus G^3_1 \oplus \dotsb \oplus G^3_m$ is a stable figure-eight in the double of a convex polytope in dimension $3m+n$, as explained in \cref{sec:Semigroup}.
We find the $G^3_i$ by doing the construction in \cref{prop:StableFigureEightFlat3D} but using different, smaller values of $\delta$. Denote the resulting billiard trajectories by $\alpha^3_\delta$ and $\beta^3_\delta$, and the resulting height function on the faces of $\Y^2$ by $h_\delta : \F \to \R$. We will show that there exist constants $\mu_i, \tau_i > 0$ for $i \in \{1,2,3\}$ such that $\alpha^3_\delta$ collides at times $\mu_1 - \tau_1\delta$, $\mu_1 + \tau_1\delta$, $\mu_2 - \tau_2\delta$, $\mu_2 + \tau_2\delta$, $\mu_3 - \tau_3\delta$, and $\mu_3 + \tau_3\delta$. The same will be true of $\beta^3_\delta$. Then clearly there will be an infinite number of ways to choose $\delta$ to obtain all of the necessary $G^3_i$.
The heights of the horizontal segments of $\alpha^3_\delta$ and the values of $h_\delta(F_1)$, $h_\delta(F_2)$, and $h_\delta(F_3)$ are all proportional to $\delta$. This implies that for any sufficiently small $\delta, \delta' > 0$, the part of $\alpha_\delta^3 \cup \partial\B^{h_\delta}(F_i)$ near each $F_i \times \R$ is homothetic to the part of $\alpha_{\delta'}^3 \cup \partial\B^{h_{\delta'}}(F_i)$ near $F_i \times \R$. This situation is illustrated in \cref{fig:Homothety}, where we have drawn the unraveled versions of $\alpha_\delta^3$ and $\alpha_{\delta'}^3$. We have also drawn how it relates to the unraveled version of $\alpha^2$ via the projection $\pr_2$. The existence of the constants $\mu_i$ and $\tau_i$ is then evident.
\begin{figure}
\caption{Using different values of $\delta$ produces homothetic configurations near collisions.}
\label{fig:Homothety}
\end{figure}
To complete the proof, it remains to apply \cite[Propositions~4.7 and 4.8]{Cheng_StableGeodesicNets}.
\end{proof}
\end{document}
\begin{document}
\title{\bf Energy of Taut Strings Accompanying Wiener Process}
\author{
Mikhail Lifshits\footnote{St.Petersburg State University, Russia, Stary
Peterhof, Bibliotechnaya pl.,2,
email {\tt [email protected]} and MAI, Link\"oping University.}
\and Eric Setterqvist\footnote{MAI, Link\"oping University, 58183
Link\"oping, Sweden, email \ {\tt [email protected]}.
}}
\date{\today}
\maketitle
\begin{abstract}
Let $W$ be a Wiener process. For $r>0$ and $T>0$ let
$I_W(T,r)^2$ denote the minimal value of the energy
$\int_0^T h'(t)^2 dt$
taken among all absolutely continuous functions $h(\cdot)$
defined on $[0,T]$, starting at zero and satisfying
\[
W(t)-r \le h(t)\le W(t)+r,\qquad 0\le t \le T.
\]
The function minimizing energy is a taut string, a classical object
well known in Variational Calculus, in Mathematical Statistics, and in a broad
range of applications.
We show that there exists a constant ${\mathcal C}\in (0,\infty)$ such that for any $q>0$
\[
\frac{r} { T^{1/2}}\, I_W(T,r) \stackrel{L_q}{\longrightarrow} {\mathcal C},
\qquad \textrm{as } \frac{r}{T^{1/2}}\to 0,
\]
and for any fixed $r>0$,
\[
\frac{r} { T^{1/2}}\, I_W(T,r) \stackrel{\textrm{a.s.}}{\longrightarrow} {\mathcal C},
\qquad \textrm{as } T\to\infty.
\]
Although the precise value of ${\mathcal C}$ remains unknown, we give various theoretical
bounds for it, as well as rather precise results of computer simulation.
While the taut string clearly depends on the entire trajectory of $W$, we also
consider an adaptive version of the problem by giving a construction
(called Markovian pursuit) of a random function $h(t)$ based only on the values
$W(s),s\le t$, and having minimal asymptotic energy. The solution, i.e. an optimal
pursuit strategy, turns out to be related to a classical minimization problem
for the Fisher information on a bounded interval.
\end{abstract}
\vskip 1cm
\noindent
\textbf{2010 AMS Mathematics Subject Classification:}
Primary: 60G15; Secondary: 60G17, 60F15.
\noindent
\textbf{Key words and phrases:} Gaussian processes,
Markovian pursuit, taut string, Wiener process.
\section*{Introduction}
\setcounter{equation}{0}
Given a time interval $[0,T]$ and two functional boundaries
$g_1(t)<g_2(t)$, $0\le t \le T$, the {\it taut string} is a function $h_*$
that for any (!) convex function $\varphi$ provides the minimum of
the functional
\[ F_\varphi(h):=\int_0^T \varphi(h'(t)) \, dt
\]
among all absolutely continuous functions $h$ with given starting
and final values and satisfying
\[
g_1(t) \le h(t)\le g_2(t),\qquad 0\le t \le T.
\]
The list of simultaneously optimized functionals includes energy
$\int_0^T h'(t)^2 dt$,
variation $\int_0^T |h'(t)| dt$, graph length
$\int_0^T \sqrt{1+h'(t)^2} dt$, etc.
The first instance of taut strings that we have found in the literature is in
G. Dantzig's paper \cite{Dantzig}. Dantzig notes there that the problem
under study and its solution were discussed in R. Bellman's seminar at RAND
Corporation in 1952. The taut strings were later used in Statistics, see
\cite{Mammen} and \cite{Davies}.
In the book \cite[Chapter 4, Subsection 4.4]{Scherzer},
taut strings are considered in connection with problems in image processing.
Quite recently, taut strings were applied to a buffer management
problem in communication theory, see \cite{Setterqvist}.
In this article, we study the energy of the taut string going through the tube
of constant width constructed around the sample path of a Wiener process $W$,
i.e. for some $r>0$ we let $g_1(t):=W(t)-r$, $g_2(t):=W(t)+r$,
see Fig. \ref{fig:ts}.
\begin{center}
\begin{figure}
\caption{A fragment of taut string accompanying Wiener process.}
\label{fig:ts}
\end{figure}
\end{center}
We focus attention
on the behavior in the long run: we show that when $T\to \infty$, the taut string
spends an asymptotically constant amount of energy ${\mathcal C}^2$ per unit of time.
Precise assertions are given in Theorems \ref{t:mean} and \ref{t:alsur} below.
The constant ${\mathcal C}$ shows {\it how much energy an absolutely continuous
function must spend if it is forced to stay within a certain distance from the
non-differentiable trajectory of $W$}.
Although the precise value of ${\mathcal C}$ remains unknown, we give various theoretical
bounds for it in Section \ref{s:estimates}, as well as the results
of computer simulation in Section \ref{s:simulation}. The latter suggest
${\mathcal C}\approx 0.63$.
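
For readers who want to reproduce an estimate of this kind, here is a minimal simulation sketch (ours, not the code behind Section \ref{s:simulation}): it discretizes the problem on a grid, computes the constrained minimizer of the discrete energy with a box-constrained quadratic solver, and reports $r T^{-1/2} I_W(T,r)$ for a single sample path. The grid size, the value of $r$, and the use of L-BFGS-B are arbitrary choices, and no accuracy claim is made.
\begin{verbatim}
# Sketch: discrete taut-string energy for one sampled Wiener path.
# Minimize sum_i (h_{i+1}-h_i)^2 / dt subject to |h_i - W_i| <= r and h_0 = 0.
import numpy as np
from scipy.optimize import minimize

rng = np.random.default_rng(0)
T, n, r = 50.0, 10000, 0.5           # horizon, number of grid steps, tube half-width
dt = T / n
W = np.concatenate([[0.0], np.cumsum(rng.normal(0.0, np.sqrt(dt), n))])

def energy(h):
    d = np.diff(h)
    return np.sum(d * d) / dt

def grad(h):
    g = np.zeros_like(h)
    d = np.diff(h)
    g[:-1] -= 2.0 * d / dt
    g[1:] += 2.0 * d / dt
    return g

bounds = [(0.0, 0.0)] + [(w - r, w + r) for w in W[1:]]     # h(0) = 0 is fixed
h0 = np.clip(np.zeros_like(W), [b[0] for b in bounds], [b[1] for b in bounds])
res = minimize(energy, h0, jac=grad, method="L-BFGS-B", bounds=bounds,
               options={"maxiter": 20000})
I = np.sqrt(res.fun)                  # discrete analogue of I_W(T, r)
print("r * I_W(T, r) / sqrt(T) ~", r * I / np.sqrt(T))
\end{verbatim}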
If we take the pursuit point of view, considering $h(\cdot)$ as a trajectory
of a particle moving with finite speed and trying to stay close to a Brownian
particle, then it is much more natural to consider constructions that define
$h(t)$ in an adaptive way, i.e. on the basis of the known values $W(s),s\le t$. Recall
that the taut string depends on the entire trajectory $W(s),s\le T$, hence
it does not fit the adaptive setting. In view of the Markov property of $W$, the
reasonable pursuit strategy for $h(t)$ is to move towards $W(t)$ with the speed
depending on the distance $|h(t)-W(t)|$. In this class of algorithms we find
an optimal one in Section \ref{s:pursuit}. The corresponding function spends
on average $\tfrac{\pi}{2}\approx 1.57$ units of energy per
unit of time. A comparison of the two constants shows that we have to pay more than
double the price for not knowing the future of the trajectory of $W$.
To our great surprise, the search for an optimal pursuit strategy boils down
to a well-known variational problem: minimize the Fisher information over the class
of distributions supported on a fixed bounded interval.
We conjecture that the provided algorithm is the optimal one in the entire
class of adaptive algorithms.
In Section \ref{s:misc} we establish some connections with other well known
settings and problems.
First, we recall that Strassen's famous functional law of the iterated logarithm
(FLIL) and its extensions handling convergence rates in FLIL actually deal exactly
with the energy of taut strings. Not surprisingly, we borrowed some techniques
for the evaluation of this energy from FLIL research. Yet it should be noticed that
the FLIL requires a very different range of parameters $r$ and $T$ than those emerging
in our case. The FLIL tubes are much wider, hence the taut string energy is much
lower than ours. This is why Strassen's law with its super-slow loglog rates is so
hard to reproduce in simulations, while our results handling the same type of
quantities are easily observable in a computer experiment.
Second, we briefly look at the taut string as a minimizer of the variation
\[
{\mathbb V}(h):= \int_0^T |h'(t)| dt.
\]
Since $|\cdot|$ is not a {\it strictly}
convex function, the corresponding variational problem typically has
{\it many} solutions. In \cite{LoMil,Mil} another minimizer of ${\mathbb V}(h)$ is
described in detail, a so-called {\it lazy function}. As E.~Schertzer pointed
out to us, the relations between the taut strings and lazy functions are yet to be
clarified.
Finally, we briefly describe a discrete analogue of our problem, thus giving
a flavor of possible applications.
In conclusion, Section \ref{s:concl} outlines some forthcoming or possible
developments of the subject treated here.
\section{Notation and main results}
\label{s:nota}
\setcounter{equation}{0}
Throughout the paper, we consider uniform norms
\[
||h||_T:= \sup_{0\le t\le T} |h(t)|, \qquad h\in C[0,T],
\]
and Sobolev-type norms
\[
|h|^2_T:= \int_0^T h'(t)^2 dt, \qquad h\in AC[0,T],
\]
where $AC[0,T]$ denotes the space of absolutely continuous functions on
$[0,T]$. It is natural to call $|h|^2_T$ {\it energy}.
Let $W$ be a Wiener process. We are mostly interested in its approximation
characteristics
\[
I_W(T,r):=\inf\{|h|_T; h\in AC[0,T], ||h-W||_T\le r, h(0)=0 \}
\]
and
\[
I_W^0(T,r):=\inf\{|h|_T; h\in AC[0,T], ||h-W||_T\le r, h(0)=0, h(T)=W(T) \}.
\]
The unique functions at which the infima are attained are called {\it taut string},
resp. {\it taut string with fixed end}.
Our main results are as follows.
\begin{thm} \label{t:mean}
There exists a constant ${\mathcal C}\in (0,\infty)$ such that
if $\tfrac{r}{\sqrt{T}}\to 0$, then
\begin{eqnarray}
\label{toc}
\frac{r} { T^{1/2}}\, I_W(T,r) \stackrel{L_q}{\longrightarrow} {\mathcal C},
\\
\label{tocz}
\frac{r} { T^{1/2}}\, I_W^0(T,r) \stackrel{L_q}{\longrightarrow} {\mathcal C},
\end{eqnarray}
for any $q>0$.
\end{thm}
We may complement the mean convergence with almost sure convergence
to ${\mathcal C}$.
\begin{thm} \label{t:alsur}
For any fixed $r>0$, when $T\to\infty$, we have
\begin{eqnarray*}
\frac{r} { T^{1/2}}\, I_W(T,r) \stackrel{\textrm{a.s.}}{\longrightarrow} {\mathcal C},
\\
\frac{r} { T^{1/2}}\, I_W^0(T,r) \stackrel{\textrm{a.s.}}{\longrightarrow} {\mathcal C}.
\end{eqnarray*}
\end{thm}
\section{Basic properties of $I_W$ and $I_W^0$}
\setcounter{equation}{0}
\label{s:basic}
We prepare the proofs of the main results given below in Subsections \ref{ss:asym2}
and \ref{ss:asym3}
by exploring scaling and concentration properties of the taut string's energy.
\subsection{Scaling}
Given two functions $W(t)$ and $h(t)$ on $[0,T]$, let us rescale them onto the
time interval $[0,1]$ by letting
\[
X(s):= \frac{W(sT)}{\sqrt{T}}, \ g(s):= \frac{h(sT)}{\sqrt{T}}, \qquad 0\le s\le 1.
\]
Then
\[
||g-X||_1=\frac {||h-W||_T}{\sqrt{T}}
\]
and
\[
|g|_1^2= \int_0^1 g'(s)^2 ds = \int_0^1 \left( \frac{ h'(sT)T}{\sqrt{T}}\right)^2 ds
= T \int_0^1 h'(sT)^2 ds
= \int_0^T h'(t)^2 dt
=|h|^2_T.
\]
The boundary conditions are also transformed properly: namely, $h(0)=W(0)$ is equivalent to
$g(0)=X(0)$, while $h(T)=W(T)$ is equivalent to $g(1)=X(1)$. Therefore, $h$
belongs to the set $\{h: h\in AC[0,T], ||h-W||_T\le r, h(0)=0 \}$ iff
$g$ belongs to the analogous set
$\{g: g \in AC[0,1], ||g-X||_1\le \tfrac{r}{\sqrt{T}}, g(0)=0 \}$.
Recall that if $W$ is a Wiener process on $[0,T]$, then $X(s):= \tfrac{W(sT)}{\sqrt{T}}$
is a Wiener process on $[0,1]$. We conclude that
\[
I_W(T,r) \stackrel{\mathcal{L}}{=} I_W \left(1, \tfrac{r}{\sqrt{T}}\right).
\]
Similarly,
\be \label{scal0}
I_W^0(T,r) \stackrel{\mathcal{L}}{=} I_W^0 \left(1, \tfrac{r}{\sqrt{T}}\right).
\ee
Therefore, assertions \eqref{toc} and \eqref{tocz} may be rewritten in a one-parameter form
\begin{eqnarray}
\label{toc1}
\varepsilon \, I_W(1,\varepsilon) \stackrel{L_q}{\longrightarrow} {\mathcal C}, \qquad \textrm{as } \varepsilon\to 0,
\\
\nonumber
\varepsilon\, I_W^0(1,\varepsilon) \stackrel{L_q}{\longrightarrow} {\mathcal C}, \qquad \textrm{as } \varepsilon\to 0.
\end{eqnarray}
\subsection{Finite moments}
We will show now that both $I_W(T,r)$ and $I_Wz(T,r)$ have finite exponential moments.
Yet in the following we only need that
\begin{eqnarray}
D(T,r) := \mathbb{E}\, I_W(T,r)^2 <\infty,
\\
D^0(T,r):= \mathbb{E}\, I_W^0(T,r)^2 <\infty.
\end{eqnarray}
Let $v$ be an even integer. Then $\delta:=\tfrac 2v$ is the inverse of an integer, and
we may cut the time interval $[0,1]$ into $\delta^{-1}$ intervals of length $\delta$.
Let $W_\delta$ be the linear interpolation of $W$ based on the knots $(j\delta,W(j\delta))$,
$0\le j\le \delta^{-1}$. Clearly, we have either $||W_\delta-W||_1>r$ or
$I_W^0(1,r)\le |W_\delta|_1$. It follows that
\be \label{p2}
{\mathbb{P}}\left(I_W^0(1,r)^2>v\right)
\le
{\mathbb{P}}\left(||W_\delta-W||_1>r \right) + {\mathbb{P}}\left(|W_\delta|_1^2>v \right).
\ee
Notice that
\begin{eqnarray}
\nonumber
||W_\delta-W||_1 &=& \max_{0\le t\le 1} |W_\delta(t)-W(t)|
\\ \nonumber
&=&
\max_{0\le j < \delta^{-1}} \ \max_{j\delta\le t\le (j+1)\delta} |W_\delta(t)-W(t)|
\\ \label{sum1}
&\stackrel{\mathcal{L}}{=}& \max_{0\le j < \delta^{-1}} \sqrt{\delta} \max_{0\le t\le 1} |B_j(t)|,
\end{eqnarray}
where $(B_j)$ are independent Brownian bridges, and
\begin{eqnarray}
\nonumber
|W_\delta|_1^2
&=& \int_0^1 W_\delta'(t)^2 dt
\\ \label{sum2}
&=& \delta^{-1} \sum_{0\le j < \delta^{-1}}
\left(W((j+1) \delta)-W(j \delta)\right)^2
= \sum_{0\le j < \delta^{-1}} \eta_j^2,
\end{eqnarray}
where $(\eta_j)$ are i.i.d. standard normal random variables.
Now we may evaluate the probabilities in \eqref{p2}.
By using \eqref{sum1}, we obtain
\begin{eqnarray*}
{\mathbb{P}}\left(||W_\delta-W||_1>r \right)
&\le&
\delta^{-1} \ {\mathbb{P}}\left(||B||_1> \frac{r}{\sqrt{\delta}} \right)
\le
\delta^{-1} \ {\mathbb{P}}\left(||W||_1> \frac{r}{\sqrt{\delta}} \right)
\\
&\le&
\frac{v}{2}\cdot 2 \cdot \exp\left( - \frac{r^2}{2 \delta} \right)
= v\ \exp (-r^2 v/4).
\end{eqnarray*}
On the other hand, by using Cram\'er--Chernoff theorem and \eqref{sum2},
\[
{\mathbb{P}}\left(|W_\delta|_1^2>v \right)
= {\mathbb{P}}\left( \sum_{0\le j < v/2} \eta_j^2 >v \right)
\le \exp\{-c_1 v\}
\]
for all $v$ and some universal constant $c_1$.
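For instance, since $\mathbb{E}\, \exp(\lambda \eta_0^2)=(1-2\lambda)^{-1/2}$ for $\lambda<\tfrac12$, the Chernoff bound with $\lambda=\tfrac14$ gives
\[
{\mathbb{P}}\left( \sum_{0\le j < v/2} \eta_j^2 >v \right)
\le \mathrm{e}^{-v/4}\, \left(1-\tfrac12\right)^{-v/4}
= \exp\{-(1-\ln 2)\, v/4\},
\]
so one may take $c_1=(1-\ln 2)/4$.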
It follows that
\[
{\mathbb{P}}\left(I_W^0(1,r)^2>v\right)
\le v\ \exp (-r^2 v/4) + \exp\{-c_1 v\}.
\]
Hence,
\[
\mathbb{E}\, \exp \left(c \, I_W^0(1,r)^2\right) <\infty,
\]
whenever $0<c<\min\{\tfrac{r^2}{4}, c_1\}$. By scaling we also have
\[
\mathbb{E}\, \exp \left(c\, I_W^0(T,r)^2\right) <\infty,
\]
for any $r,T>0$ and sufficiently small positive $c$.
It follows from the definitions that
\be \label{comp_simple}
I_W(T, r)\le I_W^0(T, r) \qquad \forall\, T,r>0.
\ee
Hence, the exponential moment of $I_W(T,r)$ is finite, too.
\subsection{Relations between $I_W$ and $I_W^0$}
We already noticed in \eqref{comp_simple} that
$I_W(T, r)\le I_W^0(T, r)$.
We will show now that a kind of converse estimate is also true.
\begin{prop} For all positive $T,r,\delta$ it is true that
\be \label{lowerE}
\mathbb{E}\, I_W(T,r)^2 \ge \mathbb{E}\, I_W^0(T+1,r+\delta)^2 - \mathbb{E}\, I_W^0(1,\delta)^2 -r^2.
\ee
\end{prop}
\begin{proof}
Let us fix for a while the time interval $[0,1]$ and let
us approximate the trajectory of the Wiener process $W$ by functions
starting from some arbitrary point $\rho\in\mathbb{R}$. Let $\delta>0$ and
let $h(\cdot)$ be the taut string with fixed end at which $I_W^0(1,\delta)$
is attained.
Then we have $h(0)=0$, $h(1)=W(1)$, $||h-W||_1\le \delta$,
$|h|_1=I_W^0(1,\delta)$. Let
\[
H(t):= \rho+h(t)-\rho t, \qquad 0\le t\le 1.
\]
Then $H(0)=\rho+h(0)=\rho$,
$H(1)=\rho+h(1)-\rho=h(1)=W(1)$,
\be \label{HW}
||H-W||_1\le ||h-W||_1 +|\rho| \max_{0\le t\le 1}|1-t| \le \delta+|\rho|,
\ee
and
\begin{eqnarray} \nonumber
|H|_1^2&=& \int_0^1 H'(t)^2 dt = \int_0^1(h'(t)-\rho)^2 dt
\\ \nonumber
&=&
\int_0^1 h'(t)^2 dt +\rho^2 -2\rho \int_0^1 h'(t) dt
\\ \label{H1}
&=&
|h|_1^2 +\rho^2 -2\rho (h(1)-h(0))
= I_W^0(1,\delta)^2 +\rho^2 -2\rho W(1).
\end{eqnarray}
Now we pass to the lower bound for $I_W(T, r)$. Let us fix
$r,\delta,T$ and produce an approximation for $W$ on
$[0,T+1]$ with the fixed end. First, let $\widehat h (t), 0\le t\le T$,
be the taut string at which $I_W(T,r)$ is attained. The end point
is not fixed, thus $\rho:=\widehat h(T)-W(T)$ need not vanish.
Nevertheless we still have
\[
|\rho|\le ||\widehat h-W||_T\le r.
\]
Now we approximate the auxiliary Wiener process
\[
\widetilde W_T(s):=W(T+s)-W(T), \qquad 0\le s\le 1,
\]
by the function $H(\cdot)$ defined above and let
\[
\widehat h(T+s):= W(T) + H(s), \qquad 0\le s\le 1.
\]
At the boundary point $T$ the first definition yields the value
$\widehat h(T)=W(T)+\rho$, the second definition yields
$\widehat h(T):= W(T) + H(0)$; the two values coincide by the definition of
function $H$.
Moreover,
\[
\widehat h(T+1)= W(T) + H(1)= W(T)+ \widetilde W_T(1)= W(T)+W(T+1)-W(T)=W(T+1).
\]
Therefore, the extended function $\widehat h(\cdot)$ provides an absolutely
continuous approximation with fixed end to $W$ on $[0,T+1]$.
Furthermore, by \eqref{HW} for $0\le s\le 1$ we have
\[
|W(T+s)-\widehat h(T+s)|
= |\widetilde W_T(s)+ W(T) - W(T) -H(s)|
= |\widetilde W_T(s)-H(s)| \le \delta+|\rho| \le \delta+r.
\]
Finally, by \eqref{H1},
\[
\int_T^{T+1} \widehat h'(t)^2 dt
=
\int_0^{1} H'(s)^2 ds
= |H|_1^2
= I_{\widetilde W_T}^0(1,\delta)^2 +\rho^2 -2\rho \widetilde W_T(1).
\]
We conclude that
\begin{eqnarray}
\nonumber
&& I_Wz(T+1,r+\delta)^2
\le |\widehat h|_{T+1}^2
\\ \label{lower}
&=& |\widehat h|_T^2+ \int_T^{T+1} \widehat h'(t)^2 dt
\le I_W(T,r)^2 + I_{{\tW_T}}^0(1,\delta)^2 +r^2 -2\rho \widetilde W_T(1)
\end{eqnarray}
and turn this relation into the desired bound
\[
I_W(T,r)^2
\ge I_Wz(T+1,r+\delta)^2 - I_{{\tW_T}}^0(1,\delta)^2 - r^2 +2\rho \widetilde W_T(1).
\]
Notice that $\rho$ and $\widetilde W_T(1)$ are independent and $\mathbb{E}\, \widetilde W_T(1)=0$.
By taking expectations we get the desired relation \eqref{lowerE}.
\end{proof}
\subsection{Concentration}
We first notice an almost obvious Lipschitz property of the functionals under consideration.
\begin{prop}
For any $T,r>0$, any $w\in C[0,T], g\in AC[0,T]$ we have
\be \label{lip0}
\left| I_{w+g}^0(T,r)- I_w^0(T,r) \right|\le |g|_T
\ee
and
\be \label{lip}
\left| I_{w+g}(T,r)- I_w(T,r) \right|\le |g|_T
\ee
\end{prop}
It is remarkable that the Lipschitz constant on the right hand side does not depend
on $r$ or $T$.
\begin{proof} Let $h$ be the taut string at which $I_w^0(T,r)$ is attained. Then the function
$\widehat h:= h+g$ satisfies the boundary conditions $\widehat h(0)=(w+g)(0)$,
$\widehat h(T)=(w+g)(T)$ as well as
\[
||\widehat h-(w+g)||_T = ||(h+g)-(w+g)||_T = ||h-w||_T \le r.
\]
Therefore,
\[
I_{w+g}^0(T,r) \le |\widehat h|_T \le |h|_T + |g|_T = I_{w}^0(T,r) + |g|_T.
\]
By applying the latter inequality to $\tilde w:=w+g$ and $\tilde g:=-g$ in place of
$w$ and $g$ we obtain
\[
I_{w}^0(T,r) \le I_{w+g}^0(T,r) + |g|_T,
\]
and \eqref{lip0} follows. The proof of \eqref{lip} is exactly the same.
\end{proof}
In the rest of this subsection the parameters $T$ and $r$ are fixed, and we drop them from the notation,
thus writing $I_Wz$ instead of $I_Wz(T,r)$, etc. Let $m^0$ be a median for the random
variable $I_Wz$. The famous concentration inequality for Lipschitz functionals of Gaussian random
vectors (see \cite[Section 12]{Lif}) asserts that for any $\rho>0$
\begin{eqnarray*}
{\mathbb{P}}\left( I_Wz\ge m^0+ \rho \right) &\le& {\mathbb{P}}\left( N\ge \rho \right),
\\
{\mathbb{P}}\left( I_Wz\le m^0- \rho \right) &\le& {\mathbb{P}}\left( N\ge \rho \right),
\end{eqnarray*}
where $N$ is a standard normal random variable.
It follows that
\begin{eqnarray*}
\operatorname{Var} I_Wz &=&\inf_{y} \mathbb{E}\,(I_Wz-y)^2 \le \mathbb{E}\,(I_Wz-m^0)^2
=2\int_0^\infty \rho \, {\mathbb{P}}(|I_Wz-m^0|\ge \rho) \, d\rho
\\
&\le& 2\int_0^\infty \rho \, {\mathbb{P}}(|N|\ge \rho) \, d\rho
= \mathbb{E}\, |N|^2 =1.
\end{eqnarray*}
Moreover,
\[
|\mathbb{E}\, I_Wz-m^0| \le \mathbb{E}\, |I_Wz-m^0| \le \sqrt{\mathbb{E}\,(I_Wz-m^0)^2}
\le 1
\]
and
\[
\mathbb{E}\, I_Wz \le \sqrt{\mathbb{E}\, [(I_Wz)^2]} = \sqrt{[\mathbb{E}\, I_Wz]^2+ \operatorname{Var} I_Wz}
\le \sqrt{[\mathbb{E}\, I_Wz]^2+ 1} \le \mathbb{E}\, I_Wz +1.
\]
Finally, we infer
\be \label{m0D0}
m^0-1 \le \sqrt{\mathbb{E}\, [(I_Wz)^2]} \le m^0 +2.
\ee
We will also need that for any $q>0$
\begin{eqnarray} \nonumber
\mathbb{E}\, |I_Wz-m^0|^q
&=& q \int_0^\infty \rho^{q-1} \, {\mathbb{P}}(|I_Wz-m^0|\ge \rho) \, d\rho
\\ \label{Imzmean}
&\le& q \int_0^\infty \rho^{q-1} \, {\mathbb{P}}(|N|\ge \rho) \, d\rho
= \mathbb{E}\, |N|^q.
\end{eqnarray}
Similarly, for the median $m$ of $I_W$ we obtain
\[
m-1 \le \sqrt{\mathbb{E}\, [(I_W)^2]} \le m +2
\]
and
\be \label{Immean}
\mathbb{E}\, |I_W-m|^q \le \mathbb{E}\, |N|^q.
\ee
\section{Asymptotics}
\label{s:asymp}
\setcounter{equation}{0}
\subsection{Asymptotics of the second moments and medians}
Recall that
$D(T,r):=\mathbb{E}\, I_W(T,r)^2$ and $D^0(T,r):= \mathbb{E}\, I_Wz(T,r)^2$ are the second
moments. We prove the following.
\begin{prop}
There exists a constant ${\mathcal C}\in [0,\infty)$ such that
if $\tfrac{r}{\sqrt{T}}\to 0$, then
\begin{eqnarray}
\label{Dtoc2}
\frac{r^2} {T}\, D(T,r) \to {\mathcal C}^2,
\\
\label{Dtocz2}
\frac{r^2}{T} \, D^0(T,r) \to {\mathcal C}^2.
\end{eqnarray}
\end{prop}
\begin{proof}
In proving \eqref{Dtoc2}, the following sub-additivity property plays the key role.
For any $r,T_1,T_2>0$ we have
\be \label{add1}
I_Wz(T_1+T_2,r)^2 \le I_Wz(T_1,r)^2 + I_{{\widetilde W_{T_1}}}^0 (T_2,r)^2,
\ee
where $\widetilde W_{T_1}(s):=W(T_1+s)-W(T_1)$ is a Wiener process.
This means that we may approximate $W$ by taut strings with fixed
ends separately on the intervals $[0,T_1]$ and $[T_1, T_1+T_2]$
by gluing them at $T_1$ due to the fixed end condition
imposed on the first string.
Notice that $I_W(\cdot,r)$ does not possess such a nice subadditivity property.
By taking expectations in \eqref{add1}, we obtain
\be \label{add2}
D^0(T_1+T_2,r)\le D^0(T_1,r) + D^0(T_2,r).
\ee
Since $I_Wz(1,\varepsilon)$ is a decreasing random function w.r.t. argument
$\varepsilon$, the function $D^0(1,\varepsilon)=\mathbb{E}\,I_Wz(1,\varepsilon)^2$ is also decreasing
in $\varepsilon$. By the scaling argument \eqref{scal0} we observe that for
any fixed $r>0$
\be \label{scal2}
D^0(T,r)=D^0\left(1,\tfrac{r}{\sqrt{T}}\right)
\ee
is an increasing function w.r.t. the argument $T$.
Fix any $T_0>0$. By using monotonicity of $D^0(T,r)$ in $T$ and iterating
subadditivity \eqref{add2} we obtain
\begin{eqnarray*}
\limsup_{T\to \infty} \frac{D^0(T,r)}{T}
&=& \limsup_{k\to \infty} \max_{0\le \tau\le T_0}
\frac{D^0(kT_0+\tau,r)}{kT_0+\tau}
\\
&\le& \limsup_{k\to \infty} \frac{D^0((k+1)T_0,r)}{kT_0}
\\
&\le& \lim_{k\to \infty} \frac{(k+1) D^0(T_0,r)}{kT_0}
= \frac{D^0(T_0,r)}{T_0}.
\end{eqnarray*}
By optimizing over $T_0$ we find
\[
\limsup_{T\to \infty} \frac{D^0(T,r)}{T}
\le
\inf_{T>0} \frac{D^0(T,r)}{T}
\le \liminf_{T\to \infty} \frac{D^0(T,r)}{T}\ .
\]
It follows that there exists a finite limit
\[
\lim_{T\to \infty} \frac{D^0(T,r)}{T} = \inf_{T>0} \frac{D^0(T,r)}{T} =: C_r.
\]
By using the scaling \eqref{scal2} we find the limit
\begin{eqnarray*}
{\mathcal C}^2 &:=& \lim_{\varepsilon\to 0} \varepsilon^2 D^0(1,\varepsilon)
\\
&=&
r^2 \lim_{T\to \infty} \frac{D^0(T,r)}{T} = C_r \, r^2.
\end{eqnarray*}
Now the relation \eqref{Dtocz2} with varying $r$ follows by
another application of the scaling argument \eqref{scal2}.
Now we pass to \eqref{Dtoc2}. For fixed $r>0$ it follows from
\eqref{comp_simple} and \eqref{Dtocz2} that
\[
\limsup_{T\to \infty} \frac{D(T,r)}{T}
\le
\lim_{T\to \infty} \frac{D^0(T,r)}{T} = C_r = {\mathcal C}^2 \, r^{-2}.
\]
Conversely, for any fixed $\delta>0$ it follows from \eqref{lowerE} that
\[
\liminf_{T\to \infty} \frac{D(T,r)}{T}
\ge
\lim_{T\to \infty} \frac{D^0(T,r+\delta)}{T} = {\mathcal C}^2 \, (r+\delta)^{-2}.
\]
By letting $\delta\to 0$ we infer
\[
\lim_{T\to \infty} \frac{D(T,r)}{T}
= {\mathcal C}^2 \, r^{-2},
\]
which is \eqref{Dtoc2} for fixed $r$.
The case of varying $r$ in \eqref{Dtoc2} follows by the same scaling arguments
as above.
\end{proof}
\begin{remark}
We will show later in Subsection \ref{ss:olb} that ${\mathcal C}>0$.
\end{remark}
We may complement the convergence of the second moments with the convergence of medians.
\begin{cor}
Let $ m^0(T,r)$, resp. $m(T,r)$, be a median of $I_Wz(T,r)$,
resp. $I_W(T,r)$. If $\tfrac{r}{\sqrt{T}}\to 0$, then
\begin{eqnarray}
\label{mtoc}
\frac{r}{\sqrt{T}} \ m(T,r) \to {\mathcal C},
\\
\label{mtocz}
\frac{r}{\sqrt{T}} \ m^0(T,r) \to {\mathcal C}.
\end{eqnarray}
\end{cor}
\begin{proof} Indeed, \eqref{m0D0} reads
\[
m^0(T,r)-1 \le \sqrt{D^0(T,r)} \le m^0(T,r) +2.
\]
Therefore, \eqref{mtocz} follows immediately from \eqref{Dtocz2}.
Relation \eqref{mtoc} follows from \eqref{Dtoc2} in the same way.
\end{proof}
\subsection{ $L_q$-convergence}
\label{ss:asym2}
{\bf Proof of Theorem \ref{t:mean}. }
Let $q>0$. We have to prove that
if $\tfrac{r}{\sqrt{T}}\to 0$, then
\begin{eqnarray}
\label{Itocmean}
\frac{r}{\sqrt{T}} \ I_W(T,r) \stackrel{L_q}{\longrightarrow} {\mathcal C},
\\
\label{Itoczmean}
\frac{r}{\sqrt{T}} \ I_Wz(T,r) \stackrel{L_q}{\longrightarrow} {\mathcal C}.
\end{eqnarray}
In view of \eqref{mtocz} the proof of \eqref{Itoczmean} reduces to
\[
\frac{r}{\sqrt{T}} \left( I_Wz(T,r) - m^0(T,r) \right) \stackrel{L_q}{\longrightarrow} 0.
\]
Indeed by \eqref{Imzmean} we have
\[
\left( \frac{r}{\sqrt{T}} \right)^q \mathbb{E}\, |I_Wz(T,r)-m^0(T,r)|^q
\le \left(\frac{r^2}{T} \right)^{q/2} \mathbb{E}\, |N|^q \to 0
\]
and \eqref{Itoczmean} follows.
Relation \eqref{Itocmean} follows from \eqref{mtoc} and \eqref{Immean}
in the same way.
$\Box$
\subsection{ Almost sure convergence}
\label{ss:asym3}
{\bf Proof of Theorem \ref{t:alsur}. }
For any fixed $r>0$, when $T\to\infty$, we must prove
\begin{eqnarray}
\label{tocas2}
\frac{r} { T^{1/2}}\, I_W(T,r) \stackrel{\textrm{a.s.}}{\longrightarrow} {\mathcal C},
\\
\label{toczas2}
\frac{r} { T^{1/2}}\, I_Wz(T,r) \stackrel{\textrm{a.s.}}{\longrightarrow} {\mathcal C}.
\end{eqnarray}
Consider first an exponential subsequence $T_k:=a^k$ with arbitrary fixed
$a>1$. By moment estimate \eqref{Immean} and Chebyshev inequality, for any $\varepsilon>0$
we have
\begin{eqnarray*}
\sum_{k=1}^\infty {\mathbb{P}}\left( T_k^{-1/2} \, |I_W(T_k,r)- m(T_k,r)| >\varepsilon\right)
&\le&
\varepsilon^{-q} \, \sum_{k=1}^\infty T_k^{-q/2} \mathbb{E}\, |I_W(T_k,r)- m(T_k,r)|^q
\\
&\le&
\varepsilon^{-q} \mathbb{E}\, |N|^q \sum_{k=1}^\infty T_k^{-q/2} <\infty.
\end{eqnarray*}
Borel--Cantelli lemma yields
\[
\lim_{k\to \infty} T_k^{-1/2} \left(I_W(T_k,r)- m(T_k,r)\right) =0 \qquad \textrm{a.s.}
\]
Taking the convergence of medians \eqref{mtoc} into account, we obtain
\[
\lim_{k\to \infty} \frac{r} {T_k^{1/2}} \ I_W(T_k,r) ={\mathcal C} \qquad \textrm{a.s.}
\]
Since the function $I_W(\cdot,r)$ is non-decreasing, for any $T\in [T_k,T_{k+1}]$
we have the chain
\[
\frac{r I_W(T_k,r)}{(aT_k)^{1/2}}
= \frac{r I_W(T_k,r)}{T_{k+1}^{1/2}}
\le \frac{r I_W(T,r)}{T^{1/2}}
\le \frac{r I_W(T_{k+1},r)}{T_{k}^{1/2}}
= \frac{r I_W(T_{k+1},r)}{(T_{k+1}/a)^{1/2}}.
\]
It follows that
\[
a^{-1/2} {\mathcal C} \le \liminf_{T\to \infty} \frac{r} {T^{1/2}} \ I_W(T,r)
\le \limsup_{T\to \infty} \frac{r} {T^{1/2}} \ I_W(T,r)
\le a^{1/2} {\mathcal C} \qquad \textrm{a.s.}
\]
By letting $a\searrow 1$ we obtain \eqref{tocas2}.
The inequality \eqref{comp_simple} yields now the lower bound in \eqref{toczas2},
namely,
\[
\liminf_{T\to \infty} \frac{r} {T^{1/2}} \ I_Wz(T,r)
\ge
\lim_{T\to \infty} \frac{r} {T^{1/2}} \ I_W(T,r)
= {\mathcal C} \qquad \textrm{a.s.}
\]
The proof of the upper bound in \eqref{toczas2} requires more effort
because the monotonicity of $I_Wz(\cdot,r)$ is missing. By using \eqref{lower},
for any $r>0,\delta>0$ we have
\begin{eqnarray*}
\limsup_{T\to \infty} \frac{r^2} {T}\ I_Wz(T+1,r+\delta)^2
&\le& \limsup_{T\to \infty} \frac{r^2} {T} \left(
I_W(T,r)^2 + I_{{\tW_T}}^0(1,\delta)^2 +r^2 +2r |\widetilde W_T(1)|\right)
\\
&\le&
{\mathcal C}^2 + \limsup_{T\to \infty} \frac{r^2} {T} \,I_{{\tW_T}}^0(1,\delta)^2
+2r \limsup_{T\to \infty} \frac{r^2} {T} \, |\widetilde W_T(1)|.
\end{eqnarray*}
Now we show that both remaining limits vanish. Indeed, it is well known
that
\[
\limsup_{T\to \infty} \frac{|\widetilde W_T(1)|} {\sqrt{2\ln T}}
= \limsup_{T\to \infty} \frac{|W(T+1)-W(T)|} {\sqrt{2\ln T}} = 1,
\]
hence,
\[
\limsup_{T\to \infty} \frac{|\widetilde W_T(1)|}{T} = 0.
\]
Now write
\[
\limsup_{T\to \infty} \frac{I_{{\tW_T}}^0(1,\delta)^2} {T} =
\limsup_{k\to \infty} \sup_{k\le T\le k+1} \frac{I_{{\tW_T}}^0(1,\delta)^2} {k}
:= \limsup_{k\to \infty} \frac{V_k(W)^2} {k}\ ,
\]
where $V_k(W)$ are identically distributed random variables satisfying Lipschitz
condition due to \eqref{lip0}. Let $m$ be the common median of $V_k$.
By concentration inequality it follows that for any $x>0$
\[
{\mathbb{P}}\{ V_k(W)>m+x\} \le {\mathbb{P}}(N>x) \le \exp\{-x^2/2\}.
\]
Borel--Cantelli lemma yields now that
\[
\limsup_{k\to \infty} \frac{V_k(W)} {\sqrt{2\ln k}} \le 1.
\]
Hence,
\[
\limsup_{k\to \infty} \frac{V_k(W)^2} {k} =0.
\]
We conclude that
\[
\limsup_{T\to \infty} \frac{r^2} {T}\ I_Wz(T+1,r+\delta)^2
\le {\mathcal C}^2
\]
and by letting $\delta\to 0$ we are done with proving the upper
bound in \eqref{toczas2}.
$\Box$
\section{Quantitative estimates and algorithms}
\label{s:estimates}
\setcounter{equation}{0}
In this section we provide several theoretical lower and upper bounds
for ${\mathcal C}$.
\subsection{Isoperimetric and small deviation bounds}
\label{ss:gkbound}
This subsection closely follows the ideas of Griffin and Kuelbs \cite{GK}.
Let $c>0$. Then for any $\varepsilon>0$ we have
\[
{\mathbb{P}}\left(\varepsilon \, I_W(1,\varepsilon)\ge c \right)
= {\mathbb{P}}\left( I_W(1,\varepsilon)\ge c\, \varepsilon^{-1} \right)
= {\mathbb{P}}\left( W \not\in \varepsilon U + c\, \varepsilon^{-1} K \right)
\]
where $U:=\{x: ||x||_1\le 1\}$ and $K:=\{h: |h|_1\le 1\}$. According to the
Gaussian isoperimetric inequality (cf. \cite{Bor, STs}, or e.g. \cite[Section 11]{Lif}),
\[
{\mathbb{P}}\left( W\not \in \varepsilon U + c\, \varepsilon^{-1} K \right) \le
1- \Phi\left( c\, \varepsilon^{-1} + \Phi^{-1}\left({\mathbb{P}}(W\in\varepsilon U)\right) \right),
\]
where $\Phi(\cdot)$ is the distribution function of the standard normal law.
It is well known that $\Phi^{-1}(p)\sim -\sqrt{2|\ln p|}$, as $p\to 0$.
On the other hand, by the classical small deviation estimate,
following from the Petrovskii formula of the distribution of $||W||_1$ (cf.
\cite{Ptr} or e.g. \cite[Section 18]{Lif})
\[
\ln {\mathbb{P}}(W\in\varepsilon U ) = \ln {\mathbb{P}}(||W||_1\le\varepsilon)
\sim -\frac{\pi^2}{8} \ \varepsilon^{-2} , \qquad \textrm{as } \varepsilon\to 0.
\]
Hence,
\[
\Phi^{-1}\left({\mathbb{P}}(W\in\varepsilon U)\right)
\sim -\frac{\pi}{2} \ \varepsilon^{-1} , \qquad \textrm{as } \varepsilon\to 0.
\]
It follows that
\[
{\mathbb{P}}\left( W\not \in \varepsilon U + c \, \varepsilon^{-1} K \right)
\le 1- \Phi\left( c \, \varepsilon^{-1} -\frac{\pi}{2} \, \varepsilon^{-1} (1+o(1)) \right)
\to 0, \qquad \textrm{as } \varepsilon\to 0,
\]
whenever $c> \frac{\pi}{2}$. Since $\varepsilon\, I_W(1,\varepsilon)\stackrel{\mathbb{P}}{\to} {\mathcal C}$ by \eqref{toc1},
we end up with the bound
\[
{\mathcal C} \le \frac{\pi}{2}\ .
\]
\subsection{Free knot approximation: constructive approach}
Here we provide a more constructive approach to building strings having
the right order of energy and properly approximating Wiener process.
Let $\varepsilon>0$ and let $W(s)$, $0\le s\le 1$, be a Wiener process. Consider
a sequence of stopping times $\tau_j$ defined by $\tau_0:=0$ and
\[
\tau_j:=\inf\{t\ge \tau_{j-1}:\, |W(t)-W(\tau_{j-1})|\ge \varepsilon/2\},\qquad j\ge 1.
\]
By continuity of $W$ we clearly have
\[
|W(\tau_j)-W(\tau_{j-1})| = \frac{\varepsilon}{2} \ .
\]
Let $g(\cdot)$ be the linear interpolation of $W(\cdot)$ built upon the knots
$(\tau_j,W(\tau_j))$. We stress that the knots are random, since they depend on
the process trajectory $W(\cdot)$. This randomness is typical for {\it free
knot approximation}, cf. \cite{Cr1,Cr2}.
We have a good approximation of $W$ by $g$ in the uniform norm, since for any
$t\in [\tau_{j-1}, \tau_j]$ it is true that
\begin{eqnarray*}
|W(t)-W(\tau_{j-1})| &\le& \frac{\varepsilon}{2}\ ,
\\
|g(t)- W(\tau_{j-1})| &\le& |W(\tau_j)-W(\tau_{j-1})| = \frac{\varepsilon}{2}\ ,
\end{eqnarray*}
hence
\be \label{gW}
||g-W||_1\le \varepsilon.
\ee
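For illustration, this construction is straightforward to mimic on a finely sampled trajectory.
The following sketch (in Python with NumPy; it is not used in the proofs, and the exit times
are only approximated on the sampling grid) extracts the knots and the interpolating function $g$.
\begin{verbatim}
import numpy as np

def free_knot_approximation(t, W, eps):
    # Approximate knots (tau_j, W(tau_j)): tau_j is the first grid point at
    # which the increment since the previous knot reaches eps/2 in modulus.
    knots_t, knots_w = [t[0]], [W[0]]
    for ti, wi in zip(t[1:], W[1:]):
        if abs(wi - knots_w[-1]) >= eps / 2:
            knots_t.append(ti)
            knots_w.append(wi)
    # g is the linear interpolation of W built upon the knots
    g = np.interp(t, knots_t, knots_w)
    return np.array(knots_t), np.array(knots_w), g

# example: a sampled Wiener path on [0,1]; print the number of knots and ||g-W||
n = 200000
t = np.linspace(0.0, 1.0, n + 1)
W = np.concatenate(([0.0], np.cumsum(np.sqrt(1.0 / n) * np.random.randn(n))))
tau, Wtau, g = free_knot_approximation(t, W, eps=0.1)
print(len(tau) - 1, np.max(np.abs(g - W)))
\end{verbatim}
On the grid the sup-distance may slightly exceed $\varepsilon$ because of the overshoot
at the discrete exit times, but this does not affect the order of magnitude.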
Let us now evaluate Sobolev norm $|g|_1$. First, we determine the required
number of knots $N_\varepsilon$ from the condition
\[
\tau_{N_\varepsilon-1} <1 \le \tau_{N_\varepsilon}.
\]
Then
\begin{eqnarray*}
|g|_1^2 &=& \int_0^1 g'(s)^2 ds
\\
&\le& \sum_{j=1}^{N_\varepsilon}
\frac{ \left( W(\tau_j)-W(\tau_{j-1})\right)^2} { \tau_{j} -\tau_{j-1}}
= \sum_{j=1}^{N_\varepsilon} \frac{(\varepsilon/2)^2}{\Delta_j} \,
\end{eqnarray*}
where $\Delta_j:= \tau_{j} -\tau_{j-1}$ are independent random variables, each distributed
as $(\varepsilon/2)^2\theta$, with
\[
\theta:=\inf\{t>0:\ |W(t)|=1\}.
\]
Therefore,
\be \label{sumtheta}
|g|_1^2\le \sum_{j=1}^{N_\varepsilon} \theta_j^{-1}
\ee
where $\theta_j$ are independent copies of $\theta$. Recall that
$E_1:=\mathbb{E}\,\theta<\infty$ and $E_2:=\mathbb{E}\,(\theta^{-1})<\infty$.
By applying the law of large numbers we show that $N_\varepsilon$ has order of
growth $\varepsilon^{-2}$, and that, by the same argument, the sum on the right hand side
of \eqref{sumtheta} has the same order. Indeed, let $c>0$. Then
\begin{eqnarray*}
{\mathbb{P}}\left(N_\varepsilon>c\, \varepsilon^{-2}\right)
&=& {\mathbb{P}}\left( \sum_{j=1}^{c\,\varepsilon^{-2}} \Delta_j<1\right)
= {\mathbb{P}}\left( (\varepsilon^2/4) \sum_{j=1}^{c\, \varepsilon^{-2}} \theta_j <1\right)
\\
&=& {\mathbb{P}}\left( \frac{1}{c\,\varepsilon^{-2}} \sum_{j=1}^{c\,\varepsilon^{-2}} \theta_j < \frac{4}{c}\right)
\to 0,
\end{eqnarray*}
whenever $\tfrac{4}{c}<E_1$.
Furthermore, for any $v>0$
\[
{\mathbb{P}}\left( \sum_{j=1}^{c\,\varepsilon^{-2}} \theta_j^{-1}
\ge \frac{v^2}{\varepsilon^2}\right)
= {\mathbb{P}}\left( \frac{1}{c\,\varepsilon^{-2}}
\sum_{j=1}^{c\,\varepsilon^{-2}} \theta_j^{-1} \ge \frac{v^2}{c}\right)
\to 0,
\]
whenever $v^2>c\, E_2$.
By \eqref{gW}, we have $I_W(1,\varepsilon)\le |g|_1$. Therefore, by using
\eqref{sumtheta} and subsequent estimates, we have
\begin{eqnarray*}
{\mathbb{P}}\left(\varepsilon I_W(1,\varepsilon) \ge v \right) &\le& {\mathbb{P}}\left(\varepsilon |g|_1 \ge v \right)
= {\mathbb{P}}\left( |g|_1^2 \ge v^2\varepsilon^{-2} \right)
\\
&\le& {\mathbb{P}}\left( \sum_{j=1}^{N_\varepsilon} \theta_j^{-1} \ge v^2\varepsilon^{-2} \right)
\\
&\le& {\mathbb{P}}\left(N_\varepsilon>c\, \varepsilon^{-2}\right)
+ {\mathbb{P}}\left( \sum_{j=1}^{c\,\varepsilon^{-2}} \theta_j^{-1} \ge \frac{v^2}{\varepsilon^2}\right).
\end{eqnarray*}
It follows that
\[
{\mathbb{P}}\left(\varepsilon I_W(1,\varepsilon) \ge v \right) \to 0, \qquad \textrm{as } \varepsilon\to 0,
\]
whenever $\tfrac{4}{c}<E_1$ and $v^2>c\, E_2$. By letting $ v^2 \searrow c\,E_2$,
$c\searrow \tfrac{4}{E_1}$, we obtain
\[
{\mathbb{P}}\left(\varepsilon I_W(1,\varepsilon) \ge x \right) \to 0, \qquad \textrm{as } \varepsilon\to 0,
\]
whenever $x>2\sqrt{E_2/E_1}$. Since $\varepsilon\, I_W(1,\varepsilon)\stackrel{\mathbb{P}}{\to} {\mathcal C}$ by \eqref{toc1},
it follows that
\be \label{ce1e2}
{\mathcal C} \le 2\sqrt{E_2/E_1} .
\ee
It is of interest to calculate the constants $E_1$ and $E_2$ in this bound. By Wald identity,
\[
1=\mathbb{E}\, W(\theta)^2 = \mathbb{E}\, \theta= E_1.
\]
Next, let $M_t:=\sup_{0\le s\le t} |W(s)|$. Then for any $r>0$
\[
{\mathbb{P}}(\theta^{-1}\ge r)= {\mathbb{P}}(\theta\le r^{-1}) = {\mathbb{P}}( M_{r^{-1}}\ge 1)
= {\mathbb{P}}( r^{-1/2}M_{1}\ge 1) = {\mathbb{P}}(M_{1}^2 \ge r).
\]
Therefore, $\theta^{-1}$ and $M_1^2$ are equidistributed.
The distribution of $M_1$ is still inconvenient for calculations. However, it
is convenient to work with $M_\tau$, where $\tau$ is a standard exponential random variable
independent of $W$. Indeed, by \cite[Formula 1.15.2]{BS} we have for any $a>0$
\[
{\mathbb{P}}(M_\tau\ge a)=[\cosh(\sqrt{2}a)]^{-1},
\]
hence, by using \cite[Formula 860.531]{Dw},
\begin{eqnarray*}
\mathbb{E}\, M_\tau^2 &=& \int_0^\infty 2\, a\, {\mathbb{P}}(M_\tau\ge a) \, da
= \int_0^\infty \frac{2a}{\cosh(\sqrt{2}a)}\ da
\\
&=& \int_0^\infty \frac{x}{\cosh(x)}\ dx \approx 1.832.
\end{eqnarray*}
On the other hand,
\[
\mathbb{E}\, M_\tau^2= \int_0^\infty \mathbb{E}\, M_t^2\ e^{-t}\, dt
= \int_0^\infty \mathbb{E}\, M_1^2 \, t \, e^{-t} dt
=\mathbb{E}\, M_1^2.
\]
We conclude that
\[
E_2= \mathbb{E}\,\theta^{-1}= \mathbb{E}\, M_1^2=\mathbb{E}\, M_\tau^2 \approx 1.832.
\]
Thus the numerical bound \eqref{ce1e2} becomes ${\mathcal C}\le 2\sqrt{1.832}\approx 2.7$.
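These numerical values are easy to reproduce. The following minimal sketch (Python with SciPy,
given for the reader's convenience only) evaluates $E_2$ and the resulting bound \eqref{ce1e2}.
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

E1 = 1.0                                          # Wald identity: E[theta] = 1
# E2 = E[theta^{-1}] = E[M_1^2]; the tail of the integrand beyond 50 is negligible
E2, _ = quad(lambda x: x / np.cosh(x), 0, 50)
print(E2)                     # approximately 1.832 (twice the Catalan constant)
print(2 * np.sqrt(E2 / E1))   # the bound above: C <= 2 sqrt(E2/E1), about 2.71
\end{verbatim}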
\subsection{Oscillation lower bound}
\label{ss:olb}
Fix an arbitrary $x>0$ (to be optimized later on). Let $n$ be a positive integer
and denote $\varepsilon:= x n^{-1/2}$.
Let us split the interval $[0,1]$ into $n$ intervals
$\Delta_j:=[j/n, (j+1)/n]$ of length $n^{-1}$. Let
\[
Y_j:= \left( \max_{s\in \Delta_j} W(s) - \min_{t\in \Delta_j} W(t) -2\varepsilon\right)_+
= \left(W(s_j)-W(t_j)-2\varepsilon\right)_+
\]
where $s_j$, $t_j$ are the points where the maximum and the minimum of $W$ are attained.
Notice that by the standard properties of the Wiener process (self-similarity, independence
and stationarity of increments) the variables $Y_j$
are independent, each distributed as $n^{-1/2} \, Y_x$, where
\[
Y_x:= \left( \max_{0\le s \le 1} W(s) - \min_{0\le t\le 1} W(t) -2x \right)_+ \ge 0.
\]
Take any function $h\in C[0,1]$ such that $||h-W||_1\le \varepsilon$. We have
\begin{eqnarray*}
h(s_j)-h(t_j) &\ge& W(s_j)-\varepsilon - (W(t_j)+\varepsilon) = W(s_j) -W(t_j)- 2\varepsilon;
\\
|h(s_j)-h(t_j)| &\ge& \left( W(s_j) -W(t_j)- 2\varepsilon \right)_+ =Y_j.
\end{eqnarray*}
Furthermore, by H\"older inequality,
\begin{eqnarray*}
|h|_1^2 &=& \int_0^1 h'(t)^2 dt =\sum_{j=0}^{n-1} \int_{\Delta_j} h'(t)^2 dt
\\
&\ge& \sum_{j=0}^{n-1} \int_{s_j}^{t_j} h'(t)^2 dt
\ge \sum_{j=0}^{n-1} \frac{ \left(\int_{s_j}^{t_j} |h'(t)| dt\right)^2}
{|s_j-t_j|}
\\
&\ge& \sum_{j=0}^{n-1} \frac{ \left| h(s_j)-h(t_j)\right|^2}
{|s_j-t_j|}
\ge n \sum_{j=0}^{n-1} Y_j^2 = \sum_{j=0}^{n-1} \ [Y^{(j)}]^2,
\end{eqnarray*}
where $Y^{(j)}$ are i.i.d. copies of $Y_x$.
It follows that
\[
I_W(1,\varepsilon)^2
\ge \sum_{j=0}^{n-1}\ [Y^{(j)}]^2 ,
\]
thus
\[
\varepsilon^2 I_W(1,\varepsilon)^2
\ge \varepsilon^2 \sum_{j=0}^{n-1}\, [Y^{(j)}]^2 =
x^2 n^{-1} \sum_{j=0}^{n-1}\ [Y^{(j)}]^2
\]
Since $\varepsilon^2\, I_W(1,\varepsilon)^2\stackrel{\mathbb{P}}{\to} {\mathcal C}^2$ by \eqref{toc1}, and
by the law of large numbers
$n^{-1} \sum_{j=0}^{n-1} \, [Y^{(j)}]^2\stackrel{\mathbb{P}}{\to} \mathbb{E}\, Y_x^2$,
we infer that
\[
{\mathcal C}^2\ge x^2 \mathbb{E}\, Y_x^2 >0.
\]
Let us explore what this means numerically. Recall that
$Y_x=(R-2x)_+$, where $R$ is the range of the Wiener process on the unit interval of time.
According to \cite[Formula 1.15.4(1)]{BS}, $R$ has the following distribution function,
\[
{\mathbb{P}}(R\le y) =1 +4\sum_{k=1}^\infty (-1)^k \, \mathrm{erfc}\left(\frac{ky}{\sqrt{2}}\right),
\]
where $\mathrm{erfc}(x):=\tfrac{2}{\sqrt{\pi}}\int_x^\infty e^{-u^2} du$.
It follows that $R$ has a density
\[
p_R(y) = 4 \sqrt{2/\pi} \sum_{k=1}^\infty (-1)^{k+1} k^2 \exp\left\{-k^2y^2/2\right\}.
\]
Then
\begin{eqnarray*}
\mathbb{E}\, Y_x^2 &=& \int_{2x}^\infty (y-2x)^2 p_R(y) dy
= 4 \sqrt{2/\pi} \sum_{k=1}^\infty (-1)^{k+1} k^2
\int_{2x}^\infty (y-2x)^2 \exp\left\{-k^2y^2/2\right\} dy
\\
&=& 4 \sqrt{2/\pi} \sum_{k=1}^\infty (-1)^{k+1}
\left[ (4k x^2 +\tfrac 1k) \sqrt{2\pi}\ \widehat{\Phi}(2kx)-2x \exp\{-2k^2x^2\} \right],
\end{eqnarray*}
where $\widehat{\Phi}(\cdot)$ stands for the tail of the standard normal distribution.
The series decreases so rapidly in $k$ that essentially only the first term $k=1$ matters.
Numerical optimization in $x$ gives the best value near $x\approx 0.5$,
where we obtain $x^2 \mathbb{E}\, Y_x^2\approx 0.145$, thus
\[
{\mathcal C}\ge x \sqrt{\mathbb{E}\, Y_x^2} \approx 0.381.
\]
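The numerical optimization mentioned above is elementary. A possible sketch (Python with
NumPy/SciPy; the truncation level of the series is ours and is more than sufficient for the
displayed accuracy) reproducing the reported values is the following.
\begin{verbatim}
import numpy as np
from scipy.special import erfc

def normal_tail(z):                      # tail of the standard normal law
    return 0.5 * erfc(z / np.sqrt(2.0))

def EY2(x, kmax=50):                     # E[Y_x^2] via the series above
    k = np.arange(1, kmax + 1)
    terms = (4.0*k*x**2 + 1.0/k) * np.sqrt(2.0*np.pi) * normal_tail(2.0*k*x) \
            - 2.0*x*np.exp(-2.0*k**2*x**2)
    return 4.0*np.sqrt(2.0/np.pi) * np.sum((-1.0)**(k + 1) * terms)

xs = np.linspace(0.05, 1.5, 300)
vals = xs**2 * np.array([EY2(x) for x in xs])
i = np.argmax(vals)
print(xs[i], vals[i], np.sqrt(vals[i]))  # about 0.5, 0.145 and 0.381
\end{verbatim}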
\section{Markovian pursuit}
\label{s:pursuit}
\setcounter{equation}{0}
In practice, it is often necessary to build an approximation to the process
adaptively, i.e. in real time, because as the parameter (viewed as time)
advances, we only know the trajectory of the approximated process
(the Wiener process in our case) up to the current instant.
In this setting, the approximation problem becomes a {\it pursuit problem}.
One may think of a person walking with a dog along a one-dimensional path.
The Wiener process represents the dog's disordered walk, while the person
tries to keep the dog on a leash of given length
by moving with finite speed and expending minimal energy per unit
of time.
We will construct an absolutely continuous approximating process $x(t)$
such that
\[
|x(t)- W(t)| \le 1, \qquad t\ge 0.
\]
In view of the Markov property of the Wiener process, a reasonable strategy is to
determine the derivative $x'(t)$ as a function of the distance to the target,
\[
x'(t) := b(x(t)-W(t))
\]
where the odd function $b(\cdot)$, defined on $(-1,1)$, explodes to $-\infty$ at $1$
and to $+\infty$ at $-1$, thus preventing $x(t)- W(t)$ from exiting the
corridor $[-1,1]$.
In this section we will find an optimal speed function $b(\cdot)$. As a by-product,
this will give us another upper bound on ${\mathcal C}$.
Let $X(t):= x(t)-W(t)$. In our setting, $X(\cdot)$ is a diffusion process satisfying
a simple SDE
\[
dX= b(X)dt - dW(t).
\]
Recall some important facts about univariate diffusions, cf. \cite[Chapter IV.11]{B},
\cite[Chapter 2]{BS}.
Let
\[
B(x):= 2 \int^x b(s) ds.
\]
We stress that $B$ is defined as an indefinite integral, i.e. up to an additive constant.
Let
\[
p_0(x) := e^{B(x)}.
\]
If
\be \label{entranceboard}
\int_{-1} \frac{dx}{p_0(x)} = \int^1 \frac{dx}{p_0(x)} =\infty,
\ee
then the boundaries $\pm 1$ belong to the {\it entrance} type and do not belong to the {\it exit}
type in Feller classification. This means that diffusion remains inside the corridor $[-1,1]$ all
along its infinite horizon of life.
Moreover, the normalized density
\[
p(x) := Q^{-1} p_0(x),
\]
where $Q:=\int_{-1}^{1} p_0(x) dx$ is the normalizing factor, is the density of the stationary
distribution of diffusion $X$ considered as a mixing Markov process.
We conclude that on large intervals of time $[0,T]$
\begin{eqnarray*}
\int_0^T x'(t)^2 dt &=& \int_0^T b(X(t))^2 dt \sim T \int_{-1}^1 b(x)^2 p(x) dx
\\
&=& T \int_{-1}^1 \left(\tfrac{\ln p}{2}\right)'(x)^2 p(x) dx
= \frac T4 \int_{-1}^1 \frac{p'(x)^2}{p(x)}\, dx
:= \frac{I(p)}{4} \ T,
\qquad \textrm{as } T\to\infty,
\end{eqnarray*}
where, quite unexpectedly, {\it Fisher information} $I(p)$ shows up in the asymptotics.
The next step is to solve the variational problem
\[
\min\left\{ I(p) \ \big| \int_{-1}^1 p(x) dx = 1\right\}
\]
over the set of even densities concentrated on $[-1,1]$ and satisfying
\eqref{entranceboard}.
Although the solution is well known (see the references below), we recall
it here for completeness. For the Lagrange variation (with one undetermined
multiplier) we have, for any smooth function $\delta(\cdot)$ supported by $(-1,1)$,
\begin{eqnarray*}
&&I(p+\delta) - \lambda^2 \int_{-1}^1(p+\delta)(x)dx - \left(I(p)-\lambda^2 \int_{-1}^1 p(x)dx \right)
\\
&=& \int_{-1}^1 \left[ \frac{(p'(x)+\delta'(x))^2}{p(x)+\delta(x)}- \frac{p'(x)^2}{p(x)}
-\lambda^2\delta(x)\right] dx
\\
&\sim& \int_{-1}^1 \left[ \frac{2p'(x)\delta'(x)}{p(x)}- \frac{p'(x)^2\delta(x)}{p(x)^2}
-\lambda^2\delta(x)\right] dx
\\
&=& \int_{-1}^1 \left[ -2 \left(\frac{p'}{p}\right)'(x)- \frac{p'(x)^2}{p(x)^2}
-\lambda^2\right] \delta(x) dx, \qquad \textrm{as } \delta\to 0.
\end{eqnarray*}
We obtain the variational equation
\[
2 \left(\frac{p'}{p}\right)'(x) + \frac{p'(x)^2}{p(x)^2}+\lambda^2 =0.
\]
By letting $\beta(x):=(\ln p)'(x)= \frac{p'}{p}(x)$, we have
$
2\beta'+\beta^2+ \lambda^2=0
$
which yields $\tfrac{d\beta}{\beta^2+\lambda^2}=\tfrac{-dx}{2}$ and
\[
\frac{1}{\lambda} \arctan (\beta/\lambda) = c- \frac{x}{2}.
\]
Since by symmetry $p'(0)=0$, we have $\beta(0)=0$, thus $c=0$ and
\[
\frac{1}{\lambda} \arctan (\beta/\lambda) = - \frac{x}{2},
\]
or, equivalently,
\[
\beta(x) = -\lambda \tan(\lambda x/2).
\]
Furthermore, since $p(\cdot)$ should vanish on the boundary $\pm 1$, $\beta$ should explode, i.e.
$\beta(\pm 1)= \mp\infty$, we obtain $\lambda=\pi$. Hence,
\[
\beta(x) = - \pi \tan(\pi x/2).
\]
Next,
\begin{eqnarray*}
\ln p(x) &=& \int (\ln p)'(x) dx = \int \beta(x) dx
\\
&=& - \pi \int \tan(\pi x/2) dx = c + 2 \ln\cos(\pi x/2).
\end{eqnarray*}
Therefore, the density of the optimal invariant measure is
\[
p(x)= c_1 \, \cos^2(\pi x/2).
\]
Since
\[
\int_{-1}^1 \cos^2(\pi x/2)\, dx = \frac 12 \int_{-1}^1 \left(1+ \cos(\pi x)\right) dx =1,
\]
we have $c_1=1$, thus
\[
p(x)= \cos^2(\pi x/2).
\]
This distribution, as a minimizer of Fisher information on an interval, can be also found
in \cite{Hub,Lev}, \cite[p.63]{Shev}. Luckily for us, this $p(\cdot)$ satisfies
\[
\int^1 \frac{dx}{p(x)} = \int_{-1} \frac{dx}{p(x)} =\infty,
\]
so that \eqref{entranceboard} is satisfied, and we indeed have entrance boundaries
for the optimal regime.
It remains now to calculate the optimal Fisher information,
\begin{eqnarray*}
I(p) &=& \int_{-1}^1 \frac{p'(x)^2}{p(x)} \ dx = \int_{-1}^1 \beta(x)^2 p(x)\ dx
\\
&=& \pi^2 \int_{-1}^1 \tan^2(\pi x/2)\ \cos^2(\pi x/2) dx
= \pi^2 \int_{-1}^1 \sin^2(\pi x/2) dx
\\
&=&\pi^2\ .
\end{eqnarray*}
The optimum is attained at the speed strategy
\[
b(x) =\frac{\beta(x)}{2} = - \frac{\pi}{2}\ \tan(\pi x/2);
\]
see an example of its implementation in Fig. \ref{fig:mp}.
{\it We conjecture that the provided algorithm is the optimal one
in the entire class of adaptive algorithms.}
\begin{center}
\begin{figure}
\caption{Optimal Markovian pursuit accompanying Wiener process}
\label{fig:mp}
\end{figure}
\end{center}
As a by-product we get a bound for the non-adaptive asymptotic
constant,
\[
{\mathcal C} \le \frac{I(p)^{1/2}}{2} = \frac{\pi}{2}\ ,
\]
surprisingly the same as the bound obtained in Subsection
\ref{ss:gkbound} by a completely different method.
\section{Some simulation results}
\label{s:simulation}
\setcounter{equation}{0}
\begin{center}
\begin{figure}
\caption{Histogram for the taut string energy constant ${\mathcal C}$.}
\label{fig:hts}
\end{figure}
\end{center}
We simulate a path of the Wiener process $W(t)$ on the interval $\left[0,T\right]$ with $N+1$ equally spaced knots $\left(iT/N,W(iT/N)\right)$, $i=0,1,\dots,N$. For each simulated path we consider the tube of radius $1$ and compute the discrete taut string with fixed end and the Markovian pursuit function.
\par Let us describe the computation of the discrete Markovian pursuit. The pursuit equation
\begin{equation*}
h'(t)=-\frac{\pi}{2} \tan \left(\frac{\pi}{2}\left(h(t)-W(t)\right)\right)
\end{equation*}
is discretized with a backward finite difference method, which results in the equations
\begin{equation} \label{1}
\frac{h\left(\frac{iT}{N}\right)-h\left(\frac{(i-1)T}{N}\right)}{\frac{T}{N}}
=-\frac{\pi}{2}\tan\left(\frac{\pi}{2}\left(h\left(\frac{(i-1)T}{N}\right)
-W\left(\frac{(i-1)T}{N}\right)\right)\right)
\end{equation}
for $i=1,...,N$ together with the initial condition $h(0)=0$. If $h(iT/N)$ is
close to the boundaries $W(iT/N)\pm 1$ we might experience numerical
instability at subsequent time points due to $\tan(\frac{\pi x}{2})
\rightarrow\pm\infty$ when $x\rightarrow\pm 1^{\mp}$. To avoid this, we constrain
$h(iT/N)$ to the interval
\begin{displaymath}
\left[W\left(\frac{iT}{N}\right)-0.99,W\left(\frac{iT}{N}\right)+0.99\right].
\end{displaymath}
If the computed value of $h(iT/N)$, given by (\ref{1}), is outside this interval
we set $h(iT/N)$ equal to the nearest endpoint of the interval.
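The resulting recursion, including the clamping just described, can be sketched as follows
(a minimal Python version given for illustration; it is not the code actually used for the
experiments reported below, and the clamping width $0.99$ is the one mentioned above).
\begin{verbatim}
import numpy as np

def markovian_pursuit(W, T):
    # W = (W(iT/N))_{i=0,...,N} is the sampled Wiener path; h(0) = 0
    N = len(W) - 1
    dt = T / N
    h = np.zeros(N + 1)
    for i in range(1, N + 1):
        # explicit step of the recursion above
        h[i] = h[i-1] - dt*(np.pi/2)*np.tan((np.pi/2)*(h[i-1] - W[i-1]))
        # clamp to [W(iT/N)-0.99, W(iT/N)+0.99] to avoid the blow-up of tan
        h[i] = min(max(h[i], W[i] - 0.99), W[i] + 0.99)
    return h
\end{verbatim}
With a sampled path $W$ (e.g. cumulative sums of independent centered Gaussian increments
of variance $T/N$), the function returns the knot values $h(iT/N)$.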
\par From the discrete taut string and Markovian pursuit functions, piecewise linear functions $h(t)$ on $\left[0,T\right]$ are constructed by joining consecutive pair of the computed knots
\begin{displaymath}
\left(iT/N,h(iT/N)\right), i=0,1,...,N
\end{displaymath}
with line segments. The normalized energy $T^{-1/2}|h|_T$ can then be computed according to
\begin{equation*}
\frac{|h|_T}{T^{1/2}}=\frac{1}{T^{1/2}} \left(\int_0^T h'(t)^2 dt \right)^{1/2}
= \frac{N^{1/2}}{T} \left(\sum^{N}_{i=1}\left(h\left(\frac{iT}{N}\right)
-h\left(\frac{(i-1)T}{N}\right)\right)^{2} \right)^{1/2}.
\end{equation*}
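In the same sketch notation as above, the normalized energy is obtained directly from the
increments of the computed knot values (again an illustration, assuming NumPy).
\begin{verbatim}
import numpy as np

def normalized_energy(h, T):
    # returns |h|_T / T^{1/2} for the piecewise linear function with knot values h
    N = len(h) - 1
    return (np.sqrt(N) / T) * np.sqrt(np.sum(np.diff(h)**2))
\end{verbatim}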
\par We simulated 3000 independent paths of the Wiener process on the interval $\left[0,T\right]$ with $T=1000$ and $N=1000000$. For this sample, the mean of $T^{-1/2}|h|_T$ for the taut string with fixed end was approximately $0.63$, see Fig. \ref{fig:hts} for the corresponding histogram. Using the same sample of paths, the mean of $T^{-1/2}|h|_T$ for the Markovian pursuit was approximately $1.62$, see Fig. \ref{fig:hmp}, which is reasonably close to the theoretical constant $\frac{\pi}{2}\approx 1.57$ corresponding to $T\rightarrow \infty$.
\begin{center}
\begin{figure}
\caption{Histogram for Markovian pursuit energy constant.}
\label{fig:hmp}
\end{figure}
\end{center}
\section{Some related problems}
\label{s:misc}
\setcounter{equation}{0}
By different reasons, taut strings and similar objects already appeared,
sometimes implicitly, in probabilistic problems. Therefore, it seems
reasonable to place our results into a historical context.
\subsection{Strassen's functional law of the iterated logarithm}
Strassen's functional law of the iterated logarithm \cite{Str,Lif} claims
that
\[
\limsup_{T\to \infty} \inf_{|h|_1\le 1}
\left\| \frac{W(\cdot T)}{\sqrt{2T\ln\ln T}} -h \right\|_1
=0
\qquad \textrm{a.s.}
\]
Grill and Talagrand \cite{Gr2,Tal} independently established the optimal convergence
rate in this law by proving that for some finite positive constants $c_1$,
$c_2$ it is true that
\[ c_1 <
\limsup_{T\to \infty} \ (\ln\ln T)^{2/3} \inf_{|h|_1\le 1}
\left\| \frac{W(\cdot T)}{\sqrt{2T\ln\ln T}} -h \right\|_1
< c_2
\qquad \textrm{a.s.}
\]
Due to scaling properties of the function $I_W(\cdot,\cdot)$, the latter statement
just means that
\begin{eqnarray*}
&&\limsup_{T\to\infty} \frac{I_W(T,c_1(2T)^{1/2}(\ln\ln T)^{-1/6})}{(2\ln\ln T)^{1/2}} >1,
\\
&&\limsup_{T\to\infty} \frac{I_W(T,c_2(2T)^{1/2}(\ln\ln T)^{-1/6})}{(2\ln\ln T)^{1/2}} <1.
\end{eqnarray*}
Grill \cite{Gr1} and Griffin and Kuelbs \cite{GK} showed a similar $\liminf$ result
asserting that for some $c_3>0$ and any $c_4>\tfrac{\pi}{8}$ it is true that
\[ c_3 <
\liminf_{T\to \infty} \ (\ln\ln T) \inf_{|h|_1\le 1}
\left\| \frac{W(\cdot T)}{\sqrt{2T\ln\ln T}} -h \right\|_1
< c_4
\qquad \textrm{a.s.}
\]
which means, in our notations, that
\begin{eqnarray*}
&&\liminf_{T\to\infty} \frac{I_W(T,c_3(2T)^{1/2}(\ln\ln T)^{-1/2})}{(2\ln\ln T)^{1/2}} >1,
\\
&&\liminf_{T\to\infty} \frac{I_W(T,c_4(2T)^{1/2}
(\ln\ln T)^{-1/2})}{(2\ln\ln T)^{1/2}} <1.
\end{eqnarray*}
We may conclude that the tubes relevant to the Strassen law are much larger
than ours. Accordingly, the respective minimal energy is much lower.
\subsection{$L_1$-optimizers: lazy functions}
By its definition, the taut string is the unique minimizer of
$\int_0^T h'(t)^2 dt$ among the functions whose graphs pass through
the corresponding tube. It is well known, however, that when both
endpoints are fixed, the taut string also is a minimizer for any
functional $\int_0^T \varphi(h'(t)) dt$ whenever $\varphi(\cdot)$
is a convex function. In the recent literature, much attention
was paid to the case $\varphi(x)=|x|$, i.e. to the minimization
of variation
\[
{\mathbb V}(h):= \int_0^T |h'(t)| dt,
\]
see \cite{LoMil,Mil}. Notice that since $|\cdot|$ is not a {\it strictly}
convex function, the corresponding variational problem typically has
{\it many} solutions. Moreover, since the variation is well defined not
only on
absolutely continuous functions, the natural functional domain for
optimization becomes wider. In \cite{Mil} another minimizer of ${\mathbb V}(h)$ is
described in detail, a so-called ``lazy function''. When possible,
this function remains constant; otherwise, it follows the boundary of
the tube. Notice that the lazy function need not be absolutely continuous;
it only has bounded variation.
For the case when the tube is constructed around a sample path
of the Wiener process, \cite{Mil} suggests a description of the lazy function
as the inverse of an appropriate subordinator. Although the taut string
and the lazy function both solve the same variational problem,
the relations between them are yet to be clarified.
\subsection{A related discrete applied problem}
We describe in this section an interesting discrete applied problem
coming from information transmission that turns out to be related
to the discrete taut string construction. This problem was actually
the initial motivation for our research.
Consider the following information transmission unit represented on
Fig. \ref{fig:channel}.
{\unitlength=0.4mm
\begin{center}
\begin{figure}
\caption{Information transmission unit.}
\label{fig:channel}
\end{figure}
\end{center}
}
We have the discrete time count: $j=1,2,3,\dots$. At each time
$j$ an amount of information $S_j$ enters the system and should
be transmitted through a channel. The channel's {\it transmission capacity}
$C_j$ varies upon the time (for example, the channel may be shared
with other tasks external to our information flow). We are interested in
the situation when the channel capacity is insufficient for transmission,
i.e. $S_j\ge C_j$. We may place a part of the excess information
into a {\it buffer} of given size $B$ and drop (lose) the remaining part.
Let $L_j$ denote the loss size. This variable remains under our
partial control, within the buffer size limitations.
Let $B_j$ denote the amount of information stocked in the buffer. One
necessarily has
\be \label{BjB}
0\le B_j\le B.
\ee
Given an increasing convex {\it penalty
function} $\varphi: [0,1] \to \mathbb{R}_+$, define the {\it penalty functional} by the formula
\[
F := \sum_{j=1}^n \varphi\left(\tfrac{L_j}{S_j}\right)\, S_j.
\]
Given $(S_j),(C_j)$, and $B$, we are interested in minimizing $F$ by controlling
the $L_j$. It is important to notice that a possible non-linearity of $\varphi(\cdot)$
is a natural feature, because a small loss of information, e.g. of graphical information,
is more likely to be repaired by interpolation methods than a large loss.
The operation of the system may be analyzed through the buffer balance equation.
We clearly have
\[
B_j = B_{j-1} +\left(S_j-C_j-L_j \right).
\]
Therefore,
\[
B_k = \sum_{j=1}^k \left(S_j-C_j\right) - \sum_{j=1}^k L_j.
\]
Now the buffer bounds \eqref{BjB} mean that
\[
\sum_{j=1}^k \left(S_j-C_j \right) -B
\le \sum_{j=1}^k L_j
\le \sum_{j=1}^k \left(S_j-C_j\right).
\]
In other words, the accumulated loss curve $\sum_{j=1}^k L_j$ must stay within a (random)
band of fixed width $B$, see Fig. \ref{fig:bb}. Note that in the picture we use the
{\it operational time}, i.e. the accumulated entrance flow $\sum_{j=1}^k S_j$, instead of
the usual time $j$.
Therefore the minimum of
\[
F=F(L) = \sum_{j=1}^n \varphi \left(\tfrac{L_j}{S_j}\right)\, S_j
= \int_0^{S} \varphi(L'(s))\, ds,
\]
where $S:=\sum_{j=1}^n S_j$,
is attained at the corresponding taut string.
The greedy FIFO strategy (``first in, first out''), which consists in
keeping the buffer full all the time, corresponds to the accumulated loss graph
going along the lower border of the admissible corridor. It is usually
far from optimal.
Assuming that the information excess $S_j-C_j$ forms a sequence of identically distributed
random variables, we arrive at the problem of constructing
a taut string accompanying sums of i.i.d. random variables with positive drift.
{\unitlength=0.4mm
\begin{center}
\begin{figure}
\caption{Transmission unit work graph.}
\label{fig:bb}
\end{figure}
\end{center}
}
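For completeness, the admissible corridor for the accumulated loss is immediate to compute
from the data. The following schematic sketch (Python; the inputs are arbitrary illustrative
numbers) returns the lower and upper envelopes between which the accumulated loss curve, and
hence the corresponding taut string, must stay.
\begin{verbatim}
import numpy as np

def loss_corridor(S, C, B):
    # bounds for the accumulated loss sum_{j<=k} L_j, k = 1,...,n
    excess = np.cumsum(np.asarray(S, float) - np.asarray(C, float))
    lower = np.maximum(excess - B, 0.0)  # losses are nonnegative, buffer size B
    upper = excess
    return lower, upper

low, up = loss_corridor(S=[5, 7, 6, 8], C=[4, 5, 6, 5], B=3.0)  # toy example
print(low, up)
\end{verbatim}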
\section{Final remarks}
\label{s:concl}
After evaluating the energy of taut strings accompanying the Wiener process,
many similar questions arise.
Within the same framework, it would be natural
to study more general functionals of the taut string by replacing the energy with functionals
$\int_0^T \varphi(h'(t))\, dt$ for a more or less general convex function
$\varphi$. Since in the long run the derivative of the accompanying taut string
seems to be close to an ergodic stationary process, characterized by
its invariant distribution, say $\mu$, it is natural to expect that an ergodic
theorem holds in the form
\[
\frac 1T \int_0^T \varphi(h'(t))\, dt \stackrel{\textrm{a.s.}}{\longrightarrow} \int_\mathbb{R} \varphi(x)\, \mu(dx)
\]
thus extending our Theorem \ref{t:alsur}.
It is natural to explore the energy and similar characteristics of the taut strings
accompanying other processes. The fractional Brownian motion is the first
obvious candidate, but in general, the class of processes with stationary increments
including non-Gaussian L\'evy processes seems to be a natural framework for
this extension. Notice that the energy we handled here has a special relation to
the Wiener process, because it coincides with the squared norm in the corresponding
reproducing kernel Hilbert space. This makes our proofs easier, but we hope that handling
the energy for other processes is still possible.
One can also modify the form of the tube that defines the required closeness between
the string and the process. For example, if we measure the distance between the string
and the process in the $L_2$-norm instead of the uniform one, then all calculations
become explicit, and the analogue of the constant ${\mathcal C}$ can be calculated precisely.
This will be the subject of a forthcoming publication.
{\bf Acknowledgement.}\ The authors are much indebted to Professor Natan
Kruglyak for providing strong motivation for this research and for
useful discussions.
They are also grateful to Z.~Kabluchko and E.~Schertzer for enlightening
comments.
The work of the first named author was supported by grants RFBR 13-01-00172
and SPbSU 6.38.672.2013.
\end{document}
\begin{document}
\title{A mean field model for the interactions between firms on the markets of their inputs}
\author{
Yves Achdou \thanks { Universit{\'e} de Paris Cité and Sorbonne Universit{\'e}, CNRS, Laboratoire Jacques-Louis Lions, (LJLL), F-75006 Paris, France, [email protected]}
\and
Guillaume Carlier\thanks{CEREMADE, Universit\'e Paris
Dauphine, PSL, Pl. de Lattre de Tassigny, 75775 Paris Cedex 16, FRANCE and INRIA-Paris, MOKAPLAN,
[email protected]},
\and
Quentin Petit \thanks{CEREMADE, Universit\'e Paris
Dauphine, PSL, and EDF R$\&$D, [email protected]}
\and
Daniela Tonon \thanks{Dipartimento di Matematica "Tullio Levi-Civita", Universit\`a degli Studi di Padova, via Trieste 63, 35121 Padova, Italy.
[email protected]}
}
\maketitle
\begin{abstract}
We consider an economy made of competing firms which are heterogeneous in their capital and use several inputs
for producing goods. Their consumption policy is fixed rationally by maximizing a utility and their capital cannot fall below a given threshold
(state constraint).
We aim at modeling the interactions between firms on the markets of the different inputs on the long term.
The stationary equilibria are described by a system of coupled non-linear differential equations:
a Hamilton-Jacobi equation describing the optimal control problem of a single atomistic firm;
a continuity equation describing the distribution of the individual state variable (the capital) in the population of firms;
the equilibria on the markets of the production factors. We prove the existence of equilibria under suitable assumptions.
\end{abstract}
\section{Introduction}
\label{sec:introduction}
We consider an economy made of competing firms which are heterogeneous in their capital, and use several inputs
for producing goods. These inputs, or factors of production, may include raw materials, energy, manpower, rented surface, etc.
We aim at modeling the interactions of the firms on the markets of the different inputs on the long term.
We make the following general assumptions:
\begin{itemize}
\item the economy is reduced to one sector of activity with a large number (in fact a continuum) of firms competing on the markets of inputs
\item the firms choose which amount of their capital is invested into production and which amount is consumed
(for retributing the owners). Their consumption policy is fixed rationally by maximizing a utility
\item the firms are identical in the sense that (1) two different firms with the same capital and quantities of inputs produce the same amounts of goods
(2) they have the same utility function
\item there is a state constraint: the capital of any firm must not fall below a given threshold, fixed to $0$ in the whole paper
\item for a given firm, all the others are indistinguishable and the firms interact only via the prices of the different inputs
\item a single firm has a negligible impact on the markets
\item equilibrium on the markets is reached when supply matches aggregate demand. Supply is assumed to be a given function of prices.
\item closure and creation of firms may happen. This will be modeled in what follows.
\end{itemize}
Because we are interested in long term tendencies, we aim at finding stationary equilibria. The outputs of our model will be
\begin{itemize}
\item the distribution of capital
\item the optimal investment/consumption policy of the firms given their capital
\item the unit prices of the different inputs
\end{itemize}
Our model falls into the wide class of mean field games.
The theory of mean field games ({\sl MFGs} for short), has been introduced and studied
in the pioneering works of J-M. Lasry and P-L. Lions~\cite{PLL-CDF,MR2269875,MR2271747,MR2295621},
and aims at studying deterministic or stochastic differential
games (Nash equilibria) as the number of agents tends to infinity.
It supposes that the rational agents are indistinguishable and individually have a negligible influence on the game,
and that each individual strategy is influenced by some averages of quantities depending on the states (or the controls) of the other agents.
The applications of MFGs are numerous, from economics to the study of crowd motion. For useful references on mean field games, see for example \cite{MR2762362,cardaliaguet2010, MR4214773,MR3268061}.
Our model will be summarized by a system of coupled non-linear differential equations:
(1) a Hamilton-Jacobi-Bellman equation describing the optimal control problem of a single atomistic firm;
(2) a continuity equation describing the distribution of the individual state variable (the capital) in the population of firms;
(3) the equilibria on the markets of the production factors.
The present model has some similarities with the time continuous Aiyagari-Bewley-Huggett models \cite{bewley,aiyagari, huggett}
studied in \cite{MR3268061,MR4365976}. In particular, they all lead to a better understanding of the
individual accumulation of capital/investment policy. In the present paper, a key aspect for proving the existence of equilibria is the regularity properties of the individual optimal policies.
The paper is organized as follows: the model, the main results and important examples are presented in Section \ref{sec:secGeneralities:model}. The mathematical results concerning the optimal control problem of a single firm given the prices of inputs are proved in Section \ref{sec:optim-contr-probl-1}. As already mentioned, the stress will be put on regularity properties of the solutions, which will play an important role in the remaining part of the paper. Then, the distribution of capital among the firms given the prices of inputs will be studied in Section \ref{sec:secGeneralities:fokk-planck-equat}: in particular, we will prove that under the assumptions made, the distribution is absolutely continuous with respect to Lebesgue measure. Finally, in Section \ref{sec:equilibrium}, we use the Brouwer topological degree in order to obtain the existence of equilibria.
For keeping the length of the paper reasonable, we have chosen not to discuss the numerical simulations that we have carried out for a model with two factors of production: the manpower and the surface rented by the firms. We refer to \cite{Petit2022phd} for a description of these simulations, a discussion of the results and comparisons with available statistics.
\section{The model and the main results}\label{sec:secGeneralities:model}
In what follows, we give more details and write down the different equations which summarize our model.
First, in paragraph \ref{sec:optim-contr-probl}, we address the strategy of a single firm given the prices of the inputs.
Second, in paragraph \ref{sec:distrib_capital}, we propose a model for the distribution of capital, supposing again that the prices of the inputs are given.
From the two steps above, we can deduce the aggregate demand for the different production factors.
Finally, the model is closed by matching the aggregate demand with the exogenous supply of production factors.
In the three steps mentioned above, we make some assumptions which allow us to prove the existence of a mean field equilibrium.
In subsection \ref{sec:import-exampl-util} below, we give examples in which these assumptions are satisfied.
In the following, we set ${\mathbb R}_+=[0,+\infty)$.
\subsection{The optimal control problem of a single firm given the prices of inputs}
\label{sec:optim-contr-probl}
The output of a given firm is $F(k,\ell)\in {\mathbb R}_+$, where $k\in {\mathbb R}_+$ and $\ell \in {\mathbb R}_+^d$ respectively stand for the capital of the firm and
for the quantities of the different inputs it uses. The function $F : {\mathbb R}_+ \times {\mathbb R}_+^d\to {\mathbb R}_+$ is the production function.
Let $w\in (0,+\infty)^d$ be the collection of the unit prices of the different factors of production: depending on $i\in \{1,\dots,d\}$,
$w_i$ may stand for the unit price of a raw material, the annual salary of a class of workers, the rental price of a unit of surface. The benefits of the firm in a unit of time are therefore given by $F(k,\ell)-w\cdot \ell -\delta k$, where $\delta \ge 0$ is the rate of depreciation of capital.
The dynamics of the capital of a given firm is described by
\begin{equation}
\label{eq:chap:MFG_model:secGeneralities:capitalDynamic}
\frac{dk}{dt}(t) = F(k(t),\ell(t))-w\cdot \ell(t) -\delta k(t)-c(t),
\end{equation}
where $c(t)$ stands for the consumption at time $t$, for example the share of the benefits that goes to the owners of the firm. The firm has two control variables: its consumption $c(t)\in {\mathbb R}_+$ and the quantities of inputs $\ell(t)\in {\mathbb R}_+^d$.
The firms face the problem of how to split their benefits between consumption and investments that produce growth.
A given firm determines its policy by maximizing the following payoff:
\begin{equation}
\label{eq:chap:MFG_model:secGeneralities:2}
\int_0^{+\infty} U(c(t))e^{-\rho t}dt,
\end{equation}
where $U:[0,+\infty)\rightarrow [-\infty, +\infty) $ is a utility function and $\rho$ is a positive discount factor.
\\
It aims at finding the controls $t\mapsto c(t)\in [0,+\infty)$ and $t\mapsto \ell(t)\in [0,+\infty)^d$
which maximize \eqref{eq:chap:MFG_model:secGeneralities:2}, under the constraint that its capital stay nonnegative (state constraint).
The value of the optimal control problem when the firm has a capital $k_0\ge 0$ is
\begin{equation}
\label{eq:chap:MFG_model:secGeneralities:valueFunction}
\begin{split}
u(k_0,w)\quad =\quad &\sup_{c, \; \ell, \; k } \int_0^{+\infty} U(c(t))e^{-\rho t}dt \\
& \hbox{ subject to }\\
&
\left\{
\begin{array}[c]{l}
c \in L^1_{{\rm loc}}( {\mathbb R}_+; {\mathbb R}_+), \quad \ell\in L^1_{{\rm loc}}({\mathbb R}_+; {\mathbb R}_+^d),\quad k\in W^{1,1}_{\rm{loc}} ( {\mathbb R}_+),\\
k \hbox{ satisfies \eqref{eq:chap:MFG_model:secGeneralities:capitalDynamic} for a.a. $t>0$,}\\
k(0)=k_0,\\
k(t)\ge 0 \hbox{ for all }t.
\end{array}\right.
\end{split}
\end{equation}
We will see that under suitable assumptions, namely Assumptions \ref{ass:secHJ:1} and \ref{ass:secHJ:3} below, $u(k_0,w)\in {\mathbb R}$ for all $k_0\in (0,+\infty)$.
We expect that the value function $u$ can be found by solving a Hamilton-Jacobi equation in $(0,+\infty)$
with state constraints at $k=0$ (from the dynamic programming principle).
Let the Hamiltonian $H:{\mathbb R}_+\times{\mathbb R} \times (0,+\infty)^d \rightarrow (-\infty, +\infty]$ be defined as follows: for all $k\in {\mathbb R}_+$ and $q\in {\mathbb R}$,
\begin{eqnarray}
\label{eq:chap:MFG_model:sec:general:hamiltonian}
H(k,q,w)&=&\displaystyle \sup_{c\in {\mathbb R}_+,\; \ell\in {\mathbb R}_+^d}\left\{U(c) +q\left(F(k,\ell)-w\cdot \ell-\delta k-c\right)\right\}\\
\label{eq:chap:MFG_model:secGeneralities:32}
&=&\displaystyle \sup_{c\in {\mathbb R}_+}\left\{U(c) -cq\right\} +f(k,w)q,
\end{eqnarray}
where $f:{\mathbb R}_+\times (0,+\infty)^d \rightarrow {\mathbb R}$ is the {\sl net output} function:
\begin{equation}
\label{eq:chap:MFG_model:secGeneralities:33}
f(k,w)=\sup_{\ell\in {\mathbb R}_+^d}\left\{F(k,\ell)-w\cdot \ell\right\}-\delta k.
\end{equation}
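To fix ideas, for a Cobb--Douglas technology with a single input, $F(k,\ell)=A k^{\alpha}\ell^{\beta}$ with $\alpha,\beta\in(0,1)$, the supremum in \eqref{eq:chap:MFG_model:secGeneralities:33} is attained at $\ell^*(k,w)=(A\beta k^{\alpha}/w)^{1/(1-\beta)}$. The short sketch below (in Python; this production function and the parameter values are purely illustrative and are not assumed in the results of the paper) evaluates $\ell^*$ and $f$ numerically.
\begin{verbatim}
import numpy as np

# Purely illustrative parameters (not taken from the paper)
A, alpha, beta, delta = 1.0, 0.3, 0.4, 0.1

def ell_star(k, w):
    # maximizer of A k^alpha l^beta - w l over l >= 0 (first-order condition)
    return (A * beta * k**alpha / w) ** (1.0 / (1.0 - beta))

def net_output(k, w):
    # f(k, w) = sup_l { F(k, l) - w l } - delta k
    l = ell_star(k, w)
    return A * k**alpha * l**beta - w * l - delta * k

print(ell_star(1.0, 0.5), net_output(1.0, 0.5))
\end{verbatim}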
\begin{remark}
By contrast with simpler applications of mean field games to price formation, see e.g. \cite{MR4215224},
the Hamiltonian of the problem does not involve a quantity which depends separately/additively on the price vector $w$ and on $q$.
\end{remark}
The Hamilton-Jacobi equation then reads:
\begin{equation}
\label{eq:chap:MFG_model:secGeneralities:HJ}
-\rho u(k,w) + H\left(k,\frac {\partial u}{\partial k} (k,w),w\right)=0,\quad \quad \hbox{ in } (0,+\infty).
\end{equation}
Recall that a function $\psi:{\mathbb R}_+^m \to {\mathbb R}$ is monotone if and only if for every $z,\tilde z \in {\mathbb R}_+^m$,
\begin{displaymath}
z \le \tilde z \quad\quad \Rightarrow \quad \quad \psi(z)\leq \psi(\tilde z),
\end{displaymath}
where the partial order $\;\le\; $ on ${\mathbb R}^{m}$ is defined as follows:
\begin{displaymath}
z\le \tilde z \quad \hbox{ if and only if } \quad z_i\leq \tilde z _i,\;\; \forall i\in \{1,\dots,m\}.
\end{displaymath}
We make the following assumptions on $U$ and $F$:
\begin{assumption}[Assumptions on $U$] \label{ass:secHJ:1}
The utility $ U:{\mathbb R}_+\rightarrow [-\infty, +\infty)$ has the following properties:
\begin{itemize}
\item[i)] $U$ is $C^2$ on $(0,+\infty)$.
\item[ii)] $U$ is increasing and strictly concave in $(0,+\infty)$.
\item[iii)] $\displaystyle \lim_{c\rightarrow 0^+}U'(c) = +\infty$ and $\displaystyle \lim_{c\rightarrow +\infty}U'(c) = 0$.
\end{itemize}
\end{assumption}
\begin{assumption}[Assumptions on $F$] \label{ass:secHJ:3}
The function $F$ is concave and monotone.
For any vector $w\in (0,+\infty)^d$ and for any $k\in {\mathbb R}_+$, the {\sl net output} $f(k,w)$ defined by (\ref{eq:chap:MFG_model:secGeneralities:33}) is finite and
achieved by a unique $\ell =\ell^* (k,w)\in {\mathbb R}_+^d$, and $\ell^*$ is a $C^1$ function defined on $(0,+\infty)\times (0,+\infty)^d$.
\\ Moreover,
\begin{enumerate}
\item The function $f$ belongs to $C^0({\mathbb R}_+\times (0,+\infty)^d)\cap C^1( (0,+\infty)^{d+1}) $
\item for all $w\in (0,+\infty)^d$,
$f (\cdot, w):{\mathbb R}_+\rightarrow {\mathbb R}$ has the following properties:
\begin{enumerate}
\item[i)] $f(\cdot, w)$ is locally of class $C^{1,1}$ on $(0,+\infty)$
\item[ii)] $f(0,w)\geq 0$, $k\mapsto f(k,w)$ is strictly concave and $\lim_{k\rightarrow 0^+} \frac{\partial f }{\partial k} (k,w) = +\infty$
\item[iii)] $\lim_{k\rightarrow +\infty} \frac{\partial f }{\partial k} (k,w) =-\delta $
\end{enumerate}
\end{enumerate}
\end{assumption}
\begin{remark}\label{rem:1}
\begin{itemize}
\item From point 2.ii) in Assumption \ref{ass:secHJ:3}, $f(\cdot, w)$ is strictly concave. Hence,
$ \frac{\partial f }{\partial k} (k,w)$ has a limit as $k\to+\infty$, which belongs to $[-\infty, +\infty)$.
Therefore, point 2.iii) in Assumption \ref{ass:secHJ:3} is meaningful.
\item If $\delta =0$,
then the strict concavity of $k\mapsto f(k,w)$ implies that it
is increasing in $(0,+\infty)$. Then, because $f(0,w)\geq 0$, $f(k,w)>0$ for all $k>0$
and has a limit as $k\to +\infty$, which belongs to $(0,+\infty]$.
\item If $\delta>0$,
then $\lim_{k\rightarrow +\infty}f(k,w)=-\infty$, and
$f$ is negative for $k$ large enough.
\end{itemize}
\end{remark}
\begin{remark}\label{rem:2}
It is clear that $-f$ is monotone with respect to $w$. The optimal quantity of the input labeled $i$ is
\begin{equation*}
\ell_i^*(k,w)= - \frac{\partial f }{\partial w_i} (k,w).
\end{equation*}
\end{remark}
In Section \ref{sec:optim-contr-probl-1} below, we are going to prove the following theorem:
\begin{theorem}\label{th:secHJ:main}
Under Assumptions \ref{ass:secHJ:1} and \ref{ass:secHJ:3}, for all $w\in (0,+\infty)^d$,
there exists a unique classical solution $u(\cdot,w)\in C^1(0,+\infty)$ of \eqref{eq:chap:MFG_model:secGeneralities:HJ} with the following property:
there exists a critical value $\kappa^*(w) >0$, such that
\begin{eqnarray}
\label{eq:chap:MFG_model:secHJ:2}
H_q\left(k,\frac {\partial u}{\partial k} (k,w),w\right)> 0,\quad & \hbox{for } &0<k< \kappa^*(w),\\
\label{eq:chap:MFG_model:secHJ:3}
H_q\left(k,\frac {\partial u}{\partial k} (k,w),w\right)< 0,\quad & \hbox{for } &\kappa^*(w) < k <+\infty.
\end{eqnarray}
Here $H_q$ stands for the partial derivative of $H$ with respect to its second argument.\\
Moreover $\kappa^*(w)$ is characterized by the equation
\begin{equation}
\label{eq:1}
\frac{\partial f}{\partial k}(\kappa^*,w)=\rho.
\end{equation}
The function $u(\cdot, w)$ is strictly concave on $(0,+\infty)$ and belongs to $C^2( (0,\kappa^*(w))\cup (\kappa^*(w), +\infty))$.\\
Furthermore, $u(\cdot,w)$ is the value function of the optimal control problem (\ref{eq:chap:MFG_model:secGeneralities:valueFunction}), and $H_q(k,\partial_k u(k,w),w)$
is the optimal investment policy of a firm with capital $k$.
\end{theorem}
\begin{remark}
\begin{enumerate}
\item The existence of $\kappa^*(w)>0$ such that the
capital of all firms converges towards $\kappa^*(w)$ is known as the golden rule
of investment \cite[Chapter 7]{Allais1947}.
\item We will see in Section \ref{sec:secGeneralities:fokk-planck-equat} below that a firm with an initial capital $k_0\not=\kappa^*(w)$ never reaches this target capital $\kappa^*(w)$.
\end{enumerate}
\end{remark}
The difficulty in the proof of Theorem \ref{th:secHJ:main} lies in the fact that the Hamiltonian $H(k,q,w)$ is defined only for nonnegative values of $q$ (i.e. $H(k,q,w)=+\infty$ if $q<0$)
and may blow up as $q\to 0_+$. Hence, classical results on viscosity solutions of Hamilton-Jacobi equations for state constrained optimal control problems cannot be applied in a straightforward manner. We will use a different strategy: in particular, in the simplest case in which $\delta = -\lim_{k\to \infty} \frac {\partial f}{\partial k} (k,w)=0$, our proof of existence is based on the fact that the function $q\mapsto H(k,q,w)$ is strictly convex, strictly decreasing in $(0,q_{\min})$ and strictly increasing in $(q_{\min}, +\infty)$, where $q_{\min}=U'(f(k,w))$, see Lemma \ref{lem:secHJ:Hmin} below. In this case, our strategy consists in solving two ordinary differential equations by means of shooting methods: the first (resp. second) one involves the inverse of the increasing (resp. decreasing) part of $ q\mapsto H(k,q,w)$.
Note that a different strategy has been studied in \cite{Petit2022phd}; it was inspired by
a method proposed in \cite{Santambrogio2017} for studying a Ramsey model of optimal growth with non local externalities. It consists in introducing a relaxed Lagrangian version of the original optimal control problem, then obtaining compactness properties which lead to the existence of an optimal control and of a solution of the original problem.
However, this approach requires an assumption stronger than Assumption \ref{ass:secHJ:3}.
\subsection{The distribution of capital given the prices of inputs}
\label{sec:distrib_capital}
The distribution of capital corresponding to the optimal investment policy of the firms is a bounded positive measure on $(0,+\infty)$.
In our model, its density is characterized by the following continuity equation:
\begin{equation}
\label{eq:chap:MFG_model:FP}
\frac{\partial}{\partial k}\left(m(\cdot,w) H_q\left(\cdot,\frac {\partial u}{\partial k} (\cdot,w),w\right)\right) = \eta(\cdot, u(\cdot,w)) - \nu m(\cdot,w),
\end{equation}
which may first be understood in the sense of distributions. The parameter $\nu\ge 0$ is the extinction rate of the firms
and the source term $\eta $ stands for the exogenous creation of firms.
Note that the latter term depends on the value $u$. We make the following assumption:
\begin{assumption}[Assumptions on $\nu$ and $\eta$] \label{ass:secFP:1}
We assume that $\nu$ is positive ($\nu>0$), that $\eta$ is a continuous function on $[0,+\infty)\times {\mathbb R}$, and that there exists a continuous probability density $\hat \eta:{\mathbb R}_+\rightarrow {\mathbb R}_+$ with a compact support contained in $(0,+\infty)$
and a positive constant $\hat c\ge 1$ such that for all $k\ge 0$ and $v\in {\mathbb R}$,
\begin{displaymath}
\frac 1 {\hat c} \hat \eta(k) \le \eta( k,v) \le \hat c \hat \eta(k).
\end{displaymath}
\end{assumption}
Equation \eqref{eq:chap:MFG_model:FP} is supplemented with the condition
\begin{equation}\label{eq:12}
\nu \int_{{\mathbb R}_+} m(k,w) dk= \int_{{\mathbb R}_+} \eta(k, u(k,w)) dk.
\end{equation}
Since $H_q(k, \frac {\partial u}{\partial k} (k,w),w)>0$ for small values of $k$ and $H_q(k, \frac {\partial u}{\partial k} (k,w),w)<0$ for large values of $k$, see Theorem \ref{th:secHJ:main}, \eqref{eq:12} is a weak way to say that the flux \\
$m(\cdot,w)H_q(\cdot, \frac {\partial u}{\partial k} (\cdot,w),w)$ vanishes at $k=0$ and as $k\to +\infty$.
\begin{proposition}\label{prop:FP_1}
Under Assumptions \ref{ass:secHJ:1}, \ref{ass:secHJ:3} and \ref{ass:secFP:1},
the unique solution of \eqref{eq:chap:MFG_model:FP}-\eqref{eq:12} is given by
\begin{equation}
\label{eq:chap:MFG_model:FP10}
\begin{split}
m(k,w)=\\
\left\{ \begin{array}[c]{rcl}
\displaystyle \frac{1}{ b(k,w) } \int_0^{k}\eta(\kappa, u(\kappa,w))
\exp\left(-\displaystyle \int_\kappa^k\frac{\nu}{ b(z,w)}dz\right)d\kappa , \quad&\text{ if } &\quad k <\kappa^*(w),\\
\displaystyle -\frac{1}{ b(k,w)}\int_k^{+\infty}\eta(\kappa, u(\kappa,w))\exp
\left(\displaystyle \int_k^\kappa\frac{\nu}{ b(z,w)}dz\right)d\kappa, \quad &\text{ if } & \quad k>\kappa ^*(w),
\end{array}\right.
\end{split}
\end{equation}
where, for brevity, $b(k,w)$ stands for the optimal investment when the capital is $k$:
\begin{equation}
\label{eq:b}
b(k,w)=H_q\left(k,\frac {\partial u} {\partial k}(k,w),w\right).
\end{equation}
\end{proposition}
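The formula \eqref{eq:chap:MFG_model:FP10} can be checked directly on each branch. For instance, for $k<\kappa^*(w)$, the flux $G(k)=m(k,w)\,b(k,w)=\int_0^{k}\eta(\kappa, u(\kappa,w))\exp\left(-\int_\kappa^k\frac{\nu}{ b(z,w)}dz\right)d\kappa$ satisfies
\begin{displaymath}
G'(k)= \eta(k, u(k,w)) - \frac{\nu}{b(k,w)}\, G(k) = \eta(k, u(k,w)) - \nu\, m(k,w),
\end{displaymath}
which is \eqref{eq:chap:MFG_model:FP} on $(0,\kappa^*(w))$; the case $k>\kappa^*(w)$ is similar.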
A key step in the proof of Proposition \ref{prop:FP_1} consists of showing that the quantities in the right hand side of \eqref{eq:chap:MFG_model:FP10} are well defined. This comes from an intermediate result which states that, under Assumptions \ref{ass:secHJ:1} and \ref{ass:secHJ:3},
$\left|H_q(k,\frac {\partial u} {\partial k}(k,w),w) \right|= O(| k-\kappa^*(w)|)$ for $k$ in a neighborhood of $\kappa^* (w)$.
The latter information implies that, with the optimal investment strategy, a firm starting with a capital $k_0\not= \kappa^* (w)$ never reaches $\kappa^* (w)$,
even though its capital does tend to $\kappa^* (w)$ as $t\to \infty$.
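Here is a minimal sketch of how this bound is used. Write $b(k)=H_q\left(k,\frac {\partial u} {\partial k}(k,w),w\right)$ and let $k(\cdot)$ denote the optimal trajectory, so that $\frac{dk}{dt}(t)=b(k(t))$. As long as $k(t)$ stays in a neighborhood of $\kappa^*(w)$ where $|b(k)|\le M\,|k-\kappa^*(w)|$,
\begin{displaymath}
\frac{d}{dt}\,\bigl|k(t)-\kappa^*(w)\bigr| \ \ge\ -\left|\frac{dk}{dt}(t)\right| \ \ge\ -M\,\bigl|k(t)-\kappa^*(w)\bigr|,
\end{displaymath}
so that, by Gronwall's lemma, $|k(t)-\kappa^*(w)|\ge |k_0-\kappa^*(w)|\,e^{-Mt}>0$ for all $t\ge 0$: the target capital is approached but never attained in finite time.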
\begin{remark} \label{sec:rk_support}
Note that \eqref{eq:chap:MFG_model:FP10} implies that $ \frac 1{\nu \hat c} \le \int_{{\mathbb R}_+} m(k,w) dk\le \frac {\hat c} {\nu}$ (the two bounds do not depend on $w$).
Moreover, the support of $m(\cdot, w)$ is contained in the interval
\begin{displaymath}
\left [ \min\left( \min\{k\in \hbox{support}(\hat \eta)\}, \kappa^*(w)\right),
\max\left( \max\{k\in \hbox{support}(\hat \eta)\}, \kappa^*(w)\right) \right ].
\end{displaymath}
Hence, from the continuity of $w\mapsto \kappa^*(w)$, for any compact set $K\subset {\mathbb R}_+^d$, there exists
a compact interval of ${\mathbb R}_+$ containing the supports of $m(\cdot, w)$ for all $w\in K$.
\end{remark}
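These bounds on the total mass are in fact a direct consequence of \eqref{eq:12} and Assumption \ref{ass:secFP:1}: since $\hat \eta$ is a probability density and $\frac 1 {\hat c}\hat\eta\le \eta(\cdot,v)\le \hat c\,\hat\eta$ for every $v\in{\mathbb R}$,
\begin{displaymath}
\frac 1 {\hat c}\ \le\ \int_{{\mathbb R}_+} \eta(k, u(k,w))\, dk\ =\ \nu \int_{{\mathbb R}_+} m(k,w)\, dk\ \le\ \hat c ,
\end{displaymath}
which yields $\frac 1{\nu \hat c}\le \int_{{\mathbb R}_+} m(k,w)\,dk\le \frac{\hat c}{\nu}$.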
\subsection{Equilibria}
\label{sec:equilibria}
The supply of inputs is assumed to be of the form $S(w)$, where $w\in {\mathbb R}_+^d$ is the collection of prices.\\
At the equilibrium, we require that the clearing condition on the markets of inputs holds, i.e.
\begin{equation}
\label{eq:clearing_condition}
S(w)= \int_{{\mathbb R}_+} \ell^* \left(k, w\right) m(k,w) dk.
\end{equation}
where $ \ell_i^* \left(k, w\right) = - \frac{\partial f }{\partial w_i} (k,w)$, and $m(\cdot, w)$ is the solution of \eqref{eq:chap:MFG_model:FP}-\eqref{eq:12}.\\
We aim at proving the existence of equilibria by using Brouwer degree theory.
This requires additional assumptions:
\begin{assumption}[Assumptions on the supply]
\label{ass:chap:MFG_model:S}
The function $S: \;{\mathbb R}_+^d\rightarrow {\mathbb R}_+^d$ is of the form $S(w)= D_w\Phi(w)$,
where
\begin{enumerate}
\item $\Phi:\; {\mathbb R}_+^d\rightarrow {\mathbb R} $ is $C^1$ regular and strictly convex
\item $\Phi$ is bounded from below (for example by $0$)
\item $\Phi$ is coercive, i.e. $\lim_{\|w\|\to \infty} \Phi(w)=+\infty$.
\end{enumerate}
\end{assumption}
\begin{remark}
\label{ass:chap:MFG_model:Sr1}
The Legendre-Fenchel transform of $\Phi$, $\Phi^*(S)= \sup_{w\in {\mathbb R}_+^d} S\cdot w- \Phi(w)$, is convex and lower semi-continuous on $ {\mathbb R}_+^d$ with values in $(-\infty,+\infty]$. It can be interpreted as a collective cost or disutility associated with the supply of inputs. Concerning raw materials, it may be linked to their scarcity or to the environmental/social damages caused by their production. For manpower, the disutility captures negative effects of labour on the welfare of the workers.
\end{remark}
\paragraph{Examples}
\begin{enumerate}
\item If, for any $i=1,\dots, d$, the $i$-th component of the supply function is a nonnegative, continuous and increasing function of $w_i$ only, i.e. $S_i(w)=S_i(w_i)$, then Assumption \ref{ass:chap:MFG_model:S} is satisfied with $\Phi(w)= \sum_{i=1}^d \int_0 ^{w_i} S_i(t) dt$ (a brief verification is given after these examples).
\item Given two positive numbers $\sigma$ and $w_0$, if
\begin{displaymath}
S_i(w)= \frac { \exp( w_i/\sigma)} {\sum_{j=0} ^d \exp( w_j/\sigma)}
\end{displaymath}
for all $i=1,\dots, d$,\\
then Assumption \ref{ass:chap:MFG_model:S} is satisfied with $\Phi(w)= \sigma \log\left(\sum_{j=0}^d \exp( w_j/\sigma)\right)$. In the limit $\sigma\to 0$, the price $w_0$ can be seen as a reserve price under which the production factors cannot be acquired.
\end{enumerate}
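For the first example, the verification of Assumption \ref{ass:chap:MFG_model:S} is straightforward (we understand ``increasing'' in the strict sense, so that strict convexity holds): $\Phi(w)= \sum_{i=1}^d \int_0 ^{w_i} S_i(t) dt$ is $C^1$ with $D_w\Phi=S$; each summand is a strictly convex function of $w_i$ because its derivative $S_i$ is strictly increasing, hence $\Phi$ is strictly convex; $\Phi\ge 0$ because $S_i\ge 0$; and $\Phi$ is coercive because each summand is nondecreasing in the other variables and tends to $+\infty$ as $w_i\to +\infty$.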
Set
\begin{equation}\label{eq:supply0}
g(k,w)= f(k,w) +\delta k=\sup_{\ell\in {\mathbb R}_+^d} F(k,\ell) -w\cdot \ell,
\end{equation}
which can be seen as the Legendre-Fenchel transform of $\ell\mapsto -F(k,\ell)$ evaluated at $-w$. From Assumption \ref{ass:secHJ:3},
$g(k,w)$ is finite, nonnegative and achieved by the unique maximizer
$\ell^* (k,w)\in {\mathbb R}_+^d$, and $\ell^*$ is $C^1$ on $(0,+\infty)\times (0,+\infty)^d$.
A further technical assumption involving both $F$ and the fixed measure $\hat \eta$ arising in Assumption \ref{ass:secFP:1} will be needed:
\begin{assumption}
\label{ass:chap:MFG_model:technical_assump}
Let ${\mathds{1}}\in {\mathbb R}^d$ be defined by ${\mathds{1}}=(1,\dots,1)$. We assume that there exists $\epsilon\in (0,1)$ such that for all $\lambda\in [0,1]$, if
\begin{equation}\label{eq:10004}
\begin{split}
& \Phi(w)+\int_0^{+\infty} g(k,w) \Bigl( (1-\lambda) d\hat \eta (k) + \lambda dm(k,w)\Bigr) \\ \le & \Phi({\mathds{1}})+\int_0^{+\infty} g(k,{\mathds{1}}) \Bigl( (1-\lambda) d\hat \eta (k) + \lambda dm(k,w)\Bigr)
\end{split}
\end{equation}
then
\begin{equation}
\label{eq:10012}
w\in\left(\epsilon, \frac 1 \epsilon\right)^d.
\end{equation}
\end{assumption}
\begin{remark}
The proof that Assumption \ref{ass:chap:MFG_model:technical_assump} holds for classical examples of production functions will be given in Section \ref{sec:equilibrium}.
\end{remark}
Section \ref{sec:equilibrium} will be devoted to the proof of the following existence result:
\begin{theorem}[Existence of equilibria]\label{th_ex_equil}
Under Assumptions \ref{ass:secHJ:1}, \ref{ass:secHJ:3}, \ref{ass:secFP:1}, \ref{ass:chap:MFG_model:S} and \ref{ass:chap:MFG_model:technical_assump},
there exists an equilibrium, i.e. $w\in (0,+\infty)^d$ such that
the market clearing condition \eqref{eq:clearing_condition} holds with $m(\cdot,w)$ and $u(\cdot,w)$ uniquely defined respectively by Proposition \ref{prop:FP_1} and Theorem \ref{th:secHJ:main}.
\end{theorem}
\subsection{Classical examples of utility and production functions}
\label{sec:import-exampl-util}
\subsubsection{Examples of utility functions}
The constant relative risk aversion (CRRA) utility is a common example of a utility that satisfies Assumption \ref{ass:secHJ:1}:
\begin{displaymath}
U(c) = \ln(c)\quad \hbox{ or } \quad U(c) = \frac 1 b c^b \quad \hbox { with }b\in(0,1).
\end{displaymath}
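Both examples indeed satisfy Assumption \ref{ass:secHJ:1}: they are of class $C^2$ on $(0,+\infty)$, with
\begin{displaymath}
U'(c)=\frac 1 c \ \hbox{ or } \ U'(c)= c^{b-1}, \qquad U''(c)=-\frac 1 {c^2}\ \hbox{ or } \ U''(c)= (b-1)\,c^{b-2}<0,
\end{displaymath}
so that $U$ is increasing and strictly concave, $\lim_{c\rightarrow 0^+}U'(c) = +\infty$ and $\lim_{c\rightarrow +\infty}U'(c) = 0$.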
\subsubsection{Examples of production functions}
\begin{enumerate}
\item A classical example is the Cobb-Douglas function:
\begin{displaymath}
F(k,\ell)= A k^\alpha \ell^\beta,
\end{displaymath}
where $\beta \in (0,1)^d$, $\sum_{i=1}^d \beta_i <1$,
$\ell^\beta=\prod_{i=1}^d \ell_i^{\beta_i}$, and $0< \alpha < 1-\sum_{i=1}^d \beta_i$.
Let us set $|\beta|=\sum_{i=1}^d \beta_i$.
In this example, the parameters $\beta$ and $\alpha$ respectively stand for the elasticities of the output
with respect to the different inputs and to the capital, and $A>0$ is a global factor of productivity.
The net output is given by
\begin{equation}
\label{eq:chap:examples:net_output_CobbDouglas}
f(k,w) = (1-|\beta|) \left(Ak^\alpha\prod_{i = 1}^d\left(\frac{\beta_i}{w_i}\right)^{\beta_i}\right)^\frac{1}{1-|\beta|}-\delta k.
\end{equation}
It can be checked that the first order partial derivatives of $f$ with respect to $k$ and $w_i$ are
\begin{equation}
\label{eq:dfdk_cobb_douglas}
\frac{\partial f}{\partial k}(k,w) = \alpha \left(A\prod_{j = 1}^d\left(\frac{\beta_j}{w_j}\right)^{\beta_j}\right)^\frac{1}{1-|\beta|}
k^{-\frac{1-\alpha-|\beta|}{1-|\beta|}}-\delta,
\end{equation}
and
\begin{equation}
\label{eq:dfdwcobbdouglas}
\frac{\partial f}{\partial w_i}(k,w) = - \left(Ak^\alpha\prod_{j = 1}^d\left(\frac{\beta_j}{w_j}\right)^{\beta_j}\right)^\frac{1}{1-|\beta|}\frac{\beta_i}{w_i}\le 0.
\end{equation}
It is easy to see that Assumption \ref{ass:secHJ:3} is satisfied. In particular, \\$\lim_{k\to +\infty } \frac{\partial f}{\partial k}(k,w) =-\delta$.
The capital $\kappa^*(w)$ in \eqref{eq:1} is given by the following formula (a short derivation is given after these examples):
\begin{equation}
\label{eq:chap:examples:sec:Cobb_Douglas:target_capital}
\kappa^*(w) = \left(\frac{\alpha}{\delta+\rho}\right)^\frac{1-|\beta|}{1-\alpha-|\beta|}
\left(A\prod_{j = 1}^d\left(\frac{\beta_j}{w_j}\right)^{\beta_j}\right)^\frac{1}{1-\alpha-|\beta|}.
\end{equation}
\item We now consider a production function with a constant elasticity of substitution:
\begin{displaymath}
F(k,\ell) = \left(k^\alpha + \sum_{i = 1}^d\ell_i^{\beta_i}\right)^\gamma,
\end{displaymath}
where $\alpha \in(0,1)$, $\beta\in(0,1)^d$ and $\gamma \in (0,1)$.
For any $(k,w)\in {\mathbb R}_+\times(0,+\infty)^d$, it can be checked that there exists a unique parameter $\lambda(k,w)>0$ such that
\begin{equation}
\label{eq:lambda_CES}
\lambda\left(k^\alpha + \sum_{j = 1}^d\left(\frac{\lambda\beta_j}{w_j}\right)^\frac{\beta_j}{1-\beta_j}\right)^{1-\gamma}= \gamma.
\end{equation}
The net output is then
\begin{displaymath}
f(k,w) = \left(k^\alpha+\sum_{j=1}^d\left(\frac{\lambda(k,w)\beta_j}{w_j}\right)^\frac{\beta_j}{1-\beta_j}\right)^\gamma - \sum_{j = 1}^d w_j\left(\frac{\lambda(k,w)\beta_j}{w_j}\right)^\frac{1}{1-\beta_j}-\delta k.
\end{displaymath}
It can be checked that the first order partial derivatives of $f$ with respect to $k$ and $w_i$ are
\begin{equation}
\label{eq:df/dk_constant_elasticity}
\frac{\partial f}{\partial k}(k,w) = \alpha \lambda(k,w)k^{\alpha - 1}- \delta,
\end{equation}
and
\begin{equation}
\label{eq:Dwf_CES}
\frac{\partial f}{\partial w_i}(k,w) = - \left(\frac{\lambda(k,w)\beta_i}{w_i}\right)^\frac{1}{1-\beta_i}<0.
\end{equation}
Assumption \ref{ass:secHJ:3} is satisfied. In particular, $\lim_{k\to +\infty } \frac{\partial f}{\partial k}(k,w) =-\delta$.
The capital $\kappa^*(w)$ in \eqref{eq:1} is the unique solution of
\begin{equation}
\label{eq:target_capital_CES}
\alpha\lambda(\kappa^*(w),w) (\kappa^*(w))^{\alpha-1} = \delta+\rho.
\end{equation}
\end{enumerate}
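For the Cobb-Douglas example, formula \eqref{eq:chap:examples:sec:Cobb_Douglas:target_capital} is obtained by solving \eqref{eq:1} with \eqref{eq:dfdk_cobb_douglas}: writing $C=\left(A\prod_{j = 1}^d\left(\frac{\beta_j}{w_j}\right)^{\beta_j}\right)^\frac{1}{1-|\beta|}$, the equation $\frac{\partial f}{\partial k}(\kappa^*,w)=\rho$ reads
\begin{displaymath}
\alpha\, C\, (\kappa^*)^{-\frac{1-\alpha-|\beta|}{1-|\beta|}}=\rho+\delta,
\qquad\hbox{i.e.}\qquad
\kappa^*(w)=\left(\frac{\alpha}{\delta+\rho}\right)^{\frac{1-|\beta|}{1-\alpha-|\beta|}} C^{\frac{1-|\beta|}{1-\alpha-|\beta|}},
\end{displaymath}
with $C^{\frac{1-|\beta|}{1-\alpha-|\beta|}}=\left(A\prod_{j = 1}^d\left(\frac{\beta_j}{w_j}\right)^{\beta_j}\right)^\frac{1}{1-\alpha-|\beta|}$. Likewise, writing $\frac{\partial f}{\partial k}(\kappa^*,w)=\rho$ with \eqref{eq:df/dk_constant_elasticity} gives \eqref{eq:target_capital_CES} in the constant elasticity of substitution case.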
\section{The optimal control problem of a single firm}
\label{sec:optim-contr-probl-1}
In this section, we assume that $w$, the prices of the production factors, is a fixed vector in $(0,+\infty)^d$. Thus, in order to alleviate the notation,
we everywhere omit the dependency upon $w$; for example we write $H(k,q)$ and $u(k)$ instead of $H(k,q,w)$ and $u(k,w)$. Similarly, we set
$u'(k)= \frac {\partial u}{\partial k} (k,w)$ and $f'(k)= \frac {\partial f}{\partial k} (k,w)$.
The proof of Theorem \ref{th:secHJ:main} is simpler when $\delta=0$
because $f$ is positive on $(0,+\infty)$. We will first focus on the latter case, then we will address the other case, i.e. $\delta >0$.
\subsection{The particular case where $\delta=0$ }
\subsubsection{Some properties of the Hamiltonian}
\label{sec:secHJ:propertiesH}
\begin{lemma}\label{lem:secHJ:Hconvex}
Under Assumption \ref{ass:secHJ:1}, for any $k>0$, the function $q \mapsto H\left(k,q\right)$, defined on $(0,+\infty)$,
is strictly convex and of class $C^2$.
\end{lemma}
\begin{proof}
From Assumption~\ref{ass:secHJ:1}, the function $U'$ is one to one on $(0,+\infty)$.
Let $c^*$ denote the inverse function, which is decreasing and $C^1$ on $(0,+\infty)$; its derivative is $q\mapsto 1/ U''(c^* (q))$.
For any $q>0$, $c^*(q)>0$ is the unique consumption which achieves the supremum in (\ref{eq:chap:MFG_model:secGeneralities:32}),
because $U'(c^*(q)) = q$. The derivative of $q\mapsto H(k,q)$ is
\begin{equation}
\label{eq:chap:MFG_model:secHJ:8}
H_q(k,q)= -c^*(q)+f(k).
\end{equation}
Hence, $q\mapsto H(k,q)$ is $C^2$ on $(0,+\infty)$ and $H_{qq}(k,q)= -1/{U''( c^* (q))}>0$. This implies the strict convexity of $q\mapsto H(k,q)$.
\end{proof}
\begin{remark}\label{sec:some-prop-hamilt}
Note that the consumption achieving the supremum in (\ref{eq:chap:MFG_model:secGeneralities:32}) does not depend on $k$.
\end{remark}
\begin{lemma}\label{lem:secHJ:Hmin}
We make Assumptions \ref{ass:secHJ:1} and \ref{ass:secHJ:3} and suppose furthermore that $\delta =0$, hence $\lim_{k\to +\infty} f'(k)= 0$.
Then, for any $k>0$,
\begin{eqnarray}
\label{eq:chap:MFG_model:secHJ:5}
\displaystyle \min_{q>0}H(k,q) &=& U(f(k)),\\
\label{eq:chap:MFG_model:secHJ:6}
\displaystyle \argmin_{q>0}H(k,q) &=& \displaystyle \left\{U'(f(k))\right\}.
\end{eqnarray}
\end{lemma}
\begin{proof}
For $k>0$, $f(k)>0$ by Remark \ref{rem:1}. From (\ref{eq:chap:MFG_model:secHJ:8}), $H_q(k,q)=0$ if and only if $c^*(q)=f(k)$, i.e. $q=U'(f(k))$.
This proves that the infimum of the strictly convex function $q\mapsto H(k,q)$ is a minimum, which is achieved by $q=U'(f(k))$.
The minimal value is $U(c^* (U'(f(k))))=U(f(k))$.
\end{proof}
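As an illustration, consider the CRRA utility $U(c)=\frac 1 b c^b$ with $b\in(0,1)$ (see Section \ref{sec:import-exampl-util}). Then $U'(c)=c^{b-1}$, $c^*(q)=q^{\frac 1 {b-1}}$, and a direct computation gives
\begin{displaymath}
H(k,q)=\left(\frac 1 b-1\right) q^{\frac{b}{b-1}}+f(k)\,q ,\qquad
H_q(k,q)= f(k)-q^{\frac 1 {b-1}} ,
\end{displaymath}
so that $H_q(k,q)=0$ exactly for $q=f(k)^{\,b-1}=U'(f(k))$, and the corresponding minimal value is $H\left(k,f(k)^{b-1}\right)=\frac 1 b f(k)^b=U(f(k))$, in agreement with \eqref{eq:chap:MFG_model:secHJ:5}-\eqref{eq:chap:MFG_model:secHJ:6}.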
\begin{remark}\label{rem:2-2}
From Assumption \ref{ass:secHJ:1}, we see that if $f(0)=0$, then \\ $\lim_{k\to 0} U'(f(k)) = +\infty$.
On the contrary, from Remark \ref{rem:1}, if $f(0)>0$, then $U'\circ f$ remains bounded on bounded subsets of $[0,+\infty)$.
\end{remark}
\begin{lemma}\label{sec:some-prop-hamilt-1}
Under Assumption \ref{ass:secHJ:1},
\begin{eqnarray}
\label{eq:chap:MFG_model:secHJ:23}
\displaystyle \lim_{q\to 0+} H(k,q)&=& \displaystyle \lim_{c\to +\infty} U(c)-cU'(c) = \lim_{c\to +\infty} U(c) \in (-\infty,+\infty],\\
\label{eq:chap:MFG_model:secHJ:24}
\displaystyle \lim_{q\to 0+} H_q(k,q)&=&-\infty.
\end{eqnarray}
\end{lemma}
\begin{proof}
Since $c^*$ is the inverse of $U'$ on $(0,+\infty)$, Assumption \ref{ass:secHJ:1} implies that $\lim_{q\to 0} c^*(q)=+\infty$.
Therefore, from \eqref{eq:chap:MFG_model:secHJ:8}, $\lim_{q\to 0} H_q (k,q)=-\infty$.
We know that $U$ is increasing: let us set $\ell_1= \lim_{c\to +\infty} U(c)=\sup_{c\ge 0} U(c) \in (-\infty, +\infty]$.
On the other hand, the function $c\mapsto U(c)-cU'(c)$ is increasing in ${\mathbb R}_+$, because its derivative is $c\mapsto -cU''(c)$; let us set $\ell_2= \lim_{c\to +\infty} U(c)-cU'(c) \in (-\infty,+\infty]$.
Since $H(k,q)\sim U(c^* (q))- c^*(q) U'(c^*(q))$ as $q\to 0$, we see that $\displaystyle \lim_{q\to 0} H(k,q)= \ell_2$.
We need to compare $\ell_1$ and $\ell_2$. It is obvious that $\ell_2\le \ell_1$. We wish to prove that $\ell_2= \ell_1$.
We argue by contradiction and assume that $\ell_2<\ell_1$. We make out two cases:
\begin{enumerate}
\item $\ell_1\in {\mathbb R}$ and $\ell_2<\ell_1$: we see that $c U'(c)$ tends to $\ell_1-\ell_2>0 $ as $c$ tends to $+\infty$. This implies that $U(c)$ blows up like a logarithm of $c$ as $c$ tends to $+\infty$, in contradiction with the fact that $\ell_1<+\infty$. Therefore, if $\ell_1$ is finite, then $\ell_1=\ell_2$.
\item $\ell_1=+\infty$ and $\ell_2\in {\mathbb R}$.
We see that $c U'(c)= U(c)-\ell_2 + o(1)$ where $\lim _{c\to \infty} o(1)=0$.
Using Gronwall's lemma, we deduce that there
exists a real number $\chi$ such that $U(c)= \chi c +\ell_2 + o(1)$. Since $U(c)\to +\infty$ as $c\to +\infty$, we see that $\chi>0$. We deduce that $\lim _{c\to \infty} U'(c)=\chi>0$, in contradiction with Assumption \ref{ass:secHJ:1}.
\end{enumerate}
The proof is complete.
\end{proof}
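As a concrete check of the equality $\ell_1=\ell_2$: for $U(c)=\ln(c)$ or $U(c)=\frac 1 b c^b$ with $b\in(0,1)$, both limits equal $+\infty$, while for the bounded-above utility $U(c)= \frac{c^{1-\gamma}}{1-\gamma}$ with $\gamma>1$ (which also satisfies Assumption \ref{ass:secHJ:1}), one has $\ell_1=0$ and
\begin{displaymath}
U(c)-cU'(c)= \frac{c^{1-\gamma}}{1-\gamma}-c\, c^{-\gamma}= \frac{\gamma}{1-\gamma}\, c^{1-\gamma}\ \longrightarrow\ 0=\ell_1 \quad\hbox{as } c\to+\infty.
\end{displaymath}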
Lemmas \ref{lem:secHJ:Hconvex} and \ref{lem:secHJ:Hmin} above allow us to define the increasing and decreasing parts of the Hamiltonian:
\begin{definition}\label{def:monotone_enveloppes}
We make Assumptions \ref{ass:secHJ:1} and \ref{ass:secHJ:3} and suppose furthermore that
$\delta=0$.
\begin{itemize}
\item Define the sets
\begin{eqnarray*}
\Theta^\uparrow&=&\left\{(k,q)\hbox{ such that } k>0 \hbox{ and } q\geq U'(f(k))\right\},\\
\Theta^\downarrow&=&\left\{(k,q)\hbox{ such that } k>0 \hbox{ and } q\leq U'(f(k))\right\}.
\end{eqnarray*}
\item Let $H^\uparrow(\cdot,\cdot)$ be the restriction of $H(\cdot,\cdot)$ to $ \Theta^\uparrow$. The function $q\mapsto H^\uparrow(k,q)$ is increasing in $[U'(f(k)), \infty)$.
\item Let $H^\downarrow(\cdot,\cdot)$ be the restriction of $H(\cdot,\cdot)$ to $ \Theta^\downarrow$. The function $q\mapsto H^\downarrow(k,q)$ is decreasing on $(0,U'(f(k))]$.
\end{itemize}
The graphs of $H(k,\cdot)$, $H^\uparrow (k,\cdot)$ and $H^\downarrow (k,\cdot)$ are displayed on Figure \ref{fig:1}.
\end{definition}
\begin{center}
\begin{tikzpicture}[scale=0.8]
\draw[->] (0,0) -- (5,0) node[right] {$q$};
\draw[->] (0,0) -- (0,6) ;
\draw[color=PineGreen, dashed] (1,1) -- (1,0) ;
\draw[color=PineGreen, dashed] (1,1) -- (0,1) ;
\draw[domain=0.1:1, ultra thick, variable=\x, color=blue] plot ({\x}, {1+ 0.5*(\x+1/\x-2)});
\draw[domain=1:4, ultra thick, variable=\x, color=Red] plot ({\x}, {1+0.5*(\x-1)*(\x-1)});
\draw(0,6) node[left] {$ H(k,q)$};
\draw[color=PineGreen] (1,0) node{$\bullet$};
\draw(1,0) node[below, color=PineGreen] {$\quad \quad q_{\min}= U'(f(k))$};
\draw[color=PineGreen] (0,1) node{$\bullet$};
\draw(0,1) node[left, color=PineGreen] {$ U(f(k))$};
\end{tikzpicture}
\captionof{figure}{\label{fig:1} The bold line (blue and red) is the graph of the function $H(k,\cdot)$. The blue line is the graph of $H^{\downarrow}(k,\cdot)$. The red line is the graph of $H^{\uparrow}(k,\cdot)$. In the present figure, $\lim_{q\to 0_+} H(k,q)=+\infty$, but it is also possible that $\lim_{q\to 0_+} H(k,q)\in {\mathbb R}$.}
\end{center}
\begin{lemma}\label{sec:secHJ:H_C1_reg}
Under the same assumptions as in Lemma~\ref{lem:secHJ:Hmin},
$H^\downarrow(\cdot,\cdot)$ (respectively $H^\uparrow(\cdot,\cdot)$) is of class $C^1$ on $\Theta^\downarrow$ (respectively $\Theta^\uparrow$).
\end{lemma}
\begin{proof}
We have already seen in the proof of Lemma \ref{lem:secHJ:Hconvex} that $q\mapsto H(k,q)$ is of class $C^2$.
Moreover, from Assumption \ref{ass:secHJ:3}, $k\mapsto f(k)q$ is of class $C^1$, so $k\mapsto H(k,q)$ is also of class $C^1$. Hence $(k,q) \mapsto H^\downarrow(k,q) $ is of class $C^1 $ on $\Theta^\downarrow$, and so is $(k,q) \mapsto H^\uparrow(k,q) $ on $\Theta^\uparrow$.
\end{proof}
\subsubsection{General orientation}\label{sec:general-orientation}
Heuristically, if $u$ is a classical solution of (\ref{eq:chap:MFG_model:secGeneralities:HJ}) such that $u'(k)>0$ for $k>0$ and
$u''$ is locally bounded,
then, taking the derivative of (\ref{eq:chap:MFG_model:secGeneralities:HJ}), we get that for $k>0$,
\begin{displaymath}
\left(f'(k)-\rho\right) u'(k) = -H_q\left(k,u'(k)\right) u''(k) .
\end{displaymath}
We deduce that if the optimal investment is $0$, i.e.
$H_q\left(k,u'(k)\right)=0$,
then
\begin{equation}
\label{eq:2}
f'(k)=\rho.
\end{equation}
From Assumption \ref{ass:secHJ:3}, (\ref{eq:2}) has a unique solution which we name $\kappa^*$ (note that $\kappa^*$ depends on $w$, see (\ref{eq:1}) in Theorem~\ref{th:secHJ:main}).
\\
Moreover, $H_q\left(\kappa^*,u'(\kappa^*)\right)=0$ implies that
$ u'(\kappa^*)= U'(f(\kappa^*))$ and $H(\kappa^*,u'(\kappa^*))=U(f(\kappa^*))$,
see Figure \ref{fig:1}. Hence, from (\ref{eq:chap:MFG_model:secGeneralities:HJ}), we deduce that
$u(\kappa^*)=U(f(\kappa^*))/\rho$.
\\
On the other hand, because of the state constraint, we expect that
$H_q\left(k,u'(k)\right)$ is positive for small values of $k$. Hence, we expect that for a classical state constrained solution $u$ of
(\ref{eq:chap:MFG_model:secGeneralities:HJ}),
\begin{displaymath}
H\left(k, u'(k)\right)= \left\{ \begin{array}[c]{ll}
H^{\uparrow} \left(k, u'(k)\right), \quad \hbox{if } k<\kappa^*,\\
H^{\downarrow} \left(k, u'(k)\right), \quad \hbox{if } k>\kappa^*.
\end{array}\right.
\end{displaymath}
Therefore, we are going to look for $u$ as the solution of two ordinary differential equations in $(0,\kappa^*)$ and $(\kappa^*, +\infty)$ which respectively involve
the inverse functions of $q\mapsto H^{\uparrow} (k,q)$ and $q\mapsto H^{\downarrow}(k,q)$, with the boundary condition
\begin{displaymath}
u(\kappa^*)=U(f(\kappa^*))/\rho.
\end{displaymath}
In order to carry out this program, we need to consider the inverse functions of
$q\mapsto H^{\uparrow}(k,q) $ and $q\mapsto H^{\downarrow}(k,q)$:
\begin{definition}
We make Assumptions \ref{ass:secHJ:1} and \ref{ass:secHJ:3} and suppose furthermore that
$\delta=0$.
\begin{itemize}
\item Define the sets
\begin{eqnarray}
\label{eq:chap:MFG_model:secHJ:12}
\Omega^\uparrow&=&\left\{(k,v):\; k\in\left(0,\kappa^*\right] \hbox{ and } \rho v\in\left(U(f(k)),+\infty\right)\right\},\\
\label{eq:chap:MFG_model:secHJ:13}
\Omega^\downarrow&=&\left\{(k,v):\ k\in\left[\kappa^*,+\infty\right) \hbox{ and } \rho v\in\left(U(f(k)), \lim_{q\to 0 ^ +} H(k,q)\right)\right\}.
\end{eqnarray}
\item Set
\begin{eqnarray}
\label{eq:chap:MFG_model:secHJ:10}
{\mathcal F}^\uparrow(k,v) &=& \left(H^\uparrow(k,\cdot)\right)^{-1}(\rho v),\quad\quad \hbox{for } (k,v) \in \Omega^\uparrow,\\
\label{eq:chap:MFG_model:secHJ:11}
{\mathcal F}^\downarrow(k,v) &=& \left(H^\downarrow(k,\cdot)\right)^{-1}(\rho v),\quad\quad \hbox{for } (k,v) \in \Omega^\downarrow.
\end{eqnarray}
\end{itemize}
\end{definition}
\paragraph{Program} Our program will be as follows:
\begin{enumerate}
\item
Prove that the following Cauchy problem has a unique solution \\ $u^\downarrow: [\kappa^*,+\infty) \to {\mathbb R}$:
\begin{eqnarray}\label{eq:chap:MFG_model:secHJ:ODEdown1}
\frac {du^\downarrow}{dk}(k) &=& {\mathcal F}^\downarrow (k,u^\downarrow(k)),\quad \quad\quad \hbox{for } k\ge \kappa^*,\\
\label{eq:chap:MFG_model:secHJ:ODEdown2}
(k,u^\downarrow(k))&\in& \Omega^\downarrow,\quad \quad\quad\quad\quad \quad\quad \hbox{for } k> \kappa^*,\\
\label{eq:chap:MFG_model:secHJ:ODEdown3}
u^\downarrow(\kappa^*) &=& \frac{1}{\rho}U(f(\kappa^*)).
\end{eqnarray}
\item Prove that the following Cauchy problem has a unique solution $u^\uparrow: (0,\kappa^*] \to {\mathbb R}$:
\begin{eqnarray}\label{eq:chap:MFG_model:secHJ:ODEup1}
\frac {d u^\uparrow}{dk}(k) &=& {\mathcal F}^\uparrow (k,u^\uparrow(k)),\quad \quad\quad \hbox{for } k\le \kappa^*,\\
\label{eq:chap:MFG_model:secHJ:ODEup2}
(k,u^\uparrow(k))&\in& \Omega^\uparrow,\quad\quad\quad \quad\quad\quad\quad \hbox{for } 0<k< \kappa^*,\\
\label{eq:chap:MFG_model:secHJ:ODEup3}
u^\uparrow(\kappa^*) &=& \frac{1}{\rho}U(f(\kappa^*)).
\end{eqnarray}
\item Prove that the function $u$ which coincides with $ u^\uparrow$ on $[0,\kappa^*]$ and $ u^\downarrow$ on $[\kappa^*,+\infty)$
is the solution of (\ref{eq:chap:MFG_model:secGeneralities:HJ})-(\ref{eq:chap:MFG_model:secHJ:3}).
\end{enumerate}
Before starting this program, let us state a useful lemma:
\begin{lemma}\label{sec:secHJ:F_C1_reg}
Under the same assumptions as in Lemma~\ref{lem:secHJ:Hmin}, ${\mathcal F}^\downarrow(\cdot,\cdot)$ (respectively ${\mathcal F}^\uparrow(\cdot,\cdot)$) is of class $C^1$
on $\Omega ^\downarrow$ (respectively $\Omega^\uparrow$).
\end{lemma}
\begin{proof}
We skip the detailed proof for brevity and refer to \cite{Petit2022phd}, which contains an extended version of the present paper; the result follows from the implicit function theorem applied to $H(k,q)=\rho v$, using Lemma \ref{sec:secHJ:H_C1_reg} and the fact that $H_q(k,q)\not=0$ for $q\not= U'(f(k))$.
\end{proof}
\subsubsection{The Cauchy problem (\ref{eq:chap:MFG_model:secHJ:ODEdown1})-(\ref{eq:chap:MFG_model:secHJ:ODEdown3})}
\label{sec:cauchy-probl-refeq:c}
Let us first consider the maximal solution $\phi_\lambda$ of the following Cauchy problem:
\begin{eqnarray}\label{eq:chap:MFG_model:secHJ:14}
\phi_\lambda'(k) &=& {\mathcal F}^\downarrow (k,\phi_\lambda(k)),\quad \quad\quad \hbox{for } k\ge \kappa^*,\\
\label{eq:chap:MFG_model:secHJ:15}
(k,\phi_\lambda(k))&\in& \Omega^\downarrow,\\
\label{eq:chap:MFG_model:secHJ:16}
\phi_\lambda(\kappa^*) &=& \lambda,
\end{eqnarray}
for $\lambda$ such that $(\kappa^*,\lambda)\in\Omega^\downarrow$, see \eqref{eq:chap:MFG_model:secHJ:13}. Cauchy-Lipschitz theorem may be applied because
${\mathcal F}^\downarrow$ is regular enough on $\Omega^\downarrow$.
After having proved the existence and uniqueness of $\phi_\lambda$,
we will let $\lambda$ tend to $U(f(\kappa^*))/\rho$ and obtain that the sequence $\phi_\lambda$ converges to a solution of (\ref{eq:chap:MFG_model:secHJ:ODEdown1})-(\ref{eq:chap:MFG_model:secHJ:ODEdown3}). One reason for not applying directly the standard existence results
to the Cauchy problem with initial condition $\lambda= U(f(\kappa^*))/\rho$
is that ${\mathcal F}^\downarrow(\cdot,\cdot)$ is not regular at the boundary of $\Omega^\downarrow$. In particular, $ v\mapsto {\mathcal F}^\downarrow (\kappa^* ,v)$ is not Lipschitz continuous in the neighborhood of $(\kappa^*, U(f(\kappa^*))/\rho)$.
Moreover, the point $(\kappa^*,U(f(\kappa^*))/\rho)$ belongs to the boundary of $\Omega^\downarrow$;
this forbids the direct use of Cauchy-Peano-Arzel{\`a} theorem for obtaining the existence of a solution.
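Before turning to the proofs, we give a purely illustrative numerical sketch of this shooting strategy; it plays no role in the analysis. The sketch is specialized to a hypothetical case: log utility, no inputs ($d=0$), $f(k)=Ak^\alpha$, $\delta=0$, and arbitrary parameter values. The inverse ${\mathcal F}^\downarrow$ is computed by bisection on the decreasing branch of $q\mapsto H(k,q)$, and \eqref{eq:chap:MFG_model:secHJ:14} is integrated by an explicit Euler scheme started slightly above $U(f(\kappa^*))/\rho$.
\begin{verbatim}
import numpy as np

# Hypothetical specialization: U(c) = log(c), f(k) = A*k**alpha, delta = 0, d = 0.
A, alpha, rho = 1.0, 0.5, 0.05

def f(k):                     # net output
    return A * k**alpha

def H(k, q):                  # H(k,q) = sup_c {U(c) - c*q} + f(k)*q = -log(q) - 1 + f(k)*q
    return -np.log(q) - 1.0 + f(k) * q

kappa_star = (alpha * A / rho)**(1.0 / (1.0 - alpha))   # solves f'(kappa*) = rho

def F_down(k, v):
    # Inverse of the decreasing branch of q -> H(k,q) on (0, 1/f(k)]: solve H(k,q) = rho*v.
    lo, hi = 1e-14, 1.0 / f(k)          # H decreases from +infinity to U(f(k)) on this interval
    for _ in range(200):                 # plain bisection
        mid = 0.5 * (lo + hi)
        if H(k, mid) > rho * v:
            lo = mid
        else:
            hi = mid
    return 0.5 * (lo + hi)

def shoot(lam, k_max, n=2000):
    # Explicit Euler for phi' = F_down(k, phi), phi(kappa_star) = lam.
    ks = np.linspace(kappa_star, k_max, n)
    phi = np.empty(n)
    phi[0] = lam
    for i in range(n - 1):
        phi[i + 1] = phi[i] + (ks[i + 1] - ks[i]) * F_down(ks[i], phi[i])
    return ks, phi

lam = np.log(f(kappa_star)) / rho + 1e-3     # slightly above U(f(kappa*))/rho
ks, phi = shoot(lam, k_max=5.0 * kappa_star)
print(kappa_star, phi[-1])
\end{verbatim}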
\begin{proposition}\label{prop:secHJ:vlambda}
We make Assumptions \ref{ass:secHJ:1} and \ref{ass:secHJ:3} and suppose furthermore that $\delta=0$.
For every $\lambda$ such that $(\kappa^*,\lambda)\in\Omega^\downarrow$, there exists a unique global solution $ \phi_\lambda$ of \eqref{eq:chap:MFG_model:secHJ:14}-\eqref{eq:chap:MFG_model:secHJ:16} in $[\kappa^*, +\infty)$. The function $ \phi_\lambda$ is increasing and strictly concave.
\end{proposition}
\begin{proof}
Setting $\Theta(k)= (k, \phi_\lambda(k))$, it is convenient to rewrite \eqref{eq:chap:MFG_model:secHJ:14}-\eqref{eq:chap:MFG_model:secHJ:16} in the equivalent form:
find $k\mapsto \Theta(k) \in \Omega^\downarrow$ such that
\begin{eqnarray}
\label{eq:chap:MFG_model:secHJ:17}
\Theta'(k)&=& \left (1, {\mathcal F}^\downarrow (\Theta(k))\right) ,\quad k\ge \kappa^*,\\
\label{eq:chap:MFG_model:secHJ:18}
\Theta(\kappa^*) &=&(\kappa^*, \lambda).
\end{eqnarray}
We may apply Cauchy-Lipschitz theorem; indeed, from Lemma \ref{sec:secHJ:F_C1_reg}, the map \\ $\Theta\mapsto \left (1, {\mathcal F}^\downarrow (\Theta)\right) $ is $C^1$ on $\Omega^\downarrow$. Therefore, there exists a unique maximal solution $\Theta$ of \eqref{eq:chap:MFG_model:secHJ:17}-\eqref{eq:chap:MFG_model:secHJ:18} in $[\kappa^*, \bar k)$. We observe that for $k\in [\kappa^*,\overline{k})$,
$\phi_\lambda'(k) = {\mathcal F}^\downarrow(k,\phi_\lambda(k))>0$, so $\lim_{k\to \bar k^-} \phi_\lambda(k)$ exists.
Moreover, differentiating the identity $\rho \phi_\lambda(k)= H(k,\phi_\lambda'(k))$ and using $H_k(k,q)=f'(k)q$, we get
\begin{displaymath}
\phi_\lambda''(k) =\frac {\rho-f'(k) } { H_q\left(k,\phi_\lambda'(k) \right)} \phi_\lambda'(k)<0, \end{displaymath}
since $f'(k)<\rho$ and $H_q\left(k,\phi_\lambda'(k)\right)<0$ for $k>\kappa^*$.
Therefore $\phi_\lambda$ is strictly concave in $[\kappa^*, \bar k)$.
If $\bar k<\infty$, then from Cauchy-Lipschitz theorem,
$\rho \lim_{k\to \bar k^-} \phi_\lambda(k)$ must be equal either to $ U(f(\bar k))$ or to $\lim_{q\to 0} H(k,q)= \lim_{c\to +\infty} U(c)$
(which does not depend on $k$). Let us show by contradiction that both cases are impossible.
\begin{enumerate}
\item Assume first that $\rho \lim_{k\to \bar k^-} \phi_\lambda(k)=\lim_{q\to 0} H(k,q)= \lim_{c\to +\infty} U(c)$;
let us make out two subcases:
\begin{enumerate}
\item If $ \lim_{c\to +\infty} U(c)=+\infty$, then $\lim_{k\to \bar k^-} \phi_\lambda(k) =+\infty$, which yields
that $\lim_{k\to \bar k^-} {\mathcal F}^\downarrow(k,\phi_\lambda(k)) =0$. From (\ref{eq:chap:MFG_model:secHJ:17}), we see
that $\lim_{k\to \bar k^-} \phi_\lambda'(k) =0$, in contradiction with $\lim_{k\to \bar k^-} \phi_\lambda(k) =+\infty$.
\item If $ \lim_{c\to +\infty} U(c)= \ell\in {\mathbb R}$, then it is possible to extend continuously $\phi_\lambda$ to $\bar k$ by setting $\phi_\lambda(\bar k)= \ell/\rho$.
Since $H(k,0)=\ell$ for all $k$, we see that
\begin{equation}
\label{eq:3}
{\mathcal F}^{\downarrow}(k,\ell/\rho)=0,\quad \hbox{ for all }k\ge \kappa^*.
\end{equation}
On the other hand, since $U'(c^* (q))=q$, Assumption \ref{ass:secHJ:1} implies that $\lim_{q\to 0} c^* (q)=+\infty$. This implies that
\begin{equation}
\label{eq:4}
\frac {\partial {\mathcal F}^\downarrow}{\partial v}(k,\ell/\rho)=0,\quad \hbox{ for all }k\ge \kappa^*.
\end{equation}
But (\ref{eq:3}) and (\ref{eq:4}) prevent the state $\ell/\rho$ from being reached in finite time by a solution of \eqref{eq:chap:MFG_model:secHJ:14}-\eqref{eq:chap:MFG_model:secHJ:15}; we have obtained the desired contradiction.
\end{enumerate}
\item Assume that $\lim_{k\to \bar k^-} \phi_\lambda(k)= U(f(\bar k))/ \rho$.
It is then possible to extend continuously $\phi_\lambda$ to $\bar k$ by setting $\phi_\lambda(\bar k)= U(f(\bar k))/ \rho$,
and (\ref{eq:chap:MFG_model:secHJ:14}) holds in $[\kappa^*, \bar k]$. On the other hand,
\begin{equation}
\label{eq:5}
\frac d {dk} \left( \frac {U(f(k))} \rho \right)- {\mathcal F}^\downarrow \left(k, \frac {U(f(k))} \rho \right) = U'(f(k)) \frac{f'(k)-\rho}{\rho} <0,\quad \hbox{ for } k> \kappa^*,
\end{equation}
where we have used that ${\mathcal F}^\downarrow\left(k, U(f(k))/\rho\right)=U'(f(k))$, the definition of $\kappa^*$ and Assumption \ref{ass:secHJ:3}. Thus, $k\mapsto U(f(k))/\rho$ is a subsolution of the ordinary differential equation
satisfied by $\phi_\lambda$, which yields that $U(f(k))/\rho > \phi_\lambda(k)$ for $k< \bar k$. This is impossible, since
$(k, \phi_\lambda(k))\in \Omega^\downarrow$ for $k<\bar k$.
\end{enumerate}
We have proved that $\bar k=+\infty$. The unique maximal solution of \eqref{eq:chap:MFG_model:secHJ:17}-\eqref{eq:chap:MFG_model:secHJ:18} is a global solution.
\end{proof}
Letting $\lambda$ tend to $U(f(\kappa^*))/\rho$, we shall prove the following result:
\begin{proposition}\label{prop:secHJ:ex_down}
Under the same assumptions as in Proposition~\ref{prop:secHJ:vlambda},
the \\ Cauchy problem (\ref{eq:chap:MFG_model:secHJ:ODEdown1})-(\ref{eq:chap:MFG_model:secHJ:ODEdown3}) has a unique solution
$u^\downarrow \in C^1( [\kappa^*,+\infty))\cap C^2(\kappa^*,+\infty )$. Moreover $u^\downarrow$ is strictly concave on $(\kappa^*, +\infty)$.
\end{proposition}
\begin{proof}
Consider a decreasing sequence $(\lambda_n)_{n\in{\mathbb N}}$, such that for all $n\in {\mathbb N}$,
$(\kappa^*,\lambda_n)\in\Omega^\downarrow$ and $\lim_{n\to \infty} \lambda_n= U(f(\kappa^*))/\rho$.
A direct consequence of Cauchy-Lipschitz theorem is that
$\phi_{\lambda_n} (k)> \phi_{\lambda_{n+1}}(k)$ for all $k\ge \kappa^*$.
On the other hand, we know that $\phi_{\lambda_n} (k)\ge U(f(k))/\rho$. This implies that there exists a function $\phi: [\kappa^*, +\infty)\to {\mathbb R}$ such that $\phi_{\lambda_n}$ converges to
$\phi$ pointwise as $n$ tends to $+\infty$.\\
Since $(\phi_{\lambda_n})_{n\in{\mathbb N}}$ is a sequence of concave functions locally uniformly bounded, we see from \cite[Theorem 3.3.3]{MR2041617} that the convergence is uniform on every compact set, so the limit $\phi$ is continuous.\\
Since ${\mathcal F}^\downarrow(\cdot,\cdot)$ is continuous on the closure of $\Omega^\downarrow$,
we may pass to the limit in the integral form of \eqref{eq:chap:MFG_model:secHJ:14}: for all $k\ge \kappa^*$,
\begin{displaymath}
\phi(k) = \frac{1}{\rho}U(f(\kappa^*))+\int_{\kappa^*}^k{\mathcal F}^\downarrow(\kappa,\phi(\kappa))d\kappa.
\end{displaymath}
This implies that $\phi\in C^1([\kappa^*, +\infty))$ and that $\phi$ satisfies \eqref{eq:chap:MFG_model:secHJ:ODEdown1} and \eqref{eq:chap:MFG_model:secHJ:ODEdown3}. Hence $\phi$ is an increasing function.\\
On the other hand, \eqref{eq:5} implies that $\phi(k)> U(f(k))/\rho$ for all $k>\kappa^*$. This shows that $\phi$ satisfies \eqref{eq:chap:MFG_model:secHJ:ODEdown2}.\\
Arguing as in the proof of Proposition {\mathbb R}f{prop:secHJ:vlambda}, we see that $\phi$ is $C^2$ on $(\kappa^*, +\infty)$ and strictly concave.
We have proved the existence of a solution of \eqref{eq:chap:MFG_model:secHJ:ODEdown1}-\eqref{eq:chap:MFG_model:secHJ:ODEdown3}.
Assume that there are two such solutions $\phi_1$ and $\phi_2$.
If there exists $k_0>\kappa^*$ such that $\phi_1(k_0)=\phi_2(k_0)$, then $\phi_1$ and $\phi_2$ coincide from
Cauchy-Lipschitz theorem. Hence we may assume that $\phi_1(k)<\phi_2(k)$ for $k>\kappa^*$.
Then, using the non increasing character of ${\mathcal F}^\downarrow(k,\cdot)$, we see that, for every $k>\kappa^*$,
\begin{displaymath}
0>\phi_1(k)-\phi_2(k) = \int_{\kappa^*}^k{\mathcal F}^\downarrow(\kappa,\phi_1(\kappa))-{\mathcal F}^\downarrow(\kappa,\phi_2(\kappa))d\kappa\geq 0.
\end{displaymath}
We have found a contradiction and achieved the proof of uniqueness.
\end{proof}
\subsubsection{The Cauchy problem \eqref{eq:chap:MFG_model:secHJ:ODEup1}-\eqref{eq:chap:MFG_model:secHJ:ODEup3}}
\label{sec:cauchy-probl-refeq:c2}
Also in this case, $ {\mathcal F}^\uparrow (k ,\cdot)$ is not Lipschitz continuous in the neighborhood of $(\kappa^*, U(f(\kappa^*))/\rho)$
and $(\kappa^*, U(f(\kappa^*))/\rho)$ belongs to the boundary of $\Omega^\uparrow$. This prevents us
from applying directly standard existence results to \eqref{eq:chap:MFG_model:secHJ:ODEup1}-\eqref{eq:chap:MFG_model:secHJ:ODEup3}.\\
For this reason, we start by considering the Cauchy problem:
\begin{eqnarray}\label{eq:chap:MFG_model:secHJ:19}
\psi_{\epsilon,\lambda}'(k) &=& {\mathcal F}^\uparrow (k,\psi_{\epsilon,\lambda}(k)),\quad 0<k \le \kappa^*,
\\
\label{eq:chap:MFG_model:secHJ:20}
(k,\psi_{\epsilon,\lambda}(k))&\in& \Omega^\uparrow,\\
\label{eq:chap:MFG_model:secHJ:21}
\psi_{\epsilon,\lambda}(\epsilon) &=& \lambda,
\end{eqnarray}
for $(\epsilon,\lambda)\in \Omega^\uparrow$, see \eqref{eq:chap:MFG_model:secHJ:12} (thus $0<\epsilon<\kappa^*$).
As above, Cauchy-Lipschitz theorem may be applied to \eqref{eq:chap:MFG_model:secHJ:19}-\eqref{eq:chap:MFG_model:secHJ:21}.
After having obtained the existence and uniqueness of a maximal solution $\psi_{\epsilon,\lambda}$,
we will prove that there exists $\lambda$ such that $\psi_{\epsilon,\lambda}$ is a global solution, i.e. defined on $(0, \kappa^*]$,
and that $ \psi_{\epsilon,\lambda}(\kappa^*) = U(f(\kappa^*))/\rho$.
\begin{lemma} \label{lem:secHJ:philambda}
We make Assumptions \ref{ass:secHJ:1} and \ref{ass:secHJ:3} and suppose furthermore that
$\delta=0$.
For every $(\epsilon,\lambda)\in\Omega^\uparrow$ with $0<\epsilon<\kappa^*$,
there exists a unique maximal solution of the Cauchy problem \eqref{eq:chap:MFG_model:secHJ:19}-\eqref{eq:chap:MFG_model:secHJ:21}
of the form $\Bigl( (0,\overline{k}(\epsilon,\lambda)), \psi_{\epsilon,\lambda} \Bigr)$ where $\epsilon< \overline{k}(\epsilon,\lambda)\le \kappa^*$. The function $\psi_{\epsilon,\lambda}$ is strictly concave and increasing in $(0,\overline{k}(\epsilon,\lambda))$.
\end{lemma}
\begin{proof}
Existence and uniqueness of a maximal solution follow from the Cauchy-Lipschitz theorem.
The strict monotonicity and concavity of $\psi_{\epsilon,\lambda}$ are obtained as in Proposition \ref{prop:secHJ:vlambda}.
Assume by contradiction that the interval in the definition of the maximal solution is
not of the form $(0,\overline{k}(\epsilon,\lambda))$. This implies that there exists $\underline{k}\in (0,\epsilon)$ such that either $\lim_{k\to \underline k} \psi_{\epsilon,\lambda}(k)=-\infty$ or $\psi_{\epsilon,\lambda}(\underline k)=U(f(\underline k))/\rho$. Let us rule out both situations:
\begin{itemize}
\item If $\lim_{k\to \underline k} \psi_{\epsilon,\lambda}(k)=-\infty$, then
$\lim_{k\to \underline k} \psi'_{\epsilon,\lambda}(k)=+\infty$. This implies that
$\lim_{k\to \underline k} \psi_{\epsilon,\lambda}(k) =U(f(\underline k))/\rho$, and we have obtained the desired contradiction.
\item If $\psi_{\epsilon,\lambda}(\underline k)=U(f(\underline k))/\rho$, then proceeding as in the end of the proof of Proposition \ref{prop:secHJ:vlambda}, this implies that $\psi_{\epsilon,\lambda}( k)\le U(f( k))/\rho$ for all $k\in [\underline k,\epsilon]$, in contradiction with $ \psi_{\epsilon,\lambda}(\epsilon)=\lambda> U(f( \epsilon))/\rho$.
\end{itemize}
Therefore the maximal solution is defined in an interval of the form $(0,\overline{k}(\epsilon,\lambda))$.
\end{proof}
\begin{remark}\label{rem:4-2}
Note that if $f(0)=0$, then $ \psi'_{\epsilon,\lambda}(k)$ blows up when $k\to 0^+$: indeed, from \eqref{eq:chap:MFG_model:secHJ:8},
$ 0 < H_q(k,\psi'_{\epsilon,\lambda}(k)) = f(k) - c^*\left( \psi'_{\epsilon,\lambda}(k)\right)$,
hence $ c^*\left( \psi'_{\epsilon,\lambda}(k)\right) <f(k)$. Therefore,
$U'\left(c^*\left( \psi'_{\epsilon,\lambda}(k)\right) \right)> U'(f(k))$. Thus, from Assumption \ref{ass:secHJ:1},
$ \psi'_{\epsilon,\lambda}(k)=U'\left( c^*\left( \psi'_{\epsilon,\lambda}(k)\right)\right) > U'(f(k))$
tends to $+\infty$ as $k\to 0$.
\end{remark}
\begin{lemma} \label{lem:secHJ:Lambda}
Under the same assumptions as in Lemma~\ref{lem:secHJ:philambda}, for every $\epsilon \in (0, \kappa^*)$, the set
\begin{equation}\label{eq:chap:MFG_model:secHJ:22}
\Lambda_\epsilon=\left\{\lambda>U(f( \epsilon))/\rho \quad \hbox{such that}\quad
\overline{k}(\epsilon,\lambda)= \kappa^*\right\}
\end{equation}
is not empty.
\end{lemma}
\begin{proof}
Take $\lambda>U(f(\kappa^*))/\rho$. Assume by contradiction that $\overline{k}(\epsilon,\lambda)<\kappa^*$, where
$\Bigl( (0,\overline{k}(\epsilon,\lambda)), \psi_{\epsilon,\lambda} \Bigr)$ is the maximal solution
of the Cauchy problem \eqref{eq:chap:MFG_model:secHJ:19}-\eqref{eq:chap:MFG_model:secHJ:21}, (note that $\epsilon<\overline{k}(\epsilon,\lambda)$).
\\
Observe first that $\psi_{\epsilon,\lambda}$ cannot blow up as $k\to \overline{k}(\epsilon,\lambda)$.
Indeed $v\mapsto {\mathcal F}^\uparrow( k, v)$ is Lipschitz continuous on $\left[ \frac 1 \rho \left(\max_{k\in [\epsilon, \kappa^*] } U(f(k))+1\right), +\infty\right) $
with a Lipschitz constant that does not depend on $k\in [\epsilon, \kappa^*]$. This property prevents $\psi_{\epsilon,\lambda}$ from blowing up in finite time.
\\
Therefore, the function
$\psi_{\epsilon,\lambda}$ can be extended to $\overline{k}(\epsilon,\lambda)$ by continuity, and
\begin{equation}
\label{eq:chap:MFG_model:secHJ:maximal}
\psi_{\epsilon,\lambda}( \overline{k}(\epsilon,\lambda)) = U(f(\overline{k}(\epsilon,\lambda)))/\rho,
\end{equation}
otherwise it would not be the maximal solution.
On the other hand, we know that $f$ is increasing in $(0,\kappa^*]$, hence $U(f(\kappa^*))> U(f(k))$ for all $k<\kappa^*$.
In particular, $ U(f(\kappa^*))> U(f(\overline{k}(\epsilon,\lambda)))$.
From the monotonicity of $\psi_{\epsilon,\lambda}$, we obtain that
\begin{displaymath}
\psi_{\epsilon,\lambda} ( \overline{k}(\epsilon,\lambda)) \ge
\psi_{\epsilon,\lambda} ( \epsilon)=\lambda > U(f(\kappa^*))/\rho > U(f(\overline{k}(\epsilon,\lambda)))/\rho,
\end{displaymath}
which contradicts \eqref{eq:chap:MFG_model:secHJ:maximal}.
\\
We have proved that if $\lambda>U(f(\kappa^*))/\rho$, then the maximal solution is defined on $(0,\kappa^*]$. Therefore, $\Lambda_\epsilon$ is not empty.
\end{proof}
\begin{proposition}\label{prop:secHJ:psilambda}
For all $\epsilon\in(0,\kappa^*)$, there exists $\lambda$ with $(\epsilon, \lambda) \in \Omega^\uparrow$ such that
the Cauchy problem \eqref{eq:chap:MFG_model:secHJ:19}-\eqref{eq:chap:MFG_model:secHJ:21} has a global solution $\psi_{\epsilon,\lambda}$ (i.e. defined on $(0,\kappa^*]$) which satisfies $\psi_{\epsilon,\lambda}(\kappa^*) = U(f(\kappa^*))/\rho$.
\end{proposition}
\begin{proof}
Consider a decreasing sequence $(\lambda_n)_{n\in{\mathbb N}}$ in $\Lambda_\epsilon$ (see (\ref{eq:chap:MFG_model:secHJ:22})) such that \\ $\lim_{n\to \infty} \lambda_n= \underline \lambda_\epsilon=\inf_{\lambda \in \Lambda_\epsilon} \lambda$.
It is clear that $(\psi_{\epsilon, \lambda_n})_{n\in {\mathbb N}}$ is a decreasing sequence of functions defined on $(0,\kappa^*]$.
Moreover, since $(k, \psi_{\epsilon, \lambda_n}(k))\in \Omega^\uparrow$ for $k\in (0, \kappa^*)$, $\psi_{\epsilon, \lambda_n}$
is bounded from below by the function $U\circ f /\rho$. Hence, there exists a function $\psi_\epsilon$ defined on $(0,\kappa^*]$ such that $\lim_{n\to +\infty} \psi_{\epsilon, \lambda_n}(k)=\psi_\epsilon(k)$ for all $k\in (0,\kappa^*]$.
\\
Since $(\psi_{\epsilon, \lambda_n})_{n\in{\mathbb N}}$ is a sequence of concave functions locally uniformly bounded,
\cite[Theorem 3.3.3]{MR2041617} ensures that the convergence is uniform on every compact set, thus
$\psi_\epsilon$ is continuous on $(0,\kappa^*]$.
Extending ${\mathcal F}^\uparrow(\cdot,\cdot)$ by continuity on the set $\{ (k, U(f(k))/\rho): k\in (0,\kappa^*] \}$,
we may pass to the limit in the integral form of the differential equation satisfied by $\psi_{\epsilon,\lambda_n}$ and get
\begin{displaymath}
\psi_{\epsilon}(k) = \underline{\lambda}_\epsilon +\int_\epsilon^k{\mathcal F}^\uparrow(\kappa,\psi_{\epsilon}(\kappa))d\kappa.
\end{displaymath}
Hence $ \psi_{\epsilon}$ is a solution of \eqref{eq:chap:MFG_model:secHJ:19} on $(0,\kappa^*)$, which implies that $ \psi_{\epsilon}$ is $C^1$ and increasing in $(0,\kappa^*)$.
\\
We are left with proving that $\psi_{\epsilon}(\kappa^* )= U(f(\kappa^*))/\rho$.
It is already known that $\psi_{\epsilon}(\kappa^* )\ge U(f(\kappa^*))/\rho$. Assume by contradiction that $\psi_{\epsilon}(\kappa^* )> U(f(\kappa^*))/\rho$. Then, set
\begin{displaymath}
b=\frac {\psi_{\epsilon}(\kappa^* )+ U(f(\kappa^*))/\rho} 2,
\end{displaymath}
and consider the Cauchy problem on $(0,\kappa^*]$:
\begin{eqnarray*}
\xi'(k) &=& {\mathcal F}^\uparrow (k,\xi(k)),
\\
(k,\xi(k))&\in& \Omega^\uparrow,\\
\xi(\kappa^*) &=& b.
\end{eqnarray*}
It can be proved by contradiction (with the same kind of argument as in the end of the proof of Proposition \ref{prop:secHJ:vlambda})
that the maximal solution of this problem is in fact global, therefore defined on $(0,\kappa^*]$. Cauchy-Lipschitz theorem implies that $\xi(k)< \psi_\epsilon(k)$ for all $k\in (0,\kappa^*]$. Therefore, $\xi(\epsilon)\in \Lambda_\epsilon$ and $\xi(\epsilon)< \psi_\epsilon(\epsilon)=\underline{\lambda}_\epsilon$, which contradicts the definition of $\underline{\lambda}_\epsilon$.\\
Therefore, $\psi_{\epsilon}(\kappa^* )= U(f(\kappa^*))/\rho$. The same arguments as in the proof of Proposition \ref{prop:secHJ:vlambda} yield that
$\psi_{\epsilon}(k )>U(f(k))/\rho$ for all $k\in (0, \kappa^*)$. Hence $\psi_{\epsilon}=\psi_{\epsilon,\underline \lambda_\epsilon }$.
This achieves the proof.
\end{proof}
\begin{proposition} \label{prop:secHJ:ex_up}
Under the same assumptions as in Lemma~\ref{lem:secHJ:philambda},
the Cauchy problem (\ref{eq:chap:MFG_model:secHJ:ODEup1})-(\ref{eq:chap:MFG_model:secHJ:ODEup3}) has a unique solution
$u^\uparrow \in C^1( (0,\kappa^*])\cap C^2(0,\kappa^*)$. Moreover $u^\uparrow$ is strictly concave on $(0,\kappa^*)$.
\end{proposition}
\begin{proof}
Existence is a consequence of Proposition \ref{prop:secHJ:psilambda}. Uniqueness is proved exactly with the same arguments as in the proof of Proposition \ref{prop:secHJ:ex_down}.
\end{proof}
\begin{remark}\label{rem:4-3}
From Remark \ref{rem:4-2}, it is possible that $\lim_{k\to 0} \frac{du^\uparrow}{dk}(k)=+\infty$ and that $
\lim_{k\to 0} u^\uparrow(k)=-\infty$.
\end{remark}
\subsubsection{End of the proof of Theorem~\ref{th:secHJ:main} in the particular case where
$\delta=0$}
\label{sec:proof-theor-refth:s}
$\;$
\paragraph{Existence}
With $u^\uparrow$ and $u^\downarrow$ as in Propositions \ref{prop:secHJ:ex_up} and \ref{prop:secHJ:ex_down},
define
\begin{equation} \label{eq:chap:MFG_model:secHJ:28}
u(k) = \begin{cases}
&u^\uparrow(k),\quad\quad \hbox{if } k\in (0,\kappa^*],\\
&u^\downarrow(k), \quad\quad \hbox{if } k\in[\kappa^*,+\infty).
\end{cases}
\end{equation}
The properties of $u^\uparrow$ and $u^\downarrow$ ensure that
$u$ is of class $C^1$, increasing and strictly concave in $(0,+\infty)$, and $C^2$ in
$(0,\kappa^*)\cup (\kappa^*, +\infty)$. In particular,
$ u^\uparrow(\kappa^*)= u^\downarrow(\kappa^*)=\frac{1}{\rho} U(f(\kappa^*))$ and
$ \frac {du^\uparrow}{dk} (\kappa^*)=\frac { du^\downarrow}{dk}(\kappa^*)=U'(f(\kappa^*))$.
Moreover,
\begin{displaymath}
H_q\left(k,u'(k)\right) \begin{cases}
&>0,\quad\quad \hbox{if } k\in (0,\kappa^*),\\
&<0, \quad\quad \hbox{if } k\in (\kappa^*,+\infty),\\
&= 0 \quad\quad \hbox{ if } k= \kappa^*.
\end{cases}
\end{displaymath}
Hence, $u$ satisfies (\ref{eq:chap:MFG_model:secGeneralities:HJ})-(\ref{eq:chap:MFG_model:secHJ:3}).
\paragraph{Uniqueness and characterization by (\ref{eq:chap:MFG_model:secGeneralities:valueFunction})}
Let us now prove that if \\
$u\in C^1(0,+\infty)\cap C^2( (0,\kappa^*) \cup (\kappa^* , +\infty)) $ satisfies (\ref{eq:chap:MFG_model:secGeneralities:HJ})-(\ref{eq:chap:MFG_model:secHJ:3}), then it is the
value function of problem (\ref{eq:chap:MFG_model:secGeneralities:2}). This will yield the uniqueness of a classical solution of (\ref{eq:chap:MFG_model:secGeneralities:HJ})-(\ref{eq:chap:MFG_model:secHJ:3}) as well as the characterization of the value function of (\ref{eq:chap:MFG_model:secGeneralities:2}).
\\
Let us set $\chi(\cdot)= c^* (u'(\cdot)) =f(\cdot) -H_q(\cdot, u'(\cdot))$.
Assumptions \ref{ass:secHJ:1}, \ref{ass:secHJ:3}, and
Lemma \ref{lem:secFP:1} below imply that $k\mapsto H_q(k, u'(k))$ is locally Lipschitz continuous on $(0,+\infty)$.
This property and (\ref{eq:chap:MFG_model:secHJ:2})-(\ref{eq:chap:MFG_model:secHJ:3}) imply that for any $k_0\in (0,+\infty)$,
there is a unique solution $k$ of the Cauchy problem
\begin{equation*}
\begin{split}
\frac{dk}{dt}(t) &= f\left(k(t)\right)- \chi\left(k(t)\right) ,\quad t>0\\
k(0)&=k_0,
\end{split}
\end{equation*}
It is an admissible trajectory for problem (\ref{eq:chap:MFG_model:secGeneralities:2}). Therefore $u$ is not greater than
the value function of the optimal control problem (\ref{eq:chap:MFG_model:secGeneralities:valueFunction}).
\\
On the other hand, consider $c \in L^1_{{\rm loc}}( {\mathbb R}_+; {\mathbb R}_+)$, $\ell\in L^1_{{\rm loc}}({\mathbb R}_+; {\mathbb R}_+^d)$, $k\in W^{1,1}_{\rm{loc}} ( {\mathbb R}_+) $,
such that
\begin{equation*}
\begin{split}
\frac{dk}{dt}(t) = F(k(t),\ell(t))-w\cdot \ell(t) -\delta k(t)-c(t), \quad \hbox{ for a.a. }t>0,
\\
k(0)= k_0,
\\
k(t)\geq 0, \quad \hbox{ for a.a. }t>0.
\end{split}
\end{equation*}
Observe that for almost every $t\geq 0$,
\begin{equation*}
\begin{split}
&\sup_{\bar{c}\geq 0,\bar{\ell}\geq 0}\left\{U(\bar{c})+u'(k(t))\left( F(k(t),\bar{\ell})-w\cdot\bar{\ell}-\delta k(t)- \bar{c}\right)\right\}
\\
\ge & U(c(t))+u'(k(t))\left( F(k(t),\ell(t))-w\cdot\ell(t)-\delta k(t)- c(t)\right) \\ =& U(c(t))+u'(k(t)) \frac{dk}{dt}(t).
\end{split}
\end{equation*}
The left hand side coincides with $H\left(k(t), u'(k(t))\right)= \rho u (k (t))$. Hence,
$U(c(t) ) \le - u'(k(t)) \frac{dk}{dt}(t) + \rho u (k (t))$.
This implies that $\int_0^\infty U(c(t)) e^{-\rho t} dt \le u(k_0)$. Hence, $u$ is
not smaller than the value function of problem (\ref{eq:chap:MFG_model:secGeneralities:valueFunction}).
\\
We have proved that if $u\in C^1(0,+\infty)$ satisfies (\ref{eq:chap:MFG_model:secGeneralities:HJ})-(\ref{eq:chap:MFG_model:secHJ:3}), then it is the
value function of problem (\ref{eq:chap:MFG_model:secGeneralities:2}).
\subsection{The case where $\delta>0$}
\label{sec:proof-theor-refth:s-1}
$\;$
\begin{lemma}\label{lem:secHJ:ext-1}
We make Assumption \ref{ass:secHJ:3} and suppose furthermore that $\delta>0$.
Then there exists a unique $k_0\in(0,+\infty)$ such that
\begin{equation}
\label{eq:6}
f(k_0) = 0.
\end{equation}
The function $f$ takes positive values on $(0,k_0)$ and negative values on $(k_0,+\infty)$. Moreover,
$f'(k_0)<0$ and $\kappa^*<k_0$, where $\kappa^*$ is the unique positive number such that $f'(\kappa^*)=\rho$, see (\ref{eq:1}).
\end{lemma}
\begin{proof}
Since the proof is elementary, we only sketch it. By Assumption \ref{ass:secHJ:3}, $f(0)\ge 0$ and $\lim_{k\to 0^+}f'(k)=+\infty$, so $f>0$ near $0$; since $f$ is strictly concave and $\lim_{k\to+\infty}f'(k)=-\delta<0$, $f$ decreases to $-\infty$ for large $k$. By concavity, $\{f\ge 0\}$ is an interval, whence a unique zero $k_0>0$, with $f>0$ on $(0,k_0)$ and $f<0$ on $(k_0,+\infty)$. If $f'(k_0)$ were $0$, then $k_0$ would maximize $f$ with $f(k_0)=0$, contradicting $f>0$ on $(0,k_0)$; hence $f'(k_0)<0$. Finally, $f'$ is decreasing and $f'(\kappa^*)=\rho>0>f'(k_0)$, so $\kappa^*<k_0$.
\end{proof}
\begin{proof}[Proof of Theorem \ref{th:secHJ:main} when $\delta>0$]
Lemma \ref{lem:secHJ:ext-1} implies that in the interval $(0,k_0)$,
which contains $\kappa^*$ and where $f$ is positive, it is possible to repeat the construction
done in paragraphs \ref{sec:cauchy-probl-refeq:c} and \ref{sec:cauchy-probl-refeq:c2}; new arguments are needed to construct the solution in $[k_0,+\infty)$.
{\bf Step 1.} Repeating in $(0,k_0)$ the construction of paragraphs \ref{sec:cauchy-probl-refeq:c} and \ref{sec:cauchy-probl-refeq:c2},
we obtain a unique classical solution $u_1\in C^1(0, k_0)$ of the following problem:
\begin{eqnarray}
\label{eq:chap:MFG_model:secHJ:ext-2}
-\rho u_1(k) + H\left(k,u_1'(k)\right)&=&0, \quad \hbox{for } 0<k<k_0,\\
\label{eq:chap:MFG_model:secHJ:ext-3} H_q\left(k,u_1'(k)\right)&>& 0,\quad \hbox{for } 0<k< \kappa^*,\\
\label{eq:chap:MFG_model:secHJ:ext-4} H_q\left(k,u_1'(k)\right)&<& 0,\quad \hbox{for } \kappa^* < k <k_0.
\end{eqnarray}
The function $u_1$ is strictly concave and increasing in $(0,k_0)$.\\
Since $f$ is continuous and concave, with $\lim_{k\to 0} f'(k)=+\infty$ and $f'(k_0)<0$, there exists $\bar{k}\in(\kappa^* ,k_0)$ such that $ f(\bar{k}) = \max_{k\in[0,k_0]}f(k)$. Since $u_1(\cdot)$ is increasing, $ \lim_{k\to k_0} u_1(k)\ge u_1(\bar k)$. On the other hand, $ \rho u_1(\bar k)>U (f(\bar k))$ (see paragraph \ref{sec:cauchy-probl-refeq:c}). Since $U$ is increasing, $U (f(\bar k))> \lim_{k\to k_0} U(f(k))= \lim_{c\to 0} U(c)$ (which may be $-\infty$). Therefore,
\begin{equation*}
\rho\lim_{k\to k_0} u_1(k)> \lim_{c\to 0} U(c).
\end{equation*}
With the same kind of arguments as in the proof of Proposition \ref{prop:secHJ:vlambda},
it can also be proved that $\rho \lim_{k\to k_0} u_1(k)< \lim_{c\to +\infty} U(c)$.
This implies that $u_1(\cdot)$ can be extended by continuity to $(0,k_0]$ and that
\begin{equation}
\label{eq:7}
\lim_{c\to 0} U(c)< \rho u_1(k_0)< \lim_{c\to \infty} U(c) .
\end{equation}
The function $u'_1(\cdot)$ can then be extended by continuity to $k=k_0$ and \eqref{eq:chap:MFG_model:secHJ:ext-2} holds up to $k=k_0$.
{\bf Step 2.} We are left with constructing the solution in $(k_0,+\infty)$.\\
Observe first that,
for any $k\ge k_0$, $q\mapsto H(k,q)$ is decreasing from \eqref{eq:chap:MFG_model:secHJ:8}, and that
\begin{enumerate}
\item $\lim_{q\to 0} H(k,q)= \lim_{c\to +\infty} U(c)$
\item Since $\lim_{q\to +\infty} c^* (q)= 0$ and $U(c)-cq +f(k)q \le U(c)$, we deduce that
\begin{displaymath}
\lim_{q\to +\infty} H(k,q) \le \lim_{c\to 0} U(c).
\end{displaymath}
\end{enumerate}
Hence, for any $k\ge k_0$, $q\mapsto H(k,q)$ maps $(0,+\infty)$
onto the interval \\ $ \left(\lim_{c\to 0} U(c), \lim_{c\to +\infty} U(c)\right)$
and has a right inverse $z\mapsto {\mathcal F}(k,z)$: \\
for any $z\in \left(\lim_{c\to 0} U(c), \lim_{c\to +\infty} U(c)\right)$, there is a unique ${\mathcal F}(k,z)>0$ such that $H(k, {\mathcal F}(k,z)) =z$.
Let $\varepsilon>0$ be small enough so that $ \rho (u_1(k_0)-\varepsilon) > \lim_{c\to 0} U(c) $, see (\ref{eq:7}). Set
\begin{equation}
\Omega=\left\{(k,v) \;: \; k_0 \leq k \text{ and } \rho (u_1(k_0)-\varepsilon) <\rho v<\lim_{c\to +\infty} U(c) \right\}.
\end{equation}
Note that $(k_0,u_1(k_0))\in\Omega$.
It is possible to prove that ${\mathcal F}(\cdot,\cdot)$ is of class $C^1$ on $\Omega$.
Furthermore, it can be seen that $v\mapsto {\mathcal F}(k,v)$ is Lipschitz continuous on $\left[u_1(k_0)-\varepsilon, \frac 1 \rho \lim_{c\to \infty }U(c)\right)$
with a Lipschitz constant which does not depend on $k\in[k_0,+\infty)$.
Consider the Cauchy problem
\begin{eqnarray}\label{eq:chap:MFG_model:secHJ:ext-5}
u_2'(k) &=& {\mathcal F} (k,u_2(k)),\quad \quad\quad \hbox{for } k\ge k_0,\\
\label{eq:chap:MFG_model:secHJ:ext-6}
(k,u_2(k))&\in& \Omega,\\
\label{eq:chap:MFG_model:secHJ:ext-7}
u_2(k_0) &=& u_1(k_0).
\end{eqnarray}
From Cauchy-Lipschitz theorem, there is a unique maximal solution of \eqref{eq:chap:MFG_model:secHJ:ext-5}-\eqref{eq:chap:MFG_model:secHJ:ext-7}.
The same arguments as in the proof of Proposition {\mathbb R}f{prop:secHJ:vlambda} yield that the solution is indeed global,
i.e. defined on $[k_0,+\infty)$, increasing and strictly concave.
{\bf Step 3.}
Set
\begin{equation*}
u(k) = \begin{cases}
u_1(k),\quad \quad \text{if }k\in(0,k_0],\\
u_2(k),\quad \quad \text{if }k\in [k_0,+\infty).
\end{cases}
\end{equation*}
From what precedes, $u\in C^1(0,+\infty)$, and $\rho u(k) = H(k,u'(k))$ for any $k\in(0,+\infty)$.
Note that $u$ is also $C^2$ in $(0,\kappa^*)\cup (\kappa^*,+\infty)$.
Hence, $u$ is a classical solution of \eqref{eq:chap:MFG_model:secGeneralities:HJ}-\eqref{eq:chap:MFG_model:secHJ:3}.
The remaining part of the proof (uniqueness and verification result) is exactly as in paragraph~\ref{sec:proof-theor-refth:s}.
\end{proof}
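A minimal numerical sketch may help to visualize Step 2 of the construction above. All ingredients below are assumed purely for illustration: $U(c)=\log c$ (so that $c^*(q)=1/q$ and $H(k,q)=-\log q-1+f(k)q$), $f(k)=2\sqrt k-k/2$, which vanishes at $k_0=16$ and is nonpositive beyond it (so that $q\mapsto H(k,q)$ is decreasing on $(0,+\infty)$ for $k\ge k_0$), and a hypothetical boundary value playing the role of $u_1(k_0)$. The inverse ${\mathcal F}$ is computed by a root finder and the Cauchy problem $u_2'(k)={\mathcal F}(k,\rho u_2(k))$ is integrated with a standard ODE solver.
\begin{verbatim}
# Sketch of Step 2 (illustration only, toy data): integrate u2'(k) = F(k, rho*u2(k)),
# where F(k, .) inverts q -> H(k, q) = -log(q) - 1 + f(k)*q  (logarithmic utility).
import numpy as np
from scipy.optimize import brentq
from scipy.integrate import solve_ivp

rho = 0.1
f = lambda k: 2.0*np.sqrt(k) - 0.5*k        # assumed net production, f(16) = 0

def H(k, q):
    return -np.log(q) - 1.0 + f(k)*q

def F(k, z):
    # unique q > 0 with H(k, q) = z; H(k, .) is decreasing since f(k) <= 0 here
    return brentq(lambda q: H(k, q) - z, 1e-12, 10.0)

k0, v0 = 16.0, 10.0                          # hypothetical value of u_1(k_0)
sol = solve_ivp(lambda k, u: [F(k, rho*u[0])], (k0, 30.0), [v0],
                dense_output=True, rtol=1e-8)

for k in np.linspace(k0, 30.0, 5):
    u = sol.sol(k)[0]
    q = F(k, rho*u)                          # = u2'(k); rho*u = H(k, u') by construction
    print(k, u, q, f(k) - 1.0/q)             # last column: drift H_q(k, u'(k)) < 0
\end{verbatim}
Along the computed solution, $u_2$ is increasing and concave and the drift $H_q(k,u_2'(k))=f(k)-c^*(u_2'(k))$ is negative on $(k_0,+\infty)$, consistently with the optimal policy pushing the capital back towards $\kappa^*$.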
\section{The distribution of capital}\label{sec:secGeneralities:fokk-planck-equat}
We still assume that $w$, the prices of the production factors, is a fixed vector in $(0,+\infty)^d$; we keep omitting $w$ everywhere.
The optimal investment policy of a firm with capital $k$ is $H_q(k, u'(k))$, where $u$ is the solution of \eqref{eq:chap:MFG_model:secGeneralities:HJ}-\eqref{eq:chap:MFG_model:secHJ:3}. We are interested in finding a weak solution $m$ of the following problem:
\begin{eqnarray}
\label{eq:13}
\frac{d}{d k}\left(m H_q\left(\cdot, \frac{du}{d k}(\cdot) \right)\right) = \eta(\cdot, u (\cdot)) - \nu m(\cdot),
\\
\label{eq:14}
\nu \int_{{\mathbb R}_+} m(k) dk= \int_{{\mathbb R}_+} \eta(k, u(k)) dk.
\end{eqnarray}
From \eqref{eq:chap:MFG_model:secHJ:2}-\eqref{eq:chap:MFG_model:secHJ:3}, we see that if
\eqref{eq:13} holds, then the optimal investment strategy has the effect of pushing $m$ toward $\kappa ^ *$.
It is therefore important to understand whether $m$ has a singularity at $k=\kappa ^ *$. For that,
the following lemma gives information on the behavior of $u$ near $\kappa ^ *$:
\begin{lemma}
\label{lem:secFP:1}
Under Assumptions \ref{ass:secHJ:1} and \ref{ass:secHJ:3}, there exist $\epsilon>0$ and $M>0$ such that
\begin{eqnarray}
\label{eq:chap:MFG_model:FP1}
0\leq &H_q(k,u'(k)) \leq &M(\kappa^*-k), \quad \quad \hbox{ if } \quad k\in [ \kappa^*-\epsilon, \kappa^*],\\
\label{eq:chap:MFG_model:FP2}
M(\kappa^*-k)\leq &H_q (k,u'(k)) \leq &0, \quad \quad \quad \quad \quad \quad \hbox{ if } \quad k \in [\kappa^*, \kappa^* +\epsilon].
\end{eqnarray}
\end{lemma}
\begin{proof}
We focus on the proof of \eqref{eq:chap:MFG_model:FP1}, since the proof of \eqref{eq:chap:MFG_model:FP2} is completely similar.
\\
Since $u\in C^1(0,+\infty)$, and $u$ is $C^2$ in $(0,\kappa ^*)\cup (\kappa^*,+\infty)$,
it is possible to take the derivative of \eqref{eq:chap:MFG_model:secGeneralities:HJ} at $\kappa\not= \kappa^*$:
\begin{equation}
\label{eq:chap:MFG_model:FP3}
\rho u'(\kappa) - H_k(\kappa,u'(\kappa)) = H_q(\kappa,u'(\kappa))u''(\kappa).
\end{equation}
Let us set
\begin{equation}
\label{eq:21}
\chi (\kappa)= c^* (u'(\kappa)).
\end{equation}
Note that $\chi(\kappa^*)=f(\kappa^*)$.
The function $\chi$ is positive, continuous and increasing in $(0,+\infty)$, and $C^1$ on $(0,\kappa^*)\cup (\kappa^*,+\infty)$. Recall that
\begin{displaymath}
H_k(\kappa,u'(\kappa)) = f'(\kappa)u'(\kappa) , \quad u'(\kappa)=U'(\chi(\kappa)), \quad \hbox{ and } \quad H_q(\kappa,u'(\kappa)) = f(\kappa) - \chi(\kappa).
\end{displaymath}
Then \eqref{eq:chap:MFG_model:FP3} can be written as follows:
\begin{equation}
\label{eq:chap:MFG_model:FP4}
U'(\chi(\kappa))(\rho - f'(\kappa)) = (f(\kappa) - \chi(\kappa))U''(\chi(\kappa)) \chi'(\kappa).
\end{equation}
The inequality on the left hand side of \eqref{eq:chap:MFG_model:FP1} is already known since $f(k) - \chi(k)>0$ for $k <\kappa^*$.
We are left with proving the other inequality for $k$ sufficiently close to $\kappa^*$. \\
We first claim that there exist
$\epsilon>0$ and $C_2>0$ such that for every $k \in [\kappa^* -\epsilon, \kappa^*]$,
\begin{equation}
\label{eq:15} \chi(\kappa^*)-\chi(k)= f(\kappa^*)-\chi(k) \leq C_2(\kappa^*-k).
\end{equation}
\paragraph{Proof of \eqref{eq:15}}
For $\epsilon>0$ small enough, dividing \eqref{eq:chap:MFG_model:FP4} by $U''(\chi(\kappa))$ and integrating between $ k $ and $\kappa^*$ yields
\begin{equation}
\label{eq:chap:MFG_model:int_euler}
\begin{split}
& \int_k^{\kappa^*}\frac{U'(\chi(\kappa))}{U''(\chi(\kappa))}
(\rho - f'(\kappa))d\kappa+
\int_k^{\kappa^*}(f(\kappa^*)-f(\kappa))\chi'(\kappa)d\kappa \\
=& \int_k^{\kappa^*} (\chi(\kappa^*)-\chi(\kappa))\chi'(\kappa)d\kappa= \frac{1}{2}(\chi(\kappa^*)-\chi(k))^2.
\end{split}
\end{equation}
Let us deal with the first integral in the left hand side of \eqref{eq:chap:MFG_model:int_euler}.
Since $f\in W^{2,\infty}_{\rm loc}$, there exists $\epsilon_0>0$ and $C_0>0$ such that for all $k\in [\kappa^*-\epsilon_0, \kappa^*]$,
\begin{displaymath}
\rho - f'(\kappa)= f'(\kappa^*)- f'(\kappa)=\int^{\kappa^*}_{\kappa}f''(z)dz \geq -C_0(\kappa^*-\kappa),
\end{displaymath}
thus
\begin{equation}
\label{eq:chap:MFG_model:FP5}
\int_k^{\kappa^*}\frac{U'(\chi(\kappa))}{U''(\chi(\kappa))}(\rho - f'(\kappa))d\kappa \leq -C_0\int_k^{\kappa^*}\frac{U'(\chi(\kappa))}{U''(\chi(\kappa))}(\kappa^*-\kappa)d\kappa
\end{equation}
Since $U'(\chi(\kappa))/ U''(\chi(\kappa))$ admits a negative limit as $\kappa\to \kappa^*$, there exists $C_1>0$ such that for all
$k\in [\kappa^*-\epsilon_0, \kappa^*]$,
\begin{equation}\label{eq:100000}
\int_k^{\kappa^*}\frac{U'(\chi(\kappa))}{U''(\chi(\kappa))}(\rho - f'(\kappa))d\kappa\leq C_1(\kappa^*-k)^2.
\end{equation}
Next, integrating by parts the second integral in \eqref{eq:chap:MFG_model:int_euler} yields
\begin{equation}
\label{eq:chap:MFG_model:FP6}
\begin{split}
\int_k^{\kappa^*}(f(\kappa^*)-f(\kappa))\chi'(\kappa)d\kappa
&= \int_k^{\kappa^*}f'(\kappa) (\chi(\kappa)-\chi(k)) d\kappa\\
&= (\chi(\kappa^*)-\chi(k)) \int_k^{\kappa^*}f'(\kappa) \frac {\chi(\kappa)-\chi(k)}
{ \chi(\kappa^*)-\chi(k)}
d\kappa.
\end{split}
\end{equation}
Setting $J(k) = \int_k^{\kappa^*}f'(\kappa)\frac{\chi(\kappa)-\chi(k)}{\chi(\kappa^*)-\chi(k)}d\kappa$, and using that
both $f$ and $\chi$ are increasing, we obtain
\begin{displaymath}
0\leq J(k)\leq f(\kappa^*)-f(k).
\end{displaymath}
Hence, there exist $\epsilon_1>0$ and $M_1>0$ such that
\begin{equation}
\label{eq:chap:MFG_model:FP8}
0\leq J(k) \le M_1 ( \kappa^*-k), \quad\quad \hbox{ for all } k\in [ \kappa^*-\epsilon_1, \kappa^*].
\end{equation}
From \eqref{eq:chap:MFG_model:int_euler},
\eqref{eq:100000}
and \eqref{eq:chap:MFG_model:FP6}, one deduces
that for $\epsilon\le \min(\epsilon_0,\epsilon_1) $,
\begin{equation}
\label{eq:chap:MFG_model:proof_lem1_1}
(\chi(\kappa^*)-\chi(k))^2 \leq
2C_1(\kappa^*-k)^2+ 2(\chi(\kappa^*)-\chi(k))J(k) .
\end{equation}
Elementary algebra yields that for all $k\in [\kappa^*-\epsilon, \kappa^*]$,
\begin{equation}
\label{eq:chap:MFG_model:FP9}
\begin{split}
0\le \chi(\kappa^*)-\chi(k) & \le J(k) + \Bigl( J^2(k)+ 2C_1 (\kappa^*-k)^2 \Bigr)^\frac{1}{2} \\
& \le \Bigl(M_1 + \bigl( M_1^2 + 2C_1 \bigr)^\frac{1}{2}\Bigr) (\kappa^*-k),
\end{split}
\end{equation}
where the last inequality is a consequence of \eqref{eq:chap:MFG_model:FP8}.
The bound in \eqref{eq:15} is proved.
Finally, the definition of $\kappa^*$ in \eqref{eq:1}
implies that $f(k)-\chi(\kappa^*)=f(k)-f(\kappa^*) = -\rho(\kappa^*-k)+o(\kappa^*-k)$.
Therefore, from \eqref{eq:15}, there exists $\epsilon>0$ and $M>0$ such that for all $k\in [\kappa^*-\epsilon, \kappa^*]$,
\begin{displaymath}
0\le H_q(k,u'(k)) = f(k) - \chi(k) \leq M(\kappa^*-k),
\end{displaymath}
which completes the proof of \eqref{eq:chap:MFG_model:FP1}.
\end{proof}
\begin{remark}
Note that under the additional assumption that $f$ is locally uniformly concave,
(i.e. for every compact set $K\subset (0,+\infty)$, there exists $\theta>0$ such that $
f''(k)\leq -\theta$ for all $k\in K$),
it can be checked with a similar argument to the one in the proof of Lemma \ref{lem:secFP:1} that there exist $\epsilon >0$ and $M_1>0$
such that for every $k\in[\kappa^*-\epsilon,\kappa^*+\epsilon]$,
\begin{equation}
\label{eq:chap:MFG_model:OS_estimate}
|H_q(k,u'(k))|\geq M_1|\kappa^*-k|.
\end{equation}
Consider $k\neq \kappa^*$ such that $|k-\kappa^*|\leq \epsilon$; by differentiating \eqref{eq:chap:MFG_model:secGeneralities:HJ} at $k$, we obtain
\begin{displaymath}
u''(k) = \frac{u'(k)\left(\rho - f'(k)\right)}{H_q(k,u'(k))}.
\end{displaymath}
Using estimate \eqref{eq:chap:MFG_model:OS_estimate} and the regularity of $f$, we deduce that there exists a constant $M_2>0$ independent of $k$
taken in $[\kappa^*-\epsilon,\kappa^*+\epsilon]$ such that
\begin{displaymath}
|u''(k)| \leq M_2 u'(k).
\end{displaymath}
This shows that $u''\in L^\infty(\kappa^*-\epsilon,\kappa^*+\epsilon)$. Finally, $u\in W^{2,\infty}_{\rm loc}(0,+\infty)$.
\end{remark}
\begin{proof}[Proof of Proposition \ref{prop:FP_1}]
For brevity, we use the notation $b(k)= H_q(k,u'(k))$. If $m$ satisfies \eqref{eq:chap:MFG_model:FP} in the sense of distributions
and \eqref{eq:12}, then the weak derivative of $bm$ is $\eta(\cdot, u(\cdot))-\nu m$,
a bounded measure from \eqref{eq:12} and Assumption \ref{ass:secFP:1}. Hence $bm \in {\rm BV}_{\rm loc}(0,+\infty)$. On the other hand,
$1/b\in C^1((0,\kappa^*)\cup(\kappa^*,+\infty))$. Therefore, the restriction of $m$ to $(0,\kappa^*)\cup(\kappa^*,+\infty)$
can be written $(bm)/b$ and identified with a function in ${\rm BV}_{\rm loc}(
(0,\kappa^*)\cup(\kappa^*,+\infty))$. The Lebesgue decomposition of $m$ is $m=m_{ac}+m_s$;
the singular part $m_s$ is supported in $\{\kappa^*\}$, hence $m_s=\lambda \delta_{\kappa^*}$ with $\lambda\ge 0$;
the regular part $m_{ac}$ can be identified with a nonnegative function in $L^1(0,+\infty)$. \\
We claim that $\lambda=0$. To prove this fact, consider a family $(\varphi_\varepsilon)_{\varepsilon>0}$
such that
\begin{itemize}
\item $\varphi_\varepsilon \in C^\infty_c(0,+\infty)$
\item ${\rm supp} (\varphi_\varepsilon) \subset [\kappa^*- \varepsilon,\kappa^*+\varepsilon]$
\item $\varphi_\varepsilon(\kappa^*)=1$
\item $\varphi_\varepsilon$ is non decreasing on $[0,\kappa^*]$, and non increasing in $[\kappa^*,+\infty)$
\item $\|{\varphi_\varepsilon'}\|_\infty\leq 2/\varepsilon$
\end{itemize}
We deduce from \eqref{eq:chap:MFG_model:FP}-\eqref{eq:12} that for $\varepsilon$ small enough,
\begin{displaymath}
-\int_{{\mathbb R}_+} \varphi_\varepsilon'(k)b(k)dm(k) =
-\nu\int_{{\mathbb R}_+} \varphi_\varepsilon(k) dm(k) + \int_{{\mathbb R}_+} \varphi_\varepsilon(k) \eta(k, u(k)) dk.
\end{displaymath}
For $\varepsilon\in(0, \kappa^*/2)$, this leads to
\begin{equation*}
\begin{split}
&-\int_{\kappa^* - \varepsilon} ^ {\kappa^* + \varepsilon} \varphi_\varepsilon'(k)b(k)m_{ac}(k) dk \\=&
-\nu \int_{\kappa^* - \varepsilon} ^ {\kappa^* + \varepsilon} \varphi_\varepsilon(k) m_{ac}(k) dk
+ \int_{\kappa^* - \varepsilon} ^ {\kappa^* + \varepsilon} \varphi_\varepsilon(k) \eta(k, u(k)) dk -\nu \lambda ,
\end{split}
\end{equation*}
because $b(\kappa^*)=0$.
The construction of $ \varphi_\varepsilon$ and \eqref{eq:chap:MFG_model:FP1}-\eqref{eq:chap:MFG_model:FP2} ensure that
\begin{displaymath}
\sup_{k\in [\kappa^*-\varepsilon,\kappa^*+\varepsilon]} |\varphi'_\varepsilon(k) b(k)|\leq 2M.
\end{displaymath}
This yields
\begin{displaymath}
0\le \nu \lambda\leq 2M\int_{\kappa^*-\varepsilon}^{\kappa^*+\varepsilon} m_{ac}(k)dk+
\int_{\kappa^* - \varepsilon} ^ {\kappa^* + \varepsilon} \varphi_\varepsilon(k) \eta(k, u(k)) dk.
\end{displaymath}
Letting $\varepsilon\to 0$, we obtain that $\lambda=0$ by applying the Lebesgue dominated convergence theorem. The claim is proved.
\\
Therefore, $m\in L^1(0,+\infty)$, and \eqref{eq:chap:MFG_model:FP1} implies that $bm\in W^{1,1}_{\rm loc}(0,+\infty)$,
and that
$0\le m\in L^1(0,+\infty)\cap C^1((0,\kappa^*)\cup (\kappa^*,+\infty))$.
\\
Integrating equation \eqref{eq:chap:MFG_model:FP} over the intervals $(0,\kappa^*)$ and
$(\kappa^*,+\infty)$, we see that
\begin{equation*}
\begin{split}
& b(k)m(k)=\\
&\left\{ \begin{array}[c]{rl}
\displaystyle \int_0^{k}\eta(\kappa, u(\kappa))
\exp\left(-\displaystyle \int_\kappa^k\frac{{{I\!\!Z}}u}{ b(z)}dz\right)d\kappa + A \exp\left(-\displaystyle \int_{\frac {\kappa^*} 2}^k\frac{{{I\!\!Z}}u}{ b(z)}dz\right) ,&\text{ if } 0<k <\kappa^*,\\
\displaystyle -\int_k^{\infty}\eta(\kappa, u(\kappa))\exp
\left(\displaystyle \int_k^\kappa\frac{{{I\!\!Z}}u}{ b(z)}dz\right)d\kappa +
B
\exp\left(\displaystyle \int_k^{\frac {3\kappa^*} 2} \frac{{{I\!\!Z}}u}{ b(z)}dz \right) , &\text{ if } k>\kappa ^*,
\end{array}\right.
\end{split}
\end{equation*}
for two real numbers $A$ and $B$. But, from Lemma \ref{lem:secFP:1}, we see that a necessary condition for the integrability of $m$ is that $A=B=0$.
Imposing $A=B=0$, we see that $m$ is a nonnegative function.
It remains to check \eqref{eq:12}.
Set $I_1=\int_0^{\kappa^*}m(k)dk$ and $I_2=\int_{\kappa^*}^{+\infty}m(k)dk$.\\
Focusing on $I_1$,
\begin{equation}
\label{eq:chap:MFG_model:FP11}
\begin{array}[c]{rcl} I_1 &=& \displaystyle \int_0^{\kappa^*}\frac{1}{b(k)}
\int_0^k\eta(\kappa, u(\kappa))\exp\left(-\int^k_\kappa\frac{\nu}{b(z)}dz\right) d\kappa dk,\\
&=& \displaystyle \int_0^{\kappa^*}\eta(\kappa, u(\kappa))\int_\kappa^{\kappa^*}\frac{1}{b(k)}\exp\left(-\int^k_\kappa\frac{\nu}{b(z)}dz\right)dkd\kappa, \\
&=&\displaystyle \frac{1}{\nu}\int_0^{\kappa^*}\eta(\kappa, u(\kappa)) d\kappa.
\end{array}
\end{equation}
The second line in \eqref{eq:chap:MFG_model:FP11} is obtained using the non negativity of the integrand and Tonelli's theorem.
The third line in \eqref{eq:chap:MFG_model:FP11} comes from the fact that
$ \int_\kappa^{\kappa^*}\frac{\nu}{b(z)}dz = +\infty$,
which is a consequence of Lemma \ref{lem:secFP:1}.
\\
It can be proved in the same way that
\begin{displaymath}
I_2 = \frac{1}{\nu}\int_{\kappa^*}^{+\infty}\eta(\kappa, u(\kappa)) d\kappa.
\end{displaymath}
Hence $\nu (I_1+I_2)= \int_{{\mathbb R}_+}\eta(\kappa, u(\kappa)) d\kappa $,
and $m$ given by \eqref{eq:chap:MFG_model:FP10} is the unique solution of
\eqref{eq:chap:MFG_model:FP}-\eqref{eq:12}.
\end{proof}
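As a sanity check of the explicit formula for $m$ and of the identity $\nu(I_1+I_2)=\int_{{\mathbb R}_+}\eta$, here is a small self-contained numerical sketch in which the drift $b$, the entry rate $\eta$ and the exit rate $\nu$ are toy data chosen for illustration only: in particular, $b(k)=\theta(\kappa^*-k)$ mimics the sign pattern and the linear behaviour of Lemma \ref{lem:secFP:1}, but is not the drift produced by an actual value function. For this linear drift, the exponential factor $\exp(-\int_\kappa^k\nu/b)$ is available in closed form, which the code below uses.
\begin{verbatim}
# Toy check of the explicit formula for m (with A = B = 0); all data assumed.
import numpy as np
from scipy.integrate import quad

nu, theta, kstar = 0.5, 0.25, 2.0
eta = lambda k: np.exp(-(k - 1.0)**2)      # toy entry rate
p = nu/theta                               # exponent of the kernel for linear b

def m(k):                                  # closed-form kernel for b(k) = theta*(kstar - k)
    if k < kstar:
        inner = quad(lambda x: eta(x)*(kstar - x)**(-p), 0.0, k)[0]
        return (kstar - k)**(p - 1.0)/theta*inner
    inner = quad(lambda x: eta(x)*(x - kstar)**(-p), k, np.inf)[0]
    return (k - kstar)**(p - 1.0)/theta*inner

I1 = quad(m, 0.0, kstar)[0]
I2 = quad(m, kstar, 20.0)[0]
print(nu*(I1 + I2), quad(eta, 0.0, np.inf)[0])   # the two numbers agree
\end{verbatim}
The two printed numbers agree up to quadrature error, illustrating \eqref{eq:14}; moreover $m$ stays bounded across $\kappa^*$, in line with the absence of a singular part established above.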
\section{Equilibrium}
\label{sec:equilibrium}
This paragraph is devoted to existence of equilibria.
\subsection{Stability results for \eqref{eq:chap:MFG_model:secGeneralities:HJ}-\eqref{eq:chap:MFG_model:secHJ:3}}
\label{sec:equil1}
\begin{lemma}
\label{lem2}
Under Assumptions \ref{ass:secHJ:1} and \ref{ass:secHJ:3}, the value function $u$ is nonincreasing with respect to $w$, i.e.\ for every $w,\tilde w \in(0,+\infty)^d$,
\begin{displaymath}
w \le \tilde w \quad \quad \Rightarrow \quad \quad u(\cdot,w)\geq u(\cdot,\tilde w).
\end{displaymath}
\end{lemma}
\begin{proof}
Assume $w \le \tilde w$ and consider an admissible trajectory associated with the vector of prices $\tilde w$: it satisfies
$\frac{dk}{dt}(t)=f(k(t),\tilde w) -c(t)$ with $k(0)=k_0$. The differential equation also reads:
$\frac{dk}{dt}(t)=f(k(t), w) -( c(t) +f(k(t), w)- f(k(t),\tilde w)) $, and $ c(t) +f(k(t), w)- f(k(t),\tilde w)\ge c(t)\ge 0$, which can be used as a control. This yields that
\begin{displaymath}
u(k_0,w)\ge \int_0^\infty U\left( c(t) +f(k(t), w)- f(k(t),\tilde w)\right) e^{-\rho t} dt \ge \int_0^\infty U( c(t) ) e^{-\rho t} dt .
\end{displaymath}
Taking the supremum on all admissible trajectories associated with $\tilde w$, we deduce that for all $k_0>0$, $u(k_0,w)\ge u(k_0,\tilde w)$.
\end{proof}
\begin{lemma}
\label{lem:equilibrium:continuity_k*}
Under Assumption \ref{ass:secHJ:3}, the map $(0,+\infty)^d \ni w\mapsto \kappa^*(w)\in (0,+\infty)$ defined in \eqref{eq:1}
is continuous.
\end{lemma}
\begin{proof}
Consider a sequence $(w_n)_{n\in {\mathbb N}}$, $w_n\in (0,+\infty)^d$, such that $w_n$ tends to $w\in (0,+\infty)^d$ as $n\to +\infty$.\\
We first claim that $\kappa^*(w_n)$ remains in a compact subset of $(0,+\infty)$. We proceed by contradiction:
\begin{itemize}
\item Assume first that up to the extraction of a subsequence,
$\kappa^*(w_n)$ tends to $+\infty$ as $n\to +\infty$. Hence, for any $k>0$, there exists $N>0$ such that if $n\ge N$, then
$\frac {\partial f}{\partial k} (k, w_n)> \rho$. Passing to the limit using the $C^1$ regularity of $f$ (see Assumption \ref{ass:secHJ:3}),
we get that $\frac {\partial f}{\partial k} (k, w)\ge \rho$ for all $k>0$. But $k\mapsto f(k,w)$ is strictly concave; hence, $\frac {\partial f}{\partial k} (k, w)> \rho$ for all $k>0$.
This contradicts point 2.iii in Assumption \ref{ass:secHJ:3} (see also Remark \ref{rem:1}).
\item Assume that up to the extraction of a subsequence, $\kappa^*(w_n)$ tends to $0$ as $n\to +\infty$: arguing as above, this implies that
$\frac {\partial f}{\partial k} (k, w)< \rho$ for all $k>0$. This contradicts point 2.ii in Assumption \ref{ass:secHJ:3}.
\end{itemize}
The claim is proved.\\
Possibly after the extraction of a subsequence, $\kappa^*(w_n)$ tends to a positive limit $\tilde \kappa$. It is easy to deduce from Assumption \ref{ass:secHJ:3}
that $\frac {\partial f}{\partial k} (\tilde \kappa, w)=\rho$. Therefore $\tilde \kappa=\kappa^* (w)$, and the uniqueness of the cluster point implies that the whole sequence
$\kappa^*(w_n)$ tends to $\kappa^*(w)$. This completes the proof.
\end{proof}
\begin{lemma}[Continuity of $w\mapsto u(\cdot,w)$]
\label{lem:equilibrium:stability}
Let $(w_n)_{n\in{\mathbb N}}$, $w_n\in (0,+\infty)^d$, be a sequence converging to $w\in(0,+\infty)^d$ as $n\to \infty$.
Then, under Assumptions \ref{ass:secHJ:1} and \ref{ass:secHJ:3},
\begin{displaymath}
u(\cdot,w_n)\rightarrow u(\cdot, w)
\end{displaymath}
in $C^1(K)$ for every compact subset $K$ of $(0,+\infty)$.
\end{lemma}
\begin{proof}
We may assume without loss of generality that there exist two vectors $\underline{w},\overline{w}\in (0,+\infty)^d$ such that, for all $n\ge 0$,
\begin{displaymath}
\underline{w} \le w_n \le \overline{w}.
\end{displaymath}
From Lemma \ref{lem2}, the following inequalities hold for all $n\ge 0$:
\begin{displaymath}
u(\cdot,\overline{w})\leq u(\cdot,w_n)\leq u(\cdot,\underline{w}).
\end{displaymath}
Using \eqref{eq:chap:MFG_model:secGeneralities:HJ} and the coercivity of $q\mapsto H(k,q,w_n)$ uniform w.r.t. $n$ and
$k\in K$, where $K$ is a compact subset of $ (0,+\infty)$,
we see that $\frac{\partial u}{\partial k}(k,w_n)$ is bounded uniformly in $n$ and $k\in K$. Moreover, if
$\lim_{c\to +\infty} U(c)=+\infty$, then $\frac{\partial u}{\partial k}(\cdot,w_n)$ is bounded uniformly away from $0$ w.r.t. $n$ and $k\in K$.
\\
Since $(u(\cdot,w_n))_{n\in{\mathbb N}}$ is a sequence of concave functions uniformly bounded on every compact subset of $(0,+\infty)$,
there exists a continuous and concave function $v:(0,+\infty)\rightarrow {\mathbb R}$ such that, after the extraction of a subsequence,
\begin{itemize}
\item $u(\cdot,w_n)\rightarrow v$ locally uniformly in $(0,+\infty)$
\item $\frac{\partial u}{\partial k}(\cdot,w_n)\rightarrow v'$ almost everywhere in $(0,+\infty)$.
\end{itemize}
On the other hand, from Lemma \ref{lem:equilibrium:continuity_k*}, there exist $\underline \kappa>0$ and $\overline \kappa> \underline \kappa$ such that
\begin{displaymath}
\underline \kappa <\min_{\underline{w}\leq w \leq \overline{w}}\kappa ^*(w)\leq \max_{\underline{w}\leq w \leq
\overline{w}}\kappa^*(w)< \overline \kappa.
\end{displaymath}
Take any compact interval $[a,b]$ such that $0<a< \underline \kappa $ and $ \overline \kappa<b$.
The functions $u(\cdot,w_n)$ are uniformly Lipschitz viscosity solutions (with $\frac{\partial u}{\partial k}(\cdot,w_n)$ bounded away from $0$ if $\lim_{c\to +\infty} U(c)=+\infty$) of
\eqref{eq:chap:MFG_model:secGeneralities:HJ} (with $w=w_n$) on $( a , b)$ with state constrained boundary conditions at $ a $ and $b$.
From the continuity of $H$ on $[ a ,b]\times(0,+\infty)^d\times (0,+\infty)^d$, the uniform bounds on $\frac{\partial u}{\partial k}(\cdot,w_n)$ stated above and
the uniform convergence of $(u(\cdot,w_n))_{n\in{\mathbb N}}$ towards $v$ on $[ a ,b ]$,
stability results for viscosity solutions (see e.g.~\cite{MR1484411}) can be used and yield that
$v$ is a viscosity solution of
\begin{displaymath}
\rho v(k) = H(k,v'(k), w),
\end{displaymath}
on $( a,b)$, with state constrained boundary conditions at $k= a$ and $k=b$.
Note that the possibility that $H(k,q,w)\to+\infty$ as $q\to 0$ does not cause any difficulty because, in this case, $\frac{\partial u}{\partial k}(\cdot,w_n)$ is uniformly bounded away from $0$. From this observation, we can also use well-known results on the uniqueness of state constrained solutions of the Hamilton-Jacobi equation, see e.g. \cite{MR1484411}, and find that $v=u(\cdot,w)$.
In fact, the convergence holds locally in $C^1$. We know that
\begin{itemize}
\item $u(\cdot,w_n)$ tends to $ u(\cdot,w)$ uniformly in $[a,b]$
\item there exists a measurable subset $E$ of $[a,b]$, such that the Lebesgue measure of $[a,b] \setminus E$ is zero and that
$\frac{\partial u}{\partial k} (\cdot,w_n)$ tends to $ \frac{\partial u}{\partial k} (\cdot,w)$ pointwise in $E$.
\end{itemize}
Note that after slightly modifying $a$ or $b$ if necessary, we can always assume that $a\in E$ and $b\in E$. A variant of Dini's first theorem yields that the convergence
of $ \frac{\partial u}{\partial k} (\cdot,w_n)$ is in fact uniform in $[a,b]$: for completeness, the proof is given in what follows.\\
The function $ \frac{\partial u}{\partial k}(\cdot,w)$ is continuous, thus uniformly continuous on $[a,b]$; hence, given $\epsilon>0$,
it is possible to choose $\delta>0$ small enough such that
\begin{displaymath}
|k-k'|\le \delta\quad \Rightarrow \quad \left| \frac{\partial u}{\partial k}(k,w)- \frac{\partial u}{\partial k}(k',w)\right|<\frac{\varepsilon}{2},\quad\quad \forall k,k'\in[a,b].
\end{displaymath}
For such a choice of $\delta>0$, it is possible to define a finite
subdivision $(\sigma_i)_{i\in\{0,\dots,I\}}$ of $[a,b]$ such that
\begin{itemize}
\item for every $i\in \{ 0,\dots,I \}$, $\sigma_i\in E$.
\item for any $i\in\{0,\dots,I-1\}$, $0<\sigma_{i+1}-\sigma_i<\delta$.
\end{itemize}
On the other hand, for any $k\in [a,b]$, there exists $i_0\in\{0,\dots I-1\}$ such that $\sigma_{i_0}\leq k\leq \sigma_{i_0+1}$. Then the concavity of $u$ with respect to $k$ yields
\begin{align*}
\frac{\partial u}{\partial k}(k,w_n) - \frac{\partial u}{\partial k}(k,w)&\leq \frac{\partial u}{\partial k}(\sigma_{i_0},w_n) - \frac{\partial u}{\partial k}(\sigma_{i_0+1},w)\\
&=\frac{\partial u}{\partial k}(\sigma_{i_0},w_n) - \frac{\partial u}{\partial k}(\sigma_{i_0},w) + \frac{\partial u}{\partial k}(\sigma_{i_0},w) - \frac{\partial u}{\partial k}(\sigma_{i_0+1},w).
\end{align*}
Taking $N\in {\mathbb N}$ large enough such that for every $n\geq N$,
\begin{displaymath}
\max_{0\le i\le I }\left| \frac{\partial u}{\partial k}(\sigma_{i},w_n) - \frac{\partial u}{\partial k}(\sigma_{i},w)\right| <\frac{\varepsilon}{2}
\end{displaymath}
yields that
\begin{displaymath}
\frac{\partial u}{\partial k}(k,w_n) - \frac{\partial u}{\partial k}(k,w) < \varepsilon,\quad \forall n\geq N.
\end{displaymath}
A similar argument can be used to bound $ \frac{\partial u}{\partial k}(k,w_n) - \frac{\partial u}{\partial k}(k,w)$ from below. Finally, for any $ \varepsilon>0$ there exists
$N>0$ such that
\begin{displaymath}
\sup_{k\in[a,b]}\left| \frac{\partial u}{\partial k}(k,w_n) - \frac{\partial u}{\partial k}(k,w)\right|< \varepsilon,\quad\quad \forall n\geq N.
\end{displaymath}
This completes the proof.
\end{proof}
\subsection{Existence of equilibria}\label{sec:equil2}
\begin{proof} [Proof of Theorem \ref{th_ex_equil}]
Recall that $\Phi$ and $g$ are respectively defined in Assumption \ref{ass:chap:MFG_model:S} and formula \eqref{eq:supply0}. Let $\epsilon$ be the constant appearing in Assumption
\ref{ass:chap:MFG_model:technical_assump}. There exist two constants $0<\underline{\kappa}\le \overline \kappa<+\infty$ such that for all $w \in [\epsilon, 1/\epsilon]^d$, $\underline \kappa < \kappa^* (w)< \overline \kappa$. Hence,
$m(\cdot, w)$ is supported in the compact interval $ J= \hbox{conv} \left([
\underline \kappa, \overline \kappa]\cap \hbox{support}(\hat \eta) \right)$.
We claim that the map $w\mapsto m(\cdot, w)$ is continuous from $[\epsilon, 1/\epsilon]^d$ to the set of probability measures supported in $J$.
Indeed, let $(w_n)_{n\in{\mathbb N}}$, $w_n\in [\epsilon, 1/\epsilon]^d $, be a sequence converging to $w$ as $n\to +\infty$. From Lemma \ref{lem:equilibrium:stability}, $ u(\cdot,w_n)\to u(\cdot, w)$ in $C^1(K)$ for any compact subset $K$ of $(0,+\infty)$. The probability measures $m(\cdot, w_n) $ are all supported in $J$.
Hence, the sequence $m(\cdot, w_n) $ has a cluster point $\mu$ in the weak $*$ topology. Let us prove that $\mu=m(\cdot, w)$: for any test function $\phi(\cdot)\in C^\infty_c(0,+\infty)$,
\begin{displaymath}
-\int_0^{+\infty}\phi'(k)b(k,w_n)m(k,w_n)dk = \int_0^{+\infty}\phi(k)\eta(k)dk - \nu \int_0^{+\infty}\phi(k)m(k,w_n)dk.
\end{displaymath}
where $b$ is given by \eqref{eq:b}.\\
The right-hand side converges to $\displaystyle
\int_0^{+\infty}\phi(k)\eta(k)dk -\nu \int_0^{+\infty}\phi(k)\mu(k)dk$.
On the other hand, the $C^1$ convergence of $u(\cdot,w_n)$ to $u(\cdot,w)$ on every compact subset of $(0,+\infty)$ implies the uniform convergence of $ H_q(\cdot,\frac{\partial u}{\partial k}\left(\cdot,w_n),w_n\right)$ to $H_q\left(\cdot,\frac{\partial u}{\partial k}(\cdot,w),w\right)$ in $J$.
We deduce that
\begin{displaymath}
\int_0^{\infty}\phi'(k)H_q\left(k,\frac{\partial u}{\partial k}(k,w_n),w_n\right)m(k,w_n)dk \to \int_0^{\infty}\phi'(k)H_q\left(k,\frac{\partial u}{\partial k}(k,w),w\right)\mu(k)dk.
\end{displaymath}
Therefore $\mu = m(\cdot,w)$ and the whole sequence $ m(\cdot,w_n)$ weakly $*$ converges to $m(\cdot,w)$ as $n\to \infty$. The map $w\mapsto m(\cdot,w)$ is continuous on $[\epsilon, 1/\epsilon]^d$.
For $\lambda\in [0,1]$, we then consider the map $T_\lambda$ defined on $[\epsilon, 1/\epsilon]^d$ by
\begin{equation}
\label{eq:10001}
T_\lambda(w)=\argmin\left\{ \Phi(\cdot) + \int_0^\infty g(k, \cdot) \Bigl( (1-\lambda) d\hat \eta (k) + \lambda dm(k,w) \Bigr) \right\},
\end{equation}
where the function $g$ has been defined in \eqref{eq:supply0} (recall that $w\mapsto g(k,w)$ is convex).
From the observation made above on $m(\cdot, w)$ and from Assumption \ref{ass:chap:MFG_model:S},
the function to be minimized is continuous, strictly convex and coercive on $[0,+\infty)^d$; hence $T_\lambda(w)$ is well defined. Moreover, $\|T_\lambda(w)\|_\infty$ is bounded uniformly in $w\in [\epsilon, 1/\epsilon]^d$.
\\
Let $w_n$ and $\lambda_n$ be two sequences taking their values respectively in $ [\epsilon, 1/\epsilon]^d$ and
in $[0,1]$; assume that $w_n$ tends to $w$ and that $\lambda_n$ tends to $\lambda$. The sequence $T_{\lambda_n } (w_n)$ takes its values in a compact; hence, up to the extraction of a subsequence, we may assume that $T_{\lambda_n } (w_n)$ converges to some $\tilde w$. Since
$m(\cdot, w_n)$ weakly $*$ converges to $m(\cdot, w)$, it is easy to check that $\tilde w=T_\lambda(w)$ and that the whole sequence $ T_{\lambda_n } (w_n)$ converges. Hence, the map $(\lambda, w)\mapsto T_\lambda(w)$ is continuous.
For $\lambda\in [0,1]$, we consider the equation: find $w\in [\epsilon, 1/\epsilon]^d$ such that $w-T_\lambda(w)=0$, which we write $\chi(w,\lambda)=0$. We now aim at applying Brouwer degree theory to $\chi$.
First, setting $t_0= \argmin\left\{ \Phi(\cdot) + \int_0^\infty g(k, \cdot)d\hat \eta (k)\right\}$, which does not depend on $w$, the equation $\chi(w,0)=0$ reads $w=t_0\in (\epsilon, 1/\epsilon)^d$. Therefore,
\begin{equation}
\label{eq:10002}
\hbox{deg}\left(\chi(\cdot, 0), (\epsilon, 1/\epsilon)^d, 0_{{\mathbb R}^d}\right)=1.
\end{equation}
Second, for all $\lambda \in [0,1]$, we know from Assumption \ref{ass:chap:MFG_model:technical_assump} that the equation $w-T_\lambda(w)=0$ has no solution on the boundary of $[\epsilon, 1/\epsilon]^d$.
From the two observations above, we see that for all $\lambda\in [0,1]$,
\begin{equation}
\label{eq:10003}
\hbox{deg}\left(\chi(\cdot, \lambda), (\epsilon, 1/\epsilon)^d, 0_{{\mathbb R}^d}\right)=1.
\end{equation}
We deduce that there exists $w^*\in (\epsilon, 1/\epsilon)^d$ such that
\begin{displaymath}
w^*= \argmin\left\{ \Phi(\cdot) + \int_0^\infty g(k, \cdot) dm(k,w^*) \right\}.
\end{displaymath}
Writing the first order necessary optimality conditions associated with this minimization problem, we see that $w^*$ satisfies \eqref{eq:clearing_condition}.
\end{proof}
\begin{remark}
We have actually proved more than the existence of an equilibrium, namely that $\hbox{deg}(\chi(\cdot,1), (\epsilon, 1/\epsilon)^d, 0)=1$.
\end{remark}
\subsection{Assumption \ref{ass:chap:MFG_model:technical_assump} holds in the examples of Subsection \ref{sec:import-exampl-util}}
\label{sec:assumption_holds}
\subsubsection{The Cobb-Douglas production function} \label{sec:assump_cobb_douglas}
\begin{proposition}
Assumption \ref{ass:chap:MFG_model:technical_assump} holds with
the Cobb-Douglas production function described in Subsection \ref{sec:import-exampl-util}.
\end{proposition}
\begin{proof}
From \eqref{eq:chap:examples:net_output_CobbDouglas}, we deduce that for two positive constants $c_1$ and $c_2$
\begin{equation}
\label{eq:10006}
g(k,w) = c_1 k^{\frac \alpha {1-|\beta|}} G_\beta (w)\quad \hbox{and}\quad \kappa^*(w)=c_2 \left(G_\beta (w)\right) ^{\frac {1-|\beta|} {1-\alpha-|\beta|}},
\end{equation}
where
\begin{displaymath}
G_\beta (w)= \prod_{i=1}^d w_i^{-\frac {\beta_i} {1-|\beta|}}.
\end{displaymath}
Setting
\begin{displaymath}
M_\lambda(w)= \left( \lambda
c_1 \int_0^\infty k^{\frac \alpha {1-|\beta|}} dm(k,w) + (1-\lambda) M_0\right) \quad \quad \hbox{with} \quad
M_0 = c_1\int_0^\infty k^{\frac \alpha {1-|\beta|}} d\hat \eta(k),
\end{displaymath}
\eqref{eq:10004} becomes
\begin{equation}
\label{eq:10008}
\Phi(w)+ M_\lambda (w) G_\beta(w) \le \Phi({\mathds{1}})+M_\lambda(w) .
\end{equation}
Since $\Phi(w)\ge 0$, \eqref{eq:10008} implies that $G_\beta(w) \le 1+ \Phi({\mathds{1}})/ M_\lambda(w)$.
On the other hand, \eqref{eq:10006} yields
\begin{equation}
\label{eq:10005}
M_\lambda(w)\ge c_1 \lambda \min \left( \underline a, c_2 \left(G_\beta (w)\right) ^{\frac {1-|\beta|} {1-\alpha-|\beta|}}\right) ^\frac \alpha {1-|\beta|} +(1-\lambda) M_0 ,
\end{equation}
where $\underline a$ is the minimal value in the support of $\hat \eta$. Combining the latter two estimates yields
\begin{equation}
\label{eq:10007}
G_\beta(w) \le 1+ \frac { \Phi({\mathds{1}}) } {
c_1 \lambda \min \left( \underline a, c_2 \left(G_\beta (w)\right) ^{\frac {1-|\beta|} {1-\alpha-|\beta|}}\right) ^\frac \alpha {1-|\beta|} +(1-\lambda)M_0 }.
\end{equation}
It is easy to deduce from \eqref{eq:10007} that $ G_\beta(w) < c_3 $, for a positive constant $c_3$ independent of $w$.
\\
If $\overline a$ is the maximal value in the support of $\hat \eta$, this implies that
\begin{equation}
\label{eq:10010}
\begin{split}
M_\lambda(w)& \le c_1 \lambda \max \left( \overline a, c_2 \left(G_\beta (w)\right) ^{\frac {1-|\beta|} {1-\alpha-|\beta|}}\right) ^\frac \alpha {1-|\beta|} +(1-\lambda) M_0 \\
&\le c_1 \lambda\max \left( \overline a, c_2 c_3 ^{\frac {1-|\beta|} {1-\alpha-|\beta|}}\right) ^\frac \alpha {1-|\beta|} +(1-\lambda) M_0 \\
& =c_4,
\end{split}
\end{equation}
where $c_4$ is a positive constant.
We deduce from this and \eqref{eq:10008} that
\begin{equation}
\label{eq:10009}
\Phi(w)\le \Phi({\mathds{1}})+M_\lambda(w) \le \Phi({\mathds{1}}) +c_4.
\end{equation}
From the coercivity of $\Phi$, this yields that $\max_{i} w_i < c_5$, for a positive constant $c_5$.
Then $ G_\beta(w) < c_3 $ implies that $\min_{i} w_i>\epsilon$, where $\epsilon$ is a positive constant which
can be obtained from the exponents $\beta_i$ and the constants $c_3$ and $c_5$.
Finally, taking a smaller value of $\epsilon$ if necessary, we get \eqref{eq:10012}.
\end{proof}
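To illustrate how the bounds above keep the price map in a compact set, here is a toy numerical sketch of the fixed-point iteration $w\mapsto T_\lambda(w)$ of \eqref{eq:10001} in the Cobb--Douglas case \eqref{eq:10006}. All constants are assumed for illustration only ($c_1=c_2=1$, $\alpha=0.3$, $\beta=(0.2,0.2)$, $\Phi(w)=\tfrac12|w|^2$, $\hat\eta$ a unit mass at $k=1$) and, purely to keep the sketch self-contained, the measure $m(\cdot,w)$ is replaced by a unit point mass at $\kappa^*(w)$; this crude surrogate is \emph{not} the measure of Proposition \ref{prop:FP_1}, it only serves to exhibit the self-consistency loop between prices and the target capital.
\begin{verbatim}
# Toy Picard iteration for w = T_1(w) in the Cobb-Douglas example (illustration only).
import numpy as np
from scipy.optimize import minimize

alpha, beta = 0.3, np.array([0.2, 0.2])
s = 1.0 - beta.sum()                              # 1 - |beta|
G = lambda w: np.prod(w**(-beta/s))               # G_beta(w)
kappa = lambda w: G(w)**(s/(s - alpha))           # kappa*(w), with c_2 = 1
gk = lambda k: k**(alpha/s)                       # g(k, w) = gk(k)*G(w), with c_1 = 1

def T(w, lam=1.0, khat=1.0):
    M = (1 - lam)*gk(khat) + lam*gk(kappa(w))     # M_lambda(w)
    obj = lambda x: 0.5*np.sum(np.exp(2*x)) + M*G(np.exp(x))
    return np.exp(minimize(obj, np.log(w)).x)     # optimise over x = log w' > 0

w = np.array([1.0, 1.0])
for _ in range(30):
    w = T(w)
print(w, kappa(w))    # converges to w* ~ (0.719, 0.719) and kappa*(w*) ~ 1.552
\end{verbatim}
In this toy example the iterates remain in a compact subset of $(0,+\infty)^2$, consistently with the a priori bounds $G_\beta(w)<c_3$ and $\max_i w_i<c_5$ derived in the proof.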
\subsubsection{Constant elasticity of substitution} \label{sec:assump_ces}
\begin{proposition}
Assumption \ref{ass:chap:MFG_model:technical_assump} holds with
the example of the production function with the constant elasticity of substitution
described in Subsection \ref{sec:import-exampl-util}.
\end{proposition}
\begin{proof}
Combining \eqref{eq:lambda_CES} and \eqref{eq:target_capital_CES} implies that
\begin{equation*}
\gamma = \frac {\delta+\rho} \alpha \left((\kappa^* (w))^\alpha + \sum_{j = 1}^d\left(\frac{\lambda\beta_j}{w_j}\right)^\frac{\beta_j}{1-\beta_j}\right)^{1-\gamma} (\kappa^*(w))^{1-\alpha}.
\end{equation*}
Since $\gamma\in (0,1)$, this yields
\begin{equation}
\label{eq:10015}
\frac {\delta+\rho} \alpha (\kappa^*(w))^{1-\alpha +\alpha (1-\gamma)}\le \gamma.
\end{equation}
Hence $\kappa^*(w) $ is bounded from above by a positive constant independent of $w$.
From this information and the coercivity of $\Phi$, we proceed as for the Cobb-Douglas function and see that
there exists a positive constant $c_1$ such that \eqref{eq:10004} implies that $\|w\|_{\infty}< c_1$.
Next, we claim that
\begin{equation}
\label{eq:10016}
\lim_{
\left\{ \begin{array}[c]{l}
\min_{i=1,\dots, d} w_i \to 0,\\ \|w\|_{\infty} \le c_1
\end{array}\right.
} g(0, w)=+\infty.
\end{equation}
Since $g(\cdot, w)$ is nondecreasing, we deduce from \eqref{eq:10016} that there exists a constant $\epsilon>0$ independent of $\lambda$ such that \eqref{eq:10004} implies $\min_{i} w_i >\epsilon$; taking a smaller value of $\epsilon$ if necessary, we get \eqref{eq:10012}.
We are left with proving \eqref{eq:10016}: we know that
\begin{displaymath}
g(k,w)\ge g(0,w)=\sup_{\ell}\left[\left( \sum_{i} \ell_i^{\beta_i}\right)^\gamma -w\cdot \ell\right].
\end{displaymath}
A competitor can be chosen by taking $\tilde \ell_i= w_i^{-\frac b {\beta_i}}$ where $b=\min_i \beta_i/2$. Therefore $g(k,w)\ge \left(\sum_i w_i^{-b}\right)^\gamma -\sum_i w_i ^{1-\frac b {\beta_i}}$. The first term tends to $+\infty$ if $\min_i w_i\to 0$, while the second term is bounded since $\|w\|_\infty\le c_1$.
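In more detail, this choice of $\tilde \ell$ gives
\[
\sum_{i} \tilde \ell_i^{\beta_i}=\sum_i w_i^{-b},\qquad
w\cdot\tilde \ell=\sum_i w_i^{\,1-b/\beta_i},\qquad
1-\frac{b}{\beta_i}\ge \frac12>0 \quad\hbox{since } b=\min_j\beta_j/2\le \beta_i/2,
\]
which is why the subtracted term remains bounded on $\{\|w\|_\infty\le c_1\}$ while the first term tends to $+\infty$ as $\min_i w_i\to 0$.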
\end{proof}
\paragraph{\bf Acknowledgements}
All the authors were partially supported by the ANR (Agence Nationale de la Recherche) through MFG project ANR-16-CE40-0015-01.
Y.A. acknowledges partial support from the Chair Finance and Sustainable Development and the FiME Lab (Institut Europlace de Finance). The paper was completed when Y.A. spent a semester at INRIA matherials. G.C. acknowledges the support of the Lagrange Mathematics and Computing Research Center.
\end{document}
\begin{document}
\title{The exact asymptotics for hitting probability of a remote orthant by a multivariate L\'evy process: the Cram\'er case}
\author{K.~Borovkov$^1$ and Z.~Palmowski$^2$}
\date{}
\footnotetext[1]{School of Mathematics and Statistics, The University of Melbourne, Parkville 3010, Australia. E-mail: [email protected].}
\footnotetext[2]{Department of Applied Mathematics, Wroc{\l}aw University of Science and Technology, 27 Wybrze\.{z}e Wyspia\'nskiego st.,
50-370 Wroc{\l}aw, Poland. E-mail: [email protected].}
\maketitle
\begin{abstract}
For a multivariate L\'evy process satisfying the Cram\'er moment condition and having a drift vector with at least one negative component, we derive the exact asymptotics of the probability of ever hitting the positive orthant that is being translated to infinity along a fixed vector with positive components. This problem is motivated by the multivariate ruin problem introduced in F.~Avram et~al.\ (2008) in the two-dimensional case. Our solution relies on the analysis from Y.~Pan and K.~Borovkov~(2017) for multivariate random walks and an appropriate time discretization.
{\em Key words and phrases:} large deviations, exact asymptotics, multivariate L\'evy process, multivariate ruin problem, Cram\'er moment condition, boundary crossing.
{\em AMS Classifications:} 60F10; 60G51.
\end{abstract}
\section{Introduction}
In this note we consider the following large deviation problem for continuous time processes with independent increments that was motivated by the multivariate simultaneous ruin problem introduced in~\cite{pal1}.
Let $\{\bv{X}(t)\}_{t\ge 0}$ be a $d$-dimensional ($d\ge 2$) right-continuous L\'evy process with $\bv{X}(0)=0$.
One is interested in finding the precise asymptotics for the hitting probability
of the orthant $sG$ as $s\to\infty$, where
$$
G:= \bv{g}+ Q^+
$$
for some fixed
\[
\bv{g}\in Q^+,
\quad
Q^+ := \{
\bv{x}=(x_1, \ldots, x_d) \in
\mathbb{R}^d: x_j > 0,\ 1 \le j \le d \}.
\]
Clearly, $sG= s\bv{g}+ Q^+,$ which is just the positive orthant translated by~$s\bv{g}.$
We solve this problem under appropriate Cram\'er moment assumptions and further conditions on the process~$\bv{X}$ and vertex~$\bv{g} $ that, roughly speaking, ensure that the ``most likely place" for $\bv{X}$ to hit $sG$ when $s$ is large is in the vicinity of the ``corner point"~$s\bv{g}$. More specifically, we show that the precise asymptotics of the hitting probability of $sG$ are given by the following expression: letting
\[
\tau (V) :=\inf\{t\ge 0: \bv{X}(t)\in V \}
\]
be the first hitting time of the set $V\subset \mathbb{R}^d$ by the process~$\bv{X},$ one has
\begin{equation}
\label{first}
{\bf P}\big( \tau(sG) < \infty \big) =
A_0 s^{-(d-1)/2}e^{-sD(G)}(1+o(1))
\quad
\mbox{as}\quad s\to\infty,
\end{equation}
where the ``adjustment coefficient" $D(G)$ is the value of the second rate function (see~\eqref{Dv} below) for the distribution of $\bv{X}(1)$ on the set~$G$ and the constant $A_0 \in (0, \infty)$ can be computed explicitly.
The asymptotics \eqref{first} extend a number of known results.
The main body of literature on the topic of precise asymptotics for boundary crossing large deviation probabilities in the multivariate case concerns the random walk theory, see \cite{paper23,Borbook,mainpaper} and references therein for an overview of the relevant results. The crude logarithmic asymptotics in the multivariate case were also derived independently in~\cite{Coll}.
The entrance probability to a remote set for L\'evy processes was analyzed later, usually under some specific assumptions on the structure of these processes. For example, paper~\cite{pal1} dealt with the two-dimensional reserve process of the form
\begin{equation}
\label{CL}
\bv{X}(t)=(X_1(t), X_2(t))=(c_1,c_2)\sum_{i=1}^{N(t)}C_i-(p_1,p_2)t,\quad t\ge 0,
\end{equation}
where $c_i, p_i>0,$ $i=1,2,$ are constants, $\{C_i\}_{i\ge 1}$ is a sequence of i.i.d.\ claim sizes, and $N(t)$ is a Poisson process independent of the claim sizes. That model admits the following interpretation: the process $\bv{X}$ describes the dynamics of the reserves $X_i(t),$ $i=1,2,$ of two insurance companies that divide between them both claims and premia in some pre-specified proportions. In that case, ${\bf P}\big( \tau(sG) < \infty \big)$ corresponds to the simultaneous ruin probability of the two companies. The main result of the present paper generalizes the assertion of Theorem~5 of~\cite{pal1} to the case of general L\'evy processes. One may also wish to mention here the relevant papers~\cite{BD,PalPist}.
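Before turning to the general statement, here is a small Monte Carlo illustration of the quantity under study for the model \eqref{CL}. All numerical parameters (unit Poisson intensity, ${\rm Exp}(1)$ claims, $c=(1,0.8)$, $p=(1.3,1.1)$, $\bv{g}=(1,1)$) are assumed for illustration only; the time horizon is truncated at $T$, which yields a lower bound for ${\bf P}(\tau(sG)<\infty)$ that stabilizes once $T$ is large. Since both components of $\bv{X}$ decrease between claims, it suffices to test the position immediately after each jump.
\begin{verbatim}
# Monte Carlo sketch of the simultaneous ruin probability for the model (CL).
import numpy as np
rng = np.random.default_rng(0)
lam, c, p, g = 1.0, np.array([1.0, 0.8]), np.array([1.3, 1.1]), np.array([1.0, 1.0])

def hits(s, T=100.0):
    t, S = 0.0, 0.0                       # S = accumulated claims
    while True:
        t += rng.exponential(1.0/lam)     # next claim epoch
        if t > T:
            return False
        S += rng.exponential(1.0)         # claim size, Exp(1)
        if np.all(c*S - p*t > s*g):       # both components above the barrier
            return True

for s in (1.0, 2.0, 3.0):
    n = 5000
    print(s, sum(hits(s) for _ in range(n))/n)
\end{verbatim}
With these parameters the drift vector ${\bf E}\,\bv{X}(1)=(-0.3,-0.3)$ has negative components, and the estimated probabilities decay roughly geometrically in $s$, in line with the exponential factor $e^{-sD(G)}$ in \eqref{first}.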
\section{The main result}
To state the main result, we will need some notations. For brevity, denote by $\bv{\xi}$ a random vector such that
\begin{equation}\label{defxi}\bv{\xi}\overset{d}{=}\bv{X}(1).
\end{equation}
Our first condition on~$\bv{X}$ is stated as follows.
\smallskip
[$\textbf{C}_1$]~{\em The distribution of $\mbox{\boldmath$\xi$}$ is non-lattice and there is no hyperplane $H = \{ \mbox{\boldmath$x$} : \langle \bv{a}, \mbox{\boldmath$x$} \rangle = c \} \subset \mathbb{R}^d$ such that ${\bf P} (\mbox{\boldmath$\xi$}\in H) = 1.$}
\smallskip
That condition can clearly be re-stated in terms of the covariance matrix of the Brownian component and spectral measure of $\bv{X}$, although such re-statement will not make it more compact nor transparent.
Next denote by
\begin{equation}\label{cumulant}
K(\bv{\lambda}):=\ln {\bf E} e^{ \langle \mbox{\scriptsize\boldmath$\lambda$} , \sbv{\xi}\rangle}, \quad \bv{\lambda} \in \mathbb{R}^d,
\end{equation}
the cumulant function of~$\bv{\xi}$ and let
\begin{equation*}
\Theta_{\psi} := \{ \bv{\lambda} \in \mathbb{R}^d: K(\bv{\lambda}) < \infty \}
\end{equation*}
be the set on which the moment generating function of $\bv{\xi}$ is finite. We will need the following Cram\'{e}r moment condition on $\bv{X}$:
\smallskip
[$\textbf{C}_2$]~\textit{$\Theta_{\psi}$ contains a non-empty open set}.
\smallskip
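For instance, for the two-dimensional process~\eqref{CL}, writing $\mu$ for the intensity of the Poisson process $N$ and $\varphi_C(\theta):={\bf E} e^{\theta C_1}$ for the moment generating function of the claims (neither of which was given a symbol above; they are introduced here only for this illustration), a standard computation for compound Poisson processes with drift gives
\[
K(\bv{\lambda})=\mu\big(\varphi_C(c_1\lambda_1+c_2\lambda_2)-1\big)-(p_1\lambda_1+p_2\lambda_2),
\]
so that $\Theta_{\psi}=\{\bv{\lambda}\in\mathbb{R}^d: \varphi_C(c_1\lambda_1+c_2\lambda_2)<\infty\}$, which contains the open half-space $\{c_1\lambda_1+c_2\lambda_2<0\}$ (the claims being nonnegative); hence [$\textbf{C}_2$] is satisfied in that model.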
The first rate function $\Lambda(\bv{\alpha})$ for the random vector $\bv{\xi}$ is defined as the Legendre transform of the cumulant function~$K$:
\begin{equation}
\label{rf1}
\Lambda(\bv{\alpha}) := \sup_{\sbv{\lambda} \in \Theta_{\psi}} (\langle \bv{\alpha},\bv{\lambda} \rangle - K(\bv{\lambda})), \quad \bv{\alpha} \in \mathbb{R}^d.
\end{equation}
The probabilistic interpretation of the first rate function is given by the following relation (see e.g.~\cite{mainpaper}): for any $\mbox{\boldmath$\alpha$} \in \mathbb{R}^d$,
\begin{equation}
\label{rf2}
\Lambda(\mbox{\boldmath$\alpha$}) = -\lim_{\varepsilon \to 0}\lim_{n \to \infty}
\frac{1}{n}\ln
{\bf P}\biggl(\frac{\bv{X}(n)}{n} \in U_{\varepsilon}(\mbox{\boldmath$\alpha$}) \biggr),
\end{equation}
where $U_{\varepsilon}(\mbox{\boldmath$\alpha$})$ is the $\varepsilon$-neighborhood of $\mbox{\boldmath$\alpha$}$.
Accordingly, for a set $B \subset \mathbb{R}^d$, any point $\mbox{\boldmath$\alpha$} \in B$ such that
\begin{equation}
\label{MPPdef}
\Lambda(\mbox{\boldmath$\alpha$}) =\Lambda (B):= \inf_{\sbv{v} \in B}\Lambda(\bv{v})
\end{equation}
is called a most probable point (MPP) of the set $B$ (cf.\ relation~(11) in~\cite{PaBo}).
If such a point $\mbox{\boldmath$\alpha$}$ is unique for a given set~$B$, we denote it by
\begin{equation}
\label{MPP}
\mbox{\boldmath$\alpha$}[B] := \argminA_{\sbv{v} \in B}\Lambda(\bv{v}).
\end{equation}
Now recall the definition of the second rate function $D$ that was introduced and studied in~\cite{paper23}: letting $ D_u(\bv{v}):=u\Lambda( \bv{v}/u) $ for $\bv{v} \in \mathbb{R}^d,$ one sets
\begin{equation}
\label{Dv}
D(\bv{v}) := \inf_{u > 0} D_u(\bv{v}), \quad \bv{v}\in \mathbb{R}^d, \qquad
D(B):= \inf_{\sbv{v}\in B} D(\bv{v}), \quad B\subset \mathbb{R}^d
\end{equation}
(see also~\cite{PaBo}). Further, we put
\begin{equation}
\label{mpt}
r_B := \argminA_{r > 0} D_{1/r}(B).
\end{equation}
Recall the probabilistic meaning of the function~$D$ and the value $r_B$.
While the first rate function $\Lambda$ specifies the main term in the asymptotics of the probabilities for the random walk values $\bv{X}(n)$ to be inside ``remote sets" (roughly speaking, $\Lambda (B)$ equals the RHS of~\eqref{rf2} with the neighbourhood of $\mbox{\boldmath$\alpha$}$ in it replaced with~$B$), the second rate function $D$ does that for the probabilities of {\em ever hitting\/} ``remote sets" by the whole random walk trajectory $\{\bv{X}(n)\}_{n\ge 0},$ the meaning of $r_B$ being that $1/r_B$ gives (after appropriate scaling) the ``most probable time" for the walk to hit the respective remote set. For more detail, we refer the interested reader to~\cite{paper23, PaBo}.
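To make these objects concrete, here is a small numerical sketch computing $\Lambda$, $D$ and $r_G$ for an assumed toy law of $\bv{\xi}$, namely a two-dimensional Gaussian with a negative drift vector; the Gaussian choice is purely for illustration, since then the Legendre transform \eqref{rf1} is available in closed form.
\begin{verbatim}
# Toy computation of the rate functions (illustration only): xi ~ N(mean, Sigma).
import numpy as np
from scipy.optimize import minimize_scalar

mean = np.array([-1.0, -0.5])
Sigma = np.array([[1.0, 0.3], [0.3, 1.0]])
Sinv = np.linalg.inv(Sigma)

def Lam(a):                                   # Legendre transform of K for the Gaussian
    d = np.asarray(a) - mean
    return 0.5*d @ Sinv @ d

def D(v):                                     # second rate function and the optimal r = 1/u
    res = minimize_scalar(lambda u: u*Lam(np.asarray(v)/u), bounds=(1e-3, 1e3),
                          method="bounded")
    return res.fun, 1.0/res.x

g = np.array([1.0, 1.0])                      # vertex of G = g + Q^+
grid = [g + np.array([x, y]) for x in np.linspace(0, 3, 31)
                             for y in np.linspace(0, 3, 31)]
print(D(g), min(D(v)[0] for v in grid))       # D(G) is attained at the corner g here
\end{verbatim}
In this example the minimum of $D$ over $G$ is attained at the vertex $\bv{g}$, with $D(G)\approx 2.4$ and $r_G\approx 0.82$, illustrating the role of the ``corner point'' and of the ``most probable hitting time'' $1/r_G$ discussed above.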
Define the Cram\'{e}r range $\Omega_{\Lambda}$ for $\bv{\xi}$ as follows:
\begin{equation*}
\Omega_{\Lambda}
:= \big\{ \mbox{\boldmath$\alpha$} ={\rm grad}\, K (\bv{\lambda}) : \bv{\lambda} \in {\rm int}(\Theta_{\psi}) \big\},
\end{equation*}
where the cumulant function $K(\bv{\lambda})$ of $\bv{\xi}$ was defined in~\eqref{cumulant} and $ {\rm int}(B)$ stands for the interior of the set~$B$. In words, the set $\Omega_{\Lambda}$ consists of all the vectors that can be obtained as the expectations of the Cram\'{e}r transforms of the law of $\bv{\xi}$, i.e.\ the distributions of the form $ e^{ \langle \mbox{\scriptsize\boldmath$\lambda$}, \mbox{\scriptsize\boldmath$x$}\rangle- K(\mbox{\scriptsize\boldmath$\lambda$} )}{\bf P} (\bv{\xi}\in d\bv{x})$, for parameter values $\bv{\lambda} \in {\rm int}(\Theta_{\psi})$.
For $\mbox{\boldmath$\alpha$} \in \mathbb{R}^d$, denote by $\bv{\lambda}(\mbox{\boldmath$\alpha$})$ the vector $\bv{\lambda}$ at which the upper bound in \eqref{rf1} is attained (when such a vector exists, in which case it is always unique):
\begin{equation*}
\Lambda(\bv{\alpha})
=\langle \bv{\alpha},\bv{\lambda}(\mbox{\boldmath$\alpha$})\rangle - K(\bv{\lambda}(\mbox{\boldmath$\alpha$})).
\end{equation*}
For $r>0,$ assuming that $\mbox{\boldmath$\alpha$}[rG] \in \Omega_{\Lambda},$ introduce the vector
\begin{equation}
\label{normaln}
\bv{N}(r) := {\rm grad}\, \Lambda(\mbox{\boldmath$\alpha$})\big|_{\mbox{\scriptsize\boldmath$\alpha$} = \mbox{\scriptsize\boldmath$\alpha$}[rG]}= \bv{\lambda}(\mbox{\boldmath$\alpha$}[rG]),
\end{equation}
which is a normal to the level surface of $\Lambda$ at the point $\mbox{\boldmath$\alpha$}[rG]$
(see e.g.\ (22) in~\cite{PaBo}).
The last condition that we will need to state our main result depends on the parameter $r > 0$ and is formulated as follows:
\smallskip
[\textbf{C}$_3(r)$]~\textit{One has }
\begin{equation*}
\Lambda(rG) = \Lambda(r\bv{g}), \quad r\bv{g} \in \Omega_{\Lambda}, \quad \bv{N}(r) \in Q^+, \quad \langle
{\bf E} \mbox{\boldmath$\xi$}, \bv{N}(r) \rangle < 0.
\end{equation*}
The first part of condition [\textbf{C}$_3(r)$] means that the vertex $r\bv{g}$ is an MPP for the set~$rG$. Note that under the second part of the condition, this MPP $r\bv{g}$ for $rG$ is unique (e.g., by Lemma~1 in~\cite{PaBo}). Since $ \bv{N}(r)$ always belongs to the closure of $Q^+$, the third part of condition [\textbf{C}$_3(r)$] just excludes the case when the normal $ \bv{N}(r)$ to the level surface of~$\Lambda$ at the point~$r\bv{g}$ belongs to the boundary of the set $rG$.
\begin{theorem*}
Let conditions {\rm [\textbf{C}$_1$], [\textbf{C}$_2$]} and {\rm [\textbf{C}$_3(r_G)$]} be met. Then the asymptotic relation~\eqref{first} holds true,
where $D(G)$ is the value of the second rate function~\eqref{Dv} on $G$ and the constant $A_0 \in (0, \infty)$ can be computed explicitly.
\end{theorem*}
The value of the constant $A_0 \in (0, \infty)$ is given by the limit as $\delta\to 0$ of the expressions given by formula~(68) in~\cite{PaBo} for the distribution of~$\bv{\xi}\overset{d}{=} \bv{X}(\delta)$. When proving the theorem below, we demonstrate that this limit does exist and is finite and positive.
\begin{proof}
For $\delta >0,$ consider the embedded random walk $\{\bv{X}(n\delta)\}_{n\in \mathbb{N}}$ and, for a set $V\subset\mathbb{R}^d$, denote the first time that this random walk hits the set~$V$ by
\[
\eta_\delta (V) :=\inf\{n\in \mathbb{N}: \bv{X}(n\delta)\in V\}.
\]
First observe that, on the one hand, for any $\delta>0,$ one clearly has
\begin{equation}
\label{>s}
{\bf P} (\tau (sG)<\infty)\ge {\bf P} (\eta_\delta (sG)<\infty) .
\end{equation}
On the other hand, assuming without loss of generality that $\min_{1\le j\le d}g_j\ge 1$ and setting $I(s):= (\tau(sG), \tau(sG)+\delta]\subset \mathbb{R}$ on the event $\{\tau (sG)<\infty\},$ we have, for any $\varepsilon>0,$
\begin{align}
{\bf P} (\eta_\delta ((s-\varepsilon)G)<\infty)
& \ge
{\bf P} \Bigl(\tau (sG)<\infty,
\sup_{ t \in I(s)} \|\bv{X} (t) - \bv{X}(\tau (sG))\|\le \varepsilon\Bigr)
\notag
\\
& = {\bf P} (\tau (sG)<\infty)
{\bf P} \Bigl(\sup_{ t \in I(s)}\|\bv{X} (t) - \bv{X}(\tau (sG))\|\le \varepsilon\Bigr)
\notag
\\
& = {\bf P} (\tau (sG)<\infty)
{\bf P} \Bigl(\sup_{ t \in (0,\delta]}\|\bv{X} (t) \|\le \varepsilon\Bigr),
\label{rf3}
\end{align}
where the last two relations follow from the strong Markov property and homogeneity of~$\bv{X}$.
Now take an arbitrarily small $\varepsilon>0$. As the process $\bv{X}$ is right-continuous, there exists a $\delta (\varepsilon)>0$ such that
\[
{\bf P} \Bigl(\sup_{ t \in (0,\delta(\varepsilon)]}\|\bv{X} (t) \|\le \varepsilon\Bigr)>(1+\varepsilon)^{-1},
\]
which, together with~\eqref{rf3}, yields for all $\delta\in (0,\delta(\varepsilon)]$ the inequality
\begin{equation}
\label{<s}
{\bf P} (\tau (sG)<\infty)\le (1+\varepsilon) {\bf P} (\eta_\delta ((s-\varepsilon)G)<\infty).
\end{equation}
The precise asymptotics of the probability on the RHS of~\eqref{>s} were obtained in~\cite{PaBo}. They are given in terms of the second rate function $D^{[\delta]}$ for the distribution of the jumps $\bv{X}(n\delta)-\bv{X}((n-1)\delta)\overset{d}{=}\bv{X}(\delta)$ in the random walk $\{\bv{X}(n\delta)\}_{n\ge 0}$. Recalling the well-known fact that the cumulant of $\bv{X}(\delta)$ is given by $\delta K$, we see that the first rate function $ \Lambda^{[\delta]}$ for $\bv{X}(\delta)$ equals
\begin{align*}
\Lambda^{[\delta]} (\bv{\alpha}) & =
\sup_{\sbv{\lambda} \in \Theta_{\psi}} (\langle \bv{\alpha},\bv{\lambda} \rangle - \delta K(\bv{\lambda}))
\\
&= \delta \sup_{\sbv{\lambda} \in \Theta_{\psi}} (\langle \bv{\alpha}/\delta,\bv{\lambda} \rangle - K(\bv{\lambda}))
=\delta \Lambda (\bv{\alpha}/\delta)
, \quad \bv{\alpha} \in \mathbb{R}^d
\end{align*}
(cf.~\eqref{rf1}). Therefore the second rate function (see~\eqref{Dv}) $D^{[\delta]}$ for $\bv{X}(\delta)$ is
\[
D^{[\delta]} (\bv{v}) := \inf_{u > 0} u\Lambda^{[\delta]}( \bv{v}/u)
= \inf_{u > 0} (u \delta) \Lambda (\bv{v}/(u \delta)) =D (\bv{v}),
\quad \bv{v} \in \mathbb{R}^d.
\]
That is, the second rate function for the random walk $\{\bv{X}(n\delta)\}$ is the same for all $\delta>0$, which makes perfect sense as one would expect the same asymptotics for the probabilities ${\bf P} (\eta_\delta (sG)<\infty)$ for different~$\delta$. Hence the respective value $r^{[\delta]}_G$ (see~\eqref{mpt}) can easily be seen to be given by $\delta r_G$.
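The scaling identity $\Lambda^{[\delta]}(\cdot)=\delta\Lambda(\cdot/\delta)$ can also be checked numerically; the sketch below uses an assumed two-dimensional Gaussian cumulant (chosen only because $\Lambda$ is then available in closed form) and compares the numerically computed Legendre transform of $\delta K$ with $\delta\Lambda(\cdot/\delta)$.
\begin{verbatim}
# Numerical check of Lambda^{[delta]}(alpha) = delta * Lambda(alpha/delta) for an
# assumed Gaussian cumulant K(lam) = <mu, lam> + |lam|^2/2, for which
# Lambda(alpha) = |alpha - mu|^2/2 in closed form.
import numpy as np
from scipy.optimize import minimize

mu = np.array([-0.5, -0.3])                    # assumed mean vector
K = lambda lam: lam @ mu + 0.5 * lam @ lam     # cumulant of the assumed law

def legendre_of_delta_K(alpha, delta):
    # sup_lam ( <alpha, lam> - delta * K(lam) ), computed by numerical optimisation
    obj = lambda lam: -(alpha @ lam - delta * K(lam))
    return -minimize(obj, np.zeros_like(alpha)).fun

alpha = np.array([1.2, 0.7])
for delta in (1.0, 0.5, 0.1):
    lhs = legendre_of_delta_K(alpha, delta)
    rhs = delta * 0.5 * np.sum((alpha / delta - mu) ** 2)
    print(delta, lhs, rhs)   # the two values agree up to the solver tolerance
\end{verbatim}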
Therefore, applying Theorem~1 in~\cite{PaBo} to the random walk $\{\bv{X}(n\delta)\}$ and using notation~$A^{[\delta]}$ for the constant $A$ appearing in that theorem for the said random walk, we conclude that, for any $\delta\in (0,\delta(\varepsilon)],$ as $s\to \infty,$
\begin{equation}
{\bf P}\big( \eta_\delta(sG) < \infty \big) = A^{[\delta]}s^{-(d-1)/2}e^{-sD(G)}(1+o(1)),
\end{equation}
and likewise
\begin{equation}
{\bf P}\big( \eta_\delta((s-\varepsilon)G) < \infty \big) = A^{[\delta]} (s-\varepsilon)^{-(d-1)/2}e^{-(s-\varepsilon) D(G)}(1+o(1)) .
\end{equation}
Now from~\eqref{>s} and~\eqref{<s} we see that, as $ s\to
\infty,$
\[
A^{[\delta]}(1+o(1))
\le
R(s):=\frac{{\bf P} (\tau (sG)<\infty)}{ s^{-(d-1)/2}e^{-sD(G)}}
\le
\frac{A^{[\delta]}(1+\varepsilon)e^{\varepsilon D(G)}}{(1-\varepsilon/s)^{(d-1)/2}}\, (1+o(1)).
\]
Therefore, setting $\underline{R}:= \liminf_{s\to\infty}R(s),$ $\overline{R}:= \limsup_{s\to\infty}R(s),$ we have
\[
A^{[\delta]} \le \underline{R}
\le \overline{R} \le (1+\varepsilon)e^{\varepsilon D(G)} A^{[\delta]}
\]
for any $\delta\in (0,\delta(\varepsilon)],$ and hence
\[
\limsup_{\delta \to 0} A^{[\delta]}
\le \underline{R}
\le \overline{R}
\le (1+\varepsilon)e^{\varepsilon D(G)} \liminf_{\delta \to 0} A^{[\delta]}.
\]
As $\varepsilon>0$ is arbitrarily small, we conclude that there exists $\lim_{\delta\to 0} A^{[\delta]}=:A_0\in (0,\infty).$ Therefore there also exists
\[
\lim_{s\to\infty}R(s)=A_0.
\]
The theorem is proved.
\end{proof}
{\bf Acknowledgements.} The authors are grateful to MATRIX Research Institute for hosting and supporting the Mathematics of Risk program during which they obtained the result presented in this note. This work was partially supported by Polish National Science Centre Grant No.~2015/17/B/ST1/01102
(2016-2019) and the ARC Discovery grant DP150102758.
The authors are also grateful to Enkelejd Hashorva, who pointed out a bug in the original version of the note.
\begin{thebibliography}{99}
\bibitem{pal1}
{\sc Avram, F., Palmowski, Z. and Pistorius, M. R.} (2008).
Exit problem of a two-dimensional risk process
from the quadrant: exact and asymptotic results.
{\em Ann.\ Appl.\ Probab.} {\bf 18}, 2421--2449.
\bibitem{BD}
{\sc Bertoin, J. and Doney, R.A.} (1994). Cram\'er's estimate for L\'evy processes.
{\em Stat.\ Probab.\ Lett.} {\bf 21}, 363--365.
\bibitem{paper23}
{\sc Borovkov, A. A. and Mogulskii, A. A.} (1996). The second rate function and the asymptotic problems of renewal and hitting the boundary for multidimensional random walks. {\em Siberian Math.\ J.} {\bf 37}, 745--782.
\bibitem{Borbook}
{\sc Borovkov, A. A.} (2013). {\em Probability Theory.} 2nd edn. Springer, London.
\bibitem{mainpaper}
{\sc Borovkov, A. A. and Mogulskii, A. A.} (2001).
Limit theorems in the boundary hitting problem for a multidimensional random walk.
{\em Siberian Math.~J.} {\bf 42}, 245--270.
\bibitem{Coll}
{\sc Collamore, J.F.} (1996).
Hitting probabilities and large deviations.
{\em Ann.\ Probab.} {\bf 24}, 2065--2078.
\bibitem{PalPist}
{\sc Palmowski, Z. and Pistorius, M.} (2009).
Cram\'er asymptotics for finite time first passage probabilities of general
L\'evy processes.
{\em Stat.\ Probab.\ Lett.} {\bf 79}, 1752--1758.
\bibitem{PaBo}
{\sc Pan, Y. and Borovkov, K.} (2017).
The exact asymptotics of the large deviation probabilities in the multivariate boundary crossing problem.
{arXiv:1708.09605} (31 pp.).
\end{thebibliography}
\end{document}
\begin{document}
\selectlanguage{english}
\maketitle
\begin{abstract}
\noindent
We extend the work of \cite{DelongImkeller,DelongImkeller2} concerning backward stochastic differential equations with time delayed generators (delay BSDE). We give moment and a priori estimates in general $L^p$-spaces and provide sufficient conditions for the solution of a delay BSDE to exist in $L^p$. We introduce decoupled systems of SDE and delay BSDE (delay FBSDE) and give sufficient conditions for their variational differentiability. We connect these variational derivatives to the Malliavin derivatives of delay FBSDE via the usual representation formulas. We conclude with several path regularity results; in particular, we extend the classic $L^2$-path regularity to delay FBSDE.
\end{abstract}
{\bf 2010 AMS subject classifications:}
Primary: 60H10;
Secondary:
60H30,
60H07,
60G17;
\\
{\bf Key words and phrases:} Backward stochastic differential equation, BSDE, delay, time delayed generators, $L^p$-solutions, differentiability, calculus of variations, Malliavin calculus, path regularity.
\section*{Introduction}
The theory of nonlinear \emph{backward stochastic differential equations} (BSDEs) was introduced by \cite{PardouxPeng90} with its main motivations being mathematical finance (see \cite{97KPQ}) and stochastic control theory (see \cite{YongZhou}). In the last twenty years much effort has been devoted to this type of equation and nowadays many classes of BSDEs and results on them are available.
Due to tractability, common results are achieved within a Markovian framework. Under certain conditions the BSDE's solution exhibits a Markov structure and hence can be interpreted as an instantaneous transformation of the underlying Markov process that spans the stochastic basis of the underlying probability space. This in turn yields access to the theory of partial differential equations via the non-linear Feynman-Kac formula.
Moving away from the Markovian setting, \cite{DelongImkeller,DelongImkeller2} introduce a new class of BSDE labeled \textit{backward stochastic differential equations with time delayed generators} (delay BSDEs). The dynamics of these BSDEs are governed by
\begin{align*}
Y_t=\xi + \int_t^T f(s,Y(s),Z(s))\mathrm{d}s - \int_t^T Z_s \mathrm{d}W_s,\quad t\in[0,T],
\end{align*}
where the generator $f$ at time $s\in[0,T]$ is allowed to depend on the past values of the solution $(Y,Z)$ over the time interval $[0,s]$ and $\xi$ is a measurable random variable. In these two works the authors answered thoroughly several fundamental questions: existence and uniqueness of a square integrable solution, comparison principles, existence of a measure solution, BMO martingale properties for the control component $Z$ of the solution, Malliavin differentiability for delay BSDEs driven by a Wiener process and a generalized Poisson martingale. To the best of our knowledge the only existence and uniqueness results for this class of BSDEs follow from those two works. As pointed out by \cite{Delong2010}, delay BSDEs appear naturally in finance and insurance related problems of pricing and hedging of contracts. In the same work the author analyses a vast scope of contracts to which this class of BSDEs can be applied.
Guided by the state of the art for BSDEs with non-time delayed generators, the next step concerning delay BSDEs is to obtain a feasible numerical scheme. Here, the main obstacle is the presence of the control process $Z$ in the generator. This process is usually obtained via the predictable representation property of the underlying stochastic basis, and initially all one knows about $Z$ is that it is a square integrable process. To steer in the direction of a numerical scheme, a deeper analysis of the fine properties of the solution of such equations is required. As for numerics for Lipschitz continuous BSDEs (see for example \cite{04BT} or \cite{BenderZhang2008}), one is usually forced to gather several results concerning the \emph{path regularity} properties of the solution process before being able to give proper convergence results. Such path properties include not only sample path continuity but also bounds on the time increments of the components of the solution in terms of the size of the time increment. For the purpose of establishing such path properties we first need to prove several auxiliary results.
Our agenda consists of refining and extending the existence and uniqueness results obtained in \cite{DelongImkeller,DelongImkeller2} and then steering towards the smoothness properties of the solution of delay BSDEs. We start by improving the original results of \cite{DelongImkeller} concerning their a priori estimates by reformulating them in a more standard fashion.
In Lemma 2.1 from \cite{DelongImkeller}, the a priori estimates express the difference (in norm) of the solutions of two delay BSDEs in terms of the difference of the respective terminal conditions and generators. These a priori estimates fall short of the usual a priori estimates one expects to see due to the presence of the solutions of \emph{both} delay BSDEs on the right hand side of the estimate. We establish a priori estimates in the classical form where the right hand side of the estimate contains the difference of generators evaluated at their zero spatial state and hence is independent of the BSDE solutions. Within the topic of a priori estimates we extend the results of \cite{DelongImkeller} in another direction. We show that given extra integrability of the terminal condition and the generator, the solution will inherit this integrability. This allows us to state moment and a priori estimates in general $L^p$-spaces and not solely in $L^2$. The proof of these estimates relies on techniques from \cite{DelongImkeller} and on computations carried out for non-time delayed BSDEs in the spirit of \cite{WangRanChen}. The usual techniques to obtain higher order moment estimates fail in the setting of delay BSDEs; the reason for this will be seen in \eqref{eq:auxi1} below. A rough explanation would be that for the usual (non-delay) BSDE setting the dynamics of $Y_t$ is given by sums of Lebesgue and It\^o integrals over the interval $[t,T]$, but for delay BSDEs the dynamics of $Y_t$ also depends on an integral over the whole interval $[0,T]$, which prevents the usual techniques from being applied. The general estimates we obtain pave the way to a result of existence and uniqueness of solutions to delay BSDEs with Lipschitz continuous generators in general $L^p$ spaces for $p\geq 2$. Inevitably, in analogy to \cite{DelongImkeller,DelongImkeller2} a compatibility condition on the Lipschitz constant and terminal time is required to obtain existence of solutions (see our Theorem \ref{theo:picard}).
A customary field of application of BSDEs consists in coupling them with SDEs, giving rise (in our case) to systems of delay forward-backward SDEs (delay FBSDEs). We show that when coupling a delay BSDE with a forward diffusion and assuming appropriate regularity conditions, we obtain smoothness properties of the solution in terms of the involved parameters, in particular with respect to the initial condition of the forward diffusion. Combining this with the Malliavin differentiability proved in \cite{DelongImkeller2} enables us to derive the usual representation formulas for FBSDE which display the relationship between the Malliavin derivatives of the solution process and their variational (classical) derivatives. It is somewhat surprising that such a relationship still holds since it is usually a consequence of the BSDE's Markov property which clearly fails to materialize in the context of delay FBSDE.
With this collection of results we are finally able to address the path regularity issue of delay BSDE. Using the techniques employed in \cite{ImkellerDosReis,pathregcorrection2010}, we establish path continuity for the components of the solution of delay FBSDE and we give a result that bounds the norm of the increments in time of $Y$ and $Z$ by the size of the time increment. We expect that these results will open the door to the derivation of concrete numerical schemes and their convergence rate and intend to tackle these problems in our future research.
The paper is organized as follows: in Section \ref{section:prelim} we fix notations and elaborate on the type of time-delayed BSDEs that we consider. In Section \ref{section:apriori} we refine and extend the a priori estimates obtained in \cite{DelongImkeller} and then use them to establish existence and uniqueness of solutions in general $L^p$ spaces. In Section \ref{section:diff} we introduce the delay FBSDE framework and use results from the previous sections to obtain the differentiability of the solution process with respect to the initial state of a forward diffusion. The representation formulas and the path regularity results are presented in Section \ref{section:representation}.
\section{Preliminaries}
\label{section:prelim}
Let $(\Omega,\mathcal{F},\mathbb{P})$ be a probability space equipped with a standard $d$-dimensional Brownian motion $W$. For a fixed real number $T>0$ we consider the filtration $\mathbb{F}:= (\mathcal{F}_t)_{t\geq 0}$ generated by $W$ and augmented by all $\mathbb{P}$-null sets. The filtered probability space $(\Omega,\mathcal{F},\mathbb{F},\mathbb{P})$ satisfies the usual conditions. Depending on whether we work on $\mathbb{R}^d$ or $\mathbb{R}^{m\times d}$, we denote by $|\cdot|$ the Euclidean norm or the Hilbert--Schmidt operator norm, respectively. Furthermore, $\nabla$ denotes the canonical gradient differential operator and for a function $h(x,y):\mathbb{R}^m\times \mathbb{R}^d\to \mathbb{R}^n$, we write $\nabla_x h$ or $\nabla_y h$ for the derivatives with respect to $x$ and $y$. We work with the following topological vector spaces:
\begin{itemize}
\item For $p\geq 2$, let $L^p(\mathbb{R}^m)$ be the space of $\mathcal{F}_T$-measurable random variables $\xi:\Omega\to\mathbb{R}^m$ normed by $\|\xi \|_{L^p}:=\mathbb{E} \big[\,|\xi|^p\, \big]^{1/p}$.
\item
For $\beta \geq 0$ and $p\geq 1$, $\mathcal{H}^{p}_\beta(\mathbb{R}^{m\times d})$ denotes the space of all predictable processes $\varphi$ with values in $\mathbb{R}^{m\times d}$
such that the norm $\| \varphi \|_{\mathcal{H}^p_\beta} := \mathbb{E} \Big[ \Big(\int_0^T e^{\beta s} |\varphi_s|^2 \mathrm{d}s \Big)^{p/2} \Big]^{1/p}<\infty$.
\item For $\beta\geq 0$ and $p\geq 2$, $\mathcal{S}^{p}_\beta(\mathbb{R}^{m\times d})$ denotes the space of all predictable processes $\eta$ with values in $\mathbb{R}^{m\times d}$
such that the norm $\| \eta \|_{\mathcal{S}^p_\beta} := \mathbb{E} \Big[ \Big( \sup_{0 \leq t \leq T} e^{\beta t} |\eta_t|^2 \Big)^{p/2} \Big]^{1/p}<\infty$.
\end{itemize}
We omit referencing the range space if no ambiguity arises. It is fairly easy to see that for any $\beta, \bar{\beta}\geq 0$ the norms on $\mathcal{H}_\beta^{p}$, $\mathcal{H}_{\bar\beta}^{p}$ and $\mathcal{S}_\beta^{p}$, $\mathcal{S}_{\bar\beta}^{p}$ are equivalent.
\subsubsection*{Some notation}
We introduce a notational convention which will be used throughout the text: for an arbitrarily given integrable function $f: [0,T] \to \mathbb{R}^m$, trivially extended to $[-T,0)$ via $f(t) \mathbbm{1}_{[-T,0)}(t) = 0$, and a given deterministic finite measure $\alpha$ supported on $[-T,0)$ which is not necessarily atomless, we denote for $t\in [0,T]$ and any $p\geq 2$
\begin{align*}
(f \cdot \alpha)(t) := \int_{-T}^0 f(t+v) \alpha(\mathrm{d} v)
\quad \text{ and }\quad
(f^p \cdot \alpha)(t) := \int_{-T}^0 |f(t+v)|^p \alpha(\mathrm{d} v).
\end{align*}
Similarly, for a given process $(\varphi_t)_{t\in[0,T]}$, extended to $[-T,0)$ by imposing $\varphi_t=0$ on $[-T,0)$, we denote
\begin{equation}
\label{eq:notation1}
(\varphi \cdot \alpha)(t) := \int_{-T}^0 \varphi_{t+v} \alpha(\mathrm{d} v),\qquad t\in[0,T],
\end{equation}
and
\begin{equation}
\label{eq:notation2}
(\varphi^p \cdot \alpha)(t) := \int_{-T}^0 |\varphi_{t+v}|^p \alpha(\mathrm{d} v), \quad \quad t\in[0,T], \ p\geq 2.
\end{equation}
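For concreteness, the following sketch evaluates $(\varphi\cdot\alpha)(t)$ when $\alpha$ is a finite combination of point masses; the path and the two-atom measure below are assumptions chosen purely for illustration.
\begin{verbatim}
# Evaluating (phi . alpha)(t) for a measure alpha that is a finite sum of point
# masses at lags v_1, ..., v_k in [-T, 0); path and measure are assumptions.
import numpy as np

T = 1.0
lags = np.array([-0.5, -0.1])      # atoms of alpha
weights = np.array([0.3, 0.7])     # alpha({v_i})

def phi(t):
    # an example path, extended by zero to [-T, 0) as in the text
    return np.where(t < 0.0, 0.0, np.sin(2.0 * np.pi * t))

def delayed_average(t):
    # (phi . alpha)(t) = sum_i weights[i] * phi(t + lags[i])
    return float(np.sum(weights * phi(t + lags)))

print(delayed_average(0.3))   # only the lag -0.1 contributes, since 0.3 - 0.5 < 0
\end{verbatim}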
We now give a lemma concerning the change of integration order for \eqref{eq:notation1} and \eqref{eq:notation2}, which will become useful in the sequel.
\begin{lemma}\label{lemma:interchange}
Let $\varphi$ be a process and $\alpha$ a non-random finite measure supported on $[-T,0)$. Then we have the following change of integration order: for every $k\geq 1$
\begin{align*}
\int_t^T (\varphi^k\cdot \alpha)(s) \mathrm{d}s =\int_{0}^T \alpha\big( [r-T,(r-t)\wedge 0) \big) |\varphi_r|^k \mathrm{d} r, \quad \forall t\in [0,T], \; \mathbb{P}-a.s.
\end{align*}
Moreover, if we have for $p \geq 1$ that $\varphi \in \mathcal{H}^p_0$, then we also have that
\[
\| (\varphi \cdot \alpha) \|_{\mathcal{H}^p_\beta}^p
\leq
M_p \| \varphi \|_{\mathcal{H}^p_0}^p,
\] where $M_p = (e^{\beta T})^{p/2} \big( \alpha([-T,0)) \big)^p$.
\end{lemma}
\begin{proof}
Let $t\in[0,T]$ and $k\in[1,+\infty)$. We have that
\begin{align*}
\int_t^T (\varphi^k\cdot\alpha)(s) \mathrm{d}s&= \int_t^T \int_{-T}^0 |\varphi_{s+v}|^k \alpha(\mathrm{d} v) \mathrm{d}s
= \int_{-T}^0 \int_t^T |\varphi_{s+v}|^k ~\mathrm{d}s ~\alpha(\mathrm{d} v)\\
&= \int_{-T}^0 \int_{(t+v) \vee 0}^{T+v} |\varphi_r|^k ~\mathrm{d} r ~\alpha(\mathrm{d} v)
= \int_{0}^T \int_{(r-T)}^{(r-t) \wedge 0} |\varphi_r|^k ~\alpha(\mathrm{d} v) ~\mathrm{d} r \\
& = \int_{0}^T \alpha\big( [r-T,(r-t)\wedge 0) \big) |\varphi_r|^k \mathrm{d} r.
\end{align*}
The second claim follows by applying Jensen's inequality and changing the integration order as done above, i.e. for any $\beta\geq 0$ and $p\geq 1$ we have
\begin{align*}
\mathbb{E}\left[ \Big( \int_0^T e^{\beta s} \vert (\varphi \cdot \alpha)(s) \vert^2 \mathrm{d}s \Big)^{p/2}\right] &\leq \big(e^{\beta T}\alpha([-T,0)) \big)^{p/2} ~ \mathbb{E}\left[ \Big( \int_0^T (|\varphi|^2 \cdot \alpha)(s) \mathrm{d}s \Big)^{p/2} \right]
\\
&
\leq M_p \mathbb{E}\left[ \Big( \int_0^T \vert \varphi_s \vert^2 \mathrm{d}s \Big)^{p/2} \right]
= M_p \|\varphi\|_{\mathcal{H}_0^p}^p,
\end{align*}
which concludes the proof.
\end{proof}
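The interchange identity of Lemma~\ref{lemma:interchange} can also be verified numerically; the sketch below uses a deterministic path, a two-atom measure and simple Riemann sums, all of which are assumptions made only for this check.
\begin{verbatim}
# Numerical sanity check of the interchange identity above:
#   int_t^T (|phi|^k . alpha)(s) ds = int_0^T alpha([r-T, (r-t) ^ 0)) |phi_r|^k dr.
import numpy as np

T, t, k = 1.0, 0.25, 2
lags, weights = np.array([-0.6, -0.2]), np.array([0.4, 1.1])   # assumed measure
phi = lambda r: np.where(r < 0.0, 0.0, np.cos(3.0 * r) + 0.5)  # assumed path

r = np.linspace(0.0, T, 200001)
dr = r[1] - r[0]

# left-hand side: integrate s |-> (|phi|^k . alpha)(s) over [t, T]
integrand = sum(w * np.abs(phi(r + v)) ** k for v, w in zip(lags, weights))
lhs = np.sum(integrand[r >= t]) * dr

# right-hand side: weight |phi_r|^k by the mass alpha([r-T, (r-t) ^ 0))
upper = np.minimum(r - t, 0.0)
mass = sum(w * ((r - T <= v) & (v < upper)) for v, w in zip(lags, weights))
rhs = np.sum(mass * np.abs(phi(r)) ** k) * dr

print(lhs, rhs)   # the two numbers agree up to the Riemann-sum error
\end{verbatim}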
\section{General results on BSDE with time delayed generators}
\label{section:apriori}
In this section we give a brief overview of BSDEs with time delayed generators and discuss the setting they are studied under. We then establish convenient a priori estimates on the difference of two solutions to such equations which will play a central role in proving existence and uniqueness of solutions in the more general $\mathcal{H}^p$-spaces.
\subsection{BSDEs with time delayed generators}\label{sec:delay_bsde}
Let us start with a recap on BSDE with time delayed generators. Throughout the text, we assume
\begin{enumerate}
\item[(H0)] ${\alpha_{\scriptscriptstyle{\mathcal{Y}}}},{\alpha_{\scriptscriptstyle{\mathcal{Z}}}}$ are two non-random, finite measures supported on $[-T,0)$.
\end{enumerate}
We also define
\begin{align}
\label{eq:alpha1}
\alpha := {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}([-T,0)) \vee {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}([-T,0)).
\end{align}
Given $p\geq 2$, we assume that the following holds:
\begin{enumerate}[(H1)]
\item $\xi$ is an $\mathcal{F}_T$-measurable random variable which belongs to $L^p(\mathbb{R}^m)$;
\item the generator $f:\Omega\times [0,T]\times \mathbb{R}^m \times \mathbb{R}^{m\times d} \to \mathbb{R}^m$ is measurable, $\mathbb{F}$-adapted and satisfies the following Lipschitz-like condition: there exists a constant $K>0$ such that
\begin{align*}
\big| f(t,y,z) - f(t,y',z') \big|^2
&
\leq K \big( |y-y'|^2 + |z-z'|^2 \big)
\end{align*}
holds for $\mathrm{d} \mathbb{P} \otimes \mathrm{d}t$-almost all $(\omega,t) \in \Omega \times[0,T]$ and for every $(y,z),(y',z') \in \mathbb{R}^m \times \mathbb{R}^{m\times d}$;
\item $\mathbb{E} \Big[ \big( \int_0^T |f(s,0,0)|^2 \mathrm{d}s \big)^{p/2} \Big] < \infty$;
\item $f(t,\cdot,\cdot)=0$ if $t <0$.
\end{enumerate}
Following the notation from equation \eqref{eq:notation1}, we write
\begin{align*}
(Y\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t) = \int_{-T}^0 Y_{t+v} {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}(\mathrm{d} v)\ \text{ and }\
(Z\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t) = \int_{-T}^0 Z_{t+v} {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}(\mathrm{d} v), \quad 0\leq t \leq T,
\end{align*}
for some processes $(Y_t)_{t\in[0,T]}$ and $(Z_t)_{t\in[0,T]}$ satisfying appropriate integrability conditions. Assumption (H2) and Jensen's inequality then imply
\begin{align*}
\text{(H2')} \quad
&\big| f\big(t,(Y\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t),(Z\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t) \big) - f\big(t,(Y'\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t),(Z'\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t) \big) \big|^2
\nonumber\\
&\qquad\quad
\leq K \big\{ \big| \big((Y-Y')\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}}\big)(t) \big|^2 + \big| \big((Z-Z')\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}}\big)(t) \big|^2 \big\}
\nonumber\\
&\quad\qquad
\leq L \big\{ \big((Y-Y')^2\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}}\big)(t) + \big((Z-Z')^2\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}}\big)(t)\big\},
\end{align*}
where $L:=K\alpha$ with the real number $\alpha$ given by \eqref{eq:alpha1}. The focus of our study is BSDEs with time delayed generators of the type
\begin{align}
\label{eq:bsde1}
Y_t &= \xi + \int_t^T f\big(s,\Gamma(s)\big) \mathrm{d}s - \int_t^T Z_s \mathrm{d}W_s, \quad 0 \leq t \leq T,
\end{align}
where $\Gamma$ abbreviates for $t\in[0,T]$
\begin{align}
\label{eq:Gamma1}
\Gamma(t) := \Big( \int_{-T}^0 Y_{t+v} {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}(\mathrm{d} v), \int_{-T}^0 Z_{t+v} {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}(\mathrm{d} v) \Big) = \Big( (Y\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t),(Z\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t) \Big).
\end{align}
\begin{definition}[Solution of a Delay BSDE]
We say $(Y,Z)$ is a solution to the delay BSDE \eqref{eq:bsde1} if $(Y,Z)$ belongs to the space $\mathcal{S}^p_0\times \mathcal{H}^p_0$ and satisfies \eqref{eq:bsde1}.
\end{definition}
Using a fixed point argument, \cite{DelongImkeller} have shown that a BSDE of the type \eqref{eq:bsde1}-\eqref{eq:Gamma1} admits a unique solution if the parameters of the equation \eqref{eq:bsde1} are sufficiently small, i.e. if the Lipschitz constant $K>0$ or the terminal time $T>0$ satisfy a smallness condition. The following $L^2$-existence and uniqueness result is a straightforward modification of Theorem 2.1 from \cite{DelongImkeller}.
\begin{theorem}
\label{theo:DelongImkeller_thm2.1}
Let $p=2$ and assume that (H0)-(H4) are satisfied. For $\alpha$ defined as in \eqref{eq:alpha1}, assume that the non-negative constants $T$, $L=K\alpha$, $\beta$ are such that
\[
\Big(8 T + \frac{1}{\beta}\Big) L \int_{-T}^0 e^{-\beta u}\rho(\mathrm{d} u)\,\max\{1,T\}<1,\quad \text{for }\rho\in\{{\alpha_{\scriptscriptstyle{\mathcal{Y}}}},{\alpha_{\scriptscriptstyle{\mathcal{Z}}}}\}.
\]
Then the delay BSDE \eqref{eq:bsde1}-\eqref{eq:Gamma1} has a unique solution $(Y,Z)\in \mathcal{S}^2_\beta(\mathbb{R}^m)\times \mathcal{H}^2_\beta(\mathbb{R}^{m\times d})$.
\end{theorem}
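For orientation, the following sketch evaluates the smallness condition of Theorem~\ref{theo:DelongImkeller_thm2.1} for delay measures consisting of finitely many atoms; every numerical value below is an assumption chosen for illustration only, not a value taken from the paper.
\begin{verbatim}
# Checking the smallness condition of the theorem above for two assumed delay
# measures given by atoms and weights; all parameter values are illustrative.
import numpy as np

T, K_lip, beta = 0.5, 0.05, 2.0
measures = {
    "alpha_Y": (np.array([-0.4, -0.1]), np.array([0.5, 0.5])),   # (atoms, weights)
    "alpha_Z": (np.array([-0.3]), np.array([1.0])),
}

alpha = max(w.sum() for _, w in measures.values())   # the constant alpha of (eq:alpha1)
L = K_lip * alpha

for name, (atoms, weights) in measures.items():
    integral = np.sum(weights * np.exp(-beta * atoms))          # int e^{-beta u} rho(du)
    lhs = (8.0 * T + 1.0 / beta) * L * integral * max(1.0, T)
    print(name, round(lhs, 3), "condition satisfied:", lhs < 1.0)
\end{verbatim}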
\begin{remark}
In \cite{DelongImkeller}, this result is proved for the one-dimensional case $d=m=1$. It is clear that by the nature of the fixed point argument, the proof is insensitive to the dimension of the equation.
\end{remark}
\begin{remark} Given that a compatibility condition is necessary in order to establish existence and uniqueness of solutions and moreover that we will be giving an extended version of it, all the proofs in this section are given with extra detail in order to better control the constants involved in each result.
\end{remark}
\subsection{Moment and a priori estimates}
In Lemma 2.1 from \cite{DelongImkeller} the authors provide a priori estimates for the time delayed BSDE \eqref{eq:bsde1} which estimate the norm of the difference between the solutions of two BSDEs in terms of the terminal conditions and the difference of the generators applied to the solution processes. More specifically, for $i\in\{1,2\}$ let $(Y^i,Z^i)$ be the solution of a BSDE with dynamics \eqref{eq:bsde1}, terminal condition $\xi^i$ and driver $f^i$ satisfying (H1)-(H4); then it holds that
\begin{align}
\label{eq:DI}
\nonumber
&\|Y^1-Y^2 \|^2_{\mathcal{H}^2_\beta} + \| Z^1-Z^2 \|^2_{\mathcal{H}^2_\beta}
\leq C_2 \Big\{ \mathbb{E} \big[\, e^{\beta T} |Y_T^1-Y_T^2|^2\, \big]
\\
&\qquad \quad + \mathbb{E} \big[ \int_0^T e^{\beta s} |f^1(s,(Y^1 \cdot \alpha)(s),(Z^1 \cdot \alpha)(s))-f^2(s,(Y^2 \cdot \alpha)(s),(Z^2 \cdot \alpha)(s))|^2 \mathrm{d}s\, \big] \Big\},
\end{align}
where the authors assume that $\alpha$ is some deterministic measure on $[-T,0)$ with mass one. Thus Lemma 2.1 from \cite{DelongImkeller} establishes the a priori estimate \eqref{eq:DI} whose right hand side depends again on the solutions of \emph{both} delay BSDEs. In the context of \cite{DelongImkeller} such a result suffices to establish existence and uniqueness of solutions in $\mathcal{S}^2_\beta \times \mathcal{H}^2_\beta$ but the situation becomes more intricate when the same issues are considered on $\mathcal{S}^p_\beta \times \mathcal{H}^p_\beta$ for $p>2$. More precisely, we are not able to obtain an estimate similar to \eqref{eq:DI} when $p>2$. In addition, the study of differentiability of the solution (for both $p=2$ and $p>2$), carried out in Section \ref{section:diff}, requires a priori estimates where the right hand side of the estimate depends only on the problem's data: the differences between the terminal conditions and a quantity of the form $\delta_2f_s:=f^1(s,(Y^2 \cdot \alpha)(s),(Z^2 \cdot \alpha)(s))-f^2(s,(Y^2 \cdot \alpha)(s),(Z^2 \cdot \alpha)(s))$. For a clear view of the required estimates, compare for instance \eqref{eq:DI} with \eqref{eq:apriori_p2}.
\subsubsection*{Moment estimates - part I}
As a starting observation, we have that if \eqref{eq:bsde1} admits a solution $(Y,Z)$ in $\mathcal{H}^p_\beta(\mathbb{R}^m) \times \mathcal{H}^p_\beta(\mathbb{R}^{m\times d})$, then we also have that $Y \in \mathcal{S}^p_\beta(\mathbb{R}^m)$.
\begin{lemma}
Let $\beta\geq0$, $p\geq 2$ and assume that (H0)-(H4) hold. If the delay BSDE \eqref{eq:bsde1} admits a solution $(Y,Z) \in \mathcal{H}^p_\beta(\mathbb{R}^m) \times \mathcal{H}^p_\beta(\mathbb{R}^{m\times d})$ then we also have that $Y \in \mathcal{S}^p_{\beta}(\mathbb{R}^m)$.
\end{lemma}
\begin{proof}
Throughout let $t\in[0,T]$ and $p\geq 2$. Since all $\beta$-norms are equivalent, it suffices to show the result for $\beta=0$. We drop the $\beta$-subscripts in the following. The pair $(Y,Z)$ satisfies
\begin{align*}
Y_t &= \xi + \int_t^T f\big(s,(Y\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s),(Z\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s)\big) \mathrm{d}s - \int_t^T Z_s \mathrm{d}W_s,
\end{align*}
and in turn we have
\begin{align*}
\sup_{0\leq t \leq T}|Y_t| & \leq |\xi| + \int_0^T \big| f\big(s,(Y\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s),(Z\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s)\big) \big| \mathrm{d}s + \sup_{0\leq t \leq T} \big|\int_t^T Z_s \mathrm{d}W_s \big|.
\end{align*}
Combining the fact that $Z\in\mathcal{H}^p$ with the inequalities of Young, Doob and Burkholder-Davis-Gundy (BDG), we obtain
\begin{align*}
\mathbb{E} \Big[ \Big( \sup_{0\leq t \leq T} \big| \int_t^T Z_s \mathrm{d}W_s \big|^2 \Big)^{p/2} \Big]
&
\leq 2^{p/2} ~ \mathbb{E} \Big[ \Big( \big| \int_0^T Z_s \mathrm{d}W_s \big|^2 + \sup_{0\leq t \leq T} \big| \int_0^t Z_s \mathrm{d}W_s \big|^2 \Big)^{p/2} \Big]
\\
&
\leq 2^p ~ \mathbb{E} \Big[ \sup_{0\leq t \leq T} \big| \int_0^t Z_s \mathrm{d}W_s \big|^p \Big]
\leq 2^p C_p \|Z\|_{\mathcal{H}^p_0}^p
< \infty.
\end{align*}
Next observe that by the Lipschitz property of the generator $f$ (notice that (H2) implies (H2')), it follows that
\begin{align*}
&
\Big( \int_0^T \big| f\big(s,(Y\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s),(Z\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s) \big) \big|^2 \mathrm{d}s \Big)^{p/2}\\
&\quad \leq 2^{p/2} \Big( \int_0^T \big| f(s,0,0) \big|^2 \mathrm{d}s
+ \int_0^T \Big| f\big(s,(Y\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s),(Z\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s) \big) - f(s,0,0) \Big|^2 \mathrm{d}s \Big)^{p/2}
\\
&\quad
\leq 2^{p/2}2^{p/2-1} \bigg\{ \Big( \int_0^T \big| f(s,0,0) \big|^2 \mathrm{d}s \Big)^{p/2}+ \Big( L \int_0^T \Big( (|Y|^2\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s) + (|Z|^2\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s) \Big) \mathrm{d}s \Big)^{p/2} \bigg\}.
\end{align*}
The second term in the bracket can be further estimated by
\begin{align*}
&\left( L \int_0^T \Big( (|Y|^2\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s) + (|Z|^2\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s) \Big) \mathrm{d}s \right)^{p/2}
\\
&\qquad
\leq 2^{p/2-1}L^{p/2} \left\{ \Big( \int_0^T (|Y|^2\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s) \mathrm{d}s \Big)^{p/2} + \Big( \int_0^T (|Z|^2\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s) \mathrm{d}s\Big)^{p/2} \right\}
\\
&\qquad
\leq 2^{p/2-1} L^{p/2}\alpha^{p/2} \left\{ \Big( \int_0^T |Y_s|^2 \mathrm{d}s \Big)^{p/2} + \Big( \int_0^T |Z_s|^2 \mathrm{d}s\Big)^{p/2} \right\},
\end{align*}
where the last line follows from
Lemma \ref{lemma:interchange}. This estimate together with
(H3) yields
\begin{align*}
\mathbb{E} \Big[ \Big( \int_0^T \big| f\big(s,(Y\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s),(Z\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s) \big) \big|^2 \mathrm{d}s \Big)^{p/2} \Big] &< \infty.
\end{align*}
Using hypothesis (H1), i.e.\ that $\xi \in L^p$, we can conclude that $Y\in\mathcal{S}^p$ must hold.
\end{proof}
\subsubsection*{A priori estimates}
Let us define the weighted variant $\tilde\alpha$ of $\alpha$ as the maximum of the exponentially weighted masses of ${\alpha_{\scriptscriptstyle{\mathcal{Y}}}}$ and ${\alpha_{\scriptscriptstyle{\mathcal{Z}}}}$ on $[-T,0)$ by
\begin{align}
\label{eq:alpha_tilde}
\tilde\alpha := \int_{-T}^0 e^{-\beta s} {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}(\mathrm{d}s) \vee \int_{-T}^0 e^{-\beta s} {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}(\mathrm{d}s), \quad \beta\geq 0.
\end{align}
\begin{remark}
We emphasize that $\tilde{\alpha}$ depends on $\beta$. To keep the notation to a minimum we simply write $\tilde{\alpha}$ instead of making the dependence explicit.
\end{remark}
The next result establishes \textit{canonical} a priori estimates (in the sense that the right hand side of the estimate only depends on the problem's data) for the solutions of two time-delayed BSDEs as given by \eqref{eq:bsde1}. We distinguish between the cases $p=2$ and $p>2$, and we start with the case $p=2$.
\begin{proposition}[A priori estimates for $p=2$]
\label{prop:apriori_p2}
Let $p=2$. Consider $i\in\{1,2\}$ and let $(Y^i,Z^i) \in\mathcal{S}^2_0 \times \mathcal{H}^2_0$ be the solution of the delay BSDE \eqref{eq:bsde1} with terminal condition $\xi^i$ and generator $f^i$ satisfying (H0)-(H4). Denote by $K>0$ the Lipschitz constant of $f^1$ as given in (H2') and set $\delta Y = Y^1 - Y^2$, $\delta Z = Z^1 - Z^2$.
If either $T$ or $K$ or $\alpha$ is small enough then there exist two constants $\beta, \gamma>0$ satisfying
\begin{align}
\label{eq:consts}
D_1:=\beta - \gamma - \frac{\tilde{\alpha} L}{\gamma} > 0
\quad\text{and}\quad
D_2:= 1- \frac{\tilde{\alpha} L}{\gamma}>0
\qquad (\text{with }L=K\alpha \text{ and }\alpha\text{ as in \eqref{eq:alpha1}}),
\end{align}
and a constant $C_2 =C_2(\beta,\gamma,\tilde\alpha,L,T)>0$ depending on $\beta,\gamma,\tilde\alpha,L,T$ such that:
for $i\in\{1,2\}$, $(Y^i,Z^i) \in\mathcal{S}^2_\beta \times \mathcal{H}^2_\beta$ and
\begin{align}
\label{eq:apriori_p2}
\|\delta Y \|^2_{\mathcal{S}^2_\beta} + \|\delta Y \|^2_{\mathcal{H}^2_\beta} + \| \delta Z \|^2_{\mathcal{H}^2_\beta} &\leq C_2 \Big\{ \mathbb{E} \Big[ e^{\beta T} |\delta Y_T|^2 \Big] + \mathbb{E} \Big[ \int_0^T e^{\beta s} |\delta_2 f_s|^2 \mathrm{d}s \Big] \Big\},
\end{align}
where $\delta_2 f_t := f^1\big(t,(Y^2 \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t),(Z^2 \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t)\big) - f^2\big(t,(Y^2 \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t),(Z^2 \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t)\big)$ for $t\in[0,T]$.
\end{proposition}
\begin{proof}
Let $\gamma, K, T, \alpha$ be such that the relations in \eqref{eq:consts} are satisfied (i.e. $D_1>0$ and $D_2>0$). Throughout let $t\in[0,T]$, $i\in\{1,2\}$ and define $\Gamma^i$ as in \eqref{eq:Gamma1} for the pair $(Y^i,Z^i)$. An application of It\^o's formula to the semimartingale $e^{\beta t}|\delta Y_t|^2$ for $\beta>0$ yields
\begin{align*}
&e^{\beta t}|\delta Y_t|^2 + \int_t^T \beta e^{\beta s}|\delta Y_s|^2 \mathrm{d}s + \int_t^T e^{\beta s}|\delta Z_s|^2 \mathrm{d}s\\
&\qquad = e^{\beta T}|\delta Y_T|^2 + \int_t^T 2e^{\beta s} \big\langle \delta Y_s, f^1(s,\Gamma^1(s)) - f^2(s,\Gamma^2(s)) \big\rangle \mathrm{d}s - \int_t^T 2e^{\beta s} \langle \delta Y_s,\delta Z_s \, \mathrm{d}W_s \rangle \\
&\qquad \leq e^{\beta T}|\delta Y_T|^2 + \int_t^T \gamma e^{\beta s} |\delta Y_s|^2 \mathrm{d}s + \int_t^T \frac{e^{\beta s}}{\gamma} \Big( \big| f^1(s,\Gamma^1(s)) - f^1(s,\Gamma^2(s)) \big|^2 \Big) \mathrm{d}s\\
& \qquad\qquad + 2 \int_t^T e^{\beta s} \big\langle \delta Y_s, \delta_2 f_s \big\rangle \mathrm{d}s - \int_t^T 2e^{\beta s} \langle \delta Y_s,\delta Z_s \, \mathrm{d}W_s \rangle,
\end{align*}
where the last inequality results from Young's inequality with parameter $\gamma$. Reorganizing and taking condition (H2') for the generator $f^1$ into account, we get
\begin{align*}
\nonumber
&e^{\beta t}|\delta Y_t|^2 + \int_t^T (\beta-\gamma) e^{\beta s}|\delta Y_s|^2 \mathrm{d}s + \int_t^T e^{\beta s}|\delta Z_s|^2 \mathrm{d}s\\
\nonumber
&\qquad \leq e^{\beta T}|\delta Y_T|^2 + \int_t^T \frac{e^{\beta s}}{\gamma} L \Big[ (|\delta Y|^2\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s) + (|\delta Z|^2\cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s) \Big] \mathrm{d}s \nonumber\\
\nonumber
& \qquad\qquad + 2 \int_t^T e^{\beta s} \big\langle \delta Y_s, \delta_2 f_s \big\rangle \mathrm{d}s - \int_t^T 2e^{\beta s} \langle \delta Y_s,\delta Z_s \, \mathrm{d}W_s \rangle.
\end{align*}
By a change of integration order argument similar to that in the proof of Lemma \ref{lemma:interchange} we obtain for
$j\in\{ {\scriptstyle{\mathcal{Y}}}, {\scriptstyle{\mathcal{Z}}}\}$ and $\phi^{\scriptscriptstyle{\mathcal{Y}}}=\delta Y$, $\phi^{\scriptscriptstyle{\mathcal{Z}}}=\delta Z$
\begin{align}
\label{eq:tmp_02}
&\int_t^T e^{\beta s} (|\phi^j|^2\cdot \alpha_j)(s) \mathrm{d}s
\nonumber \\
&\quad= \int_t^T \int_{-T}^0 e^{\beta (s+v)} e^{-\beta v} \mathbbm{1}_{\{s+v \geq 0\}} |\phi^j_{s+v}|^2 \alpha_j(\mathrm{d} v) \mathrm{d}s
\nonumber \\
&\quad = \int_{-T}^0 \int_{(t+v)\vee 0}^{T+v} e^{\beta r} e^{-\beta v} \mathbbm{1}_{\{r \geq 0\}} |\phi^j_r|^2 \mathrm{d} r ~ \alpha_j(\mathrm{d} v)
= \int_0^T \int_{r-T}^{(r-t)\wedge 0} e^{\beta r} e^{-\beta v} |\phi^j_r|^2 \alpha_j(\mathrm{d} v) ~ \mathrm{d} r
\nonumber \\
& \quad \leq \int_0^T e^{\beta r} |\phi^j_r|^2\big( \int_{-T}^0 e^{-\beta v} \alpha_j(\mathrm{d} v) \big)\mathrm{d} r
\leq \int_0^T \tilde{\alpha} e^{\beta r} |\phi^j_r|^2 \mathrm{d} r,
\end{align}
with $\tilde{\alpha}$ given by \eqref{eq:alpha_tilde}. Continuing the inequality from above we get
\begin{align}
&e^{\beta t}|\delta Y_t|^2 + \int_t^T (\beta-\gamma) e^{\beta s}|\delta Y_s|^2 \mathrm{d}s + \int_t^T e^{\beta s}|\delta Z_s|^2 \mathrm{d}s \leq e^{\beta T}|\delta Y_T|^2 + 2 \int_t^T e^{\beta s} \big\langle \delta Y_s, \delta_2 f_s \big\rangle \mathrm{d}s \nonumber\\
&\qquad\qquad + \int_0^T \frac{\tilde{\alpha} L}{\gamma} e^{\beta s} \Big( |\delta Y_s|^2 + |\delta Z_s|^2 \Big) \mathrm{d}s - \int_t^T 2e^{\beta s} \langle \delta Y_s,\delta Z_s \, \mathrm{d}W_s \rangle.\label{eq:auxi1}
\end{align}
Taking the expectations for $t=0$ yields
\begin{align*}
&\big(\beta - \gamma - \frac{\tilde\alpha L}{\gamma} \big) \mathbb{E} \Big[ \int_0^T e^{\beta s}|\delta Y_s|^2 \mathrm{d}s \Big] + \big(1-\frac{\tilde\alpha L}{\gamma} \big) \mathbb{E} \Big[ \int_0^T e^{\beta s}|\delta Z_s|^2 \mathrm{d}s \Big]
\\
&\quad \leq \mathbb{E} \Big[ e^{\beta T}|\delta Y_T|^2 \Big] + 2 \mathbb{E}\Big[ \int_0^T e^{\beta s} \big\langle \delta Y_s, \delta_2 f_s \big\rangle \mathrm{d}s \Big]
\\
&\quad \leq \mathbb{E} \Big[ e^{\beta T}|\delta Y_T|^2 \Big] + 2 \mathbb{E}\Big[ \sup_{0\leq t \leq T} e^{\frac{\beta}{2} t} \vert \delta Y_t\vert \int_0^T e^{\frac{\beta}{2} s} |\delta_2 f_s| \mathrm{d}s \Big]
\\
&\quad \leq \mathbb{E} \Big[ e^{\beta T}|\delta Y_T|^2 \Big] + \gamma' \mathbb{E}\Big[ \sup_{0\leq t \leq T} e^{\beta t} \vert \delta Y_t\vert^2\Big] + \frac{1}{\gamma'} \mathbb{E}\Big[\big(\int_0^T e^{\frac{\beta}{2} s} |\delta_2 f_s| \mathrm{d}s\big)^2 \Big]
\end{align*}
where we have used Young's inequality with some $\gamma'>0$ to be specified later. From the last expression and since $D_1,D_2>0$ (see \eqref{eq:consts}) we deduce that
\begin{align}\label{eq:tmp_01}
\| \delta Y \|^2_{\mathcal{H}^2_\beta} + \| \delta Z \|^2_{\mathcal{H}^2_\beta} &\leq C \Big\{ \mathbb{E} \Big[ e^{\beta T} |\delta Y_T|^2 \Big] + \gamma'
\|\delta Y\|_{\mathcal{S}^2_\beta}^2
+ \frac{1}{\gamma'} \mathbb{E}\Big[\big(\int_0^T e^{\frac{\beta}{2} s} |\delta_2 f_s| \mathrm{d}s\big)^2 \Big] \Big\},
\end{align}
where $C >0$ is a constant depending on $\beta,\gamma,\tilde\alpha,L$ and $T$. In order to obtain the $\mathcal{S}^2_\beta$-estimate for $\delta Y$ we observe that we have
\begin{align*}
\delta Y_t &= \delta Y_T + \int_t^T \Big(f^1\big(s,\Gamma^1(s)\big) - f^1\big(s,\Gamma^2(s)\big)\Big) \mathrm{d}s + \int_t^T \delta_2 f_s \, \mathrm{d}s - \int_t^T \delta Z_s \, \mathrm{d}W_s.
\end{align*}
Multiplying by the monotone increasing function $e^{\frac{\beta}{2} t}$ and taking the conditional expectation with respect to $\mathcal{F}_t$ we get
\begin{align*}
e^{\frac\beta2 t} |\delta Y_t| &\leq \mathbb{E} \left[ e^{\frac\beta2 t}|\delta Y_T| + e^{\frac\beta2 t} \int_t^T \big|f^1\big(s,\Gamma^1(s)\big) - f^1\big(s,\Gamma^2(s)\big) \big| \mathrm{d}s + e^{\frac\beta2 t} \int_t^T \big|\delta_2 f_s \big| \mathrm{d}s \big| \mathcal{F}_t \right]
\\
&
\leq \mathbb{E} \left[ e^{\frac\beta2 T}|\delta Y_T| + \int_t^T e^{\frac\beta2 s} \big|f^1\big(s,\Gamma^1(s)\big) - f^1\big(s,\Gamma^2(s)\big) \big| \mathrm{d}s \right.
\\
&
\qquad \left. + \int_0^t e^{\frac\beta2 s} \big|f^1\big(s,\Gamma^1(s)\big) - f^1\big(s,\Gamma^2(s)\big) \big| \mathrm{d}s + \int_t^T e^{\frac\beta2 s} \big|\delta_2 f_s \big| \mathrm{d}s + \int_0^t e^{\frac\beta2 s} \big|\delta_2 f_s \big| \mathrm{d}s \big| \mathcal{F}_t \right]
\\
&= \mathbb{E} \left[ e^{\frac\beta2 T}|\delta Y_T| + \int_0^T e^{\frac\beta2 s} \big|f^1\big(s,\Gamma^1(s)\big) - f^1\big(s,\Gamma^2(s)\big) \big| \mathrm{d}s + \int_0^T e^{\frac\beta2 s} \big|\delta_2 f_s \big| \mathrm{d}s \big| \mathcal{F}_t \right].
\end{align*}
Using Doob's inequality, we obtain
\begin{align*}
&
\|\delta Y\|_{\mathcal{S}^2_\beta}^2
\\
&\leq 4 ~ \mathbb{E} \Big[ \Big(\mathbb{E} \Big[ e^{\frac\beta2 T}|\delta Y_T| + \int_0^T e^{\frac\beta2 s} \big|f^1\big(s,\Gamma^1(s)\big) - f^1\big(s,\Gamma^2(s)\big) \big| \mathrm{d}s
+ \int_0^T e^{\frac\beta2 s} \big|\delta_2 f_s \big| \mathrm{d}s ~ \big| ~ \mathcal{F}_T\Big]\Big)^2 \Big]
\\
&
\leq 12 ~ \mathbb{E} \Big[ e^{\beta T}|\delta Y_T|^2 + T \int_0^T e^{\beta s} \big|f^1\big(s,\Gamma^1(s)\big) - f^1\big(s,\Gamma^2(s)\big) \big|^2 \mathrm{d}s
+ \big(\int_0^T e^{\frac{\beta}{2} s} \big|\delta_2 f_s \big| \mathrm{d}s\big)^2 \Big],
\end{align*}
where the last line follows by Jensen's inequality. Since $f^1$ satisfies (H2'), an application of Lemma \ref{lemma:interchange} yields
\begin{align*}
\|\delta Y\|_{\mathcal{S}^2_\beta}^2
&
\leq 12\Big\{ \mathbb{E} \Big[ e^{\beta T}|\delta Y_T|^2 \Big] + \tilde\alpha T L \Big( \| \delta Y \|^2_{\mathcal{H}^2_\beta} + \| \delta Z \|^2_{\mathcal{H}^2_\beta} \Big)+ \mathbb{E}\Big[ \big(\int_0^T e^{\frac{\beta}{2} s} \big|\delta_2 f_s \big| \mathrm{d}s\big)^2\Big] \Big\}.
\end{align*}
Hence, plugging into \eqref{eq:tmp_01} we find
\begin{align*}
&\big(1-12 C \gamma' \tilde{\alpha} T L\big)
\mathbb{E} \Big[ \sup_{0\leq t \leq T} e^{\beta t} |\delta Y_t|^2 \Big]
\\
&\qquad \leq 12\Big\{ \big(1+C \tilde{\alpha} T L\big) \mathbb{E} \Big[ e^{\beta T}|\delta Y_T|^2 \Big]
+ \big(1+C \gamma'^{-1} \tilde{\alpha} T L\big) \mathbb{E}\Big[\big(\int_0^T e^{\frac{\beta}{2} s} \big|\delta_2 f_s \big| \mathrm{d}s\big)^2 \Big] \Big\}.
\end{align*}
Choosing $\gamma'$ small enough such that $(1-12 C \gamma' \tilde{\alpha} T L)>0$ is satisfied we conclude that estimate \eqref{eq:apriori_p2} holds for a constant $C_2 = C_2(\beta,\gamma,\tilde\alpha,L,T)$.
\end{proof}
\begin{remark}
Note that in the previous result we have three degrees of freedom: the Lipschitz constant of the driver $K$, the time horizon $T$ and the duration of the time delay given by $\alpha$.
\end{remark}
The proof for the case $p>2$ is more involved and uses techniques from the proof of Proposition \ref{prop:apriori_p2}. The main reason for the proof to be more involved can be seen in \eqref{eq:auxi1}. Usually the dynamics of $Y_t$ is described by integrals over the interval $[t,T]$, but for delay BSDEs we see from \eqref{eq:auxi1} that the dynamics of $Y_t$ also depends on an integral over the whole interval $[0,T]$. We also remark that the techniques of \cite{DelongImkeller} cannot be extended to $L^p$ (for $p>2$); see for instance estimate (2.3) in the proof of Lemma 2.1 in \cite{DelongImkeller}.
The next proposition gives a result that will be central in establishing existence and uniqueness of $L^p$-solutions to delay BSDEs as well as in proving the differentiability results of Section \ref{section:diff}.
\begin{proposition}[A priori estimates for $p>2$]
\label{lemma:apriori}
Let $p> 2$. Consider $i\in\{1,2\}$ and denote by $(Y^i,Z^i)\in\mathcal{S}^p_0 \times \mathcal{H}^p_0$ a solution of the delay BSDE \eqref{eq:bsde1} with terminal condition $\xi^i$ and generator $f^i$ satisfying (H0)-(H4). Denote by $K>0$ the Lipschitz constant of $f^1$ in (H2') and set $\delta Y = Y^1 - Y^2$, $\delta Z = Z^1 - Z^2$. If either $T$ or $K$ or $\alpha$ is small enough (for $L=K\alpha$, $\alpha$ as in \eqref{eq:alpha1} and $\tilde\alpha$ as in \eqref{eq:alpha_tilde}) then there exist $\beta, \gamma>0$ satisfying \eqref{eq:consts} (i.e. $D_1,D_2>0$) and
\begin{align}
\label{eq:D_4}
D_3 &:= 1 - 2^{4p - 4} d^2_{p/2} \big(\frac{p}{p-2}\big)^{p/2} \big(\frac{\tilde{\alpha} L}{\gamma - \tilde{\alpha} L}\big)^{p/2} D_2^{-p/2}- \big(\frac{\tilde{\alpha} L}{\gamma}T \big)^{p/2} \big(\frac{p}{p-2}\big)^{p/2}2^{p-2}>0
\end{align}
where $m\in \mathbb{N}$ denotes the dimension of the $\delta Y$ process and the constant $d_{p/2}$ is given by
\begin{align}
\label{BDGconstant}
d_{p/2} := m^{p/2+1}\big(\frac{p}{p-1}\big)^{p^2/2} \Big( \frac{p(p-1)}{2} \Big)^{p/2}.
\end{align}
In addition, $(Y^i,Z^i) \in\mathcal{S}^p_\beta \times \mathcal{H}^p_\beta$ ($i\in\{1,2\}$) and there exists a constant $C_p = C_p(\beta,\gamma,\tilde\alpha,L,T,m) > 0$ explicitly given in \eqref{eq:dummy_whatever} such that
\begin{align}
\label{eq:apriori_p}
\|\delta Y \|^p_{\mathcal{S}^p_\beta} + \|\delta Y \|^p_{\mathcal{H}^p_\beta} + \| \delta Z \|^p_{\mathcal{H}^p_\beta}
&\leq C_p \Big\{ \mathbb{E} \Big[ \big(e^{\beta T} |\delta Y_T|^2 \big)^{p/2} \Big] + \mathbb{E} \Big[ \big(\int_0^T e^{\frac{\beta}{2} s}|\delta_2 f_s| \mathrm{d}s \big)^p \Big] \Big\},
\end{align}
with
$\delta_2 f_t = f^1\big(t,(Y^2 \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t),(Z^2 \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t)\big) - f^2\big(t,(Y^2 \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t),(Z^2 \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t)\big)$, for $t\in[0,T]$, as in Proposition~\ref{prop:apriori_p2}.
\end{proposition}
\begin{remark}
\label{remark1onhowsmallKTalphaare}
A closer analysis on the constants $D_1$, $D_2$ and $D_3$ shows:
\[
\lim_{K \alpha \to 0} (D_1,D_2,D_3)> (0,0,0).
\]
This means that with either a small $T$ or a small $K$ or a small $\alpha$ the conditions of the previous result can be verified.
\end{remark}
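The following sketch makes Remark~\ref{remark1onhowsmallKTalphaare} concrete by evaluating $D_1$, $D_2$ and $D_3$ for one admissible choice of parameters; the values below, and in particular the value assigned to the product $\tilde\alpha L$, are assumptions chosen for illustration only.
\begin{verbatim}
# Evaluating D1, D2 and D3 of (eq:consts) and (eq:D_4) for illustrative
# parameters; the value of the product tilde_alpha * L is an assumption.
p, m = 3.0, 1                 # exponent and dimension of the Y process
beta, gamma, T = 2.0, 1.0, 0.5
tal = 5e-5                    # assumed value of tilde_alpha * L

d_p2 = (m ** (p / 2 + 1) * (p / (p - 1)) ** (p ** 2 / 2)
        * (p * (p - 1) / 2) ** (p / 2))

D1 = beta - gamma - tal / gamma
D2 = 1.0 - tal / gamma
D3 = (1.0
      - 2 ** (4 * p - 4) * d_p2 ** 2 * (p / (p - 2)) ** (p / 2)
        * (tal / (gamma - tal)) ** (p / 2) * D2 ** (-p / 2)
      - (tal * T / gamma) ** (p / 2) * (p / (p - 2)) ** (p / 2) * 2 ** (p - 2))

print(D1, D2, D3)             # all three are strictly positive for this choice
\end{verbatim}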
\textit{Proof of Proposition \ref{lemma:apriori}.}
Throughout let $t\in[0,T]$, $i\in\{1,2\}$ and from \eqref{eq:consts} define $D_1 := \beta - \gamma - \frac{\tilde\alpha L}{\gamma}$ and $D_2 := 1 - \frac{\tilde\alpha L}{\gamma}$. We emphasize that $\tilde{\alpha}$ as defined in \eqref{eq:alpha_tilde} depends on $\beta$. Recall \eqref{eq:auxi1} from the proof of Proposition \ref{prop:apriori_p2}:
\begin{align}\label{eq:dummy1}
&e^{\beta t}|\delta Y_t|^2 + \int_t^T (\beta-\gamma) e^{\beta s}|\delta Y_s|^2 \mathrm{d}s + \int_t^T e^{\beta s}|\delta Z_s|^2 \mathrm{d}s \leq e^{\beta T}|\delta Y_T|^2 + 2 \int_t^T e^{\beta s} \big\langle \delta Y_s, \delta_2 f_s \big\rangle \mathrm{d}s \nonumber\\
&\qquad\qquad + \int_0^T \frac{\tilde{\alpha} L}{\gamma} e^{\beta s} \Big( |\delta Y_s|^2 + |\delta Z_s|^2 \Big) \mathrm{d}s - \int_t^T 2e^{\beta s} \langle \delta Y_s,\delta Z_s \, \mathrm{d}W_s \rangle.
\end{align}
By assumption $\beta,\gamma,T,K,\alpha$ are such that \eqref{eq:consts} holds and hence we have that $D_1>0$ and $D_2>0$. We carry out the proof in several steps.
\textsf{Step 1:} We claim that
\begin{align}
&\mathbb{E} \left[ \Big( \int_0^T e^{\beta s}|\delta Z_s|^2 \mathrm{d}s \Big)^{p/2} \right]
\leq D_2^{-p/2} \Big\{ 2^{p/2} \mathbb{E} \Big[ \big( e^{\beta T}|\delta Y_T|^2 \big)^{p/2} \Big]
+ 2^{3p -2} d^2_{p/2} D_2^{-p/2}\|\delta Y\|_{\mathcal{S}^p_\beta}^p
\nonumber\\
& \hspace{5cm}
+ 2^{3p/2 - 1} \mathbb{E} \Big[\,\big|\int_0^T e^{\beta s} \big\langle \delta Y_s, \delta_2 f_s \big\rangle \mathrm{d}s \big|^{p/2}\Big]
\Big\}\label{eq:auxi4a},
\end{align}
where $d_{p/2} > 0$ is a given constant appearing in the BDG inequality which only depends on $p > 2$ and the dimension. Estimate \eqref{eq:auxi4a} can be deduced as follows: putting $t=0$ in \eqref{eq:dummy1} and noticing that by \eqref{eq:consts} the constants $D_1$ and $D_2$ are positive we get
\begin{align*}
\big(1-\frac{\tilde\alpha L}{\gamma} \big) \int_0^T e^{\beta s}|\delta Z_s|^2 \mathrm{d}s &\leq \big(\beta-\gamma-\frac{\tilde\alpha L}{\gamma} \big) \int_0^T e^{\beta s}|\delta Y_s|^2 \mathrm{d}s + \big(1-\frac{\tilde\alpha L}{\gamma} \big) \int_0^T e^{\beta s}|\delta Z_s|^2 \mathrm{d}s\\
&\leq e^{\beta T}|\delta Y_T|^2 + 2 \int_0^T e^{\beta s} \big\langle \delta Y_s, \delta_2 f_s \big\rangle \mathrm{d}s - 2 \int_0^T e^{\beta s} \langle \delta Y_s,\delta Z_s \, \mathrm{d}W_s \rangle.
\end{align*}
Now raising both sides to the power $p/2>1$, making use of the fact that for $a,b,c \in \mathbb{R}$
\begin{align*}
\big|a+2b-2c\big|^{p/2}
&
\leq 2^{p/2-1} \Big( |a|^{p/2} + |2b-2c|^{p/2} \Big)
\leq 2^{p/2-1} \Big( |a|^{p/2} + 2^{p/2-1} \big( |2b|^{p/2} + |2c|^{p/2} \big) \Big)
\\
&
= 2^{p/2 - 1} |a|^{p/2} + 2^{3p/2-2}|b|^{p/2} + 2^{3p/2-2}|c|^{p/2}
\end{align*}
and taking expectations, we get
\begin{align}
&\big(1-\frac{\tilde\alpha L}{\gamma} \big)^{p/2} ~ \mathbb{E} \Big[ \Big( \int_0^T e^{\beta s}|\delta Z_s|^2 \mathrm{d}s \Big)^{p/2} \Big]
\leq 2^{p/2-1} \mathbb{E} \Big[ \big( e^{\beta T}|\delta Y_T|^2 \big)^{p/2} \Big]
\leq 2^{p/2-1} \mathbb{E} \Big[ \big( e^{\beta T}|\delta Y_T|^2 \big)^{p/2} \Big]
\nonumber\\
& \hspace{1cm}
+ 2^{3p/2 - 2} \mathbb{E} \Big[\, \Big|\int_0^T e^{\beta s} \big\langle \delta Y_s, \delta_2 f_s \big\rangle \mathrm{d}s \Big|^{p/2}\Big]
+ 2^{3p/2 - 2} \mathbb{E} \Big[\, \Big|\int_0^T e^{\beta s} \langle \delta Y_s,\delta Z_s \, \mathrm{d}W_s \rangle \Big|^{p/2} \Big] \label{eq:auxi2}.
\end{align}
Denoting $$ \mathrm{d} N^j_t := \sum_{k=1}^d \delta Z^{k,j}_t \mathrm{d} W^k_t,$$
we apply the BDG inequality with the constant
\[
C^* := \big(\frac{p}{p-1}\big)^{p^2/2} \Big( \frac{p(p-1)}{2} \Big)^{p/2} >0,
\]
(see Theorem 3.9.1 from \cite{Khosh} and solution to Problem 3.29, p. 231, in \cite {KaratzasShreve}) and Young's inequality with some constant $\gamma_2>0$ and obtain
\begin{align}
&\mathbb{E} \Big[ \Big|\int_0^T e^{\beta s} \langle \delta Y_s,\delta Z_s \, \mathrm{d}W_s \rangle \Big|^{p/2} \Big]
\leq \mathbb{E} \Big[ \Big( \sum_{j=1}^m \big|\int_0^T e^{\beta s} \delta Y^j_s ~ \mathrm{d} N^j_s \big| \Big)^{p/2} \Big]
\nonumber \\
& \qquad
\leq m^{p/2} ~ \sum_{j=1}^m \mathbb{E} \Big[ \big| \int_0^T e^{\beta s} \delta Y^j_s ~ \mathrm{d} N^j_s \big|^{p/2} \Big]
\leq C^* m^{p/2} ~ \sum_{j=1}^m \mathbb{E} \Big[ \Big( \int_0^T e^{2\beta s} |\delta Y^j_s|^2 ~ \mathrm{d} \langle N^j \rangle_s \Big)^{p/4} \Big]
\nonumber\\
&
\qquad\leq C^* m^{p/2} ~ \sum_{j=1}^m \mathbb{E} \Big[ \big( \sup_{0\leq t \leq T} e^{\beta t} |\delta Y^j_t|^2 \big)^{p/4} ~ \big(\int_0^T e^{\beta s} ~ \mathrm{d} \langle N^j \rangle_s \big)^{p/4} \Big]
\nonumber\\
&\qquad
\leq C^* m^{p/2} ~ \sum_{j=1}^m \Big( \gamma_2 \mathbb{E} \Big[ \big( \sup_{0\leq t \leq T} e^{\beta t} |\delta Y^j_t|^2 \big)^{p/2} \Big] + \frac{1}{\gamma_2} \mathbb{E} \Big[ \big(\int_0^T e^{\beta s} ~ \mathrm{d} \langle N^j \rangle_s \big)^{p/2} \Big] \Big)
\nonumber\\
&\qquad
\leq C^* m^{p/2} ~ \Big( \gamma_2 \| \delta Y \|^p_{\mathcal{S}^p_\beta} + \frac{m}{\gamma_2} \mathbb{E} \Big[ \Big( \sum_{j=1}^m\int_0^T e^{\beta s} ~ \mathrm{d} \langle N^j \rangle_s \Big)^{p/2} \Big] \Big)
\nonumber\\
&\qquad
\leq C^* m^{p/2+1} ~ \left( \gamma_2 \| \delta Y \|^p_{\mathcal{S}^p_\beta} + \frac{1}{\gamma_2} \| \delta Z \|^p_{\mathcal{H}^p_\beta} \right)
\leq d_{p/2} ~ \Big\{ \gamma_2 \| \delta Y \|^p_{\mathcal{S}^p_\beta} + \frac{1}{\gamma_2}\|\delta Z\|_{\mathcal{H}^p_\beta}^p
\Big\},
\label{eq:auxi3}
\end{align}
where by \eqref{BDGconstant} we have that $C^*m^{p/2+1}=d_{p/2}$.
With the particular choice of
\[
\gamma_2 := 2^{3p/2-1} d_{p/2} D_2^{-p/2} = 2^{3p/2-1} d_{p/2}\left( \frac{\gamma}{\gamma-\tilde{\alpha}L} \right)^{p/2} > 0,
\] plugging \eqref{eq:auxi3} into \eqref{eq:auxi2} yields
\begin{align*}
&\Big( \big(1 - \frac{\tilde{\alpha}L}{\gamma} \big)^{p/2} - \frac{2^{3p/2 -2 }}{\gamma_2} d_{p/2} \Big) ~
\| \delta Z \|_{\mathcal{H}^p_\beta}^p = \frac12 D_2^{p/2} ~ \| \delta Z \|_{\mathcal{H}^p_\beta}^p \\
&\quad \leq 2^{p/2 - 1} \mathbb{E} \Big[ \big( e^{\beta T}|\delta Y_T|^2 \big)^{p/2} \Big] + 2^{3p/2-2} \mathbb{E} \Big[\Big|\int_0^T e^{\beta s} \big\langle \delta Y_s, \delta_2 f_s \big\rangle \mathrm{d}s \Big|^{p/2}\Big]
+ 2^{3p/2 - 2} d_{p/2} \gamma_2
\|\delta Y\|_{\mathcal{S}^p_\beta}^p,
\end{align*}
which implies the claim.
\textsf{Step 2:} We claim that
\begin{align}
\label{eq:dummy2}
D_3 \|\delta Y\|_{\mathcal{S}^p_\beta}^p
&\quad
\leq \big(\frac{p}{p-2} \big)^{p/2} ~ \Big\{ \big( 2^{p-2} + 2^{3p/2-2} \big( \frac{\tilde\alpha L}{\gamma - \tilde\alpha L} \big)^{p/2} \big) \mathbb{E} \Big[ \big(e^{\beta T}|\delta Y_T|^2\big)^{p/2} \Big]
\nonumber\\
&\qquad
+ \big( 2^{3p/2-2} + 2^{5p/2 - 3} \big( \frac{\tilde\alpha L}{\gamma - \tilde\alpha L} \big)^{p/2} \big) ~ \mathbb{E} \Big[\Big(\int_0^T e^{\beta s} \big| \big\langle \delta Y_s, \delta_2 f_s \big\rangle \big| \mathrm{d}s \Big)^{p/2}\Big] \Big\},
\end{align}
holds for
\begin{align}
\label{constant D4}
D_3 &:= 1 - 2^{4p - 4} d^2_{p/2} \big(\frac{p}{p-2}\big)^{p/2} \big(\frac{\tilde\alpha L}{\gamma - \tilde\alpha L}\big)^{p/2} D_2^{-p/2}- \big(\frac{\tilde\alpha L}{\gamma}T \big)^{p/2} \big(\frac{p}{p-2}\big)^{p/2}2^{p-2}.
\end{align}
Note that the choice of $K, T$ and $\alpha$ has been such that $D_3 > 0$ is satisfied. To prove \eqref{eq:dummy2}, we go back to \eqref{eq:dummy1}, where we take the conditional expectation with respect to $\mathcal{F}_t$, then the supremum over $t\in[0,T]$, raise to the power $p/2$ and finally apply Doob's inequality to obtain
\begin{align}
&\mathbb{E} \Big[ \sup_{0\leq t \leq T} \big(e^{\beta t}|\delta Y_t|^2 \big)^{p/2} \Big]\nonumber \\
&\quad
\leq \mathbb{E} \Big[ \sup_{0\leq t \leq T} \Big(\mathbb{E} \Big[ e^{\beta T} |\delta Y_T|^2 + 2 \int_0^T e^{\beta s} \big| \big\langle \delta Y_s, \delta_2 f_s \big\rangle \big| \mathrm{d}s
\nonumber \\
&\hspace{4.8cm}
+ \int_0^T \frac{\tilde\alpha L}{\gamma} e^{\beta s} \big(|\delta Y_s|^2 + |\delta Z_s|^2 \big) \mathrm{d}s \big|\mathcal{F}_t\Big]\Big)^{p/2} \Big]
\nonumber \\
& \quad
\leq \big(\frac{p}{p-2} \big)^{p/2} ~ \Big\{ 2^{p-2} \mathbb{E} \Big[ \big( e^{\beta T}|\delta Y_T|^2 \big)^{p/2} \Big] + 2^{3p/2-2} \mathbb{E} \Big[ \big( \int_0^T e^{\beta s} \big| \big\langle \delta Y_s, \delta_2 f_s \big\rangle \big| \mathrm{d}s \big)^{p/2} \Big]
\nonumber \\
&
\hspace{2cm}
+ 2^{p-2} \mathbb{E} \Big[ \big( \int_0^T \frac{\tilde\alpha L}{\gamma}e^{\beta s}|\delta Y_s|^2 \mathrm{d}s \big)^{p/2} \Big] + 2^{p-2} \mathbb{E} \Big[ \big( \int_0^T \frac{\tilde\alpha L}{\gamma}e^{\beta s}|\delta Z_s|^2 \mathrm{d}s \big)^{p/2} \Big] \Big\}\label{eq:auxi4b}.
\end{align}
Note that we made use of the fact that for $a,b,c,d \in \mathbb{R}$ and $p>2$, we have
\begin{align*}
\big| a + 2b + c + d \big|^{p/2} &\leq 2^{p/2-1} \big( |a+2b|^{p/2} + |c+d|^{p/2} \big)
\\
&
\leq 2^{p-2} |a|^{p/2} + 2^{3p/2-2}|b|^{p/2} + 2^{p-2} |c|^{p/2} + 2^{p-2} |d|^{p/2}.
\end{align*}
Plugging \eqref{eq:auxi4a} into \eqref{eq:auxi4b}, we get
\begin{align*}
\|\delta Y\|_{\mathcal{S}_\beta^p}^p
&\quad
\leq \big(\frac{p}{p-2} \big)^{p/2} \bigg\{ 2^{p-2}\mathbb{E} \Big[ \big( e^{\beta T}|\delta Y_T|^2 \big)^{p/2} \Big] + 2^{3p/2 - 2} \mathbb{E} \Big[ \big( \int_0^T e^{\beta s} \big| \big\langle \delta Y_s, \delta_2 f_s \big\rangle \big| \mathrm{d}s \big)^{p/2} \Big]
\nonumber\\
&\qquad + 2^{p-2} \big(\frac{\tilde\alpha L}{\gamma}\big)^{p/2}
\|\delta Y\|_{\mathcal{H}^p_\beta}^{p}
+ \big( \frac{\tilde\alpha L}{\gamma} \big)^{p/2} ~ D_2^{-p/2} \times 2^{p-2} \Big\{ 2^{p/2}\mathbb{E} \Big[ \big( e^{\beta T}|\delta Y_T|^2 \big)^{p/2} \Big]
\\
&\qquad + 2^{3p/2-1} \mathbb{E} \Big[\big(\int_0^T e^{\beta s} \big| \big\langle \delta Y_s, \delta_2 f_s \big\rangle \big| \mathrm{d}s \big)^{p/2}\Big] + 2^{3p-2} d^2_{p/2}D_2^{-p/2} \| \delta Y \|^p_{\mathcal{S}^p_\beta} \Big\} \bigg\}\\
&\quad
\leq \big(\frac{p}{p-2} \big)^{p/2} ~ \bigg\{ \Big(2^{p-2} + 2^{3p/2-2} \big( \frac{\tilde\alpha L}{\gamma - \tilde\alpha L} \big)^{p/2}\Big)\mathbb{E} \Big[ \big( e^{\beta T}|\delta Y_T|^2 \big)^{p/2} \Big]
\nonumber\\
&\qquad + \big( 2^{3p/2-2} + 2^{5p/2-3} \big( \frac{\tilde\alpha L}{\gamma - \tilde\alpha L} \big)^{p/2} \big)\mathbb{E} \Big[\big(\int_0^T e^{\beta s} \big| \big\langle \delta Y_s, \delta_2 f_s \big\rangle \big| \mathrm{d}s \big)^{p/2}\Big]
\\
&\qquad + \Big( 2^{p-2}\big( \frac{\tilde\alpha L}{\gamma} T \big)^{p/2} + 2^{4p-4}\big( \frac{\tilde\alpha L}{\gamma - \tilde\alpha L} \big)^{p/2} D_2^{-p/2} d^2_{p/2} \Big)\| \delta Y \|^p_{\mathcal{S}^p_\beta} \bigg\},
\end{align*}
from which the estimate \eqref{eq:dummy2} follows.
\textsf{Step 3:} At this stage, estimating $\mathbb{E} \Big[\Big(\int_0^T e^{\beta s} \big| \big\langle \delta Y_s, \delta_2 f_s \big\rangle \big| \mathrm{d}s \Big)^{p/2}\Big]$ and combining the result with \eqref{eq:dummy2} will yield \eqref{eq:apriori_p}. Young's inequality combined with the $\mathcal{S}^p_\beta$-norm yields
\begin{align}
\label{eq:dummy7}
\mathbb{E} \big[\Big(\int_0^T e^{\beta s} \big| \big\langle \delta Y_s, \delta_2 f_s \big\rangle \big| \mathrm{d}s \Big)^{p/2}\Big]
&\leq \mathbb{E} \Big[\big(\int_0^T e^{\beta s} | \delta Y_s | ~ |\delta_2 f_s | \mathrm{d}s \big)^{p/2}\Big] \nonumber\\
&
\leq \gamma_3
\| \delta Y \|_{\mathcal{S}^p_\beta}^p
+ \frac{1}{\gamma_3} \mathbb{E}\Big[\Big(\int_0^T e^{\frac{\beta}{2} s}\big |\delta_2 f_s| \mathrm{d}s \Big)^p\Big],
\end{align}
which we combine with the particular choice
\begin{equation}
\label{eq:gamma3}
\gamma_3 := \frac12 D_3 \big(\frac{p-2}{p}\big)^{p/2} \frac{(\gamma - \tilde\alpha L)^{p/2}}{2^{3p/2 - 2}(\gamma - \tilde\alpha L)^{p/2} + 2^{5p/2 - 3} (\tilde\alpha L)^{p/2} } > 0.
\end{equation}
Estimate \eqref{eq:dummy2} now leads to
\begin{align}
\label{eq:dummy8}
&\frac12 D_3\| \delta Y \|^p_{\mathcal{S}^p_\beta}
\leq \big(\frac{p}{p-2} \big)^{p/2} ~ \Big\{ \big( 2^{p-2} + 2^{3p/2-2} \big( \frac{\tilde\alpha L}{\gamma - \tilde\alpha L} \big)^{p/2} \big) \mathbb{E} \big[ \big(e^{\beta T}|\delta Y_T|^2\big)^{p/2} \big]
\nonumber\\
&\hspace{2.5cm}+ \big( 2^{3p/2-2} + 2^{5p/2 - 3} \big( \frac{\tilde\alpha L}{\gamma - \tilde\alpha L} \big)^{p/2} \big) \gamma_3^{-1} ~ \mathbb{E} \big[\big(\int_0^T e^{\frac{\beta}{2} s} |\delta_2 f_s| \mathrm{d}s \big)^p\big] \Big\}.
\end{align}
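For the reader's convenience, one way to justify the second inequality in \eqref{eq:dummy7} is the following: since $e^{\beta s}=e^{\frac{\beta}{2}s}e^{\frac{\beta}{2}s}$,
\begin{align*}
\Big(\int_0^T e^{\beta s} | \delta Y_s |\, |\delta_2 f_s | \mathrm{d}s \Big)^{p/2}
&\leq \Big( \sup_{0\leq t\leq T} e^{\frac{\beta}{2} t}|\delta Y_t| \Big)^{p/2} \Big(\int_0^T e^{\frac{\beta}{2} s} |\delta_2 f_s| \mathrm{d}s \Big)^{p/2}\\
&\leq \gamma_3 \sup_{0\leq t\leq T} \big(e^{\beta t}|\delta Y_t|^2\big)^{p/2} + \frac{1}{4\gamma_3} \Big(\int_0^T e^{\frac{\beta}{2} s} |\delta_2 f_s| \mathrm{d}s \Big)^{p},
\end{align*}
where the second step is Young's inequality $xy\leq \gamma_3 x^2 + \frac{1}{4\gamma_3}y^2$. Taking expectations and bounding $\frac{1}{4\gamma_3}$ by $\frac{1}{\gamma_3}$ gives \eqref{eq:dummy7}.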
Notice that we trivially have $\| \delta Y \|^p_{\mathcal{H}^p_\beta} \leq T^{p/2} ~ \| \delta Y \|^p_{\mathcal{S}^p_\beta}$ so that
\begin{align*}
&
\| \delta Y \|^p_{\mathcal{S}^p_\beta} + \| \delta Y \|^p_{\mathcal{H}^p_\beta}
\leq
C^1_p ~ \mathbb{E} \big[ \big(e^{\beta T}|\delta Y_T|^2\big)^{p/2} \big] + C^2_p ~ \mathbb{E} \big[\big(\int_0^T e^{\frac{\beta}{2} s} |\delta_2 f_s| \mathrm{d}s \big)^p\big],
\end{align*}
where the constants $C^1_p$ and $C^2_p$ are defined as
\begin{align*}
&
C^1_p:=
2 (1 + T^{p/2}) D_3^{-1}\big(\frac{p}{p-2}\big)^{p/2}
\big( 2^{p-2} + 2^{3p/2-2} \big( \frac{\tilde\alpha L}{\gamma - \tilde\alpha L} \big)^{p/2} \big),
\\
&
C^2_p:=
2 (1 + T^{p/2}) D_3^{-1}\big(\frac{p}{p-2}\big)^{p/2}
\big( 2^{3p/2-2} + 2^{5p/2 - 3} \big( \frac{\tilde\alpha L}{\gamma - \tilde\alpha L} \big)^{p/2} \big) \gamma_3^{-1} .
\end{align*}
Moreover, it follows from \eqref{eq:auxi4a}, \eqref{eq:dummy7} and \eqref{eq:dummy8} that
\begin{align*}
&\| \delta Z \|^p_{\mathcal{H}^p_\beta} \leq C^3_p ~ \mathbb{E} \big[ \big(e^{\beta T}|\delta Y_T|^2\big)^{p/2} \big] + C^4_p ~ \mathbb{E} \big[\big(\int_0^T e^{\frac{\beta}{2} s} |\delta_2 f_s| \mathrm{d}s \big)^p\big],
\end{align*}
where the constants $C^3_p$ and $C^4_p$ are defined as
\begin{align*}
C^3_p:=&
2D_3^{-1}\big(\frac{p}{p-2}\big)^{p/2} D_2^{-p/2}
\Big[ 2^{p/2} \\
&\hspace{3.2cm} + \big( 2^{3p-2}d^2_{p/2}D_2^{-p/2} + 2^{3p/2 - 1} \gamma_3 \big) \big( 2^{p-2} + 2^{3p/2 -2} \big(\frac{\tilde\alpha L}{\gamma - \tilde\alpha L} \big)^{p/2} \big) \Big],
\\
C^4_p:=&
2D_3^{-1}\big(\frac{p}{p-2}\big)^{p/2} D_2^{-p/2}
\Big[ \big( 2^{3p-2} d^2_{p/2} D_2^{-p/2} + 2^{3p/2 -1} \gamma_3 \big)\times \\
&\hspace{3.2cm} \times \big( 2^{3p/2 -2} + 2^{5p/2-3} \big(\frac{\tilde\alpha L}{\gamma - \tilde\alpha L}\big)^{p/2} \big) \gamma_3^{-1} + 2^{3p/2 - 1} \gamma_3 \Big],
\end{align*}
(recall that $\gamma_3$ is defined by \eqref{eq:gamma3}).
From the above inequalities we obtain \eqref{eq:apriori_p}, where the positive constant $C_p$ is given by
\begin{align}
\label{eq:dummy_whatever}
C_p := \max \big\{ C_p^1 + C_p^3, C_p^2 + C_p^4 \big\}.
\end{align}
$\Box$
\begin{remark}
\label{constants Di independent of data}
Notice that none of the constants $C_p$, $C_p^i$ and $D_i$ ($i\in\{1,\cdots,4\}$) depend on the terminal condition or on $f(\cdot,0,0)$. The only problem-related data they depend on are $K$, $T$, $\alpha$ and $m$.
\end{remark}
\begin{remark}
In the previous proof it is clear that our choices for the constants $\gamma_2$ and $\gamma_3$ do not lead to the most general statement of Proposition \ref{lemma:apriori}. They were chosen in this way to avoid a more complex statement: otherwise the constant $C_p$ given in \eqref{eq:dummy_whatever} would depend on $\gamma_2$ and $\gamma_3$, and in addition to \eqref{eq:D_4} we would also have to impose the condition $D_3>0$. The conditions of Theorem \ref{theo:picard} below depend on the smallness of $C_p$ as given by \eqref{eq:dummy_whatever}. The particular choices of $\gamma_2$ and $\gamma_3$ lead to simpler expressions in our statements.
\end{remark}
\subsubsection*{Moment estimates - part II}
As a by-product of the two previous propositions we obtain
a result on the moment estimates for the solution of BSDE \eqref{eq:bsde1}.
\begin{corollary}[Moment estimates]
\label{coro:momentestimates}
Let $p \geq 2$ and $\beta>0$. Let $(Y,Z) \in\mathcal{S}^p_\beta \times \mathcal{H}^p_\beta$ be the solution of the delay BSDE \eqref{eq:bsde1} with terminal condition $\xi$ and generator $f$ satisfying (H0)-(H4). For $K,T,\alpha$ small enough, there exists a constant $C_p$ (which, as in Propositions \ref{prop:apriori_p2} and \ref{lemma:apriori}, depends on several constants that can be suitably chosen) such that
\begin{align*}
\| Y \|^p_{\mathcal{S}^p_\beta} + \| Y \|^p_{\mathcal{H}^p_\beta} + \| Z \|^p_{\mathcal{H}^p_\beta}
&
\leq C_p \Big\{ \mathbb{E} \Big[ \big(e^{\beta T} | Y_T|^2 \big)^{p/2} \Big]
+\mathbb{E} \Big[ \big(\int_0^T e^{\beta s}| f(s,0,0)|^2 \mathrm{d}s \big)^p \Big] \Big\}.
\end{align*}
\end{corollary}
\subsubsection*{The existence and uniqueness result}
The moment and a priori estimates in \cite{DelongImkeller} are tailor-made for a Picard iteration procedure in $\mathcal{H}^2\times \mathcal{H}^2$. To make such a technique work in general $L^p$-spaces we needed to state a priori estimates in the form of Proposition \ref{prop:apriori_p2} and Proposition \ref{lemma:apriori}. In view of those results one can naturally expect a compatibility condition on $K,T$ and $\alpha$ more complicated than that of Theorem \ref{theo:DelongImkeller_thm2.1} for a solution to exist.
With estimate \eqref{eq:apriori_p} at hand, we now proceed to show the existence and uniqueness of solutions to \eqref{eq:bsde1} in $\mathcal{S}^p_\beta \times \mathcal{H}^p_\beta$ for $p>2$. For $p=2$, Theorem 2.1 from \cite{DelongImkeller} (recalled in our Theorem \ref{theo:DelongImkeller_thm2.1}) yields a sufficient condition which guarantees that the standard Picard iteration converges, and proves the existence and uniqueness of solutions to \eqref{eq:bsde1}. We will show in the following result that for $p>2$ the convergence of the same Picard iteration is retained. To achieve this, some extra effort is needed to show that the Picard iterates $(Y^n,Z^n)$ satisfy the corresponding $\mathcal{S}^p_\beta, \mathcal{H}^p_\beta$-integrability properties.
\begin{theorem}\label{theo:picard}
Let $p>2$ and assume that (H0)-(H4) hold. Let $K$ or $T$ or $\alpha$ be small enough such that for some $\beta,\gamma>0$ the conditions of Proposition \ref{lemma:apriori} are satisfied. If further $K$ or $T$ or $\alpha$ are small enough such that we have
\begin{align}
\label{eq:contraction}
2^{p/2-1}C_p \Big(L T \int_{-T}^0 e^{-\beta s} \rho(\mathrm{d} s) \Big)^{p/2} \max\{1,T^{p/2}\} < 1,\ \text{ for }\rho \in \{{\alpha_{\scriptscriptstyle{\mathcal{Y}}}},{\alpha_{\scriptscriptstyle{\mathcal{Z}}}} \},
\end{align}
where $C_p = C_p(\beta,\gamma,\tilde\alpha,L,T,m) > 0$ is given by \eqref{eq:dummy_whatever}, $\tilde\alpha$ is given by \eqref{eq:alpha_tilde} and $L=K\alpha$, then the BSDE \eqref{eq:bsde1} admits a unique solution $(Y,Z)$ in $\mathcal{S}^p_{\beta} \times \mathcal{H}^p_{\beta}$.
\end{theorem}
\begin{remark}
Note that, by definition of the constant $C_p$, condition \eqref{eq:contraction} is satisfied if either $T$ or $K$ or $\alpha$ is small enough, since $ \displaystyle{\lim_{T K \alpha \to 0} C_p <+\infty }$, which in turn implies
$$ \lim_{T K \alpha \to 0} C_p (\alpha K T )^{p/2} =0.$$
\end{remark}
\textit{Proof of Theorem \ref{theo:picard}.}
Let $p>2$.
Throughout let $t\in[0,T]$. The proof is based on the standard Picard iteration: we initialize by $Y^0 = 0$ and $Z^0=0$ and define recursively
\begin{align}
\label{eq:iterate}
Y^{n+1}_t &= \xi + \int_t^T f\big( s,\Gamma^n(s) \big) \mathrm{d}s - \int_t^T Z^{n+1}_s \mathrm{d}W_s, \quad 0 \leq t \leq T,
\end{align}
with
$\Gamma^n(s) = \big(\int_{-T}^0 Y^n_{s+v} {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}(\mathrm{d} v), \int_{-T}^0 Z^n_{s+v} {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}(\mathrm{d} v) \big)$ for $s\in[0,T]$ and $n\in\mathbb{N}$.
In the following, let $C>0$ denote some generic constant which may vary from line to line but is always independent of $n\in\mathbb{N}$. We proceed by induction, where the existence of $(Y^1,Z^1)\in \mathcal{S}^p_{\beta} \times \mathcal{H}^p_{\beta}$ follows from classic stochastic analysis arguments. For $n\geq 1$, assume that $(Y^n,Z^n) \in \mathcal{S}^p_{\beta} \times \mathcal{H}^p_{\beta}$ has been constructed via \eqref{eq:iterate}; we now prove that \eqref{eq:iterate} has a unique solution $(Y^{n+1},Z^{n+1}) \in \mathcal{S}^p_{\beta} \times \mathcal{H}^p_{\beta}$. Note that due to
\begin{align}
&\mathbb{E} \Big[ \big( \int_0^T |f(s,\Gamma^n(s))| \mathrm{d}s \big)^p \Big]
\nonumber \\
&\quad
\leq \mathbb{E} \Big[ \Big( \int_0^T |f(s,0,0)| \mathrm{d}s + \int_0^T |f(s,\Gamma^n(s))-f(s,0,0)| \mathrm{d}s \Big)^{p} \Big]
\nonumber\\
&\quad
\leq 2^{p-1}~\mathbb{E} \Big[ \Big( \int_0^T |f(s,0,0)| \mathrm{d}s \Big)^p + \Big( T \int_0^T |f(s,\Gamma^n(s))-f(s,0,0)|^2 \mathrm{d}s \Big)^{p/2} \Big]
\nonumber\\
&\quad
\leq 2^{p-1} ~ \mathbb{E} \Big[ \big( \int_0^T |f(s,0,0)| \mathrm{d}s \big)^p
\nonumber\\
&\qquad + L^{p/2} T^{p/2} \Big\{ \int_0^T \int_{-T}^0 |Y^n_{s+v}|^2 {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}(\mathrm{d} v) \mathrm{d}s + \int_0^T\int_{-T}^0 |Z^n_{s+v}|^2 {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}(\mathrm{d} v)\mathrm{d}s \Big\}^{p/2} \Big]
\nonumber\\
&\quad
\leq 2^{p-1} \mathbb{E} \Big[ \big( \int_0^T |f(s,0,0)| \mathrm{d}s \big)^p + (\alpha K T)^{p/2} \Big\{ \int_0^T |Y^n_{s}|^2 \mathrm{d}s + \int_{0}^T |Z^n_{s}|^2 \mathrm{d}s\Big\}^{p/2} \Big]
\nonumber\\
&\quad
\leq 2^{p-1} \mathbb{E} \Big[ \big( \int_0^T |f(s,0,0)| \mathrm{d}s \big)^p \Big] + 2^{p/2-1}(2 \alpha K T)^{p/2} \Big( T^{p/2} \| Y^n \|^p_{\mathcal{S}^p_0} + \| Z^n \|^p_{\mathcal{H}^p_0} \Big)
<\infty, \label{eq:dummy-gonca}
\end{align}
the martingale representation yields a uniquely determined process $Z^{n+1} \in \mathcal{H}^2_0$ such that
\begin{align*}
\mathbb{E} \Big[ \xi + \int_0^T f\big(s,\Gamma^n(s)\big) \mathrm{d}s \big| \mathcal{F}_t \Big] = \mathbb{E} \big[ \xi + \int_0^T f\big(s,\Gamma^n(s)\big) \mathrm{d}s \big] + \int_0^t Z^{n+1}_s \mathrm{d}W_s,\quad \text{for any }t\in[0,T].
\end{align*}
We then define $Y^{n+1}$ to be a continuous version of
$Y^{n+1}_t = \mathbb{E}[ \xi + \int_t^T f(s,\Gamma^n(s)) \mathrm{d}s |\mathcal{F}_t]$.
Let us first show that $Y^{n+1} \in \mathcal{S}^p_{0}$:
\begin{align*}
\| Y^{n+1}\|_ {\mathcal{S}^p_0}^p
= \mathbb{E} \Big[ \sup_{t\in[0,T]} |Y^{n+1}_t|^p \Big]
&\leq \mathbb{E} \Big[ \sup_{t\in[0,T]} \Big(\mathbb{E} \big[\, |\xi| + \int_0^T |f(s,\Gamma^n(s))| \mathrm{d}s\,|\mathcal{F}_t\big]\Big)^p \Big]
\\
&
\leq \Big( \frac{p}{p-1} \Big)^{p} \mathbb{E} \Big[ \Big( |\xi| + \int_0^T |f(s,\Gamma^n(s))| \mathrm{d}s \Big)^p \Big]
\\
&
\leq 2^{p-1} \Big( \frac{p}{p-1} \Big)^{p} ~ \mathbb{E} \Big[ |\xi|^p + \Big(\int_0^T |f(s,\Gamma^n(s))| \mathrm{d}s \Big)^p \Big]
<\infty,
\end{align*}
where the last inequality follows from the fact that $\xi \in L^p$ and \eqref{eq:dummy-gonca}. This proves that $Y^{n+1} \in \mathcal{S}^p_{0}$. Since all $\| \cdot \|_{\mathcal{S}^p_\beta}$-norms are equivalent it follows that $Y^{n+1} \in \mathcal{S}^p_{\beta}$. To see that $Z^{n+1} \in \mathcal{H}^p_\beta$, recall that It\^o's formula applied to $e^{\beta t} |Y^{n+1}_t|^2$ yields
\begin{align*}
&e^{\beta t}|Y^{n+1}_t|^2 + \int_t^T \beta e^{\beta s} |Y^{n+1}_s|^2 \mathrm{d}s + \int_t^T e^{\beta s} |Z^{n+1}_s|^2 \mathrm{d}s\\
&\quad = e^{\beta T} |\xi|^2 + \int_t^T 2e^{\beta s} \langle Y^{n+1}_s,f(s,\Gamma^n(s))\rangle \mathrm{d}s - \int_t^T 2 e^{\beta s} \langle Y^{n+1}_s, Z^{n+1}_s \mathrm{d}W_s\rangle.
\end{align*}
In the above, drop the two (non-negative) $Y$ terms on the left-hand side, set $t=0$, take absolute values on both sides and raise to the power $p/2$. It follows that
\begin{align}
\label{eq:tmp1}
&
\big( \int_0^T e^{\beta s} |Z^{n+1}_s|^2 \mathrm{d}s \big)^{p/2}
\nonumber\\
& \quad
\leq \Big( e^{\beta T} |\xi|^2 + \int_0^T 2e^{\beta s} |Y^{n+1}_s| ~ |f(s,\Gamma^n(s))| \mathrm{d}s +
\big| \int_0^T 2 e^{\beta s} \langle Y^{n+1}_s, Z^{n+1}_s \mathrm{d}W_s \rangle \big| \Big)^{p/2}
\nonumber\\
&\quad
\leq ~ 2^{p/2-1} \big(e^{\beta T} |\xi|^2\big)^{p/2} + 2^{p-2} \big(\int_0^T 2 e^{\beta s} |Y^{n+1}_s| ~ |f(s,\Gamma^n(s))| \mathrm{d}s\big)^{p/2}
\nonumber\\
&\qquad
+ 2^{3p/2-2}\big| \int_0^T e^{\beta s} \langle Y^{n+1}_s, Z^{n+1}_s \mathrm{d}W_s \rangle \big|^{p/2}.
\end{align}
On the one hand, we have
\begin{align}
\label{eq:tmp-between-tmp1and-tmp3}
&\mathbb{E} \Big[ \Big( \int_0^T 2e^{\beta s} |Y^{n+1}_s| ~ |f(s,\Gamma^n(s))| \mathrm{d}s \Big)^{p/2} \Big]
\nonumber
\\
&\quad
\leq \mathbb{E} \Big[ \Big( \int_0^T 2e^{\beta s} |Y^{n+1}_s| ~ |f(s,\Gamma^n(s))-f(s,0,0)| \mathrm{d}s + \int_0^T 2e^{\beta s} |Y^{n+1}_s| ~ |f(s,0,0)| \mathrm{d}s \Big)^{p/2} \Big]
\nonumber
\\
&\quad
\leq C \Big\{ \| Y^{n+1} \|^p_{\mathcal{S}^p_\beta} + \mathbb{E} \Big[ \big( \int_0^T e^{\frac{\beta}{2} s}|f(s,0,0)| \mathrm{d}s \big)^p \Big] + \| Y^{n} \|^p_{\mathcal{S}^p_\beta} + \| Z^{n} \|^p_{\mathcal{H}^p_\beta} \Big\}
< \infty,
\end{align}
where we have used the Lipschitz condition of $f$ combined with calculations similar to those of \eqref{eq:dummy-gonca} and
\[
\int_0^T 2e^{\beta s} |Y^{n+1}_s| ~ |f(s,0,0)| \mathrm{d}s
\leq
\sup_{0\leq t\leq T} e^{\beta t} |Y^{n+1}_t|^2 + \Big(\int_0^T e^{\frac{\beta}{2} s} |f(s,0,0)| \mathrm{d}s\Big)^2.
\]
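The last display is simply the inequality $2ab\leq a^2+b^2$ applied with $a=\sup_{0\leq t\leq T}e^{\frac{\beta}{2}t}|Y^{n+1}_t|$ and $b=\int_0^T e^{\frac{\beta}{2}s}|f(s,0,0)|\,\mathrm{d}s$, after writing $e^{\beta s}=e^{\frac{\beta}{2}s}e^{\frac{\beta}{2}s}$ and bounding $e^{\frac{\beta}{2}s}|Y^{n+1}_s|$ by its supremum over $[0,T]$.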
On the other hand, by the same arguments as in \eqref{eq:auxi3} we find the following estimate
\begin{align}
\label{eq:tmp3}
\mathbb{E} \Big[\,\big| \int_0^T e^{\beta s} \langle Y^{n+1}_s, Z^{n+1}_s \mathrm{d}W_s \rangle \big|^{p/2} \Big]
&
\leq d_{p/2} ~ \Big\{ \kappa~
\| Y^{n+1} \|_{\mathcal{S}^p_\beta}^p
+ \frac1\kappa
\| Z^{n+1} \|_{\mathcal{H}^p_\beta}^p
\Big\},
\end{align}
where in the last line the constant $\kappa>0$ appears due to Young's inequality. Now choosing $\kappa>0$ such that $1- 2^{2p-2}~d_{p/2}\kappa^{-1} > 0$, it follows from \eqref{eq:tmp1}, \eqref{eq:tmp-between-tmp1and-tmp3} and \eqref{eq:tmp3} that
\begin{align*}
\big(1- \frac{2^{2p-2}~d_{p/2}}{\kappa}\big)
\|Z^{n+1}\|_{\mathcal{H}^p_\beta}^p
&
\leq C \Big\{ \mathbb{E} \big[ \big(e^{\beta T} |\xi|^2 \big)^{p/2} \big] + \| Y^{n+1} \|^p_{\mathcal{S}^p_\beta}
\\
&\hspace{1cm}
+ \mathbb{E} \big[ \big( \int_0^T |f(s,0,0)| \mathrm{d}s \big)^p \big] + \| Y^{n} \|^p_{\mathcal{S}^p_\beta} + \| Z^{n} \|^p_{\mathcal{H}^p_\beta} \Big\}
< \infty.
\end{align*}
This proves that $Z^{n+1} \in \mathcal{H}^p_\beta$.
In the next step, we prove that the sequence $(Y^n,Z^n)$ converges in $\mathcal{S}^p_\beta \times \mathcal{H}^p_\beta$. Under the current assumptions one is able to apply the a priori estimate \eqref{eq:apriori_p} to obtain
\begin{align*}
&\| Y^{n+1} - Y^n \|^p_{\mathcal{S}^p_\beta} + \| Z^{n+1} - Z^n \|^p_{\mathcal{H}^p_\beta}\\
&\quad \leq C_p ~ \mathbb{E} \Big[ \Big( \int_0^T e^{\frac\beta2 s} \big| f(s,\Gamma^n(s)) - f(s,\Gamma^{n-1}(s)) \big| \mathrm{d}s \Big)^p \Big]\\
&\quad \leq C_p T^{p/2}~ \mathbb{E} \Big[ \Big( \int_0^T e^{\beta s} \big| f(s,\Gamma^n(s)) - f(s,\Gamma^{n-1}(s)) \big|^2 \mathrm{d}s \Big)^{p/2} \Big].
\end{align*}
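Here the last step follows from the Cauchy--Schwarz inequality in the time variable, $\int_0^T e^{\frac{\beta}{2}s}|\phi_s|\,\mathrm{d}s\leq T^{1/2}\big(\int_0^T e^{\beta s}|\phi_s|^2\,\mathrm{d}s\big)^{1/2}$, applied to $\phi_s=f(s,\Gamma^n(s))-f(s,\Gamma^{n-1}(s))$.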
In analogy to the calculation carried out in Equation (2.7) in \cite[Proof of Theorem 2.1]{DelongImkeller}, it is easy to see that
\begin{align*}
&\| Y^{n+1} - Y^n \|^p_{\mathcal{S}^p_\beta} + \| Z^{n+1} - Z^n \|^p_{\mathcal{H}^p_\beta}
\\
&\quad
\leq C_p T^{p/2} ~ \mathbb{E} \Big[ \Big( L \max\big\{ \int_{-T}^0 e^{-\beta s} {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}(\mathrm{d}s), \int_{-T}^0 e^{-\beta s} {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}(\mathrm{d}s) \big\}
\\
&\qquad\qquad \times \big( T\sup_{t\in[0,T]} e^{\beta t}|Y^n_t-Y^{n-1}_t|^2 + \int_0^T e^{\beta s}|Z^n_s - Z^{n-1}_s|^2 \mathrm{d}s \big) \Big)^{p/2} \Big]
\\
&\quad
\leq C_p T^{p/2} ~ 2^{p/2-1}~\Big(L \max\big\{ \int_{-T}^0 e^{-\beta s} {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}(\mathrm{d}s), \int_{-T}^0 e^{-\beta s} {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}(\mathrm{d}s) \big\}\Big)^{p/2}
\\
&\qquad\qquad
\times \Big( T^{p/2} \| Y^n-Y^{n-1} \|^p_{\mathcal{S}^p_\beta} + \| Z^n-Z^{n-1} \|^p_{\mathcal{H}^p_\beta} \Big)
\\
&\quad
\leq C_p ~ 2^{p/2-1}~\Big(L T \max\big\{ \int_{-T}^0 e^{-\beta s} {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}(\mathrm{d}s), \int_{-T}^0 e^{-\beta s} {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}(\mathrm{d}s) \big\}\Big)^{p/2} ~ \max\big\{ 1,T^{p/2} \big\}
\\
&\qquad\qquad
\times \Big( \| Y^n-Y^{n-1} \|^p_{\mathcal{S}^p_\beta} + \| Z^n-Z^{n-1} \|^p_{\mathcal{H}^p_\beta} \Big).
\end{align*}
Hence, by \eqref{eq:contraction}, the standard fixed point argument yields that $(Y^n,Z^n)$ converges in $\mathcal{S}^p_\beta \times \mathcal{H}^p_\beta$, which finishes the proof.
$\Box$
\section{Decoupled FBSDE with time delayed generators}
\label{section:diff}
The objective of this section is to extend the results from \cite{DelongImkeller,DelongImkeller2} to the case of decoupled forward-backward stochastic differential equations. For measurable functions $b,\sigma,g,f$, specified in more detail below, we study the time delayed FBSDE
\begin{align}
\label{eq:fwd1}
X^x_t &= x + \int_0^t b(s,X^x_s) \mathrm{d}s + \int_0^t \sigma(s,X^x_s) \mathrm{d}W_s, \quad x \in \mathbb{R}^d,\\
\label{eq:bwd1}
Y^x_t &= g(X^x_T) + \int_t^T f\big(s,\Theta^x(s)\big) \mathrm{d}s - \int_t^T Z^x_s \mathrm{d}W_s, \quad 0 \leq t \leq T,
\end{align}
where for $t\in[0,T]$, we write
\begin{align}
\nonumber
\Theta^x(t)
&=\big((X^x \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(t),(Y^x\cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t),(Z^x\cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t)\big)\\
\label{eq:theta}
&=\Big(\int_{-T}^0 X^x_{t+v} {\alpha_{\scriptscriptstyle{\mathcal{X}}}}(\mathrm{d} v),\int_{-T}^0 Y^x_{t+v} {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}(\mathrm{d} v),\int_{-T}^0 Z^x_{t+v} {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}(\mathrm{d} v) \Big),
\end{align}
with given deterministic finite measures ${\alpha_{\scriptscriptstyle{\mathcal{X}}}},{\alpha_{\scriptscriptstyle{\mathcal{Y}}}}$ and ${\alpha_{\scriptscriptstyle{\mathcal{Z}}}}$ supported on $[-T,0)$. The coefficients $b,\sigma,g,f$ appearing in \eqref{eq:fwd1}-\eqref{eq:bwd1} are assumed to satisfy certain smoothness and integrability conditions such that the backward equation \eqref{eq:bwd1} falls back into the setting of (H0)-(H4) from Section \ref{sec:delay_bsde}.
More precisely, we assume the following to hold:
\begin{enumerate}[(F1)]
\item[(F0)] ${\alpha_{\scriptscriptstyle{\mathcal{X}}}}$, ${\alpha_{\scriptscriptstyle{\mathcal{Y}}}},{\alpha_{\scriptscriptstyle{\mathcal{Z}}}}$ are three non-random, finitely valued measures supported on $[-T,0)$;
\item $g:\mathbb{R}^d\to\mathbb{R}^m$ is continuously differentiable with uniformly bounded first order derivatives, i.e. there exists $K'>0$ such that $|\nabla g|\leq K'$;
\item $f:[0,T]\times\mathbb{R}^d\times\mathbb{R}^m\times \mathbb{R}^{m\times d} \to \mathbb{R}^m$ is continuously differentiable with uniformly bounded derivatives, i.e. there exists a constant $K>0$ such that\footnote{\label{matrixtensorfootnote}We remark that this bound is taken over the corresponding Euclidean norm of the derivatives matrix/tensor. To avoid possible confusion when using tensors one can always interpret $f$ in the variable $z\in\mathbb{R}^{m\times d}$ as taking not a matrix but a sequence of $d$-dimensional vectors $z_i\in \mathbb{R}^d$ ($i\in\{1,\cdots,m\}$). The condition would then read $\sum_{i=1}^m |\nabla_{z_i} f| \leq \sqrt{K/3}$ where
$f:[0,T]\times\mathbb{R}^d\times\mathbb{R}^m\times \underbrace{\mathbb{R}^{d}\times \cdots \times \mathbb{R}^d}_{m-\text{times}} \to \mathbb{R}^m$.} $|\nabla_x f|,\, |\nabla_y f|,\,|\nabla_z f| \leq \sqrt{K/3}$ holds uniformly in all variables; $f$ satisfies a uniform Lipschitz condition with Lipschitz constant $\sqrt{K/3}$.
\item $b: [0,T] \times \mathbb{R}^d \to \mathbb{R}^d$ and $\sigma : [0,T] \times \mathbb{R}^d \to \mathbb{R}^{d\times d}$ are continuously differentiable functions with bounded derivatives; $|b(\cdot,0)|$ and $|\sigma(\cdot,0)|$ are uniformly bounded; $\sigma$ is elliptic;
\item $\big( \int_0^T |f(s,0,0,0)|^2 \mathrm{d}s \big)^{p/2} < \infty$ for $p\geq 2$;
\item $f(t,\cdot,\cdot,\cdot)\mathbbm{1}_{(-\infty,0)}(t) = 0$;
\end{enumerate}
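As a simple illustration (not needed in the sequel), in dimension $m=d=1$ one admissible choice is, for instance, $b(t,x)=-x$, $\sigma(t,x)\equiv 1$, $g(x)=K'\sin x$ and $f(t,x,y,z)=\sqrt{K/3}\,(\sin x+\sin y+\sin z)$ for $t\geq 0$ (extended by $0$ for $t<0$): all first order derivatives of $g$, respectively of $f$, are bounded by $K'$, respectively by $\sqrt{K/3}$, the diffusion coefficient is trivially elliptic and $f(\cdot,0,0,0)\equiv 0$, so that (F0)-(F5) hold for any finite delay measures supported on $[-T,0)$.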
Condition (F3) is a standard assumption which guarantees the existence and uniqueness of the solution of SDE \eqref{eq:fwd1}. Furthermore, condition (F2) implies that the generator is uniformly Lipschitz continuous in $(x,y,z) \in \mathbb{R}^d \times \mathbb{R}^m \times \mathbb{R}^{m\times d}$. In analogy to conditions (H2) and (H2') from Section \ref{sec:delay_bsde}, let us write down the following implication of the Lipschitz condition (F2): with the constant $K>0$ chosen above, for any $t\in[0,T]$ and any sufficiently integrable vector or matrix valued processes $u,u'$, $y,y'$ and $z,z'$ it holds that
\begin{align*}
\text{(F2')} \quad &\Big| f\big(t,(u\cdot{\alpha_{\scriptscriptstyle{\mathcal{X}}}})(t),(y\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t),(z\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t) \big) - f\big(t,(u'\cdot{\alpha_{\scriptscriptstyle{\mathcal{X}}}})(t),(y'\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t),(z'\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t) \big) \Big|^2
\\
&\qquad
\leq K\Big( \big| (u\cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}}) (t) - (u'\cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}}) (t) \big|^2
\\
&\hspace{2.6cm} + \big| (y\cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}) (t) - (y'\cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}) (t) \big|^2
+ \big| (z\cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}) (t) - (z'\cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}) (t) \big|^2 \Big)
\\
&\qquad
\leq K {\alpha_{\scriptscriptstyle{\mathcal{X}}}}([-T,0]) \big((u-u')^2\cdot{\alpha_{\scriptscriptstyle{\mathcal{X}}}}\big)(t)+ L\Big(\big((y-y')^2\cdot{\alpha_{\scriptscriptstyle{\mathcal{Y}}}}\big)(t) + \big((z-z')^2\cdot{\alpha_{\scriptscriptstyle{\mathcal{Z}}}}\big)(t)\Big),
\end{align*}
where $L:=K \alpha$ with $\alpha$ defined in \eqref{eq:alpha1}. For a fixed $x \in\mathbb{R}^d$, the existence and uniqueness of solutions to the backward equation \eqref{eq:bwd1} in $\mathcal{S}^2_\beta \times \mathcal{H}^2_\beta$ is guaranteed under the assumptions (F0)-(F5) together with the compatibility criterion from Theorem \ref{theo:DelongImkeller_thm2.1} on the terminal time and the Lipschitz constant $L=K\alpha$, i.e.
\begin{align*}
\big(8T + \frac1\beta \big) L \int_{-T}^0 e^{-\beta s} \rho(\mathrm{d}s) \max\{1,T\} < 1,\ \text{ for }\rho \in \{ {\alpha_{\scriptscriptstyle{\mathcal{Y}}}},{\alpha_{\scriptscriptstyle{\mathcal{Z}}}}\}.
\end{align*}
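Purely as an illustration, suppose the delay measures are point masses, ${\alpha_{\scriptscriptstyle{\mathcal{Y}}}}={\alpha_{\scriptscriptstyle{\mathcal{Z}}}}=\delta_{-r}$ for some $0<r\leq T$ (an assumption made only for this example). Then $\int_{-T}^0 e^{-\beta s}\rho(\mathrm{d}s)=e^{\beta r}$ and the condition above can be checked numerically; a minimal Python sketch, with arbitrarily chosen parameter values, reads:
\begin{verbatim}
import math

def p2_compatibility(T, beta, K, alpha, r):
    # Evaluates (8T + 1/beta) * L * exp(beta*r) * max(1, T) with L = K*alpha,
    # i.e. the p = 2 compatibility condition for point-mass delay measures.
    L = K * alpha
    value = (8 * T + 1 / beta) * L * math.exp(beta * r) * max(1.0, T)
    return value, value < 1

# Example: small terminal time and small Lipschitz constant.
print(p2_compatibility(T=0.1, beta=1.0, K=0.2, alpha=1.0, r=0.1))
\end{verbatim}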
To extend the result to $\mathcal{S}^p_\beta \times \mathcal{H}^p_\beta$ for $p >2$, one only needs to replace the condition above by the compatibility condition from Theorem \ref{theo:picard},
\begin{align*}
2^{p/2-1}C_p \Big(L T \int_{-T}^0 e^{-\beta s} \rho(\mathrm{d} s) \Big)^{p/2} \max\{1,T^{p/2}\} < 1,\ \text{ for }\rho \in \{{\alpha_{\scriptscriptstyle{\mathcal{Y}}}},{\alpha_{\scriptscriptstyle{\mathcal{Z}}}} \}.
\end{align*}
Throughout this section, given $p \geq 2$, we will assume that for every $x \in \mathbb{R}^d$, the FBSDE \eqref{eq:fwd1}-\eqref{eq:bwd1} admits a unique solution $(X^x,Y^x,Z^x) \in \mathcal{S}^q_\beta(\mathbb{R}^d) \times \mathcal{S}^p_\beta(\mathbb{R}^m) \times \mathcal{H}^p_\beta(\mathbb{R}^{m \times d})$ for all $q\geq 2$.
\subsection{G\^ateaux and Norm differentiability}
In this section we investigate the variational differentiability of the solution $(X^x,Y^x,Z^x)$ of the time delayed FBSDE \eqref{eq:fwd1}-\eqref{eq:bwd1} with respect to the Euclidean parameter $x\in\mathbb{R}^d$, i.e. with respect to the initial condition of the forward diffusion. By a well known result (see e.g. \cite{Protter}), (F3) implies that the forward component $X^x$ is differentiable with respect to the parameter $x\in\mathbb{R}^d$. It is natural to pose the question whether this smoothness is carried over to $(Y^x,Z^x)$ in the setting of FBSDE with time delayed generators.
Throughout this section we fix an element $h$ of $\mathbb{R}^{d} \setminus \{0\}$. Our goal is to show that the variational equations of \eqref{eq:fwd1}-\eqref{eq:bwd1} are given by
\begin{align}
\label{eq:nabla_X}
\nabla X^x_t h&= h + \int_0^t \nabla b(s,X^x_s) \nabla X^x_s h \; \mathrm{d}s + \int_0^t \nabla \sigma(s,X^x_s) \nabla X^x_s h \; \mathrm{d}W_s,\\
\label{eq:nabla_Y}
\nabla Y^x_t h &= \nabla g(X^x_T) \nabla X^x_T h \; - \int_t^T \nabla Z^x_s h \; \mathrm{d}W_s
+ \int_t^T
\big\langle (\nabla f)\big(s, \Theta^x(s)\big)
, (\nabla \Theta^x h)(s) \big\rangle \mathrm{d}s,
\end{align}
where $\nabla X^x$ (respectively $\nabla Y^x$ and $\nabla Z^x$) denotes the G\^ateaux derivative of $X^x$ (respectively $Y^x$ and $Z^x$) in the direction $h$, and $(\nabla\Theta^x h)(t)$ is to be understood in the same fashion as in \eqref{eq:theta}, i.e.
\begin{align}
\label{eq:nablatheta}
(\nabla\Theta^x h)(t)
&=\big((\nabla X^x h \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(t),(\nabla Y^x h \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t),(\nabla Z^x h \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t)\big), \quad t\in[0,T].
\end{align}
Note that (F3) implies that \eqref{eq:nabla_X} admits a unique solution in $\mathcal{S}^p_\beta$ for every $\beta \geq 0$ and $p \geq 2$. Let $(X,Y,Z)$ and $\nabla X h$ solve \eqref{eq:fwd1}-\eqref{eq:bwd1} and \eqref{eq:nabla_X} respectively and let $\Theta^x$ be as defined by \eqref{eq:theta}. Now consider the BSDE with the linear time delayed generator for $t\in[0,T]$
\begin{align}
\label{eq:nabla_YBis}
P_t h &= \nabla g(X^x_T) \nabla X_T^x h - \int_t^T Q_s h \; \mathrm{d}W_s
+\int_t^T \widehat{F}\big(s,(P h \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s),(Q h \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s)\big)\mathrm{d}s ,
\end{align}
where $\widehat{F}:\Omega \times [0,T] \times \mathbb{R}^m\times \mathbb{R}^{m \times d}\to \mathbb{R}^m$,
$\widehat{F}(t,p,q)=\langle (\nabla f)\big(t, \Theta^x(t)\big) , \big((\nabla X^x h \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(t),p,q\big) \rangle$.
The next corollary states, using Theorem \ref{theo:DelongImkeller_thm2.1} and Proposition \ref{prop:apriori_p2}, a result concerning the existence and uniqueness of the solution to \eqref{eq:nabla_YBis}. This solution process will then serve as the natural candidate (in a suitable sense) for $\nabla_x Y^x h$ and $\nabla_x Z^x h$, the solution to \eqref{eq:nabla_Y}.
\begin{corollary}
\label{cor:nabla_Y_exists}
Let $p \geq 2$, $h\in \mathbb{R}^{d} \setminus \{0\}$ and $\beta>0$. Assume that (F0)-(F5) are satisfied and let $L>0$ be as in (F2'). If $p>2$ assume that $T$, $K$, $\alpha$ are chosen as in Proposition \ref{lemma:apriori} and satisfy in addition
\begin{align}
\label{section 3 compatibility condition}
2^{p/2-1}C_p \Big(L T \int_{-T}^0 e^{-\beta s} \rho(\mathrm{d} s) \Big)^{p/2} \max\{1,T^{p/2}\} < 1,\ \text{ for }\rho\in\{{\alpha_{\scriptscriptstyle{\mathcal{Y}}}},{\alpha_{\scriptscriptstyle{\mathcal{Z}}}}\}.
\end{align}
If $p=2$ assume $T$, $K$, $\alpha$ are chosen such that the conditions of Theorem \ref{theo:DelongImkeller_thm2.1} and of Proposition \ref{prop:apriori_p2} hold. Then for every fixed $x$ in $\mathbb{R}^d$, BSDE \eqref{eq:bwd1} has a unique solution $(Y,Z) \in \mathcal{S}^p_\beta \times \mathcal{H}^p_\beta$ and
BSDE \eqref{eq:nabla_YBis} has a unique solution $(P h,Q h) \in \mathcal{S}^p_\beta \times \mathcal{H}^p_\beta$.
\end{corollary}
\begin{proof}
Given the known properties of $X$ and $\nabla X$ (and hence of $\nabla Xh$) it is easy to see that $\xi=\nabla g(X^x_T) \nabla X^x_T h$ and $\widehat{F}(\cdot,0,0)$ satisfy conditions (H1), (H3) and (H4). Recalling Remark \ref{constants Di independent of data}, the compatibility condition \eqref{section 3 compatibility condition} as well as the conditions in Proposition \ref{lemma:apriori} depend only on the Lipschitz constant $K$ of (F2), the delay measures ${\alpha_{\scriptscriptstyle{\mathcal{Y}}}}$, ${\alpha_{\scriptscriptstyle{\mathcal{Z}}}}$, $T$ and the dimension of the equations.
From the definition of $\widehat{F}$ and using the bounds of the (spatial) derivatives of $f$ assumed in (F2) it is clear that $\widehat{F}$ satisfies a standard Lipschitz condition (in the spatial variables). In particular, take $p,p'\in \mathbb{R}^m$ and\,\footnote{Or a sequence of $q_i,q_i'\in\mathbb{R}^m$ with $i\in\{1,\cdots,d\}$ as we saw in page \pageref{matrixtensorfootnote}'s footnote.
} $q,q'\in\mathbb{R}^{m\times d}$, then via Minkowski's and Cauchy-Schwarz inequalities along with (F2) we have
\begin{align*}
|\widehat{F}(t,p,q)-\widehat{F}(t,p',q')|
&
\leq \big|\langle (\nabla_y f)\big(t, \Theta^x(t)\big) , (p-p') \rangle\big|+\big|
\langle (\nabla_z f)\big(t, \Theta^x(t)\big) , (q-q') \rangle \big|
\\
&
\leq |(\nabla_y f)|\,|p-p'| +|(\nabla_z f)|\,|q-q'|
\leq \sqrt{K/3}(\,|p-p'| + |q-q'|\,).
\end{align*}
And hence $\widehat{F}$ satisfies exactly the same Lipschitz condition as $f$.
Furthermore, the delay measures appearing in $\widehat{F}$ are exactly the same ones as those that appear in $f$. We can thus conclude that the Lipschitz constant, the delay measures, terminal time $T$ and dimensions for $f$ and $\widehat{F}$ are the same. Under this corollary's assumptions, the conditions of
Theorem \ref{theo:picard} are satisfied for both BSDE \eqref{eq:bwd1} and \eqref{eq:nabla_YBis}. The existence of a unique solution $(Y,Z)$ and $(Ph,Qh)$ in $\mathcal{S}^p_\beta {t_i}mes \mathcal{H}^p_\beta$ of \eqref{eq:bwd1} and \eqref{eq:nabla_YBis} (respectively) follows from Theorem \ref{theo:picard} (and Theorem \ref{theo:DelongImkeller_thm2.1}).
\end{proof}
The solution of BSDE \eqref{eq:nabla_YBis} now serves as the natural candidate for the variational derivative of $(Y,Z)$, i.e. for the solution of \eqref{eq:nabla_Y}. If one shows that $(\nabla Y^x h,\nabla Z^x h)$ exists in some sense, then by the uniqueness of the solution of \eqref{eq:nabla_YBis} the solutions to \eqref{eq:nabla_Y} and \eqref{eq:nabla_YBis} must coincide, i.e. $\big(\nabla Y^x h,\nabla Z^x h\big) = \big(P h,Q h\big)$ holds almost surely.
For the rest of the section, we assume that all assumptions ensuring the existence and uniqueness of the variational equations \eqref{eq:nabla_X}-\eqref{eq:nabla_Y} are fulfilled, i.e. we assume that the assumptions of Corollary \ref{cor:nabla_Y_exists} hold. In our next result we show that the mapping $x\mapsto (Y^x,Z^x)$ is differentiable in an adequate sense.
\begin{proposition}
\label{prop:diff}
Take $p\geq 2$ and assume the conditions of Corollary \ref{cor:nabla_Y_exists} hold. Then for any $x\in\mathbb{R}^d$ the solution $(X^x, Y^x, Z^x)$ of the FBSDE \eqref{eq:fwd1}-\eqref{eq:bwd1} is norm-differentiable in the following sense:
\[
\lim_{\varepsilon \to 0} \left\| \frac{Y^{x+\varepsilon h}-Y^x}{\varepsilon} - \nabla Y^x h \right\|^p_{\mathcal{S}^p_\beta}=\lim_{\varepsilon \to 0} \left\| \frac{Z^{x+\varepsilon h}-Z^x}{\varepsilon} - \nabla Z^x h\right\|^p_{\mathcal{H}^p_\beta}=0, \quad \forall h \in \mathbb{R}^d \setminus\{0\},
\]
where $(\nabla Y^x h, \nabla Z^x h)$ is the unique solution of the BSDE
\begin{align*}
\nabla Y^x_t h &= \nabla g(X^x_T) \nabla X_T^x h- \int_t^T \nabla Z_s^x h \; \mathrm{d}W_s + \int_t^T
\big\langle (\nabla f)\big(s, \Theta^x(s)\big) , (\nabla\Theta^x h)(s) \big\rangle \mathrm{d}s,
\end{align*}
with $\Theta^x$ and $\nabla\Theta^x$ defined by \eqref{eq:theta} and \eqref{eq:nablatheta} respectively.
\end{proposition}
\begin{proof}
Let $x \in \mathbb{R}^d$,
$t\in[0,T]$ and $\varepsilon>0$. We use the following notations
\begin{align}
\label{eq:A}
A_{s,\mathcal{X}}&:=\int_0^1 \nabla_x f\Big(s,(X^{x}\cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(s)+\theta \big((X^{x+\varepsilon h}-X^{x})\cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}}\big)(s),
\nonumber\\
&\hspace{3.0cm}(Y^{x+\varepsilon h}\cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s),(Z^{x+\varepsilon h}\cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s)\Big) \mathrm{d} \theta,
\nonumber\\
A_{s,\mathcal{Y}}&:=\int_0^1 \nabla_y f\Big(s,(X^{x}\cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(s),
\nonumber\\
&\hspace{3.0cm}(Y^{x}\cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s)+\theta \big((Y^{x+\varepsilon h}-Y^{x})\cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}\big)(s),(Z^{x+\varepsilon h}\cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s)\Big) \mathrm{d} \theta,
\\
A_{s,\mathcal{Z}}&:=\int_0^1 \nabla_z f\Big(s,(X^{x}\cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(s),
\nonumber\\
&\hspace{3cm}(Y^{x}\cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s),(Z^{x}\cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s)+\theta \big((Z^{x+\varepsilon h}-Z^{x})\cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}\big)(s)\Big) \mathrm{d} \theta.
\nonumber
\end{align}
We remark that although the processes $A$ depend on $\varepsilon$ and $x$, for the sake of notational simplicity we do not write this dependence explicitly. We remark also that by assumption (F2) the processes satisfy $\vert A_{\cdot,\ast} \vert \leq \sqrt{K/3}$ for $\ast=\mathcal{X}, \mathcal{Y}, \mathcal{Z}$; in particular they are uniformly bounded in $x$ and $\varepsilon$.
We denote by $(P h,Q h)$ the solution of the BSDE \eqref{eq:nabla_YBis} which coincides with $(\nabla Y h,\nabla Z h)$. We define the auxiliary processes
$\xi:=\big(g(X_T^{x+\varepsilon h})-g(X_T^x)\big)/{\varepsilon}-\nabla g(X_T^x) \nabla X_T^x h$,
\begin{align}
\label{definition-of-X-tilde}
&U:=\frac{Y^{x+\varepsilon h}-Y^x}{\varepsilon}-P h,\ \ V:=\frac{Z^{x+\varepsilon h}-Z^x}{\varepsilon}-Q h,\ \text{ and }\ \tilde{X}:=\frac{X^{x+\varepsilon h}-X^x}{\varepsilon}-\nabla X^x h.
\end{align}
Notice that from Assumption (F2) and the standard SDE theory we have that $\tilde{X}$ is well defined and $\tilde{X}\in\mathcal{S}^p_\beta$ for any $\beta\geq 0$ and $p\geq 2$. We now claim and prove that
\[
\lim_{\varepsilon \to 0} \| U \|^p_{\mathcal{S}^p_\beta}=\lim_{\varepsilon \to 0} \| V \|^p_{\mathcal{H}^p_\beta}=0,\quad \text{for arbitrary } x \in \mathbb{R}^d.
\]
This result obviously proves the norm differentiability. To start with, we have
\begin{align*}
U_t&=\xi+\int_t^T \frac{f(s,\Theta^{x+\varepsilon h}(s))-f(s,\Theta^{x}(s))}{\varepsilon} \mathrm{d}s \\
&- \int_t^T \big\langle (\nabla f)\big(s, \Theta^x(s)\big)
, \big( (\nabla X^x h \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(s), (P h \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s), (Q h \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s)\big) \big\rangle \mathrm{d}s - \int_t^T V_s \mathrm{d}ws.
\end{align*}
By construction the above equation is well defined, since for any $x$ and $\varepsilon$ all the involved processes are known a priori to exist and have the convenient integrability properties. The format of the above dynamics is still not convenient for our computations so we transform it into the more familiar dynamics of a delay BSDE. Using the identity $\phi(x)-\phi(y)=(x-y) \int_0^1 \nabla\phi(y+\theta(x-y)) \mathrm{d}\theta$ for a continuously differentiable function $\phi:\mathbb{R}^a \to \mathbb{R}^b$ ($a$ and $b$ being arbitrary non-zero integers), the previous equation leads to
\begin{align}
\label{eq:diffBSDEtemp}
\nonumber
U_t&=\xi+\frac{1}{\varepsilon}\int_t^T\big[\, A_{s,\mathcal{X}} \big((X^{x+\varepsilon h}-X^x)\cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}}\big)(s)
\\
\nonumber
& \hspace{3cm}
+ A_{s,\mathcal{Y}} \big((Y^{x+\varepsilon h}-Y^x)\cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}\big)(s)+A_{s,\mathcal{Z}} \big((Z^{x+\varepsilon h}-Z^x)\cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}\big)(s) \,\big]\mathrm{d}s
\\
\nonumber
&
- \int_t^T \big\langle (\nabla f)\big(s, \Theta^x(s)\big)
, \big( (\nabla X^x h \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(s), (P h \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s), (Q h \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s)\big) \big\rangle \mathrm{d}s - \int_t^T V_s \mathrm{d} W_s
\\
&
=\xi+\int_t^T \Phi\Big(s,(\tilde{X} \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(s),(U \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s),(V \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s) \Big) \mathrm{d}s - \int_t^T V_s \mathrm{d} W_s,
\end{align}
with $\tilde{X}$ given in \eqref{definition-of-X-tilde}, $ \Phi(t,x,y,z):=R_t+x A_{t,\mathcal{X}} + y A_{t,\mathcal{Y}} + z A_{t,\mathcal{Z}}$
and
\begin{align*}
R_t&:= -\big\langle (\nabla f)\big(t, \Theta^x(t)\big)
, \big( (\nabla X^x h \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(t), (P h \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t), (Q h \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t)\big) \big\rangle\\
&\qquad \qquad + A_{t,\mathcal{X}} (\nabla X^x h \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(t) + A_{t,\mathcal{Y}} (P h \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t) + A_{t,\mathcal{Z}} (Q h \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t).
\end{align*}
We now aim at using the results of Section 2 on the family (indexed by $\varepsilon$) of auxiliary delay BSDEs \eqref{eq:diffBSDEtemp}. In view of the uniform boundedness of the processes $A$ and the linearity of the driver $\Phi$, we can repeat the arguments used in the proof of Corollary \ref{cor:nabla_Y_exists} to conclude that under the assumptions of this proposition the data of BSDE \eqref{eq:diffBSDEtemp} (Lipschitz constant, delay measure and terminal time) satisfies, uniformly in $\varepsilon$, the assumptions of Corollary \ref{cor:nabla_Y_exists} as well.
Applying the a priori estimate of Proposition \ref{lemma:apriori} or the moment estimate from Corollary \ref{coro:momentestimates} to the BSDE \eqref{eq:diffBSDEtemp} and taking into account that $\Phi$ satisfies (F2), we get
\begin{align}
\label{eq:diffBSDEtemp2}
\| U \|^p_{\mathcal{S}^p_\beta} +\| V \|^p_{\mathcal{H}^p_\beta} &\leq C_p \Big\{ \mathbb{E} \Big[ (e^{\beta T}|\xi|^2)^{p/2} \Big] + \mathbb{E} \Big[ \big(\int_0^T e^{\beta s} \big|\Phi\big(s,(\tilde{X} \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(s),0,0\big)\big| \mathrm{d}s \big)^p \Big] \Big\}
\nonumber
\\
&
\leq C \Big\{ \mathbb{E} \Big[ (e^{\beta T}|\xi|^2)^{p/2} \Big] + \| \tilde{X} \|^p_{\mathcal{H}^p_\beta} + \mathbb{E} \Big[ \big( \int_0^T e^{\beta s} |R_s| \mathrm{d}s \big)^p \Big] \Big\},
\end{align}
for some constant $C>0$ (where we have used that $A_{\cdot,\mathcal{X}}$ is uniformly bounded). We proceed to compute the limit of each term on the right hand side of \eqref{eq:diffBSDEtemp2} as $\varepsilon$ goes to zero.
We first deal with the second term of the right hand side of \eqref{eq:diffBSDEtemp2}. Define
$$ \hat{\sigma}_t:=\int_0^1 \nabla \sigma\big(t,X_t^{x}+\theta (X_t^{x+\varepsilon h}-X_t^{x})\big) \mathrm{d}\theta \quad \textrm{ and } \quad \hat{b}_t:=\int_0^1 \nabla b\big(t,X_t^{x}+\theta (X_t^{x+\varepsilon h}-X_t^{x})\big) \mathrm{d}\theta. $$
Note that $\tilde{X}\in \mathcal{S}^p$ for any $p\geq 2$ (see \eqref{definition-of-X-tilde}) and solves the linear SDE
\begin{align}
\label{X-tilde-SDE}
\tilde{X}_t=J_t+\int_0^t [\,\hat{\sigma}_s \tilde{X}_s\,] \mathrm{d} W_s + \int_0^t\,[ \hat{b}_s \tilde{X}_s\,] \mathrm{d}s,
\end{align}
where $J$ is given by
\[
J_t:=\int_0^t [\,\nabla X_s^x h (\hat{\sigma}_s-\nabla \sigma(s,X_s^x))\,] \mathrm{d} W_s + \int_0^t[\, \nabla X_s^x h \big(\hat{b}_s-\nabla b(s,X_s^x)\big) \,]\mathrm{d}s.
\]
Given the known properties of $\nabla X$ and the fact that $\hat{b},\hat{\sigma},\nabla b$, and $\nabla \sigma$ are uniformly bounded we have that $J\in \mathcal{S}_0^p$ for any $p\geq 2$. Indeed, Doob's inequality leads to
\begin{align*}
&\mathbb{E}\Big[ \Big( \sup_{t\in [0,T]} \big\vert \int_0^t[\, \nabla X_s^x h\big(\hat{\sigma}_s-\nabla \sigma(s,X_s^x)\big)\,] \mathrm{d} W_s \big\vert^2 \Big)^{p/2} \Big]
\leq C
\big\|\nabla X^x h \big(\hat{\sigma}-\nabla \sigma(\cdot,X^x)\big)\big\|_{\mathcal{H}^p}^{p}
<\infty.
\end{align*}
Moreover, note that by Lebesgue's dominated convergence theorem
\[
\lim_{\varepsilon \to 0}
\|\nabla X^x h \big(\hat{\sigma}-\nabla \sigma(\cdot,X^x)\big)\|_{\mathcal{H}^p}^{p}
=0.\]
Similarly, using Jensen's inequality, the finite variation part of $J$ is an element of $\mathcal{S}^p_0(\mathbb{R})$ and
$$\lim_{\varepsilon \to 0} \|J\|_{\mathcal{S}^p_0}=0.$$
Now we derive the following estimate for $\tilde{X}$ in terms of the norm of $J$
\begin{equation}
\label{eq:esttildeX}
\|\tilde{X} \|_{\mathcal{S}^p_\beta}\leq C\, \mathbb{E}[\sup_{t\in [0,T]} |\tilde{X}_t|^p] \leq C\, \| J \|_{\mathcal{S}^p_0}
\end{equation}
which will show that $\lim_{\varepsilon \to 0} \| \tilde{X} \|_{\mathcal{S}^p_\beta}=0$.
Indeed equation \eqref{X-tilde-SDE} implies that:
\[
\mathbb{E}[\sup_{0\leq r\leq t} |\tilde{X}_r|^p] \leq C\, \mathbb{E}\Big[ \sup_{0\leq r \leq t} |J_r|^p + \sup_{0\leq r\leq t} \big| \int_0^r [\,\hat{\sigma}_s \tilde{X}_s\,] \mathrm{d} W_s \big|^p + \sup_{0 \leq r \leq t} \big| \int_0^r \,[ \hat{b}_s \tilde{X}_s\,] \mathrm{d}s \big|^p \Big].
\]
Applying Burkholder-Davis-Gundy inequality to the second term in the right hand side, we get:
\[
\mathbb{E}[\sup_{0\leq r\leq t} |\tilde{X}_r|^p] \leq C\, \mathbb{E}\Big[ \sup_{0\leq r \leq t} |J_r|^p + \big| \int_0^t |\hat{\sigma}_s \tilde{X}_s|^2 \mathrm{d}s \big|^{p/2} + \sup_{0 \leq r \leq t} \big| \int_0^r \,[ \hat{b}_s \tilde{X}_s\,] \mathrm{d}s \big|^p \Big].
\]
Jensen's inequality and the fact that $\hat{\sigma}$ and $\hat{b}$ are bounded imply that:
\[
\mathbb{E}\big[\sup_{0\leq r\leq t} |\tilde{X}_r|^p\big] \leq C \, \mathbb{E}\Big[ \sup_{0\leq r \leq t} |J_r|^p +\int_0^t |\tilde{X}_s|^p \mathrm{d}s \Big]
\]
hence
\[
\mathbb{E}\big[\sup_{0\leq r\leq t} |\tilde{X}_r|^p\big] \leq C\, \Big\{ \mathbb{E}[\sup_{0\leq r \leq t} |J_r|^p] + \int_0^t \mathbb{E}[\sup_{0\leq r \leq s} |\tilde{X}_r|^p] \mathrm{d}s \Big\}.
\]
Gronwall's lemma finally entails estimate \eqref{eq:esttildeX} and
thus $\lim_{\varepsilon \to 0} \| \tilde{X} \|_{\mathcal{S}^p_\beta}=0$.
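To spell out the Gronwall step, one may set $\varphi(t):=\mathbb{E}\big[\sup_{0\leq r\leq t}|\tilde{X}_r|^p\big]$; the last display then reads $\varphi(t)\leq C\big(\mathbb{E}[\sup_{0\leq r\leq T}|J_r|^p]+\int_0^t\varphi(s)\,\mathrm{d}s\big)$, whence Gronwall's lemma gives $\varphi(T)\leq C e^{CT}\,\mathbb{E}\big[\sup_{0\leq r\leq T}|J_r|^p\big]$, which is the content of \eqref{eq:esttildeX} up to the value of the generic constant.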
Let us consider the terminal condition term in \eqref{eq:diffBSDEtemp2}. Denoting
$$
\hat{g}:=\int_0^1 \nabla g\big(X_T^{x}+\theta (X_T^{x+\varepsilon h}-X_T^{x})\big) \mathrm{d}\theta,
$$
it holds that
\begin{align*}
\mathbb{E} \Big[ (e^{\beta T}|\xi|^2)^{p/2} \Big]
&
=
e^{\beta T p/2}\big\|\hat{g} \big(\frac{X^{x+\varepsilon h}_T-X^x_T}{\varepsilon}-\nabla X_T^x h \big) + \big(\hat{g}-\nabla g(X_T^x)\big) \nabla X_T^x h\big\|_{L^p}^p
\\
&
\leq C\Big\{
\big\|\frac{X^{x+\varepsilon h}_T-X^x_T}{\varepsilon}-\nabla X_T^x h \big\|_{L^p}^p
+
\big\|\,|\nabla X_T^x h \vert\, \vert \hat{g}-\nabla g(X_T^x)|\,\big\|_{L^p}^p\Big\}
\\
&
\leq C \Big\{
\|\tilde{X}\|_{\mathcal{S}^p_0}^p
+
\big\|\,|\nabla X_T^x h \vert\, \vert \hat{g}-\nabla g(X_T^x)|\,\big\|_{L^p}^p\Big\}
\underset{\varepsilon \to 0}{\longrightarrow} 0,
\end{align*}
where we have used Lebesgue's dominated convergence theorem for the second summand and the estimate obtained above on the norm of ${t_i}lde{X}$ for the first one.
Now, let us consider the last term on the right hand side of \eqref{eq:diffBSDEtemp2}. We have that
\begin{align*}
\mathbb{E} \Big[ \Big( \int_0^T e^{\beta s} |R_s| \mathrm{d}s \Big)^p \Big]
&\leq C\, \mathbb{E} \left[ \Big( \int_0^T e^{\beta s} \left|\left(A_{s,\mathcal{X}}-\nabla_x f\big(s, \Theta^x(s)\big)\right) (\nabla X^x h \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(s)\right| \mathrm{d}s \Big)^p \right]\\
&\qquad+ C\, \mathbb{E} \left[ \Big( \int_0^T e^{\beta s} \left|\left(A_{s,\mathcal{Y}}-\nabla_y f\big(s, \Theta^x(s)\big)\right) ( P h \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s)\right| \mathrm{d}s \Big)^p \right]\\
&\qquad+ C\, \mathbb{E} \left[ \Big( \int_0^T e^{\beta s} \left|\left(A_{s,\mathcal{Z}}-\nabla_z f\big(s, \Theta^x(s)\big)\right) ( Q h \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s)\right| \mathrm{d}s \Big)^p \right].
\end{align*}
Standard arguments yield (note that $\varepsilon>0$ is implicitly contained in $A_{t,\mathcal{X}}$, see \eqref{eq:A})
\begin{align*}
A_{t,\mathcal{X}} \longrightarrow \nabla_x f\big(t, \Theta^x(t)\big) \quad \text{as $\varepsilon \to 0$ in probability, for }
\mathrm{d}t\text{-a.a. } t \in [0,T].
\end{align*}
Moreover, Proposition \ref{lemma:apriori} and the previous calculations show that
\begin{align*}
&\| Y^{x+\varepsilon h} -Y^x \|^p_{\mathcal{S}^p_\beta} + \| Z^{x+\varepsilon h} -Z^x \|^p_{\mathcal{H}^p_\beta}
\\
&\hspace{1cm}
\leq C \, \big\{
e^{ \beta T\,p}\|g(X^{x+\varepsilon h}) - g(X^{x}) \|^p_{L^p}
+
\|X^{x+\varepsilon h}-X^x \|^p_{\mathcal{H}^p_\beta} \big\}\underset{\varepsilon \to 0}{\longrightarrow} 0,
\end{align*}
for some positive constant $C$. This implies for $\mathrm{d}t$-a.a. $t\in[0,T]$
\begin{align*}
Y_t^{x+\varepsilon h} \to Y_t^x, \quad Z_t^{x+\varepsilon h} \to Z_t^x, \quad \text{as $\varepsilon\to 0$ in probability.}
\end{align*}
Since $\nabla_y f$, $\nabla_z f$ are continuous, it follows that for $\mathrm{d}t$-a.a. $t\in[0,T]$
\begin{align*}
A_{t,\mathcal{Y}} \longrightarrow \nabla_y f\big(t, \Theta^x(t)\big), \quad \text{as $\varepsilon\to 0$ in probability,}\\
A_{t,\mathcal{Z}} \longrightarrow \nabla_z f\big(t, \Theta^x(t)\big), \quad \text{as $\varepsilon\to 0$ in probability.}
\end{align*}
Thus, using Lemma \ref{lemma:interchange} and the fact that $P$ and $Q$ are square integrable, Lebesgue's dominated convergence theorem (which also holds, if almost sure convergence is replaced by convergence in probability, \textit{cf.} \cite{Shiryaev}, remark on p. 258) yields $\lim_{\varepsilon \to 0} \mathbb{E} \big[ \big( \int_0^T e^{\beta s} |R_s| \mathrm{d}s \big)^p \big]=0$. Now \eqref{eq:diffBSDEtemp2} yields that
\begin{align*}
\lim_{\varepsilon \to 0} \big\{ \| U \|^p_{\mathcal{S}^p_\beta} +\| V \|^p_{\mathcal{H}^p_\beta} \big\} = 0,
\end{align*}
which proves the claim.
\end{proof}
\subsection{Strong differentiability}
All previous assumptions on existence and uniqueness remain in force. In this section, we concentrate on the smoothness properties of the paths associated to the processes $(Y^x,Z^x)$. We assume throughout this section that $m=1$, i.e. the delay BSDEs are now one-dimensional. A first result is obtained in the following
\begin{proposition}
\label{prop:cont}
Let $m=1$. Under the assumptions of Corollary \ref{cor:nabla_Y_exists}
we have for $x,x'\in\mathbb{R}^d$
$$ \mathbb{E}\big[ \sup_{0\leq t \leq T} |X_t^x-X_t^{x'}|^q \big] \leq C |x-x'|^{q},\quad \text{for any }\ q\geq 2,$$
and for any $p>2$
$$ \mathbb{E}\Big[ \sup_{0\leq t \leq T} \big( e^{\beta t} |Y_t^{x}-Y_t^{x'}|^2 \big)^{p/2} \Big] + \mathbb{E}\Big[ \big( \int_0^T e^{\beta s} |Z_s^{x}-Z_s^{x'}|^2 \mathrm{d}s \big)^{p/2} \Big] \leq C |x-x'|^p.$$
Thus for every $x \in \mathbb{R}^d$,
\begin{itemize}
\item the mapping $x\mapsto Y^x$ from $\mathbb{R}^d$ to the space of c\`adl\`ag functions equipped with the topology given by the uniform convergence on compact sets is continuous $\mathbb{P}$-almost surely,
\item the mapping $x\mapsto Z^x$ is continuous from $\mathbb{R}^d$ to $L^2([0,T])$ $\mathbb{P}$-almost surely.
\end{itemize}
In particular, for every $x \in \mathbb{R}^d$,
\begin{itemize}
\item the mapping $x\mapsto Y_t^x$ from $\mathbb{R}^d$ to $\mathbb{R}$ is continuous for all $t \in [0,T]$, $\mathbb{P}$-almost surely,
\item the mapping $x\mapsto Z^x_t(\omega)$ is continuous for every $x \in \mathbb{R}^d$ and $ \mathrm{d}t \otimes \mathrm{d} \mathbb{P}$-almost all $(t,\omega)$.
\end{itemize}
\end{proposition}
\begin{proof}
The estimate on the forward process is classical (see \textit{e.g.} \cite[Theorem V.37 Equation (***) p.~309]{Protter}). In this proof, $C>0$ denotes a generic constant which may differ from line to line. We apply the a priori estimate from Proposition \ref{lemma:apriori} and get
\begin{align*}
&\mathbb{E}\Big[ \sup_{0\leq t \leq T} \big( e^{\beta t} |Y_t^{x}-Y_t^{x'}|^2 \big)^{p/2} \Big] + \mathbb{E}\Big[ \big( \int_0^T e^{\beta s} |Z_s^{x}-Z_s^{x'}|^2 \mathrm{d}s \big)^{p/2} \Big]\\
& \quad \leq C_p \Big\{ \mathbb{E}\Big[ \big(e^{\beta T} |g(X_T^x)-g(X_T^{x'})|^2 \big)^{p/2} \Big]\\
&\quad\quad + \mathbb{E}\Big[ \big(\int_0^T e^{\frac{\beta}{2} s} |f\big(s,(X^x \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(s),\zeta(s)\big)-f\big(s,(X^{x'} \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(s),\zeta(s)\big)| \mathrm{d}s \big)^{p} \Big] \Big\}\\
& \quad \leq C \Big\{ \mathbb{E}\Big[ \big(e^{\beta T} |g(X_T^x)-g(X_T^{x'})|^2 \big)^{p/2} \Big]\\
&\quad\quad + \mathbb{E}\Big[ \big(\int_0^T e^{\beta s} |f(s,(X^x \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(s),\zeta(s))-f(s,(X^{x'} \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(s),\zeta(s))|^2 \mathrm{d}s \big)^{p/2} \Big] \Big\},
\end{align*}
with $ \zeta(\cdot):=\big((Y^{x'} \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(\cdot),(Z^{x'} \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(\cdot)\big)$.
Using the mean value theorem and the boundedness of $\nabla f$ and $\nabla g$ (i.e. the Lipschitz property of $f$ and $g$), we deduce
\begin{align*}
&\mathbb{E}\Big[ \sup_{0\leq t \leq T} \big( e^{\beta t} |Y_t^{x}-Y_t^{x'}|^2 \big)^{p/2} \Big] + \mathbb{E}\Big[ \big( \int_0^T e^{\beta s} |Z_s^{x}-Z_s^{x'}|^2 \mathrm{d}s \big)^{p/2} \Big]\\
& \quad \leq C \Big\{ \mathbb{E}\Big[ \big(e^{\beta T} |X_T^x-X_T^{x'}|^2 \big)^{p/2} \Big] + \mathbb{E}\Big[ \big(\int_0^T e^{\beta s} |((X^x-X^{x'}) \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(s)|^2 \mathrm{d}s \big)^{p/2} \Big] \Big\}
\\
& \quad
\leq C \Big\{ \mathbb{E}\Big[ \big(e^{\beta T} |X_T^x-X_T^{x'}|^2 \big)^{p/2} \Big] + \mathbb{E}\Big[ \big(\int_0^T e^{\beta s} |X_s^x-X_s^{x'}|^2 \mathrm{d}s \big)^{p/2} \Big] \Big\}
\\
&\quad\leq C |x-x'|^p,
\end{align*}
where the last two lines follow by applying the change of integration from \eqref{eq:tmp_02} and the first claim of the proposition. The continuity properties of the mappings $x \mathbb{A}psto Y^x$ and $x \mathbb{A}psto Z^x$ are now obtained by an application of Kolmogorov's continuity criterion (see for example \cite[IV.7 Corollary 1]{Protter}).
\end{proof}
If the generator exhibits additional regularity, it even turns out that the paths of $x \mapsto Y^x$ are continuously differentiable.
\begin{theorem}
\label{strong-diff-theorem}
Let $\beta>0$ and assume the conditions of Proposition \ref{prop:diff} can be verified for some $\widehat{p}>4$. Assume moreover that all (spatial) second order partial derivatives of $b,\sigma,g$ and $f$ exist, are continuous and uniformly bounded. Then, for any $(x,\varepsilon), (x',\varepsilon') \in \mathbb{R}^d \times (0,\infty)$, $h\in\mathbb{R}^d$ and $p\in (2, {\widehat{p}}/{2}]$ it holds that
\begin{align*}
\mathbb{E}\Big[\sup_{0\leq t \leq T} \Big(e^{\beta t}
\Big|\frac{Y_t^{x+\varepsilon h}-Y_t^x}{\varepsilon}
-\frac{Y_t^{x'+\varepsilon' h}-Y_t^{x'}}{\varepsilon'}\Big|^2
\Big)^{p/2}\Big] \leq C\, \big(|x-x'|^2 + |\varepsilon-\varepsilon'|^2\big)^{p/2}.
\end{align*}
Thus $\nabla_x Y^x$ belongs to $\mathcal{H}_\beta^{\widehat{p}}$ and the mapping $x\mapsto Y^x_t(\omega)$ is continuously differentiable for all $t\in[0,T]$, $\mathbb{P}$-almost surely.
\end{theorem}
It is known that the existence of the partial derivatives (or even of all the directional derivatives) of a function does not guarantee that the function is differentiable at a point. However, if all the partial derivatives of the function exist and are continuous in a neighborhood of the point, then the function is differentiable at that point and is in fact of class $C^1$.
Under the assumption that $m=1$ and in view of the results of the previous section, we know that all the (spatial) partial derivatives of $Y^x$ exist. The main result of Theorem \ref{strong-diff-theorem} is the continuity of those partial derivatives.
\begin{proof}
As in the previous proof, $C>0$ denotes a generic constant which can differ from line to line. Let $p>2$, $t\in[0,T]$ and $h\in \mathbb{R}^d\setminus\{0\}$.
For $(x,\varepsilon) \in \mathbb{R}^d \times (0,\infty)$ let $U^{x,\varepsilon} := \frac{Y^{x+\varepsilon h} - Y^x}{\varepsilon}$, $V^{x,\varepsilon} := \frac{Z^{x+\varepsilon h} - Z^x}{\varepsilon}$, $\xi^{x,\varepsilon} := \frac{g(X_T^{x+\varepsilon h}) - g(X_T^x)}{\varepsilon}$ and $\tilde{X}^{x,\varepsilon} := \frac{X^{x+\varepsilon h}-X^x}{\varepsilon}$. Using the notation from the proof of Proposition \ref{prop:diff}, the pair $(U^{x,\varepsilon},V^{x,\varepsilon})$ satisfies the BSDE
$$ U_t^{x,\varepsilon}=\xi^{x,\varepsilon}+\int_t^T \Phi(s,\zeta^{x,\varepsilon}(s)) \mathrm{d}s - \int_t^T V_s^{x,\varepsilon}\mathrm{d}W_s,$$
with
$\zeta^{x,\varepsilon}(t):=\big((U^{x,\varepsilon} \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t),(V^{x,\varepsilon} \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t)\big)$
and $\Phi(t,y,z):=(\tilde{X}^{x,\varepsilon} \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(t) A_{t,\mathcal{X}}^{x,\varepsilon} + y A_{t,\mathcal{Y}}^{x,\varepsilon} + z A_{t,\mathcal{Z}}^{x,\varepsilon}$.
Note that the terms $A_{\cdot,\ast}^{x,\varepsilon}$ with $\ast = \mathcal{X},\mathcal{Y}, \mathcal{Z}$ are given by \eqref{eq:A}.
We emphasize that, for any choice of $(x,\varepsilon)$, the arguments used in the proof of Corollary \ref{cor:nabla_Y_exists} and Proposition \ref{prop:diff} remain valid for the above auxiliary BSDE as far as the applicability of the a priori estimate of Proposition \ref{lemma:apriori} is concerned.
Let another pair $(x',\varepsilon') \in \mathbb{R}^d\times(0,\infty)$ be given. Applying Proposition \ref{lemma:apriori} yields
\begin{align*}
\| U^{x,\varepsilon}-U^{x',\varepsilon'} \|_{\mathcal{S}^p_\beta}^p
&
\leq C_p \Big\{ \mathbb{E}\Big[ \big(e^{\beta T} |\xi^{x,\varepsilon}-\xi^{x',\varepsilon'}|^2 \big)^{p/2} \Big] + \mathbb{E}\Big[ \big( \int_0^T e^{\frac{\beta}{2} s} |\delta_2 \Phi(s)| \mathrm{d}s \big)^p \Big] \Big\},
\end{align*}
with
\begin{align*}
&\delta_2 \Phi(t):= (\tilde{X}^{x,\varepsilon} \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(t) A_{t,\mathcal{X}}^{x,\varepsilon}-(\tilde{X}^{x',\varepsilon'} \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(t) A_{t,\mathcal{X}}^{x',\varepsilon'}
\\
&\hspace{2cm}
+(U^{x',\varepsilon'} \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t) (A_{t,\mathcal{Y}}^{x,\varepsilon}-A_{t,\mathcal{Y}}^{x',\varepsilon'})+(V^{x',\varepsilon'} \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t)(A_{t,\mathcal{Z}}^{x,\varepsilon}-A_{t,\mathcal{Z}}^{x',\varepsilon'}).
\end{align*}
Using the hypotheses on $f$ (\textit{i.e.} all partial derivatives up to order two are bounded), we find
\begin{align*}
|\delta_2 \Phi(t)|
&
\leq C \Big\{ |((\tilde{X}^{x,\varepsilon}-\tilde{X}^{x',\varepsilon'}) \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(t)| |A_{t,\mathcal{X}}^{x,\varepsilon}| + |(\tilde{X}^{x',\varepsilon'} \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(t)| |A_{t,\mathcal{X}}^{x,\varepsilon}-A_{t,\mathcal{X}}^{x',\varepsilon'}|
\\
&\quad
+ |(U^{x',\varepsilon'} \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t)||A_{t,\mathcal{Y}}^{x,\varepsilon}-A_{t,\mathcal{Y}}^{x',\varepsilon'}|+|(V^{x',\varepsilon'} \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t)| |A_{t,\mathcal{Z}}^{x,\varepsilon}-A_{t,\mathcal{Z}}^{x',\varepsilon'}|\Big\}.
\end{align*}
As a consequence
\begin{align*}
&
\| U^{x,\varepsilon}-U^{x',\varepsilon'} \|_{\mathcal{S}^p_\beta}^p
\\
&
\quad
\leq C \Big\{
\|\xi^{x,\varepsilon}-\xi^{x',\varepsilon'}\|_{L^p}^{p}
+
\mathbb{E}\Big[ \big( \int_0^T e^{\frac{\beta}{2} s} |((\tilde{X}^{x,\varepsilon}-\tilde{X}^{x',\varepsilon'}) \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(s)| |A_{s,\mathcal{X}}^{x,\varepsilon}| \mathrm{d}s \big)^p \Big]
\\
&\quad \quad
+
\mathbb{E}\Big[ \big( \int_0^T e^{\frac{\beta}{2} s} |(\tilde{X}^{x',\varepsilon'} \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(s)| |A_{s,\mathcal{X}}^{x,\varepsilon}-A_{s,\mathcal{X}}^{x',\varepsilon'}| \mathrm{d}s \big)^p \Big]
\\
&\quad \quad
+
\mathbb{E}\Big[ \big( \int_0^T e^{\frac{\beta}{2} s} |(U^{x',\varepsilon'} \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(s)||A_{s,\mathcal{Y}}^{x,\varepsilon}-A_{s,\mathcal{Y}}^{x',\varepsilon'}| \mathrm{d}s \big)^p \Big]
\\
&\quad \quad
+
\mathbb{E}\Big[ \big( \int_0^T e^{\frac{\beta}{2} s} |(V^{x',\varepsilon'} \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(s)||A_{s,\mathcal{Z}}^{x,\varepsilon}-A_{s,\mathcal{Z}}^{x',\varepsilon'}| \mathrm{d}s \big)^p \Big] \Big\}
\\
&\quad
\leq C \Big\{
\|\xi^{x,\varepsilon}-\xi^{x',\varepsilon'}\|_{L^p}^{p}
+
\| \tilde{X}^{x,\varepsilon}-\tilde{X}^{x',\varepsilon'} \|_{\mathcal{H}_\beta^{2p}}^{p}
\| A_{\cdot,\mathcal{X}}^{x,\varepsilon} \|_{\mathcal{H}_\beta^{2p}}^{p}
+
\| \tilde{X}^{x',\varepsilon'} \|_{\mathcal{H}_\beta^{2p}}^{p}
\| A_{\cdot,\mathcal{X}}^{x,\varepsilon} - A_{\cdot,\mathcal{X}}^{x',\varepsilon'} \|_{\mathcal{H}_\beta^{2p}}^{p}
\\
&
\qquad\qquad
+
\| U^{x',\varepsilon'} \|_{\mathcal{H}_\beta^{2p}}^{p}
\| A_{\cdot,\mathcal{Y}}^{x,\varepsilon} - A_{\cdot,\mathcal{Y}}^{x',\varepsilon'} \|_{\mathcal{H}_\beta^{2p}}^{p}
+
\| V^{x',\varepsilon'} \|_{\mathcal{H}_\beta^{2p}}^{p}
\| A_{\cdot,\mathcal{Z}}^{x,\varepsilon} - A_{\cdot,\mathcal{Z}}^{x',\varepsilon'} \|_{\mathcal{H}_\beta^{2p}}^{p}
\Big\},
\end{align*}
where for each term we used the Cauchy-Schwarz inequality twice, that $e^{\frac{\beta}{2} t} \leq e^{\beta t}$ and \eqref{eq:tmp_02}.
Since $(U^{x',\varepsilon'},V^{x',\varepsilon'})$ is a solution in $\mathcal{S}^p_\beta \times \mathcal{H}^p_\beta$ of a BSDE, it follows from Corollary \ref{coro:momentestimates} that the quantities $\mathbb{E}\big[ \big( \int_0^T e^{\beta s} |U_s^{x',\varepsilon'}|^2 \mathrm{d}s \big)^p \big]$ and $\mathbb{E}\big[ \big( \int_0^T e^{\beta s} |V_s^{x',\varepsilon'}|^2 \mathrm{d}s \big)^p \big]$ are finite and uniformly bounded in $\varepsilon'$. By the assumptions on $b$ and $\sigma$, we have
$$\mathbb{E}\Big[ \big( \int_0^T e^{\beta s} |\tilde{X}_s^{x',\varepsilon'}|^2 \mathrm{d}s \big)^p \Big]^{1/2}<\infty.$$
In addition, by the boundedness of $\nabla f$ we have that $|A_{\cdot,\ast}^{x,\varepsilon}|$ and $|A_{\cdot,\ast}^{x',\varepsilon'}|$ are uniformly bounded (in their several parameters) with $\ast = \mathcal{X},\mathcal{Y},\mathcal{Z}$. Thus the estimate reduces to
\begin{align}
\label{eq:strongdiff2}
\nonumber
\| U^{x,\varepsilon}-U^{x',\varepsilon'} \|_{\mathcal{S}^p_\beta}^p
&\leq C \Big\{
\|\xi^{x,\varepsilon}-\xi^{x',\varepsilon'}\|_{L^p}^{p}
+
\| \tilde{X}^{x,\varepsilon}-\tilde{X}^{x',\varepsilon'} \|_{\mathcal{H}_\beta^{2p}}^{p}
+
\| A_{\cdot,\mathcal{X}}^{x,\varepsilon} - A_{\cdot,\mathcal{X}}^{x',\varepsilon'} \|_{\mathcal{H}_\beta^{2p}}^{p}
\nonumber
\\
& \hspace{1cm}
+
\| A_{\cdot,\mathcal{Y}}^{x,\varepsilon} - A_{\cdot,\mathcal{Y}}^{x',\varepsilon'} \|_{\mathcal{H}_\beta^{2p}}^{p}
+
\| A_{\cdot,\mathcal{Z}}^{x,\varepsilon} - A_{\cdot,\mathcal{Z}}^{x',\varepsilon'} \|_{\mathcal{H}_\beta^{2p}}^{p}
\Big\}.
\end{align}
Using the mean value theorem and the fact that the second order partial derivatives are bounded it holds that
\begin{align*}
&|A_{t,\mathcal{X}}^{x,\varepsilon}-A_{t,\mathcal{X}}^{x',\varepsilon'}| + |A_{t,\mathcal{Y}}^{x,\varepsilon}-A_{t,\mathcal{Y}}^{x',\varepsilon'}| + |A_{t,\mathcal{Z}}^{x,\varepsilon}-A_{t,\mathcal{Z}}^{x',\varepsilon'}|
\\
& \hspace{0.5cm}
\leq C \Big\{ \big(|X^{x+\varepsilon h}-X^{x'+\varepsilon' h}| \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}}\big)(t) + \big(|Y^{x+\varepsilon h}-Y^{x'+\varepsilon' h}| \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}\big)(t)
\\
& \hspace{1cm}
+\big(|Z^{x+\varepsilon h}-Z^{x'+\varepsilon' h}| \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}\big)(t) + \big(|X^{x}-X^{x'}| \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}}\big)(t)
\\
& \hspace{1cm}
+ \big(|Y^{x}-Y^{x'}| \cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}\big)(t) + \big(|Z^{x}-Z^{x'}| \cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}\big)(t)\Big\}.
\end{align*}
Plugging the right-hand side of this inequality into \eqref{eq:strongdiff2} and using Lemma \ref{lemma:interchange} one gets
\begin{align*}
\| U^{x,\varepsilon}-U^{x',\varepsilon'} \|_{\mathcal{S}^p_\beta}^p
&
\leq C \Big\{
\|\xi^{x,\varepsilon}-\xi^{x',\varepsilon'}\|_{L^p}^{p}
+
\| \tilde{X}^{x,\varepsilon}-\tilde{X}^{x',\varepsilon'} \|_{\mathcal{H}_\beta^{2p}}^{p}
+
\| X^x-X^{x'} \|_{\mathcal{H}_\beta^{2p}}^{p}
\\
&\qquad
+
\| X^{x+\varepsilon h}-X^{x'+\varepsilon' h} \|_{\mathcal{H}_\beta^{2p}}^{p}
+
\| Y^{x+\varepsilon h}-Y^{x'+\varepsilon' h} \|_{\mathcal{H}_\beta^{2p}}^{p}
\\
&\qquad
+
\| Z^{x+\varepsilon h}-Z^{x'+\varepsilon' h} \|_{\mathcal{H}_\beta^{2p}}^{p}
+
\| Y^x-Y^{x'} \|_{\mathcal{H}_\beta^{2p}}^{p}
+
\| Z^x-Z^{x'} \|_{\mathcal{H}_\beta^{2p}}^{p}
\Big\}.
\end{align*}
Since $b$, $\sigma$ and $g$ are twice continuously differentiable with bounded derivatives we have the following estimate
$$ \mathbb{E}\big[ \,|\xi^{x,\varepsilon}-\xi^{x',\varepsilon'}|^p \big] \leq C (|x-x'|^2 + |\varepsilon-\varepsilon'|^2)^{p/2},$$
which is proved for example in \cite[Lemma 7.4]{AnkirchnerImkellerDosReis}. This result combined with Proposition \ref{prop:cont} leads to
$$\mathbb{E}\Big[\sup_{0\leq t \leq T} \big( e^{\beta t} |U_t^{x,\varepsilon}-U_t^{x',\varepsilon'}|^2\big)^{p/2}\Big] \leq C \big(|x-x'|^2 + |\varepsilon-\varepsilon'|^2\big)^{p/2}.$$
The last claim of the theorem follows using Kolmogorov's continuity criterion (see for example \cite[IV.7 Corollary 1]{Protter}).
\end{proof}
\section{Representation formulas and path regularity}
\label{section:representation}
One of the fundamental results in the setting of FBSDE concerns the relationship between the Malliavin and the variational (classical) derivatives of the solution process: the Malliavin derivative of the solution of the BSDE can be expressed as a product of the variational derivatives of the BSDE's solution (taken with respect to the initial condition of the SDE) and the variational derivatives of the forward diffusion. This relationship is known to hold both in the standard Lipschitz generator setting (see Proposition 5.9 of \cite{97KPQ}) as well as in the quadratic generator case (see e.g. Theorem 2.9 of \cite{ImkellerDosReis}) for classical BSDE without time delayed generators.
In this section we show that this relationship still holds for decoupled FBSDE with time delayed generators. Such a result is somewhat surprising since it is normally dependent on a Markovian structure for the solution of the BSDE that exists for non-time delayed BSDE and which fails to materialize for time delayed BSDE. Imperative for this relationship to hold is the fact that the forward process $X$ is Markovian along with a good behavior of the terminal condition.
As in the previous section, whenever we consider the delay FBSDE \eqref{eq:fwd1}-\eqref{eq:bwd1}, we assume that all conditions to ensure the existence of a unique solution $(X,Y,Z)$ are in force. Moreover, since for $\beta \geq 0$, all $\beta$-norms are equivalent, in the following we content ourselves with giving results for $\beta=0$. Recall that we assume $m=1$, i.e. the delay BSDE is \emph{not} vector-valued.
\subsection*{Malliavin's differentiability of FBSDE with time delayed generators}
We recall Theorem 4.1 of \cite{DelongImkeller2}, modified to our FBSDE setting. Theorem 4.1 from \cite{DelongImkeller2} shows that the solutions of time delayed BSDE are Malliavin differentiable, and as a consequence, it can be deduced that the solution of the time delayed FBSDE \eqref{eq:fwd1}-\eqref{eq:bwd1} is also Malliavin differentiable. Under the condition (F3) on the coefficients of the forward equation \eqref{eq:fwd1}, the Malliavin differentiability of the forward process $X$ is a standard result, see for instance Theorem 2.2.1 in \cite{nualart1995}. We denote the solution to the equations \eqref{eq:fwd1}-\eqref{eq:bwd1} by $(X,Y,Z)$. The next result states the Malliavin differentiability of $(X,Y,Z)$. Using the notation introduced in Section 3, we define for $0\leq u\leq t\leq T$
\begin{align}
\label{eq:malliaviontheta}
\nonumber
(D_u \Theta)(t)
&=\big((D_u X \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(t),(D_u Y\cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(t),(D_u Z\cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(t)\big)
\\
&=\Big( \int_{-T}^0 D_u X_{t+v} {\alpha_{\scriptscriptstyle{\mathcal{X}}}}(\mathrm{d} v), \int_{-T}^0 D_u Y_{t+v} {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}(\mathrm{d} v), \int_{-T}^0 D_u Z_{t+v} {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}(\mathrm{d} v) \Big).
\end{align}
We define in the canonical way\footnote{See Section 2.2 of \cite{ImkellerDosReis}, Section 5.2 of \cite{97KPQ} or simply \cite{nualart1995}} the space $\mathbb{L}_{1,2}$ as the space of progressively measurable processes, $X\in\mathcal{H}^2$, that are Malliavin differentiable and normed by $\|X\|_{\mathbb{L}_{1,2}}=\mathbb{E}[ \int_0^T |X_s|^2 \mathrm{d}s +\int_0^T \int_0^T |D_u X_s|^2 \mathrm{d}s \mathrm{d} u]^{1/2}$.
\begin{theorem}
\label{malliavindifftheo}
Take $p=2$, $m=1$ and assume the conditions of Corollary \ref{cor:nabla_Y_exists} hold. Then $(X,Y,Z)$ are Malliavin differentiable and their derivatives $(DX,DY,DZ)$ solve uniquely in $\mathbb{L}_{1,2}\times \mathbb{L}_{1,2} \times\mathbb{L}_{1,2}$ the following time delayed FBSDE:
\begin{align}
\label{eq:DX}
D_u X_t &= \sigma(u,X_u) + \int_u^t \nabla_x b(s,X_s)D_u X_s \mathrm{d}s + \int_u^t \nabla_x \sigma(s,X_s) D_u X_s \mathrm{d}W_s,\\
\label{eq:malliavin_Y}
D_u Y_t &= \nabla g(X_T)D_u X_T - \int_t^T D_u Z_s \mathrm{d}W_s + \int_t^T
\big\langle (\nabla f)\big(s, \Theta(s)\big) , (D_u \Theta)(s) \big\rangle \mathrm{d}s,
\end{align}
for $0 \leq u \leq t \leq T$ (zero otherwise)
with $\Theta$ and $D\Theta$ given by \eqref{eq:theta} and \eqref{eq:malliaviontheta} respectively. Furthermore, $\{D_t Y_t: t\in[0,T]\}$ is a version of $\{Z_t:t\in[0,T]\}$.
\end{theorem}
\begin{proof}
The results concerning the forward component are well known, see \cite{nualart1995} or \cite{ImkellerDosReis}. The conditions of Corollary \ref{cor:nabla_Y_exists} ensure that Theorem 4.1 from \cite{DelongImkeller2} can be applied. Hence $Y$ and $Z$ are Malliavin differentiable. The representation of $Z$ by the trace of the Malliavin derivative of $Y$ follows as well from the cited result.
\end{proof}
\subsection*{The representation formulas}
We now present the representation formulas for \eqref{eq:DX} and \eqref{eq:malliavin_Y} which are effectively expressed in terms of the variational derivatives $\nabla X, \nabla Y$ and $\nabla Z$.
\begin{theorem}\label{proporepresentationformulas}
Let the conditions of Theorem 4.1 hold. Let $(X,Y,Z)$, $(\nabla X, \nabla Y, \nabla Z)$ and $(D X, D Y, D Z)$ denote the solutions of FBSDE \eqref{eq:fwd1}-\eqref{eq:bwd1}, \eqref{eq:nabla_X}-\eqref{eq:nabla_Y} and \eqref{eq:DX}-\eqref{eq:malliavin_Y} respectively. Then the following representation formulas hold:
\begin{align}
\label{repformulaforDX}
D_u X_t &= \nabla X_t (\nabla X_u)^{-1} \sigma(u,X_u)\mathbbm{1}_{\{u\leq t\}},& t,u\in[0,T],\ \mathrm{d}\mathbb{P}-a.s.\\
\nonumber
D_u Y_t &= \nabla Y_t (\nabla X_u)^{-1} \sigma(u,X_u)\mathbbm{1}_{\{u\leq t\}},& t,u\in[0,T],\ \mathrm{d} \mathbb{P}-a.s.\\
\label{repformulaforZ}
Z_t &= \nabla Y_t (\nabla X_t)^{-1} \sigma(t,X_t),& t\in[0,T],\ \mathrm{d} \mathbb{P}\otimes \mathrm{d} t-a.s.\\
\nonumber
D_u Z_t &= \nabla Z_t (\nabla X_u)^{-1} \sigma(u,X_u)\mathbbm{1}_{\{u\leq t\}},& t,u\in[0,T],\ \mathrm{d} \mathbb{P}\otimes \mathrm{d} t-a.s.
\end{align}
\end{theorem}
\begin{proof}
As in Theorem \ref{malliavindifftheo} we remark briefly that the properties of the forward component are well known and hence equality \eqref{repformulaforDX} holds, see \cite{nualart1995} or \cite{ImkellerDosReis}. Theorem \ref{malliavindifftheo} ensures that $(DX,DY,DZ)$ is the unique solution of the time delayed FBSDE \eqref{eq:DX}-\eqref{eq:malliavin_Y}. Throughout let $t \in [0,T]$ and $u \in [0,t]$. We define the processes
\[
U_{u,t}=\nabla Y_t (\nabla X_u)^{-1} \sigma(u,X_u) \mathbbm{1}_{\{u\leq t\}}
\ \text{ and }\
V_{u,t}=\nabla Z_t (\nabla X_u)^{-1} \sigma(u,X_u) \mathbbm{1}_{\{u\leq t\}},
\]
and for $s\in [0,T]$, we set $D_u X(s) = \int_{-T}^0 D_u X_{s+v} {\alpha_{\scriptscriptstyle{\mathcal{X}}}}( \mathrm{d} v )$,
\begin{align*}
U_u (s) &= \int_{-T}^0 U_{u,s+v} {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}(\mathrm{d} v) = \int_{-T}^0 \nabla Y_{s+v} \big( \nabla X_u \big)^{-1} \sigma(u,X_u) \mathbbm{1}_{\{u \leq s+v\}} {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}(\mathrm{d} v),\\
V_u (s) &= \int_{-T}^0 V_{u,s+v} {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}(\mathrm{d} v) = \int_{-T}^0 \nabla Z_{s+v} \big( \nabla X_u \big)^{-1} \sigma(u,X_u) \mathbbm{1}_{\{u \leq s+v\}} {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}(\mathrm{d} v),
\end{align*}
compare also with the notation in \eqref{eq:notation1}. Multiplying the BSDE \eqref{eq:nabla_Y} with $(\nabla X_u)^{-1} \sigma(u,X_u)$ and then using \eqref{repformulaforDX} we obtain for any $0\leq u\leq t\leq T$ $\mathrm{d} \mathbb{P}$-a.s. that
\begin{align*}
U_{u,t} &= \nabla g(X_T)D_u X_T - \int_t^T V_{u,s} \mathrm{d}W_s\\
& \qquad + \int_t^T
\big\langle (\nabla f)\big(s, \Theta(s)\big)
, \big( D_{u}X(s), U_{u}(s), V_{u}(s)\big) \big\rangle \mathrm{d}s,
\end{align*}
where $\Theta$ is given by $\Theta(\cdot) = \big((X \cdot {\alpha_{\scriptscriptstyle{\mathcal{X}}}})(\cdot),(Y\cdot {\alpha_{\scriptscriptstyle{\mathcal{Y}}}})(\cdot),(Z\cdot {\alpha_{\scriptscriptstyle{\mathcal{Z}}}})(\cdot)\big)$ (compare with \eqref{eq:theta} from section \ref{section:diff}). Now, Theorem \ref{malliavindifftheo} states that the solution of BSDE \eqref{eq:malliavin_Y} is unique, hence $(U,V)$ must coincide with $(DY,DZ)$. Another way to see this would be to use the a priori estimates of Proposition \ref{lemma:apriori} with \eqref{eq:malliavin_Y} and the above BSDE.
Formula \eqref{repformulaforZ} follows easily from a combination of the representation formula for $D_u Y_t$ combined with $D_t Y_t = Z_t$, $\mathrm{d} \mathbb{P}\otimes \mathrm{d} t$-a.s. (see Theorem \ref{malliavindifftheo}).
\end{proof}
\subsection*{Implications of the representation formula}
The representation formulas in the previous theorem allow for a deeper analysis of the control process $Z$ concerning its path properties.
\begin{theorem}\label{continuitytheorem}
Let $p\geq 2$, assume that $|f(\cdot,0,0,0)|$ is uniformly bounded
and that the conditions of Corollary \ref{cor:nabla_Y_exists} hold.
Then the mapping $t\mapsto Z_t$ is continuous $\mathrm{d} \mathbb{P}$-a.s. If moreover we have $p>2$, then we also have \[\|Z\|_{\mathcal{S}^q_0}<\infty \ \text{ for }q\in[2,p).\] In particular, for $p> 2$ we have for every $s,t \in [0,T]$ that $\mathbb{E}\big[\,|Y_t-Y_s|^p\big]\leq C |t-s|^{p/2}$ and that $Y$ has continuous paths.
\end{theorem}
\begin{proof}
It is fairly easy to show that $\big(\nabla Y_t (\nabla X_t)^{-1} \sigma(t,X_t)\big)_{t\in[0,T]}$ is continuous. By assumption, $\sigma$ is a continuous function and it is well known that both processes $(\nabla X)^{-1}$ and $X$ have continuous paths. $\nabla Y$ is continuous because its dynamics is given as a sum of a stochastic integral of a predictable process against a Brownian motion (so a continuous martingale) and a Lebesgue integral with well behaved integrand. Since $Z$ is a version of this continuous process, $Z$ admits a continuous modification, with which we identify it; hence $Z$ has continuous paths.
Now since $Z$ has continuous paths, the representation formula \eqref{repformulaforZ} holds not only $\mathrm{d}\mathbb{P}\otimes\mathrm{d} t$-almost surely but in fact for all $t\in[0,T]$ and $\mathbb{P}$-almost all $\omega\in\Omega$. Using that $\nabla Y \in \mathcal{S}^p_0$ for some $p>2$ (see Corollary \ref{cor:nabla_Y_exists} and Proposition \ref{prop:diff}), $(\nabla X)^{-1},\sigma(\cdot,X)\in \mathcal{S}^q_0$ for any $q\geq 2$ and H\"older's inequality, we conclude that $Z\in\mathcal{S}^q_0$ for every $q\in[2,p)$.
The property concerning the increments of $Y$ is easy to prove since $X,Y,Z\in \mathcal{S}^p_0$ for some $p> 2$. For $0\leq s\leq t\leq T$, we have (recall that $| f(\cdot,\Theta(\cdot)) | \leq |f(\cdot,\Theta(\cdot)) - f(\cdot,0,0,0)| + |f(\cdot,0,0,0)|$ and that $|f(\cdot,0,0,0)|$ is uniformly bounded)
\begin{align*}
Y_t-Y_s &= 0 +\int_s^t f\big(u, \Theta(u)\big)\mathrm{d} u-\int_s^t Z_u\mathrm{d} W_u,
\end{align*}
so using the assumptions and the Burkholder-Davis-Gundy inequality, we get for a generic constant $C$ which may vary from line to line and some $p> 2$
\begin{align*}
\mathbb{E}\big[\,|Y_t-Y_s|^p\big] &\leq C\, \mathbb{E}\Big[ \,\Big|\int_s^t f\big(u, \Theta(u)\big)\mathrm{d} u\Big|^p+\Big|\int_s^t Z_u\mathrm{d} W_u\Big|^p \Big]\\
&\leq C\, |t-s|^{p/2} \big( 1 + \|X\|_{\mathcal{S}^p_0}^p+ \|Y\|_{\mathcal{S}^p_0}^p + \|Z\|_{\mathcal{S}^p_0}^p\big)+
\mathbb{E}\big[\Big(\int_s^t |Z_u|^2\mathrm{d} u\Big)^{p/2} \big]\\
&\leq C\, |t-s|^{p/2}.
\end{align*}
This in particular yields the applicability of Kolmogorov's continuity criterion to $Y$.
\end{proof}
\subsubsection*{The $L^2$-regularity result}
We finish this section with the $L^2$-regularity result for the control component $Z$ of the solution of the time delayed FBSDE. Let $\pi: 0=t_0<t_1<\cdots<t_N=T$ be a partition of the time interval $[0,T]$ with mesh size $|\pi|$. We define a set of random variables via
\begin{align*}
\bar{Z}^\pi_{t_i}&=\frac1{t_{i+1}-t_i}\mathbb{E}\Big[\int_{t_i}^{t_{i+1}}
Z_s\mathrm{d}s\big|\mathcal{F}_{t_i}\Big], \textrm{ for all partition
points } t_i,\ 0\le i\le N-1.
\end{align*}
The best square integrable $\mathcal{F}_{t_i}$-measurable approximation of $\frac{1}{t_{i+1}-t_i}\int_{t_i}^{t_{i+1}} Z_s\mathrm{d}s$ is given by $\bar{Z}^\pi_{t_i}$, i.e.
\begin{align}\label{eq:leastsquare}
\mathbb{E} \Big[\, \big| \frac{1}{t_{i+1}-t_i}\int_{t_i}^{t_{i+1}} Z_s\mathrm{d}s - \bar{Z}^\pi_{t_i} \big|^2 \Big] &= \inf_{V \in L^2(\mathcal{F}_{t_i})} \mathbb{E} \Big[\, \big| \frac{1}{t_{i+1}-t_i}\int_{t_i}^{t_{i+1}} Z_s\mathrm{d}s - V \big|^2 \Big].
\end{align}
We associate the process $(\bar{Z}^\pi_t)_{t\in[0,T]}$ to $\{\bar{Z}^\pi_{t_i}\}_{i=0,\cdots,N-1}$ via $\bar{Z}^\pi_t = \bar{Z}^\pi_{t_i}$ for $t\in[t_i, t_{i+1}),\, 0\le i\le N-1$. Similarly, for the set of random variables
$\{Z_{t_i}:t_i\in\pi\}$, we associate the process $(Z^\pi_t)_{t\in[0,T]}$ via $Z^\pi_t = Z_{t_i}$ for $t\in[t_i, t_{i+1}),\, 0\le i\le N-1$. The definition of the conditional expectation implies that for every $i=0,\ldots,N-1$, we have
$$ \mathbb{E}[\,|Z^\pi_{{t_i}}|^2] -2\, \mathbb{E}[\,Z^\pi_{{t_i}}\, \bar{Z}^\pi_{{t_i}}\,] \geq -\mathbb{E}[\,|\bar{Z}^\pi_{{t_i}}|^2],$$
from which it follows that $\bar{Z}^\pi$ is the best $\mathcal{H}^2$-approximation of $Z$, leading to
\[
\| Z-\bar Z^\pi \|_{\mathcal{H}^2}\leq \|Z-Z^\pi \|_{\mathcal{H}^2}\to 0,\ \textrm{ as }\ |\pi|\to 0.
\]
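For completeness (this is a standard $L^2$-projection observation, not specific to the delayed setting), let us spell out the orthogonality behind \eqref{eq:leastsquare} and the last display: for any $V\in L^2(\mathcal{F}_{t_i})$ and $s\in[t_i,t_{i+1})$, the tower property gives $\int_{t_i}^{t_{i+1}}\mathbb{E}\big[(Z_s-\bar{Z}^\pi_{t_i})(\bar{Z}^\pi_{t_i}-V)\big]\mathrm{d}s=(t_{i+1}-t_i)\,\mathbb{E}\Big[(\bar{Z}^\pi_{t_i}-V)\,\mathbb{E}\big[\tfrac{1}{t_{i+1}-t_i}\int_{t_i}^{t_{i+1}}Z_s\mathrm{d}s-\bar{Z}^\pi_{t_i}\,\big|\,\mathcal{F}_{t_i}\big]\Big]=0$, and hence
\[
\int_{t_i}^{t_{i+1}}\mathbb{E}\big[\,|Z_s-V|^2\big]\mathrm{d}s
=\int_{t_i}^{t_{i+1}}\mathbb{E}\big[\,|Z_s-\bar{Z}^\pi_{t_i}|^2\big]\mathrm{d}s
+(t_{i+1}-t_i)\,\mathbb{E}\big[\,|\bar{Z}^\pi_{t_i}-V|^2\big].
\]
Choosing $V=Z_{t_i}$ and summing over $i$ yields the comparison with $Z^\pi$ stated above.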
Using Theorem \ref{continuitytheorem} we are able to determine explicitly the rate of convergence of the above limit. The following result extends Theorem 5.6 from \cite{ImkellerDosReis} to the setting of FBSDE with time delayed generators.
\begin{theorem}[$L^2$-regularity]
\label{theo:l^21regularity}
Assume that the conditions of Theorem \ref{continuitytheorem} hold for some $p>2$ and assume further that $\sigma$ is a $\frac12$-H\"older continuous function in its time variable. Then
\begin{align*}
\max_{0\leq i\leq N-1}\Big\{
\sup_{t_i\leq t\leq t_{i+1}}
\mathbb{E}\big[\, |Y_t -Y_{t_i}|^2\ \big]\, \Big\}+
\sum_{i=0}^{N-1} \mathbb{E}\Big[ \int_{t_i}^{t_{i+1}}|Z_s-\bar{Z}^\pi_{t_i}|^2\mathrm{d}s \Big]\leq C |\pi|.
\end{align*}
\end{theorem}
\begin{proof}
The result concerning the $Y$ component follows immediately from Theorem \ref{continuitytheorem}. As for the result for $Z$, let us remark that since $\bar{Z}^\pi$ is the best $\mathcal{H}^2$-approximation of $Z$ over $\pi$ in the sense of \eqref{eq:leastsquare}, it follows that
\[
\sum_{i=0}^{N-1} \mathbb{E}\Big[ \int_{t_i}^{t_{i+1}}|Z_s-\bar{Z}^\pi_{t_i}|^2\mathrm{d}s \Big]
\leq
\sum_{i=0}^{N-1} \mathbb{E}\Big[ \int_{t_i}^{t_{i+1}}|Z_s-Z_{t_i}|^2\mathrm{d}s \Big]
=
\sum_{i=0}^{N-1} \int_{t_i}^{t_{i+1}}\mathbb{E}\big[\, |Z_s-Z_{t_i}|^2 \big] \mathrm{d}s,
\]
where the last equality follows from the use of Fubini's theorem to switch the integration order (recall that $Z\in\mathcal{S}^p_0$ for some $p>2$).
Theorem \ref{continuitytheorem} allows to use \eqref{repformulaforZ} to rewrite the difference inside the expectation. We have $Z_s-Z_{t_i}= I_1+I_2+I_3$
with $I_1=[\nabla Y_s-\nabla Y_{t_i}] (\nabla X_{t_i})^{-1}\sigma({t_i},X_{t_i})$,
$I_2=\nabla Y_s[(\nabla X_s)^{-1}-(\nabla X_{t_i})^{-1}]\sigma(t_i,X_{t_i})$, $I_3=\nabla Y_s(\nabla X_s)^{-1}[\sigma(s,X_s)-\sigma(t_i,X_{t_i})]$ and $s\in[t_i,t_{i+1}]$.
From the proof of part (ii) of Theorem 5.8 in \cite{pathregcorrection2010} one obtains that
\[
\sum_{i=0}^{N-1} \mathbb{E}\Big[ \int_{t_i}^{t_{i+1}} |I_2|^2\mathrm{d}s +\int_{t_i}^{t_{i+1}} |I_3|^2\mathrm{d}s \Big] \leq C|\pi|.
\]
The calculations that lead to the above result are quite easy to carry out. They rely on known estimates for SDEs found for instance in Theorems 2.3 and 2.4 of \cite{ImkellerDosReis} combined with the fact that $\nabla Y\in\mathcal{S}^p$ for some $p>2$.
To handle the term $I_1$ one needs to proceed with more care. Let us start with a simple trick:
\begin{align}
\label{trickwithconditionalexpectation}
\mathbb{E}\Big[\,|(\nabla Y_s-\nabla Y_{t_i}) (\nabla X_{t_i})^{-1}\sigma({t_i},X_{t_i})|^2\Big]
=
\mathbb{E}\Big[\,\mathbb{E}\big[\,|\nabla Y_s-\nabla Y_{t_i}|^2\big|\mathcal{F}_{t_i}\big] |(\nabla X_{t_i})^{-1}\sigma({t_i},X_{t_i})|^2\Big].
\end{align}
Writing the BSDE for the difference $\nabla Y_s-\nabla Y_{t_i}$ for $s\in[t_i,t_{i+1}]$ we get for a generic constant $C>0$ that
\begin{align*}
\mathbb{E}\Big[\,|\nabla Y_s-\nabla Y_{t_i}|^2\Big|\mathcal{F}_{t_i}\Big] &
\leq C\,
\mathbb{E}\Big[\,|\int_{t_i}^s \big\langle (\nabla f)\big(r,\Theta(r)\big), (\nabla \Theta)(r)\big\rangle\mathrm{d} r|^2+\big|\int_{t_i}^s \nabla Z_r\mathrm{d} W_r\big|^2\Big|\mathcal{F}_{t_i}\Big]
\\
& \leq C\,
\mathbb{E}\Big[\,|\pi|\int_{t_i}^{t_{i+1}} |(\nabla \Theta)(r)|^2\mathrm{d} r + \int_{t_i}^{t_{i+1}} |\nabla Z_r|^2\mathrm{d} r\Big|\mathcal{F}_{t_i}\Big],
\end{align*}
where we used the uniform boundedness of the derivatives of $f$, Jensen's inequality, It\^o's isometry and proceeded to maximize over the time interval $[t_i,t_{i+1}]$. Combining the last line with \eqref{trickwithconditionalexpectation} and using the tower property, we obtain
\begin{align*}
& \sum_{i=0}^{N-1} \int_{t_i}^{t_{i+1}} \mathbb{E}\Big[\,\mathbb{E}\Big[\,|\nabla Y_s-\nabla Y_{t_i}|^2\Big|\mathcal{F}_{t_i}\Big] |(\nabla X_{t_i})^{-1}\sigma(t_i,X_{t_i})|^2\Big]\mathrm{d}s\\
&\qquad \leq C \sum_{i=0}^{N-1} |\pi|
\mathbb{E}\Big[\Big( |\pi|\int_{t_i}^{t_{i+1}} |(\nabla \Theta)(r)|^2\mathrm{d} r + \int_{t_i}^{t_{i+1}} |\nabla Z_r|^2\mathrm{d} r \Big) |(\nabla X_{t_i})^{-1}\sigma(t_i,X_{t_i})|^2\Big]\\
&\qquad \leq |\pi|
\mathbb{E}\Big[
\sup_{0\leq t\leq T}|(\nabla X_t)^{-1}\sigma(t,X_t)|^2\,
\sum_{i=0}^{N-1}
\Big( |\pi|\int_{t_i}^{t_{i+1}} |(\nabla \Theta)(r)|^2\mathrm{d} r + \int_{t_i}^{t_{i+1}} |\nabla Z_r|^2\mathrm{d} r \Big) \Big]\\
&\qquad = |\pi| \mathbb{E}\Big[
\sup_{0\leq t\leq T}|(\nabla X_t)^{-1}\sigma(t,X_t)|^2\,
\Big( |\pi|\int_0^T |(\nabla \Theta)(r)|^2\mathrm{d} r + \int_0^T |\nabla Z_r|^2\mathrm{d} r \Big) \Big]\\
& \qquad \leq C |\pi|,
\end{align*}
where in the last line we used the fact that $\nabla X, (\nabla X)^{-1},X \in \mathcal{S}^q_0$ for every $q\geq 2$ and that $\nabla Y, \nabla Z \in\mathcal{H}^p_0$ for some $p> 2$ (in combination with H\"older's inequality) to conclude the finiteness of the expectation.
Combining this estimate with the ones for $I_2$ and $I_3$ finishes the proof.
\end{proof}
\subsection*{Towards a time discretization of delay FBSDE}
Having established a path regularity result for FBSDE with time-delayed generators one can now start discussing a working numerical scheme. Given the nature of this class of BSDE, a time discretization naturally requires some decoupling technique to handle the backward-in-time nature of the equation together with the delay terms, which aggregate the solution forward in time from $0$.
Applying the backward time discretization from \cite{04BT} to \eqref{eq:fwd1}-\eqref{eq:bwd1}, we obtain for a partition $\pi: 0=t_0 < t_1 < \ldots < t_N = T$ with step size $\Delta_i=t_{i+1}-t_i$
\begin{align*}
Y^\pi_{t_N} &= g( X^\pi_{t_N}),
\\
Z^\pi_{t_i} &= \mathbb{E} \Big[ \frac{W_{t_{i+1}} - W_{t_{i} } }{ \Delta_i } Y^\pi_{t_{i+1}} | \mathcal{F}_{t_i} \Big],
\qquad
Y^\pi_{t_i} = \mathbb{E} \Big[ Y^\pi_{t_{i+1}} | \mathcal{F}_{t_i} \Big] + \Delta_i\, f( t_i, \Theta^\pi_{t_i} ),
\\
\text{where }\quad & \Theta^{\pi}_{t_i} =
\Big( \sum_{j=0}^{i} X^{\pi}_{t_j} {\alpha_{\scriptscriptstyle{\mathcal{X}}}}\big([t_j,t_{j+1})\big) , \sum_{j=0}^{i} Y^{\pi}_{t_j} {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}\big([t_j,t_{j+1})\big) , \sum_{j=0}^{i} Z^{\pi}_{t_j} {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}\big([t_j,t_{j+1})\big)\Big).
\end{align*}
This backward scheme cannot be implemented because in the computation of each $Y^\pi_{t_i}$ running backward from $i=N-1$ to $i=0$, we must evaluate $\Theta^\pi_{t_i}$, which depends on all $Y^\pi_{t_j}$, $Z^\pi_{t_j}$ running in the forward direction $j=0,\ldots,i$.
However, \cite{BenderDenk} propose for standard Lipschitz BSDEs a time discretization which mimics the Picard iteration technique for proving existence and uniqueness of BSDEs. Due to the fact that in each iteration step one solves an explicit BSDE, the scheme from \cite{BenderDenk} runs \emph{forward} in time. The price to pay is to control, apart from the error contribution of the time discretization, the additional error arising from the Picard iterates (see Theorem 2 in \cite{BenderDenk}). This idea adapts to equations \eqref{eq:fwd1}-\eqref{eq:bwd1} by exploiting the fact that the solution $(Y,Z)$ is obtained as the limit of $(Y^p,Z^p)$ as $p$ goes to infinity. Setting $(Y^0,Z^0) = (0,0)$, we have for $p\in \mathbb{N}_0$
\begin{align*}
Y^{p+1}_t &
= g(X_T) + \int_t^T f\big( s,\Theta^{p}(s) \big) \mathrm{d}s - \int_t^T Z^{p+1}_s \mathrm{d}W_s,\quad t\in[0,T]
\\
\text{where }\quad & \Theta^{p}(t)=
\Big(\int_{-T}^0 X_{t+v} {\alpha_{\scriptscriptstyle{\mathcal{X}}}}(\mathrm{d} v), \int_{-T}^0 Y^p_{t+v} {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}(\mathrm{d} v), \int_{-T}^0 Z^p_{t+v} {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}(\mathrm{d} v) \Big).
\end{align*}
The corresponding discretization is initiated by setting $(Y^{\pi,0},Z^{\pi,0}) = (0,0)$; then, iteratively for $p\in\mathbb{N}_0$ and $0\leq i\leq N-1$,
\begin{align*}
Y^{\pi,p+1}_{t_i} &= \mathbb{E} \Big[\ g\big( X^{\pi}_{t_N} \big) + \sum_{j=i}^{N-1}
f(t_j,\Theta^{\pi,p}_{t_j} ) \Delta_j \ \big| \mathcal{F}_{t_i}\Big],
\\
Z^{\pi,p+1}_{t_i} &= \mathbb{E} \Big[\ \frac{ W_{t_{i+1}} - W_{t_i} }{ \Delta_i} \Big( g(X^\pi_{t_N})+\sum_{j=i+1}^{N-1} f(t_j,\Theta^{\pi,p}_{t_j} )\Delta_j \Big) \big| \mathcal{F}_{t_i}\Big],
\\
\text{where }\quad
& \Theta^{\pi,p}_{t_i} =
\Big( \sum_{j=0}^{i} X^{\pi}_{t_j} {\alpha_{\scriptscriptstyle{\mathcal{X}}}}\big([t_j,t_{j+1})\big) , \sum_{j=0}^{i} Y^{\pi,p}_{t_j} {\alpha_{\scriptscriptstyle{\mathcal{Y}}}}\big([t_j,t_{j+1})\big) , \sum_{j=0}^{i} Z^{\pi,p}_{t_j} {\alpha_{\scriptscriptstyle{\mathcal{Z}}}}\big([t_j,t_{j+1})\big)\Big).
\end{align*}
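Purely as an illustration of the mechanics of this recursion (and not of its convergence), the following one-dimensional Monte Carlo sketch is our own ad-hoc implementation: the coefficients, the uniform averaging measures and, in particular, the replacement of the conditional expectations $\mathbb{E}[\,\cdot\,|\mathcal{F}_{t_i}]$ by a least-squares regression on $X_{t_i}$ are illustrative choices which are not part of the analysis in this paper (for genuinely delayed equations the regression on $X_{t_i}$ alone is only a heuristic proxy).
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
T, N, M, P = 1.0, 20, 50_000, 4     # horizon, grid points, sample paths, Picard sweeps
dt = T / N
t = np.linspace(0.0, T, N + 1)

b     = lambda s, x: -0.5 * x                         # illustrative forward coefficients
sigma = lambda s, x: 1.0 + 0.0 * x
g     = lambda x: np.sin(x)                           # terminal condition
f     = lambda s, xa, ya, za: -0.1 * ya + 0.05 * za   # generator on the averaged arguments
alpha = np.full(N, dt)                                # alpha_*([t_j, t_{j+1})): uniform averaging

def cond_exp(x_now, target, deg=4):
    """Heuristic stand-in for E[target | F_{t_i}]: polynomial regression on X_{t_i}."""
    A = np.vander(x_now, deg + 1)
    coef, *_ = np.linalg.lstsq(A, target, rcond=None)
    return A @ coef

# Euler scheme for the (non-delayed) forward diffusion
dW = rng.normal(0.0, np.sqrt(dt), size=(M, N))
X = np.empty((M, N + 1)); X[:, 0] = 1.0
for i in range(N):
    X[:, i + 1] = X[:, i] + b(t[i], X[:, i]) * dt + sigma(t[i], X[:, i]) * dW[:, i]

Y = np.zeros((M, N + 1)); Z = np.zeros((M, N))        # Picard iterate p = 0
for _ in range(P):
    # Theta^{pi,p}_{t_i}: running sums  sum_{j<=i} X_{t_j} alpha_j  (likewise for Y, Z)
    XA = np.cumsum(X[:, :N] * alpha, axis=1)
    YA = np.cumsum(Y[:, :N] * alpha, axis=1)
    ZA = np.cumsum(Z * alpha, axis=1)
    F = f(t[:N], XA, YA, ZA)                          # f(t_j, Theta^{pi,p}_{t_j}), j = 0,...,N-1
    # tail[:, i] = sum_{j=i}^{N-1} f(t_j, Theta^{pi,p}_{t_j}) Delta_j,  tail[:, N] = 0
    tail = np.hstack([np.cumsum((F * dt)[:, ::-1], axis=1)[:, ::-1], np.zeros((M, 1))])
    Ynew = np.zeros_like(Y); Znew = np.zeros_like(Z)
    Ynew[:, N] = g(X[:, N])
    for i in range(N):
        Ynew[:, i] = cond_exp(X[:, i], g(X[:, N]) + tail[:, i])
        Znew[:, i] = cond_exp(X[:, i], dW[:, i] / dt * (g(X[:, N]) + tail[:, i + 1]))
    Y, Z = Ynew, Znew

print("approximation of Y_0:", Y[:, 0].mean())
\end{verbatim}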
The proof of convergence for this time discretization scheme is left for future research.
\section*{Acknowledgments}
The authors are grateful to the anonymous referee for suggestions and comments which have greatly improved the readability of the paper.
\end{document}
\begin{document}
\swapnumbers
\theoremstyle{definition}
\newtheorem{defi}{Definition}[section]
\newtheorem{rem}[defi]{Remark}
\newtheorem{ques}[defi]{Question}
\newtheorem{expl}[defi]{Example}
\newtheorem{conj}[defi]{Conjecture}
\newtheorem{claim}[defi]{Claim}
\newtheorem{nota}[defi]{Notation}
\newtheorem{noth}[defi]{}
\newtheorem{hypo}[defi]{Hypotheses}
\theoremstyle{plain}
\newtheorem{prop}[defi]{Proposition}
\newtheorem{lemma}[defi]{Lemma}
\newtheorem{cor}[defi]{Corollary}
\newtheorem{thm}[defi]{Theorem}
\renewcommand{\proofname}{\textsl{\textbf{Proof}}}
\setcounter{MaxMatrixCols}{20}
\begin{center}
{\bf\Large On Ext-Quivers of Blocks of Weight Two\\ for Symmetric Groups}
Susanne Danz and Karin Erdmann
\today
\begin{abstract}
\noindent
In this paper we investigate blocks of symmetric groups of weight 2 over fields of odd characteristic $p$.
We develop an algorithm that relates the quivers of two such blocks forming a $(2:1)$-pair, as introduced by Scopes.
We then apply the algorithm to blocks whose $p$-cores are hook partitions, in order to explicitly determine the quivers of these blocks.
As a consequence we conclude that two $p$-blocks $B_1$ and $B_2$ of symmetric groups of weight $2$ whose $p$-cores are hook partitions
are Morita equivalent if and only if $B_2$ or its conjugate block is in the same Scopes class as $B_1$.
\noindent
{\bf Keywords:} symmetric group, Ext-quiver, Specht module, hook partition, Morita equivalence
\noindent
{\bf MR Subject Classification:} primary 20C20, 20C30; secondary 16D90
\end{abstract}
\end{center}
\section{Introduction}\label{sec intro}
In this paper, we study $p$-blocks of symmetric groups $\mathfrak{S}_n$ over $F$, where $F$ is an algebraically closed field of characteristic $p>0$.
As is well known, each such block is
parametrized by its ($p$-)weight $w\geqslant 0$ and its ($p$-)core, the latter being a partition
of $n-pw$.
It was proved by Scopes in \cite{Scopes1991} that there are only finitely many Morita equivalence classes of $p$-blocks for symmetric groups with
any given weight.
More precisely, given a non-negative integer $w$, there is a minimal list of $p$-blocks of symmetric groups of weight $w$,
described in terms of $p$-cores, such that every block of weight $w$ is Morita equivalent to one
of the blocks in this list; see \cite[3.10]{Richards1996}. This list has
size
$\frac{1}{2p}{2p \choose p-1} + \frac{1}{2}{ \lfloor{pw/2}\rfloor \choose \lfloor{p/2}\rfloor}$,
and it has recently been conjectured by Sambale in \cite{Sambale2018} that when $w=2$, no two blocks in this list are Morita equivalent.
(Sambale's Conjecture is stated for $p$-blocks over $\mathbb{Z}$, and he has verified his conjecture computationally, for $p\leqslant 11$.)
Here we now focus on $p$-blocks of symmetric groups of weight $w=2$, over $F$, for $p\geqslant 3$. Our aim is to give a precise graph-theoretic description
of the (Gabriel or Ext-) quivers of such blocks, provided their $p$-cores are hook partitions. Since Morita equivalent blocks have isomorphic quivers, we shall
also gain more evidence for Sambale's Conjecture. The case $p=3$ is somewhat special, as we shall explain in \ref{noth p=3}. Therefore, we shall
mostly consider the case where $p\geqslant 5$.
By work of Scopes \cite{Scopes1995}, Richards \cite{Richards1996}, Chuang--Tan \cite{ChuangTan2001}, Martin \cite{Martin1989,Martin1990}, and
others, much is known about the structure of weight-2 blocks in odd characteristic. In particular, Scopes \cite{Scopes1995} has shown that, for every $p$-block $B$ of weight $w=2$ in the minimal list mentioned above,
there is a finite sequence
of blocks of weight $2$ starting with the principal block of
$F\mathfrak{S}_{2p}$ such that two consecutive
blocks in the sequence are related by
what is called a $(2:1)$-pair.
Scopes has also shown that, whenever $(B,\bar{B})$ is a $(2:1)$-pair
of weight-2 blocks of $F\mathfrak{S}_n$ and $F\mathfrak{S}_{n-1}$, respectively,
a substantial part of the representation theory of $B$ can already be determined from that of $\bar{B}$; in particular,
the quivers of $B$ and $\bar{B}$ almost coincide, except for one vertex and its adjacent edges. Exploiting this, she has further shown that, for every
block $B$ of $F\mathfrak{S}_n$ of weight 2 and all simple $B$-modules $S$ and $T$, the dimension of ${\rm Ext}^1_B(S, T)$ is at most 1.
Since simple modules of symmetric groups are self-dual (see \cite[(7.1.14)]{JK1981}), one also has ${\rm Ext}^1_B(S, T)\cong {\rm Ext}^1_B(T,S)$. Hence, if
${\rm Ext}^1_B(S, T)\neq \{0\}$, then
one usually simply connects $S$ and $T$ by a single edge.
Improvements of Scopes's results due to
Richards \cite{Richards1996} and
Chuang--Tan \cite{ChuangTan2001}
lead to a
general description of the quiver of any such weight-2 block.
We now turn this into a combinatorial algorithm, which is the content of Theorem~\ref{thm Loewy except}. With this, we obtain our first main result of this paper:
\begin{thm}\label{thm main1}
Let $p\geqslant 5$, and let $B_{k,l}(n,p)$ be a block of $F\mathfrak{S}_n$ of $p$-weight $2$ and $p$-core $(k,1^l)$, for
some $k,l\in\mathbb{N}_0$. With the graphs defined in Appendix~\ref{sec quiv}, the Ext-quiver of $B_{k,l}(n,p)$ is isomorphic, as an undirected graph, to
\begin{enumerate}
\item[{\rm (a)}] $Q_{0,0}(p)$, if $k=l=0$,
\item[{\rm (b)}] $Q_{k,l}(p)$, if $1\leqslant k+l\leqslant p-1$,
\item[{\rm (c)}] $Q_{k-1,l-1}(p)$, if $p+1\leqslant k+l\leqslant 2p-1$.
\end{enumerate}
\end{thm}
In fact, in Section~\ref{sec main1}, we shall prove a more detailed version of this statement.
To do so, we start with the quiver of the
principal block of $F\mathfrak{S}_{2p}$, which has already been known by work of Martin \cite{Martin1989}.
In Section~\ref{sec B0} we shall give an elementary and self-contained proof of this result in the case $p\geqslant 5$. Our most important
ingredient here will be the decomposition matrix of the principal block of $F\mathfrak{S}_{2p}$, a precise description of the Loewy structures of the Specht modules
in this block, and the results of Chuang--Tan \cite{ChuangTan2001}.
The quiver of the principal block of $F\mathfrak{S}_{2p}$ at hand, we shall then be able to apply Theorem \ref{thm Loewy except}, which gives an algorithm that describes how the quivers of two blocks labelled by hook partitions and forming
a $(2:1)$-pair are related.
In principle, using this algorithm, one can compute the quiver of an arbitrary block of weight $2$.
However, the number of quivers one has to consider increases with the prime, so that finding a general description
seems to be rather difficult. We expect that Sambale's Conjecture might follow if one knew
the precise graph structure of the quivers of all weight-2 blocks.
Note that, in the notation of Theorem~\ref{thm main1}, the blocks $B_{k,0}(n,p)$, for $k\in\{0,\ldots,p-1\}$
are principal blocks.
It should be emphasized that the quivers of these have already been computed by Martin~\cite{Martin1989,Martin1990}, although the graphs
are not all
drawn correctly there, since they are not bipartite as they should be, by \cite{ChuangTan2001}.
The information on the quivers provided by Theorem~\ref{thm main1} is sufficient to distinguish Morita
equivalence classes of blocks of weight $w=2$ whose cores are hook partitions.
The following result
is a direct consequence of Proposition~\ref{prop main2}, which shows that, for such blocks, there are only the known Morita equivalences, that is, the isomorphism between a block
and its conjugate, and the Scopes equivalences.
\begin{thm}\label{thm main2}
Let $p\geqslant 5$ be a prime. Then there are precisely $(p-1)p/2+1$ Morita equivalence classes of $p$-blocks of symmetric groups
of $p$-weight $2$ whose $p$-cores are hook partitions. Representatives of these are labelled by those $p$-core partitions
$(k,1^l)$ satisfying one of the following conditions:
\begin{enumerate}
\item[{\rm (a)}] $k=l=0$, or
\item[{\rm (b)}] $k>l$ and $1\leqslant k+l\leqslant p-1$, or
\item[{\rm (c)}] $k>l$ and $p+1 < k+l\leqslant 2p-1$.
\end{enumerate}
\end{thm}
We shall give a proof of Proposition~\ref{prop main2} at the end of Section~\ref{sec main1}, by applying Proposition~\ref{prop graph isos}. The arguments will be completely graph-theoretic.
A remark on the cases $p\in\{2,3\}$ seems to be in order. There are five Scopes equivalence classes of $3$-blocks of symmetric groups of weight 2. Their
structure is completely understood, and their quivers easily determined. We shall present them in \ref{noth p=3}. The case $p=2$ is
not covered by \cite{Scopes1995}, and behaves differently. However, by \cite{Scopes1991}, every $2$-block of a symmetric
group of weight 2 is Morita equivalent to $F\mathfrak{S}_4$, or to the principal block of $F\mathfrak{S}_5$. Both are well known, and their quivers can be found in \cite[Appendix D(2B), D(2A)]{Erdmann1990}.
The present paper is organized as follows:
In Section 2 we summarize background on representations of symmetric groups and fix some general notation.
Section 3 recalls relevant results on blocks of weight $2$ from \cite{Scopes1995}, \cite{Richards1996}, and \cite{ChuangTan2001}. Furthermore,
we establish Theorem \ref{thm Loewy except}, which will be the key ingredient in our inductive proof of Theorem~\ref{thm main1} in Section~\ref{sec main1}.
In Section 4 we prove our above main results on
blocks whose cores are hook partitions, that is, Theorems \ref{thm main1} and \ref{thm main2}.
In the Appendix, we collect some useful abacus combinatorics that is used extensively throughout this paper. Furthermore, for convenience, we discuss the principal blocks of $F\mathfrak{S}_{2p}$ and $F\mathfrak{S}_{2p+1}$ and their quivers. The results presented in Appendix~\ref{sec B0} are not new,
but not too easily available in the literature. In the last part of the appendix, we introduce the graphs appearing in Theorem~\ref{thm main1}, and prove the combinatorial details for Theorem \ref{thm main2}.
\noindent
{\bf Acknowledgements:} We are grateful to the Mathematical Institute of the University of Oxford and the Department of Mathematics of the University of Eichst\"att-Ingolstadt
for their kind hospitality
during mutual visits. Moreover, we gratefully acknowledge financial support through a
Scheme 4 grant of the London Mathematical Society and a proFOR+ grant of the University of Eichst\"att-Ingolstadt.
Lastly, we should like to thank Tommy Hofmann and David Craven for their help with TikZ, and the referee for their careful reading of
an earlier version of the manuscript.
\section{Notation and Preliminaries}\label{sec pre}
Throughout this section, let $F$ be an algebraically closed field of characteristic $p>0$.
Whenever $G$ is a finite group, by an $FG$-module we understand a finitely generated left $FG$-module.
For background on general representation theory of finite groups we refer to \cite{NT}, for the standard notation and results
concerning representations of symmetric groups we refer to \cite{James1978,JK1981}.
\begin{noth}{\bf General notation.}\,\label{noth block nota}
(a)\, Suppose that $M$ and $N$ are $FG$-modules.
If $N$ is isomorphic to a direct summand of $M$, we write
$N\mid M$.
If $N$ and $M$ have the same composition factors, that is, represent the same element in the Grothendieck group
of $FG$, then we write $M\sim N$.
If $M$ and $N$ have no common composition factor, then we say that $M$ and $N$ are \textit{disjoint}.
For every simple $FG$-module $D$, we denote by $[M:D]$ the multiplicity of $D$ as a composition factor of $M$.
The $F$-linear dual of $M$ will be denoted by $M^*$.
(b)\, Let $G$ be a finite group and $H\leqslant G$. Let further $B$ be a block of $FG$ and $b$ a block of $FH$. We have
the usual (block) restriction and (block) induction functors
\begin{alignat*}{2}
\res_H^G&: FG-\textbf{mod}\to FH-\textbf{mod}\,,& \quad \ind_H^G&: FH-\textbf{mod}\to FG-\textbf{mod}\,,\\
\res_b^B&: B-\textbf{mod}\to b-\textbf{mod}\,, &\quad \ind_b^B&: b-\textbf{mod}\to B-\textbf{mod}\,.\\
\end{alignat*}
For ease of notation, we shall also write $M\downarrow_b:=\res_b^B(M):=b\cdot \res_H^G(M)$ and $N\uparrow^B:=\ind_b^B(N):=B\cdot \ind_H^G(N)$, for every
$B$-module $M$ and every $b$-module $N$.
(c)\, If $M$ is an $FG$-module and
$i\geqslant 0$, then we denote the $i$th radical of $M$ by $\Rad^i(M)$ and the $i$th socle of $M$ by $\Soc_i(M)$. Moreover, we denote by
$\Hd(M):=M/\Rad(M)$
the \textit{head} of $M$.
Suppose that $M$ has Loewy length $l\geqslant 1$ with
Loewy layers $\Rad^{i-1}(M)/\Rad^i(M)\cong D_{i1}\oplus\cdots \oplus D_{i r_i}$, for $i\in\{1,\ldots,l\}$, $r_1,\ldots,r_l\in \mathbb{N}$
and simple $FG$-modules
$D_{i1},\ldots,D_{ir_i}$. Then we write
\begin{equation}\label{eqn Loewy}
M \ \approx \ \ \begin{matrix} D_{11}\oplus\cdots \oplus D_{1r_1}\\\vdots\\D_{l1}\oplus\cdots \oplus D_{lr_l} \end{matrix}\,,
\end{equation}
and say that $M$ has \textit{Loewy structure} (\ref{eqn Loewy}).
\end{noth}
\begin{noth}\label{noth Ext quiver}{\bf The Ext-quiver.}\,
Suppose that $G$ is a finite group and that $A$ is the group algebra $FG$ or a block of $FG$. Let further $D_1,\ldots,D_r$ be
representatives of the isomorphism classes of simple $A$-modules with projective covers
$P_1,\ldots,P_r$. The \textit{Ext-quiver} of $A$ is the directed graph with
vertices $D_1,\ldots,D_r$, and, for $i,j\in\{1,\ldots,r\}$, the number of arrows from $D_i$ to $D_j$ equals
$$[\Rad(P_i)/\Rad^2(P_i):D_j]=\dim_F(\mathrm{Ext}^1_A(D_i,D_j))=
[\Soc_2(P_j)/\Soc(P_j):D_i];
$$
see \cite[I.6.3]{Erdmann1990}.
Recall that, for $i,j\in\{1,\ldots,r\}$, one also has $\Ext^1_A(D_i,D_j)\cong \Ext^1 _A(D_j^*,D_i^*)$. We want to apply this
when $A$ is a block of a group algebra of a symmetric group. For these, all simple modules
are self-dual (see \cite[(7.1.14)]{JK1981}), so that we shall simply connect $D_i$ and $D_j$ by $\dim_F(\mathrm{Ext}^1_A(D_i,D_j))$ undirected edges, for all $i,j\in\{1,\ldots,r\}$.
In fact for a block $A$ of weight $w=2$ in characteristic $p>2$, the dimension of ${\rm Ext}^1_A(D_i, D_j)$ is at most $1$, by \cite{Scopes1995}.
\end{noth}
\begin{noth}{\bf Partitions, modules and blocks of $F\mathfrak{S}_n$.}\,\label{noth part}
(a)\, We write $\mu\vdash n$, for every partition $\mu$ of $n$, and $\lambda\vdash_p n$, for every $p$-regular partition $\lambda$ of $n$.
By $\mu'$ we denote the conjugate of $\mu$, that is, the Young diagram of $\mu'$ is the transpose of the Young diagram of $\mu$.
Recall that if $\mu$ is $p$-regular, then $\mu'$ is $p$-restricted.
As usual, the dominance ordering on partitions of $n$ will be denoted by $\trianglerighteqslant$, the lexicographic ordering on partitions of $n$ by $\geqslant $.
(b)\, The Specht $F\mathfrak{S}_n$-module labelled by $\mu\vdash n$
and the simple $F\mathfrak{S}_n$-module labelled by $\lambda\vdash_p n$ will be denoted by $S^\mu$ and $D^\lambda$, respectively.
Recall that $D^\lambda$ is self-dual.
Suppose that $p\geqslant 3$. Then, for every $p$-regular partition $\lambda$ of $n$ we denote by $\mathbf{m}(\lambda)$ its \textit{Mullineux conjugate}, that is, the $p$-regular partition of $n$ such that $D^{\mathbf{m}(\lambda)}\cong D^\lambda\otimes \sgn$.
Recall from \cite[Theorem 8.15]{James1978} that one has $S^{\lambda'}\cong (S^\lambda\otimes \sgn)^*$; in particular, the socle of $S^{\lambda'}$
is isomorphic to $D^{\mathbf{m}(\lambda)}$.
(c)\, Given a block $B$ of $F\mathfrak{S}_n$, we denote by $\kappa_B$ its $p$-core and by $w_B$ its $p$-weight.
As well, for every partition $\lambda\vdash n$, we denote by $\kappa_\lambda$ and $w_\lambda$ its $p$-core
and its $p$-weight, respectively. We say that $\lambda$ \textit{belongs to} $B$ (or \textit{$B$ contains $\lambda$}), if
$\kappa_\lambda=\kappa_B$.
Conversely, if $\kappa$ is some $p$-core partition and $w\geqslant 0$ is an integer, then $F\mathfrak{S}_{|\kappa|+pw}$ has a
block with $p$-core $\kappa$ and $p$-weight $w$.
\end{noth}
\begin{noth}\label{noth Specht filtration}
{\bf Specht filtrations.}\, An $F\mathfrak{S}_n$-module $M$ is said to admit a \textit{Specht filtration} if there exist a chain
of $F\mathfrak{S}_n$-submodules
$$\{0\}=M_0\subset M_1\subset\cdots\subset M_r\subset M_{r+1}=M$$
and partitions $\rho_1,\ldots,\rho_{r+1}$ of $n$ such that $M_i/M_{i-1}\cong S^{\rho_i}$, for $i\in\{1,\ldots,r+1\}$.
In general, $M$ may have several Specht filtrations.
Moreover, if $p\in\{2,3\}$, then the number of factors isomorphic to a given Specht $F\mathfrak{S}_n$-module $S^\lambda$ may depend on the chosen filtration; this has been shown by Hemmer and Nakano in \cite{HN2004}. If, however, $p\geqslant 5$, then, by \cite{HN2004}
again,
the number of factors isomorphic to a Specht $F\mathfrak{S}_n$-module $S^\lambda$
is the same for every Specht filtration of $M$; we shall denote this multiplicity by $(M:S^\lambda)$.
Every projective $F\mathfrak{S}_n$-module admits a Specht filtration; this is well known, see for example \cite[(2.6)]{Donkin}, or \cite[(6.1)]{Green}. If $p\geqslant 5$ and if $\lambda$ is a $p$-regular partition of $n$, then
one has $(P^\lambda:S^\lambda)=1$. Moreover, if $\mu\neq \lambda$ is any partition of $n$ such that $(P^\lambda:S^\mu)>0$,
then $\mu\rhd\lambda$, by Brauer Reciprocity.
\end{noth}
\begin{noth}{\bf Abacus displays.}\,\label{noth abacus}
Throughout this article, we shall employ some standard combinatorial methods to identify partitions with suitable abacus displays; see \cite[Section 2.7]{JK1981}
and \cite{Scopes1995}.
(a)\,
Given a partition $\lambda=(\lambda_1,\ldots,\lambda_s)$ of $n$ and any integer $t\geqslant s$, we can display $\lambda$ on an abacus
$\Gamma_\lambda:=\Gamma_{\lambda,t}$ with $p$ runners and $t$ beads, one at each of the positions $\beta_i:= \lambda_i-i+t$,
for $i\in\{1,\ldots,s\}$, and $\beta_i:= -i+t$ for $s<i\leqslant t$. Here we label the positions
from left to right, then top to bottom, starting with $0$.
In accordance with \cite{Scopes1995}, we label the runners of a fixed abacus from $1,\ldots,p$. Then the places on
runner $i$ represent the non-negative integers with residue $i-1$ modulo $p$.
Note that, given any abacus display $\Gamma_\lambda$ of $\lambda$, one can
easily read off $\lambda$ as follows: for each bead on the abacus, count the number of gaps preceding the bead. Then this number of gaps equals the corresponding part of $\lambda$. For instance, if $p=3$, then the following abaci represent the partition $\lambda=(6,3^3,2^2)$:
\begin{center}
$\Gamma_{\lambda,6}$: \quad \begin{tabular}{ccc}
$-$&$-$&$\bullet$\\
$\bullet$&$-$&$\bullet$\\
$\bullet$&$\bullet$&$-$\\
$-$&$-$&$\bullet$
\end{tabular}\,\quad \text{ and }\quad
$\Gamma_{\lambda, 7}$: \quad
\begin{tabular}{ccc} $\bullet$ & $-$& $-$\\
$\bullet$&$\bullet$ & $-$\\
$\bullet$ & $\bullet$ & $\bullet$\\
$-$ & $-$ & $-$\\
$\bullet$& & \end{tabular}
\end{center}
This also illustrates the effect of varying the total number of beads on $\Gamma_{\lambda}$:
inserting a bead
at position 0 of a given abacus $\Gamma_{\lambda,s}$ and moving every other bead to the next position gives
an abacus display $\Gamma_{\lambda,s+1}$.
Recall further that moving a bead on some runner of $\Gamma_\lambda$ one position up corresponds to removing a rim $p$-hook from
the Young diagram $[\lambda]$, while moving a bead from some runner of $\Gamma_\lambda$ one position to the left (respectively, to the right)
corresponds to removing a node from (respectively, adding a node to) $[\lambda]$. This describes the branching rules.
Moving all beads on all runners as far up as possible, one obtains an
abacus display of the $p$-core $\kappa_\lambda$.
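As a purely illustrative aside (a small sketch of our own, not needed for any of the arguments below), the correspondence between partitions, bead positions and $p$-cores just described is easy to mechanise; the following Python fragment recovers $\lambda=(6,3^3,2^2)$ from both displays above and returns its $3$-core $(3,1)$.
\begin{verbatim}
def abacus_positions(lam, t):
    """Bead positions beta_i = lambda_i - i + t of the t-bead display of lam."""
    lam = list(lam) + [0] * (t - len(lam))            # pad with zero parts
    return sorted(lam[i] - (i + 1) + t for i in range(t))

def partition_from_positions(beads):
    """Each part equals the number of gaps preceding its bead."""
    parts = [b - i for i, b in enumerate(sorted(beads))]
    return sorted((x for x in parts if x > 0), reverse=True)

def p_core(lam, p, t):
    """Push every bead on each runner as far up as possible and read off the result."""
    beads = abacus_positions(lam, t)
    pushed = [r + p * k for r in range(p)
              for k in range(sum(1 for b in beads if b % p == r))]
    return partition_from_positions(pushed)

lam = (6, 3, 3, 3, 2, 2)
for t in (6, 7):                                      # the two displays above
    assert partition_from_positions(abacus_positions(lam, t)) == list(lam)
assert p_core(lam, 3, 6) == [3, 1]
\end{verbatim}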
Lastly, suppose that $B$ is any block of $F\mathfrak{S}_n$ and $\Gamma_{\kappa_B}$ is any abacus display of
$\kappa_B$ and suppose further that there are $k>0$ more beads on some runner $i>1$ than on runner $i-1$ of
$\Gamma_{\kappa_B}$. One may interchange runners $i$ and $i-1$ to get an abacus display of $\kappa_{\bar{B}}$, where
$\bar{B}$ is a block of $F\mathfrak{S}_{n-k}$ with $w_{\bar{B}}=w_B=:w$; in this case one says that
$(B,\bar{B})$ is a
\textit{$(w:k)$-pair.}
In the above example we get, for instance, $\kappa_\lambda=\kappa_B=(3,1)$, and the block $B$ of $F\mathfrak{S}_{19}$
forms a $(5:2)$-pair with the block $\bar{B}$ of $F\mathfrak{S}_{17}$ with $\kappa_{\bar{B}}=(2)$.
As Scopes has shown in \cite{Scopes1991}, if $k\geqslant w$, then $B$ and $\bar{B}$ are Morita equivalent; the particular Morita equivalence
between $B$ and $\bar{B}$ established in \cite{Scopes1991} is called \textit{Scopes equivalence}.
Moreover, she proved that for a fixed $w$, there is a finite list of blocks $\bar{B}$ such that every other block can be obtained from
some block in this list by a sequence of $(w:k)$ pairs, for some $k\geqslant w$.
We shall come back to
the notion of Scopes equivalence later in \ref{noth Scopes} and \ref{noth partial Scopes}.
(b)\, In this paper we shall focus on partitions and blocks of weight 2.
To this end, we recall one last bit of notation from
\cite{Scopes1995}. Suppose that $\lambda=(\lambda_1,\ldots,\lambda_r)$ is a partition of $n$ of $p$-weight $2$. As above, let
$\Gamma_\lambda:=\Gamma_{\lambda,s}$ be an abacus display of $\lambda$, for some $s\geqslant r$.
Suppose that, for $i\in\{1,\ldots,p\}$
we have $m_i$ beads on runner $i$ of $\Gamma_\lambda$.
Then we shall say that we represent $\lambda$ on an \textit{$[m_1,\ldots,m_p]$-abacus}.
For $\lambda$ of weight 2, there are exactly two beads that can be moved up on their respective runner in
$\Gamma_\lambda$, and there are three possible constellations:
\quad (i)\, There is a bead on some runner $i$ that can be moved two positions up. Then we denote $\lambda$ by $\langle i\rangle$.
\quad (ii)\, There are $1\leqslant i<j\leqslant p$ such that there is a movable bead on runner $i$ and a movable bead on runner $j$. Then we denote
$\lambda$ by $\langle j,i\rangle$ or $\langle i,j\rangle$.
\quad (iii)\, There is some runner $i$ that has a gap followed by two consecutive beads. Thus one can first move the upper bead one position up, then
the lower bead. In this case, we denote $\lambda$ by $\langle i,i\rangle$.
Note that this labelling depends on the fixed choice of an abacus.
We shall always state clearly which abacus
is used.
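The case distinction (i)--(iii) above is easily mechanised. The following fragment (again an illustration of our own, complementing the sketch in part (a)) returns the label of a weight-2 display from its bead positions; the two test cases are the partitions $(3,3)$ and $(5,3)$ on a $2$-bead abacus for $p=3$.
\begin{verbatim}
def weight2_label(beads, p):
    """Return the label <i>, <i,i> or <i,j> of a weight-2 abacus display.

    beads: the occupied positions; runners are labelled 1,...,p as above."""
    movable = []                       # (runner, number of gaps above this bead)
    for r in range(p):
        rows = sorted(b // p for b in beads if b % p == r)
        movable += [(r + 1, row - k) for k, row in enumerate(rows) if row > k]
    assert sum(g for _, g in movable) == 2, "not a display of weight 2"
    if len(movable) == 1:              # a single bead two rows above its slot: <i>
        return (movable[0][0],)
    (i, _), (j, _) = sorted(movable)
    return (i, j)                      # <i,i> or <i,j> with i <= j

# lambda = (3,3) on a 2-bead abacus with p = 3: beads at positions 3 and 4
assert weight2_label([3, 4], 3) == (1, 2)      # <1,2>
# lambda = (5,3) on a 2-bead abacus with p = 3: beads at positions 3 and 6
assert weight2_label([3, 6], 3) == (1, 1)      # <1,1>
\end{verbatim}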
\end{noth}
\section{Blocks of Weight Two}\label{sec weight 2}
Throughout this section, let $p\geqslant 3$ be a prime and let $F$ be an algebraically closed field of characteristic $p$.
We begin by recalling some crucial notation from \cite[Section 4]{Richards1996} and \cite[Section 2]{ChuangTan2001}.
\begin{noth}{\bf Colours and $\partial$-values.}\,\label{noth partial}
Suppose that $\lambda$
is a partition of $n$ with $p$-weight $2$ and $p$-core $\kappa=(\kappa_1,\ldots,\kappa_t)$.
(a)\, One can remove exactly two rim $p$-hooks from $[\lambda]$ to obtain $[\kappa]$. Although there is, in general, not a unique way to do so,
the absolute value of the difference in the leg lengths of the two rim hooks is well defined, and is denoted by $\partial(\lambda)$; see \cite[Lemma~4.1]{Richards1996}.
(b)\, Consider the hook diagram $H_\lambda$ of $\lambda$.
Since $\lambda$ has $p$-weight $2$, there are exactly two entries in $H_\lambda$ that are divisible by $p$; see \cite[2.7.40]{JK1981}.
There are two possibilities for these entries: either both are equal to $p$, or one equals $p$ and the other equals $2p$. Suppose further that
$\partial(\lambda)=0$. If $H_\lambda$ has two entries equal to $p$, then the leg lengths of the corresponding hooks differ by $1$; see \cite[p. 397]{Richards1996}. If the larger leg length is even, one calls $\lambda$ \textit{black}, otherwise \textit{white}.
If $H_\lambda$ has an entry equal to $2p$ and if the leg length of the corresponding hook has residue $0$ or $3$ modulo $4$, then one also calls
$\lambda$ \textit{black}, otherwise \textit{white}.
(c)\, If $\lambda$ is $p$-restricted, then there is a ($p$-regular) partition $\lambda_+$ of $n$
that is the lexicographically smallest partition with the following properties: $\lambda_+>\lambda$, $\lambda_+$ has $p$-core
$\kappa$, $p$-weight $2$ as well as the same $\partial$-value and (if $\partial(\lambda)=0$) the same colour as $\lambda$.
As well, if $\lambda$ is $p$-restricted, then $\lambda'$ is $p$-regular, and one has $\lambda_+=\mathbf{m}(\lambda')$.
If $\lambda$ is not $p$-restricted, then $\lambda_+$ does not exist; see \cite[Remarks 2.1]{ChuangTan2001}.
If $\lambda$ is $p$-regular, then there is a ($p$-restricted) partition $\lambda_-$ of $n$ that is the lexicographically largest partition
with the following properties: $\lambda_-<\lambda$, $\lambda_-$ has $p$-core
$\kappa$, $p$-weight $2$ as well as the same $\partial$-value and (if $\partial(\lambda)=0$) the same colour as $\lambda$.
Moreover, one then has $\lambda_-=\mathbf{m}(\lambda)'$.
If $\lambda$ is not $p$-regular, then $\lambda_-$ does not exist; see \cite[Remarks 2.1]{ChuangTan2001}.
\end{noth}
Next we recall from \cite[Section 3]{Scopes1995} some properties of $(2:1)$-pairs of blocks that will be
fundamental later in this article.
\begin{noth}{\bf Exceptional partitions of $(2:1)$-pairs.}\,\label{noth (2:1)}
Suppose that $B$ is a block of $F\mathfrak{S}_n$ of weight $2$ with $\kappa_B=(\kappa_1,\ldots,\kappa_t)$ where $\kappa_t\neq 0$.
Furthermore, let $\bar{B}$ be a block of $F\mathfrak{S}_{n-1}$ of weight $2$ such that $(B,\bar{B})$ is a $(2:1)$-pair.
Let $s\geqslant t$.
As in \ref{noth abacus}, we display $\kappa_B$, $\kappa_{\bar{B}}$ as well as all partitions of $B$ and $\bar{B}$, respectively, on an $[m_1, \ldots, m_p]$-abacus with $2p+s$ beads.
For ease of notation we identify partitions with their abacus displays as explained in \ref{noth abacus}.
With a suitable choice of $s$, there is a unique $i\in\{2,\ldots,p\}$ such that $\kappa_{\bar{B}}$ is obtained from $\kappa_B$ by interchanging the $i$th and the $(i-1)$st runner.
Following \cite[Definition 3.1, Definition 3.2]{Scopes1995}, we consider the following partitions of $B$ and $\bar{B}$, respectively:
\begin{align*}
\alpha&:=\alpha(B,\bar{B}):=\langle i,i\rangle\,,\quad \beta:=\beta(B,\bar{B}):=\langle i,i-1\rangle\,,\quad \gamma:=\gamma(B,\bar{B}):=\langle i-1\rangle\,,\\
\bar{\alpha}&:=\bar{\alpha}(B,\bar{B}):=\langle i\rangle\,,\quad \bar{\beta}:=\bar{\beta}(B,\bar{B}):=\langle i,i-1\rangle\,,\quad \bar{\gamma}:=\bar{\gamma}(B,\bar{B}):=\langle i-1,i-1\rangle\,.
\end{align*}
From now on we shall refer to $\alpha,\beta$ and $\gamma$ as the \textit{exceptional partitions} of $B$ (with respect to the pair $(B,\bar{B})$).
Analogously, we shall call $\bar{\alpha},\bar{\beta}$ and $\bar{\gamma}$ the \textit{exceptional partitions} of $\bar{B}$ (with respect to the pair $(B,\bar{B})$). Every partition of $B$ and $\bar{B}$, respectively, that is not exceptional will be called \textit{good} (with respect to the pair $(B,\bar{B})$).
A $B$-module (respectively, $\bar{B}$-module) will be called \textit{good} if all its composition factors are labelled by good partitions.
By \cite[Lemma~3.5]{Scopes1995}, one has a bijection $\Phi:=\Phi(B,\bar{B})$ between the set of good partitions of $B$ and the set
of good partitions of $\bar{B}$ that preserves the lexicographic ordering as well as $p$-regularity and $p$-singularity. Given a good partition $\lambda$ of $B$, one obtains $\Phi(\lambda)$ by interchanging the
$i$th and $(i-1)$st runner of the abacus. We shall often denote $\Phi(\lambda)$ by $\bar{\lambda}$, for every good partition of $B$,
and $\Phi^{-1}(\mu)$ by $\hat{\mu}$, for every good partition $\mu$ of $\bar{B}$.
It should be emphasized that neither the exceptional partitions of $B$ and $\bar{B}$ nor the bijection $\Phi$ depends on the chosen
abacus displays.
\end{noth}
\begin{rem}\label{rem l and r}
Suppose that $B$ and $\bar{B}$ are blocks of $F\mathfrak{S}_n$ and $F\mathfrak{S}_{n-1}$, respectively, of weight $2$ that form a
$(2:1)$-pair. Moreover, let $\kappa_B=(\kappa_1,\ldots,\kappa_t)$ with $\kappa_t\neq 0$. As in \ref{noth (2:1)}, we display
$\kappa_B$, $\kappa_{\bar{B}}$ and all partitions of $B$ and $\bar{B}$ on an abacus with $s+2p$ beads, for a fixed $s\geqslant t$.
Suppose that $\kappa_{\bar{B}}$ is obtained from $\kappa_B$ by swapping runners $i$ and $i-1$.
Again in the notation
of \ref{noth (2:1)}, we consider the $i$th and $(i-1)$st runner of the abaci displaying the exceptional partitions of $B$ and $\mathbfar{B}$. Then we
have the following constellations, where, in each case, $l_1,l_2,r_1,r_2$ are understood to be the numbers of beads in the respective parts of the
abacus under consideration, as shown in the diagrams below.
\mathbfegin{center}
\mathbfegin{tabular}{cccccc}
\multicolumn{6}{c}{$\mathbfar{\alpha}=\langle i\rangle$:}\\
&&&&&\\
$\cdots$&$\cdots$&$\mathbfullet$&$\mathbfullet$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$\vdots$&$\vdots$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$\mathbfullet$&$\mathbfullet$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$\mathbfullet$&$-$&\multicolumn{2}{c}{\cellcolor{lightgray} $r_1$}\\
\multicolumn{2}{c}{\cellcolor{lightgray} $l_1$}&$\mathbfullet$&$-$&\multicolumn{2}{c}{\cellcolor{gray} $r_2$}\\
\multicolumn{2}{c}{\cellcolor{gray} $l_2$}&$-$&$\mathbfullet$&$\cdots$&$\cdots$\\
\end{tabular}\;, \quad \quad
\mathbfegin{tabular}{cccccc}
\multicolumn{6}{c}{$\mathbfar{\mathbfeta}=\langle i-1,i\rangle$:}\\
&&&&&\\
$\cdots$&$\cdots$&$\mathbfullet$&$\mathbfullet$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$\vdots$&$\vdots$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$\mathbfullet$&$\mathbfullet$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$\mathbfullet$&$-$&\multicolumn{2}{c}{\cellcolor{lightgray} $r_1$}\\
\multicolumn{2}{c}{\cellcolor{lightgray} $l_1$}&$-$&$\mathbfullet$&\multicolumn{2}{c}{\cellcolor{gray} $r_2$}\\
\multicolumn{2}{c}{\cellcolor{gray} $l_2$}&$\mathbfullet$&$-$&$\cdots$&$\cdots$\\
\end{tabular}\;,
\end{center}
\mathbfigskip
\mathbfigskip
\mathbfegin{center}
\mathbfegin{tabular}{cccccc}
\multicolumn{6}{c}{$\mathbfar{\gamma}=\langle i-1,i-1\rangle$:}\\
&&&&&\\
$\cdots$&$\cdots$&$\mathbfullet$&$\mathbfullet$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$\vdots$&$\vdots$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$\mathbfullet$&$\mathbfullet$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$-$&$\mathbfullet$&\multicolumn{2}{c}{\cellcolor{lightgray} $r_1$}\\
\multicolumn{2}{c}{\cellcolor{lightgray} $l_1$}&$\mathbfullet$&$-$&\multicolumn{2}{c}{\cellcolor{gray} $r_2$}\\
\multicolumn{2}{c}{\cellcolor{gray} $l_2$}&$\mathbfullet$&$-$&$\cdots$&$\cdots$\\
\end{tabular}\;, \quad\quad
\mathbfegin{tabular}{cccccc}
\multicolumn{6}{c}{$\alpha=\langle i,i\rangle$:}\\
&&&&&\\
$\cdots$&$\cdots$&$\mathbfullet$&$\mathbfullet$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$\vdots$&$\vdots$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$\mathbfullet$&$\mathbfullet$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$\mathbfullet$&$-$&\multicolumn{2}{c}{\cellcolor{lightgray} $r_1$}\\
\multicolumn{2}{c}{\cellcolor{lightgray} $l_1$}&$-$&$\mathbfullet$&\multicolumn{2}{c}{\cellcolor{gray} $r_2$}\\
\multicolumn{2}{c}{\cellcolor{gray} $l_2$}&$-$&$\mathbfullet$&$\cdots$&$\cdots$\\
\end{tabular}\;,\quad\quad
\end{center}
\mathbfigskip
\mathbfigskip
\mathbfegin{center}
\mathbfegin{tabular}{cccccc}
\multicolumn{6}{c}{$\mathbfeta=\langle i-1,i\rangle$:}\\
&&&&&\\
$\cdots$&$\cdots$&$\mathbfullet$&$\mathbfullet$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$\vdots$&$\vdots$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$\mathbfullet$&$\mathbfullet$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$-$&$\mathbfullet$&\multicolumn{2}{c}{\cellcolor{lightgray} $r_1$}\\
\multicolumn{2}{c}{\cellcolor{lightgray} $l_1$}&$\mathbfullet$&$-$&\multicolumn{2}{c}{\cellcolor{gray} $r_2$}\\
\multicolumn{2}{c}{\cellcolor{gray} $l_2$}&$-$&$\mathbfullet$&$\cdots$&$\cdots$\\
\end{tabular}\;, \quad\quad
\mathbfegin{tabular}{cccccc}
\multicolumn{6}{c}{$\gamma=\langle i-1\rangle$:}\\
&&&&&\\
$\cdots$&$\cdots$&$\mathbfullet$&$\mathbfullet$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$\vdots$&$\vdots$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$\mathbfullet$&$\mathbfullet$&$\cdots$&$\cdots$\\
$\cdots$&$\cdots$&$-$&$\mathbfullet$&\multicolumn{2}{c}{\cellcolor{lightgray} $r_1$}\\
\multicolumn{2}{c}{\cellcolor{lightgray} $l_1$}&$-$&$\mathbfullet$&\multicolumn{2}{c}{\cellcolor{gray} $r_2$}\\
\multicolumn{2}{c}{\cellcolor{gray} $l_2$}&$\mathbfullet$&$-$&$\cdots$&$\cdots$\\
\end{tabular}\;.
\end{center}
Note that we have $l_1\geqslant l_2$ and $r_1\geqslant r_2$, since none of the above
partitions has a movable bead on any runner different from $i$ and $i-1$.
For our subsequent considerations, in particular those in Theorem~\ref{thm Loewy except}, it will turn out to be
useful to distinguish the following cases:
\mathbfegin{itemize}
\item[\rm{(1)}] $l_1+r_1=0=l_2+r_2$;
\item[\rm{(2)}] $0=l_2+r_2< l_1+r_1<p-2$;
\item[\rm{(3)}] $0=l_2+r_2<l_1+r_1=p-2$;
\item[\rm{(4)}] $0<l_2+r_2< l_1+r_1=p-2$;
\item[\rm{(5)}] $l_2+r_2=p-2=l_1+r_1$;
\item[\rm{(6)}] $0<l_2+r_2\leqslant l_1+r_1<p-2$.
\end{itemize}
Observe that case (3) occurs precisely when $B$ is the principal block of $F\mathfrak{S}_{2p+1}$, which has $p$-core $(1)$, and
when $\mathbfar{B}$ is the principal block of $F\mathfrak{S}_{2p}$, which has $p$-core $\emptyset$.
In this case, we further have $\mathbfeta=(p+1,1^p)$ and $\alpha=(p+1,2,1^{p-2})$. By \cite[Theorem 23.7]{James1978}, $S^\mathbfeta$ is simple, and by \cite[Lemma 4.3]{Scopes1995},
one has $[S^\mathbfeta:D^\alpha]\neq 0$. Thus $S^\mathbfeta\cong D^\alpha$, in this case.
\end{rem}
\mathbfigskip
The next lemma shows in which of the six cases of Remark~\ref{rem l and r}
the exceptional partitions of a $(2:1)$-pair of
weight-$2$ blocks are $p$-regular or $p$-restricted. We also record the $\partial$-values
of the partitions in question.
The result is an easy consequence of the abacus combinatorics in Appendix~\ref{sec abacus}, and will be important
for our proof of Theorem~\ref{thm main1}.
\mathbfegin{lemma}\label{lemma l and r}
Retain the hypotheses and notation as in Remark~\ref{rem l and r}. Moreover, set $d:=l_1+r_1-l_2-r_2$. Then one has the following
\mathbfegin{center}
\mathbfegin{tabular}{|c|c|c|c|}\hat{l}ine
partition & $p$-regular &$p$-restricted& $\partial$\\\hat{l}ine\hat{l}ine
$\mathbfar{\alpha}$& {\rm (1), (2), (3),} & {\rm (4), (5), (6)}&$d+1$\\
& {\rm (4), (5), (6)}&&\\\hat{l}ine
$\mathbfar{\mathbfeta}$& {\rm (1), (2), (3),} & {\rm (2), (3), (4),}&$d$\\
& {\rm (4), (6)}. & {\rm (5), (6)} &\\\hat{l}ine
$\mathbfar{\gamma}$& {\rm (1), (2), (6)} & {\rm (1), (2), (3),}&$d+1$\\
& & {\rm (4), (5), (6)} &\\\hat{l}ine
$\alpha$& {\rm (1), (2), (3),} & {\rm (2), (3), (4)}&$d$ \\
& {\rm (4), (5), (6)} & {\rm (5), (6)} &\\\hat{l}ine
$\mathbfeta$& {\rm (1), (2), (6)}& {\rm (4), (5), (6)}&$d+1$\\\hat{l}ine
$\gamma$& {\rm (1), (2), (3),}& {\rm (1), (2), (3),}&$d$\\
& {\rm (4), (6)} & {\rm (4), (5), (6)} &\\\hat{l}ine
\end{tabular}
\end{center}
In particular, one has $\partial(\mathbfeta)=\partial(\mathbfar{\alpha})=\partial(\mathbfar{\gamma})>0$.
Furthermore, $\partial(\mathbfar{\mathbfeta})=\partial(\alpha)=\partial(\gamma)=0$ if and only if $l_1=l_2$ and $r_1=r_2$; in this case,
$\mathbfar{\mathbfeta}$, $\alpha$ and $\gamma$ have the same colour, which is black if and only if $l_2+r_2$ is odd.
\end{lemma}
\mathbfegin{proof}
We represent the respective partitions on an abacus with $s+2p$ beads as before.
The assertions concerning $p$-regularity and $p$-restrictedness then follow from \ref{noth abacus}(a).
Note that part of this already appears in \cite[Lemma~4.4]{Scopes1995}.
The assertions concerning $\partial$-values and colours are immediate from \ref{noth hooks diagram} and \ref{noth colour weight 2}.
\end{proof}
\mathbfegin{lemma}\label{lemma colour good}
Suppose that $B$ and $\mathbfar{B}$ are blocks of $F\mathfrak{S}_n$ and $F\mathfrak{S}_{n-1}$, respectively,
with $w_B=2=w_{\mathbfar{B}}$ that form a $(2:1)$-pair. If $\lambda$ is a good partition of $B$ and $\Phi(\lambda)$ its corresponding good partition of $\mathbfar{B}$, then $\partial(\lambda)=\partial(\Phi(\lambda))$.
If $\partial(\lambda)=\partial(\Phi(\lambda))=0$, then $\lambda$ and $\Phi(\lambda)$ have the same colour.
Moreover, if $\partial(\mathbfar{\mathbfeta})=0=\partial(\gamma)$, then also the exceptional partitions
$\mathbfar{\mathbfeta}$ and $\gamma$ have the same colour.
\end{lemma}
\mathbfegin{proof}
Suppose that $\kappa_B=(\kappa_1,\ldots,\kappa_t)$, for some $t\geqslant 1$, $\kappa_t\geqslant 1$.
For convenience, we set $\mathbfar{\lambda}:=\Phi(\lambda)$. We consider an abacus display $\Gamma_\lambda$
of $\lambda$ and an $[m_1,\ldots,m_p]$-abacus display $\Gamma_{\mathbfar{\lambda}}$ of $\mathbfar{\lambda}$ with $s+2p$ beads, for some $s\geqslant t$, such that
$\Gamma_{\mathbfar{\lambda}}$ is obtained from $\Gamma_\lambda$
by swapping runners $i$ and $i-1$, where $i\in \{ 2, \ldots, p\}$.
We first observe the following: suppose that there is a runner $j\neq i$ of $\Gamma_\lambda$
such that there is a bead in row $x$ on runner $j$ and a gap in some row $y<x$ on runner $j$. Then the same holds for
$\Gamma_{\mathbfar{\lambda}}$. Moreover, the number of beads passed when moving the bead to position $(y,j)$ in $\Gamma_\lambda$
equals the number of beads passed when moving the bead to position $(y,j)$ in $\Gamma_{\mathbfar{\lambda}}$.
Next, in the notation of \ref{noth abacus}(b), there are three possibilities: $\lambda=\langle j\rangle$, $\lambda=\langle j,j\rangle$
or $\lambda=\langle k,j\rangle$, for some $j,k\in\{1,\ldots,p\}$, $j<k$.
If $j,k\notin\{i,i-1\}$, then, in each of the three possible cases, the assertion of the lemma is an easy consequence
of the abacus combinatorics in \ref{noth colour weight 2} and our above observation. Thus, since
$\lambda\notin\{\alpha,\mathbfeta,\gamma\}=\{\langle i,i\rangle,\langle i-1,i\rangle, \langle i-1\rangle\}$, it remains to treat the following four cases, where we
draw runners $i-1$ and $i$, and where $i-1\neq l\neq i$.
As well,
$a,b,c,d$ are the numbers of beads in the respective parts of the abacus.
\mathbfegin{center}
\mathbfegin{tabular}{|c|c|c|}\hat{l}ine
case & $\lambda$ & $\mathbfar{\lambda}$\\\hat{l}ine\hat{l}ine
(a) & $\langle i-1,i-1\rangle$ &$\langle i,i\rangle$\\\hat{l}ine
&\mathbfegin{tabular}{cccc}
$\cdots$&$\mathbfullet$&$\mathbfullet$& $\cdots$\\
$\vdots$&$\vdots$&$\vdots$&$\vdots$\\
$\cdots$& $\mathbfullet$&$\mathbfullet$&$\cdots$\\
$\cdots$& $-$&$\mathbfullet$&{\cellcolor{lightgray} $c$}\\
{\cellcolor{lightgray} $a$} & $\mathbfullet$&$\mathbfullet$&{\cellcolor{gray} $d$}\\
{\cellcolor{gray} $b$}&$\mathbfullet$&$\mathbfullet$&$\cdots$
\end{tabular}
&
\mathbfegin{tabular}{cccc}
$\cdots$&$\mathbfullet$&$\mathbfullet$& $\cdots$\\
$\vdots$&$\vdots$&$\vdots$&$\vdots$\\
$\cdots$& $\mathbfullet$&$\mathbfullet$&$\cdots$\\
$\cdots$& $\mathbfullet$&$-$&{\cellcolor{lightgray} $c$}\\
{\cellcolor{lightgray} $a$} & $\mathbfullet$&$\mathbfullet$&{\cellcolor{gray} $d$}\\
{\cellcolor{gray} $b$}&$\mathbfullet$&$\mathbfullet$&$\cdots$
\end{tabular}\\\hat{l}ine
\end{tabular}
\mathbfegin{tabular}{|c|c|c|}\hat{l}ine
(b) & $\langle i\rangle$ &$\langle i-1\rangle$\\\hat{l}ine
&
\mathbfegin{tabular}{cccc}
$\cdots$&$\mathbfullet$&$\mathbfullet$& $\cdots$\\
$\vdots$&$\vdots$&$\vdots$&$\vdots$\\
$\cdots$&$\mathbfullet$&$\mathbfullet$&$\cdots$\\
$\cdots$& $-$&$-$&{\cellcolor{lightgray} $c$}\\
{\cellcolor{lightgray} $a$} & $-$&$-$&{\cellcolor{gray} $d$}\\
{\cellcolor{gray} $b$}&$-$&$\mathbfullet$&$\cdots$
\end{tabular}
&
\mathbfegin{tabular}{cccc}
$\cdots$&$\mathbfullet$&$\mathbfullet$& $\cdots$\\
$\vdots$&$\vdots$&$\vdots$&$\vdots$\\
$\cdots$&$\mathbfullet$&$\mathbfullet$&$\cdots$\\
$\cdots$& $-$&$-$&{\cellcolor{lightgray} $c$}\\
{\cellcolor{lightgray} $a$} & $-$&$-$&{\cellcolor{gray} $d$}\\
{\cellcolor{gray} $b$}&$\mathbfullet$&$-$&$\cdots$
\end{tabular}\\\hat{l}ine
\end{tabular}
\mathbfegin{tabular}{|c|c|c|}\hat{l}ine
(c) & $\langle i-1,l\rangle$ &$\langle i,l\rangle$\\\hat{l}ine
&\mathbfegin{tabular}{cccc}
$\cdots$&$\mathbfullet$&$\mathbfullet$& $\cdots$\\
$\vdots$&$\vdots$&$\vdots$&$\vdots$\\
$\cdots$& $\mathbfullet$&$\mathbfullet$&{\cellcolor{lightgray} $c$}\\
{\cellcolor{lightgray} $a$} & $-$&$\mathbfullet$&{\cellcolor{gray} $d$}\\
{\cellcolor{gray} $b$}&$\mathbfullet$&$\mathbfullet$&$\cdots$
\end{tabular}
&
\mathbfegin{tabular}{cccc}
$\cdots$&$\mathbfullet$&$\mathbfullet$& $\cdots$\\
$\vdots$&$\vdots$&$\vdots$&$\vdots$\\
$\cdots$& $\mathbfullet$&$\mathbfullet$&{\cellcolor{lightgray} $c$}\\
{\cellcolor{lightgray} $a$} & $\mathbfullet$&$-$&{\cellcolor{gray} $d$}\\
{\cellcolor{gray} $b$}&$\mathbfullet$&$\mathbfullet$&$\cdots$
\end{tabular}\\\hat{l}ine\hat{l}ine
(d) & $\langle i,l\rangle$ &$\langle i-1,l\rangle$\\\hat{l}ine
&\mathbfegin{tabular}{cccc}
$\cdots$&$\mathbfullet$&$\mathbfullet$& $\cdots$\\
$\vdots$&$\vdots$&$\vdots$&$\vdots$\\
$\cdots$& $\mathbfullet$&$\mathbfullet$&{\cellcolor{lightgray} $c$}\\
{\cellcolor{lightgray} $a$} & $-$&$-$&{\cellcolor{gray} $d$}\\
{\cellcolor{gray} $b$}&$-$&$\mathbfullet$&$\cdots$
\end{tabular}
&
\mathbfegin{tabular}{cccc}
$\cdots$&$\mathbfullet$&$\mathbfullet$& $\cdots$\\
$\vdots$&$\vdots$&$\vdots$&$\vdots$\\
$\cdots$& $\mathbfullet$&$\mathbfullet$&{\cellcolor{lightgray} $c$}\\
{\cellcolor{lightgray} $a$} & $-$&$-$&{\cellcolor{gray} $d$}\\
{\cellcolor{gray} $b$}&$\mathbfullet$&$-$&$\cdots$
\end{tabular}\\\hat{l}ine
\end{tabular}
\end{center}
We use \ref{noth hooks diagram}.
In case (a), we thus get $\partial(\lambda)=|(a+c+1)-(b+d+1)|=\partial(\mathbfar{\lambda})$.
The hook diagram of $\lambda$ and $\mathbfar{\lambda}$, respectively, has an entry equal to $2p$.
If $\partial(\lambda)=0$,
then the leg length of the $2p$-hook of $\lambda$ and $\mathbfar{\lambda}$ is $a+b+c+d+3$, so that $\lambda$ and $\mathbfar{\lambda}$ have the same colour, by \ref{noth partial}.
In case (b), we get $\partial(\lambda)=|(b+d)-(a+c)|=\partial(\mathbfar{\lambda})$. The hook diagram of $\lambda$ and $\mathbfar{\lambda}$, respectively, has an entry equal to $2p$.
If $\partial(\lambda)=0$,
then the leg length of the $2p$-hook of $\lambda$ and $\mathbfar{\lambda}$ is $a+b+c+d$. So
$\lambda$ and $\mathbfar{\lambda}$ have the same colour, by \ref{noth partial}.
In case (c), $\lambda$ and $\mathbfar{\lambda}$ both have a movable bead on runner $l$. The number $m$ of beads passed when moving
this bead one position up is the same for $\lambda$ and $\mathbfar{\lambda}$, by our initial observation.
Moreover, the hook diagrams of $\lambda$ and $\mathbfar{\lambda}$ have two entries equal to $p$. The
leg lengths of the
corresponding hooks are $b+d+1$ and $m$, both for $\lambda$ and $\mathbfar{\lambda}$.
As for the $\partial$-values, we may first move the bead on runner $i-1$ of $\Gamma_\lambda$ one position up.
The leg length of the corresponding (rim) $p$-hook equals $b+d+1$. Then we move the movable bead on runner $l$ one position up.
The leg length of the corresponding (rim) $p$-hook equals $m'$, for some $m'\geqslant 0$. So we have $\partial(\lambda)=|(b+d+1)-m'|$.
On the other hand, we first move the bead on runner $i$ of $\Gamma_{\mathbfar{\lambda}}$ one position up.
The leg length of the corresponding (rim) $p$-hook equals $b+d+1$.
Then we move the movable bead on runner $l$ one position up.
The leg length of the corresponding rim $p$-hook equals $m'$ again. Thus also
$\partial(\mathbfar{\lambda})=|(b+d+1)-m'|$.
Hence
we have $\partial(\lambda)=\partial(\mathbfar{\lambda})$ and if $\partial(\lambda)=0$, then
$\lambda$ and $\mathbfar{\lambda}$ have the same colour.
In case (d), $\lambda$ and $\mathbfar{\lambda}$ both have a movable bead on runner $l$. The number $m$ of beads passed when moving
this bead one position up is the same for $\lambda$ and $\mathbfar{\lambda}$, by our initial observation.
Moreover, the hook diagrams of $\lambda$ and $\mathbfar{\lambda}$ have two entries equal to $p$. The leg length of the corresponding rim $p$-hook equals $b+d$. Then we move the bead on runner $l$ one position up, and the leg lengths of the
corresponding hooks are $b+d$ and $m$, both for $\lambda$ and $\mathbfar{\lambda}$.
To determine the $\partial$-values, we proceed as in case (3), that is, here we first move the bead on runner $i$ and then the bead on runner $l$ of
$\Gamma_\lambda$ one position up.
Analogously, we first move the bead on runner $i-1$ and then the bead on runner $l$ of $\Gamma_{\mathbfar{\lambda}}$ one position up.
Then there is some $m'\geqslant 0$ such that
$\partial(\lambda)=|(b+d)-m'|=\partial(\mathbfar{\lambda})$ and if $\partial(\lambda)=0$, then
$\lambda$ and $\mathbfar{\lambda}$ have the same colour.
The assertion concerning $\mathbfar{\mathbfeta}$ and $\gamma$ has been established in Lemma~\ref{lemma l and r}.
This completes the proof of the lemma.
\end{proof}
\mathbfegin{noth}\label{noth Scopes}
{\mathbff Scopes equivalences and $\partial$-values.}\,
Suppose that $k\geqslant 2$ and that $B$ and $\mathbfar{B}$ are blocks of $F\mathfrak{S}_n$ and $F\mathfrak{S}_{n-k}$, respectively, of weight $2$ such that
$(B,\mathbfar{B})$ is a $(2:k)$-pair. Let $\kappa_B=(\kappa_1,\ldots,\kappa_t)$, and display $\kappa_B,\kappa_{\mathbfar{B}}$ as well as all partitions
of $B$ and $\mathbfar{B}$ on an $[m_1,\ldots,m_p]$-abacus with $s+2p$ beads, for some $s\geqslant t$. By \cite{Scopes1991}, we may choose $s$ such that there is some $i>1$ such that $\Gamma_{B}$ has
$k$ more beads on runner $i$ than on runner $i-1$, and $\Gamma_{\mathbfar{B}}$ is obtained by interchanging runners $i$ and $i-1$ of $\Gamma_B$.
Moreover, by \cite{Scopes1991}, the blocks $B$ and $\mathbfar{B}$ are Morita equivalent. Thus, in particular, there is a bijection
between the isomorphism classes of simple $B$-modules and the isomorphism classes of simple $\mathbfar{B}$-modules.
As Scopes has also shown in \cite{Scopes1991}, this bijection can be described combinatorially.
More precisely,
there is a bijection, say $\Psi$, between the set of partitions of $B$ and the set of partitions of $\mathbfar{B}$ that preserves $p$-regularity and
the lexicographic ordering. Whenever $\lambda$ is a partition of $B$ with abacus display $\Gamma_\lambda$, one obtains
$\Gamma_{\Psi(\lambda)}$ by interchanging runners $i$ and $i-1$. If $\lambda$ is $p$-regular, then
the simple $\mathbfar{B}$-module $D^{\Psi(\lambda)}$ is the Morita correspondent of $D^{\lambda}$.
We should like to emphasize that, for every partition $\lambda$ of $B$, whenever there
is some bead on runner $i-1$ of $\Gamma_\lambda$, there is also a bead on runner $i$ in the same row. This is due to
the fact that $k\geqslant 2$. With this, the arguments
used in the proof of Lemma~\ref{lemma colour good}, easily generalize and show that $\partial(\lambda)=\partial(\Psi(\lambda))$, for
every partition $\lambda$ of $B$. Moreover, if $\partial(\lambda)=0$, then $\lambda$ and $\Psi(\lambda)$ have the same colour.
\end{noth}
\mathbfegin{noth}\label{noth partial Scopes}
{\mathbff Partial Scopes equivalences and Ext-quivers.}\,
Suppose that $B$ and $\mathbfar{B}$ are blocks of $F\mathfrak{S}_n$ and $F\mathfrak{S}_{n-1}$, respectively, of weight $2$ such that
$(B,\mathbfar{B})$ is a $(2:1)$-pair. As in \ref{noth (2:1)}, we denote by $\mathbfar{\alpha},\mathbfar{\mathbfeta},\mathbfar{\gamma}$ and
$\alpha$, $\mathbfeta$, $\gamma$ the exceptional partitions of $B$ and $\mathbfar{B}$, respectively. Moreover, retain the notation fixed in
\ref{noth (2:1)}.
Furthermore, consider the pair of exact, two-sided adjoint functors
\mathbfegin{align*}
\res_{\mathbfar{B}}^B&: B-\mathbf{mod}\to \mathbfar{B}-\mathbf{mod}\,, M\mapsto M\downarrow_{\mathbfar{B}}\,,\\
\ind_{\mathbfar{B}}^B&: \mathbfar{B}-\mathbf{mod}\to B-\mathbf{mod}\,, N\mapsto N\uparrow^B\,
\end{align*}
as in \ref{noth block nota}(b).
Whenever $\lambda$ is a good partition of $B$ and $\mu$ is a good partition of $\mathbfar{B}$, we have
$S^\lambda\downarrow_{\mathbfar{B}}\cong S^{\Phi(\lambda)}$ and $S^\mu\uparrow ^B\cong S^{\Phi^{-1}(\mu)}$,
by \cite[Lemma 3.3]{Scopes1995}.
Moreover, whenever $\lambda$ is a good $p$-regular partition of $B$ and $\mu$ is a good $p$-regular partition of $\mathbfar{B}$, by \cite[Corollary 3.7]{Scopes1995}, we have $D^\lambda\downarrow_{\mathbfar{B}}\cong D^{\Phi(\lambda)}$ and $D^\mu\uparrow ^B\cong D^{\Phi^{-1}(\mu)}$.
Moreover, by Lemma~\ref{lemma l and r} and \cite[Remark 4.4]{Scopes1995}, we also know that $\mathbfar{\gamma}$ is $p$-regular if and only
if $\mathbfeta$ is. If so, then $D^\mathbfeta\downarrow_{\mathbfar{B}}\cong D^{\mathbfar{\gamma}}$ and
$D^{\mathbfar{\gamma}}\uparrow ^B\cong D^{\mathbfeta}$. We set $\Phi(\mathbfeta):=\mathbfar{\gamma}$, in this case.
Analogously, $\gamma$ is $p$-regular if and only if $\mathbfar{\mathbfeta}$ is. If so, then
$D^\gamma\downarrow_{\mathbfar{B}}\cong D^{\mathbfar{\mathbfeta}}$ and
$D^{\mathbfar{\mathbfeta}}\uparrow ^B\cong D^{\gamma}$. We set $\Phi(\gamma):=\mathbfar{\mathbfeta}$, in this case.
Next let $\mathcal{M}$ be the full subcategory of $B-\mathbf{mod}$
whose objects do not have any composition factor isomorphic to $D^\alpha$. Analogously, let $\mathcal{N}$ be
the full subcategory of $\mathbfar{B}-\mathbf{mod}$ whose objects do not have any composition factor isomorphic to
$D^{\mathbfar{\alpha}}$. It is well known that the restriction of the functors $\res_{\mathbfar{B}}^B$ and $\ind_{\mathbfar{B}}^B$
yield an equivalence between the categories $\mathcal{M}$ and $\mathcal{N}$; for some further explanations see, for instance, \cite[Section 4]{DanzErdmann2012}.
On the combinatorial level, this equivalence entails the following bijection, which generalizes the map $\Phi$ in \ref{noth (2:1)} and which
we denote by $\Phi$ as well:
$$\Phi:\{\lambda\vdash_p n: D^\lambda\in\mathcal{M}\}\to \{\mu\vdash_p n-1: D^\mu\in\mathcal{N}\}\,.$$
It should be emphasized, however, that this latter bijection now preserves the lexicographic ordering only on good partitions, since we always have $\mathbfar{\alpha}>\mathbfar{\mathbfeta}>\mathbfar{\gamma}$ as well as $\alpha>\mathbfeta>\gamma$.
By \cite[Theorem I]{Scopes1995}, the dimension of the Ext-space of any pair of simple $B$-modules (respectively, $\mathbfar{B}$-modules)
is either $0$ or $1$.
So, since block restriction and block induction are exact functors, for any pair of simple modules $D^{\lambda_1}$ and
$D^{\lambda_2}$ of $\mathcal{M}$, we get an $F$-vector space isomorphism
\mathbfegin{align*}
\Ext_{B}^1(D^{\lambda_1},D^{\lambda_2})&\cong \Ext_{\mathcal{M}}^1(D^{\lambda_1},D^{\lambda_2})\cong \Ext_{\mathcal{N}}^1(D^{\Phi(\lambda_1)},D^{\Phi(\lambda_2)})\\
&\cong \Ext_{\mathbfar{B}}^1(D^{\Phi(\lambda_1)},D^{\Phi(\lambda_2)})\,.
\end{align*}
Consequently, suppose we already know the Ext-quiver of $\mathbfar{B}$. In order to determine the Ext-quiver of $B$, we may in fact proceed as follows:
first remove $\mathbfar{\alpha}$ and all edges connected to $\mathbfar{\alpha}$, and replace every partition $\mu\neq \mathbfar{\alpha}$ of $\mathbfar{B}$ by $\Phi^{-1}(\mu)$.
Then determine those $p$-regular partitions $\lambda$ of $B$ such that
$\Ext_{B}^1(D^{\lambda},D^{\alpha})\neq \{0\}$; recall that the latter Ext-space is at most $1$-dimensional.
This yields the edges connected to $\alpha$. To this end, we record the following lemma.
\end{noth}
\mathbfegin{lemma}\label{lemma Ext alpha}
Suppose that $B$ and $\mathbfar{B}$ are blocks of $F\mathfrak{S}_n$ and $F\mathfrak{S}_{n-1}$, respectively, of weight $2$ such that
$(B,\mathbfar{B})$ is a $(2:1)$-pair.
\mathbfegin{enumerate}
\item[{\rm (a)}] Let $\mu$ be a $p$-regular partition of $n-1$ with $p$-core $\kappa_{\mathbfar{B}}$. Then $\Ext_{\mathbfar{B}}^1(D^\mu,D^{\mathbfar{\alpha}})\neq \{0\}$
if and only if
\mathbfegin{enumerate}
\item[\rm{(i)}] either $\mu=\mathbfar{\mathbfeta}$,
\item[\rm{(ii)}] or $\mu>\mathbfar{\alpha}$, $[S^{\mathbfar{\alpha}}:D^\mu]\neq 0$ and $|\partial(\mathbfar{\alpha})-\partial(\mu)|=1$.
\end{enumerate}
\item[\rm{(b)}] Let $\lambda$ be a $p$-regular partition of $n$ with $p$-core $\kappa_B$. Then
$\Ext_B^1(D^\lambda,D^{\alpha})\neq \{0\}$
if and only if
\mathbfegin{enumerate}
\item[\rm{(i)}] either $\lambda=\mathbfeta$,
\item[\rm{(ii)}] or $\lambda>\alpha$, $[S^{\alpha}:D^\lambda]\neq 0$ and $|\partial(\alpha)-\partial(\lambda)|=1$.
\end{enumerate}
\end{enumerate}
\end{lemma}
\mathbfegin{proof}
(a)\, Suppose first that $\mathbfar{\alpha}\geqslant \mu$. Then, by \cite[Theorem 6.1]{ChuangTan2001}, we have
$\Ext_{\mathbfar{B}}^1(D^\mu,D^{\mathbfar{\alpha}})\neq \{0\}$ if and only if $[S^\mu:D^{\mathbfar{\alpha}}]\neq 0$ and $|\partial(\mathbfar{\alpha})-\partial(\mu)|=1$.
By \cite[Lemma 4.3]{Scopes1995}, we further know that the only Specht $F\mathfrak{S}_{n-1}$-modules with a composition factor isomorphic to $D^{\mathbfar{\alpha}}$ are $S^{\mathbfar{\alpha}}$, $S^{\mathbfar{\mathbfeta}}$ and $S^{\mathbfar{\gamma}}$. Since, by Lemma~\ref{lemma l and r}, we have
$\partial(\mathbfar{\gamma})=\partial(\mathbfar{\alpha})=\partial(\mathbfar{\mathbfeta})+1$, we deduce that
$\Ext_{\mathbfar{B}}^1(D^\mu,D^{\mathbfar{\alpha}})\neq \{0\}$ if and only if $\mu=\mathbfar{\mathbfeta}$ and $\mathbfar{\mathbfeta}$ is $p$-regular.
So we may suppose that $\mu>\mathbfar{\alpha}$. By \cite[Theorem 6.1]{ChuangTan2001} again, we deduce that
$\Ext_{\mathbfar{B}}^1(D^\mu,D^{\mathbfar{\alpha}})\neq \{0\}$ if and only if $\mu$ satisfies condition (ii).
Analogously one obtains assertion (b), also by \cite[Theorem 6.1]{ChuangTan2001}, \cite[Lemma 4.3]{Scopes1995}
and Lemma~\ref{lemma l and r}.
\end{proof}
Let $(B,\mathbfar{B})$ be a $(2:1)$-pair of blocks of $F\mathfrak{S}_n$ and
$F\mathfrak{S}_{n-1}$, respectively, of weight $2$.
Suppose that $\kappa_B=(\kappa_1,\ldots,\kappa_t)$. As in Section~\ref{sec weight 2}, we display $\kappa_B$, $\kappa_{\mathbfar{B}}$ as
well as all partitions of $B$ and $\mathbfar{B}$ on a fixed $[m_1,\ldots,m_p]$-abacus with $s+2p$ beads, where $s\geqslant t$.
As in \ref{noth (2:1)}, we denote by $\mathbfar{\alpha},\mathbfar{\mathbfeta},\mathbfar{\gamma},\alpha,\mathbfeta,\gamma$ the exceptional partitions
associated to the $(2:1)$-pair $(B,\mathbfar{B})$. Lastly, recall the notation introduced in Remark~\ref{rem l and r} and \ref{noth partial}.
The main aim of the next theorem is to give detailed information on the Loewy structures of the Specht modules of
$B$ and $\mathbfar{B}$, respectively, labelled by the exceptional partitions. This will be the crucial ingredient for our
inductive proof of Theorem~\ref{thm main1}. It will turn out that this heavily depends on
whether the exceptional partitions are $p$-regular or $p$-restricted. To this end, we shall again distinguish the cases (1)--(6)
as in Remark~\ref{rem l and r}.
For ease of notation, in Theorem~\ref{thm Loewy except} below, we shall often
identify simple $B$-modules and simple $\mathbfar{B}$-modules with their labelling partitions.
We expect the Loewy structures of the Specht modules treated in Theorem~\ref{thm Loewy except} to be known, but we have not seen them in print so far.
\mathbfegin{thm}\label{thm Loewy except}
With the above notation, the Specht $F\mathfrak{S}_{n-1}$-modules $S^{\alpha},S^{\mathbfeta}$ and $S^{\gamma}$
and the Specht $F\mathfrak{S}_n$-modules $S^{\mathbfar{\alpha}},S^{\mathbfar{\mathbfeta}}$ and $S^{\mathbfar{\gamma}}$, respectively,
have the following Loewy structures.
\mathbfegin{center}
\mathbfegin{tabular}{|c||c|c|c||c|c|c|}\hat{l}ine
case & $S^{\mathbfar{\alpha}}$ & $S^{\mathbfar{\mathbfeta}}$& $S^{\mathbfar{\gamma}}$&$S^\alpha$ &$S^\mathbfeta$& $S^\gamma$\\\hat{l}ine\hat{l}ine
{\rm (1)}& $\mathbfegin{matrix} \mathbfar{\alpha}\\\mathbfar{Z}\\ \\\end{matrix}$ & $\mathbfegin{matrix} \mathbfar{\mathbfeta}\\\mathbfar{\alpha}\oplus\mathbfar{Y}\\ \\\end{matrix}$ &$\mathbfegin{matrix}\mathbfar{\gamma}\\\mathbfar{\mathbfeta}\oplus\mathbfar{Z}\\\mathbfar{\alpha}\end{matrix}$ & $\mathbfegin{matrix} \alpha\\ Y\\ \\\end{matrix}$ & $\mathbfegin{matrix} \mathbfeta \\\alpha\oplus Z\\ \\\end{matrix}$ &$\mathbfegin{matrix}\gamma\\\mathbfeta\oplus Y\\\alpha\end{matrix}$ \\\hat{l}ine\hat{l}ine
{\rm (2)} & $\mathbfegin{matrix} \mathbfar{\alpha}\\\mathbfar{\mathbfeta}_+\oplus \mathbfar{Z}\\ \\\end{matrix}$ &$\mathbfegin{matrix} \mathbfar{\mathbfeta}\\\mathbfar{\alpha}\oplus\mathbfar{Y}\\ \mathbfar{\mathbfeta}_+\end{matrix}$ &$\mathbfegin{matrix}\mathbfar{\gamma}\\\mathbfar{\mathbfeta}\oplus\mathbfar{Z}\\\mathbfar{\alpha}\end{matrix}$&$\mathbfegin{matrix} \alpha\\ Y\\ \alpha_+\\\end{matrix}$ &$\mathbfegin{matrix} \mathbfeta\\\alpha\oplus Z\\ \\\end{matrix}$ &$\mathbfegin{matrix}\gamma\\\mathbfeta\oplus Y\\\alpha\end{matrix}$\\\hat{l}ine\hat{l}ine
{\rm (3)} & $\mathbfegin{matrix} \mathbfar{\alpha}\\\mathbfar{\mathbfeta}_+\\ \\\end{matrix}$ &$\mathbfegin{matrix} \mathbfar{\mathbfeta}\\\mathbfar{\alpha}\oplus\mathbfar{Y}\\ \mathbfar{\mathbfeta}_+\end{matrix}$ & $\mathbfegin{matrix}\\ \mathbfar{\mathbfeta}\\\mathbfar{\alpha}\end{matrix}$& $\mathbfegin{matrix}\alpha\\ Y\\ \alpha_+\end{matrix}$&$\mathbfegin{matrix} \alpha\end{matrix}$&$\mathbfegin{matrix}\gamma\\ Y \\ \alpha\end{matrix}$\\\hat{l}ine\hat{l}ine
{\rm (4)} &$\mathbfegin{matrix} \mathbfar{\alpha}\\\mathbfar{\mathbfeta}_+\oplus \mathbfar{Z}\\\mathbfar{\alpha}_+\end{matrix}$& $\mathbfegin{matrix} \mathbfar{\mathbfeta}\\\mathbfar{\alpha}\oplus\mathbfar{Y}\\ \mathbfar{\mathbfeta}_+\end{matrix}$ & $\mathbfegin{matrix}\\ \mathbfar{\mathbfeta}\oplus\mathbfar{Z}\\\mathbfar{\alpha}\end{matrix}$& $\mathbfegin{matrix} \alpha\\\mathbfeta_+\oplus Y\\ \alpha_+\end{matrix}$& $\mathbfegin{matrix} \\\alpha\oplus Z\\ \mathbfeta_+\end{matrix}$ & $\mathbfegin{matrix}\gamma \\ Y\\\alpha\end{matrix}$ \\\hat{l}ine
\end{tabular}
\mathbfegin{tabular}{|c||c|c|c||c|c|c|}\hat{l}ine
{\rm (5)}& $\mathbfegin{matrix} \mathbfar{\alpha}\\\mathbfar{\mathbfeta}_+\oplus \mathbfar{Z}\\\mathbfar{\alpha}_+\end{matrix}$ & $ \mathbfegin{matrix} \\\mathbfar{\alpha}\oplus\mathbfar{Y}\\ \mathbfar{\mathbfeta}_+\end{matrix}$ &$\mathbfegin{matrix}\\ \mathbfar{Z}\\\mathbfar{\alpha}\end{matrix}$&$\mathbfegin{matrix} \alpha\\\mathbfeta_+\oplus Y\\\alpha_+\end{matrix}$ & $ \mathbfegin{matrix} \\\alpha\oplus Z\\ \mathbfeta_+\end{matrix}$ &$\mathbfegin{matrix}\\ Y\\\alpha\end{matrix}$\\\hat{l}ine\hat{l}ine
{\rm (6)} & $\mathbfegin{matrix} \mathbfar{\alpha}\\\mathbfar{\mathbfeta}_+\oplus \mathbfar{Z}\\\mathbfar{\alpha}_+\end{matrix}$ & $ \mathbfegin{matrix} \mathbfar{\mathbfeta}\\\mathbfar{\alpha}\oplus\mathbfar{Y}\\ \mathbfar{\mathbfeta}_+\end{matrix}$ &$\mathbfegin{matrix}\mathbfar{\gamma}\\\mathbfar{\mathbfeta}\oplus\mathbfar{Z}\\\mathbfar{\alpha}\end{matrix}$&
$\mathbfegin{matrix} \alpha\\\mathbfeta_+\oplus Y\\\alpha_+\end{matrix}$ & $ \mathbfegin{matrix} \mathbfeta\\\alpha\oplus Z\\ \mathbfeta_+\end{matrix}$ &$\mathbfegin{matrix}\gamma\\\mathbfeta\oplus Y\\\alpha\end{matrix}$\\\hat{l}ine
\end{tabular}
\end{center}
Here $\mathbfar{Y}$ and $\mathbfar{Z}$ are good semisimple $\mathbfar{B}$-modules, and $Y$ and $Z$ are good semisimple $B$-modules. Let $d:=\partial(\alpha)$.
If $D^\mu$ is a composition factor of $\mathbfar{Y}$ and $D^\rho$ is a composition factor of $\mathbfar{Z}$, then
$\partial(\mu)\in\{d-1,d+1\}$ and $\partial(\rho)\in \{d,d+2\}$.
Moreover, $Y\cong\mathbfar{Y}\uparrow^B$ and
$Z\cong\mathbfar{Z}\uparrow^B$.
If $D^\lambda$ is a composition factor of $Y$ and $D^\nu$ is a composition factor of $Z$, then
$\partial(\lambda)\in\{d-1,d+1\}$ and $\partial(\nu)\in \{d,d+2\}$.
The partition $\mathbfar{\alpha}_+$ exists if and only if $\mathbfeta_+$ does; if so, then both partitions are good
and
$\mathbfar{\alpha}_+=\Phi(\mathbfeta_+)$. The partition $\mathbfar{\mathbfeta}_+$ exists if and only if $\alpha_+$ does; if so, then both partitions are good and
$\mathbfar{\mathbfeta}_+=\Phi(\alpha_+)$. In the cases {\rm (4), (5)} and {\rm (6)}, one also has $[\mathbfar{Y}:D^{\mathbfar{\alpha}_+}]=0$.
\end{thm}
\mathbfegin{noth}\label{noth ingredients}{\mathbff Strategy of proof.}\,
Before proving Theorem~\ref{thm Loewy except}, we shall collect a number of important properties of
the blocks $B$ and $\mathbfar{B}$ and their modules that we shall use extensively.
(a)\, By \cite[Theorem I]{Scopes1995}, every principal indecomposable $B$-module (respectively, $\mathbfar{B}$-module)
has Loewy length 5 and is \textit{stable}, that is, its Loewy and socle series coincide. Moreover, all Specht modules in $B$ and $\mathbfar{B}$, respectively,
are multiplicity-free. The $\Ext^1$-space between any two simple $B$-modules (respectively, simple $\mathbfar{B}$-modules) is at most one-dimensional,
and there are no self-extensions of any simple $B$-module (respectively, simple $\mathbfar{B}$-module).
As well, by \cite[Remark 4.4]{Scopes1995}, the projective cover $P^\alpha$ of $D^\alpha$ has a Specht filtration with quotients, from top
to bottom, isomorphic to $S^\alpha$, $S^\mathbfeta$ and $S^\gamma$. Analogously, the projective cover $P^{\mathbfar{\alpha}}$ of $D^{\mathbfar{\alpha}}$
has a Specht filtration with quotients, from top
to bottom, isomorphic to $S^{\mathbfar{\alpha}}$, $S^{\mathbfar{\mathbfeta}}$ and $S^{\mathbfar{\gamma}}$.
(b)\, By \cite[Theorem~6.1]{ChuangTan2001}, the Ext-quiver of $B$ is bipartite; more precisely, if $D^\lambda$ and $D^\mu$ are
simple $B$-modules with $\Ext^1_B(D^\lambda,D^\mu)\neq \{0\}$, then $\partial(\lambda)\in\{\partial(\mu)-1,\partial(\mu)+1\}$. The analogous
statement holds for $\mathbfar{B}$.
(c)\, By \cite[Proposition 6.2]{ChuangTan2001}, every Specht module $S^\lambda$ in $B$ (respectively, in $\mathbfar{B}$) has
Loewy length at most 3, and has Loewy length 3 if and only if $\lambda$ is both $p$-regular and $p$-restricted.
(d)\, By \cite[Lemma 3.3]{Scopes1995}, one has
$$(S^{\mathbfar{\alpha}})\uparrow^B\sim S^\alpha\oplus S^\mathbfeta\,, \quad (S^{\mathbfar{\mathbfeta}})\uparrow^B\sim S^\alpha\oplus S^\gamma\;, \quad (S^{\mathbfar{\gamma}})\uparrow^B\sim S^\mathbfeta\oplus S^\gamma\,.$$
(e)\, Our general strategy towards proving the assertions of Theorem~\ref{thm Loewy except} will be as follows: with (a)-(c), we shall already
deduce the Loewy lengths and most of the Loewy structure of the Specht modules in question. Part (d) will then provide us with
systems of linear equations from which we shall
obtain the claimed connections between the composition factors of the exceptional Specht $\mathbfar{B}$-modules and
those of the exceptional Specht $B$-modules.
\end{noth}
\mathbfegin{proof}(of Theorem~\ref{thm Loewy except})
We prove the assertions case by case, starting with case (6), which is in some sense the most general one. Throughout this proof, let
$d:=\partial(\alpha)=\partial(\mathbfar{\mathbfeta})$.
Recall from (\ref{eqn Loewy}) in \ref{noth block nota} our notation for Loewy structures.
\underline{Case (6):} By Lemma~\ref{lemma l and r}, we know that all exceptional partitions of $B$ and $\mathbfar{B}$, respectively,
are both $p$-regular and $p$-restricted. By \ref{noth ingredients}(c), the Specht modules
labelled by the exceptional partitions must, thus, have Loewy length 3. We examine $S^{\mathbfar{\alpha}}$, $S^{\mathbfar{\mathbfeta}}$ and
$S^{\mathbfar{\gamma}}$ first.
By \ref{noth part}(a) and \ref{noth partial}, we know that $\Soc(S^{\mathbfar{\alpha}})\cong D^{\mathbfar{\alpha}_+}$,
$\Soc(S^{\mathbfar{\mathbfeta}})\cong D^{\mathbfar{\mathbfeta}_+}$ and $\Soc(S^{\mathbfar{\gamma}})\cong D^{\mathbfar{\gamma}_+}$. Hence
$$S^{\mathbfar{\alpha}}\ \approx \ \mathbfegin{matrix}D^{\mathbfar{\alpha}}\\ H^{\mathbfar{\alpha}}\\ D^{\mathbfar{\alpha}_+}\end{matrix}\,,\quad S^{\mathbfar{\mathbfeta}}\ \approx \ \mathbfegin{matrix}D^{\mathbfar{\mathbfeta}}\\ H^{\mathbfar{\mathbfeta}}\\ D^{\mathbfar{\mathbfeta}_+}\end{matrix}\,,\quad S^{\mathbfar{\gamma}}\ \approx \ \mathbfegin{matrix}D^{\mathbfar{\gamma}}\\ H^{\mathbfar{\gamma}}\\ D^{\mathbfar{\gamma}_+}\end{matrix}\,,$$
for non-zero semisimple $\mathbfar{B}$-modules $H^{\mathbfar{\alpha}}, H^{\mathbfar{\mathbfeta}}$ and $H^{\mathbfar{\gamma}}$.
By \ref{noth ingredients}(a), the projective cover $P^{\mathbfar{\alpha}}$ of $D^{\mathbfar{\alpha}}$ has a Specht filtration with quotients, from top to bottom, isomorphic to $S^{\mathbfar{\alpha}}, S^{\mathbfar{\mathbfeta}}$ and $S^{\mathbfar{\gamma}}$. Hence, in particular,
$D^{\mathbfar{\gamma}_+}\cong \Soc(S^{\mathbfar{\gamma}})\cong \Soc(P^{\mathbfar{\alpha}})\cong D^{\mathbfar{\alpha}}$ and
$\mathbfar{\gamma}_+=\mathbfar{\alpha}$. Since $P^{\mathbfar{\alpha}}$ is stable, we further deduce that $D^{\mathbfar{\gamma}}$ is isomorphic to a
submodule of $\Rad^2(P^{\mathbfar{\alpha}})/\Rad^3(P^{\mathbfar{\alpha}})$, and $H^{\mathbfar{\gamma}}$ is isomorphic to a submodule of $\Rad^3(P^{\mathbfar{\alpha}})/\Rad^4(P^{\mathbfar{\alpha}})$.
Analogously, we conclude that $H^{\mathbfar{\alpha}}$ is isomorphic to a submodule of the second Loewy layer
and $D^{\mathbfar{\alpha}_+}$ is isomorphic to a submodule of the third Loewy layer of $P^{\mathbfar{\alpha}}$.
Next, by \cite[Theorem 6.1]{ChuangTan2001} and Lemma~\ref{lemma Ext alpha}, we know that $\dim(\Ext^1_{\mathbfar{B}}(D^{\mathbfar{\alpha}},D^{\mathbfar{\mathbfeta}}))=1$ and $[S^{\mathbfar{\mathbfeta}}:D^{\mathbfar{\alpha}}]=1$. Since $S^{\mathbfar{\mathbfeta}}$ is multiplicity-free, this implies
$H^{\mathbfar{\mathbfeta}}\cong D^{\mathbfar{\alpha}}\oplus \mathbfar{Y}$, for some good semisimple $\mathbfar{B}$-module $\mathbfar{Y}$. Again using the fact that the quiver of $\mathbfar{B}$ is bipartite, we so far get
\mathbfegin{equation}\label{eqn Palpha}
P^{\mathbfar{\alpha}}\; \ \approx\; \ \mathbfegin{matrix}D^{\mathbfar{\alpha}}\\H^{\mathbfar{\alpha}}\oplus D^{\mathbfar{\mathbfeta}}\\ D^{\mathbfar{\alpha}_+}\oplus H^{\mathbfar{\mathbfeta}}\oplus D^{\mathbfar{\gamma}}\\ D^{\mathbfar{\mathbfeta}_+}\oplus H^{\mathbfar{\gamma}}\\ D^{\mathbfar{\gamma}_+}\end{matrix}\; \ \approx\; \ \mathbfegin{matrix}D^{\mathbfar{\alpha}}\\H^{\mathbfar{\alpha}}\oplus D^{\mathbfar{\mathbfeta}}\\ D^{\mathbfar{\alpha}_+}\oplus D^{\mathbfar{\alpha}}\oplus \mathbfar{Y}\oplus D^{\mathbfar{\gamma}}\\ D^{\mathbfar{\mathbfeta}_+}\oplus H^{\mathbfar{\gamma}}\\ D^{\mathbfar{\alpha}}\end{matrix}\,.
\end{equation}
By \cite[Lemma 4.3]{Scopes1995}, $[S^{\mathbfar{\gamma}}:D^{\mathbfar{\mathbfeta}}]=1=[S^{\mathbfar{\gamma}}:D^{\mathbfar{\alpha}}]$, thus
$H^{\mathbfar{\gamma}}\cong D^{\mathbfar{\mathbfeta}}\oplus \mathbfar{Z}$, for some good semisimple $\mathbfar{B}$-module $\mathbfar{Z}$. The assertion concerning the $\partial$-values of
the composition factors of $\mathbfar{Y}$ and $\mathbfar{Z}$ follows from \cite[Theorem 6.1(3)]{ChuangTan2001}.
Since $P^{\mathbfar{\alpha}}$ is stable, this now gives $H^{\mathbfar{\alpha}}\cong D^{\mathbfar{\mathbfeta}_+}\oplus \mathbfar{Z}$. Since all Specht $\mathbfar{B}$-modules are multiplicity-free, we also see that $\mathbfar{\alpha}_+$ and $\mathbfar{\mathbfeta}_+$ are good.
It remains to show that $[\mathbfar{Y}:D^{\mathbfar{\alpha}_+}]=0$. Assume not, so that $[S^{\mathbfar{\mathbfeta}}:D^{\mathbfar{\alpha}_+}]=1$ and $\mathbfar{\alpha}_+\rhd \mathbfar{\mathbfeta}$.
Furthermore, by (\ref{eqn Palpha}), we get $\Ext^1_{\mathbfar{B}}(D^{\mathbfar{\mathbfeta}},D^{\mathbfar{\alpha}_+})\neq \{0\}$
implying $\mathbfar{\alpha}_+\rhd \mathbfar{\mathbfeta}\rhd \mathbfar{\alpha}$, by \cite[Theorem 6.1(3)]{ChuangTan2001}, a contradiction, since $\mathbfar{\alpha}\rhd \mathbfar{\mathbfeta}$. This completes the proof concerning the Loewy structure of $S^{\mathbfar{\alpha}}$, $S^{\mathbfar{\mathbfeta}}$ and
$S^{\mathbfar{\gamma}}$, and gives
\mathbfegin{equation}\label{eqn Salphabar}
S^{\mathbfar{\alpha}}\ \approx \ \mathbfegin{matrix}\alpha\\ \mathbfar{\mathbfeta}_+\oplus \mathbfar{Z}\\\mathbfar{\alpha}_+\end{matrix}\,,\ \quad S^{\mathbfar{\mathbfeta}}\ \approx \ \mathbfegin{matrix}\mathbfar{\mathbfeta}\\\mathbfar{\alpha}\oplus \mathbfar{Y}\\\mathbfar{\mathbfeta}_+\end{matrix}\,,\ \quad S^{\mathbfar{\gamma}}\ \approx \ \mathbfegin{matrix}\mathbfar{\gamma}\\ \mathbfar{\mathbfeta}\oplus \mathbfar{Z}\\\mathbfar{\alpha}\end{matrix}\,.
\end{equation}
Next we consider $S^\alpha$, $S^{\mathbfeta}$ and
$S^{\gamma}$, still in the case (6). By \ref{noth ingredients}(a), the projective cover $P^\alpha$ of $D^\alpha$ has a
Specht filtration with quotients, from top to bottom, isomorphic to $S^\alpha$, $S^\mathbfeta$ and $S^\gamma$. The above arguments now
work completely analogously and give
\mathbfegin{equation}\label{eqn Salpha}
S^\alpha\ \approx \ \mathbfegin{matrix}\alpha\\ \mathbfeta_+\oplus R\\\alpha_+\end{matrix}\,,\ \quad S^\mathbfeta\ \approx \ \mathbfegin{matrix}\mathbfeta\\\alpha\oplus T\\\mathbfeta_+\end{matrix}\,,\quad \ S^\gamma \ \approx \ \mathbfegin{matrix}\gamma\\ \mathbfeta\oplus R\\\alpha\end{matrix}\,,
\end{equation}
for good semisimple $B$-modules $R$ and $T$ that are disjoint. Moreover, $\alpha_+$, $\mathbfeta_+$ are good, and
$[T:D^{\alpha_+}]=0$.
Every composition factor of $R$ is labelled by a $p$-regular partition with $\partial$-value $d-1$ or $d+1$, every composition
factor of $T$ is labelled by a $p$-regular partition with $\partial$-value $d$ or $d+2$.
To complete the proof of case (6), we
need to show that
\mathbfegin{equation}\label{eqn R=Y}
R\cong Y\;, T\cong Z\;, (D^{\mathbfar{\alpha}_+})\uparrow^B\cong D^{\mathbfeta_+}\;, (D^{\mathbfar{\mathbfeta}_+})\uparrow^B\cong D^{\alpha_+}\,.
\end{equation}
To this end, we
exploit the partial Scopes equivalence between $B$ and $\mathbfar{B}$ given by block restriction and block induction, as explained in \ref{noth partial Scopes}. We set $Y:=\mathbfar{Y}\uparrow^B$ and $Z:=\mathbfar{Z}\uparrow^B$. By \ref{noth partial Scopes}, we know that
$D^{\mathbfar{\mathbfeta}}\uparrow^B\cong D^{\gamma}$ and $D^{\mathbfar{\gamma}}\uparrow^B\cong D^\mathbfeta$. Therefore, with
\ref{noth ingredients}(d), we obtain the following
\mathbfegin{align}\label{eqn upalpha}
S^\alpha\oplus S^\mathbfeta &\sim (S^{\mathbfar{\alpha}})\uparrow^B \sim (D^{\mathbfar{\alpha}})\uparrow^B\oplus (D^{\mathbfar{\mathbfeta}_+})\uparrow^B\oplus Z\oplus (D^{\mathbfar{\alpha}_+})\uparrow^B\\ \label{eqn upbeta}
S^\alpha\oplus S^\gamma&\sim (S^{\mathbfar{\mathbfeta}})\uparrow^B\sim D^\gamma\oplus (D^{\mathbfar{\alpha}})\uparrow^B\oplus (D^{\mathbfar{\mathbfeta}_+})\uparrow^B\oplus Y\\ \label{eqn upgamma}
S^\mathbfeta\oplus S^\gamma &\sim (S^{\mathbfar{\gamma}})\uparrow^B \sim D^\mathbfeta\oplus D^\gamma\oplus Z\oplus (D^{\mathbfar{\alpha}})\uparrow^B\,.
\end{align}
To exploit these identities, recall from Lemma~\ref{lemma l and r} that
$\partial(\mathbfar{\alpha})=d+1=\partial(\mathbfar{\gamma})=\partial(\mathbfeta)$ and $\partial(\mathbfar{\mathbfeta})=d=\partial(\alpha)=\partial(\gamma)$. By \ref{noth partial}, we have $\partial(\theta)=\partial(\theta_+)$, for every $p$-restricted partition $\theta$ belonging to $B$ or $\mathbfar{B}$.
By Lemma~\ref{lemma colour good}, we also know that $\partial(\mu)=\partial(\Phi^{-1}(\mu))$, for every good $p$-regular partition of $\mathbfar{B}$.
Subtracting (\ref{eqn upalpha}) from (\ref{eqn upbeta}) (in the Grothendieck group) and using (\ref{eqn Salpha}), we get
$$T\oplus Y\oplus D^{\mathbfeta_+}\sim R\oplus Z\oplus (D^{\mathbfar{\alpha}_+})\uparrow^B\,.$$
As we have proved above, $\mathbfar{Z}\oplus D^{\mathbfar{\alpha}_+}$ and $\mathbfar{Y}$ are disjoint and good. Thus the same holds for $Z\oplus (D^{\mathbfar{\alpha}_+})\uparrow^B$ and $Y$.
As well, $R$ and $T\oplus D^{\mathbfeta_+}$ are disjoint. Therefore, we must have $T\oplus D^{\mathbfeta_+}\sim Z\oplus (D^{\mathbfar{\alpha}_+})\uparrow^B$
and $R\cong Y$. Comparing the $\partial$-values, we deduce $D^{\mathbfeta_+}\cong (D^{\mathbfar{\alpha}_+})\uparrow^B$
and $T\cong Z$.
It remains to show that $(D^{\mathbfar{\mathbfeta}_+})\uparrow^B\cong D^{\alpha_+}$. Subtracting (\ref{eqn upbeta}) from
(\ref{eqn upgamma}) (in the Grothendieck group) and using what we have just proved about $S^\mathbfeta$, we get
$S^\alpha\sim D^\alpha\oplus D^{\mathbfeta_+}\oplus R\oplus (D^{\mathbfar{\mathbfeta}_+})\uparrow^B$. On the other hand, by (\ref{eqn Salpha}),
$S^\alpha\sim D^\alpha\oplus D^{\mathbfeta_+}\oplus R\oplus D^{\alpha_+}$, hence $(D^{\mathbfar{\mathbfeta}_+})\uparrow^B\cong D^{\alpha_+}$
and $\Phi(\alpha_+)=\mathbfar{\mathbfeta}_+$.
This completes the proof of the assertion of the theorem in the case (6).
\mathbfigskip
\underline{Case (2):} By Lemma~\ref{lemma l and r}, the partitions $\mathbfar{\alpha}$ and $\mathbfeta$ are $p$-regular and not $p$-restricted.
The remaining exceptional partitions are both $p$-regular and $p$-restricted. Using \ref{noth ingredients} and
arguing as in the proof of case (6) above, we this time deduce
\mathbfegin{equation}\label{eqn Specht case (2)}
S^{\mathbfar{\alpha}}\ \approx \ \mathbfegin{matrix} \mathbfar{\alpha}\\\mathbfar{\mathbfeta}_+\oplus \mathbfar{Z}\\ \\\end{matrix}\;, \ \quad S^{\mathbfar{\mathbfeta}}\ \approx \ \mathbfegin{matrix} \mathbfar{\mathbfeta}\\\mathbfar{\alpha}\oplus\mathbfar{Y}\\ \mathbfar{\mathbfeta}_+\end{matrix}\;, \ \quad S^{\mathbfar{\gamma}}\ \approx \ \mathbfegin{matrix}\mathbfar{\gamma}\\\mathbfar{\mathbfeta}\oplus\mathbfar{Z}\\\mathbfar{\alpha}\end{matrix}\,
\end{equation}
as well as
\mathbfegin{equation}\label{eqn Specht B case (2)}
S^{\alpha}\ \approx \ \mathbfegin{matrix} \alpha\\ R\\ \alpha_+\end{matrix}\;, \ \quad S^{\mathbfeta}\ \approx \ \mathbfegin{matrix} \mathbfeta\\\alpha\oplus T\\ \\\end{matrix}\;, \ \quad S^{\gamma}\ \approx \ \mathbfegin{matrix}\gamma\\\mathbfeta\oplus R\\\alpha\end{matrix}\,.
\end{equation}
Here $\mathbfar{Y}$ and $\mathbfar{Z}$ are good, semisimple and disjoint. Moreover, $\mathbfar{\mathbfeta}_+$ is good.
The assertion concerning $\partial$-values follows from \cite[Theorem 6.1(3)]{ChuangTan2001}.
As well, $R$ and $T$ are good, semisimple and disjoint, and $\alpha_+$ is good. Every composition factor of $R$ is labelled by a $p$-regular partition with $\partial$-value $d-1$ or $d+1$, every composition
factor of $T$ is labelled by a $p$-regular partition with $\partial$-value $d$ or $d+2$.
In the following, set $Y:=\mathbfar{Y}\uparrow^B$, $Z:=\mathbfar{Z}\uparrow^B$ and
$D^\lambda:=(D^{\mathbfar{\mathbfeta}_+})\uparrow^B$.
Again we have $(D^{\mathbfar{\mathbfeta}})\uparrow^B\cong D^\gamma$ and $(D^{\mathbfar{\gamma}})\uparrow^B\cong D^\mathbfeta$
It remains to show that $Y\sim R$, $Z\sim T$ and $\Phi(\alpha_+)=\mathbfar{\mathbfeta}_+$.
From \ref{noth ingredients}(c) and (\ref{eqn Specht B case (2)}) we get
\mathbfegin{align}\label{eqn upalpha 2}
S^\alpha\oplus S^\mathbfeta&\sim (S^{\mathbfar{\alpha}})\uparrow^B\sim (D^{\mathbfar{\alpha}})\uparrow^B\oplus (D^{\mathbfar{\mathbfeta}_+})\uparrow^B\oplus Z\,\\ \label{eqn upbeta 2}
S^\alpha\oplus S^\gamma&\sim (S^{\mathbfar{\mathbfeta}})\uparrow^B\sim D^\gamma\oplus (D^{\mathbfar{\alpha}})\uparrow^B\oplus Y\oplus (D^{\mathbfar{\mathbfeta}_+})\uparrow^B\, \\ \label{eqn upgamma 2}
S^\mathbfeta\oplus S^\gamma& \sim (S^{\mathbfar{\gamma}})\uparrow^B\sim D^\mathbfeta\oplus D^\gamma\oplus Z\oplus (D^{\mathbfar{\alpha}})\uparrow^B\,.
\end{align}
Subtracting (\ref{eqn upalpha 2}) from (\ref{eqn upbeta 2}) in the Grothendieck group and using (\ref{eqn Specht case (2)}) and
(\ref{eqn Specht B case (2)}), we see that $Y\oplus T\sim R\oplus Z$. Since $\mathbfar{Y}$ and $\mathbfar{Z}$ are disjoint, so are $Y$ and $Z$.
Since also $R$ and $T$ are disjoint, we must have $T\sim Z$ and $Y\sim R$.
Lastly, we subtract (\ref{eqn upbeta 2}) from (\ref{eqn upgamma 2}). Together with what we have just shown and (\ref{eqn Salpha})
this gives $D^{\alpha_+}\oplus Y\sim D^\mathbfeta\oplus Z\sim (D^{\mathbfar{\mathbfeta}_+})\uparrow^B\oplus Y$,
thus $D^{\alpha_+}\cong (D^{\mathbfar{\mathbfeta}_+})\uparrow^B$ and $\Phi(\alpha_+)=\mathbfar{\mathbfeta}_+$.
\underline{Case (4):} By Lemma~\ref{lemma l and r}, the partitions $\mathbfeta$ and $\mathbfar{\gamma}$ are $p$-restricted, but not $p$-regular.
The remaining exceptional partitions are both $p$-regular and $p$-restricted. From \ref{noth ingredients}(a)-(c) we get
\mathbfegin{equation}\label{eqn Specht case (4)}
S^{\mathbfar{\alpha}}\ \approx \ \mathbfegin{matrix} \mathbfar{\alpha}\\\mathbfar{\mathbfeta}_+\oplus \mathbfar{Z}\\ \mathbfar{\alpha}_+\end{matrix}\;, \ \quad S^{\mathbfar{\mathbfeta}}\ \approx \ \mathbfegin{matrix} \mathbfar{\mathbfeta}\\\mathbfar{\alpha}\oplus\mathbfar{Y}\\ \mathbfar{\mathbfeta}_+\end{matrix}\;,\ \quad S^{\mathbfar{\gamma}}\ \approx \ \mathbfegin{matrix}\\\mathbfar{\mathbfeta}\oplus\mathbfar{Z}\\\mathbfar{\alpha}\end{matrix}\,
\end{equation}
and
\mathbfegin{equation}\label{eqn Specht B case (4)}
S^{\alpha}\ \approx \ \mathbfegin{matrix} \alpha\\ R\oplus \mathbfeta_+\\ \alpha_+\end{matrix}\;, \ \quad S^{\mathbfeta}\ \approx \ \mathbfegin{matrix} \\\alpha\oplus T\\ \mathbfeta_+\end{matrix}\;, \ \quad S^{\gamma}\ \approx \ \mathbfegin{matrix}\gamma\\ R\\\alpha\end{matrix}\,.
\end{equation}
Here $\mathbfar{Y}$ and $\mathbfar{Z}$, $R$ and $T$ are good and semisimple. Moreover, $\mathbfar{\alpha}_+$, $\mathbfar{\mathbfeta}_+$, $\alpha_+$ and $\mathbfeta_+$
are good. As in the proof of case (6) above, we see that $[\mathbfar{Y}:D^{\mathbfar{\alpha}_+}]=0$.
The assertion concerning the $\partial$-values of the composition factors of $\mathbfar{Y}$ and $\mathbfar{Z}$ follows from \cite[Theorem 6.1(3)]{ChuangTan2001}.
Since $S^\gamma$ has Loewy length $3$, we
further have $R\neq \{0\}$.
Let $\lambda:=\Phi^{-1}(\mathbfar{\mathbfeta}_+)$, $\mu:=\Phi^{-1}(\mathbfar{\alpha}_+)$, $Y:=\mathbfar{Y}\uparrow^B$ and $Z:=\mathbfar{Z}\uparrow^B$.
Then from \ref{noth ingredients}(d) we get
\mathbfegin{align}\label{eqn upalpha 4}
S^\alpha\oplus S^\mathbfeta&\sim (S^{\mathbfar{\alpha}})\uparrow^B\sim (D^{\mathbfar{\alpha}})\uparrow^B\oplus D^\lambda\oplus D^\mu\oplus Z\,\\ \label{eqn upbeta 4}
S^\alpha\oplus S^\gamma&\sim (S^{\mathbfar{\mathbfeta}})\uparrow^B\sim D^\gamma\oplus (D^{\mathbfar{\alpha}})\uparrow^B\oplus Y\oplus D^\lambda\, \\ \label{eqn upgamma 4}
S^\mathbfeta\oplus S^\gamma& \sim (S^{\mathbfar{\gamma}})\uparrow^B\sim D^\gamma\oplus Z\oplus (D^{\mathbfar{\alpha}})\uparrow^B\,.
\end{align}
We subtract (\ref{eqn upalpha 4}) from (\ref{eqn upbeta 4}) and use (\ref{eqn Specht case (4)}) and (\ref{eqn Specht B case (4)}) to
get $Y\oplus T\oplus D^{\mathbfeta_+}\sim R\oplus Z\oplus D^\mu$. By Lemma~\ref{lemma l and r} and Lemma~\ref{lemma colour good}, we
have $\partial(\mathbfeta_+)=d+1=\partial(\mu)$.
By \cite[Theorem 6.1(3)]{ChuangTan2001} and Lemma~\ref{lemma l and r} again,
every composition factor of $R$ has a labelling partition with $\partial$-value $d-1$ or $d+1$;
every composition factor of $T$ has a labelling partition with $\partial$-value $d$ or $d+2$. This implies $Z\cong T$ and $Y\oplus D^{\mathbfeta_+}\sim R\oplus D^\mu$. Next, consider the difference (\ref{eqn upgamma 4})-(\ref{eqn upbeta 4}). Together with (\ref{eqn Specht B case (4)})
this gives $T\oplus Y\oplus D^\lambda\sim Z\oplus R\oplus D^{\alpha_+}$, hence $Y\oplus D^\lambda\sim R\oplus D^{\alpha_+}$. By comparing
$\partial$-values, we deduce $Y\cong R$ and $\lambda=\alpha$, and then also $\mathbfeta_+=\mu$. This completes the proof
in the case (4).
\underline{Case (1):} In this case, by Lemma~\ref{lemma l and r}, the partitions $\mathbfar{\alpha}$, $\mathbfar{\mathbfeta}$, $\alpha$ and $\mathbfeta$ are
$p$-regular and not $p$-restricted, while $\mathbfar{\gamma}$ and $\gamma$ are both $p$-regular and $p$-restricted.
With \ref{noth ingredients}(a)-(c) we deduce
\mathbfegin{equation}\label{eqn Specht case (1)}
S^{\mathbfar{\alpha}}\ \approx \ \mathbfegin{matrix} \mathbfar{\alpha}\\\mathbfar{Z}\\ \\\end{matrix}\;, \ \quad S^{\mathbfar{\mathbfeta}}\ \approx \ \mathbfegin{matrix} \mathbfar{\mathbfeta}\\\mathbfar{\alpha}\oplus\mathbfar{Y}\\ \\\end{matrix}\;, \quad S^{\mathbfar{\gamma}}\ \approx \ \mathbfegin{matrix}\mathbfar{\gamma}\\\mathbfar{\mathbfeta}\oplus\mathbfar{Z}\\\mathbfar{\alpha}\end{matrix}\,
\end{equation}
and
\mathbfegin{equation}\label{eqn Specht B case (1)}
S^{\alpha}\ \approx \ \mathbfegin{matrix} \alpha\\ R\\ \\\end{matrix}\;, \ \quad S^{\mathbfeta}\ \approx \ \mathbfegin{matrix} \mathbfeta\\\alpha\oplus T\\ \\\end{matrix}\;, \ \quad S^{\gamma}\ \approx \ \mathbfegin{matrix}\gamma\\ \mathbfeta\oplus R\\\alpha\end{matrix}\,.
\end{equation}
Here $\mathbfar{Y}$, $\mathbfar{Z}$, $R$ and $T$ are semisimple and good. The assertion concerning the $\partial$-values of the composition factors of $\mathbfar{Y}$ and
$\mathbfar{Z}$ follows from \cite[Theorem 6.1(3)]{ChuangTan2001} and the Loewy structure of $P^{\mathbfar{\alpha}}$.
From \cite[Theorem 6.1(3)]{ChuangTan2001}, Lemma~\ref{lemma l and r} and the Loewy structure of $P^{\alpha}$ we further deduce
that every composition factor of $R$ has a labelling partition with $\partial$-value $d-1$ or $d+1$;
every composition factor of $T$ has a labelling partition with $\partial$-value $d$ or $d+2$.
Let $Y:=\mathbfar{Y}\uparrow^B$ and $Z:=\mathbfar{Z}\uparrow^B$. With \ref{noth ingredients}(d) we this time have
\mathbfegin{align}\label{eqn upalpha 1}
S^\alpha\oplus S^\mathbfeta&\sim (S^{\mathbfar{\alpha}})\uparrow^B\sim (D^{\mathbfar{\alpha}})\uparrow^B\oplus Z\,\\ \label{eqn upbeta 1}
S^\alpha\oplus S^\gamma&\sim (S^{\mathbfar{\mathbfeta}})\uparrow^B\sim D^\gamma\oplus (D^{\mathbfar{\alpha}})\uparrow^B\oplus Y\, \\ \label{eqn upgamma 1}
S^\mathbfeta\oplus S^\gamma& \sim (S^{\mathbfar{\gamma}})\uparrow^B\sim D^\mathbfeta\oplus D^\gamma\oplus Z\oplus (D^{\mathbfar{\alpha}})\uparrow^B\,.
\end{align}
Considering the difference(\ref{eqn upbeta 1})-(\ref{eqn upalpha 1}) and (\ref{eqn Specht B case (1)}) we see that $Y\oplus T\sim R\oplus Z$.
Comparing the $\partial$-values of the composition factors of $Y$, $Z$, $R$ and $T$, this forces $T\cong Z$ and $Y\cong R$.
\underline{Case (3):} By Lemma~\ref{lemma l and r}, we know that $\mathbfar{\alpha}$ is $p$-regular and not $p$-restricted, $\mathbfar{\mathbfeta}, \alpha$ and
$\gamma$
are both $p$-regular and $p$-restricted, $\mathbfar{\gamma}$ is $p$-restricted and not $p$-regular, and $\gamma$ is both $p$-regular and $p$-restricted.
Recall from Remark~\ref{rem l and r} that $S^\mathbfeta\cong D^\alpha$. Together with \ref{noth ingredients}(a)-(c) we get
\mathbfegin{equation}\label{eqn Specht case (3)}
S^{\mathbfar{\alpha}}\ \approx \ \mathbfegin{matrix} \mathbfar{\alpha}\\\mathbfar{\mathbfeta}_+\oplus \mathbfar{Z}\\ \\ \end{matrix}\;, \ \quad S^{\mathbfar{\mathbfeta}}\ \approx \ \mathbfegin{matrix} \mathbfar{\mathbfeta}\\\mathbfar{\alpha}\oplus\mathbfar{Y}\\ \mathbfar{\mathbfeta}_+\end{matrix}\;, \ \quad S^{\mathbfar{\gamma}}\ \approx \ \mathbfegin{matrix}\\\mathbfar{\mathbfeta}\oplus\mathbfar{Z}\\\mathbfar{\alpha}\end{matrix}\,
\end{equation}
and
\mathbfegin{equation}\label{eqn Specht B case (3)}
S^{\alpha}\ \approx \ \mathbfegin{matrix} \alpha\\ R\\ \alpha_+\end{matrix}\;, \ \quad S^{\mathbfeta}\ \approx \ \mathbfegin{matrix} \\\alpha\\ \\\end{matrix}\;, \ \quad S^{\gamma}\ \approx \ \mathbfegin{matrix}\gamma\\ R\\\alpha\end{matrix}\,.
\end{equation}
Here $\mathbfar{Y}$, $\mathbfar{Z}$ and $R$ are good and semisimple.
As well, $\mathbfar{\mathbfeta}_+$ and $\alpha_+$ are good.
The assertion concerning the $\partial$-values of the composition factors of $\mathbfar{Y}$ and $\mathbfar{Z}$
follows from \cite[Theorem 6.1(3)]{ChuangTan2001} and the Loewy structure of $P^{\mathbfar{\alpha}}$.
From \cite[Theorem 6.1(3)]{ChuangTan2001}, Lemma~\ref{lemma l and r} and the Loewy structure of $P^{\alpha}$ we further deduce
that every composition factor of $R$ has a labelling partition with $\partial$-value $d-1$ or $d+1$.
Observe also that $R\neq \{0\}$, since $S^\alpha$ has Loewy length $3$, by \ref{noth ingredients}(c).
Let $Y:=\mathbfar{Y}\uparrow^B$ and $Z:=\mathbfar{Z}\uparrow^B$, and let $\lambda:=\Phi^{-1}(\mathbfar{\mathbfeta}_+)$. Then \ref{noth ingredients}(d) gives
\mathbfegin{align}\label{eqn upalpha 3}
S^\alpha\oplus S^\mathbfeta&\sim (S^{\mathbfar{\alpha}})\uparrow^B\sim (D^{\mathbfar{\alpha}})\uparrow^B\oplus Z\oplus D^\lambda\,\\ \label{eqn upbeta 3}
S^\alpha\oplus S^\gamma&\sim (S^{\mathbfar{\mathbfeta}})\uparrow^B\sim D^\gamma\oplus (D^{\mathbfar{\alpha}})\uparrow^B\oplus Y\oplus D^\lambda\, \\ \label{eqn upgamma 3}
S^\mathbfeta\oplus S^\gamma& \sim (S^{\mathbfar{\gamma}})\uparrow^B\sim D^\gamma\oplus Z\oplus (D^{\mathbfar{\alpha}})\uparrow^B\,.
\end{align}
Considering the difference (\ref{eqn upbeta 3})-(\ref{eqn upalpha 3}) and (\ref{eqn Specht B case (3)}), we see that $Y\sim Z\oplus R$.
Comparing $\partial$-values, we further deduce that $Z=\{0\}$ and $Y\cong R$; in particular, also $\mathbfar{Z}=\{0\}$. Next we consider the difference (\ref{eqn upgamma 3})-(\ref{eqn upalpha 3}) and (\ref{eqn Specht B case (3)}), which yields $\lambda=\mathbfar{\alpha}_+$.
\underline{Case (5):} By Lemma~\ref{lemma l and r}, the partitions $\bar{\alpha}$ and $\alpha$ are $p$-regular and $p$-restricted, while
$\bar{\beta}$, $\bar{\gamma}$, $\beta$ and $\gamma$ are $p$-restricted and not $p$-regular. From
\ref{noth ingredients}(a)-(c) we get
\begin{equation}\label{eqn Specht case (5)}
S^{\bar{\alpha}}\ \approx \ \begin{matrix} \bar{\alpha}\\\bar{\beta}_+\oplus \bar{Z}\\ \bar{\alpha}_+\end{matrix}\;, \ \quad S^{\bar{\beta}}\ \approx \ \begin{matrix} \\\bar{\alpha}\oplus\bar{Y}\\ \bar{\beta}_+\end{matrix}\;, \ \quad S^{\bar{\gamma}}\ \approx \ \begin{matrix}\\ \bar{Z}\\\bar{\alpha}\end{matrix}\,
\end{equation}
and
\begin{equation}\label{eqn Specht B case (5)}
S^{\alpha}\ \approx \ \begin{matrix} \alpha\\ R\oplus \beta_+\\ \alpha_+\end{matrix}\;, \ \quad S^{\beta}\ \approx \ \begin{matrix} \\\alpha\oplus T\\ \beta_+\end{matrix}\;, \ \quad S^{\gamma}\ \approx \ \begin{matrix}\\ R\\\alpha\end{matrix}\,.
\end{equation}
Here $\bar{Y}$, $\bar{Z}$, $R$ and $T$ are good and semisimple and disjoint. Moreover, $\bar{\alpha}_+$, $\bar{\beta}_+$, $\alpha_+$ and $\beta_+$ are good.
The assertion concerning the $\partial$-values of the composition factors of $\bar{Y}$ and $\bar{Z}$ follows from \cite[Theorem 6.1(3)]{ChuangTan2001} and the Loewy structure of $P^{\bar{\alpha}}$.
From \cite[Theorem 6.1(3)]{ChuangTan2001}, Lemma~\ref{lemma l and r} and the Loewy structure of $P^{\alpha}$ we further deduce
that every composition factor of $R$ has a labelling partition with $\partial$-value $d-1$ or $d+1$;
every composition factor of $T$ has a labelling partition with $\partial$-value $d$ or $d+2$.
Let $Y:=\bar{Y}\uparrow^B$ and $Z:=\bar{Z}\uparrow^B$. Let further $\lambda:=\Phi^{-1}(\bar{\beta}_+)$ and
$\mu:=\Phi^{-1}(\bar{\alpha}_+)$. From \ref{noth ingredients}(d) we obtain
\begin{align}\label{eqn upalpha 5}
S^\alpha\oplus S^\beta&\sim (S^{\bar{\alpha}})\uparrow^B\sim (D^{\bar{\alpha}})\uparrow^B\oplus Z\oplus D^\lambda\oplus D^\mu\,\\ \label{eqn upbeta 5}
S^\alpha\oplus S^\gamma&\sim (S^{\bar{\beta}})\uparrow^B\sim (D^{\bar{\alpha}})\uparrow^B\oplus Y\oplus D^\lambda\, \\ \label{eqn upgamma 5}
S^\beta\oplus S^\gamma& \sim (S^{\bar{\gamma}})\uparrow^B\sim Z\oplus (D^{\bar{\alpha}})\uparrow^B\,.
\end{align}
We consider the difference (\ref{eqn upbeta 5})-(\ref{eqn upalpha 5}) and (\ref{eqn Specht B case (5)}) to get
$Y\oplus T\oplus D^{\beta_+}\sim R\oplus Z\oplus D^\mu$. So, comparing $\partial$-values, we get $T\cong Z$ and
$Y\oplus D^{\beta_+}\sim R\oplus D^\mu$. Next we consider the difference (\ref{eqn upalpha 5})-(\ref{eqn upgamma 5})
and (\ref{eqn Specht B case (5)}) to get $D^\lambda\oplus D^\mu\sim D^{\beta_+}\oplus D^{\alpha_+}$.
Again comparing $\partial$-values, this gives $\alpha_+=\lambda$, $\beta_+=\mu$ and then also $Y\cong R$.
To show that $[\bar{Y}:D^{\bar{\alpha}_+}]=0$, we cannot argue as in cases (4) and (6), since $\bar{\beta}$ is not $p$-regular.
However, we now see that if $[\bar{Y}:D^{\bar{\alpha}_+}]>0$, then we would also have $[Y:D^{\beta_+}]>0$ and $[S^\alpha:D^{\beta_+}]>1$, which
is impossible, since every Specht module in $B$ is multiplicity-free.
This completes the proof of the theorem.
\end{proof}
\section{Proofs of Theorem~\ref{thm main1} and Theorem~\ref{thm main2}}\label{sec main1}
For the remainder of this section, let $p\geqslant 5$.
Our aim is to prove Theorem~\ref{thm main1} and Theorem~\ref{thm main2}. To this end we shall start by applying the results from
Section~\ref{sec weight 2} to the case of blocks whose $p$-cores are hook partitions, that is, are of
the form $(k,1^l)$, for some $k,l\in\mathbb{N}_0$. Note that a hook partition $(k,1^l)$ is a $p$-core if and only if either $0\leqslant k+l\leqslant p-1$, or
$p+1\leqslant k+l\leqslant 2p-1$, $0\leqslant k<p+1$ and $0\leqslant l <p$.
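For instance, for $p=5$ the hook partition $(3,1^2)$ has corner hook length $k+l=5=p$ and is therefore not a $5$-core, whereas $(4,1^2)$, with $k+l=6=p+1$, $k=4<p+1$ and $l=2<p$, is a $5$-core.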
To simplify the notation, we shall denote the block $B_{(k,1^l)}(2,p)$ by $B_{k,l}$.
While Theorem~\ref{thm main1} is stated in terms of undirected graphs, we restate and prove a more detailed version here:
\begin{thm}\label{thm main1 details}
Let $p\geqslant 5$, and let $B_{k,l}$ be a block of $F\mathfrak{S}_n$ of $p$-weight $2$ and $p$-core $(k,1^l)$, for
some $k,l\in\mathbb{N}_0$. With the graphs defined in Appendix~\ref{sec quiv}, the Ext-quiver of $B_{k,l}$
equals
\begin{enumerate}
\item[{\rm (a)}] $Q_{0,0}(p)$, if $k=l=0$,
\item[{\rm (b)}] $Q_{k,l}(p)$, if $1\leqslant k+l\leqslant p-1$,
\item[{\rm (c)}] $Q_{k-1,l-1}(p)$, if $p+1\leqslant k+l\leqslant 2p-1$,
\end{enumerate}
where the vertices in row $i\geqslant 0$, from top to bottom, are labelled by the $p$-regular partitions of $B_{k,l}$ with $\partial$-value $i$
and the total ordering on the vertices is the lexicographic ordering on partitions.
\end{thm}
In preparation of the proof of Theorem~\ref{thm main1 details}, we next collect a number of properties of
$(2:1)$-pairs of weight-2 blocks labelled by hook partitions.
\begin{prop}\label{prop (2:1) pairs hooks}
Let $k,l\in \mathbb{N}_0$ be such that $(k,1^l)$ is a $p$-core partition. Moreover, let $n:=k+l+2p$, and let $B$ be the block
$B_{k,l}$ of $F\mathfrak{S}_n$.
\begin{enumerate}
\item[{\rm (a)}] Let $k>1$. If $1\leqslant k+l\leqslant p-1$ or $p+2\leqslant k+l\leqslant 2p-1$, then $B$ forms a $(2:1)$-pair with the block $B_{k-1,l}$
of $F\mathfrak{S}_{n-1}$.
\item[{\rm (b)}] Let $l\geqslant 1$. If $1\leqslant k+l\leqslant p-1$ or $p+2\leqslant k+l\leqslant 2p-1$, then $B$ forms a $(2:1)$-pair with the block
$B_{k,l-1}$ of $F\mathfrak{S}_{n-1}$.
\item[{\rm (c)}] Suppose that also $(k',1^{l'})$ is a $p$-core partition, for $k',l'\in\mathbb{N}_0$ with $k'+l'=k+l-2$. Then $B$
forms a $(2:2)$-pair with the block $B_{k',l'}$ of $F\mathfrak{S}_{n-2}$ if and only if $k+l=p+1$, $k=k'+1$ and $l=l'+1$.
\end{enumerate}
\end{prop}
\begin{proof}
In order to prove the first half of (a) and the first half of (b), suppose that $1\leqslant k+l\leqslant p-1$. We display $(k,1^l)$ on an abacus with $l+1+2p$ beads. Then the last row of the abacus is
\begin{center}
\begin{tabular}{ccccccccccc}
$-$ & $\bullet$ & $\cdots$ & $\bullet$ & $-$ &$\cdots$ & $-$ &$\bullet$ & $-$ & $\cdots$ & $-$\\
&\multicolumn{3}{c}{\upbracefill}&\multicolumn{3}{c}{\upbracefill}&&&&\\
&\multicolumn{3}{c}{$l$}&\multicolumn{3}{c}{$k-1$}&&&&
\end{tabular}
\end{center}
Swapping runners $l+1+k$ and $l+k$, the assertion of (a) follows in this case. Swapping runners $1$ and $0$, the assertion of (b)
follows in this case.
For the second half of (a) and (b), suppose now that $p+2\leqslant k+l\leqslant 2p-1$. We also display $(k,1^l)$ on an abacus with $l+1+2p$ beads. Note that
we must have $2\leqslant k\leqslant p$ and $1<l\leqslant p-1$, since $k+l\geqslant p+2$ and $(k,1^l)$ is a $p$-core.
We consider the last two rows of this abacus:
\begin{center}
\begin{tabular}{ccccccccccc}
& \multicolumn{7}{c}{$l$}&\multicolumn{3}{c}{$y$}\\
& \multicolumn{7}{c}{}&\multicolumn{3}{c}{\downbracefill}\\
\multicolumn{9}{c}{\downbracefill}&&\\
$-$ & $\bullet$ & $\cdots$ & $\bullet$ & $\bullet$ & $\bullet$ & $\cdots$ & $\bullet$ & $-$ & $\cdots$ & $-$\\
$-$ & $-$ & $\cdots$ & $-$ & $\bullet$ & $-$ & $\cdots$ & $-$ & $-$ & $\cdots$ & $-$\\
\multicolumn{4}{c}{\upbracefill}&&&&&&&\\
\multicolumn{4}{c}{$x$}&&&&&&&
\end{tabular}
\end{center}
Here $x+y+1=k$ and $x\geqslant 2$. So assertion (a) follows by swapping runners $x$ and $x+1$, and assertion (b) follows by swapping runners
$0$ and $1$.
As for assertion (c), first note that $B_{k,l}$ can only form a $(2:2)$-pair with the block $B_{k-1,l-1}$, since
the core $(k',1^{l'})$ is obtained from $(k,1^l)$ by removing two nodes of the same $p$-residue.
Next we show that, whenever $k+l=p+1$, the block $B$ forms a $(2:2)$-pair with $B_{k-1,l-1}$.
So let $k+l=p+1$. Note that then we necessarily have $k>1$ and $l\geqslant 1$. The last two rows of the abacus display
of $(k,1^l)$ with $l+1+2p$ beads are
\begin{center}
\begin{tabular}{cccccccc}
&\multicolumn{4}{c}{$l$}&&&\\
&\multicolumn{4}{c}{\downbracefill}&&&\\
$-$&$\bullet$&$\bullet$& $\cdots$&$\bullet$&$-$&$\cdots$&$-$\\
$-$&$\bullet$&$-$&$\cdots$&$-$&$-$&$\cdots$&$-$
\end{tabular}
\end{center}
We swap the first two runners and get
\begin{center}
\begin{tabular}{cccccccc}
&&\multicolumn{3}{c}{$l-1$}&&&\\
&&\multicolumn{3}{c}{\downbracefill}&&&\\
$\bullet$&$-$&$\bullet$& $\cdots$&$\bullet$&$-$&$\cdots$&$-$\\
$\bullet$&$-$&$-$&$\cdots$&$-$&$-$&$\cdots$&$-$
\end{tabular}
\end{center}
This is the abacus display of the partition $(p-(l-1)-1,1^{l-1})=(k-1,1^{l-1})$.
To complete the proof of (c), it remains to verify that, whenever $2\leqslant k+l\leqslant p-1$ or $p+3\leqslant k+l\leqslant 2p-1$, the block $B$ cannot
form a $(2:2)$-pair with $B_{k-1,l-1}$, provided the partition $(k-1,1^{l-1})$ exists and is a $p$-core. So suppose it does, and suppose that
$B$ and $B_{k-1,l-1}$ form a $(2:2)$-pair. Then $k\geqslant 2$, $l\geqslant 1$, and the two removable nodes of $(k,1^l)$, namely the node $(1,k)$ of residue $k-1$ and the node $(l+1,1)$ of residue $1-(l+1)\equiv p-l\pmod{p}$, must have the same
$p$-residue. Hence $k-1\equiv p-l\pmod{p}$, that is, $k+l\equiv 1\pmod{p}$, a contradiction.
\end{proof}
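For instance, for $p=5$ part (c) applies to the blocks with $5$-cores $(4,1^2)$ and $(3,1)$: here $k+l=6=p+1$, $k=k'+1$ and $l=l'+1$, so these two blocks form a $(2:2)$-pair.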
Note that, for $m\geqslant 3$, there cannot be any $(2:m)$-pair of blocks
of $p$-weight $2$ whose $p$-cores are hook partitions.
Thus, as an immediate consequence of Proposition~\ref{prop (2:1) pairs hooks}(c), we have
\begin{cor}\label{cor Scopes hooks}
There are precisely $(p-1)^2+1$ Scopes classes of $p$-blocks of symmetric groups of $p$-weight $2$ whose $p$-cores
are hook partitions. Representatives of these are given by the blocks $B_{k,l}$, where
\begin{itemize}
\item[\rm{(i)}] either $0\leqslant k+l\leqslant p-1$,
\item[\rm{ (ii)}] or $p+2\leqslant k+l\leqslant 2p-1$, $0\leqslant k<p+1$ and $0\leqslant l <p$.
\end{itemize}
\end{cor}
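For instance, for $p=5$ this gives $(5-1)^2+1=17$ Scopes classes, represented by the $11$ blocks $B_{k,l}$ with $0\leqslant k+l\leqslant 4$ and the $6$ blocks $B_{k,l}$ with $7\leqslant k+l\leqslant 9$, $k\leqslant 5$ and $l\leqslant 4$.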
The next corollary is a consequence of Lemma~\ref{lemma l and r}.
\begin{cor}\label{cor l and r hooks}
Let $k,l\in \mathbb{N}_0$ be such that $(k,1^l)$ is a $p$-core partition. Moreover, let $n:=k+l+2p$, and let $B$ be the block
$B_{k,l}$ of $F\mathfrak{S}_n$.
Display all partitions under consideration on an $[m_1,\ldots,m_p]$-abacus with $l+1+2p$ beads.
With the notation as in Lemma~\ref{lemma l and r}, one has the following:
\begin{enumerate}
\item[{\rm (a)}] Suppose that $1\leqslant k+l\leqslant p-1$ and $k>1$, so that $B$ forms a $(2:1)$-pair with the block $\bar{B}:=B_{k-1,l}$ of $F\mathfrak{S}_{n-1}$. Then
$l_1=l$, $l_2=0$, $r_2=0$ and $r_1=p-k-l-1$; in particular, $d=p-k-1$ and $l_1+r_1=p-k-1<p-2$. Moreover,
one has
\begin{center}
\begin{tabular}{|c||c|c|c|c|c|c|}\cline{2-7}
\multicolumn{1}{c||}{}& $\bar{\alpha}$&$\bar{\beta}$&$\bar{\gamma}$&$\alpha$&$\beta$&$\gamma$\\\hline\hline
$p$-regular&$\checkmark$&$\checkmark$&$\checkmark$&$\checkmark$&$\checkmark$&$\checkmark$\\\hline
$p$-restricted&$-$&$k<p-1$&$\checkmark$&$k<p-1$&$-$&$\checkmark$\\\hline
\end{tabular}
\end{center}
\item[{\rm (b)}] Suppose that $1\leqslant k+l\leqslant p-1$, $k\geqslant 1$ and $l\geqslant 1$, so that $B$ forms a $(2:1)$-pair with the block $\bar{B}:=B_{k,l-1}$
of $F\mathfrak{S}_{n-1}$. Then $l_1=l_2=0$, $r_1=p-2$, and $r_2=l$; in particular, $d=p-2-l$, $l_1+r_1=p-2$ and $l_2+r_2=l$. Moreover,
\begin{center}
\begin{tabular}{|c||c|c|c|c|c|c|}\cline{2-7}
\multicolumn{1}{c||}{}& $\bar{\alpha}$&$\bar{\beta}$&$\bar{\gamma}$&$\alpha$&$\beta$&$\gamma$\\\hline\hline
$p$-regular&$\checkmark$&$l<p-2$&$-$&$\checkmark$&$-$&$l<p-2$\\\hline
$p$-restricted&$\checkmark$&$\checkmark$&$\checkmark$&$\checkmark$&$\checkmark$&$\checkmark$\\\hline
\end{tabular}
\end{center}
\item[{\rm (c)}] Suppose that $p+2\leqslant k+l\leqslant 2p-1$ and $k>1$, so that $B$ forms a $(2:1)$-pair with the block $\bar{B}:=B_{k-1,l}$ of $F\mathfrak{S}_{n-1}$. Then
$l_1=l$, $l_2=0$, $r_2=0$ and $r_1=p-k$; in particular, $d=p-k$ and $l_1+r_1=p-k$ and $l_2+r_2=0$. Moreover,
\begin{center}
\begin{tabular}{|c||c|c|c|c|c|c|}\cline{2-7}
\multicolumn{1}{c||}{}& $\bar{\alpha}$&$\bar{\beta}$&$\bar{\gamma}$&$\alpha$&$\beta$&$\gamma$\\\hline\hline
$p$-regular&$\checkmark$&$\checkmark$&$\checkmark$&$\checkmark$&$\checkmark$&$\checkmark$\\\hline
$p$-restricted&$-$&$2<k<p$&$\checkmark$&$2<k<p$&$-$&$\checkmark$\\\hline
\end{tabular}
\end{center}
\item[{\rm (d)}] Suppose that $p+2\leqslant k+l\leqslant 2p-1$, $k\geqslant 1$ and $l\geqslant 1$, so that $B$ forms a $(2:1)$-pair with the block $\bar{B}:=B_{k,l-1}$
of $F\mathfrak{S}_{n-1}$. Then $l_1=l_2=0$, $r_1=p-2$, and $r_2=l-1$; in particular, $d=p-1-l$, $l_1+r_1=p-2$ and $l_2+r_2=l-1$. Moreover,
\begin{center}
\begin{tabular}{|c||c|c|c|c|c|c|}\cline{2-7}
\multicolumn{1}{c||}{}& $\bar{\alpha}$&$\bar{\beta}$&$\bar{\gamma}$&$\alpha$&$\beta$&$\gamma$\\\hline\hline
$p$-regular&$\checkmark$&$l<p-1$&$-$&$\checkmark$&$-$&$l<p-1$\\\hline
$p$-restricted&$\checkmark$&$\checkmark$&$\checkmark$&$\checkmark$&$\checkmark$&$\checkmark$\\\hline
\end{tabular}
\end{center}
\end{enumerate}
\end{cor}
\begin{proof}
Under the assumptions of (a), we are in case (2) of Remark~\ref{rem l and r} if $k<p-1$, and in case (1) if $k=p-1$ (and $l=0$). Under
the assumptions of (b), we are in case (4) of Remark~\ref{rem l and r} if $l<p-2$, and in case (5) if $l=p-2$ (and $k=1$).
Under the assumptions of (c), we are in case (1) if $k=p$, and in case (2) if $2<k<p$; note that $k=2$ is not possible. Lastly, under
the assumptions of (d), we are in case (4) if $l<p-1$, and in case (5) if $l=p-1$. Hence, the assertions follow from Lemma~\ref{lemma l and r}.
\end{proof}
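For instance, for $p=5$ the block $B=B_{3,1}$ of $F\mathfrak{S}_{14}$ forms a $(2:1)$-pair with $\bar{B}=B_{2,1}$, and part (a) yields $l_1=1$, $l_2=r_1=r_2=0$ and $d=1$.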
\begin{lemma}\label{lemma alpha max}
Let $k,l\in \mathbb{N}_0$ be such that $(k,1^l)$ is a $p$-core partition. Moreover, let $n:=k+l+2p$, and let $B$ be the block
$B_{k,l}$ of $F\mathfrak{S}_n$.
\begin{enumerate}
\item[{\rm (a)}] Suppose that $k>1$, so that $B$ forms a $(2:1)$-pair with the block $\bar{B}:=B_{k-1,l}$ of $F\mathfrak{S}_{n-1}$.
Then $\bar{\alpha}:=\bar{\alpha}(B,\bar{B})$ is the lexicographically largest partition of $\bar{B}$ with $\partial$-value $\partial(\bar{\alpha})$.
\item[{\rm (b)}] Suppose that $k\geqslant 1$ and $l\geqslant 1$, so that $B$ forms a $(2:1)$-pair with the block $\bar{B}:=B_{k,l-1}$
of $F\mathfrak{S}_{n-1}$. Then $\bar{\alpha}:=\bar{\alpha}(B,\bar{B})$ is the lexicographically smallest $p$-regular partition of $\bar{B}$ with
$\partial$-value $\partial(\bar{\alpha})$.
\end{enumerate}
\end{lemma}
\begin{proof}
(a)\, Assume that there is a partition $\mu$ of $n-1$ with $p$-core $(k-1,1^l)$, $\partial(\mu)=\partial(\bar{\alpha})$ and
$\bar{\alpha}<\mu$. Then we may choose $\mu$ to be the lexicographically smallest such partition. By \cite[Remark 2.1]{ChuangTan2001},
this forces $\mu=(\bar{\alpha})_+$ and $\bar{\alpha}$ to be $p$-restricted. But this contradicts Corollary~\ref{cor l and r hooks}(a) and (c).
(b)\, Assume that there is a $p$-regular partition $\mu$ of $n-1$ with $p$-core $(k,1^{l-1})$, $\partial(\bar{\alpha})=\partial(\mu)$ and
$\mu<\bar{\alpha}$. Then we may choose $\mu$ to be the lexicographically largest such partition, and \cite[Remark 2.1]{ChuangTan2001}
gives $\mu=(\bar{\alpha})_-$. But, by \ref{noth partial} and Theorem~\ref{thm Loewy except}, we then have $\mu=(\bar{\alpha})_-=\bar{\gamma}$, while $\bar{\gamma}$ is not $p$-regular, by
Corollary~\ref{cor l and r hooks}(b) and (d), a contradiction.
\end{proof}
\begin{noth}\label{noth strategy 1}{\bf Strategy of proof.}\,
We are now in the position to prove Theorem~\ref{thm main1 details}. Our most important ingredients will be the results
concerning the Ext-quivers of the principal blocks of $F\mathfrak{S}_{2p}$ and $F\mathfrak{S}_{2p+1}$ in Appendix~\ref{sec B0}
together with Theorem~\ref{thm Loewy except}. The notation used throughout the proof will thus be chosen in accordance with
that fixed in Theorem~\ref{thm Loewy except}. As well, whenever $(B,\bar{B})$ is a $(2:1)$-pair of blocks of weight 2 and $\mu$ is
a good partition of $\bar{B}$, we denote the corresponding good partition $\Phi^{-1}(\mu)$ by $\hat{\mu}$, as in \ref{noth (2:1)}.
In the proof of Theorem~\ref{thm main1 details} below, we shall argue by induction on $k+l$, and give full details in the case where $k+l\leqslant p-1$. In doing so, we
shall consider the following subcases:
\begin{itemize}
\item $k=l=0$, or $k=1$ and $l=0$, when $B_{k,l}$ is the principal
block of $F\mathfrak{S}_{2p}$ or the principal block of $F\mathfrak{S}_{2p+1}$; these are covered by Theorem~\ref{thm quiver B0};
\item $1<k\leqslant p-3$, when we are in case (2) of
Theorem~\ref{thm Loewy except};
\item $k=1$ and $1\leqslant l<p-3$, when we are in case (4) of Theorem~\ref{thm Loewy except};
\item $k=p-1$ and $l=0$, when we are in case (1) of Theorem~\ref{thm Loewy except};
\item $k=1$ and $p-3\leqslant l\leqslant p-2$, when we are in case (4) and (5), respectively, of Theorem~\ref{thm Loewy except}.
\end{itemize}
The assertion in the case where $p+1\leqslant k+l$ can then be obtained completely analogously, so that we shall leave the details
to the reader. Moreover, we should like to emphasize that, for all $p$-cores $(k,1^l)$ with $k\geqslant 1$, the blocks
$B_{k,l}$ and $B_{l+1,k-1}$ are isomorphic via tensoring with the sign representation, since $(l+1,1^{k-1})=(k,1^l)'$.
In particular, the Ext-quivers of $B_{k,l}$ and $B_{l+1,k-1}$ are isomorphic as undirected graphs. Hence, if we were only interested
in the structure of the Ext-quivers as undirected graphs, we would only need to examine half of the blocks. Since, however, we
also want to give more detailed information on the lexicographic ordering and the $\partial$-values of the
$p$-regular partitions in the blocks occurring in Theorem~\ref{thm main1 details}, we shall treat all
blocks via our inductive arguments.
\end{noth}
\begin{proof}(of Theorem~\ref{thm main1 details})
In the following, set $B:=B_{k,l}$ and $\kappa:=\kappa_B:=(k,1^l)$. We argue by induction on $k+l$, and suppose first that $k+l\leqslant p-1$. If $k=l=0$, then
$B$ is the principal block of $F\mathfrak{S}_{2p}$, which has Ext-quiver $Q_{0,0}(p)$, by Theorem~\ref{thm quiver B0}. If $k+l=1$, then $B$ is the principal
block of $F\mathfrak{S}_{2p+1}$, which has Ext-quiver $Q_{1,0}(p)$, by Theorem~\ref{thm quiver B0}. Thus, from now on we may suppose that $k+l>1$.
Suppose, moreover, that $1<k\leqslant p-3$. Then $B$ forms a $(2:1)$-pair with the block $\bar{B}:=B_{k-1,l}$, by Proposition~\ref{prop (2:1) pairs hooks}.
By induction, $\bar{B}$ has Ext-quiver $Q_{k-1,l}(p)$.
By Corollary~\ref{cor l and r hooks} and Lemma~\ref{lemma alpha max}, we know that $\bar{\alpha}$ is the largest partition of $n-1$ with $p$-core
$\kappa_{\bar{B}}$ and $\partial$-value $p-k$.
By \ref{noth partial Scopes}, it now suffices to consider the following part of the quiver of $\mathbfar{B}$, whose vertices lie
on those rows with $\partial$-values $p-k-2,p-k-1,p-k$ and $p-k+1$.
\begin{center}
\begin{tikzpicture}
\coordinate[label=left:$\bar{\beta}_+$] (B+) at (0.5,-0.5);
\coordinate[label=above:$\mu$] (M) at (1,0);
\coordinate[label=above:$\bar{\beta}$] (B) at (1.5,-0.5);
\coordinate[label=below:$\bar{\alpha}$] (A) at (1,-1);
\coordinate[label=below:$\bar{\gamma}$] (G) at (1.5,-1);
\draw[con] (0,0) node{$\bullet$} -- (B+) node{\color{red} $\bullet$};
\draw[con] (B+) -- (M) node{\color{red} $\bullet$};
\draw[con] (B) node{\color{red} $\bullet$} -- (G) node{\color{red} $\bullet$};
\draw[con] (M) -- (B) ;
\draw[con] (B+) -- (A) node{\color{red} $\bullet$};
\draw[con] (A) -- (B) ;
\draw[con] (B) -- (2,0) node{$\bullet$};
\draw[con] (B) -- (2,-1) node{$\bullet$};
\draw[con] (G) -- (2,-1.5) node{$\bullet$};
\draw[con] (2,-1.5) -- (2,-1) node{$\bullet$};
\draw[con] (2,-1) -- (2.5,-0.5) node{$\bullet$};
\draw[con] (2,0) -- (2.5,-0.5) node{$\bullet$};
\end{tikzpicture}
\end{center}
We identify the labels of the red vertices.
To do so, we use Corollary~\ref{cor l and r hooks} and Theorem~\ref{thm Loewy except}.
First, by Corollary~\ref{cor l and r hooks}, $\bar{\gamma}$ and $\bar{\beta}$ are
$p$-regular. By Theorem~\ref{thm Loewy except}, we then know that $\bar{\alpha}=\bar{\gamma}_+$, so that $\bar{\gamma}$ is the vertex to the right of $\bar{\alpha}$.
Moreover, by Theorem~\ref{thm Loewy except}, $\bar{\gamma}$ is connected to $\bar{\beta}$, and $\partial(\bar{\beta})=\partial(\bar{\gamma})-1$, by Corollary~\ref{cor l and r hooks}.
Since, by Lemma~\ref{lemma Ext alpha}, $\bar{\beta}$ is also connected to $\bar{\alpha}$, this identifies the positions of $\bar{\beta}$
and $\bar{\beta}_+$. By induction, we also know that $\mu>\bar{\beta}$. Hence, by \cite[Theorem 6.1]{ChuangTan2001},
we must have $[S^{\bar{\beta}}:D^\mu]\neq 0$, and then $\bar{Y}\cong D^\mu$, in the notation of Theorem~\ref{thm Loewy except}.
This then also forces $\bar{Z}=\{0\}$,
so that $\bar{\beta}$ is the only common neighbour of $\bar{\alpha}$ and $\bar{\gamma}$.
So, by Corollary~\ref{cor l and r hooks}, Theorem~\ref{thm Loewy except} and \ref{noth partial Scopes}, we deduce the
following information on the corresponding part of the quiver of $B$, whose vertices again lie in rows with $\partial$-values
$p-k-2,p-k-1,p-k$ and $p-k+1$:
\begin{center}
\begin{tikzpicture}
\coordinate[label=left:$\alpha_+$] (A+) at (0.5,-0.5);
\coordinate[label=above:$\hat{\mu}$] (MH) at (1,0);
\coordinate[label=left:$\alpha$] (A) at (1,-0.5);
\coordinate[label=left:$\beta$] (B) at (1.5,-1);
\coordinate[label=above:$\gamma$] (G) at (1.5,-0.5);
\draw[con] (0,0) node{$\bullet$} -- (A+) node{\color{blue} $\bullet$};
\draw[con] (A+) -- (MH) node{\color{blue} $\bullet$} ;
\draw[con] (MH) -- (G) node{\color{blue} $\bullet$};
\draw[con] (MH) -- (A) node{\color{blue} $\bullet$};
\draw[con] (A) -- (B) node{\color{blue} $\bullet$};
\draw[con] (B) -- (G);
\draw[con] (B) -- (2,-1.5) node{$\bullet$};
\draw[con] (G) -- (2,-1) node{$\bullet$};
\draw[con] (G) -- (2,0) node{$\bullet$};
\draw[con] (2,-1) -- (2.5,-0.5) node{$\bullet$};
\draw[con] (2,0) -- (2.5,-0.5);
\draw[con] (2,-1.5) -- (2,-1);
\end{tikzpicture}
\end{center}
Here $D^{\hat{\mu}}\cong Y$. Since, by Corollary~\ref{cor l and r hooks}, $\beta$ is not $p$-restricted, $\beta_+$ does not exist, so that
$\alpha$ is only connected to $\hat{\mu}$ and $\beta$, by Lemma~\ref{lemma Ext alpha} and Theorem~\ref{thm Loewy except};
in particular, we have $\hat{\mu}>\alpha$ as well as $\beta>\gamma$.
This shows that $B$ has quiver $Q_{k,l}(p)$.
Next suppose that $k=1$ and $1\leqslant l<p-3$. Then, by Proposition~\ref{prop (2:1) pairs hooks}, $B$ forms a $(2:1)$-pair with the
block $\bar{B}:=B_{k,l-1}$. By induction, $\bar{B}$ has quiver $Q_{k,l-1}(p)$. By Corollary~\ref{cor l and r hooks} and Lemma~\ref{lemma alpha max},
$\bar{\alpha}$ is the smallest $p$-regular partition of $n-1$ with $p$-core $\kappa_{\bar{B}}$ and $\partial$-value $p-1-l$. So, by
\ref{noth partial Scopes}, we only need to consider the following part of the quiver of $\mathbfar{B}$, whose vertices
lie in rows with $\partial$-values $p-l-3,p-l-2,p-l-1$ and $p-l$:
\begin{center}
\begin{tikzpicture}
\coordinate[label=left:$\bar{\beta}_+$] (B+) at (0.5,-0.5);
\coordinate[label=above:$\mu$] (M) at (1,0);
\coordinate[label=above:$\rho$] (R) at (2,0);
\coordinate[label=above:$\bar{\beta}$] (B) at (1.5,-0.5);
\coordinate[label= right:$\bar{\alpha}$] (A) at (1,-1);
\coordinate[label=below right:$\bar{\alpha}_+$] (A+) at (0.5,-1);
\draw[con] (0,0) node{$\bullet$} -- (B+) node{\color{red} $\bullet$};
\draw[con] (B+) -- (M) node{\color{red} $\bullet$};
\draw[con] (M) -- (B) node{\color{red} $\bullet$};
\draw[con] (B) -- (R) node{\color{red} $\bullet$};
\draw[con] (B+) -- (A+) node{\color{red} $\bullet$};
\draw[con] (B+) -- (A) node{\color{red} $\bullet$};
\draw[con] (A) -- (B);
\draw[con] (0,-1.5) node{$\bullet$} -- (A+);
\draw[con] (0,-1) node{$\bullet$} -- (0,-1.5);
\draw[con] (0,-1) -- (B+);
\end{tikzpicture}
\end{center}
Again we determine the labels of the red vertices. By Corollary~\ref{cor l and r hooks}, $\bar{\gamma}$ is $p$-singular, while $\bar{\beta}$ is $p$-regular and $p$-restricted, so that $\bar{\beta}_+$ exists. By Theorem~\ref{thm Loewy except} and Lemma~\ref{lemma Ext alpha},
$\bar{\alpha}$ is connected to
$\bar{\beta}$ and $\bar{\beta}_+$. This identifies these two vertices. Moreover, by induction, we know that $\mu>\bar{\beta}$, so that
$D^\mu\cong \bar{Y}$ and $\bar{Z}=\{0\}$. So, by Corollary~\ref{cor l and r hooks}, Theorem~\ref{thm Loewy except} and \ref{noth partial Scopes}, we deduce the
following information on the corresponding part of the quiver of $B$, whose vertices again lie in rows with $\partial$-values
$p-l-3,p-l-2,p-l-1$ and $p-l$:
\begin{center}
\begin{tikzpicture}
\coordinate[label=left:$\alpha_+$] (A+) at (0.5,-0.5);
\coordinate[label=above:$\hat{\mu}$] (M) at (1,0);
\coordinate[label=above:$\hat{\rho}$] (R) at (2,0);
\coordinate[label=above:$\gamma$] (G) at (1.5,-0.5);
\coordinate[label=below:$\alpha$] (A) at (1,-0.5);
\coordinate[label=below right:$\beta_+$] (B+) at (0.5,-1);
\draw[con] (0,0) node{$\bullet$} -- (A+) node{\color{blue} $\bullet$};
\draw[con] (A+) -- (M) node{\color{blue} $\bullet$};
\draw[con] (M) -- (G) node{\color{blue} $\bullet$};
\draw[con] (G) -- (R) node{\color{blue} $\bullet$};
\draw[con] (M) -- (A) node{\color{blue} $\bullet$};
\draw[con] (A+) -- (B+) node{\color{blue} $\bullet$};
\draw[con] (B+) -- (A);
\draw[con] (0,-1.5) node{$\bullet$} -- (B+);
\draw[con] (0,-1) node{$\bullet$} -- (0,-1.5);
\draw[con] (0,-1) -- (A+);
\end{tikzpicture}
\end{center}
Here $D^{\hat{\mu}}\cong Y$. Since, by Corollary~\ref{cor l and r hooks}, $\beta$ is $p$-singular, Lemma~\ref{lemma Ext alpha} and
Theorem~\ref{thm Loewy except}
imply that $\beta_+$ and $\hat{\mu}$ are the only neighbours of $\alpha$.
Since $[S^\alpha:D^{\hat{\mu}}]\neq 0\neq [S^\alpha:D^{\beta_+}]$, we must have $\beta_+>\alpha$ and $\hat{\mu}>\alpha$.
Moreover, since $[S^\rho:D^{\bar{\beta}}]\neq 0$, also $[S^{\hat{\rho}}:D^\gamma]\neq 0$ and $\gamma>\hat{\rho}$.
Consequently, $B$ has quiver $Q_{k,l}(p)$.
Next consider the case where $k=p-2$ and $0\leqslant l\leqslant 1$. Then, by Proposition~\ref{prop (2:1) pairs hooks}, $B$
forms a $(2:1)$-pair with the block $\bar{B}:=B_{p-3,l}$, which has quiver $Q_{p-3,l}(p)$, by induction. Again, by
Corollary~\ref{cor l and r hooks} and Lemma~\ref{lemma alpha max},
$\bar{\alpha}$ is the largest $p$-regular partition of $n-1$ with $p$-core $\kappa_{\bar{B}}$ and $\partial$-value $2$. By \ref{noth partial Scopes},
we need to consider the following part of the quiver of $\mathbfar{B}$, whose vertices lie in rows with $\partial$-values $0,1,2$ and $3$:
\begin{center}
\begin{tikzpicture}
\coordinate[label=left:$\bar{\beta}_+$] (B+) at (0,-0.5);
\coordinate[label=above:$\mu$] (M) at (0,0);
\coordinate[label=above:$\rho$] (R) at (1,0);
\coordinate[label=above right:$\bar{\beta}$] (B) at (1,-0.5);
\coordinate[label=below:$\bar{\gamma}$] (G) at (1,-1);
\coordinate[label=below left:$\bar{\alpha}$] (A) at (0.5,-1);
\draw[con] (M) node{\color{red} $\bullet$} -- (B) node{\color{red} $\bullet$};
\draw[con] (M) -- (B+) node{\color{red} $\bullet$};
\draw[con] (B+) -- (R) node{\color{red} $\bullet$};
\draw[con] (B+) -- (A) node{\color{red} $\bullet$};
\draw[con] (A) -- (B);
\draw[con] (B) -- (G) node{\color{red} $\bullet$};
\draw[con] (R) -- (B);
\draw[con] (G) -- (1.5,-1.5) node{$\bullet$};
\draw[con] (B) -- (1.5,-1) node{$\bullet$};
\draw[con] (1.5,-1.5) -- (1.5,-1);
\end{tikzpicture}
\end{center}
By Corollary~\ref{cor l and r hooks}, $\bar{\beta}$ is $p$-regular and $p$-restricted, so that $\bar{\beta}_+$ exists. As well, $\bar{\gamma}$ is
$p$-regular and $p$-restricted, and $\bar{\gamma}_+=\bar{\alpha}$, by Theorem~\ref{thm Loewy except}. So, by Theorem~\ref{thm Loewy except} and Lemma~\ref{lemma Ext alpha},
$\bar{\alpha}$ is only connected to $\bar{\beta}$ and $\bar{\beta}_+$, which identifies
the positions of $\bar{\beta}$, $\bar{\beta}_+$ and $\bar{\gamma}$. By induction, we further have $\mu>\bar{\beta}$ and $\rho>\bar{\beta}$.
From Theorem~\ref{thm Loewy except} and \cite[Theorem 6.1]{ChuangTan2001}, we thus deduce that $\bar{Y}\cong D^\mu\oplus D^\rho$ and $\bar{Z}=\{0\}$.
Thus, by Theorem~\ref{thm Loewy except} and \ref{noth partial Scopes} we obtain the corresponding part of the quiver of $B$, where again the vertices drawn lie in
rows with $\partial$-values $0,1,2$ and $3$:
\begin{center}
\begin{tikzpicture}
\coordinate[label=left:$\bar{\alpha}_+$] (A+) at (0,-0.5);
\coordinate[label=above:$\hat{\mu}$] (M) at (0,0);
\coordinate[label=above:$\hat{\rho}$] (R) at (1,0);
\coordinate[label=above right:$\gamma$] (G) at (1,-0.5);
\coordinate[label=below:$\beta$] (B) at (1,-1);
\coordinate[label=below left:$\alpha$] (A) at (0.5,-0.5);
\draw[con] (M) node{\color{blue} $\bullet$} -- (G) node{\color{blue} $\bullet$};
\draw[con] (M) -- (A+) node{\color{blue} $\bullet$};
\draw[con] (M) -- (A) node{\color{blue} $\bullet$};
\draw[con] (A+) -- (R) node{\color{blue} $\bullet$};
\draw[con] (A) -- (B)node{\color{blue} $\bullet$};
\draw[con] (B) -- (G) node{\color{blue} $\bullet$};
\draw[con] (R) -- (G);
\draw[con] (R) -- (A);
\draw[con] (B) -- (1.5,-1.5) node{$\bullet$};
\draw[con] (G) -- (1.5,-1) node{$\bullet$};
\draw[con] (1.5,-1.5) -- (1.5,-1);
\end{tikzpicture}
\end{center}
Here, by Theorem~\ref{thm Loewy except}, we have $Y\cong D^{\hat{\mu}}\oplus D^{\hat{\rho}}$. Since, by Corollary~\ref{cor l and r hooks}, $\beta$ is not $p$-restricted,
$\beta_+$ does not exist. Hence, by Lemma~\ref{lemma Ext alpha} and Theorem~\ref{thm Loewy except}, $\alpha$ is only connected to $\beta$, $\hat{\mu}$ and
$\hat{\rho}$. The information on the lexicographic order is again obtained from Theorem~\ref{thm Loewy except} and \cite[Theorem 6.1]{ChuangTan2001}. This shows that
$B$ has quiver $Q_{k,l}(p)$.
If $k=p-1$, then $l=0$. By Proposition~\ref{prop (2:1) pairs hooks}, $B$ forms a $(2:1)$-pair with
the block $\bar{B}:=B_{p-2,0}$. The latter has quiver $Q_{p-2,0}(p)$, by induction.
By Corollary~\ref{cor l and r hooks} and Lemma~\ref{lemma alpha max},
$\bar{\alpha}$ is the largest $p$-regular partition of $n-1$ with $p$-core $\kappa_{\bar{B}}$ and $\partial$-value $1$. By
\ref{noth partial Scopes},
we consider the following part of the quiver of $\mathbfar{B}$, whose vertices lie in rows with $\partial$-values $0,1,2$ and $3$:
\begin{center}
\begin{tikzpicture}
\coordinate[label=left:$\bar{\alpha}$] (A) at (0,-0.5);
\coordinate[label=above:$\mu$] (M) at (0,0);
\coordinate[label=above:$\bar{\beta}$] (B) at (1,0);
\coordinate[label=below left:$\bar{\gamma}$] (G) at (0.5,-0.5);
\draw[con] (M) node{\color{red} $\bullet$} -- (G) node{\color{red} $\bullet$};
\draw[con] (M) -- (A) node{\color{red} $\bullet$};
\draw[con] (M) -- (G);
\draw[con] (A) -- (B) node{\color{red} $\bullet$};
\draw[con] (B) -- (G);
\draw[con] (B) -- (1,-0.5) node{$\bullet$};
\draw[con] (G) -- (1,-1) node{$\bullet$};
\draw[con] (1,-1) -- (1,-0.5);
\draw[con] (1,-0.5) -- (1.5,-1)node{$\bullet$};
\draw[con] (1,-1) -- (1.5,-1.5)node{$\bullet$};
\draw[con] (1.5,-1.5) -- (1.5,-1);
\draw[con] (M) -- (1,-0.5);
\end{tikzpicture}
\end{center}
By Corollary~\ref{cor l and r hooks}, $\bar{\gamma}$ is both $p$-regular and $p$-restricted, so that $\bar{\gamma}_+$ exists.
By Theorem~\ref{thm Loewy except}, we get $\bar{\alpha}=\bar{\gamma}_+$.
By Corollary~\ref{cor l and r hooks}, we further know that $\partial(\bar{\beta})=0$, and $\bar{\beta}$
is $p$-regular and not $p$-restricted, so that $\bar{\beta}_+$ does not exist. More precisely, we have $\bar{\beta}=(2(p-1),p,1)$.
Hence $\bar{\beta}$ is white, and is thus the largest $p$-regular white partition with $p$-core $\kappa_{\bar{B}}$. This identifies the
positions of $\bar{\alpha}$, $\bar{\gamma}$ and $\bar{\beta}$.
The remaining red vertex belongs to the $p$-regular (black) partition $\mu$
with $\bar{Z}\cong D^\mu$. Since all other vertices connected to $\bar{\beta}$ are smaller than $\bar{\beta}$, Theorem~\ref{thm Loewy except} also
implies $\bar{Y}=\{0\}$.
By Corollary~\ref{cor l and r hooks}, $\beta$ is $p$-regular and not $p$-restricted, so that $\beta_+$ is not defined.
Moreover, we have $\alpha=(2(p-1)+1,p)=\langle p,p\rangle$ with $\partial(\alpha)=0$, and $\alpha$ is white.
By Lemma~\ref{lemma Ext alpha} and Theorem~\ref{thm Loewy except}, we deduce
that $\alpha$ is only connected to $\beta$. As for the lexicographic ordering, we have $\mu=(3p-2)=\langle p-1\rangle$, since this is the lexicographically largest (black) partition in $\bar{B}$.
Thus $\hat{\mu}=(3p-1)=\langle p\rangle$; in particular, $\hat{\mu}>\alpha$. Note also that $\hat{\mu}$
satisfies $Z\cong D^{\hat{\mu}}$. This now gives the corresponding part of the quiver of $B$.
\begin{center}
\begin{tikzpicture}
\coordinate[label=above:$\alpha$] (A) at (0.5,0);
\coordinate[label=above:$\hat{\mu}$] (M) at (0,0);
\coordinate[label=above:$\gamma$] (G) at (1,0);
\coordinate[label=below left:$\beta$] (B) at (0.5,-0.5);
\draw[con] (M) node{\color{blue} $\bullet$} -- (B) node{\color{blue} $\bullet$};
\draw[con] (M) -- (B);
\draw[con] (A) node{\color{blue} $\bullet$} -- (B);
\draw[con] (B) -- (G) node{\color{blue} $\bullet$};
\draw[con] (G) -- (1,-0.5) node{$\bullet$};
\draw[con] (B) -- (1,-1) node{$\bullet$};
\draw[con] (1,-1) -- (1,-0.5);
\draw[con] (1,-0.5) -- (1.5,-1)node{$\bullet$};
\draw[con] (1,-1) -- (1.5,-1.5)node{$\bullet$};
\draw[con] (1.5,-1.5) -- (1.5,-1);
\draw[con] (M) -- (1,-0.5);
\end{tikzpicture}
\end{center}
The remaining information on the lexicographic order is again obtained from Theorem~\ref{thm Loewy except} and \cite[Theorem 6.1]{ChuangTan2001}.
This shows that $B$ has quiver $Q_{k,l}(p)$.
To complete the case $k+l\leqslant p-1$, it remains to treat the blocks $B_{1,p-3}$ and $B_{1,p-2}$.
So let first $k=1$ and $l=p-3$. By Proposition~\ref{prop (2:1) pairs hooks}, $B$ forms a $(2:1)$-pair
with $\bar{B}:=B_{1,p-4}$, which has quiver $Q_{1,p-4}(p)$, by induction. By Lemma~\ref{lemma alpha max}
and Corollary~\ref{cor l and r hooks}, $\bar{\alpha}$ is the smallest $p$-regular partition of $n-1$ with $p$-core $(1^{p-3})$
and $\partial$-value 2. By \ref{noth partial Scopes}, we thus have to consider the following part of the quiver of $\mathbfar{B}$, whose
vertices belong to the rows with $\partial$-values $0,1,2$ and $3$:
\begin{center}
\begin{tikzpicture}
\coordinate[label=left:$\bar{\beta}_+$] (B+) at (0,-0.5);
\coordinate[label=above:$\mu$] (M) at (0,0);
\coordinate[label=above:$\rho$] (R) at (1,0);
\coordinate[label=right:$\bar{\beta}$] (B) at (1,-0.5);
\coordinate[label=below right:$\bar{\alpha}_+$] (A+) at (0,-1);
\coordinate[label=right:$\bar{\alpha}$] (A) at (0.5,-1);
\draw[con] (M) node{\color{red} $\bullet$} -- (B) node{\color{red} $\bullet$};
\draw[con] (M) -- (B+) node{\color{red} $\bullet$};
\draw[con] (B+) -- (R) node{\color{red} $\bullet$};
\draw[con] (B+) -- (A) node{\color{red} $\bullet$};
\draw[con] (A) -- (B);
\draw[con] (R) -- (B);
\draw[con] (B+) -- (A+) node{\color{red} $\bullet$};
\draw[con] (-0.5,-1) node{$\bullet$} -- (B+);
\draw[con] (-0.5,-1) -- (-0.5,-1.5) node{$\bullet$};
\draw[con] (-0.5,-1.5) -- (A+);
\end{tikzpicture}
\end{center}
By Corollary~\ref{cor l and r hooks}, $\bar{\gamma}$ is $p$-singular, while $\bar{\alpha}$ and $\bar{\beta}$
are both $p$-regular and $p$-restricted. Thus $\bar{\alpha}_+$ and $\bar{\beta}_+$ exist. As well, $\partial(\bar{\beta})=1$. This identifies
the positions of $\bar{\alpha}$, $\bar{\beta}$, $\bar{\alpha}_+$ and $\bar{\beta}_+$. By Theorem~\ref{thm Loewy except} and \cite{ChuangTan2001}, we
further deduce that $\bar{Y}\cong D^\mu\oplus D^\rho$ and $\bar{Z}=\{0\}$.
By Corollary~\ref{cor l and r hooks}, $\gamma$ is $p$-regular and $p$-restricted, and by Theorem~\ref{thm Loewy except}, we have $\gamma_+=\alpha$ with
$\partial$-value 1. So from Theorem~\ref{thm Loewy except} we deduce the following information on the corresponding part of the quiver of $B$:
\begin{center}
\begin{tikzpicture}
\coordinate[label=left:$\alpha_+$] (A+) at (0,-0.5);
\coordinate[label=above:$\hat{\mu}$] (M) at (0,0);
\coordinate[label=above:$\hat{\rho}$] (R) at (1,0);
\coordinate[label=right:$\gamma$] (G) at (1,-0.5);
\coordinate[label=below right:$\beta_+$] (B+) at (0,-1);
\coordinate[label=right:$\alpha$] (A) at (0.5,-0.5);
\draw[con] (M) node{\color{blue} $\bullet$} -- (G) node{\color{blue} $\bullet$};
\draw[con] (M) -- (A+) node{\color{blue} $\bullet$};
\draw[con] (A+) -- (R) node{\color{blue} $\bullet$};
\draw[con] (M) -- (A) node{\color{blue} $\bullet$};
\draw[con] (R) -- (A);
\draw[con] (R) -- (G);
\draw[con] (A+) -- (B+) node{\color{blue} $\bullet$};
\draw[con] (-0.5,-1) node{$\bullet$} -- (A+);
\draw[con] (-0.5,-1) -- (-0.5,-1.5) node{$\bullet$};
\draw[con] (-0.5,-1.5) -- (B+);
\draw[con] (B+) -- (A);
\end{tikzpicture}
\end{center}
Here $Y\cong D^{\hat{\mu}}\oplus D^{\hat{\rho}}$. By Lemma~\ref{lemma Ext alpha}, we also conclude that $\hat{\rho}$,
$\hat{\mu}$ and $\beta_+$ are the only neighbours of $\alpha$. Thus $B$ has quiver $Q_{1,p-3}(p)$.
The information concerning the lexicographic ordering follows again from Theorem~\ref{thm Loewy except} and
\cite[Theorem~6.1]{ChuangTan2001}.
Now let $k=1$ and $l=p-2$. Then $B$ forms a $(2:1)$-pair with $\bar{B}:=B_{1,p-3}$, which has quiver
$Q_{1,p-3}(p)$, as we have just seen. By Corollary~\ref{cor l and r hooks},
we know that $\bar{\beta}$ and $\bar{\gamma}$ are $p$-singular, but $\bar{\beta}$ is $p$-restricted, so that $\bar{\beta}_+$ exists.
Moreover, $\bar{\alpha}$ is both $p$-regular and $p$-restricted, so that $\bar{\alpha}_+$ exists. As well, $\bar{\alpha}$
has $\partial$-value $1$ and is, by Lemma~\ref{lemma alpha max}, the smallest $p$-regular partition of $n-1$ with
$p$-core $(1^{p-2})$ and $\partial$-value $1$. We also get $\bar{\beta}=(2^p,1^{p-2})$, which has $\partial$-value $0$
and is black; in particular, also $\bar{\beta}_+$ is black. This gives the following relevant part of the quiver of
$\bar{B}$, which lies in the rows with $\partial$-values $0,1,2$ and $3$:
\begin{center}
\begin{tikzpicture}
\coordinate[label=above:$\bar{\beta}_+$] (B+) at (0,0);
\coordinate[label=above:$\rho$] (R) at (1,0);
\coordinate[label=right:$\bar{\alpha}$] (A) at (1,-0.5);
\coordinate[label=below right:$\bar{\alpha}_+$] (A+) at (0.5,-0.5);
\draw[con] (B+) node{\color{red} $\bullet$} -- (A) node{\color{red} $\bullet$};
\draw[con] (B+) -- (0,-0.5) node{$\bullet$};
\draw[con] (0,-0.5) -- (R) node{\color{red} $\bullet$};
\draw[con] (B+) -- (A+) node{\color{red} $\bullet$};
\draw[con] (R) -- (A+);
\draw[con] (R) -- (A);
\draw[con] (0,-0.5) -- (0,-1) node{$\bullet$};
\draw[con] (0,-1) -- (A+);
\draw[con] (-0.5,-1) node{$\bullet$} -- (-0.5,-1.5)node{$\bullet$};
\draw[con] (-0.5,-1.5) -- (0,-1);
\draw[con] (-0.5,-1) -- (0,-0.5);
\end{tikzpicture}
\end{center}
By Theorem~\ref{thm Loewy except}, we have $D^\rho\cong \bar{Z}$. Note that $\alpha=(3,2^{p-1},1^{p-2})=\langle 2,2\rangle$,
which has $\partial$-value $0$ and is black. Displaying all partitions of $B$ on a $[2,3^{p-1}]$-abacus, we deduce that the $p$-regular
partitions of $B$ with $\partial$-value 0 are precisely $\langle p\rangle,\langle m,m-1\rangle$, for $m\in\{3,\ldots,p\}$, and $\langle 2,2\rangle=\alpha$.
Hence $\alpha$ is the lexicographically smallest $p$-regular partition of $B$ with $\partial$-value 0.
With \ref{noth partial Scopes}, Lemma~\ref{lemma Ext alpha}
and Theorem~\ref{thm Loewy except}, we get the following information on the corresponding part of the quiver of $B$:
\begin{center}
\begin{tikzpicture}
\coordinate[label=above:$\alpha_+$] (A+) at (0,0);
\coordinate[label=above:$\hat{\rho}$] (R) at (1,0);
\coordinate[label=right:$\alpha$] (A) at (1.5,0);
\coordinate[label=below right:$\beta_+$] (B+) at (0.5,-0.5);
\draw[con] (A+) node{\color{blue}$\bullet$} -- (0,-0.5) node{$\bullet$};
\draw[con] (0,-0.5) -- (R) node{\color{blue} $\bullet$};
\draw[con] (A+) -- (B+) node{\color{blue} $\bullet$};
\draw[con] (R) -- (B+);
\draw[con] (B+) -- (A) node{\color{blue}$\bullet$};
\draw[con] (0,-0.5) -- (0,-1) node{$\bullet$};
\draw[con] (0,-1) -- (B+);
\draw[con] (-0.5,-1) node{$\bullet$} -- (-0.5,-1.5)node{$\bullet$};
\draw[con] (-0.5,-1.5) -- (0,-1);
\draw[con] (-0.5,-1) -- (0,-0.5);
\end{tikzpicture}
\end{center}
Here $D^{\hat{\rho}}\cong Z$.
It remains to verify that $\alpha$ is only connected to $\beta_+$. This is not immediate at this stage, since we do
not know $\mathbfar{Y}$ and $Y$. However, from \cite{BO1998}, we deduce that the Mullineux conjugate partition of $\alpha$
is $(2(p-1)+1,p)$. We have seen above that this partition has, in the quiver of its block $B_{p-1,0}$, only
one neighbour. Therefore, $\alpha$ also has only one neighbour, which must be $\beta_+$.
This completes the proof in the
case $k=1$ and $l=p-2$, showing that $B$ then has quiver $Q_{1,p-2}(p)$.
To summarize, we have now proven Theorem~\ref{thm main1} in the case where $k+l\leqslant p-1$. Next suppose that $k+l=p+1$. Then $B_{k,l}$ is Scopes equivalent to
$B_{k-1,l-1}$, by Proposition~\ref{prop (2:1) pairs hooks}. By induction $B_{k-1,l-1}$ has quiver $Q_{k-1,l-1}(p)$. Since the Scopes
equivalence preserves the lexicographic ordering as well as $\partial$-values and colours of the corresponding partitions, we deduce
that also $B_{k,l}$ has quiver $Q_{k-1,l-1}(p)$, as claimed.
Lastly, we need to treat the case where $p+2\leqslant k+l\leqslant 2p-1$. To show that $B_{k,l}$ then has quiver $Q_{k-1,l-1}(p)$, we again argue by induction on $k+l$. Since the arguments used in the case $2\leqslant k+l\leqslant p-1$ above
translate almost literally, we leave the details to the reader.
\end{proof}
As a consequence of Theorem~\ref{thm main1 details} and Proposition~\ref{prop graph isos}, we now also get the
following result, which immediately implies Theorem~\ref{thm main2}.
\begin{prop}\label{prop main2}
Let $p\geqslant 5$, and let $k,k',l,l'\in \mathbb{N}_0$ be such that $(k,1^l)$ and $(k',1^{l'})$ are $p$-cores. Then the blocks
$B_{k,l}$ and $B_{k',l'}$ are Morita equivalent if and only if one of the following cases occurs:
\begin{itemize}
\item[{\rm (i)}] $(k,1^l)=(k',1^{l'})$;
\item[{\rm (ii)}] $(k,1^l)'=(k',1^{l'})$;
\item[{\rm (iii)}] $k+l=p-1$, $k'+l'=p+1$, $(k,1^l)=(k'-1,1^{l'-1})$;
\item[{\rm (iv)}] $k+l=p-1$, $k'+l'=p+1$, $(k,1^l)'=(k'-1,1^{l'-1})$.
\end{itemize}
\end{prop}
\begin{proof}
If $(k,1^l)'=(k',1^{l'})$, then $B_{k',l'}$ is isomorphic, hence Morita equivalent, to
$B_{k,l}$, an isomorphism being given by tensoring with the sign representation. If $k+l=p-1$, $k'+l'=p+1$ and $(k,1^l)=(k'-1,1^{l'-1})$, then
$B_{k,l}$ and $B_{k',l'}$ are Scopes, hence Morita, equivalent, by Proposition~\ref{prop (2:1) pairs hooks}(c).
So, conversely, suppose that $B_{k,l}$ is Morita equivalent to $B_{k',l'}$. Then the Ext-quivers of $B_{k,l}$ and $B_{k',l'}$
are isomorphic as undirected graphs. We distinguish three cases. If $k+l\leqslant p-1$ and $k'+l'\leqslant p-1$, then $B_{k,l}$ has Ext-quiver
$Q_{k,l}(p)$ and $B_{k',l'}$ has Ext-quiver $Q_{k',l'}(p)$, by Theorem~\ref{thm main1 details}.
By Proposition~\ref{prop graph isos}, we must have $(k,l)=(k',l')$ or $(k',l')=(l+1,k-1)$. In the latter case, $(k',1^{l'})=(k,1^l)'$.
If $p+1\leqslant k+l\leqslant 2p-1$ and $p+1\leqslant k'+l'\leqslant 2p-1$, then
$B_{k,l}$ has Ext-quiver
$Q_{k-1,l-1}(p)$ and $B_{k',l'}$ has Ext-quiver $Q_{k'-1,l'-1}(p)$, by Theorem~\ref{thm main1 details}. Thus Proposition~\ref{prop graph isos} again
implies $(k,l)=(k',l')$, or $(k',1^{l'})=(k,1^l)'$.
Lastly, suppose that $0\leqslant k+l\leqslant p-1$ and $p+1\leqslant k'+l'\leqslant 2p-1$. Then $B_{k,l}$ has Ext-quiver
$Q_{k,l}(p)$ and $B_{k',l'}$ has Ext-quiver $Q_{k'-1,l'-1}(p)$, by Theorem~\ref{thm main1 details}. So this time
Proposition~\ref{prop graph isos} implies $(k,l)=(k'-1,l'-1)$ or $(k,l)=(l',k'-2)$; in particular, $k+l=p-1$ and $k'+l'=p+1$.
If $(k,l)=(l',k'-2)$, then $(k,1^l)'=(k'-1,1^{l'-1})$.
This completes the proof of the proposition.
\end{proof}
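For instance, for $p=5$ case (iii) applies to the blocks with $5$-cores $(3,1)$ and $(4,1^2)$: here $k+l=4=p-1$, $k'+l'=6=p+1$ and $(3,1)=(k'-1,1^{l'-1})$, so these two blocks are Scopes, hence Morita, equivalent.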
\begin{noth}{\bf The case $p=3$.}\,\label{noth p=3}
To conclude this section, we now also consider the case
$p=3$. By \cite{Scopes1991}, there are five Scopes classes of blocks
of weight $2$, representatives of these being given by the blocks with $3$-cores $\emptyset, (1),(2),(1^2)$, and $(3,1^2)$. Note that all of these cores are hook partitions. The decomposition numbers of these blocks are well known; see \cite{JK1981}. As well, the Loewy structures of the respective Specht modules can easily be determined.
Recall further that Theorem~\ref{thm Loewy except} also holds for $p=3$. Thus, examining the principal blocks of $F\mathfrak{S}_6$ and $F\mathfrak{S}_7$ and then arguing
inductively
as in the proof of Theorem~\ref{thm main1 details}, we see that the Ext-quivers of these five blocks are as drawn below. Here, as before, we order
partitions with respect to their $\partial$-values, and the partitions with the same $\partial$-values with respect to the lexicographic ordering, from left to right. Again,
an arrow $\lambda\to \mu$ in the quivers indicates that $\lambda>\mu$ and $\Ext^1(D^\lambda,D^\mu)\neq \{0\}$.
\begin{center}
\begin{tabular}{|c|c|c|c|c|}\hline
$\emptyset$& $(1)$& $(2)$ &$(1^2)$& $(3,1^2)$\\\hline\hline
\begin{tikzpicture}
\coordinate (B+) at (0,-0.5);
\coordinate[label=above:b] (M) at (0,0);
\coordinate[label=above:w] (R) at (1,0);
\coordinate (B) at (1,-0.5);
\coordinate (A) at (0.5,-1);
\draw[con] (M) node{$\bullet$} -- (B) node{$\bullet$};
\draw[con] (M) -- (B+) node{$\bullet$};
\draw[con] (B+) -- (R) node{$\bullet$};
\draw[con] (B+) -- (A) node{$\bullet$};
\draw[con] (A) -- (B);
\draw[con] (R) -- (B);
\end{tikzpicture}&
\begin{tikzpicture}
\coordinate (A+) at (0,-0.5);
\coordinate[label=above:b] (M) at (0,0);
\coordinate[label=above:w] (R) at (1,0);
\coordinate (G) at (1,-0.5);
\coordinate(A) at (0.5,-0.5);
\draw[con] (M) node{$\bullet$} -- (G) node{$\bullet$};
\draw[con] (M) -- (A+) node{$\bullet$};
\draw[con] (M) -- (A) node{$\bullet$};
\draw[con] (A+) -- (R) node{$\bullet$};
\draw[con] (B) -- (G) node{$\bullet$};
\draw[con] (R) -- (G);
\draw[con] (R) -- (A);
\end{tikzpicture}
&
\begin{tikzpicture}
\coordinate[label=above:w] (A) at (0.5,0);
\coordinate[label=above:b] (M) at (0,0);
\coordinate[label=above:w] (G) at (1,0);
\coordinate (B) at (0.5,-0.5);
\draw[con] (M) node{$\bullet$} -- (B) node{ $\bullet$};
\draw[con] (M) -- (B);
\draw[con] (A) node{$\bullet$} -- (B);
\draw[con] (B) -- (G) node{$\bullet$};
\draw[con] (G) -- (1,-0.5) node{$\bullet$};
\draw[con] (M) -- (1,-0.5);
\end{tikzpicture}
&
\begin{tikzpicture}
\coordinate[label=above:b] (A+) at (0,0);
\coordinate[label=above:w] (R) at (1,0);
\coordinate[label=right:b] (A) at (1.5,0);
\coordinate (B+) at (0.5,-0.5);
\draw[con] (A+) node{$\bullet$} -- (0,-0.5) node{$\bullet$};
\draw[con] (0,-0.5) -- (R) node{$\bullet$};
\draw[con] (A+) -- (B+) node{$\bullet$};
\draw[con] (R) -- (B+);
\draw[con] (B+) -- (A) node{$\bullet$};
\end{tikzpicture}&
\begin{tikzpicture}
\coordinate[label=above:b] (A) at (0,0);
\coordinate[label=above:w] (B) at (0.5,0);
\coordinate[label=above:w] (C) at (1,0);
\coordinate[label=above:b] (D) at (1.5,0);
\coordinate (E) at (0.75,-0.5);
\draw[con] (A) node{$\bullet$} -- (E) node{ $\bullet$};
\draw[con] (B) node{$\bullet$} -- (E);
\draw[con] (E) node{$\bullet$} -- (C)node{$\bullet$};
\draw[con] (E) node{$\bullet$} -- (D)node{$\bullet$};
\end{tikzpicture}\\\hline
\end{tabular}
\end{center}
The blocks with $3$-cores $(2)$ and $(1^2)$, respectively, are isomorphic via tensoring with the sign representation, and their quivers
are isomorphic as undirected graphs. The above table also shows that the quivers of the blocks labelled by $\emptyset, (1), (2)$ and $(3,1^2)$ are
pairwise non-isomorphic as undirected graphs. In particular, these partitions parametrize the four Morita equivalence classes of $3$-blocks of symmetric groups of weight 2.
\end{noth}
\begin{appendix}
\section{Abacus Combinatorics}\label{sec abacus}
The purpose of this short section is to collect some useful
abacus combinatorics that we use repeatedly in this article. Most of this is
well known to the experts and can easily be verified. We, therefore, omit most of the details, but
present some illustrative examples. Throughout this section, let $p$ be a prime. Our notation will be chosen in
accordance with Section~\ref{sec pre}.
\begin{noth}{\bf Hook lengths and abacus displays.}\,\label{noth hooks diagram}
Suppose that $\lambda$ is a partition of $n$ with $p$-core $\kappa=(\kappa_1,\ldots,\kappa_t)$ and $p$-weight $w$. We display $\lambda$ on an
abacus with $p$ runners, labelled from $1$ to $p$. We denote this abacus by $\Gamma_\lambda$.
By \cite[2.7.13]{JK1981}, there is a bijection between the entries in the hook diagram of $\lambda$ divisible by $p$ and the set of pairs $((r,i),(s,i))$ such that
there is a bead on runner $i$ in row $r$ and a gap on runner $i$ in row $s<r$ of $\Gamma_\lambda$. The entry in the hook diagram
of $\lambda$ then equals $p(r-s)$. Moreover, one can also read off that the leg length $l$ of the hook in question
equals the number of beads passed when moving the bead from position
$(r,i)$ to position $(s,i)$.
\end{noth}
\begin{expl}\label{expl hook lengths}
Suppose that $p=3$ and $\lambda=(6,3^3,2^2)$. Then $\lambda$ has $p$-core $\kappa=(3,1)$ and $p$-weight $5$. We consider
the abacus display $\Gamma_\lambda$ with six beads as well as the hook diagram $H_\lambda$ of $\lambda$:
\begin{center}
$\Gamma_\lambda$: \quad \begin{tabular}{ccc}
$-$&$-$&$\bullet$\\
$\bullet$&$-$&$\bullet$\\
$\bullet$&$\bullet$&$-$\\
$-$&$-$&$\bullet$
\end{tabular}
\quad\quad
$H_\lambda$:\quad \begin{ytableau}
11&10&7&{\bf 3}&2&1\cr
7&{\bf 6}&{\bf 3}\\
{\bf 6}&5&2\\
5&4&1\\
{\bf 3}&2\\
2&1
\end{ytableau}
\end{center}
So there are five entries in $H_\lambda$ that are divisible by $3$. We record, on the one hand, their positions in $H_\lambda$, the lengths (hl) as well as
the leg lengths (ll) of the corresponding hooks and, on the other hand, the respective pairs of positions in $\Gamma_\lambda$
under the above-mentioned bijection:
\begin{center}
\begin{tabular}{|c|c|c|c|}\hline
position in $H_\lambda$& hl & ll& pair of positions in $\Gamma_\lambda$\\\hline\hline
$(1,4)$& $3$& $0$& $((4,3),(3,3))$\\\hline
$(2,2)$& $6$&$4$&$((3,2),(1,2))$\\\hline
$(2,3)$&$3$&$2$&$((3,2),(2,2))$\\\hline
$(3,1)$&$6$&$4$&$((3,1),(1,1))$\\\hline
$(5,1)$&$3$&$1$&$((2,1),(1,1))$\\\hline
\end{tabular}
\end{center}
\end{expl}
\begin{noth}{\bf Colours and $\partial$-values of weight-$2$ partitions.}\,\label{noth colour weight 2}
Suppose now that $p\geqslant 3$ and that $\lambda=(\lambda_1,\ldots,\lambda_s)$ is a partition of $n$
with $p$-core $\kappa=(\kappa_1,\ldots,\kappa_t)$ and $p$-weight $2$. Recall from \ref{noth partial} the definition
of $\partial(\lambda)$.
We consider an $[m_1,\ldots,m_p]$-abacus display $\Gamma_\lambda$ of $\lambda$ with at least $2p+t$ beads. In the notation of \ref{noth abacus},
there are three possibilities: $\lambda=\langle i\rangle$, $\lambda=\langle i,i\rangle$, or $\lambda=\langle i,j\rangle$, for
some $1\leqslant i<j\leqslant p$. One of our next aims is to show how to determine the colour of $\lambda$ in the case $\partial(\lambda)=0$, using the
abacus display $\Gamma_\lambda$.
This information was needed, for instance, in the proof of Lemma~\ref{lemma l and r}.
Suppose that $\lambda=\langle i\rangle$. Then \ref{noth hooks diagram} shows that $H_\lambda$ has an entry equal to $2p$, and the leg length
of the corresponding hook equals the number of beads passed when moving the (unique) movable bead on runner $i$ two positions up. Moreover, $H_\lambda$
has an entry equal to $p$. The leg length
of the corresponding hook equals the number of beads passed when moving the movable bead on runner $i$ one position up.
If $\lambda=\langle i,i\rangle$, then, by \ref{noth hooks diagram}, $H_\lambda$ also has an entry equal to $2p$. The leg length of
the corresponding hook equals the number of beads passed when putting the lower of the two movable beads on runner $i$ two positions up.
As well, $H_\lambda$ has an entry equal to $p$. The leg length of the corresponding hook equals the number
of beads passed when moving the upper of the two movable beads on runner $i$ one position up.
Lastly, suppose that $\lambda=\langle i,j\rangle$, for
some $i<j$. By \ref{noth hooks diagram}, $H_\lambda$ then has two entries equal to $p$.
The leg lengths of the corresponding hooks equal the numbers of beads passed when moving the movable bead on runner $i$ (respectively,
the movable bead on runner $j$) one position up.
From now on, suppose, in addition, that $\partial(\lambda)=0$. Suppose, moreover, that the movable beads lie in positions $(x,i)$ and $(y,j)$ of $\Gamma_\lambda$. If $x<y$, then
we consider rows $x-1,\ldots,y$ of $\Gamma_\lambda$:
\begin{center}
\begin{tabular}{ccccc}
$\cdots$&$-$&{\cellcolor{lightgray} $m_1$}&$\bullet$&{\cellcolor{lightgray} $r_1$}\\
{\cellcolor{lightgray} $l_1$}&$\bullet$&$\cdots$&$\bullet$&$\cdots$\\
$\vdots$&$\vdots$&$\vdots$&$\vdots$&$\vdots$\\
$\cdots$&$-$&$\cdots$&$\bullet$&$\cdots$\\
$\cdots$&$-$&$\cdots$&$-$&{\cellcolor{gray} $r_2$}\\
{\cellcolor{gray} $l_2$}&$-$&{\cellcolor{gray} $m_2$}&$\bullet$&$\cdots$\\
\end{tabular}
\end{center}
Here as well as in all subsequent abacus displays, $m,m_1,m_2,l_1,l_2,r_1,r_2,l,r$ are the numbers of beads in the respective parts of $\Gamma_\lambda$, as shown in the diagrams.
Since $\lambda$ has weight $2$, we must have $m_1\geqslant m_2$, $l_1\geqslant l_2$ and $r_1\geqslant r_2$.
But then, by \ref{noth hooks diagram} we get that $\partial(\lambda)=|(l_1+r_1+m_1+1)-(l_2+m_2+r_2)|>0$, a contradiction.
If $x>y+1$, then rows $y-1,\ldots,x$ of $\Gamma_\lambda$ have shape
\begin{center}
\begin{tabular}{ccccc}
$\cdots$&$\bullet$&$\cdots$&$-$&{\cellcolor{lightgray} $r_1$}\\
{\cellcolor{lightgray} $l_1$}&$\bullet$&{\cellcolor{lightgray} $m_1$}&$\bullet$&$\cdots$\\
$\cdots$&$\bullet$&$\cdots$&$-$&$\cdots$\\
$\vdots$&$\vdots$&$\vdots$&$\vdots$&$\vdots$\\
$\cdots$&$\bullet$&$\cdots$&$-$&$\cdots$\\
$\cdots$&$-$&{\cellcolor{gray} $m_2$}&$-$&{\cellcolor{gray} $r_2$}\\
{\cellcolor{gray} $l_2$}&$\bullet$&$\cdots$&$-$&$\cdots$\\
\end{tabular}
\end{center}
Thus, we get $\partial(\lambda)=|(m_1+l_1+r_1+1)-(l_2+r_2+m_2)|>0$, again a contradiction.
If $x=y+1$, then rows $x-2,x-1,x$ of $\Gamma_\lambda$ have shape
\begin{center}
\begin{tabular}{ccccc}
$\cdots$&$\bullet$&$\cdots$&$-$&{\cellcolor{lightgray} $r_1$}\\
{\cellcolor{lightgray} $l_1$}&$-$&{\cellcolor{gray} $m$}&$\bullet$&{\cellcolor{gray} $r_2$}\\
{\cellcolor{gray} $l_2$}&$\bullet$&$\cdots$&$-$&$\cdots$\\
\end{tabular}
\end{center}
We get $\partial(\lambda)=|(l_1+r_1+m)-(l_2+r_2+m+1)|$, thus $l_1+r_1=l_2+r_2+1$.
Lastly, if the movable bead on runner $i$ lies in the same row $x$ as the movable bead on runner $j$, then we consider
rows $x$ and $x-1$ of $\Gamma_\lambda$:
\begin{center}
\begin{tabular}{ccccc}
$\cdots$& $-$ &{\cellcolor{lightgray} $m_1$}&$-$&{\cellcolor{lightgray} $r$}\\
{\cellcolor{gray} $l$}& $\bullet$ &{\cellcolor{gray} $m_2$}&$\bullet$&$\cdots$\\
\end{tabular}
\end{center}
Note that still $m_1\geqslant m_2$, since there is no movable bead on any runner different from $i$ and $j$.
So there are two ways to obtain $[\kappa]$ from $[\lambda]$, depending on which bead is moved first. If
we move the bead on runner $i$ first, then from \ref{noth hooks diagram} we get
$0=\partial(\lambda)=|(l+r+m_1)-(l+r+m_2)|$, thus $m_1=m_2$.
Analogously, also in the case that we first move the bead on runner $j$, we get
$0=\partial(\lambda)=|(l+r+m_2+1)-(l+r+m_1+1)|$, thus $m_1=m_2$.
\end{noth}
\begin{expl}\label{expl colour weight 2}
To illustrate the combinatorics in \ref{noth colour weight 2}, we consider $p=3$ and the principal block of $F\mathfrak{S}_7$ with $3$-core
$(1)$. Moreover, we consider a $[2,3,2]$-abacus, for every partition of this block.
(a)\, For $\lambda=(5,2)=\langle 3\rangle$, we have
\begin{center}
$\Gamma_\lambda$: \quad \begin{tabular}{ccc}
$\bullet$&$\bullet$&$\bullet$\\
$\bullet$&$\bullet$&$-$\\
$-$&$\bullet$&$-$\\
$-$&$-$&{\color{red} $\bullet$}
\end{tabular}
\quad\quad
$H_\lambda$:\quad \begin{ytableau}
{\color{red} 6}&5&{\bf 3} &2&1\cr
2&1
\end{ytableau}
\end{center}
The leg length of the hook of length $6$ is $1$, the leg length of the hook of length $3$ is $0$. Moving the red bead on runner 3 one position up, we do not
pass any bead, moving this bead two positions up we pass one bead.
(b)\, For $\lambda=(2^2,1^3)=\langle 3,3\rangle$, we have
\begin{center}
$\Gamma_\lambda$: \quad \begin{tabular}{ccc}
$\bullet$&$\bullet$&$-$\\
$\bullet$&$\bullet$&{\color{red} $\bullet$}\\
$-$&$\bullet$&{\color{blue} $\bullet$}\\
\end{tabular}
\quad\quad
$H_\lambda$:\quad \begin{ytableau}
{\color{blue} 6}&2\cr
5&1\cr
{\color{red} 3}\cr
2\cr
1
\end{ytableau}
\end{center}
The hook of length 6 has leg length 4, the hook of length 3 has leg length 2. Moving the blue bead two positions up we pass four beads, moving
the red bead one position up we pass two beads.
(c)\, For $\lambda=(4,3)=\langle 3,2\rangle$, we have
\begin{center}
$\Gamma_\lambda$: \quad \begin{tabular}{ccc}
$\bullet$&$\bullet$&$\bullet$\\
$\bullet$&$\bullet$&$-$\\
$-$&$-$&{\color{blue} $\bullet$}\\
$-$&{\color{red} $\bullet$}&$-$
\end{tabular}
\quad\quad
$H_\lambda$:\quad \begin{ytableau}
5&4&{\color{red} 3}&1\cr
{\color{blue} 3}&2&1
\end{ytableau}
\end{center}
The hook of length 3 corresponding to the red entry in $H_\lambda$ has leg length 1, the hook of length 3 corresponding to the blue entry in $H_\lambda$ has leg length 0.
Moreover, $\partial(\lambda)=0$ and, since the larger leg length of the hooks just mentioned is odd, $\lambda$ is white.
Moving the red bead one position up we pass one bead, moving the blue bead one position up we do not pass any bead.
(d)\, For $\lambda=(2^3,1)=\langle 3,1\rangle$, we have
\begin{center}
$\Gamma_\lambda$: \quad \begin{tabular}{ccc}
$\bullet$&$\bullet$&$\bullet$\\
$-$&$\bullet$&$-$\\
{\color{red} $\bullet$}&$\bullet$&{\color{blue} $\bullet$}\\
\end{tabular}
\quad\quad
$H_\lambda$:\quad \begin{ytableau}
5&{\color{blue} 3}\cr
4&2\cr
{\color{red} 3}&1\cr
1
\end{ytableau}
\end{center}
The hook of length 3 corresponding to the red entry in $H_\lambda$ has leg length 1, the hook of length 3 corresponding to the blue entry in $H_\lambda$ has leg length 2.
Moreover, $\partial(\lambda)=0$ and, since the larger leg length of the hooks just mentioned is even, $\lambda$ is black.
Moving the red bead one position up we pass one bead, moving the blue bead one position up we pass two beads.
\end{expl}
\section{The Principal $p$-Blocks of $\mathfrak{S}_{2p}$ and $\mathfrak{S}_{2p+1}$}\label{sec B0}
Let $p\geqslant 5$ be a prime.
In the following we shall show that the principal block $B_{0,0}:=B_\emptyset$ of $F\mathfrak{S}_{2p}$ has
Ext-quiver $Q_{0,0}(p)$, and the principal block $B_{1,0}:=B_{(1)}$ of $F\mathfrak{S}_{2p+1}$ has Ext-quiver $Q_{1,0}(p)$. We should also like to emphasize that the structure of the Ext-quivers of $B_{0,0}$ and $B_{1,0}$ can be deduced
from \cite{Scopes1995} and \cite{ChuangTan2001}. As well, these quivers appear in work of Martin \cite{Martin1989,Martin1990}.
We shall give an elementary, self-contained proof here, the most important
information being given by the decomposition matrix of $B_{0,0}$. The latter has been used in several publications, such as \cite{Martin1989,Scopes1995}, which
refer to the book \cite{Robinson}; that book, however, does not provide many details. We shall, thus, give a brief account in the form in which we shall
use it. Our strategy will then be to induce indecomposable projective $F\mathfrak{S}_{2p-1}$-modules to $F\mathfrak{S}_{2p}$.
\begin{noth}\label{noth abacus and order}
{\bf Abacus labelling and order on partitions.}\, Consider the principal block $B_{0,0}$ of $F\mathfrak{S}_{2p}$. We display the $p$-core $\emptyset$ of $B_{0,0}$ as well as all
partitions of $B_{0,0}$ on a $[2^p]$-abacus. As in \ref{noth abacus}(b) we identify partitions with their
respective abacus displays.
(a)\, With the above notation, the lexicographic ordering on partitions translates as follows:
\quad (i)\, For all $p\geqslant j>i\geqslant 1$ and $p\geqslant a\geqslant b\geqslant 1$, we have $\langle j\rangle >\langle i\rangle >\langle a,b\rangle$.
\quad (ii)\, For all $p\geqslant j> i\geqslant 1$ and $p\geqslant j' > i'\geqslant 1$, we have
$$\langle j,i\rangle>\langle j',i'\rangle \Leftrightarrow j>j'\,, \text{ or } j=j' \text{ and } i>i'\,.$$
It remains to determine the positions of the
partitions $\langle i, i\rangle$ with respect to the lexicographic ordering.
\quad (iii)\, For all $p\geqslant i\geqslant 3$, we have $\langle i,1\rangle>\langle i,i\rangle> \langle i-1, i-2\rangle$
and $\langle 2, 1\rangle > \langle 2, 2 \rangle > \langle 1, 1\rangle$.
(b)\, The total number of partitions of $B_{0,0}$ equals $2p+\binom{p}{2}$, of which $\binom{p+1}{2}-1$ are $p$-regular.
Moreover, the following $p+1$ partitions are $p$-singular:
\begin{equation}\label{eqn p-sing}
\langle 2,1\rangle \text{ and } \langle j,j\rangle\,, \text{ for } 1\leqslant j\leqslant p\,.
\end{equation}
All partitions in (\ref{eqn p-sing}) are $p$-restricted.
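For instance, for $p=5$ these counts read
\begin{equation*}
2p+\binom{p}{2}=20\,,\qquad \binom{p+1}{2}-1=14\,,\qquad p+1=6\,.
\end{equation*}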
\end{noth}
We now start to induce indecomposable projective $F\mathfrak{S}_{2p-1}$-modules to $B_{0,0}$.
\begin{noth}\label{noth induce from w=0}
{\bf Inducing from a block of $F\mathfrak{S}_{2p-1}$ of weight 0.}\,
Consider the block $B_{(p,1^{p-1})}$ of $F\mathfrak{S}_{2p-1}$ of weight $0$.
It has a unique simple module (up to isomorphism), namely
$D^{(p,1^{p-1})}\cong S^{(p,1^{p-1})}\cong P^{(p,1^{p-1})}$. The induced module $\ind_{\mathfrak{S}_{2p-1}}^{\mathfrak{S}_{2p}}(P^{(p,1^{p-1})})$
is of course also projective and, by the Branching Rules, it admits a Specht filtration whose Specht quotients are labelled
by those partitions of $2p$ that are obtained by adding a node to $(p,1^{p-1})$. Thus
$\ind_{\mathfrak{S}_{2p-1}}^{\mathfrak{S}_{2p}}(P^{(p,1^{p-1})})$ has a Specht filtration with quotients labelled by $(p+1,1^{p-1})$,
$(p,1^p)$ and $(p,2,1^{p-2})$. All of these partitions belong to $B_{0,0}$, so that
$$\ind_{\mathfrak{S}_{2p-1}}^{\mathfrak{S}_{2p}}(P^{(p,1^{p-1})})=P^{(p,1^{p-1})}\uparrow^{B_{0,0}}\,.$$
Now assume that $P:= P^{(p,1^{p-1})}\uparrow^{B_{0,0}}$ were decomposable. Then this module would
have an
indecomposable (projective) direct summand isomorphic to one of the Specht modules labelled
by $(p+1,1^{p-1})$, $(p,1^p)$, or $(p,2,1^{p-2})$. But such a Specht module would then be simple and projective, which is not possible
in a block of weight greater than $0$. Note that this argument uses the hypothesis $p\geqslant 5$, so that, by
\cite{HN2004} the
multiplicity of a Specht module in any Specht filtration of $P$ is unique. Alternatively, one could
also examine the endomorphism algebra of $P$.
Hence $P$
must be indecomposable. The labels of its Specht quotients with respect to the
$[2^p]$-abacus are:
$$(p+1, 1^{p-1}) = \langle 1\rangle, \ (p, 2, 1^{p-2}) = \langle p, 1\rangle, \ \ (p, 1^p) = \langle p, p\rangle\,.
$$
Recall from \ref{noth Specht filtration} that if $S^\mu$ is isomorphic to a subquotient of any Specht filtration of $P^\lambda$, then
$\mu=\lambda$ or $\lambda>\mu$. Given the Specht quotients of $P$, this shows that
$$P=P^{(p,1^{p-1})}\uparrow^{B_{0,0}}\cong P^{(p+1,1^{p-1})}=P^{\langle 1\rangle}\,.$$
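To illustrate the preceding argument for the smallest admissible prime $p=5$: adding a node to $(5,1^{4})$ in the three possible ways shows that $P^{(5,1^{4})}\uparrow^{B_{0,0}}\cong P^{(6,1^{4})}$ has a Specht filtration with quotients
\begin{equation*}
S^{(6,1^{4})}\,,\qquad S^{(5,2,1^{3})}\,,\qquad S^{(5,1^{5})}\,.
\end{equation*}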
\end{noth}
\begin{noth}\label{noth induce from w=1}
{\bf Inducing from a block of $F\mathfrak{S}_{2p-1}$ of weight 1.}\,
(a)\, We discuss first an appropriate parametrization of the partitions in blocks of $F\mathfrak{S}_{2p-1}$ of weight 1 that are relevant
for our investigations, so that we end up with our fixed
labelling. For $B_{0, 0}$ we have previously used the $[2^p]$-abacus; we have numbered the runners as $1, 2, \ldots, p$ from left to right, and used this to parametrize the
partitions.
We can also represent all partitions in $B_{0,0}$ on the $[3^a, 2^{p-a}]$-abacus, for any $1\leqslant a <p$.
More precisely, starting with the $[2^p]$-abacus, we insert one bead at each of the positions $1, 2, \ldots, a$, and then shift
all other beads and gaps by $a$ places. This means that a gap on runner $i$ in the $[2^p]$-abacus is now on the $(i+a)$th runner (taking $i+a$ modulo $p$).
For example, a gap on runner $1$ of the $[2^p]$-abacus becomes a gap on runner $1+a$ of the $[3^a, 2^{p-a}]$-abacus.
We label the runners of the new abacus cyclically, so that the rightmost runner has label $p-a$, that is,
the labels of the runners of this new abacus are
$$(p-a+1, p-a+2, \ldots, p, 1, 2, \ldots, p-a).
$$
Then a gap on the runner {\it labelled with $j$} comes from a gap that {\it is} on runner $j$ of the $[2^p]$-abacus.
We use this observation in the following.
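For example, for $p=5$ and $a=2$, the runners of the $[3^2,2^{3}]$-abacus carry the labels
\begin{equation*}
(4,\,5,\,1,\,2,\,3)
\end{equation*}
from left to right; a gap on runner $1$ of the $[2^5]$-abacus then sits on the third runner of the new abacus, which is precisely the runner labelled $1$.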
(b)\, Now we need to determine which blocks $B$ of $F\mathfrak{S}_{2p-1}$ of weight 1 contain a Specht module $S^\lambda$
with $S^\lambda\uparrow^{B_{0,0}}\neq\{0\}$. Equivalently, such a block $B$ satisfies $S^\mu\downarrow_B\neq \{0\}$, for some
Specht module $S^\mu$ in $B_{0,0}$. Thus, let $S^\mu$ be a Specht module in $B_{0,0}$ and represent $\mu$ on the
$[2^p]$-abacus. To obtain the (uniquely determined) multiplicity of a Specht $F\mathfrak{S}_{2p-1}$-module
in any Specht filtration of $S^\mu\downarrow_{\mathfrak{S}_{2p-1}}$, we distinguish three cases:
suppose first that every gap occurs on some runner $j$ with $2\leqslant j \leqslant p-1$. Then any bead which can be moved one place to the left occurs on some runner
$i$ with $2\leqslant i\leqslant p$ and can be moved to runner $i-1$. In each case, the corresponding Specht quotient $S^{\lambda}$ of $S^\mu\downarrow_{\mathfrak{S}_{2p-1}}$
belongs to a block whose $p$-core is represented on the $[2^k,3,1,2^l]$-abacus, for some
$k,l\in\{0,\ldots,p\}$ with $k+l=p-2$. These are precisely the hook partitions of $p-1$.
Second, suppose that there is a gap on runner $1$, that is $\mu=\langle 1\rangle$ or $\mu=\langle 1,i\rangle$, for some $i\in\{1,\ldots,p\}$.
If $\mu=\langle 1,2\rangle=(2^p)$, then $S^\mu\downarrow_{\mathfrak{S}_{2p-1}}\cong S^{(2^{p-1},1)}$, which belongs to the block
labelled by the hook partition $(2,1^{p-3})$ of $p-1$.
If $\mu=\langle 1,1\rangle=(1^{2p})$, then $S^\mu\downarrow_{\mathfrak{S}_{2p-1}}\cong S^{(1^{2p-1})}$, which belongs to the block
labelled by the hook partition $(1^{p-1})$ of $p-1$.
If $\mu=\langle 1\rangle=(p+1,1^{p-1})$, then $S^\mu\downarrow_{\mathfrak{S}_{2p-1}}$ has a Specht quotient isomorphic to $S^{(p+1,1^{p-2})}$ in the block
with $p$-core $(1^{p-1})$, and a Specht quotient isomorphic to $S^{(p,1^{p-1})}$ in the block of weight 0.
If $\mu=\langle 1,i\rangle$ with $i\geqslant 3$, then the Young diagram of $\mu$ has three removable nodes, which correspond to beads on runners $2$, $i$ and $i+1$ (when $i<p$).
Moving the bead one place to the left in each case yields a partition in a weight-1 block labelled by a hook partition, as we have just seen above.
For $i=p$, so that $\langle 1, p\rangle = (p, 2, 1^{p-2})$, the restriction to
$\mathfrak{S}_{2p-1}$ has a Specht quotient labelled by the partition $(p,1^{p-1})$ of $2p-1$, which belongs to the block of weight 0; this is the one case not yet covered.
Third, suppose there is a gap on runner $p$ with $\mu$ not yet considered, then $\mu = \langle i, p\rangle$ for $2\leqslant i\leqslant p$ or $\mu = \langle p\rangle$.
If $\mu = \langle i, p\rangle$ for $2\leqslant i < p-1$ then the Young diagram has three removable nodes. The abacus presentation yields three
beads which can be moved by one place to the left and in each case the corresponding Specht quotient of the restriction lies in a weight-1 block labelled by a hook partition.
If $\mu = \langle p-1, p\rangle = (p^2)$ there is only one removable node, and the restriction is isomorphic to a Specht module in a block whose core is a hook partition, similarly
for $\mu = \langle p \rangle = (2p)$.
Finally, for $\langle p, p\rangle = (p, 1^p)$, the restriction
to $\mathfrak{S}_{2p-1}$ has Specht quotients labelled by $(p-1, 1^p)$, in the block with core $(p-1)$, and by $(p, 1^{p-1})$,
in a block of weight $0$.
To summarize, we now know that, whenever $S^\lambda$ is a Specht $F\mathfrak{S}_{2p-1}$-module in a block of weight 1
with $S^\lambda\uparrow^{B_{0,0}}\neq\{0\}$, then the $p$-core of the corresponding block is one of the $p-1$ hook partitions of $p-1$.
Therefore, for $s\in\{2,\ldots,p\}$,
we from now on denote the block of $F\mathfrak{S}_{2p-1}$ with $p$-core $(s-1,1^{p-s})$
by $B_s$. The $p$-core as well as all partitions of $B_s$ will be represented on a $[4,2,3^{p-2},2^{s-2}]$-abacus.
As above, we label the runners of this abacus from left to right, by $s-1,s,s+1,\ldots,p,1,2,\ldots,s-2$. Thus, in particular, for $s<p$, the rightmost runner with three beads has label $p$.
With this notation, the partitions of $B_s$ are obtained by moving exactly one bead on some runner $i$ of the $[4,2,3^{p-2},2^{s-2}]$-abacus
of $(s-1,1^{p-s})$ one position down; we shall denote the resulting partition by $\langle i\rangle$ (when $s$ is fixed).
Then,
for a fixed $s\in\{2,\ldots,p\}$, and with \ref{noth abacus and order}, we obtain the following lexicographic ordering on the partitions of $B_s$:
\begin{itemize}
\item[(i)] $\langle 1\rangle=\langle s-1\rangle>\langle p\rangle>\langle p-1\rangle>\cdots >\langle 3\rangle>\langle 2\rangle \,,\text{ for } s=2\,;$
\item[(ii)] $\langle s-1\rangle >\langle p\rangle >\langle p-1\rangle >\cdots >\langle s+1\rangle >\langle s-2\rangle >\cdots >\langle 1\rangle >\langle s\rangle \,,\text{ for } 3\leqslant s<p\,;$
\item[(iii)] $\langle p-1\rangle >\langle p-2\rangle >\cdots >\langle 1\rangle >\langle p\rangle \,,\text{ for } s=p\,.$
\end{itemize}
(c)\, For $s\in\{2,\ldots,p\}$, the decomposition matrix of $B_s$ is well known from the theory of blocks with cyclic defect groups. Thus,
by Brauer Reciprocity and \ref{noth Specht filtration}, one also knows the Specht factors occurring in any Specht filtration of any indecomposable projective $B_s$-module.
More precisely, for every $p$-regular partition $\lambda$ of $B_s$, one has $P^\lambda\sim S^\lambda\oplus S^{\tilde{\lambda}}$, where
$\tilde{\lambda}$ denotes the lexicographically next smaller partition of $B_s$. So, with (i)-(iii) above, this gives the following information that
will be crucial for the proof of Theorem~\ref{thm induce from w=1} below:
\begin{center}
\begin{tabular}{|c|c|c|}\hline
$s=2$ & $3\leqslant s\leqslant p-1$ &$s=p$\\\hline\hline
$P^{\langle s-1\rangle}\sim S^{\langle s-1\rangle}\oplus S^{\langle p\rangle}$&$P^{\langle s-1\rangle}\sim S^{\langle s-1\rangle}\oplus S^{\langle p\rangle}$&$P^{\langle s-1\rangle}\sim S^{\langle s-1\rangle}\oplus S^{\langle s-2\rangle}$\\
$P^{\langle p\rangle}\sim S^{\langle p\rangle}\oplus S^{\langle p-1\rangle}$&$P^{\langle p\rangle}\sim S^{\langle p\rangle}\oplus S^{\langle p-1\rangle}$&$P^{\langle s-2\rangle}\sim S^{\langle s-2\rangle}\oplus S^{\langle s-3\rangle}$\\
$\vdots$&$\vdots$&$\vdots$\\
$\vdots$&$P^{\langle s+1\rangle}\sim S^{\langle s+1\rangle}\oplus S^{\langle s-2\rangle}$&$\vdots$\\
$\vdots$&$\vdots$&$\vdots$\\
$P^{\langle 3\rangle}\sim S^{\langle 3\rangle}\oplus S^{\langle 2\rangle}$&$P^{\langle 1\rangle}\sim S^{\langle 1\rangle}\oplus S^{\langle s\rangle}$&$P^{\langle 1\rangle}\sim S^{\langle 1\rangle}\oplus S^{\langle s\rangle}$\\\hline
\end{tabular}
\end{center}
(d)\, By the Branching Theorem \cite[Theorem~9.2]{James1978}, whenever $s\in\{2,\ldots,p\}$ and $i\in\{1,\ldots,p\}$, the $F\mathfrak{S}_{2p}$-module $\ind_{\mathfrak{S}_{2p-1}}^{\mathfrak{S}_{2p}}(S^{\langle i\rangle})$
has a Specht filtration. The Specht factors occurring in any such filtration are unique up to isomorphism, since $p\geqslant 5$,
and their labelling partitions are obtained by moving a bead on some runner of $\langle i\rangle$ one position to the right.
In particular, the block component $S^{\langle i\rangle}\uparrow^{B_{0,0}}$ has a Specht filtration, and the Specht factors occurring
are labelled by those partitions that are obtained by moving a bead from runner $s-1$ of $\langle i\rangle$ to runner $s$. Thus, for
each $s\in\{2,\ldots,p\}$, we get
$$
S^{\langle j\rangle}\uparrow^{B_{0,0}}\sim \begin{cases} S^{\langle s-1\rangle}\oplus S^{\langle s\rangle}\,, & \text{ if } j=s-1\,,\\
S^{\langle s-1,s-1\rangle}\oplus S^{\langle s,s\rangle}\,, &\text{ if } j=s\,,\\
S^{\langle j,s-1\rangle}\oplus S^{\langle j,s\rangle}\,, &\text{ if } s<j\leqslant p\,,\\
S^{\langle s-1,j\rangle}\oplus S^{\langle s,j\rangle}\,, &\text{ if } 1\leqslant j<s-1\,.\end{cases}$$
Alternatively, one may use arguments as in \cite[(2.6)]{Donkin}, which also work when $p=3$.
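To illustrate how this will be combined with the table in (c): for $s\in\{3,\ldots,p-1\}$, inducing the projective $B_s$-module $P^{\langle s+1\rangle}\sim S^{\langle s+1\rangle}\oplus S^{\langle s-2\rangle}$ and applying the cases $s<j\leqslant p$ and $1\leqslant j<s-1$ above yields
\begin{equation*}
P^{\langle s+1\rangle}\uparrow^{B_{0,0}}\;\sim\; S^{\langle s+1,s\rangle}\oplus S^{\langle s+1,s-1\rangle}\oplus S^{\langle s,s-2\rangle}\oplus S^{\langle s-1,s-2\rangle}\,,
\end{equation*}
which is exactly the list of Specht factors in part (d) of Theorem~\ref{thm induce from w=1} below.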
\end{noth}
\begin{thm}\label{thm induce from w=1}
Up to isomorphism there are $\binom{p+1}{2}-2$ indecomposable projective $B_{0,0}$-modules
that are induced from a block of $F\mathfrak{S}_{2p-1}$ of weight $1$. Their labelling partitions
in $[2^p]$-abacus notation and their
Specht factors are as follows
\begin{itemize}
\item[\rm{(a)}] $P^{\langle p\rangle}\sim S^{\langle p\rangle}\oplus S^{\langle p-1\rangle}\oplus S^{\langle p,p-2\rangle}\oplus S^{\langle p-1,p-2\rangle}$;
\item[\rm{(b)}] $P^{\langle s\rangle}\sim S^{\langle s\rangle}\oplus S^{\langle s-1\rangle}\oplus S^{\langle p,s\rangle}\oplus S^{\langle p,s-1\rangle}$, for $s\in \{2,\ldots,p-1\}$;
\item[\rm{(c)}] $P^{\langle s,1\rangle}\sim S^{\langle s,1\rangle}\oplus S^{\langle s-1,1\rangle}\oplus S^{\langle s,s\rangle}\oplus S^{\langle s-1,s-1\rangle}$, for $s\in \{3,\ldots,p\}$;
\item[\rm{(d)}] $P^{\langle s+1,s\rangle}\sim S^{\langle s+1,s\rangle}\oplus S^{\langle s+1,s-1\rangle}\oplus S^{\langle s,s-2\rangle}\oplus S^{\langle s-1,s-2\rangle}$, for $s\in \{3,\ldots,p-1\}$;
\item[\rm{(e)}] $P^{\langle 3,2\rangle}\sim S^{\langle 3,2\rangle}\oplus S^{\langle 3,1\rangle}\oplus S^{\langle 2,2\rangle}\oplus S^{\langle 1,1\rangle}$;
\item[\rm{(f)}] $P^{\langle r,s\rangle}\sim S^{\langle r,s\rangle}\oplus S^{\langle r-1,s\rangle}\oplus S^{\langle r,s-1\rangle}\oplus S^{\langle r-1,s-1\rangle}$,
for $p\geqslant r>s>1$ and $r-s>1$.
\end{itemize}
\end{thm}
\begin{proof}
Since $F\mathfrak{S}_{2p}\cong \ind_{\mathfrak{S}_{2p-1}}^{\mathfrak{S}_{2p}}(F\mathfrak{S}_{2p-1})$, every indecomposable projective
$F\mathfrak{S}_{2p}$-module is isomorphic to a direct summand of the induction of some indecomposable projective $F\mathfrak{S}_{2p-1}$-module.
As we have seen in \ref{noth induce from w=0}, $P^{\langle 1\rangle}$ is the unique indecomposable projective $B_{0,0}$-module that is induced from
a block of $F\mathfrak{S}_{2p-1}$ of weight 0; moreover, it has precisely three Specht factors.
By \ref{noth induce from w=1} (c), (d), we obtain precisely $x:=\binom{p+1}{2}-2$ pairwise non-isomorphic
projective $B_{0,0}$-modules
$R_1,\ldots,R_x$ that are obtained by inducing the indecomposable $F\mathfrak{S}_{2p-1}$-modules in blocks
of weight 1 to $B_{0,0}$. Furthermore, each of these block inductions has precisely four pairwise non-isomorphic Specht factors; the lexicographically largest labelling
partition $\lambda_i$ of $R_i$ is always $p$-regular, and $\lambda_i\neq \lambda_j$, for $i\neq j$.
Note that $P^{\langle 1\rangle}$ cannot be isomorphic to a direct summand of any $R_i$, since otherwise there would be a projective
$B_{0,0}$-module with only one Specht factor, which is impossible in a block of weight 2.
We may suppose that $\lambda_1>\lambda_2>\cdots>\lambda_x$. We show that $R_i\cong P^{\lambda_i}$,
for $i\in\{1,\ldots,x\}$. To do so, we first show that
\begin{equation}\label{eqn R dec}
R_i\cong P^{\lambda_i}\oplus Q_i\,,
\end{equation}
where $Q_i$ is a direct sum of indecomposable projective $B_{0,0}$-modules whose labelling partitions belong
to $\{\lambda_{i+1},\ldots,\lambda_x\}$.
So let $i\in\{1,\ldots,x\}$.
There is a $p$-regular partition $\mu\in\{\lambda_1,\ldots,\lambda_x\}$ with $P^\mu\mid R_i$ and
$(P^\mu:S^{\lambda_i})=1$. Thus $\mu\geqslant \lambda_i$, by \ref{noth Specht filtration}. Since $(P^\mu:S^\mu)=1$, we also have
$(R_i:S^\mu)\neq 0$, hence $\lambda_i\geqslant \mu$, since $\lambda_i$ is the lexicographically largest labelling partition of a Specht factor of $R_i$, and then $\mu=\lambda_i$.
If $\rho\neq \lambda_i$ is
a $p$-regular partition of $B_{0,0}$ with $P^\rho\mid R_i$, then also $\rho\in\{\lambda_1,\ldots,\lambda_x\}$. Since
$(P^\rho:S^\rho)=1$, also $(R_i:S^\rho)\neq 0$, so that we must have $\lambda_i>\rho$ and $\rho\in\{\lambda_{i+1},\ldots,\lambda_x\}$.
This proves (\ref{eqn R dec}). Now we show that $Q_i=\{0\}$, for $i\in\{1,\ldots,x\}$. To do so, we argue by reverse induction on $i$.
For $i=x$, the assertion is clearly true. So let $i<x$, and assume that $Q_i\neq \{0\}$. Then, by (\ref{eqn R dec}), we would have
$P^{\lambda_j}\mid Q_i\mid R_i$, for some $j>i$. But, by induction, $P^{\lambda_j}\cong R_j$.
Since both $R_j$ and $R_i$ have
precisely four Specht factors, this would imply $R_i\cong R_j$, a contradiction.
Now the assertion of the theorem follows from \ref{noth induce from w=1} (c), (d).
\end{proof}
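As a consistency check of the count in Theorem~\ref{thm induce from w=1}, the families (a)--(f) contribute
\begin{equation*}
1+(p-2)+(p-2)+(p-3)+1+\binom{p-2}{2}\;=\;\binom{p+1}{2}-2
\end{equation*}
pairwise non-isomorphic indecomposable projective $B_{0,0}$-modules; for $p=5$ this amounts to $1+3+3+2+1+3=13$.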
To summarize, by \ref{noth induce from w=0} and Theorem~\ref{thm induce from w=1},
we have completely determined the columns of the decomposition matrix of the block $B_{0,0}$.
From this information, it is now straightforward to read off the rows of the decomposition matrix of $B_{0,0}$, that is, the
decomposition numbers of $B_{0,0}$, as well.
In Corollary~\ref{cor dec matrix} below, we shall in fact write down the Loewy
structures of the Specht modules in $B_{0,0}$. Before doing so, we mention one last bit of information
concerning the $\partial$-values and colours of the partitions of $B_{0,0}$, which is immediate from
\ref{noth colour weight 2}.
\begin{lemma}\label{lemma B0 partial}
Identifying every partition of $B_{0,0}$ with its $[2^p]$-abacus, the partitions of $B_{0,0}$
have the following $\partial$-values:
\begin{center}
\begin{tabular}{|c|c|c|}\hline
$\partial$& $p$-regular & $p$-singular\\\hline\hline
$0$ (black)& $\langle p\rangle$& $\langle 2,1\rangle$\\
& $\langle i+1,i\rangle$, $i\in\{2,\ldots,p-1\}$ \text{ odd }& \\\hline
$0$ (white) & $\langle i+1,i\rangle$, $i\in\{2,\ldots,p-1\}$ \text{ even }& $\langle 1,1\rangle$\\\hline
$1$& $\langle p-1\rangle$ & $\langle 2,2\rangle$\\
& $\langle i+2,i\rangle$, $i\in\{1,\ldots,p-2\}$&\\\hline
$2$& $\langle p-2\rangle$ & $\langle 3,3\rangle$\\
& $\langle i+3,i\rangle$, $i\in\{1,\ldots,p-3\}$&\\\hline
$d\in\{3,\ldots,p-2\}$& $\langle p-d\rangle$ & $\langle d+1,d+1\rangle$\\
& $\langle i+d+1,i\rangle$, $i\in\{1,\ldots,p-d-1\}$&\\\hline
$p-1$ & $\langle 1\rangle$ & $\langle p,p\rangle$\\\hline
\end{tabular}
\end{center}
\end{lemma}
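Note that the table in Lemma~\ref{lemma B0 partial} accounts for every partition of $B_{0,0}$: the two rows with $\partial$-value $0$ together contain $p+1$ partitions, the rows with $\partial$-value $d\in\{1,\ldots,p-2\}$ contain $p-d+1$ partitions each, and the last row contains $2$, so that in total we recover
\begin{equation*}
(p+1)+\sum_{d=1}^{p-2}(p-d+1)+2\;=\;2p+\binom{p}{2}\,,
\end{equation*}
in accordance with \ref{noth abacus and order}(b).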
\begin{cor}\label{cor dec matrix}
Identifying every partition of $B_{0,0}$ with its $[2^p]$-abacus, the Specht modules in $B_{0,0}$ have
the following Loewy structures:
\begin{itemize}
\item[{\rm (a)}] For $i\in\{1,\ldots,p-1\}$, one has $S^{\langle i\rangle}\ \approx \ \begin{matrix} D^{\langle i\rangle}\\ D^{\langle i+1\rangle}\end{matrix}$. Moreover, $S^{\langle p\rangle}\cong D^{\langle p\rangle}$.
\item[{\rm(b)}] For $i\in\{3,\ldots,p-1\}$, one has $S^{\langle i,i\rangle}\ \approx \ \begin{matrix} D^{\langle i,1\rangle}\\ D^{\langle i+1,1\rangle}\end{matrix}$. Moreover,
$$S^{\langle p,p\rangle}\ \approx \ \begin{matrix}D^{\langle p,1\rangle}\\ D^{\langle 1\rangle}\end{matrix}\,,\quad
S^{\langle 2,2\rangle}\ \approx \ \begin{matrix}D^{\langle 3,2\rangle}\\ D^{\langle 3,1\rangle}\end{matrix}\,,\quad
\text{ and} \quad S^{\langle 1,1\rangle}\cong D^{\langle 3,2\rangle}.$$
\item[{\rm (c)}] For $1\leqslant i<j\leqslant p$ with $j-i\geqslant 3$, one has
$$S^{\langle j,i\rangle}\ \approx \ \begin{cases}
\begin{matrix} D^{\langle j,i\rangle}\\ D^{\langle j,i+1\rangle}\oplus D^{\langle j+1,i\rangle}\\ D^{\langle j+1,i+1\rangle}\end{matrix} &\text{ if } j\neq p\,,\\
&\\
\begin{matrix}D^{\langle p,i\rangle}\\ D^{\langle p,i+1\rangle}\oplus D^{\langle i\rangle}\\ D^{\langle i+1\rangle}\end{matrix} &\text{ if } j= p\,.
\end{cases}$$
\item[{\rm (d)}] For $1\leqslant i<j\leqslant p$ with $j-i=2$, one has
$$S^{\langle j,i\rangle}\ \approx \ \begin{cases}
\begin{matrix}D^{\langle j,i\rangle}\\ D^{\langle j,i+1\rangle}\oplus D^{\langle j+1,i\rangle}\oplus D^{\langle j+1,i+1\rangle}\\ D^{\langle j+1,i+2\rangle}\end{matrix} &\text{ if } j\neq p\,,\\
&\\
\begin{matrix}D^{\langle p,p-2\rangle}\\ D^{\langle p,p-1\rangle}\oplus D^{\langle p\rangle}\oplus D^{\langle p-2\rangle} \\ D^{\langle p-1\rangle}\end{matrix} &\text{ if } j= p\,.
\end{cases}$$
\item[{\rm (e)}] For $1\leqslant i<j\leqslant p$ with $j-i=1$, one has
$$S^{\langle j,i\rangle}\ \approx \ \begin{cases}
\begin{matrix}D^{\langle j,i\rangle}\\ D^{\langle j+1,i\rangle}\\ D^{\langle j+2,i+2\rangle}\end{matrix} &\text{ if } i\notin\{1,p-2,p-1\}\,,\\
&\\
\begin{matrix} D^{\langle p-1,p-2\rangle}\\ D^{\langle p,p-2\rangle}\\ D^{\langle p\rangle}\end{matrix} &\text{ if } i=p-2\,,\\
&\\
\begin{matrix} D^{\langle p,p-1\rangle}\\ D^{\langle p-1\rangle}\end{matrix} &\text{ if } i=p-1\,,\\
&\\
\begin{matrix}D^{\langle 3,1\rangle}\\ D^{\langle 4,3\rangle}\end{matrix} &\text{ if } i=1\,.\\
\end{cases}$$
\end{itemize}
\end{cor}
\begin{proof}
By Theorem~\ref{thm induce from w=1} and \ref{noth induce from w=0}, we know the columns
of the decomposition matrix of $B_{0,0}$. Given these, it is straightforward to determine the rows of this decomposition
matrix, that is, the composition factors of the Specht modules in $B_{0,0}$. In particular, we see that
$S^{\langle p\rangle}\cong D^{\langle p\rangle}$ and $S^{\langle 1,1\rangle}\cong D^{\langle 3,2\rangle}$.
Next, by \cite[Proposition 6.2]{ChuangTan2001}, a Specht module $S^\lambda$ in $B_{0,0}$ has Loewy length at most 3, and has Loewy length 3 if and only if $\lambda$ is both $p$-regular and $p$-restricted. If so, $S^\lambda$ has head isomorphic to $D^\lambda$, by
\cite[Corollary 12.2]{James1978} and socle isomorphic to $D^{\lambda_+}$, by \ref{noth part}(b) and \ref{noth partial}(c). Since
$\lambda_+$ has the same $\partial$-value and, if $\partial(\lambda)=0$, the same colour as $\lambda$, we deduce
the Loewy structures of the Specht modules in (c) and (d) as well as those of $S^{\langle i+1,i\rangle}$ with
$i\in\{2,\ldots,p-2\}$ from Lemma~\ref{lemma B0 partial}.
If $\lambda$ is a $p$-regular partition of $B_{0,0}$ such that $S^\lambda$ has exactly two composition factors, then
$S^\lambda$ is of course uniserial with head isomorphic to $D^\lambda$.
Hence, it remains to establish the Loewy structures of the Specht modules labelled by the $p$-singular
partitions $\langle i,i\rangle$ with $i\in\{2,\ldots, p\}$, and $\langle 2,1\rangle$, respectively.
By \ref{noth part}, we know that, whenever $\lambda$ is $p$-restricted, $S^\lambda$ has socle isomorphic to $D^{\mathbf{m}(\lambda')}$, and
$\mathbf{m}(\lambda')=\lambda_+$, by \ref{noth partial}(b). Since $\lambda_+$ has the same $\partial$ value and, if
$\partial(\lambda)=0$, the same colour as $\lambda$, Lemma~\ref{lemma B0 partial}
implies $\Soc(S^{\langle i,i\rangle})\cong D^{\langle i+1,1\rangle}$, for
$i\in\{2,\ldots,p-1\}$, $\Soc(S^{\langle p,p\rangle})\cong D^{\langle 1\rangle}$, and $\Soc(S^{\langle 2,1\rangle})\cong D^{\langle 4,3\rangle}$.
This completes the proof of the corollary.
\end{proof}
\begin{rem}\label{rem peel}
We emphasize that the partitions treated in part (a) and (b) of Corollary~\ref{cor dec matrix} are precisely the hook partitions
of $B_{0,0}$, so that these assertions also follow from Peel's results in \cite{Peel}.
\end{rem}
We are now in the position to describe how the Ext-quiver of $B_{0,0}$ is encoded in the decomposition
matrix of $B_{0,0}$, using general information from \cite{Scopes1995, Richards1996, ChuangTan2001}. As already mentioned at the
beginning of this section, this quiver was first computed by Martin \cite{Martin1989}. Although Richards' work had not yet been
available at that time, Martin's way of drawing the quiver is the same as the one we shall now describe.
\begin{noth}\label{noth quiver B0}
{\bf The Ext-quiver of $B_{0,0}$.}\,
(a)\, We draw a quiver with $p$ rows, which we label by $0,1,\ldots,p-1$, from top to bottom. In row $i\in\{0,\ldots,p-1\}$ we draw a
vertex for each $p$-regular partition of $B_{0,0}$ that has $\partial$-value $i$. We order the partitions in row $i\in\{1,\ldots,p-1\}$
with respect to the lexicographic ordering $>$, from left to right. In row $0$, the
leftmost vertex corresponds to the partition $\langle p\rangle$, which is the largest black partition. Next we draw
$\langle p,p-1\rangle$, which is the largest white partition. From then on, black and white partitions alternate in decreasing
lexicographic ordering, from left to right.
(b)\, Let $\lambda$ be a $p$-regular partition of $B_{0,0}$ corresponding to a vertex in row $i$ of the quiver we have just drawn, that is,
$\partial(\lambda)=i$. By \cite[Theorem~6.1]{ChuangTan2001}, $\lambda$ can only be connected to a vertex
$\mu$ that lies in row $i-1$ or in row $i+1$. Moreover, if $\mu$ is a vertex in row $i-1$ or $i+1$, then
$\mu$ and $\lambda$ are connected if and only if one of the following holds:
\quad (i)\, $\lambda>\mu$ and $[S^\mu:D^\lambda]=1$, or
\quad (ii)\, $\mu>\lambda$ and $[S^\lambda:D^\mu]=1$.
In case (i) we draw an arrow $\lambda\to \mu$, in case (ii) we draw an arrow $\mu\to \lambda$.
Thus, representing every partition of $B_{0,0}$ on a $[2^p]$-abacus and invoking Corollary~\ref{cor dec matrix} and Lemma~\ref{lemma B0 partial}, we obtain the following quiver, which
equals $Q_{0,0}(p)$ in Section~\ref{sec quiv}, with respect to the lexicographic ordering on partitions:
\begin{center}
\begin{tikzpicture}
\coordinate[label=above:{\tiny $\langle p\rangle$}] (0,0) at (0,0);
\coordinate[label=above:{\tiny $\langle p,p-1\rangle$}] (1,0) at (1,0);
\coordinate (2,0) at (2,0);
\coordinate (2.5,0) at (2.5,0);
\coordinate (3,0) at (3,0);
\coordinate (3.5,0) at (3.5,0);
\coordinate[label=above:{\tiny $\langle 5,4\rangle$}] (4,0) at (4,0);
\coordinate[label=above:{\tiny $\langle 4,3\rangle$}] (5,0) at (5,0);
\coordinate[label=above:{\tiny $\langle 3,2\rangle$}] (6,0) at (6,0);
\coordinate[label=left:{\tiny $\langle p-1\rangle$}] (0,-0.5) at (0,-0.5);
\coordinate[label=below:{\tiny $\langle p,p-2\rangle$}] (1,0.5) at (1,-0.5);
\coordinate (2,-0.5) at (2,-0.5);
\coordinate (2.5,-0.5) at (2.5,-0.5);
\coordinate (3,-0.5) at (3,-0.5);
\coordinate (3.5,-0.5) at (3.5,-0.5);
\coordinate (4,-0.5) at (4,-0.5);
\coordinate[label=right:{\tiny $\langle 4,2\rangle$}] (5,-0.5) at (5,-0.5);
\coordinate[label=right:{\tiny $\langle 3,1\rangle$}] (6,-0.5) at (6,-0.5);
\coordinate[label=left:{\tiny $\langle p-2\rangle$}] (0.5,-1) at (0.5,-1);
\coordinate[label=below:{\tiny $\langle p,p-3\rangle$}] (1.5,-1) at (1.5,-1);
\coordinate (2,-1) at (2,-1);
\coordinate (2.5,-1) at (2.5,-1);
\coordinate (3,-1) at (3,-1);
\coordinate (3.5,-1) at (3.5,-1);
\coordinate (4,-1) at (4,-1);
\coordinate[label=right:{\tiny $\langle 5,2\rangle$}] (4.5,-1) at (4.5,-1);
\coordinate[label=right:{\tiny $\langle 4,1\rangle$}] (5.5,-1) at (5.5,-1);
\coordinate[label=left:{\tiny $\langle p-3\rangle$}] (1,-1.5) at (1,-1.5);
\coordinate (2,-1.5) at (2,-1.5);
\coordinate (2.5,-1.5) at (2.5,-1.5);
\coordinate (3,-1.5) at (3,-1.5);
\coordinate (3.5,-1.5) at (3.5,-1.5);
\coordinate (4,-1.5) at (4,-1.5);
\coordinate[label=right:{\tiny $\langle 5,1\rangle$}] (5,-1.5) at (5,-1.5);
\coordinate (1.5,-2) at (1.5,-2);
\coordinate (2.5,-2) at (2.5,-2);
\coordinate (3,-2) at (3,-2);
\coordinate (3.5,-2) at (3.5,-2);
\coordinate (4.5,-2) at (4.5,-2);
\coordinate[label=left:{\tiny $\langle 3\rangle$}] (2,-2.5) at (2,-2.5);
\coordinate[label=above:{\tiny $\langle p,2\rangle$}] (3,-2.5) at (3,-2.5);
\coordinate[label=right:{\tiny $\langle p-1,1\rangle$}] (4,-2.5) at (4,-2.5);
\coordinate[label=left:{\tiny $\langle 2\rangle$}] (2.5,-3) at (2.5,-3);
\coordinate[label=right:{\tiny $\langle p,1\rangle$}] (3.5,-3) at (3.5,-3);
\coordinate[label=below:{\tiny $\langle 1\rangle$}] (3,-3.5) at (3,-3.5);
\draw (0,0) node{$\bullet$};
\draw (1,0) node{$\bullet$};
\draw (2,0) node{$\bullet$};
\draw (2.5,0) node{$\cdot$};
\draw (3,0) node{$\cdot$};
\draw (3.5,0) node{$\cdot$};
\draw (4,0) node{$\bullet$};
\draw (5,0) node{$\bullet$};
\draw (6,0) node{$\bullet$};
\draw (0,-0.5) node{$\bullet$};
\draw (1,-0.5) node{$\bullet$};
\draw (2,-0.5) node{$\bullet$};
\draw (2.5,-0.5) node{$\cdot$};
\draw (3,-0.5) node{$\cdot$};
\draw (3.5,-0.5) node{$\cdot$};
\draw (4,-0.5) node{$\bullet$};
\draw (5,-0.5) node{$\bullet$};
\draw (6,-0.5) node{$\bullet$};
\draw[con] (0,0) -- (0,-0.5);
\draw[con] (0,0) -- (1,-0.5);
\draw[con] (0,-0.5) -- (1,0);
\draw[con] (1,0) -- (1,-0.5);
\draw[con] (1,-0.5) -- (2,0);
\draw[con] (1,0) -- (2,-0.5);
\draw[con] (2,0) -- (2,-0.5);
\draw[con] (4,0) -- (4,-0.5);
\draw[con] (4,0) -- (5,-0.5);
\draw[con] (4,-0.5) -- (5,0);
\draw[con] (5,0) -- (5,-0.5);
\draw[con] (5,-0.5) -- (6,0);
\draw[con] (6,0) -- (6,-0.5);
\draw[con] (5,0) -- (6,-0.5);
\draw (0.5,-1) node{$\bullet$};
\draw (1.5,-1) node{$\bullet$};
\draw (2.5,-1) node{$\cdot$};
\draw (3,-1) node{$\cdot$};
\draw (3.5,-1) node{$\cdot$};
\draw (4.5,-1) node{$\bullet$};
\draw (5.5,-1) node{$\bullet$};
\draw[con] (0,-0.5) -- (0.5,-1);
\draw[con] (0.5,-1) -- (1,-0.5);
\draw[con] (1,-0.5) -- (1.5,-1);
\draw[con] (1.5,-1) -- (2,-0.5);
\draw[con] (4,-0.5) -- (4.5,-1);
\draw[con] (4.5,-1) -- (5,-0.5);
\draw[con] (5,-0.5) -- (5.5,-1);
\draw[con] (5.5,-1) -- (6,-0.5);
\draw (1,-1.5) node{$\bullet$};
\draw (2,-1.5) node{$\cdot$};
\draw (2.5,-1.5) node{$\cdot$};
\draw (3,-1.5) node{$\cdot$};
\draw (3.5,-1.5) node{$\cdot$};
\draw (4,-1.5) node{$\cdot$};
\draw (5,-1.5) node{$\bullet$};
\draw[con] (0.5,-1) -- (1,-1.5);
\draw[con] (1,-1.5) -- (1.5,-1);
\draw[con] (4.5,-1) -- (5,-1.5);
\draw[con] (5,-1.5) -- (5.5,-1);
\draw (1.5,-2) node{$\cdot$};
\draw (2.5,-2) node{$\cdot$};
\draw (3.5,-2) node{$\cdot$};
\draw (4.5,-2) node{$\cdot$};
\draw (2,-2.5) node{$\bullet$};
\draw (3,-2.5) node{$\bullet$};
\draw (4,-2.5) node{$\bullet$};
\draw (2.5,-3) node{$\bullet$};
\draw (3.5,-3) node{$\bullet$};
\draw (3,-3.5) node{$\bullet$};
\draw[con] (2.5,-3) -- (3,-3.5);
\draw[con] (3,-3.5) -- (3.5,-3);
\draw[con] (2,-2.5) -- (2.5,-3);
\draw[con] (2.5,-3) -- (3,-2.5);
\draw[con] (3,-2.5) -- (3.5,-3);
\draw[con] (3.5,-3) -- (4,-2.5);
\end{tikzpicture}
\end{center}
\end{noth}
\begin{thm}\label{thm quiver B0}
Let $p\geqslant 5$. The principal block $B_{0,0}$ of $F\mathfrak{S}_{2p}$ has Ext-quiver $Q_{0,0}(p)$ shown in (\ref{eqn (36)}), and
the principal block $B_{1,0}$ of $F\mathfrak{S}_{2p+1}$ has Ext-quiver $Q_{1,0}(p)$ shown in (\ref{eqn (37)}), with respect to the
lexicographic ordering on partitions.
\end{thm}
\begin{proof}
The assertion concerning $B_{0,0}$ has just been proved in \ref{noth quiver B0}.
We now consider the block $B_{1,0}$, which forms a $(2:1)$-pair with $B_{0,0}$, by Proposition~\ref{prop (2:1) pairs hooks}.
We represent all partitions of $B_{1,0}$ on a $[2,3,2^{p-2}]$-abacus, and all partitions of
$B_{0,0}$ on a $[3,2^{p-1}]$-abacus.
As usual, we denote the exceptional partitions of $B_{0,0}$ and $B_{1,0}$ by $\bar{\alpha}$, $\bar{\beta}$, $\bar{\gamma}$ and
$\alpha$, $\beta$, $\gamma$, respectively.
By Corollary~\ref{cor l and r hooks} and Lemma~\ref{lemma alpha max},
we know that $\bar{\alpha}$ is the largest partition of $2p$ in $B_{0,0}$ with $\partial$-value $p-1$.
So, by \ref{noth partial Scopes}, it suffices to consider the last three rows
of $Q_{0,0}$, whose vertices have $\partial$-values $p-1,p-2$ and $p-3$. This gives
\begin{center}
\begin{tikzpicture}
\coordinate[label=left:$\bar{\beta}_+$] (B+) at (0.5,-0.5);
\coordinate[label=above:$\mu$] (M) at (1,0);
\coordinate[label=right:$\bar{\beta}$] (B) at (1.5,-0.5);
\coordinate[label=below:$\bar{\alpha}$] (A) at (1,-1);
\draw (B+) node{\color{red} $\bullet$};
\draw[con] (B+) -- (M) node{\color{red} $\bullet$};
\draw (B) node{\color{red} $\bullet$};
\draw[con] (M) -- (B);
\draw[con] (B+) -- (A) node{\color{red} $\bullet$};
\draw[con] (A) -- (B);
\draw[con] (0,0) node{$\bullet$} -- (B+);
\draw[con] (B) -- (2,0) node{$\bullet$};
\end{tikzpicture}
\end{center}
We need to identify the red vertices. By Corollary~\ref{cor l and r hooks} (in case (3)), $\bar{\gamma}$ is $p$-singular, while
$\bar{\beta}$ is both $p$-regular and $p$-restricted. So $\bar{\beta}_+$ exists. Moreover, $\partial(\bar{\beta})=\partial(\bar{\beta}_+)=p-2$.
By Theorem~\ref{thm Loewy except}, $\bar{\beta}$ and
$\bar{\beta}_+$ are connected to $\bar{\alpha}$. This identifies $\bar{\beta}$ and $\bar{\beta}_+$.
Since $\mu>\bar{\beta}$, we must have $[S^{\bar{\beta}}:D^\mu]\neq 0$, by \cite[Theorem 6.1]{ChuangTan2001},
thus $D^\mu\cong \bar{Y}$, in the notation of Theorem~\ref{thm Loewy except}.
Now, by \ref{noth partial Scopes}, Corollary~\ref{cor l and r hooks} and Theorem~\ref{thm Loewy except} again, we
obtain the following information on the rows of the quiver of $B_{1,0}$ whose vertices have $\partial$-values
$p-2$ and $p-3$:
\begin{center}
\begin{tikzpicture}
\coordinate[label=left:$\alpha_+$] (A+) at (0.5,-0.5);
\coordinate[label=above:$\hat{\mu}$] (M) at (1,0);
\coordinate[label=right:$\gamma$] (G) at (1.5,-0.5);
\coordinate[label=below:$\alpha$] (A) at (1,-0.5);
\draw (A+) node{\color{blue} $\bullet$};
\draw[con] (A+) -- (M) node{\color{blue} $\bullet$};
\draw (G) node{\color{blue} $\bullet$};
\draw[con] (M) -- (G);
\draw[con] (M) -- (A) node{\color{blue} $\bullet$};
\draw[con] (0,0) node{$\bullet$} -- (A+);
\draw[con] (G) -- (2,0) node{$\bullet$};
\end{tikzpicture}
\end{center}
Here $D^{\hat{\mu}}\cong Y$. By Corollary~\ref{cor l and r hooks}, $\beta$ is $p$-singular. Hence, by Lemma~\ref{lemma Ext alpha} and
Theorem~\ref{thm Loewy except}, $\alpha$ is only connected to $D^{\hat{\mu}}$. Since
$[S^\alpha:D^{\hat{\mu}}]\neq 0$, we have $\hat{\mu}>\alpha$. This proves that $B_{1,0}$ has quiver $Q_{1,0}(p)$.
\end{proof}
\section{The Quivers}\label{sec quiv}
Suppose that $n\in\mathbb{N}$ with $n\geqslant 5$.
First we construct a quiver $Q_{0,0}(n)$, with $n$ rows and $n-1$ vertices in the top row, of the following shape:
\mathbfegin{equation}\label{eqn (36)}
\begin{tikzcd}[row sep = 0.5em, column sep = 0.5em, ampersand replacement=\&]
\text{b}\&\phantom{x}\&\text{w}\&\phantom{x}\&\text{b}\& \& \& \&\text{w}\&\phantom{x}\&\text{b}\&\phantom{x}\&\text{w}\\
\bullet\ar[con]{d}\ar[con]{drr}\&>\&\bullet\ar[con]{d}\ar[con]{drr}\&>\&\bullet\ar[con]{d}\& \cdot \& \cdot \& \cdot \&\bullet\ar[con]{d}\ar[con]{drr}\&>\&\bullet\ar[con]{d}\ar[con]{drr}\&>\&\bullet\ar[con]{d}\\
\bullet \ar[con]{dr} \ar[con]{urr} \&\phantom{x}\&\bullet \ar[con]{dr} \ar[con]{urr} \&\phantom{x}\&\bullet \& \cdot \& \cdot \& \cdot \&\bullet \ar[con]{dr} \ar[con]{urr} \&\phantom{x}\&\bullet \ar[con]{dr} \ar[con]{urr} \&\phantom{x}\&\bullet\\
\&\bullet\ar[con]{dr}\ar[con]{ur}\&\&\bullet\ar[dashed]{dr}\ar[con]{ur}\&\cdot\&\cdot\&\cdot\&\cdot\&\cdot\&\bullet\ar[con]{dr}\ar[dashed]{dl}\ar[con]{ur}\&\&\bullet\ar[con]{ur}\&\\
\&\&\bullet\ar[dashed]{dr}\ar[con]{ur}\&\& \phantom{x}\ar[dashed]{dr}\&\cdot\&\cdot\& \cdot\&\phantom{x}\ar[dashed]{dl}\&\&\bullet\ar[dashed]{dl}\ar[con]{ur}\&\&\\
\&\&\&\phantom{x}\ar[dashed]{dr}\&\&\bullet\ar[con]{dr}\&\cdot\&\bullet\ar[con]{dr}\&\&\phantom{x}\ar[dashed]{dl}\&\&\&\\
\&\&\&\& \bullet\ar[con]{dr}\ar[con]{ur}\& \&\bullet\ar[con]{dr}\ar[con]{ur}\&\&\bullet \&\&\&\&\\
\&\&\&\&\&\bullet\ar[con]{dr}\ar[con]{ur}\&\&\bullet\ar[con]{ur} \&\&\&\&\&\\
\&\&\&\&\&\&\bullet\ar[con]{ur}\&\&\&\&\&\&
\end{tikzcd}
\end{equation}
The rows are labelled by $0,1,\ldots,n-1$, from top to bottom.
The vertices in the top row are equipped with a colour, which is either white (w) or black (b). Moreover, we assume that there is
a total ordering $>$ on the vertices of $Q_{0,0}(n)$ and that, whenever there is an arrow from vertex $x$ to vertex $y$, we have $x>y$.
In addition, the ordering on the vertices in a given row decreases from left to right. Unfortunately, the way our quivers are drawn,
one cannot, in general, read off how to compare a vertex in the top row with a vertex in one of the lower rows. For the applications to
the main results of this paper this is irrelevant.
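Reading the diagram row by row, rows $0$ and $1$ of $Q_{0,0}(n)$ each contain $n-1$ vertices and row $i$ contains $n-i$ vertices for $1\leqslant i\leqslant n-1$; in total $Q_{0,0}(n)$ thus has
\begin{equation*}
(n-1)+\sum_{i=1}^{n-1}(n-i)\;=\;\binom{n+1}{2}-1
\end{equation*}
vertices which, for $n=p$, is the number of $p$-regular partitions of $B_{0,0}$ (see \ref{noth abacus and order}(b) and Lemma~\ref{lemma B0 partial}).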
Next we modify the quiver $Q_{0,0}(n)$ to define $Q_{1,0}(n)$ as the following quiver with $n-1$ rows, and $n-1$ vertices in the top row. We call the part of $Q_{1,0}(n)$
consisting of the red and green vertices the \textit{left rim segment} of $Q_{1,0}(n)$, and the part
consisting of the blue vertices the \textit{right rim segment} of $Q_{1,0}(n)$. Again we have a total ordering on the vertices
of $Q_{1,0}(n)$.
\mathbfegin{equation}\label{eqn (37)}
\begin{tikzcd}[row sep = 0.5em, column sep = 0.5em, ampersand replacement=\&]
\text{b}\&\phantom{x}\&\text{w}\&\phantom{x}\&\text{b}\& \& \& \&\text{w}\&\phantom{x}\&\text{b}\&\phantom{x}\&\text{w}\\
\color{red}{\bullet}\ar[con]{d}\ar[con]{drr}\&>\&\color{red}{\bullet}\ar[con]{d}\ar[con]{drr}\&>\&\bullet\ar[con]{d}\& \cdot \& \cdot \& \cdot \&\bullet\ar[con]{d}\ar[con]{drr}\&>\&\color{blue}{\bullet}\ar[con]{d}\ar[con]{drr}\ar{dll}\&>\&\color{blue}{\bullet}\ar[con]{d}\\
\color{red}{\bullet} \ar[con]{dr} \ar[con]{urr} \&\phantom{x}\&\color{red}{\bullet} \ar{dl}\ar[con]{dr} \ar[con]{urr} \&\phantom{x}\&\bullet \& \cdot \& \cdot \& \cdot \&\bullet \ar[con]{dr} \ar[con]{urr} \&\phantom{x}\&\color{blue}{\bullet} \ar[con]{dr} \ar[con]{urr} \&\phantom{x}\&\color{blue}{\bullet}\\
\&\color{red}{\bullet}\ar[con]{dr}\ar[con]{ur}\&\&\color{red}{\bullet}\ar[con]{ur}\ar[dashed]{dr}\&\cdot\&\cdot\&\cdot\&\cdot\&\cdot\&\color{blue}{\bullet}\ar[con]{dr}\ar[dashed]{dl}\ar[con]{ur}\&\&\color{blue}{\bullet}\ar[con]{ur}\&\\
\&\&\color{red}{\bullet}\ar[dashed]{dr}\ar[con]{ur}\&\& \phantom{x}\ar[dashed]{dr}\&\cdot\&\cdot\& \cdot\&\phantom{x}\ar[dashed]{dl}\&\&\color{blue}{\bullet}\ar[dashed]{dl}\ar[con]{ur}\&\&\\
\&\&\&\phantom{x}\ar[dashed]{dr}\&\&\color{red}{\bullet}\ar[con]{dr}\&\cdot\&\color{blue}{\bullet}\ar[con]{dr}\&\&\phantom{x}\ar[dashed]{dl}\&\&\&\\
\&\&\&\& \color{red}{\bullet}\ar[con]{dr}\ar[con]{ur}\& \&\color{green}{\bullet}\ar[con]{dr}\ar[con]{d}\ar[con]{ur}\&\&\color{blue}{\bullet} \&\&\&\&\\
\&\&\&\&\&\color{red}{\bullet}\ar[con]{ur}\&\color{green}{\bullet}\&\color{blue}{\bullet}\ar[con]{ur} \&\&\&\&\&\\
\end{tikzcd}
\end{equation}
Now, for $(i,j)\in\{1,\ldots,n-1\}\times\{0,\ldots,n-2\}$ with $(i,j)\neq (1,0)$,
we define the quiver $Q_{i,j}(n)$ that is obtained by modifying the
left and right rim segments of $Q_{1,0}(n)$ in the following way:
\begin{tabular}{|c||c|}\hline
\multicolumn{2}{|c|}{left rim segment}\\\hline\hline
rows
$n-2-i,\ldots,n-2$, for $i\leqslant n-3$&
$$\begin{tikzcd}[row sep = 0.5em, column sep = 0.5em, ampersand replacement=\&]
\&\phantom{x}\ar[dashed]{dr}\&\& \& \&\&\&\&\\
\phantom{x}\ar[dashed]{dr}\&\&\phantom{x} \ar[dashed]{dr}\& \& \&\&\&\&\\
\&\bullet\ar[con]{dr}\& \& \bullet\ar[con]{d}\ar[con]{dr} \& \&\&\&\&\\
\&\&\bullet\ar[con]{ur}\&\bullet\ar[con]{dr}\&\bullet \ar[dashed]{dr}\&\&\&\&\\
\&\&\&\&\bullet\ar[dashed]{dr}\ar[con]{u}\&\phantom{x}\ar[dashed]{dr}\&\&\&\\
\&\&\&\&\&\phantom{x}\ar[dashed]{dr}\&\bullet\ar[con]{dr}\&\&\\
\&\&\&\&\&\&\bullet\ar[con]{dr}\ar[con]{u}\&\bullet\ar[con]{dr}\&\\
\&\&\&\&\&\&\&\bullet\ar[con]{dr}\ar[con]{u}\&\bullet\\
\&\&\&\&\&\&\&\&\bullet\ar[con]{u}\\
\end{tikzcd}$$\\\hline
rows
$0,\ldots,n-2$, for $i=n-2$&
$$\begin{tikzcd}[row sep = 0.5em, column sep = 0.5em, ampersand replacement=\&]
\text{b}\& \&\text{w}\&\&\&\&\&\\
\bullet\ar[con]{d}\ar[con]{dr}\ar[con]{drr}\& \&\bullet \ar[con]{d}\ar[con]{dl}\&\&\&\&\&\\
\bullet\ar[con]{urr}\& \bullet \ar[con]{dr}\& \bullet\ar[con]{dr} \& \&\&\&\&\\
\&\&\bullet\ar[con]{dr}\ar[con]{u}\&\bullet \ar[dashed]{dr}\&\&\&\&\\
\&\&\&\bullet\ar[dashed]{dr}\ar[con]{u}\&\phantom{x}\ar[dashed]{dr}\&\&\&\\
\&\&\&\&\phantom{x}\ar[dashed]{dr}\&\bullet\ar[con]{dr}\&\&\\
\&\&\&\&\&\bullet\ar[con]{dr}\ar[con]{u}\&\bullet\ar[con]{dr}\&\\
\&\&\&\&\&\&\bullet\ar[con]{dr}\ar[con]{u}\&\bullet\\
\&\&\&\&\&\&\&\bullet\ar[con]{u}\\
\end{tikzcd}$$\\\hline
rows
$0,\ldots,n-2$, for $i=n-1$&
$$\begin{tikzcd}[row sep = 0.5em, column sep = 0.5em, ampersand replacement=\&]
\&\text{b}\&\text{w} \&\text{w}\&\&\&\&\&\\
\&\bullet\ar[con]{dr}\ar[con]{drr}\& \bullet\ar[con]{d} \&\bullet \ar[con]{d}\&\&\&\&\&\\
\&\& \bullet\ar[con]{ur} \ar[con]{dr} \& \bullet\ar[con]{dr} \& \&\&\&\&\\
\&\&\&\bullet\ar[con]{dr}\ar[con]{u}\&\bullet \ar[dashed]{dr}\&\&\&\&\\
\&\&\&\&\bullet\ar[con]{u}\ar[dashed]{dr}\&\phantom{x}\ar[dashed]{dr}\&\&\&\\
\&\&\&\&\&\phantom{x}\ar[dashed]{dr}\&\bullet\ar[con]{dr}\&\&\\
\&\&\&\&\&\&\bullet\ar[con]{dr}\ar[con]{u}\&\bullet\ar[con]{dr}\&\\
\&\&\&\&\&\&\&\bullet\ar[con]{dr}\ar[con]{u}\&\bullet\\
\&\&\&\&\&\&\&\&\bullet\ar[con]{u}\\
\end{tikzcd}$$\\\hline
\end{tabular}
\begin{tabular}{|c||c|}\hline
\multicolumn{2}{|c|}{right rim segment}\\\hline\hline
rows
$n-2-j-1,\ldots,n-2$, for $j<n-3$ &
$$\begin{tikzcd}[row sep = 0.5em, column sep = 0.5em, ampersand replacement=\&]
\&\&\&\&\&\&\& \phantom{x}\ar[dashed]{dl}\&\\
\&\&\&\& \& \& \phantom{x} \ar[dashed]{dl} \& \& \ar[dashed]{dl}\\
\&\&\&\&\&\bullet \ar[con]{dr} \ar[con]{d} \& \& \bullet \&\\
\&\&\&\&\bullet \ar[con]{ur} \ar[con]{d} \ar[dashed]{dl} \& \bullet \& \bullet \ar[con]{ur} \&\&\\
\&\&\& \phantom{x} \ar[dashed]{dl}\& \bullet \ar[dashed]{dl}\ar[con]{ur}\&\&\&\&\\
\&\&\bullet \ar[con]{d}\& \phantom{x} \ar[dashed]{dl}\&\&\&\&\&\\
\&\bullet\ar[con]{d}\ar[con]{ur}\&\bullet \&\&\&\&\&\&\\
\phantom{x}\ar[con]{ur}\&\bullet\ar[con]{ur}\&\&\&\&\&\&\&\\
\phantom{x}\ar[con]{ur}\&\&\&\&\&\&\&\&\\
\end{tikzcd}$$\\\hline
rows
$0,\ldots,n-2$, for $j=n-3$&
$$\begin{tikzcd}[row sep = 0.5em, column sep = 0.5em, ampersand replacement=\&]
\&\&\&\&\&\text{b} \& \& \text{w} \&\\
\&\&\&\&\&\bullet \ar[con]{d} \ar[con]{dr} \ar[con]{drr}\& \& \bullet \ar[con]{d}\ar[con]{dl} \&\\
\&\&\&\&\&\bullet \ar[con]{urr} \ar[con]{d} \& \bullet \& \bullet \&\\
\&\&\&\&\bullet \ar[con]{d} \ar[dashed]{dl} \ar[con]{ur}\& \bullet \ar[con]{ur} \& \&\&\\
\&\&\& \phantom{x} \ar[dashed]{dl}\& \bullet \ar[dashed]{dl}\ar[con]{ur}\&\&\&\&\\
\&\&\bullet \ar[con]{d}\& \phantom{x} \ar[dashed]{dl}\&\&\&\&\&\\
\&\bullet\ar[con]{ur}\ar[con]{d}\&\bullet \&\&\&\&\&\&\\
\phantom{x}\ar[con]{ur}\&\bullet\ar[con]{ur}\&\&\&\&\&\&\&\\
\phantom{x}\ar[con]{ur}\&\&\&\&\&\&\&\&\\
\end{tikzcd}$$\\\hline
rows
$0,\ldots,n-2$, for $j=n-2$&
$$\begin{tikzcd}[row sep = 0.5em, column sep = 0.5em, ampersand replacement=\&]
\&\&\&\&\&\text{b} \& \& \text{w}\&\text{b}\\
\&\&\&\&\&\bullet\ar[con]{d}\ar[con]{dr} \& \& \bullet\ar[con]{dl} \&\bullet\\
\&\&\&\&\&\bullet \ar[con]{urr} \ar[con]{d} \& \bullet \ar[con]{urr} \& \&\&\\
\&\&\&\&\bullet \ar[con]{d} \ar[dashed]{dl}\ar[con]{ur} \& \bullet \ar[con]{ur} \& \& \&\&\\
\&\&\& \phantom{x} \ar[dashed]{dl}\& \bullet \ar[dashed]{dl}\ar[con]{ur}\&\&\&\&\&\\
\&\&\bullet \ar[con]{d}\& \phantom{x} \ar[dashed]{dl}\&\&\&\&\&\&\\
\&\bullet\ar[con]{d}\ar[con]{ur}\&\bullet \&\&\&\&\&\&\&\\
\phantom{x}\ar[con]{ur}\&\bullet\ar[con]{ur}\&\&\&\&\&\&\&\&\\
\phantom{x}\ar[con]{ur}\&\&\&\&\&\&\&\&\&\\
\end{tikzcd}$$\\\hline
\end{tabular}
\bigskip
By Theorem~\ref{thm main1} and Theorem~\ref{thm main1 details}, the above quivers appear as Ext-quivers of
blocks of symmetric groups of weight 2 whose cores are hook partitions. In Proposition~\ref{prop main2} and Theorem~\ref{thm main2}, we characterize possible
Morita equivalences between different such blocks. Since Morita equivalent blocks have isomorphic Ext-quivers,
the following result is our key ingredient in the proof of Theorem~\ref{thm main2}.
\pagebreak
\begin{prop}\label{prop graph isos}
Suppose that $(i,j),(i',j')\in \{(0,0)\}\cup (\{1,\ldots,n-1\}\times\{0,\ldots,n-2\})$.
Then $Q_{i,j}(n)$ is isomorphic to $Q_{i',j'}(n)$, as an undirected graph, if and only if one of the following cases occurs:
\begin{itemize}
\item[{\rm (i)}] $(i,j)=(i',j')$;
\item[{\rm (ii)}] $i=j'+1$ and $j=i'-1$.
\end{itemize}
\end{prop}
\begin{proof}
The number of vertices with a given valency is invariant under a graph isomorphism. Moreover,
we call a pair $v\neq w$ of vertices of any of the above graphs $Q_{ij}(n)$ {\it exceptional} if $v$ and $w$ have valency at most $2$, and in addition have distance $2$ in $Q_{ij}(n)$. A graph isomorphism permutes exceptional pairs.
Next we define the {\it boundary} of $Q_{i,j}(n)$
to be the full subgraph of $Q_{i,j}(n)$ whose vertices are precisely the vertices with valency at most $3$. The boundary is also invariant under graph isomorphisms.
We fix $n$ and write $Q_{i,j}$ instead of $Q_{i,j}(n)$.
We note that $Q_{0,0}$ and $Q_{1,0}$ cannot be isomorphic to any of the other graphs, since they are the only graphs with no
exceptional pair, or with two exceptional pairs sharing a vertex, respectively.
Note that in case (ii) the (undirected) graphs $Q_{i,j}$ and $Q_{i',j'}$ are clearly isomorphic,
since $Q_{i,j}$ is then simply obtained by reflecting $Q_{i',j'}$ along the middle axis.
Hence, we may from now on suppose that $2\leqslant i+j$, and show that $Q_{i,j}$ is not isomorphic
to any graph $Q_{i',j'}$ with $(i',j')\neq (i,j)$ and $(i',j')\neq (i-1,j+1)$.
We first consider the case that $Q_{i,j}$ has a vertex of valency 1. Then $i=n-1$ or $j=n-2$.
Since $Q_{n-1,n-2}$ is the only graph with two vertices of valency 1, it is not isomorphic to any other graph under consideration. Therefore we may suppose that $(i,j)\neq (n-1,n-2)$.
Then $i=n-1$ and $j\in\{0,\ldots,n-3\}$, or $j=n-2$ and $i\in\{1,\ldots,n-2\}$. For $j\in\{0,\ldots,n-3\}$, the graph $Q_{n-1,j}$ is isomorphic to
$Q_{j+1,n-2}$, as mentioned above.
Thus it suffices to show that if $Q_{n-1,j}$ is isomorphic to $Q_{n-1, m}$, for
$j,m\in\{0,\ldots,n-3\}$, then $j=m$.
Based on the diagrams, we describe the graph structure of the boundary of $Q_{n-1, j}$.
It has a unique exceptional pair, which is drawn in the right rim segment when $j\geqslant 1$, and when $j=0$, one vertex is in the left rim segment and the
other is the lowest vertex of the right
rim segment.
The vertices of the exceptional pair belong to two disjoint connected components of the boundary.
When $j=0$, one of these components is a line with $n-4$ vertices, and one component is
isomorphic to the Dynkin diagram $D_{n}$. For $1\leqslant j< n-3$, one component is isomorphic to $D_{n-j}$. The other component is a tree
branching at the vertex drawn in the lowest row
in the left rim segment. One arm of this tree has $n-4$ segments, one has just one segment, and the third has $j$ segments.
All other components of the boundary are isolated vertices. If $j=n-3$, then the boundary of $Q_{n-1, j}$ is one connected component.
The boundary of $Q_{n-1, m}$ has a similar structure.
A graph isomorphism restricts to a graph isomorphism of the boundaries, so that $j=m$.
The graph $Q_{n-2,n-3}$ is the only graph with precisely two vertices of valency $2$, hence cannot be isomorphic to any of the others.
Now we consider the case that
$i=n-2$ and $j\in\{0,\ldots,n-4\}$, or $j=n-3$ and $i\in\{1,\ldots,n-3\}$. This covers those cases where $Q_{i,j}$ has precisely four vertices of valency $2$.
Let $j\in\{0,\ldots,n-4\}$. Since $Q_{n-2, j}$ is isomorphic to $Q_{j+1, n-3}$, it suffices to show if $Q_{n-2, j}$ is isomorphic to $Q_{n-2, m}$, for $m\in\{0,\ldots,n-4\}$, then $j=m$.
The graph $Q_{n-2, j}$ has a unique exceptional pair, which is drawn in the right rim segment when $j\geqslant 1$, and when
$j=0$, one vertex is in the left rim segment and the
other is the lowest vertex of the right
rim segment.
The vertices of the exceptional pair belong to two disjoint connected components of the boundary.
When $j=0$, one of these components is a line with $n-1$ segments, and the other is
isomorphic to the Dynkin diagram $D_{n}$.
For $j\geqslant 1$, one component is isomorphic to $D_{n-j}$, and the other is a tree
branching at the vertex drawn in the lowest row
in the left rim segment. This tree has one arm with $n-1$ segments, one arm with just one segment and one arm with $j$ segments.
All other components of the boundary are isolated vertices.
The boundary of $Q_{n-2, m}$ has a similar structure.
A graph isomorphism restricts to a graph isomorphism of the boundaries, so that $j=m$.
Now we consider the graphs $Q_{i,j}$ with $i\in\{1,\ldots, n-3\}$ and $j\in\{0,\ldots,n-4\}$. These have precisely six vertices of valency 2.
Since $Q_{ij}$ is isomorphic to $Q_{i-1,j+1}$, for $i\in \{1,\ldots, n-3\}$ and $j\in \{0,\ldots, n-4\}$,
it suffices to show
that if $Q_{i,j}$ is isomorphic to $Q_{i,m}$, for $i\in \{1,\ldots, n-3\}$ and $j,m\in \{0,\ldots, n-4\}$, then $j = m$.
Suppose first that $i>1$. The graph $Q_{i,j}$ has precisely two exceptional pairs. One of them is drawn in the left rim segment and the other is drawn in the right rim segment, for $j\geqslant 1$.
For
$j=0$, one vertex is in the left rim segment and the
other is the lowest vertex of the right
rim segment.
The vertices of the exceptional pairs belong to {\it three} disjoint connected components of the boundary.
When $j=0$, they are a line with $i-1$ segments, then a component isomorphic to $D_{n-(i-1)}$ and a component isomorphic to $D_n$.
When $j\geqslant 1$, the boundary of $Q_{i,j}$ has a component that is a tree branching at the vertex drawn in the lowest row in the left rim segment, whose arms have
$i-1$ segments, one segment
and $j$ segments respectively. The other components of the boundary containing vertices from exceptional pairs are
isomorphic to $D_{n-(i-1)}$, and to $D_n$.
The boundary of $Q_{i,m}$ has a similar structure. As before, if there is a graph isomorphism between $Q_{i,j}$ and
$Q_{i,m}$, then it restricts to an isomorphism of the boundaries, and we deduce
$j=m$.
It remains to consider the graphs $Q_{1, j}$, with $j\in\{1,\ldots,n-4\}$. Each of these has two exceptional pairs: one of them is drawn between the lowest two vertices
in the left rim segment, and the other in the right rim segment. They belong to three disjoint connected components of the boundary: one is isomorphic to $D_n$,
one is a line with $j$ segments, and the third is a component isomorphic to $D_{n-j}$. Hence if $m,j\in\{1,\ldots,n-4\}$ and if $Q_{1, j}$ is isomorphic to
$Q_{1,m}$, then $j=m$.
\end{proof}
\end{appendix}
\noindent (Susanne Danz) \ {\sc FB Mathematik, Katholische Universit\"{a}t Eichst\"{a}tt-Ingolstadt,
85072 Eichst\"{a}tt, Germany}\\
{\it Email} \ [email protected]
\noindent (Karin Erdmann) \ {\sc Mathematical Institute, University of Oxford, OX2 6GG, UK}\\
{\it Email} \ [email protected]
\end{document}
\begin{document}
\title[Coupled elliptic system]{ Ground states of Nonlinear Schr\"{o}dinger System with Mixed Couplings }
\author[J. Wei]{Juncheng Wei }
\address{\noindent Department of Mathematics, University of British Columbia,
Vancouver, B.C., Canada, V6T 1Z2}
\email{[email protected]}
\author[Y.Wu]{Yuanze Wu}
\address{\noindent School of Mathematics, China
University of Mining and Technology, Xuzhou, 221116, P.R. China }
\email{[email protected]}
\begin{abstract}
We consider the following $k$-coupled nonlinear Schr\"{o}dinger system:
\begin{equation*}
\left\{\aligned&-\Delta u_j+\lambda_ju_j=\mu_ju_j^3+\sum_{i=1,i\not=j}^k\beta_{i,j} u_i^2u_j\quad\text{in }\mathbb{R}^N,\\
&u_j>0\quad\text{in }\mathbb{R}^N,\quad u_j(x)\to0\quad\text{as }|x|\to+\infty,\quad j=1,2,\cdots,k,\endaligned\right.
\end{equation*}
where $N\leq 3$, $k\geq3$, $\lambda_j,\mu_j>0$ are constants and $\beta_{i,j}=\beta_{j,i}\not=0$ are parameters. There have been intensive studies of the above system when $k=2$ or when the system is purely attractive ($\beta_{i,j}>0$ for all $i \not = j$) or purely repulsive ($\beta_{i,j}<0$ for all $i\not = j$); however, very few results are available for $k\geq 3$ when the system admits {\bf mixed couplings}, i.e., there exist $(i_1,j_1)$ and $(i_2,j_2)$ such that $\beta_{i_1,j_1}\beta_{i_2,j_2}<0$. In this paper we give the first systematic and (almost) complete study of the existence of ground states when the system admits mixed couplings. We first divide this system into the {\bf repulsive-mixed} and {\bf total-mixed} cases. In the first case we prove nonexistence of ground states. In the second case we give a necessary condition for the existence of ground states and also provide estimates for the Morse index. The key idea is the {\bf block decomposition} of the system ({\bf optimal block decompositions, eventual block decompositions}), and the measurement of the total interaction forces between different {\bf blocks}. Finally, the assumptions on the existence of ground states are shown to be {\bf optimal} in some special cases.
\noindent{\bf Keywords:} nonlinear Schr\"{o}dinger system; ground state; mixed coupling; variational method; Morse index.
\noindent {\bf AMS} Subject Classification 2010: 35B09; 35J47; 35J50.
\end{abstract}
\date{}
\maketitle
\section{Introduction}
We consider the following $k$-coupled nonlinear Schr\"{o}dinger system
\begin{equation}\label{eqn0001}
\left\{\aligned&-\Delta u_j+\lambda_ju_j=\mu_ju_j^3+\sum_{i=1,i\not=j}^k\beta_{i,j} u_i^2u_j\quad\text{in }\mathbb{R}^N,\\
&u_j>0\quad\text{in }\mathbb{R}^N,\quad u_j(x)\to0\quad\text{as }|x|\to+\infty,\quad j=1,2,\cdots,k,\endaligned\right.
\end{equation}
where $N=1,2,3$, $k\geq3$, $\lambda_j,\mu_j>0$ are constants and $\beta_{i,j}=\beta_{j,i}\not=0$ are coupling parameters. (To simplify the notations, in the following, we assume $\beta_{j, j}=\mu_j$.) This paper is concerned with the existence of ground states in the general case $ k\geq 3$.
It is well known that solutions of \eqref{eqn0001} are related to the solitary waves
of the Gross-Pitaevskii equations, which have applications in many physical models, such as nonlinear optics and Bose-Einstein condensates for multi-species condensates (cf. \cite{CLLL04,R03}). Physically, in the system~\eqref{eqn0001}, $\mu_j$ and $\beta_{i,j}$ are the intraspecies and interspecies scattering lengths, respectively, while the $\lambda_j$ come from the chemical potentials. The sign of the scattering length $\beta_{i,j}$ determines whether the interactions of the states $|i\rangle$ and $|j\rangle$ are {\bf repulsive} ($\beta_{i,j}<0$) or {\bf attractive} ($\beta_{i,j}>0$).
In the past fifteen years, the two-coupled case of the system~\eqref{eqn0001} (i.e. $k=2$) has been studied extensively in the literature. An important feature of the two-coupled case is that it only has one coupling, i.e., $\beta_{12}=\beta_{21}$. Thus, the two-coupled case of the system~\eqref{eqn0001} is either {\bf purely repulsive} ($\beta_{12}=\beta_{21}<0$) or {\bf purely attractive} ($\beta_{12}=\beta_{21} >0$). By using variational methods, Lyapunov-Schmidt reduction methods or bifurcation methods, various theorems about the existence, multiplicity and qualitative properties of nontrivial solutions of two-coupled elliptic systems similar to \eqref{eqn0001} have been established in the literature under various assumptions. Since it seems almost impossible for us to provide a complete list of references, we refer the readers only to \cite{AC06,AC07,BDW10,BJS16,BS17,BS19,BTWW13,BW06,BWW07,CD10,CLLL04,CTV05,CZ13,DWW10,GJ16,GJ18,LW051,LW06,NTTV10,NTTV12,PW13,S07,ST15,
WW07,WW08,WW081,WWZ17,WZ19,W18} and the references therein. Roughly speaking, in two-coupled elliptic systems the two components tend to segregate from each other in the repulsive case, which leads to phase separation and multiplicity of solutions, while the two components tend to synchronize with each other in the attractive case, which leads to uniqueness of the positive solution. For $k\geq 3$, the purely repulsive case and the purely attractive case of \eqref{eqn0001}, i.e., where the couplings $\beta_{i,j}$ have the same sign for all $i\not=j$, have also been studied; see, for example, \cite{B13,LW08,LW10,SZ15,TT12,TT121,TV09,TW13,W17} and the references therein.
However, a significant new feature of \eqref{eqn0001} for $k\geq3$ is the presence of {\bf mixed couplings}, i.e., there exist $(i_1,j_1)$ and $(i_2,j_2)$ such that $\beta_{i_1,j_1}\beta_{i_2,j_2}<0$. As far as we know, \eqref{eqn0001} for $k\geq3$ with mixed couplings is less studied in the literature, and the only references are \cite{BSW16,BSW161,DW12,LW05,SW15,SW151,S15,ST16,STTZ16,
TTVW11}. The primary goal of this paper is to give a complete study about the existence of ground states in the case of {\bf mixed couplings}. In what follows, for the sake of clarity, let us first introduce some necessary notations and definitions.
Let $\mathcal{H}_j$ be the Hilbert space of $H^1(\mathbb{R}^N)$ with the inner product
\begin{eqnarray*}\label{eqn0002}
\langle u,v\rangle_{\lambda_j}=\int_{\mathbb{R}^N}\big(\nabla u\cdot\nabla v+\lambda_j uv\big)\,dx.
\end{eqnarray*}
Its corresponding norm is given by
\begin{eqnarray*}\label{eqn0003}
\|u\|_{\lambda_j}=\langle u,u\rangle_{\lambda_j}^{\frac12}.
\end{eqnarray*}
Let the energy functional of \eqref{eqn0001} be given by
\begin{eqnarray}
\mathcal{E}(\overrightarrow{u})=\frac{1}{2}\sum_{j=1}^k\|u_j\|_{\lambda_j}^2-\frac{1}{4}\sum_{j=1}^k\mu_j\|u_j\|_4^4-\frac{1}{2}\sum_{i,j=1, i<j}^k\beta_{i,j}\|u_iu_j\|_2^2,
\end{eqnarray}
where $\overrightarrow{u}=(u_1,u_2,\cdots,u_k)$ and $\|\cdot\|_p$ is the usual norm in $L^p(\mathbb{R}^N)$. Then, $\mathcal{E}(\overrightarrow{u})$ is of class $C^2$ in $\mathcal{H}:=\prod_{j=1}^k\mathcal{H}_j$. $\overrightarrow{v}$ is called a positive critical point of $\mathcal{E}(\overrightarrow{u})$ if $\mathcal{E}'(\overrightarrow{v})=\overrightarrow{0}$ in $\mathcal{H}^{-1}$ with $v_j>0$ for all $j$, where $\mathcal{H}^{-1}$ is the dual space of $\mathcal{H}$. For $N\leq 3$, the standard elliptic regularity theory yields that
positive critical points of $\mathcal{E}(\overrightarrow{u})$ are equivalent to classical solutions of \eqref{eqn0001}.
We define the Nehari manifold of $\mathcal{E}(\overrightarrow{u})$ as follows:
\begin{eqnarray}
\label{Neh1}
\mathcal{N}=\{\overrightarrow{u}\in\widetilde{\mathcal{H}}\mid \overrightarrow{\mathcal{G}}(\overrightarrow{u})=(\mathcal{G}_1(\overrightarrow{u}), \mathcal{G}_2(\overrightarrow{u}),\cdots, \mathcal{G}_k(\overrightarrow{u}))=\overrightarrow{0}\},
\end{eqnarray}
where $\mathcal{G}_j(\overrightarrow{u})=\|u_j\|_{\lambda_j}^2-\mu_j\|u_j\|_4^4-\sum_{i=1,i\not=j}^k\beta_{i,j}\|u_iu_j\|_2^2$
and $\widetilde{\mathcal{H}}=\prod_{j=1}^k(\mathcal{H}_j\backslash\{0\})$. Clearly, $\mathcal{N}$ contains all positive critical points of $\mathcal{E}(\overrightarrow{u})$.
Let
\begin{eqnarray}
\label{CN}
\mathcal{C}_{\mathcal{N}}=\inf_{\mathcal{N}}\mathcal{E}(\overrightarrow{u}).
\end{eqnarray}
Then, $\mathcal{C}_{\mathcal{N}}$ is well defined and nonnegative. $\overrightarrow{v}$ is called a ground state of \eqref{eqn0001}, if $\overrightarrow{v}$ is a positive critical point of $\mathcal{E}(\overrightarrow{u})$ with $\mathcal{E}(\overrightarrow{v})=\mathcal{C}_{\mathcal{N}}$.
We now continue our discussions on \eqref{eqn0001} for $k\geq3$ with the mixed couplings. Most of the literature (cf. \cite{BSW16,BSW161,SW15,SW151,S15,ST16}) is devoted to the ``restricted'' ground states of \eqref{eqn0001} for $k\geq3$ with the mixed couplings, by either assuming that $u_j$ are all radially symmetric or considering \eqref{eqn0001} in a bounded domain $\Omega$. The only paper, which is devoted to the ground states of \eqref{eqn0001}, is \cite{LW05}, where the existence and nonexistence of the ground states of \eqref{eqn0001} with mixed couplings were partially studied when $k=3$. Thus, the existence of the ground states of \eqref{eqn0001}, for $k\geq3$ with the mixed couplings, remains largely open. In this paper we give the first result on the existence and nonexistence of the ground states of \eqref{eqn0001} for $k\geq3$ with the mixed couplings, which can be summarized as follows (see Theorem \ref{thm0002} below):
\begin{enumerate}
\item[$(1)$] Under some technical conditions (which can be shown to be optimal in some special cases), \eqref{eqn0001} for $k\geq3$ has a ground state in the case of {\bf total-mixed couplings} (see the definition below);
\item[$(2)$] \eqref{eqn0001} for $k\geq3$ has no ground states in the case of {\bf repulsive-mixed couplings} (see the definition below).
\end{enumerate}
\section{Block Decompositions and Statements of Main Results when $k=3, 4$}
Before we present the results in the general case $ k\geq 3$, we
first explain key ideas, concepts and main results when $k=3$ or $4$. We first consider the case $k=3$:
\begin{equation}\label{eqnew0001}
\left\{\aligned&-\Delta u_1+\lambda_1u_1=\mu_1u_1^3+\beta_{1,2} u_2^2u_1+\beta_{1,3}u_3^2u_1\quad\text{in }\mathbb{R}^N,\\
&-\Delta u_2+\lambda_2u_2=\mu_2u_2^3+\beta_{1,2} u_1^2u_2+\beta_{2,3}u_3^2u_2\quad\text{in }\mathbb{R}^N,\\
&-\Delta u_3+\lambda_3u_3=\mu_3u_3^3+\beta_{1,3} u_1^2u_3+\beta_{2,3}u_2^2u_3\quad\text{in }\mathbb{R}^N,\\
&u_i>0\quad\text{in }\mathbb{R}^N,\quad u_i(x)\to0\quad\text{as }|x|\to+\infty,\quad i=1,2,3.\endaligned\right.
\end{equation}
We start by recalling known results about \eqref{eqnew0001} in the literature. As pointed out in \cite{LW05}, there are actually only {\bf four} cases of the couplings:
\begin{enumerate}
\item[$(a)$] The purely attractive case: $\beta_{1,2}>0$, $\beta_{1,3}>0$ and $\beta_{2,3}>0$;
\item[$(b)$] The purely repulsive case: $\beta_{1,2}<0$, $\beta_{1,3}<0$ and $\beta_{2,3}<0$;
\item[$(c)$] The mixed case~$(1)$: $\beta_{1,2}>0$, $\beta_{1,3}<0$ and $\beta_{2,3}<0$;
\item[$(d)$] The mixed case~$(2)$: $\beta_{1,2}>0$, $\beta_{1,3}>0$ and $\beta_{2,3}<0$.
\end{enumerate}
The first two cases (a) and (b) are reminiscent of the $k=2$ case, which can be dealt with similarly.
In the mixed case~$(c)$, the system~\eqref{eqnew0001} can be seen as a coupled system between an attractively two-coupled system in $(u_1, u_2)$ and a single equation in $u_3$. Since $\beta_{1,3}<0$ and $\beta_{2,3}<0$, the interaction between the two-coupled system and the single equation is ``repulsive''. We re-name this mixed case the {\bf repulsive-mixed case}. Similarly to the repulsive case of $k=2$ (cf. \cite{LW05}), the ground state of \eqref{eqnew0001} does not exist in this case (under some technical conditions). (However, if the $u_j$ are all radially symmetric or one considers \eqref{eqnew0001} in a bounded domain $\Omega$, then the ``restricted'' ground states of \eqref{eqnew0001} exist for some ranges of $\beta_{i,j}$ (cf. \cite{BSW16,BSW161,SW15,SW151,S15,ST16}).)
The most difficult (and interesting) case is the mixed case~$(d)$. If we still regard the system~\eqref{eqnew0001} as an attractively two-coupled system coupled with a single equation, then the situation is much more complicated than that in the repulsive-mixed case~$(c)$, since the coupling between them can be both repulsive ($\beta_{2,3}<0$) and attractive ($\beta_{1,2}>0$, $\beta_{1,3}>0$). We re-name this mixed case the {\bf total-mixed case}. In a bounded domain with Dirichlet boundary conditions, the existence of the ``restricted'' ground states of \eqref{eqnew0001} in the total-mixed case~$(d)$ has been studied in \cite{S15,ST16} for some ranges of $\beta_{i,j}$. However, it has been proved in \cite{LW05}, by using Lyapunov-Schmidt reduction methods, that \eqref{eqnew0001} has a non-radially symmetric solution in the total-mixed case~$(d)$ for $|\beta_{i,j}|$ all sufficiently small and $|\beta_{2,3}|>>|\beta_{1,2}|,|\beta_{1,3}|$. Moreover, the energy value of this non-radially symmetric solution is strictly less than that of the unique radially symmetric solution of \eqref{eqnew0001} for $|\beta_{i,j}|$ all sufficiently small. This result suggests that the ground states of \eqref{eqnew0001}, if they exist, are non-radially symmetric in the total-mixed case~$(d)$, at least for $|\beta_{i,j}|$ all sufficiently small and $|\beta_{2,3}|>>|\beta_{1,2}|,|\beta_{1,3}|$. By our above discussions, in the total-mixed case~$(d)$, the major task in studying the existence of the ground states of \eqref{eqnew0001} is to measure the {\bf total interaction} between the attractively two-coupled system and the single equation near the least energy value $\mathcal{C}_{\mathcal{N}}$. It turns out that in this case the total interaction can mainly be controlled by the linear terms $\lambda_j$.
The following theorem gives a complete characterization of the existence and nonexistence of ground states of \eqref{eqnew0001}.
\begin{theorem}\label{coro0001}
Let $N=1,2,3$.
\begin{enumerate}
\item[$(1)$] In the purely attractive case~$(a)$, there exist $0<\beta_0<\widehat{\beta}_0$ such that
\begin{enumerate}
\item[$(i)$] \eqref{eqnew0001} has a ground state with the Morse index 3 for $0<\beta_{1,2},\beta_{1,3},\beta_{2,3}<\beta_0$;
\item[$(ii)$] \eqref{eqnew0001} has a ground state with the Morse index 2 for $\beta_{1,2}>\widehat{\beta}_0$, $0<\beta_{1,3},\beta_{2,3}<\beta_0$;
\item[$(iii)$] \eqref{eqnew0001} has a ground state with the Morse index 1 for $\beta_{i,j}>\widehat{\beta}_0$ and $|\beta_{i,j}-\beta_{i,l}|<<1$ for all $i,j,l=1,2,3$ with $i\not=j$, $i\not=l$ and $j\not=l$, provided that $|\lambda_i-\lambda_j|<<1$ for all $i,j=1,2,3$ with $i\not=j$.
\end{enumerate}
\item[$(2)$] In the purely repulsive case~$(b)$ and in the repulsive-mixed case~$(c)$, $\mathcal{C}_{\mathcal{N}}$ cannot be attained, provided that the coefficient matrix $\Theta= (\beta_{i,j})$ is positive definite. That is, the system~\eqref{eqnew0001} has no ground states.
\item[$(3)$] In the total-mixed case~$(d)$, if $\lambda_1<\min\{\lambda_2,\lambda_3\}$, then there exist $0<\beta_0<\widehat{\beta}_0$ such that
\begin{enumerate}
\item[$(i)$] \eqref{eqnew0001} has a ground state with the Morse index 3 for $0<\beta_{1,2}<\beta_0$, $0<\beta_{1,3}<\beta_0$ and $\beta_{2,3}<0$;
\item[$(ii)$] \eqref{eqnew0001} has a ground state with the Morse index 2 for $\beta_{1,2}>\widehat{\beta}_0$, $0<\beta_{1,3}<\beta_0$ and $\beta_{2,3}<0$.
\end{enumerate}
\item[$(4)$] In the total-mixed case~$(d)$, let $\beta_{1,2}=\delta\widehat{\beta}_{1,2}$, $\beta_{1,3}=\delta^t\widehat{\beta}_{1,3}$ and $\beta_{2,3}=-\delta^s\widehat{\beta}_{2,3}$, where $\delta>0$ is a parameter and $t,s,\widehat{\beta}_{i,j}$ are positive absolute constants. If $\lambda_1\geq\min\{\lambda_2,\lambda_3\}$ and $0<s<\min\{1,t\}$, then for $\delta$ sufficiently small, $\mathcal{C}_{\mathcal{N}}$ cannot be attained. That is, \eqref{eqnew0001} has no ground states.
\end{enumerate}
\end{theorem}
\begin{remark}\label{rmk0003}
\begin{enumerate}
\item[$(a)$] (4) of Theorem \ref{coro0001} shows that the ground state in Corollary 1 of \cite{LW05} does not exist.
\item[$(b)$] As we pointed out above, the major difficulty in proving the existence part of Theorem~\ref{coro0001} is to measure the interaction terms
\begin{eqnarray}
\beta_{1,3}\|u_1u_3\|_2^2+\beta_{2,3}\|u_2u_3\|_2^2\quad\text{and}\quad\beta_{1,2}\|u_1u_2\|_2^2+\beta_{2,3}\|u_2u_3\|_2^2
\end{eqnarray}
by non-radially symmetric vector-functions.
By using the ground states of the system of $(u_1, u_2)$ and the single equation of $u_3$ (or the pair of $(u_1, u_3)$ and $u_2$) as test functions we find that the above interaction terms behave like:
\begin{eqnarray*}
\mathfrak{H}&=&\sup_{R>>1}(C\beta_{1,3}R^{1-N+\gamma}e^{-2\min\{\sqrt{\lambda_1}, \sqrt{\lambda_3}\}R}\\
&&+C'\beta_{2,3}R^{1-N+\gamma'}e^{-2\min\{\sqrt{\lambda_2}, \sqrt{\lambda_3}\}R})
\end{eqnarray*}
and
\begin{eqnarray*}
\mathfrak{G}&=&\sup_{R>>1}(C\beta_{1,2}R^{1-N+\gamma''}e^{-2\min\{\sqrt{\lambda_1}, \sqrt{\lambda_2}\}R}\\
&&+C'\beta_{2,3}R^{1-N+\gamma'}e^{-2\min\{\sqrt{\lambda_2}, \sqrt{\lambda_3}\}R}),
\end{eqnarray*}
where $\gamma,\gamma',\gamma''$ are positive constants depending only on $N$ and the relations among the $\lambda_j$, and $C,C'$ are positive constants (depending on the ground states of the small system $(u_1, u_2)$ or $(u_1, u_3)$). Moreover, roughly speaking, if $\min\{\mathfrak{H}, \mathfrak{G}\}>0$ then the interaction between the system of $(u_1, u_2)$ and the single equation of $u_3$ (or the pair of $(u_1, u_3)$ and $u_2$) is ``attractive'' and consequently the ground states exist; while if $\min\{\mathfrak{H}, \mathfrak{G}\}<0$ then the interaction between the system of $(u_1, u_2)$ and the single equation of $u_3$ (or the pair of $(u_1, u_3)$ and $u_2$) is ``repulsive'' and consequently the ground states do not exist. Based on this observation, if we further assume that $0<-\beta_{2,3}<<\min\{\beta_{1,2}, \beta_{1,3}\}$ in the case $\lambda_1=\min\{\lambda_2,\lambda_3\}$, then the ground states of \eqref{eqnew0001} still exist. Thus, Theorem \ref{coro0001} gives an almost complete result about the existence and nonexistence of ground states of \eqref{eqnew0001}. (A worked instance of the dominant-term comparison behind these signs is recorded right after this remark.)
\item[$(c)$] The existence of the ground states of \eqref{eqnew0001} with the Morse index $3$ for the purely attractive case and the nonexistence of the ground states of \eqref{eqnew0001} for the repulsive-mixed case are actually proved in
\cite[Corollary~1.3 and Theorem~1.6]{ST16}, respectively. We list them in Theorem~\ref{coro0001} for the sake of completeness. The existence of ground states of \eqref{eqnew0001} with the Morse index $1$ for the purely attractive case is proved in \cite[Theorem~2.1]{LW10}. Here, we provide a different proof of this result.
\end{enumerate}
\end{remark}
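To make the sign comparison in $(b)$ of Remark~\ref{rmk0003} concrete, we record a short worked instance (added here only as an illustration of the dominant-term argument). Suppose, as in $(3)$ of Theorem~\ref{coro0001}, that $\lambda_1<\min\{\lambda_2,\lambda_3\}$, so that $\min\{\sqrt{\lambda_1},\sqrt{\lambda_3}\}=\sqrt{\lambda_1}$. Then, for every fixed large $R$ and $\beta_{2,3}<0$,
\begin{eqnarray*}
&&C\beta_{1,3}R^{1-N+\gamma}e^{-2\sqrt{\lambda_1}R}+C'\beta_{2,3}R^{1-N+\gamma'}e^{-2\min\{\sqrt{\lambda_2},\sqrt{\lambda_3}\}R}\\
&=&C\beta_{1,3}R^{1-N+\gamma}e^{-2\sqrt{\lambda_1}R}\Big(1-\frac{C'|\beta_{2,3}|}{C\beta_{1,3}}R^{\gamma'-\gamma}e^{-2(\min\{\sqrt{\lambda_2},\sqrt{\lambda_3}\}-\sqrt{\lambda_1})R}\Big),
\end{eqnarray*}
and the bracket tends to $1$ as $R\to+\infty$ since $\min\{\sqrt{\lambda_2},\sqrt{\lambda_3}\}>\sqrt{\lambda_1}$. Hence $\mathfrak{H}>0$ no matter how negative $\beta_{2,3}$ is, which is consistent with the existence statements in $(3)$ of Theorem~\ref{coro0001}.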
As we stated above, in proving Theorem~\ref{coro0001}, our major idea is to regard the three-coupled system~\eqref{eqnew0001} as an attractively two-coupled system coupled with a single equation, and to precisely measure the interaction between them. To extend this idea to the general $k$-coupled system~\eqref{eqn0001} for $k\geq 4$, we need to further decompose the $k$-coupled system~\eqref{eqn0001}, which is based on the following concepts of {\bf optimal block decomposition} and {\bf eventual block decomposition}. The definitions for the general $k$-component case are tedious and lengthy, so we state them in the next section and only introduce the key steps here: first we group the components (equivalently, the diagonal entries $\mu_j$) into blocks of sub-matrices so that inside each block the interactions between components are all attractive. The decomposition is called {\bf optimal} if the number of blocks needed is the least, and the number of blocks is called the {\bf degree} of this {\bf optimal block decomposition} and is denoted by $d$. In the second step we need to group different ``attractive'' blocks together to form larger blocks. To see whether two blocks are attractive or repulsive, we need to define quantities, named {\bf interaction forces}, which measure the interaction between different blocks in an optimal block decomposition. Roughly speaking, if the quantity is positive then the interaction between the corresponding blocks is ``attractive'', while if this quantity is negative then the interaction between these two blocks is ``repulsive''. We now group all possible ``attractive'' blocks together into bigger blocks of sub-matrices so that inside each bigger block the forces between blocks are all ``attractive''. We repeat these steps until we cannot group them in this way anymore. Then the remaining matrix, consisting of ``largest'' attractive blocks, is called an {\bf eventual block decomposition}, and the number of the ``largest'' blocks is called the {\bf degree} of an eventual block decomposition and is denoted by $m$. More precise definitions can be found in the next section. Let us test these ideas with the first nontrivial case $k=4$:
\begin{equation}\label{eqnewnew0001}
\left\{\aligned&-\Delta u_1+\lambda_1u_1=\mu_1u_1^3+\beta_{1,2} u_2^2u_1+\beta_{1,3}u_3^2u_1+\beta_{1,4}u_4^2u_1\quad\text{in }\mathbb{R}^N,\\
&-\Delta u_2+\lambda_2u_2=\mu_2u_2^3+\beta_{1,2} u_1^2u_2+\beta_{2,3}u_3^2u_2+\beta_{2,4}u_4^2u_2\quad\text{in }\mathbb{R}^N,\\
&-\Delta u_3+\lambda_3u_3=\mu_3u_3^3+\beta_{1,3} u_1^2u_3+\beta_{2,3}u_2^2u_3+\beta_{3,4}u_4^2u_3\quad\text{in }\mathbb{R}^N,\\
&-\Delta u_4+\lambda_4u_4=\mu_4u_4^3+\beta_{1,4} u_1^2u_4+\beta_{2,4}u_2^2u_4+\beta_{3,4}u_3^2u_4\quad\text{in }\mathbb{R}^N,\\
&u_i>0\quad\text{in }\mathbb{R}^N,\quad u_i(x)\to0\quad\text{as }|x|\to+\infty,\quad i=1,2,3,4.\endaligned\right.
\end{equation}
We assume that the coefficients satisfy
\begin{equation}
\label{Hdef}
(H)\ \ \ \ \ \ \ \beta_{1,2}>0, \beta_{1,3}>0, \beta_{1,4}<0, \beta_{2,3}<0, \beta_{2,4}>0, \beta_{3,4}<0.
\end{equation}
Clearly, an optimal block decomposition in this case can be given by
\begin{eqnarray}\label{eqnewnew0007}
\label{A1}
\mathbf{A}_1=\left(\aligned \left(\aligned &\mu_1\quad \beta_{1,2}\\
&\beta_{1,2}\quad \mu_2 \endaligned\right)
\ \ \left(\aligned&\beta_{1,3}\\
&\beta_{2,3}\endaligned\right)\ \ \left(\aligned&\beta_{1,4}\\
&\beta_{2,4}\endaligned\right)\\
\left(\beta_{1,3}\quad \beta_{2,3}\right)\quad\quad\mu_3 \quad\quad\quad\beta_{3,4}\\
\left(\beta_{1,4}\quad \beta_{2,4}\right)
\quad\quad\beta_{3,4} \quad \quad\quad\mu_4\endaligned\right)
\end{eqnarray}
with degree $d=3$. To obtain an eventual block decomposition, we first need to define the interaction forces. To do this, we rewrite $\mathbf{A}_1$ as follows:
\begin{eqnarray}
\mathbf{A}_1=\left(\aligned B_{1,1}\quad B_{1,2}\quad B_{1,3}\\
B_{1,2}\quad B_{2,2}\quad B_{2,3}\\
B_{1,3}\quad B_{2,3}\quad B_{3,3}\endaligned\right).
\end{eqnarray}
Here $B_{i,j}$ are given in (\ref{A1}). For example $B_{2,2}=\mu_3, B_{3,3}= \mu_4$.
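As a purely combinatorial aside (added here for illustration and not used in any proof), the degree of this decomposition with respect to the fixed ordering $(1,2,3,4)$ can be checked by brute force: one enumerates all ways of cutting the ordered components into contiguous blocks and keeps only those in which every inner-coupling is positive. A minimal Python sketch, with the sign pattern $(H)$ hard-coded as an illustrative dictionary, is as follows; it returns $3$, in agreement with $\mathbf{A}_1$.
\begin{verbatim}
from itertools import combinations

# Sign pattern (H) for (u_1,...,u_4): +1 means attractive, -1 repulsive.
# Illustrative encoding only; the magnitudes |beta_{i,j}| play no role here.
sign = {(1, 2): +1, (1, 3): +1, (1, 4): -1,
        (2, 3): -1, (2, 4): +1, (3, 4): -1}

def all_attractive(block):
    """A diagonal block is admissible if all of its inner-couplings are positive."""
    return all(sign[tuple(sorted(p))] > 0 for p in combinations(block, 2))

def degree_for_order(order):
    """Smallest number of contiguous blocks of `order` whose inner-couplings are
    all positive, i.e. the degree of a mixed decomposition for this fixed ordering."""
    n, best = len(order), len(order)
    for cuts in range(n):                              # how many cut points
        for pos in combinations(range(1, n), cuts):
            pieces = [order[i:j] for i, j in zip((0,) + pos, pos + (n,))]
            if all(all_attractive(p) for p in pieces):
                best = min(best, len(pieces))
    return best

print(degree_for_order((1, 2, 3, 4)))    # expected: 3, matching the blocks of A_1
\end{verbatim}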
Since the ground states in $B_{1,1}$ exist for some ranges of $\beta_{1,2}$ and the ground states in $B_{2,2}$ and $B_{3,3}$ also exist, and they all decay exponentially at infinity, we may define the quantities
\begin{eqnarray}\label{eqnewnew0002}
\mathfrak{F}_{1,2}^0=\sup_{R>>1}&(&C_{1,3}^{1,2}\beta_{1,3}R^{1-N+\gamma_{1,3}}e^{-2\min\{\sqrt{\lambda_1}, \sqrt{\lambda_3}\}R}\notag\\
&&+C_{2,3}^{1,2}\beta_{2,3}R^{1-N+\gamma_{2,3}}e^{-2\min\{\sqrt{\lambda_2}, \sqrt{\lambda_3}\}R}),
\end{eqnarray}
\begin{eqnarray*}
\mathfrak{F}_{1,3}^0=\sup_{R>>1}&(&C_{1,4}^{1,3}\beta_{1,4}R^{1-N+\gamma_{1,4}}e^{-2\min\{\sqrt{\lambda_1}, \sqrt{\lambda_4}\}R}\notag\\
&&+C_{2,4}^{1,3}\beta_{2,4}R^{1-N+\gamma_{2,4}}e^{-2\min\{\sqrt{\lambda_2}, \sqrt{\lambda_4}\}R})
\end{eqnarray*}
and
\begin{eqnarray*}
\mathfrak{F}_{2,3}^0=\sup_{R>>1}(C_{3,4}^{2,3}\beta_{3,4}R^{1-N+\gamma_{3,4}}e^{-2\min\{\sqrt{\lambda_3}, \sqrt{\lambda_4}\}R}),
\end{eqnarray*}
where $\gamma_{i,j}$ are positive constants depending only on $N$ and the relations among the $\lambda_j$, and $C_{i,j}^{s,t}$ are positive constants depending only on the ground states in the corresponding blocks. Since the set of ground states in each block at its least critical value is compact, the $C_{i,j}^{s,t}$ are uniformly bounded from below and above. These quantities $\mathfrak{F}_{i,j}^0$, like $\mathfrak{H}$ and $\mathfrak{G}$, are used to measure the interaction between the blocks $B_{i,i}$ and $B_{j,j}$ from the viewpoint of the concentration-compactness principle. Roughly speaking, the sign of $\mathfrak{F}_{i,j}^0$ determines whether the blocks $B_{i,i}$ and $B_{j,j}$ are ``attractive'' $(\mathfrak{F}_{i,j}^0>0)$ or ``repulsive'' $(\mathfrak{F}_{i,j}^0<0)$. Note that $\mathfrak{F}^0_{2,3}<0$. If both $\mathfrak{F}^0_{1,2}<0$ and $\mathfrak{F}^0_{1,3}<0$, then the blocks in $\mathbf{A}_1$ cannot be further grouped into ``bigger'' blocks so that inside each bigger block the interaction forces between blocks are all ``attractive''. Thus, $\mathbf{A}_1$ is also an eventual block decomposition with degree $m=3$. If either $\mathfrak{F}_{1,2}^0>0$ or $\mathfrak{F}_{1,3}^0>0$, then roughly speaking, by Theorem~\ref{coro0001} there exists a ground state in the ``bigger'' block:
\begin{eqnarray*}
C_{1,1}=\left(\aligned B_{1,1}\quad B_{1,2}\\
B_{1,2}\quad B_{2,2}\endaligned\right).
\end{eqnarray*}
Here, without loss of generality, we assume $\mathfrak{F}_{1,2}^0>0$ (the other case $\mathfrak{F}_{1,3}^0>0$ is similar).
Thus, we may further group $\mathbf{A}_1$ as follows:
\begin{eqnarray*}
\mathbf{A}_2=\left(\aligned &\left(\aligned B_{1,1}\quad B_{1,2}\\
B_{1,2}\quad B_{2,2}\endaligned\right)\quad\left(\aligned B_{1,3}\\ B_{2,3}\endaligned\right)\\
&\left(\aligned B_{1,3}\quad B_{2,3}\endaligned\right)\quad\quad B_{3,3}\endaligned\right).
\end{eqnarray*}
We rewrite $\mathbf{A}_2$ by
\begin{eqnarray}\label{eqnewnew0008}
\mathbf{A}_2=\left(\aligned C_{1,1}\quad C_{1,2}\\
C_{1,2}\quad C_{2,2}\endaligned\right)
\end{eqnarray}
and define the interaction force between $C_{1,1}$ and $C_{2,2}$ by $\mathfrak{F}^1_{1,2}=\mathfrak{F}^0_{1,3}+\mathfrak{F}^0_{2,3}$, which, like $\mathfrak{F}_{i,j}^0$, is used to measure the interaction between the blocks $C_{1,1}$ and $C_{2,2}$; roughly speaking,
the sign of $\mathfrak{F}_{1,2}^1$ determines whether the blocks $C_{1,1}$ and $C_{2,2}$ are ``attractive'' $(\mathfrak{F}_{1,2}^1>0)$ or ``repulsive'' $(\mathfrak{F}_{1,2}^1<0)$. If $\mathfrak{F}^1_{1,2}<0$ then the blocks in $\mathbf{A}_2$ cannot be further grouped into ``bigger'' blocks so that inside each bigger block the interaction forces between blocks are all ``attractive''. Thus, $\mathbf{A}_2$ is an eventual block decomposition with degree $m=2$. If $\mathfrak{F}^1_{1,2}>0$ then we may further group $\mathbf{A}_2$ as a whole element
\begin{eqnarray*}
\mathbf{A}_3=\left(\aligned \left[\aligned C_{1,1}\quad C_{1,2}\\
C_{1,2}\quad C_{2,2}\endaligned\right]\endaligned\right).
\end{eqnarray*}
Since $\mathbf{A}_3$ only has one block, we cannot further group it into a ``bigger'' block. Therefore, $\mathbf{A}_3$ is an eventual block decomposition with degree $m=1$. There is another optimal block decomposition, with the blocks $(u_1,u_3)$, $u_2$ and $u_4$. One can use the same method to obtain its eventual block decompositions and count their degrees. Since the defined interaction forces almost determine whether the corresponding blocks are ``attractive'' or ``repulsive'', roughly speaking, the degrees of the eventual block decompositions determine the number of groups of the components $u_j$ that ``stay together''. Therefore, the ground states of \eqref{eqnewnew0001} are expected to exist if and only if the degrees of all eventual block decompositions equal $1$. Now, our results for \eqref{eqnewnew0001} in the case $(H)$ can be stated as follows.
\begin{theorem}\label{thm0003}
Let $N=1,2,3$. Then in the case $(H)$ at (\ref{Hdef}),
\begin{enumerate}
\item[$(1)$] if $\lambda_1=\lambda_2<\min\{\lambda_3,\lambda_4\}$ and $0<-\beta_{2,3},-\beta_{1,4},-\beta_{3,4}<<\beta_{1,2},\beta_{2,4},\beta_{1,3}$ then there exist $\widehat{\beta}_0>\beta_0>0$ such that
\begin{enumerate}
\item[$(i)$] if $\beta_{1,2},\beta_{1,3}<\beta_0$ then \eqref{eqnewnew0001} has a ground state with the Morse index $4$,
\item[$(ii)$] if $\beta_{1,3}<\beta_0$ and $\beta_{1,2}>\widehat{\beta}_0$ then \eqref{eqnewnew0001} has a ground state with the Morse index $3$.
\end{enumerate}
\item[$(2)$] Assume $\beta_{1,2}=\delta^{t_{1,2}}\widehat{\beta}_{1,2}$, $\beta_{1,3}=\delta^{t_{1,3}}\widehat{\beta}_{1,3}$, $\beta_{2,3}=-\delta^{t_{2,3}}\widehat{\beta}_{2,3}$, $\beta_{1,4}=-\delta^{t_{1,4}}\widehat{\beta}_{1,4}$, $\beta_{2,4}=\delta^{t_{2,4}}\widehat{\beta}_{2,4}$ and $\beta_{3,4}=-\delta^{t_{3,4}}\widehat{\beta}_{3,4}$, where $t_{i,j}$ and $\widehat{\beta}_{i,j}$ are all positive absolute constants and $\delta>0$ is a small parameter. If $\min\{\lambda_3,\lambda_4\}<\min\{\lambda_1,\lambda_2\}$ and $\max\{t_{2,3},t_{1,4}, t_{3,4}\}<t_{1,2}<\min\{t_{1,3}, t_{2,4}\}$, then \eqref{eqnewnew0001} has no ground states for $\delta>0$ sufficiently small.
\end{enumerate}
\end{theorem}
\begin{remark}
As in Theorem~\ref{coro0001}, the assumptions $\lambda_1=\lambda_2<\min\{\lambda_3,\lambda_4\}$ and $0<-\beta_{2,3},-\beta_{1,4},-\beta_{3,4}<<\beta_{1,2},\beta_{2,4},\beta_{1,3}$ are used to guarantee that all eventual block decompositions have degree $m=1$, and they can be slightly generalized as in $(b)$ of Remark~\ref{rmk0003}.
\end{remark}
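Before turning to other sign patterns, we note that the regrouping procedure sketched above is, at the level of signs, a finite decision tree. The following toy Python sketch (ours, purely illustrative; the numerical values of the forces are placeholders, only their signs matter) encodes the two regrouping rounds described for the decomposition $\mathbf{A}_1$ and returns the degree $m$ of the resulting eventual block decomposition.
\begin{verbatim}
def eventual_degree(F12, F13, F23):
    """Degree m of an eventual block decomposition obtained from A_1 by the
    two regrouping rounds described above (for (H) one always has F23 < 0).
    Only the signs of the three interaction forces matter."""
    if F12 <= 0 and F13 <= 0:
        return 3                      # no two blocks attract: A_1 is already eventual
    # merge the attractive pair of blocks first (WLOG blocks 1 and 2 when F12 > 0)
    remaining_force = F13 + F23 if F12 > 0 else F12 + F23
    return 1 if remaining_force > 0 else 2

# Placeholder values, chosen only to reproduce the three scenarios discussed above.
print(eventual_degree(F12=0.3, F13=-0.1, F23=-0.5))   # expected 2
print(eventual_degree(F12=0.3, F13=0.9, F23=-0.5))    # expected 1
print(eventual_degree(F12=-0.2, F13=-0.1, F23=-0.5))  # expected 3
\end{verbatim}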
For other cases of the couplings of the four-coupled system~\eqref{eqnewnew0001} or for the general $k$-coupled system~\eqref{eqn0001}, the strategy is the same. However, to state our results for the general $k$-coupled system~\eqref{eqn0001}, we need to rigorously define optimal block decompositions and eventual block decompositions.
\section{Block Decompositions and Statements of Main Results in the General Case}
Let us first define optimal block decompositions. Let $d=1,2,\cdots,k$, $0=a_0<a_1<\cdots<a_{d-1}<a_d=k$ and
\begin{eqnarray}\label{eqn0098}
\mathcal{K}_{t,s,\mathbf{a}_d}=(a_{t-1}, a_{t}]_{\mathbb{N}}\times(a_{s-1}, a_{s}]_{\mathbb{N}},
\end{eqnarray}
where $\mathbf{a}_d=(a_0,a_1,\cdots,a_d)$, $t,s=1,2,\cdots,d$ and $(a_{t-1}, a_{t}]_{\mathbb{N}}=(a_{t-1}, a_{t}]\cap\mathbb{N}$. Then,
\begin{eqnarray*}
\mathbf{A}_d=([\beta_{i,j}]_{(i,j)\in\mathcal{K}_{t,s,\mathbf{a}_d}})_{t,s=1,2,\cdots,d}
\end{eqnarray*}
is called a {\bf $d$-decomposition} of the coefficient matrix $\Theta=(\beta_{i,j})$. Moreover, $\mathbf{A}_d$ is called repulsive if the couplings $\beta_{i,j}$ are all negative, $\mathbf{A}_d$ is called attractive if the couplings $\beta_{i,j}$ are all positive and $\mathbf{A}_d$ is called mixed if the couplings $\beta_{i,j}$ are mixed. In $\mathbf{A}_d$, $\Theta_{t,s}=[\beta_{i,j}]_{(i,j)\in\mathcal{K}_{t,s,\mathbf{a}_d}}$ is called the $(t,s)$ block of $\mathbf{A}_d$. Moreover, if $\{i\not=j,(i,j)\in\mathcal{K}_{s,s,\mathbf{a}_d}\}\not=\emptyset$, then all couplings~$\beta_{i,j}$ with $i\not=j$ in the $(s,s)$ block $\Theta_{s,s}$ are called the $s_{th}$ inner-couplings, while the couplings~$\beta_{i,j}$ in all $(s,t)$ blocks $\Theta_{s,t}$ with $s\not=t$ are called the inter-couplings.
\vskip0.12in
Let $\mathbf{i}=({i_1}, {i_2}, \cdots, {i_k})$ be a permutation of $(1,2,\cdots,k)$. Then, correspondingly
$$
\Theta_{\mathbf{i}}=[\beta_{i_j,i_l}]_{j,l=1,2,\cdots,k}
$$
is a permutation of $\Theta$.
For the sake of clarity, we denote the corresponding $d$-decomposition of $\Theta_{\mathbf{i}}$ by $\mathbf{A}_{d,\mathbf{i}}$.
For the mixed couplings, there exist $\mathbf{i}=({i_1}, {i_2}, \cdots, {i_k})$, a permutation of $(1,2,\cdots,k)$, and $d=2,3,\cdots,k-1$ such that $\Theta_{\mathbf{i}}$ has a mixed $d$-decomposition $\mathbf{A}_{d,\mathbf{i}}$ with all inner-couplings being positive.
Let $\mathbf{A}_{d,\mathbf{i}}$ be a mixed $d$-decomposition of $\Theta_{\mathbf{i}}$ such that all inner-couplings are positive.
$\mathbf{A}_{d,\mathbf{i}}$ is called an {\bf optimally mixed block decomposition of $\Theta$ to the permutation $\mathbf{i}$}, if for any $n<d$ and any $n$-decomposition of $\Theta_{\mathbf{i}}$, there exists at least one negative inner-coupling.
By our definitions, an optimally mixed block decomposition of $\Theta$ to the permutation $\mathbf{i}$, say $\mathbf{A}_{d,\mathbf{i}}$, is one for which the number of $(s,s)$ blocks of $\mathbf{A}_{d,\mathbf{i}}$ is the smallest among all decompositions of $\Theta_{\mathbf{i}}$ whose inner-couplings are all positive. Clearly, for a given permutation $\mathbf{i}$, any optimally mixed block decomposition of $\Theta_{\mathbf{i}}$ to this fixed permutation has the same number of $(s,s)$ blocks; this number is called the degree of optimally mixed block decompositions of $\Theta$ to the permutation $\mathbf{i}$ and is denoted by $d_{\mathbf{i}}$. Let
\begin{eqnarray*}
\mathfrak{A}_{\mathbf{i}}=\{\mathbf{A}_{d,\mathbf{i}}\mid\text{all inner-couplings of $\mathbf{A}_{d,\mathbf{i}}$ are positive and }d=d_{\mathbf{i}}\}.
\end{eqnarray*}
Then, $\mathbf{A}_{d,\mathbf{i}}$ is an optimally mixed block decomposition of $\Theta$ to the permutation $\mathbf{i}$ if and only if $\mathbf{A}_{d,\mathbf{i}}\in\mathfrak{A}_{\mathbf{i}}$.
Let
\begin{eqnarray*}
\mathfrak{d}=\min\{d_{\mathbf{i}}\mid \mathbf{i}\text{ is a permutation of }(1,2,\cdots,k)\}
\end{eqnarray*}
and
\begin{eqnarray*}
\mathfrak{S}=\{\mathbf{j}\mid \mathbf{j}\text{ is a permutation of }(1,2,\cdots,k)\text{ and }d_{\mathbf{j}}=\mathfrak{d}\}.
\end{eqnarray*}
Then, $\mathfrak{S}\not=\emptyset$. $\mathbf{A}_{d_{\mathbf{j}},\mathbf{j}}$ is called an {\bf optimally mixed block decomposition of $\Theta$} if $\mathbf{j}\in\mathfrak{S}$.
By our definitions, an optimally mixed block decomposition of $\Theta$, say $\mathbf{A}_{d_{\mathbf{j}},\mathbf{j}}$, is one for which the number of $(s,s)$ blocks of $\mathbf{A}_{d_{\mathbf{j}},\mathbf{j}}$ is the smallest among all decompositions of $\Theta_{\mathbf{i}}$, over all permutations $\mathbf{i}$, whose inner-couplings are all positive. Let
\begin{eqnarray*}
\mathfrak{A}=\{\mathbf{A}_{d_{\mathbf{i}},\mathbf{i}}\mid\mathbf{A}_{d_{\mathbf{i}},\mathbf{i}} \text{ is an optimally mixed block decomposition to $\mathbf{i}$ and }d_{\mathbf{i}}=\mathfrak{d}\}.
\end{eqnarray*}
Then, $\mathbf{A}_{d_{\mathbf{i}},\mathbf{i}}$ is an optimally mixed block decomposition of $\Theta$ if and only if $\mathbf{A}_{d_{\mathbf{i}},\mathbf{i}}\in\mathfrak{A}$. Clearly, the number of $(s,s)$ blocks in every optimally mixed block decomposition is the same, and this number is called the degree of optimally mixed block decompositions of $\Theta$ and is denoted by $d$. Without loss of generality, in what follows, we always assume that $\mathbf{A}_{d_{\mathbf{o}},\mathbf{o}}\in\mathfrak{A}$, where $\mathbf{o}=(1,2,\cdots,k)$. For the sake of simplicity, we re-denote $\mathbf{A}_{d_{\mathbf{o}},\mathbf{o}}$ and $d_{\mathbf{o}}$ by $\mathbf{A}_d$ and $d$, respectively.
Since all inner-couplings of an optimally mixed block decomposition, say $\mathbf{A}_d$, are {\bf positive}, for the inter-couplings $\{\beta_{i,j}\}$, either
\begin{enumerate}
\item[$(1)$] there exists an $(s,s)$ block $\Theta_{s,s}$ such that $\beta_{i,j}$ are negative for all $i\in(a_{s-1}, a_{s}]_{\mathbb{N}}$ and $j\not\in(a_{s-1}, a_{s}]_{\mathbb{N}}$ or
\item[$(2)$] $\beta_{i,j}$ are still mixed for all $(i,j)\in\mathcal{K}_{s,t,\mathbf{a}_d}$ and all $1\leq s<t\leq d$.
\end{enumerate}
In the case~$(1)$, $\mathbf{A}_d$ is called repulsive-mixed, while in the case~$(2)$, $\mathbf{A}_d$ is called total-mixed. If there exists an optimally mixed block decomposition that is repulsive-mixed then the mixed couplings $\{\beta_{i,j}\}$ are called {\bf repulsive-mixed}, while if all optimally mixed block decompositions are total-mixed then the mixed couplings $\{\beta_{i,j}\}$ are called {\bf total-mixed}.
From the definitions above, for purely attractive couplings the optimal block decomposition has degree $d=1$, while for purely repulsive couplings the degree of the optimal block decomposition is $k$. Clearly, the optimal block decompositions of the coefficient matrix $\Theta$ for the purely attractive couplings and the purely repulsive couplings, respectively, are unique up to permutations of $(1,2,\cdots,k)$. In what follows, for the sake of simplicity, the optimally mixed block decompositions of mixed couplings are also called their optimal block decompositions. Thus, by the definition of optimal block decompositions, the couplings $\{\beta_{i,j}\}$ can be {\bf classified into four classes}: the purely attractive case, the purely repulsive case, the repulsive-mixed case and the total-mixed case.
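For small systems, the distinction between the repulsive-mixed and total-mixed cases can also be checked mechanically. The following Python sketch (an illustrative helper we add here; it takes an already-computed optimal block decomposition as input) tests whether some diagonal block is repelled by every other block, i.e., whether case~$(1)$ above occurs for that decomposition.
\begin{verbatim}
from itertools import product

def is_repulsive_mixed(blocks, sign):
    """Check case (1) above for one optimal block decomposition: is there a
    diagonal block all of whose couplings to the other blocks are negative?
    `blocks` lists the component indices of each diagonal block; `sign` encodes
    the signs of the couplings.  Purely illustrative helper."""
    for s, block in enumerate(blocks):
        outside = [j for t, other in enumerate(blocks) if t != s for j in other]
        if all(sign[tuple(sorted((i, j)))] < 0 for i, j in product(block, outside)):
            return True       # this block repels everything else: repulsive-mixed
    return False              # every block attracts at least one other block

# The pattern (H) with the blocks of A_1: no block repels all the others
# (for instance beta_{1,3} > 0 ties the block (u_1,u_2) to u_3), so this
# decomposition is total-mixed.
sign_H = {(1, 2): +1, (1, 3): +1, (1, 4): -1, (2, 3): -1, (2, 4): +1, (3, 4): -1}
print(is_repulsive_mixed([(1, 2), (3,), (4,)], sign_H))   # expected: False
\end{verbatim}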
Let us next define eventual block decompositions. We rewrite $\mathbf{A}_d$ as
\begin{eqnarray*}
\mathbf{A}_d=[\Theta_{t,s}]_{t,s=1,2,\cdots,d}
\end{eqnarray*}
and define the {\bf interaction forces} between $\Theta_{s,s}$ and $\Theta_{t,t}$ as
\begin{eqnarray*}
\mathfrak{F}_{s,t}^0=\sup_{R_{s,t}>>1}\sum_{(i,j)\in\mathcal{K}_{s,t,\mathbf{a}_d};s\not=t}
&\bigg(&\sum_{\lambda_i=\lambda_j}
C_{i,j}^{s,t}\beta_{i,j}(\frac{1}{R_{s,t}})^{N-1-\alpha}e^{-2\sqrt{\lambda_i}R_{s,t}}\notag\\
&&+\sum_{\lambda_i\not=\lambda_j}C_{i,j}^{s,t}\beta_{i,j}(\frac{1}{R_{s,t}})^{N-1}
e^{-2\min\{\sqrt{\lambda_i},\sqrt{\lambda_j}\}R_{s,t}}\bigg),
\end{eqnarray*}
where $\alpha=1$ for $N=1$ and $\alpha=\frac{1}{2}$ for $N=2,3$. Let
\begin{eqnarray*}
\mathbf{A}_{d^1}^1=[\Theta_{t,s}^1]_{t,s=1,2,\cdots,d^1}
\end{eqnarray*}
be such a decomposition: the blocks $\Theta_{t,s}^1$ consist of the $\Theta_{i,j}$ such that all interaction forces $\mathfrak{F}_{i,j}^0$ between the $\Theta_{i,i}$ and $\Theta_{j,j}$ contained in $\Theta_{t,s}^1$ are positive. Without loss of generality, we denote $\Theta_{t,s}^1$ by
\begin{eqnarray*}
\Theta_{t,s}^1=[\Theta_{i,j}]_{(i,j)\in\mathcal{K}_{t,s,\mathbf{a}^1_{d^1}}},
\end{eqnarray*}
where
\begin{eqnarray*}
\mathcal{K}_{t,s,\mathbf{a}^1_{d^1}}=(a^1_{t-1}, a^1_{t}]_{\mathbb{N}}\times(a^1_{s-1}, a^1_{s}]_{\mathbb{N}}
\end{eqnarray*}
with $\mathbf{a}^1_{d^1}=(a^1_0,a^1_1,\cdots,a^1_{d^1})$, $(a^1_{t-1}, a^1_{t}]_{\mathbb{N}}=(a^1_{t-1}, a^1_{t}]\cap\mathbb{N}$ and $0=a^1_0<a^1_1<\cdots<a^1_{d^1-1}<a^1_{d^1}=d$. We then define the interaction forces between $\Theta_{s,s}^1$ and $\Theta_{t,t}^1$ as
\begin{eqnarray*}
\mathfrak{F}_{s,t}^1=\sum_{(i,j)\in\mathcal{K}_{t,s,\mathbf{a}^1_{d^1}}}\mathfrak{F}_{i,j}^0.
\end{eqnarray*}
We repeat these two steps over and over again until we cannot group any further in this way. Without loss of generality, we assume that these two steps can be repeated $\tau$ times. Moreover, for the sake of simplicity, we re-denote the optimal block decomposition by $\mathbf{A}_{d^0}^0$. Then we will obtain a sequence of decompositions
\begin{eqnarray*}
\mathbf{A}_{d^\varsigma}^\varsigma=[\Theta_{t,s}^\varsigma]_{t,s=1,2,\cdots,d^\varsigma}
\end{eqnarray*}
with
\begin{eqnarray*}
\Theta_{t,s}^\varsigma=[\Theta_{i,j}^{\varsigma-1}]_{(i,j)\in\mathcal{K}_{t,s,\mathbf{a}^\varsigma_{d^\varsigma}}}
\end{eqnarray*}
and $1\leq\varsigma\leq\tau$,
\begin{eqnarray*}
\mathcal{K}_{t,s,\mathbf{a}^\varsigma_{d^\varsigma}}=(a^\varsigma_{t-1}, a^\varsigma_{t}]_{\mathbb{N}}\times(a^\varsigma_{s-1}, a^\varsigma_{s}]_{\mathbb{N}}
\end{eqnarray*}
with $\mathbf{a}^\varsigma_{d^\varsigma}=(a^\varsigma_0,a^\varsigma_1,\cdots,a^\varsigma_{d^\varsigma})$, $(a^\varsigma_{t-1}, a^\varsigma_{t}]_{\mathbb{N}}=(a^\varsigma_{t-1}, a^\varsigma_{t}]\cap\mathbb{N}$ and $0=a^\varsigma_0<a^\varsigma_1<\cdots<a^\varsigma_{d^\varsigma-1}<a^\varsigma_{d^\varsigma}=d^{\varsigma-1}$, and a sequence $1\leq d^{\tau}< d^{\tau-1}< \cdots< d^1< d^0=d$. $\mathbf{A}_{d^\tau}^\tau$ is called an {\bf eventual block decomposition} of $\mathbf{A}_{d^0}^0$, and the number of $(s,s)$ blocks $\Theta_{s,s}^\tau$ is called the degree of $\mathbf{A}_{d^\tau}^\tau$ and is denoted by $m$. To obtain all eventual block decompositions of $\mathbf{A}_{d^0}^0$, for the $\varsigma_{th}$ decomposition $\mathbf{A}_{d^\varsigma}^\varsigma$, $0\leq\varsigma\leq\tau-1$, we should write down all next decompositions $\mathbf{A}_{d^{\varsigma+1}}^{\varsigma+1}$ in the above way under the action of permutations. Clearly, for other optimal block decompositions, we can obtain their eventual block decompositions in the same way. By our definitions, the degrees of eventual block decompositions in the purely repulsive case and in the repulsive-mixed case are always strictly larger than $1$, while the degrees of eventual block decompositions in the purely attractive case are always equal to $1$.
In the $(s,s)$ block $\Theta_{s,s}=[\beta_{i,j}]_{(i,j)\in\mathcal{K}_{s,s,\mathbf{a}_d}}$ of $\mathbf{A}_d$, either $\{i\not=j,(i,j)\in\mathcal{K}_{s,s,\mathbf{a}_d}\}\not=\emptyset$ or $\{i\not=j,(i,j)\in\mathcal{K}_{s,s,\mathbf{a}_d}\}=\emptyset$. Without loss of generality, we assume that $\{i\not=j,(i,j)\in\mathcal{K}_{s,s,\mathbf{a}_d}\}\not=\emptyset$ for $s=1,2,\cdots,s_0$ and $\{i\not=j,(i,j)\in\mathcal{K}_{s,s,\mathbf{a}_d}\}=\emptyset$ for $s=s_0+1,\cdots,d$ with an $s_0\in\{0,1,2,\cdots,d\}$. For every $d\leq\gamma\leq k$, there exists a unique $0\leq s^*\leq s_0$ such that $a_{s^*}\leq k-\gamma<a_{s^*+1}$. Now, our results for the general $k$-coupled system~\eqref{eqn0001} can be stated as follows.
\begin{theorem}\label{thm0002}
Let $N=1,2,3$ and $k\geq 3$. Suppose that the degree of optimal block decompositions of the coefficient matrix $\Theta$ is $d$. Then,
\begin{enumerate}
\item[$(1)$] if all eventual block decompositions satisfy $m=1$ then for every $d\leq\gamma\leq k$, there exist $\widehat{\beta}_0>\beta_0>0$ such that if
\begin{enumerate}
\item[$(i)$] $\beta_{i,j}>\widehat{\beta}_0$ and $|\beta_{i,j}-\beta_{i,l}|<<1$ for all $(i,j), (i,l)\in\mathcal{K}_{s,s,\mathbf{a}_d}$ with $i\not=j$, $i\not=l$ and $j\not=l$, and $i,j,l\leq k-\gamma+1$,
\item[$(ii)$] $\beta_{i,j}<\beta_0$ for all other $(i,j)$ with $i\not=j$ that are not contained in $(i)$,
\end{enumerate}
then \eqref{eqn0001} has a ground state with the Morse index $\gamma$, provided that $|\lambda_i-\lambda_j|<<1$ for all $i,j\in\mathcal{K}_{s,s,\mathbf{a}_d}$ and $i\not=j$ with $0\leq s\leq s^*$ satisfying $a_s-a_{s-1}\geq3$ and for all $i,j\in\mathcal{K}_{s^*+1,s^*+1,\mathbf{a}_d}$,$i\not=j$ and $i,j\leq k-\gamma+1$ satisfying $k-\gamma-a_{s^*}\geq3$. In particular, in the purely attractive case, for every $1\leq\gamma\leq k$, \eqref{eqn0001} has a ground state with the Morse index $\gamma$.
\item[$(2)$] Suppose $\beta_{i,j}=\delta^{t_{i,j}}\widehat{\beta}_{i,j}$, where $\delta>0$ is a parameter and $t_{i,j}$, $\widehat{\beta}_{i,j}$ are absolute constants. If the couplings $\beta_{i,j}$ are total-mixed, $t_{i,j}=t_0$ for all $(i,j)\in\mathcal{K}_{s,s,\mathbf{a}_d}$ and all $0\leq s\leq s_0$, $t_{0}< t_{min,int,+}$, $t_{max,-}<t_{min,+}$ and
\begin{eqnarray*}
\min\{\sqrt{\lambda_{i_0}}, \sqrt{\lambda_{j_0}}\}\geq\min\{\sqrt{\lambda_{i_0'}}, \sqrt{\lambda_{j_0'}}\}
\end{eqnarray*}
for all $(i_0,j_0)$ and $(i_0',j_0')$ with $\beta_{i_0,j_0}>0>\beta_{i_0',j_0'}$,
then $\mathcal{C}_{\mathcal{N}}$ cannot be attained for $\delta>0$ sufficiently small. That is, \eqref{eqn0001} has no ground states. Here, $t_{max,-}=\max\{t_{i,j}\mid\widehat{\beta}_{i,j}<0\}$, $t_{min,+}=\min\{t_{i,j}\mid\widehat{\beta}_{i,j}>0\}$,
and
$$
t_{min,int,+}=\min\{t_{i,j}\mid\widehat{\beta}_{i,j}>0\text{ and }\beta_{i,j}\text{ is an inter-coupling}\}.
$$
\item[$(3)$] If the couplings $\beta_{i,j}$ are repulsive-mixed or purely repulsive, then $\mathcal{C}_{\mathcal{N}}$ cannot be attained, provided that the coefficient matrix $\Theta= (\beta_{i,j})$ is positive definite. That is, \eqref{eqn0001} has no ground states.
\end{enumerate}
\end{theorem}
\begin{remark}
\begin{enumerate}
\item[$(a)$] The existence result yields a very interesting consequence: the degree of optimal block decompositions determines a lower bound on the Morse index of the ground states of \eqref{eqn0001}. According to our definitions, the degree of optimal block decompositions is the smallest number of groups made up of the components $\{u_j\}$ such that, within each group, the components are all attractive to each other. This implies that, in Bose-Einstein condensates for multi-species condensates, the components $\{u_j\}$ will huddle together as much as possible. On the other hand, as one can see by comparing Theorems~\ref{coro0001} and \ref{thm0003}, the existence conditions for the four-coupled system~\eqref{eqnewnew0001} in the total-mixed case $(H)$ at (\ref{Hdef}) are much stronger than those for the three-coupled system~\eqref{eqnew0001}. This is caused by the fact that the four-coupled system~\eqref{eqnewnew0001} has more $(s,s)$ blocks in its optimal block decompositions in the total-mixed case $(H)$ at (\ref{Hdef}), which requires more interaction forces to be positive to guarantee the existence of ground states. Thus, it seems that ground states are harder to obtain when the optimal block decompositions have more $(s,s)$ blocks. In the extremal case in this direction, i.e., the purely repulsive case or the repulsive-mixed case, there are no ground states.
\item[$(b)$] As we pointed out in $(c)$ of Remark~\ref{rmk0003}, some existence and nonexistence results for \eqref{eqn0001} in some very special cases have been obtained in the literature, see, for example, \cite{LW05,LW10,ST16}.
\item[$(c)$] Another interesting fact is that the Morse index of ground states is related to the number of eigenvalues of the coefficient matrix. To understand this relation, we use the four-coupled system~\eqref{eqnewnew0001} in the total-mixed case $(H)$ at (\ref{Hdef}) as an example. Indeed, under the conditions of $(1)$ of Theorem~\ref{thm0003}, the coefficient matrix is nonsingular. Moreover, in $(i)$ of $(1)$ of Theorem~\ref{thm0003} the coefficient matrix has four positive eigenvalues, while in $(ii)$ of $(1)$ of Theorem~\ref{thm0003} the coefficient matrix has three positive eigenvalues and one negative eigenvalue. Since roughly speaking, the superlinear nonlinearities are determined by the coefficient matrix and they ``generate'' the negative part in the second derivative of the functional, $\gamma$ positive eigenvalues of the coefficient matrix will ``generate'' $\gamma$ Morse index of the ground states.
\end{enumerate}
\end{remark}
Since the main ideas in proving these three theorems are similar, to make our proofs easier to follow and to avoid unnecessarily complicated calculations, we only give a complete proof of Theorem~\ref{coro0001}, in section~4. We also sketch the proofs of Theorems~\ref{thm0003} and \ref{thm0002}, pointing out the necessary changes, in section~5.
\noindent{\bf\large Notations.} Throughout this paper, $C$ and $C'$ are used indiscriminately to denote various positive absolute constants. $a\sim b$ means that $C'b\leq a\leq Cb$ and $a\lesssim b$ means that $a\leq Cb$.
\section{Three-coupled system~\eqref{eqnew0001}}
\subsection{Some preliminaries}
In this section, we state some well-known results which will be frequently used in proving Theorem~\ref{coro0001}.
Let $w_j$ be the unique solution of the following scalar field equation
\begin{eqnarray}\label{eqnew0013}
\left\{\aligned&-\Delta u +\lambda_j u =\mu_ju^3\quad\text{in }\mathbb{R}^N,\\
&u>0\quad\text{in }\mathbb{R}^N,\quad u(0)=\max_{x\in\mathbb{R}^N}u(x),\\
&u(x)\to0\quad\text{as }|x|\to+\infty.
\endaligned\right.
\end{eqnarray}
Then, $w_j$, satisfying
\begin{eqnarray}\label{eqnew9998}
w_j(|x|)\sim |x|^{-\frac{N-1}{2}}e^{-\sqrt{\lambda_j}|x|}\quad\text{as }|x|\to+\infty,
\end{eqnarray}
is radially symmetric and strictly decreasing in $|x|$.
The energy functional of \eqref{eqnew0013} in $\mathcal{H}_j$ is given by
\begin{eqnarray}\label{eqnew0099}
\mathcal{E}_j(u)=\frac{1}{2}\|u\|_{\lambda_j}^2-\frac{\mu_j}{4}\|u\|_4^4
\end{eqnarray}
and the corresponding Nehari manifold is
\begin{eqnarray*}
\mathcal{N}_j=\{u\in\mathcal{H}_j\backslash\{0\}\mid\mathcal{E}_j'(u)u=0\}.
\end{eqnarray*}
We need the following estimate, which will be used frequently in this paper. The proof is technical and is therefore postponed to the appendix.
\begin{lemma}\label{lemn0010}
Let $N=1,2,3$ and $w_j$ be the unique solution of \eqref{eqnew0013}. Suppose $e_1\in\mathbb{R}^N$ such that $|e_1|=1$. Then as $R\to+\infty$,
\begin{eqnarray*}
\int_{\mathbb{R}^N}w_i^2(x)w_j^2(x-Re_1)dx\sim \left\{\aligned R^{1-N}e^{-2\min\{\sqrt{\lambda_i}, \sqrt{\lambda_j}\}R},\quad \lambda_i\not=\lambda_j;\\
R^{1+\alpha-N}e^{-2\sqrt{\lambda}R},\quad \lambda_i=\lambda_j=\lambda,\endaligned
\right.
\end{eqnarray*}
where $\alpha=1$ for $N=1$ and $\alpha=\frac{1}{2}$ for $N=2,3$.
\end{lemma}
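For $N=1$ the solution of \eqref{eqnew0013} is explicit, $w_j(x)=\sqrt{2\lambda_j/\mu_j}\,\mathrm{sech}(\sqrt{\lambda_j}\,x)$, so Lemma~\ref{lemn0010} can be sanity-checked numerically. The following Python sketch (added only as an illustration; the parameter values are arbitrary) estimates the exponential decay rate in $R$ of $\int_{\mathbb{R}}w_i^2(x)w_j^2(x-Re_1)\,dx$ and compares it with $2\min\{\sqrt{\lambda_i},\sqrt{\lambda_j}\}$.
\begin{verbatim}
import numpy as np

def w(x, lam, mu):
    # 1D solution of -w'' + lam*w = mu*w^3:  w(x) = sqrt(2*lam/mu)*sech(sqrt(lam)*x)
    return np.sqrt(2.0 * lam / mu) / np.cosh(np.sqrt(lam) * x)

lam_i, mu_i = 1.0, 1.0          # arbitrary illustrative parameters
lam_j, mu_j = 2.0, 1.5          # with lam_i != lam_j

x = np.linspace(-60.0, 60.0, 400001)

def overlap(R):
    return np.trapz(w(x, lam_i, mu_i) ** 2 * w(x - R, lam_j, mu_j) ** 2, x)

R1, R2 = 10.0, 14.0
rate = -(np.log(overlap(R2)) - np.log(overlap(R1))) / (R2 - R1)
print(rate, 2.0 * np.sqrt(min(lam_i, lam_j)))   # both should be close to 2
\end{verbatim}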
We also define energy functionals, which are of class $C^2$ in $\mathcal{H}_{i,j}=\mathcal{H}_i\times\mathcal{H}_j$, as follows:
\begin{eqnarray}\label{eqn0104}
\mathcal{E}_{i,j}(\overrightarrow{\phi})=\frac{1}{2}(\|\phi_i\|_{\lambda_i}^2+\|\phi_j\|_{\lambda_j}^2)
-\frac{1}{4}(\mu_i\|\phi_i\|_{4}^4+\mu_j\|\phi_j\|_{4}^4)-\frac{\beta_{i,j}}{2}\|\phi_i\phi_j\|_2^2,
\end{eqnarray}
where $\overrightarrow{\phi}=(\phi_i, \phi_j)$ and $(i,j)$ equals $(1,2)$, $(1,3)$ or $(2,3)$. Positive critical points of $\mathcal{E}_{i,j}(\overrightarrow{\phi})$ are equivalent to solutions of the following system
\begin{equation}\label{eqn0099}
\left\{\aligned&-\Delta u_i+\lambda_iu_i=\mu_iu_i^3+\beta_{i,j} u_j^2u_i\quad\text{in }\mathbb{R}^N,\\
&-\Delta u_j+\lambda_ju_j=\mu_ju_j^3+\beta_{i,j} u_i^2u_j\quad\text{in }\mathbb{R}^N,\\
&u_i,u_j>0\quad\text{in }\mathbb{R}^N,\quad u_i(x),u_j(x)\to0\quad\text{as }|x|\to+\infty.\endaligned\right.
\end{equation}
We define the Nehari manifold of $\mathcal{E}_{i,j}(\overrightarrow{\phi})$ as follows:
\begin{eqnarray*}
\mathcal{N}_{i,j}=\{\overrightarrow{\phi}\in\widetilde{\mathcal{H}}_{i,j}\mid \overrightarrow{\widehat{\mathcal{G}}}_{i,j}(\overrightarrow{\phi})=(\widehat{\mathcal{G}}_i(\overrightarrow{\phi}), \widehat{\mathcal{G}}_j(\overrightarrow{\phi}))=\overrightarrow{0}\},
\end{eqnarray*}
where $\widetilde{\mathcal{H}}_{i,j}=(\mathcal{H}_i\backslash\{0\})\times(\mathcal{H}_j\backslash\{0\})$, $\widehat{\mathcal{G}}_j(\overrightarrow{\phi})=\|\phi_j\|_{\lambda_j}^2
-\mu_j\|\phi_j\|_{4}^4-\beta_{i,j}\|\phi_i\phi_j\|_2^2$ and $\widehat{\mathcal{G}}_i(\overrightarrow{\phi})=\|\phi_i\|_{\lambda_i}^2
-\mu_i\|\phi_i\|_{4}^4-\beta_{i,j}\|\phi_i\phi_j\|_2^2$. Let
\begin{eqnarray}\label{eqn0110}
\mathcal{C}_{\mathcal{N}_{i,j}}=\inf_{\mathcal{N}_{i,j}}\mathcal{E}_{i,j}(\overrightarrow{\phi}).
\end{eqnarray}
Then, $\mathcal{C}_{\mathcal{N}_{i,j}}$ is well defined and nonnegative for all $i\not = j$. Moreover, there exists $0<\beta_*<\sqrt{\mu_i \mu_j}$ such that if $0<\beta_{i,j}<\beta_*$ then $\mathcal{C}_{\mathcal{N}_{i,j}}$ is attained by $\overrightarrow{\varphi}^{i,j}$, which is positive and radially symmetric (cf. \cite[Theorem~1.2]{CZ13}). Clearly, $\overrightarrow{\varphi}^{i,j}$ is also a solution of \eqref{eqn0099}. Applying the comparison principle as in \cite[(4.6) and (4.7)]{LW05} yields that
\begin{eqnarray}\label{eqnew9999}
\varphi^{i,j}_{i}(|x|)\sim |x|^{-\frac{N-1}{2}}e^{-\sqrt{\lambda_i}|x|}\quad\text{as }|x|\to+\infty.
\end{eqnarray}
\subsection{Ground states with the Morse index 3}
In this section, we will study the existence of the ground states of \eqref{eqnew0001} with the Morse index 3, in the total-mixed case~$(d)$: $\beta_{1,2}>0$, $\beta_{1,3}>0$ and $\beta_{2,3}<0$.
Recall the definition of the Nehari manifold $\mathcal{N}$ at (\ref{Neh1}) and the least energy value
$\mathcal{C}_{\mathcal{N}}=\inf_{\mathcal{N}}\mathcal{E}(\overrightarrow{u})$ at (\ref{CN}).
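Before estimating $\mathcal{C}_{\mathcal{N}}$, we record an elementary identity that will be used repeatedly in the energy estimates below; here we assume, consistently with \eqref{eqn0104}, that $\mathcal{E}$ is the cubic energy of \eqref{eqnew0001} and that $\mathcal{N}$ is determined by the constraints $\mathcal{G}_j(\overrightarrow{u})=\|u_j\|_{\lambda_j}^2-\mu_j\|u_j\|_4^4-\sum_{i\not=j}\beta_{i,j}\|u_iu_j\|_2^2=0$, $j=1,2,3$. For every $\overrightarrow{u}\in\mathcal{N}$,
\begin{eqnarray*}
\mathcal{E}(\overrightarrow{u})=\mathcal{E}(\overrightarrow{u})-\frac{1}{4}\sum_{j=1}^3\mathcal{G}_j(\overrightarrow{u})
=\frac{1}{4}\sum_{j=1}^3\|u_j\|_{\lambda_j}^2
=\frac{1}{4}\bigg(\sum_{j=1}^3\mu_j\|u_j\|_4^4+2\sum_{i,j=1,i<j}^3\beta_{i,j}\|u_iu_j\|_2^2\bigg).
\end{eqnarray*}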
Using $(w_{1,-R},w_2,w_{3,R})$ as a test function and calculating as in the proof of \cite[Theorem~1]{LW05} yields
\begin{eqnarray}\label{eqnew0098}
\mathcal{C}_{\mathcal{N}}\leq\sum_{j=1}^3\mathcal{E}_j(w_j),
\end{eqnarray}
where $w_j$ and $\mathcal{E}_j(u)$ are given by \eqref{eqnew9998} and \eqref{eqnew0099}, respectively, and $w_{j,z}=w_j(x+z)$.
\begin{lemma}\label{lemn0001}
There exists $\beta_0>0$ such that $\mathcal{N}$ contains a $(PS)$ sequence at the least energy value $\mathcal{C}_{\mathcal{N}}$ for $0<\beta_{1,2},\beta_{1,3}<\beta_0$ and $\beta_{2,3}<0$.
Moreover, any positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{N}$ is a ground state of \eqref{eqnew0001} with the Morse index 3.
\end{lemma}
\begin{proof}
The proof is standard, so we only sketch it. By a standard argument, there exists $\beta_0>0$ such that $1\lesssim\|u_j\|_4^4$ for $j=1,2,3$ and all $\overrightarrow{u}\in\mathcal{N}$ with $\sum_{j=1}^3\|u_j\|_{\lambda_j}^2\leq8\sum_{j=1}^3\mathcal{E}_j(w_j)$, provided $0<\beta_{1,2},\beta_{1,3}<\beta_0$ and $\beta_{2,3}<0$. Thus, the matrix $\Xi=[\beta_{i,j}\|u_iu_j\|_2^2]_{i,j=1,2,3}$, where $\beta_{j,j}=\mu_j$, is strictly diagonally dominant for $\overrightarrow{u}\in\mathcal{N}$ with $\sum_{j=1}^3\|u_j\|_{\lambda_j}^2\leq8\sum_{j=1}^3\mathcal{E}_j(w_j)$. It follows that $\Xi$ is positive definite, with $1\lesssim|\text{det}(\Xi)|$. Thus, applying the implicit function theorem, the Ekeland variational principle and the Taylor expansion in a standard way yields that $\mathcal{N}$ contains a $(PS)$ sequence at the least energy value $\mathcal{C}_{\mathcal{N}}$. Since $1\lesssim|\text{det}(\Xi)|$ for $\overrightarrow{u}\in\mathcal{N}$ with $\sum_{j=1}^3\|u_j\|_{\lambda_j}^2\leq8\sum_{j=1}^3\mathcal{E}_j(w_j)$, for any positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{N}$, say $\overrightarrow{v}$, we have $\mathcal{H}=\mathcal{T}_{\overrightarrow{v}}\mathcal{N}
\bigoplus(\mathbb{R}\overrightarrow{v}_1\times\mathbb{R}\overrightarrow{v}_2\times\mathbb{R}\overrightarrow{v}_3)$, where $\mathcal{T}_{\overrightarrow{v}}\mathcal{N}$ is the tangent space of $\mathcal{N}$ at $\overrightarrow{v}$, $\overrightarrow{v}_1=(v_1,0,0)$, $\overrightarrow{v}_2=(0,v_2,0)$ and $\overrightarrow{v}_3=(0,0,v_3)$. Since $\overrightarrow{v}$ is a positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{N}$, $\mathcal{E}''(\overrightarrow{v})(\overrightarrow{h},\overrightarrow{h})\geq0$ for all $\overrightarrow{h}\in\mathcal{T}_{\overrightarrow{v}}\mathcal{N}$. It follows that the Morse index of $\overrightarrow{v}$ is less than or equal to $3$. On the other hand, since
\begin{eqnarray*}
\mathcal{E}''(\overrightarrow{v})(\overrightarrow{v}_i,\overrightarrow{v}_i)=\|v_i\|_{\lambda_i}^2-3\mu_i\|v_i\|_4^4
-\sum_{j=1,j\not=i}^3\beta_{i,j}\|v_iv_j\|_2^2
=-2\mu_i\|v_i\|_4^4<0
\end{eqnarray*}
for all $i=1,2,3$, the Morse index of $\overrightarrow{v}$ is greater than or equal to $3$. Thus, $\overrightarrow{v}$ is a ground state of \eqref{eqnew0001} with the Morse index 3.
\end{proof}
By Lemma~\ref{lemn0001}, to prove the existence of the ground states of \eqref{eqnew0001} with the Morse index 3 in the total-mixed case, it is sufficient to prove the existence of a positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on the Nehari manifold $\mathcal{N}$. We start with the following energy estimate.
\begin{lemma}\label{lem0001}
Let $\beta_{1,2}>0$, $\beta_{1,3}>0$ and $\beta_{2,3}<0$. If $\lambda_1<\min\{\lambda_2,\lambda_3\}$ then
\begin{eqnarray*}
\mathcal{C}_{\mathcal{N}}<\min\{\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_3(w_3), \mathcal{C}_{\mathcal{N}_{1,3}}+\mathcal{E}_2(w_2)\}
\end{eqnarray*}
for $\beta_{1,2},\beta_{1,3}<\beta_0$, where $\beta_0$ is given by Lemma~\ref{lemn0001}, $\mathcal{C}_{\mathcal{N}_{i,j}}$ are given by \eqref{eqn0110} and $\mathcal{C}_{\mathcal{N}}=\inf_{\mathcal{N}}\mathcal{E}(\overrightarrow{u})$.
\end{lemma}
\begin{proof}
We only give the proof of $\mathcal{C}_{\mathcal{N}}<\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_3(w_3)$ since the proof of the other inequality is similar. For the sake of simplicity, we denote $\varphi^{1,2}_j$ by $\varphi_j$, where $\overrightarrow{\varphi}^{1,2}=(\varphi^{1,2}_1,\varphi^{1,2}_2)$ is a ground state of \eqref{eqn0099} for $(i,j)=(1,2)$. Let $w_{3,R}=w_3(x-Re_1)$, where $e_1\in\mathbb{R}^N$ with $|e_1|=1$. We consider the following system
\begin{eqnarray}\label{eqnew0011}
\left\{\aligned&\|\varphi_1\|_{\lambda_1}^2=\mu_1\|\varphi_1\|_4^4t_1^2(R)+\beta_{1,2}\|\varphi_1\varphi_2\|_2^2t_2^2(R)
+\beta_{1,3}\|\varphi_1w_{3,R}\|_2^2t_3^2(R),\\
&\|\varphi_2\|_{\lambda_2}^2=\mu_2\|\varphi_2\|_4^4t_2^2(R)+\beta_{1,2}\|\varphi_1\varphi_2\|_2^2t_1^2(R)
+\beta_{2,3}\|\varphi_2w_{3,R}\|_2^2t_3^2(R),\\
&\|w_3\|_{\lambda_3}^2=\mu_3\|w_3\|_4^4t_3^2(R)+\beta_{1,3}\|\varphi_1w_{3,R}\|_2^2t_1^2(R)
+\beta_{2,3}\|\varphi_2w_{3,R}\|_2^2t_2^2(R).
\endaligned\right.
\end{eqnarray}
Clearly, $\{t_j(R)\}$, $j=1,2,3$, are bounded for sufficiently large $R>0$ and $t_j(R)\to1$ as $R\to+\infty$. Moreover, since $\|\varphi_jw_{3,R}\|_2^2\to0$ as $R\to+\infty$ for $j=1,2$, by taking $\beta_0$ in Lemma~\ref{lemn0001} sufficiently small if necessary, the above linear system is uniquely solvable for $\beta_{1,2}<\beta_0$. Its unique solution $(t_1^2(R),t_2^2(R),t_3^2(R))$ is given by
\begin{eqnarray*}
t_j^2(R)=1-\frac{(1+o_R(1))(\beta_{j,3}\|\varphi_jw_{3,R}\|_2^2\mu_i\|\varphi_i\|_4^4
-\beta_{i,3}\|\varphi_iw_{3,R}\|_2^2\beta_{1,2}\|\varphi_1\varphi_2\|_2^2)}
{\prod_{l=1}^2\mu_l\|\varphi_l\|_4^4-\beta_{1,2}^2\|\varphi_1\varphi_2\|_2^4}
\end{eqnarray*}
for $(i,j)$ equal to $(1,2)$ or $(2,1)$, and
\begin{eqnarray*}
t_3^2(R)=1-\frac{1+o_R(1)}{\mu_3\|w_{3}\|_4^4}(\beta_{1,3}\|\varphi_1w_{3,R}\|_2^2+\beta_{2,3}\|\varphi_2w_{3,R}\|_2^2).
\end{eqnarray*}
Here, $o_R(1)\to0$ as $R\to+\infty$. Since $\beta_{1,2}>0$, \eqref{eqnew9999} holds for $\varphi_j$, $j=1,2$. Thus, by Lemma~\ref{lemn0010} and $\lambda_1<\min\{\lambda_2,\lambda_3\}$,
\begin{eqnarray}\label{eqnew8989}
\|\varphi_1w_{3,R}\|_2^2\sim R^{1-N}e^{-2\sqrt{\lambda_1}R}\quad\text{as }R\to+\infty.
\end{eqnarray}
By Lemma~\ref{lemn0010} once more, as $R\to+\infty$,
\begin{eqnarray}\label{eqnew8832}
\|\varphi_2w_{3,R}\|_2^2\sim \left\{\aligned R^{1-N}e^{-2\min\{\sqrt{\lambda_2}, \sqrt{\lambda_3}\}R},\quad \lambda_2\not=\lambda_3;\\
R^{1+\alpha-N}e^{-2\sqrt{\lambda}R},\quad \lambda_2=\lambda_3=\lambda,\endaligned
\right.
\end{eqnarray}
where $\alpha=1$ for $N=1$ and $\alpha=\frac{1}{2}$ for $N=2,3$.
Since $(t_1(R),t_2(R),t_3(R))$ satisfies \eqref{eqnew0011}, we can test $\mathcal{C}_{\mathcal{N}}$ by
$$
(t_1(R)\varphi_1,t_2(R)\varphi_2,t_3(R)w_{3,R})
$$
and estimate it by \eqref{eqnew8989} as follows:
\begin{eqnarray}
\mathcal{C}_{\mathcal{N}}&\leq&\frac{1}{4}(\sum_{j=1}^2t_j^2(R)\|\varphi_j\|_{\lambda_j}^2+t_3^2(R)\|w_{3,R}\|_{\lambda_3}^2)\notag\\
&\leq&\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_3(w_3)-C\beta_{1,3}R^{1-N}e^{-2\sqrt{\lambda_1}R}
-C'\beta_{2,3}\|\varphi_2w_{3,R}\|_2^2.\label{eqnew0012}
\end{eqnarray}
Since $\lambda_1<\min\{\lambda_2,\lambda_3\}$, \eqref{eqnew8832} shows that $|\beta_{2,3}|\,\|\varphi_2w_{3,R}\|_2^2=o\big(R^{1-N}e^{-2\sqrt{\lambda_1}R}\big)$ as $R\to+\infty$, so the negative term in \eqref{eqnew0012} dominates. Taking $R>0$ sufficiently large in \eqref{eqnew0012}, it follows that
\begin{eqnarray*}
\mathcal{C}_{\mathcal{N}}<\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_3(w_3),\label{eq0003}
\end{eqnarray*}
which completes the proof.
\end{proof}
\begin{remark}\label{rmk0001}
As in the proof of Lemma~\ref{lem0001}, if we use $(w_i,w_j)$ as a test function for $\mathcal{C}_{\mathcal{N}_{i,j}}$, where $(i,j)$ equals $(1,2)$ or $(1,3)$, then, taking $\beta_0>0$ sufficiently small if necessary,
\begin{eqnarray*}
\mathcal{C}_{\mathcal{N}_{i,j}}\leq\mathcal{E}_i(w_i)+\mathcal{E}_j(w_j)-\frac{\beta_{i,j}}{2}\|w_iw_j\|_2^2+O(\beta_{i,j}^2)
\end{eqnarray*}
for $0<\beta_{i,j}<\beta_0$.
\end{remark}
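For the reader's convenience, here is a minimal sketch of the computation behind this remark, using the identity $\|w_k\|_{\lambda_k}^2=\mu_k\|w_k\|_4^4$ (obtained by testing \eqref{eqnew0013} with $w_k$). Choosing $t_i,t_j=1+O(\beta_{i,j})$ so that $(t_iw_i,t_jw_j)\in\mathcal{N}_{i,j}$, one finds
\begin{eqnarray*}
\mathcal{C}_{\mathcal{N}_{i,j}}\leq\mathcal{E}_{i,j}(t_iw_i,t_jw_j)
=\sum_{k=i,j}\mu_k\|w_k\|_4^4\Big(\frac{t_k^2}{2}-\frac{t_k^4}{4}\Big)-\frac{\beta_{i,j}}{2}t_i^2t_j^2\|w_iw_j\|_2^2
\leq\mathcal{E}_i(w_i)+\mathcal{E}_j(w_j)-\frac{\beta_{i,j}}{2}\|w_iw_j\|_2^2+O(\beta_{i,j}^2),
\end{eqnarray*}
since $\frac{t^2}{2}-\frac{t^4}{4}\leq\frac{1}{4}$ for all $t$, $\mathcal{E}_k(w_k)=\frac{\mu_k}{4}\|w_k\|_4^4$ and $t_i^2t_j^2=1+O(\beta_{i,j})$.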
Now, we are prepared to prove the following existence result.
\begin{proposition}\label{prop0001}
Let $\beta_{1,2}>0$, $\beta_{1,3}>0$ and $\beta_{2,3}<0$. If $\lambda_1<\min\{\lambda_2,\lambda_3\}$ then there exists a positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{N}$ for $\beta_{1,2},\beta_{1,3}<\beta_0$, where $\beta_0$ is given by Lemma~\ref{lemn0001}. That is, \eqref{eqnew0001} has a ground state with the Morse index 3.
\end{proposition}
\begin{proof}
By Lemma~\ref{lemn0001}, there exists a $(PS)$ sequence $\{\overrightarrow{u}_n\}$ at the least energy value $\mathcal{C}_{\mathcal{N}}$. Clearly, $\{\overrightarrow{u}_n\}$ is bounded in $\mathcal{H}$. Since $1\lesssim\|u_{j,n}\|_4$ for all $j=1,2,3$, by the Lions lemma and the Sobolev embedding theorem, there exist $\{y_{j,n}\}\subset\mathbb{R}^N$ such that $u_{j,n}(x+y_{j,n})\rightharpoonup v_{j,\infty}\not=0$ weakly in $H^1(\mathbb{R}^N)$ as $n\to\infty$. We denote $v_{i,j,n}=u_{i,n}(x+y_{j,n})$. Then, $v_{i,j,n}\rightharpoonup v_{i,j,\infty}$ weakly in $H^1(\mathbb{R}^N)$ as $n\to\infty$. Moreover, $v_{j,j,\infty}=v_{j,\infty}\not=0$ for all $j=1,2,3$. Since $\{\overrightarrow{u}_n\}$ is a $(PS)$ sequence, it is standard to show that $\overrightarrow{v}_{j,\infty}=(v_{1,j,\infty},v_{2,j,\infty},v_{3,j,\infty})$ is a critical point of $\mathcal{E}(\overrightarrow{u})$ for all $j=1,2,3$. If for every $j=1,2,3$, we always have $v_{i,j,\infty}=0$ with $i\not=j$, then,
$$
\mathcal{C}_{\mathcal{N}}=\sum_{j=1}^3\frac14\|u_{j,n}\|_{\lambda_j}^2+o_n(1)=\sum_{j=1}^3\frac14\|v_{j,j,n}\|_{\lambda_j}^2+o_n(1)
\geq\sum_{j=1}^3\mathcal{E}_j(w_j)+o_n(1),
$$
which contradicts Lemma~\ref{lem0001} and Remark~\ref{rmk0001} by taking $\beta_0>0$ sufficiently small if necessary. Thus, without loss of generality, we assume that for $j=1$, one of the following cases must happen:
\begin{enumerate}
\item[$(1)$] $v_{1,1,\infty}\not=0$, $v_{2,1,\infty}\not=0$ and $v_{3,1,\infty}=0$.
\item[$(2)$] $v_{1,1,\infty}\not=0$, $v_{2,1,\infty}=0$ and $v_{3,1,\infty}\not=0$.
\item[$(3)$] $v_{1,1,\infty}\not=0$, $v_{2,1,\infty}\not=0$ and $v_{3,1,\infty}\not=0$.
\end{enumerate}
We first consider the case~$(1)$. Clearly, $(v_{1,1,\infty}, v_{2,1,\infty})$ is a nontrivial critical point of $\mathcal{E}_{1,2}(\overrightarrow{\phi})$, where $\mathcal{E}_{1,2}(\overrightarrow{\phi})$ is given by \eqref{eqn0104}. Note that for $j=3$, one of the following cases must happen:
\begin{enumerate}
\item[$(i)$] $v_{1,3,\infty}\not=0$, $v_{2,3,\infty}=0$ and $v_{3,3,\infty}\not=0$.
\item[$(ii)$] $v_{1,3,\infty}=0$, $v_{2,3,\infty}\not=0$ and $v_{3,3,\infty}\not=0$.
\item[$(iii)$] $v_{1,3,\infty}=0$, $v_{2,3,\infty}=0$ and $v_{3,3,\infty}\not=0$.
\item[$(iv)$] $v_{1,3,\infty}\not=0$, $v_{2,3,\infty}\not=0$ and $v_{3,3,\infty}\not=0$.
\end{enumerate}
If the case~$(iv)$ happens, then by a standard argument, $\mathcal{C}_{\mathcal{N}}$ is attained by $\overrightarrow{\widehat{v}}_{3,\infty}=(|v_{1,3,\infty}|,|v_{2,3,\infty}|,|v_{3,3,\infty}|)$, which, together with the Harnack inequality and the fact that $\mathcal{N}$ is a natural constraint, implies that there exists a positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{N}$. Thus, by Lemma~\ref{lemn0001}, \eqref{eqnew0001} has a ground state with the Morse index 3. Therefore, without loss of generality, we assume that one of the cases~$(i)$--$(iii)$ must happen in what follows.
Since $v_{3,3,\infty}\not=0$ and $v_{3,3,n}(x)=v_{3,1,n}(x+y_{3,n}-y_{1,n})$,
by the Sobolev embedding theorem, $|y_{3,n}-y_{1,n}|\to+\infty$ as $n\to\infty$. It follows that for every $R>0$,
\begin{eqnarray*}
\int_{\mathbb{R}^N}|v_{1,1,n}|^4dx&\geq&\int_{B_R(0)}|v_{1,1,n}|^4dx+\int_{B_R(y_{3,n}-y_{1,n})}|v_{1,1,n}|^4dx\\
&=&\int_{B_R(0)}|v_{1,1,n}|^4dx+\int_{B_R(0)}|v_{1,3,n}|^4dx.
\end{eqnarray*}
By letting $n\to\infty$ first and $R\to+\infty$ next,
$$
\|v_{1,1,n}\|_4^4\geq\|v_{1,1,\infty}\|_4^4+\|v_{1,3,\infty}\|_4^4+o_n(1).
$$
If the case~$(i)$ happens, then $(v_{1,3,\infty},v_{3,3,\infty})$ is a nontrivial critical point of $\mathcal{E}_{1,3}(\overrightarrow{\phi})$, where $\mathcal{E}_{1,3}(\overrightarrow{\phi})$ is given by \eqref{eqn0104}. Since it is standard to show that $\|v_{j,3,\infty}\|_4^4\geq\|w_j\|_4^4+o_{\beta_0}(1)$ for sufficiently small $\beta_0$,
\begin{eqnarray*}
\mathcal{C}_{\mathcal{N}}&=&\frac{1}{4}\sum_{j=1}^3\mu_j\|u_{j,n}\|_4^4+\frac{1}{2}\sum_{i=1,i<j}^3\beta_{i,j}\|u_{i,n}u_{j,n}\|_2^2+o_n(1)\\
&\geq&\frac{1}{4}\sum_{j=1}^2\mu_j\|v_{j,1,n}\|_4^4+\frac{\mu_3}{4}\|v_{3,3,n}\|_4^4+o_{\beta_0}(1)+o_n(1)\\
&\geq&\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_3(w_3)+\frac{\mu_1}{4}\|w_1\|_4^4+o_{\beta_0}(1)+o_n(1),
\end{eqnarray*}
which contradicts Lemma~\ref{lem0001} for $\beta_{i,j}<\beta_0$ by taking $\beta_0>0$ sufficiently small if necessary. Here, $o_{\beta_0}(1)\to0$ as $\beta_0\to0$. The case~$(iii)$ is also impossible since in this case,
\begin{eqnarray*}
\mathcal{C}_{\mathcal{N}}&=&\frac{1}{4}\sum_{j=1}^3\|u_{j,n}\|_{\lambda_j}^2+o_n(1)\\
&=&\frac{1}{4}\sum_{j=1}^2\|v_{j,1,n}\|_{\lambda_j}^2+\frac{1}{4}\|v_{3,3,n}\|_{\lambda_3}^2+o_n(1)\\
&\geq&\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_3(w_3)+o_n(1),
\end{eqnarray*}
which still contradicts Lemma~\ref{lem0001}. Thus, we must have the case~$(ii)$. If $|y_{1,n}-y_{2,n}|\lesssim1$, then, since $|y_{1,n}-y_{3,n}|\to+\infty$ as $n\to\infty$, we also have $|y_{2,n}-y_{3,n}|\to+\infty$ as $n\to\infty$. It follows from
\begin{eqnarray*}
v_{2,2,n}(x)=v_{2,3,n}(x+y_{2,n}-y_{3,n})
\end{eqnarray*}
that $\|v_{2,3,n}\|_4^4\geq\|v_{2,3,\infty}\|_4^4+\|v_{2,2,\infty}\|_4^4+o_n(1)$. Then by a similar calculation used in the above arguments,
\begin{eqnarray*}
\mathcal{C}_{\mathcal{N}}\geq\mathcal{C}_{\mathcal{N}_{2,3}}+\mathcal{E}_1(w_1)+o_{\beta_0}(1)+\frac{\mu_2}{4}\|w_2\|_4^4+o_n(1).
\end{eqnarray*}
Since $\beta_{2,3}<0$, it is well known that $\mathcal{C}_{\mathcal{N}_{2,3}}=\sum_{j=2}^3\mathcal{E}_j(w_j)$. Thus, this is impossible for sufficiently small $\beta_0>0$, owing to Lemma~\ref{lem0001}. It remains to exclude the case $|y_{1,n}-y_{2,n}|\to+\infty$ as $n\to\infty$. In this case, it follows from
\begin{eqnarray*}
v_{2,2,n}(x)=v_{2,1,n}(x+y_{2,n}-y_{1,n})
\end{eqnarray*}
that $\|v_{2,1,n}\|_4^4\geq\|v_{2,1,\infty}\|_4^4+\|v_{2,2,\infty}\|_4^4+o_n(1)$. Similarly,
\begin{eqnarray*}
\mathcal{C}_{\mathcal{N}}\geq\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_3(w_3)+o_{\beta_0}(1)+\frac{\mu_2}{4}\|w_2\|_4^4+o_n(1).
\end{eqnarray*}
This is also impossible for sufficiently small $\beta_0>0$, owing to Lemma~\ref{lem0001}. Thus, the case~$(1)$ cannot happen. Similarly, we can show that the case~$(2)$ cannot happen either, which implies that the case~$(3)$ must happen. Now, by a standard argument, $\mathcal{C}_{\mathcal{N}}$ is attained by $\overrightarrow{\widehat{v}}_{1,\infty}=(|v_{1,1,\infty}|,|v_{2,1,\infty}|,|v_{3,1,\infty}|)$. Thus, by the Harnack inequality and Lemma~\ref{lemn0001}, \eqref{eqnew0001} has a ground state with the Morse index 3.
\end{proof}
\subsection{Ground states with the Morse index 2}
In this section, we shall study the existence of the ground states of \eqref{eqnew0001} with the Morse index 2, in the total-mixed case~$(d)$: $\beta_{1,2}>0$, $\beta_{1,3}>0$ and $\beta_{2,3}<0$. Let
\begin{eqnarray*}
\mathcal{M}_{12,3}=\{\overrightarrow{u}\in\widehat{\mathcal{H}}_{12,3}\mid \overrightarrow{\mathcal{\widehat{Q}}}_{12,3}(u)=(\mathcal{G}_1(\overrightarrow{u})+\mathcal{G}_2(\overrightarrow{u}), \mathcal{G}_3(\overrightarrow{u}))=\overrightarrow{0}\},
\end{eqnarray*}
where $\mathcal{G}_j(\overrightarrow{u})=\|u_j\|_{\lambda_j}^2-\mu_j\|u_j\|_4^4-\sum_{i=1,i\not=j}^3\beta_{i,j}\|u_iu_j\|_2^2$ and $\widehat{\mathcal{H}}_{12,3}=((\mathcal{H}_1\times\mathcal{H}_2)\backslash\{\overrightarrow{0}\})\times(\mathcal{H}_3\backslash\{0\})$.
Let
\begin{eqnarray*}
\mathcal{C}_{\mathcal{M}_{12,3}}=\inf_{\mathcal{M}_{12,3}}\mathcal{E}(\overrightarrow{u}).
\end{eqnarray*}
Then, $\mathcal{C}_{\mathcal{M}_{12,3}}$ is well defined and nonnegative. Using $(0,w_2,w_{3,R})$ as a test function and calculating as in the proof of \cite[Theorem~1]{LW05} yields
\begin{eqnarray}\label{eqnew0097}
\mathcal{C}_{\mathcal{M}_{12,3}}\leq\sum_{j=2}^3\mathcal{E}_j(w_j).
\end{eqnarray}
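A word on \eqref{eqnew0097}: with the first component equal to zero, the constraints defining $\mathcal{M}_{12,3}$ reduce to the two Nehari-type conditions for $(t(R)w_2,s(R)w_{3,R})$ with coupling $\beta_{2,3}$, and since $\|w_2w_{3,R}\|_2^2\to0$ as $R\to+\infty$ by Lemma~\ref{lemn0010}, we have $t(R),s(R)\to1$ and
\begin{eqnarray*}
\mathcal{C}_{\mathcal{M}_{12,3}}\leq\mathcal{E}\big((0,t(R)w_2,s(R)w_{3,R})\big)\to\mathcal{E}_2(w_2)+\mathcal{E}_3(w_3)\quad\text{as }R\to+\infty.
\end{eqnarray*}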
\begin{lemma}\label{lemn0002}
There exists $\beta_0>0$ such that $\mathcal{M}_{12,3}$ contains a $(PS)$ sequence at the least energy value $\mathcal{C}_{\mathcal{M}_{12,3}}$ for $\beta_{1,2}>0$, $0<\beta_{1,3}<\beta_0$ and $\beta_{2,3}<0$.
Moreover, any positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{M}_{12,3}$ is a ground state of \eqref{eqnew0001} with the Morse index 2.
\end{lemma}
\begin{proof}
The proof is similar to that of \cite[Lemma~2.1]{SW15}, so we only sketch it. By \eqref{eqnew0097},
\begin{eqnarray*}
\mathcal{M}^*_{12,3}=\{\overrightarrow{u}\in\mathcal{M}_{12,3}\mid\sum_{j=1}^3\|u_{j}\|_{\lambda_j}^2\leq8\sum_{j=2}^3\mathcal{E}_j(w_j)\}
\not=\emptyset.
\end{eqnarray*}
Moreover, since $\beta_{2,3}<0$, there exists $\beta_0>0$ such that
\begin{eqnarray}\label{eqnew0021}
\min\{\sum_{j=1}^2\mu_j\|u_j\|_4^4+2\beta_{1,2}\|u_1u_2\|_2^2, \|u_{3}\|_{\lambda_3}^2\}\geq C_{\beta_{1,2}}>0
\end{eqnarray}
for all $\overrightarrow{u}\in\mathcal{M}^*_{12,3}$ with $\beta_{1,3}<\beta_0$, where $C_{\beta_{1,2}}$ is a constant depending only on $\beta_{1,2}$. It follows that
\begin{eqnarray*}
\Upsilon=\left(\aligned &\sum_{j=1}^2\mu_j\|u_j\|_4^4+2\beta_{1,2}\|u_1u_2\|_2^2\quad &\sum_{j=1}^2\beta_{j,3}\|u_ju_3\|_2^2\\
&\sum_{j=1}^2\beta_{j,3}\|u_ju_3\|_2^2\quad&\mu_3\|u_3\|_4^4\endaligned\right)
\end{eqnarray*}
is strictly diagonally dominant and $|\text{det}(\Upsilon)|\geq C'_{\beta_{1,2}}>0$ for $\overrightarrow{u}\in\mathcal{M}^*_{12,3}$.
Here, $C'_{\beta_{1,2}}$ is also a constant depending only on $\beta_{1,2}$.
Now, we can follow the argument in the proof of \cite[Lemma~2.1]{SW15} to obtain a $(PS)$ sequence at the least energy value $\mathcal{C}_{\mathcal{M}_{12,3}}$ for $\beta_{1,2}>0$, $0<\beta_{1,3}<\beta_0$ and $\beta_{2,3}<0$. For the Morse index, the proof is similar to that of Lemma~\ref{lemn0001}, since for any positive minimizer $\overrightarrow{v}$ of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{M}_{12,3}$ we now have $\mathcal{H}=\mathcal{T}_{\overrightarrow{v}}\mathcal{M}_{12,3}
\bigoplus(\mathbb{R}\overrightarrow{v}_{1,2}\times\mathbb{R}\overrightarrow{v}_3)$, where $\overrightarrow{v}_{1,2}=(v_1,v_2,0)$ and $\overrightarrow{v}_3=(0,0,v_3)$.
\end{proof}
By Lemma~\ref{lemn0002}, to prove the existence of the ground states of \eqref{eqnew0001} with the Morse index 2, it is sufficient to prove the existence of a positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{M}_{12,3}$. Let
\begin{eqnarray}
\overline{\beta}_{1,2}=\max\bigg\{\inf_{u\in H^1(\mathbb{R}^N)\backslash\{0\}}\frac{\|u\|_{\lambda_2}^2}{\|w_1u\|_2^2},
\inf_{u\in H^1(\mathbb{R}^N)\backslash\{0\}}\frac{\|u\|_{\lambda_1}^2}{\|w_2u\|_2^2}\bigg\}\label{eqn0096}
\end{eqnarray}
where $w_j$ is the unique solution of \eqref{eqnew0013}.
\begin{lemma}\label{lem0003}
Let $\beta_{1,2}>0$, $0<\beta_{1,3}<\beta_0$ and $\beta_{2,3}<0$, where $\beta_0$ is given by Lemma~\ref{lemn0002}. If $\lambda_1<\min\{\lambda_2,\lambda_3\}$, then
\begin{eqnarray*}
\mathcal{C}_{\mathcal{M}_{12,3}}<\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_3(w_3)
\end{eqnarray*}
for $\beta_{1,2}>\overline{\beta}_{1,2}$, where $\mathcal{C}_{\mathcal{N}_{1,2}}$ is given by \eqref{eqn0110}.
\end{lemma}
\begin{proof}
Since this proof is similar to that of Lemma~\ref{lem0001}, we only sketch it and point out the differences. By \cite[Theorems~1 and 2]{AC06}, $\mathcal{C}_{\mathcal{N}_{1,2}}$ is attained by a positive and radially symmetric function $\overrightarrow{\varphi}$ for $\beta_{1,2}>\overline{\beta}_{1,2}$. Let $w_{3,R}=w_3(x-Re_1)$, where $e_1\in\mathbb{R}^N$ with $|e_1|=1$. We consider the following system
\begin{eqnarray*}\label{eqnew0020}
\left\{\aligned\sum_{j=1}^2\|\varphi_j\|_{\lambda_j}^2=&(\sum_{j=1}^2\mu_j\|\varphi_j\|_4^4
+2\beta_{1,2}\|\varphi_1\varphi_2\|_2^2)t^2(R)+(\sum_{j=1}^2\beta_{j,3}\|\varphi_jw_{3,R}\|_2^2)s^2(R)\\
\|w_{3}\|_{\lambda_3}^2=&(\sum_{j=1}^2\beta_{j,3}\|\varphi_jw_{3,R}\|_2^2)t^2(R)+\mu_3\|w_{3}\|_4^4s^2(R).
\endaligned\right.
\end{eqnarray*}
By Lemma~\ref{lemn0010} and $\lambda_1<\min\{\lambda_2,\lambda_3\}$, $\sum_{j=1}^2\beta_{j,3}\|\varphi_jw_{3,R}\|_2^2>0$ for $R>0$ sufficiently large. Thus, as in the proof of Lemma~\ref{lem0001}, the above linear system is uniquely solvable for $\beta_{1,2}>\overline{\beta}_{1,2}$ and the unique solution is given by
\begin{eqnarray*}
t^2(R)=1-C(\beta_{1,3}\|\varphi_1w_{3,R}\|_2^2
+\beta_{2,3}\|\varphi_2w_{3,R}\|_2^2)
\end{eqnarray*}
and
\begin{eqnarray*}
s^2(R)=1-C'(\beta_{1,3}\|\varphi_1w_{3,R}\|_2^2
+\beta_{2,3}\|\varphi_2w_{3,R}\|_2^2)
\end{eqnarray*}
for sufficiently large $R>0$.
Moreover, $(t(R)\varphi_1, t(R)\varphi_2, s(R)w_{3,R})\in\mathcal{M}_{12,3}$. As in \eqref{eqnew9999}, applying the comparison principle yields that
\begin{eqnarray*}
\varphi_{i}(|x|)\sim |x|^{-\frac{N-1}{2}}e^{-\sqrt{\lambda_i}|x|}\quad\text{as }|x|\to+\infty.
\end{eqnarray*}
Thus, by similar estimates as that used in the proof of Lemma~\ref{lem0001}, it follows from $\lambda_1<\min\{\lambda_2, \lambda_3\}$ that
\begin{eqnarray*}
\mathcal{C}_{\mathcal{M}_{12,3}}\leq\mathcal{E}((t(R)\varphi_1,t(R)\varphi_2, s(R)w_{3,R}))
<\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_3(w_3),
\end{eqnarray*}
for sufficiently large $R>0$.
\end{proof}
Now, we are prepared to prove the following existence result.
\begin{proposition}\label{prop0003}
Let $\beta_{1,2}>\overline{\beta}_{1,2}$, $0<\beta_{1,3}<\beta_0$ and $\beta_{2,3}<0$, where $\overline{\beta}_{1,2}$ and $\beta_0$ are given by \eqref{eqn0096} and Lemma~\ref{lemn0002}, respectively. If $\lambda_1<\min\{\lambda_2,\lambda_3\}$, then there exists a positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{M}_{12,3}$.
That is, \eqref{eqnew0001} has a ground state with the Morse index 2.
\end{proposition}
\begin{proof}
By Lemma~\ref{lemn0002}, $\mathcal{M}_{12,3}$ contains a $(PS)$ sequence of $\mathcal{E}(\overrightarrow{u})$, say $\{\overrightarrow{u}_n\}$, at the least energy value $\mathcal{C}_{\mathcal{M}_{12,3}}$. Since $\beta_{1,2}>\overline{\beta}_{1,2}$, by \eqref{eqnew0021} and \cite[Theorems~1 and 2]{AC06}, applying the Lions lemma and the Sobolev embedding theorem in a standard way yields that there exist $\{y_n\},\{z_n\}\subset\mathbb{R}^N$ such that $v_{j,n}=u_{j,n}(x+y_n)\rightharpoonup v_{j,\infty}\not=0$ for both $j=1,2$ and $\widehat{v}_{3,n}=u_{3,n}(x+z_n)\rightharpoonup \widehat{v}_{3,\infty}\not=0$ weakly in $H^1(\mathbb{R}^N)$ as $n\to\infty$. Indeed, if we denote $v_{3,n}=u_{3,n}(x+y_n)$ and $\widehat{v}_{j,n}=u_{j,n}(x+z_n)$ for both $j=1,2$, then $v_{3,n}\rightharpoonup v_{3,\infty}$ and $\widehat{v}_{j,n}\rightharpoonup \widehat{v}_{j,\infty}$ weakly in $H^1(\mathbb{R}^N)$ as $n\to\infty$ for both $j=1,2$. Now, if $\widehat{v}_{j,\infty}\not=0$ for all $j=1,2,3$, then, arguing as in the proof of Proposition~\ref{prop0001}, we can show that there exists a positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{M}_{12,3}$. Otherwise, if
either $v_{1,\infty}=0$ or $v_{2,\infty}=0$, then, by taking $\beta_0$ sufficiently small if necessary and using arguments similar to those in the proof of Proposition~\ref{prop0001}, we obtain $\mathcal{C}_{\mathcal{M}_{12,3}}\geq\min\{\mathcal{E}_1(w_1), \mathcal{E}_2(w_2)\}+\mathcal{E}_3(w_3)-C\beta_0$, which contradicts \cite[Theorems~1 and 2]{AC06}, $\beta_{1,2}>\overline{\beta}_{1,2}$ and Lemma~\ref{lem0003}.
We next claim that either $v_{3,\infty}\not=0$ or $\widehat{v}_{j,\infty}\not=0$ for both $j=1,2$. Suppose the contrary; then, one of the following cases must happen:
\begin{enumerate}
\item[$(i)$] $v_{3,\infty}=0$, $\widehat{v}_{1,\infty}=0$ and $\widehat{v}_{2,\infty}\not=0$.
\item[$(ii)$] $v_{3,\infty}=0$, $\widehat{v}_{1,\infty}\not=0$ and $\widehat{v}_{2,\infty}=0$.
\item[$(iii)$] $v_{3,\infty}=0$, $\widehat{v}_{1,\infty}=0$ and $\widehat{v}_{2,\infty}=0$.
\end{enumerate}
Since $\{\overrightarrow{u}_n\}$ is a $(PS)$ sequence, it is standard to show that
$$
\overrightarrow{v}_\infty=(v_{1,\infty},v_{2,\infty},v_{3,\infty})\quad\text{and}\quad
\overrightarrow{\widehat{v}}_\infty=(\widehat{v}_{1,\infty},\widehat{v}_{2,\infty},\widehat{v}_{3,\infty})
$$
are both critical points of $\mathcal{E}(\overrightarrow{u})$. In the case~$(i)$, $(v_{1,\infty},v_{2,\infty})$ is a nontrivial critical point of $\mathcal{E}_{1,2}(\overrightarrow{\phi})$ and $(\widehat{v}_{2,\infty},\widehat{v}_{3,\infty})$ is a nontrivial critical point of $\mathcal{E}_{2,3}(\overrightarrow{\phi})$. Since $\widehat{v}_{1,\infty}=0$ and $v_{1,\infty}\not=0$, by the Sobolev embedding theorem, $|y_n-z_n|\to+\infty$ as $n\to\infty$. Now, as in the proof of Proposition~\ref{prop0001}, we have the following energy estimate:
\begin{eqnarray*}
\mathcal{C}_{\mathcal{M}_{12,3}}&=&\frac{1}{4}\sum_{j=1}^3\|u_{j,n}\|_{\lambda_j}^2+o_n(1)\\
&=&\frac{1}{4}\sum_{j=1}^2\|v_{j,n}\|_{\lambda_j}^2+\frac{1}{4}\|\widehat{v}_{3,n}\|_{\lambda_3}^2+o_n(1)\\
&\geq&\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_3(w_3)+\frac{1}{4}\|w_2\|_{\lambda_2}^2+o_{\beta_0}(1)+o_n(1),
\end{eqnarray*}
where $o_{\beta_0}(1)\to0$ as $\beta_0\to0$. This contradicts Lemma~\ref{lem0003} by taking $\beta_0>0$ sufficiently small. Thus, the case~$(i)$ is impossible. Similarly, the case~$(ii)$ is also impossible. It remains to exclude the case~$(iii)$. In this case,
\begin{eqnarray*}
\mathcal{C}_{\mathcal{M}_{12,3}}&=&\frac{1}{4}\sum_{j=1}^3\|u_{j,n}\|_{\lambda_j}^2+o_n(1)\\
&=&\frac{1}{4}\sum_{j=1}^2\|v_{j,n}\|_{\lambda_j}^2
+\frac{1}{4}\|\widehat{v}_{3,n}\|_{\lambda_3}^2+o_n(1)\\
&\geq&\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_3(w_3)+o_n(1),
\end{eqnarray*}
which contradicts Lemma~\ref{lem0003}. Therefore, without loss of generality, we may assume that $(v_{1,\infty},v_{2,\infty},v_{3,\infty})$ is a nontrivial critical point of $\mathcal{E}(\overrightarrow{u})$. By a standard argument, we can show that $\mathcal{C}_{\mathcal{M}_{12,3}}$ is attained by $(|v_{1,\infty}|,|v_{2,\infty}|,|v_{3,\infty}|)$. By the Harnack inequality and Lemma~\ref{lemn0002}, $(|v_{1,\infty}|,|v_{2,\infty}|,|v_{3,\infty}|)$ is a ground state of \eqref{eqnew0001} with the Morse index 2.
\end{proof}
We further need an existence result for the purely attractive case: $\beta_{1,2}>0$, $\beta_{1,3}>0$ and $\beta_{2,3}>0$. By checking the proof of Lemma~\ref{lemn0002}, we can see that it still works for $\beta_{1,2}>0$ and $0<\beta_{1,3},\beta_{2,3}<\beta_0$. Thus, we can still work in $\mathcal{M}_{12,3}$ for $\beta_{1,2}>0$ and $0<\beta_{1,3},\beta_{2,3}<\beta_0$. Since the Schwarz symmetrization applies in this case, the minimizing sequence at the least energy value $\mathcal{C}_{\mathcal{M}_{12,3}}$ can be chosen to be radially symmetric. Recalling that $\mathcal{C}_{\mathcal{N}_{1,2}}<\min\{\mathcal{E}_1(w_1), \mathcal{E}_2(w_2)\}$ for $\beta_{1,2}>\overline{\beta}_{1,2}$ by \cite[Theorems~1 and 2]{AC06}, a standard argument yields the following:
\begin{proposition}\label{prop0014}
If $\beta_{1,2}>\overline{\beta}_{1,2}$ and $0<\beta_{1,3},\beta_{2,3}<\beta_0$, then there exists a positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{M}_{12,3}$.
That is, \eqref{eqnew0001} has a ground state with the Morse index 2.
\end{proposition}
\subsection{Ground states with the Morse index 1}
In this section, we shall study the existence of the ground states with the Morse index 1. We define another Nehari manifold of $\mathcal{E}(\overrightarrow{u})$ as follows:
\begin{eqnarray*}
\mathcal{M}=\{\overrightarrow{u}\in\mathcal{H}\backslash\{\overrightarrow{0}\}\mid \mathcal{Q}(u)=\sum_{j=1}^3\mathcal{G}_j(\overrightarrow{u})=0\},
\end{eqnarray*}
where $\mathcal{G}_j(\overrightarrow{u})=\|u_j\|_{\lambda_j}^2-\mu_j\|u_j\|_4^4-\sum_{i=1,i\not=j}^3\beta_{i,j}\|u_iu_j\|_2^2$.
Let
\begin{eqnarray*}
\mathcal{C}_{\mathcal{M}}=\inf_{\mathcal{M}}\mathcal{E}(\overrightarrow{u}).
\end{eqnarray*}
Then, $\mathcal{C}_{\mathcal{M}}$ is well defined and nonnegative.
\begin{lemma}\label{lemn0003}
Let $\beta_{1,2}>0$, $\beta_{1,3}>0$ and $\beta_{2,3}>0$. Then, $\mathcal{M}$ contains a $(PS)$ sequence at the least energy value $\mathcal{C}_{\mathcal{M}}$. Moreover, any positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{M}$ is a ground state of \eqref{eqnew0001} with the Morse index 1.
\end{lemma}
\begin{proof}
Since $\mathcal{M}$ is homeomorphic to the set
\begin{eqnarray*}
\mathcal{O}=\{\overrightarrow{u}\in\mathcal{H}\backslash\{\overrightarrow{0}\}
\mid\sum_{j=1}^3\mu_j\|u_j\|_4^4+2\sum_{i,j=1,i<j}^3\beta_{i,j}\|u_iu_j\|_2^2>0\},
\end{eqnarray*}
the conclusion follows from a standard argument.
\end{proof}
By Lemma~\ref{lemn0003}, to prove the existence of the ground states of \eqref{eqnew0001} with the Morse index 1, it is sufficient to prove the existence of a positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{M}$.
Let
\begin{eqnarray*}
\mathcal{M}_{i,j}=\{\overrightarrow{\phi}\in\mathcal{H}_{i,j}\backslash\{\overrightarrow{0}\}\mid \mathcal{Q}_{i,j}(\phi)=\widehat{\mathcal{G}}_i(\overrightarrow{\phi})+\widehat{\mathcal{G}}_j(\overrightarrow{\phi})=0\},
\end{eqnarray*}
where $\mathcal{H}_{i,j}=\mathcal{H}_i\times\mathcal{H}_j$, $\widehat{\mathcal{G}}_j(\overrightarrow{\phi})=\|\phi_j\|_{\lambda_j}^2
-\mu_j\|\phi_j\|_{4}^4-\beta_{i,j}\|\phi_i\phi_j\|_2^2$ and $(i,j)$ equals $(1,2)$, $(1,3)$ or $(2,3)$. We define
\begin{eqnarray*}
\mathcal{C}_{\mathcal{M}_{i,j}}=\inf_{\mathcal{M}_{i,j}}\mathcal{E}_{i,j}(\overrightarrow{\phi}).
\end{eqnarray*}
Then, $\mathcal{C}_{\mathcal{M}_{i,j}}$ is well defined and nonnegative, and it can also be expressed variationally as follows:
\begin{eqnarray*}\label{eqn0080}
\mathcal{C}_{\mathcal{M}_{i,j}}=\inf_{\overrightarrow{u}\in(\mathcal{H}_i\times\mathcal{H}_j)\backslash\{\overrightarrow{0}\}}
\frac{(\|u_i\|_{\lambda_i}^2+\|u_j\|_{\lambda_j}^2)^2}{4(\mu_i\|u_i\|_4^4+\mu_j\|u_j\|_4^4+2\beta_{i,j}\|u_iu_j\|_2^2)}.
\end{eqnarray*}
Moreover, if $\beta_{i,j}>\overline{\beta}_{i,j}$ then $\mathcal{C}_{\mathcal{M}_{i,j}}=\mathcal{C}_{\mathcal{N}_{i,j}}$ is attained by $\overrightarrow{\varphi}^{i,j}$, which is positive and radially symmetric. Here, $\overline{\beta}_{i,j}$ is defined analogously to $\overline{\beta}_{1,2}$ in \eqref{eqn0096} (cf. \cite[Theorems~1 and 2]{AC06}). Clearly, $\overrightarrow{\varphi}^{i,j}$ is also a solution of \eqref{eqn0099}.
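For completeness, here is a minimal sketch of the variational characterization above, using only the definition of $\mathcal{M}_{i,j}$: given $\overrightarrow{u}$ with $\mu_i\|u_i\|_4^4+\mu_j\|u_j\|_4^4+2\beta_{i,j}\|u_iu_j\|_2^2>0$, the unique $t>0$ with $t\overrightarrow{u}\in\mathcal{M}_{i,j}$ satisfies
\begin{eqnarray*}
t^2=\frac{\|u_i\|_{\lambda_i}^2+\|u_j\|_{\lambda_j}^2}{\mu_i\|u_i\|_4^4+\mu_j\|u_j\|_4^4+2\beta_{i,j}\|u_iu_j\|_2^2},\qquad
\mathcal{E}_{i,j}(t\overrightarrow{u})=\frac{t^2}{4}\big(\|u_i\|_{\lambda_i}^2+\|u_j\|_{\lambda_j}^2\big),
\end{eqnarray*}
and minimizing $\mathcal{E}_{i,j}(t\overrightarrow{u})$ over such $\overrightarrow{u}$ gives the displayed formula for $\mathcal{C}_{\mathcal{M}_{i,j}}$.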
\begin{lemma}\label{lem0009}
If $\beta_{i,j}\to+\infty$, then
\begin{eqnarray*}\label{eq0011}
(\sqrt{\beta_{i,j}}\varphi^{i,j}_i, \sqrt{\beta_{i,j}}\varphi^{i,j}_j)\to(\widetilde{\varphi}^{i,j}_i, \widetilde{\varphi}^{i,j}_j)
\end{eqnarray*}
up to a subsequence,
where $\overrightarrow{\widetilde{\varphi}}_{i,j}=(\widetilde{\varphi}^{i,j}_i, \widetilde{\varphi}^{i,j}_j)$, which is positive and radially symmetric, is a minimizer of the following minimizing problem:
\begin{eqnarray}\label{eqn0081}
\widetilde{\mathcal{D}}_{i,j}=\inf_{\overrightarrow{u}\in(\mathcal{H}_i\times\mathcal{H}_j)\backslash\{\overrightarrow{0}\}}
\frac{(\|u_i\|_{\lambda_i}^2+\|u_j\|_{\lambda_j}^2)^2}{8\|u_iu_j\|_2^2}.
\end{eqnarray}
\end{lemma}
\begin{proof}
Using the Schwarz symmetrization and the Sobolev embedding theorem in a standard way yields that $\widetilde{\mathcal{D}}_{i,j}$ is attained by $\overrightarrow{\check{\varphi}}_{i,j}$, which is positive and radially symmetric. Testing $\mathcal{C}_{\mathcal{M}_{i,j}}$ by $\overrightarrow{\check{\varphi}}_{i,j}$ yields that $\mathcal{C}_{\mathcal{M}_{i,j}}\beta_{i,j}\leq\widetilde{\mathcal{D}}_{i,j}+o(1)$ as $\beta_{i,j}\to+\infty$, where $o(1)\to0$ as $\beta_{i,j}\to+\infty$. It follows that $(\sqrt{\beta_{i,j}}\varphi^{i,j}_i, \sqrt{\beta_{i,j}}\varphi^{i,j}_j)$ is bounded in $\mathcal{H}_i\times\mathcal{H}_j$ for $\beta_{i,j}>0$ sufficiently large. On the other hand, it is easy to see that $\mathcal{C}_{\mathcal{M}_{i,j}}\to0$ as $\beta_{i,j}\to+\infty$. It follows that $\|\varphi_i\|_{\lambda_i}^2+\|\varphi_j\|_{\lambda_j}^2\to0$ as $\beta_{i,j}\to+\infty$. By the H\"older and Sobolev inequalities, $\mu_i\|\varphi_i\|_4^4+\mu_j\|\varphi_j\|_4^4=o(\|\varphi_i\|_{\lambda_i}^2+\|\varphi_j\|_{\lambda_j}^2)$ as $\beta_{i,j}\to+\infty$. Thus, testing $\widetilde{\mathcal{D}}_{i,j}$ by $(\sqrt{\beta_{i,j}}\varphi^{i,j}_i, \sqrt{\beta_{i,j}}\varphi^{i,j}_j)$ yields that
$\widetilde{\mathcal{D}}_{i,j}\leq\mathcal{C}_{\mathcal{M}_{i,j}}\beta_{i,j}+o(1)$ as $\beta_{i,j}\to+\infty$. Therefore, $\mathcal{C}_{\mathcal{M}_{i,j}}\beta_{i,j}=\widetilde{\mathcal{D}}_{i,j}+o(1)$ as $\beta_{i,j}\to+\infty$. Since $\overrightarrow{\varphi}^{i,j}$ is radially symmetric, it is standard to show that
\begin{eqnarray*}
(\sqrt{\beta_{i,j}}\varphi^{i,j}_i, \sqrt{\beta_{i,j}}\varphi^{i,j}_j)\to(\widetilde{\varphi}^{i,j}_i, \widetilde{\varphi}^{i,j}_j)
\end{eqnarray*}
as $\beta_{i,j}\to+\infty$ up to a subsequence, where $\overrightarrow{\widetilde{\varphi}}_{i,j}$, which is positive and radially symmetric, is a minimizer of \eqref{eqn0081}.
\end{proof}
Let
\begin{eqnarray}\label{eqn0078}
\rho_{ij,l}=\inf_{u\in H^1(\mathbb{R}^N)\backslash\{0\}}\frac{\|u\|_{\lambda_l}^2}{\int_{\mathbb{R}^N}((\varphi^{i,j}_i)^2+(\varphi^{i,j}_j)^2)u^2dx},
\end{eqnarray}
where $i,j,l=1,2,3$ with $i\not=j$, $i\not=l$ and $j\not=l$.
It follows from Lemma~\ref{lem0009} that
\begin{eqnarray}\label{eqn0077}
\rho_{ij,l}=\beta_{i,j}(\widehat{\rho}_{ij,l}+o(1))\text{ as }\beta_{i,j}\to+\infty,
\end{eqnarray}
where
\begin{eqnarray}\label{eqnew3344}
\widehat{\rho}_{ij,l}=\inf_{u\in H^1(\mathbb{R}^N)\backslash\{0\}}\frac{\|u\|_{\lambda_l}^2}{\int_{\mathbb{R}^N}((\widetilde{\varphi}^{i,j}_i)^2+(\widetilde{\varphi}^{i,j}_j)^2)u^2dx}.
\end{eqnarray}
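Relation \eqref{eqn0077} follows formally from Lemma~\ref{lem0009}: writing $\sqrt{\beta_{i,j}}\,\varphi^{i,j}_k=\widetilde{\varphi}^{i,j}_k+o(1)$ in $\mathcal{H}_k$ for $k=i,j$, the H\"older and Sobolev inequalities give, for every $u\in H^1(\mathbb{R}^N)\backslash\{0\}$,
\begin{eqnarray*}
\int_{\mathbb{R}^N}\big((\varphi^{i,j}_i)^2+(\varphi^{i,j}_j)^2\big)u^2dx
=\frac{1}{\beta_{i,j}}\int_{\mathbb{R}^N}\big((\widetilde{\varphi}^{i,j}_i)^2+(\widetilde{\varphi}^{i,j}_j)^2\big)u^2dx
+o\big(\beta_{i,j}^{-1}\big)\|u\|_{\lambda_l}^2
\end{eqnarray*}
as $\beta_{i,j}\to+\infty$; inserting this into \eqref{eqn0078} and taking the infimum over $u$ yields \eqref{eqn0077}.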
Since $\overrightarrow{\varphi}^{i,j}$ is a solution of \eqref{eqn0099}, by Lemma~\ref{lem0009}, $\overrightarrow{\widetilde{\varphi}}_{i,j}$ also satisfies the following system:
\begin{equation}\label{eqnewnew0009}
\left\{\aligned&-\Delta\widetilde{\varphi}^{i,j}_i+\lambda_i\widetilde{\varphi}^{i,j}_i
=(\widetilde{\varphi}^{i,j}_j)^2\widetilde{\varphi}^{i,j}_i\quad\text{in }\mathbb{R}^N,\\
&-\Delta\widetilde{\varphi}^{i,j}_j+\lambda_j\widetilde{\varphi}^{i,j}_j
=(\widetilde{\varphi}^{i,j}_i)^2\widetilde{\varphi}^{i,j}_j\quad\text{in }\mathbb{R}^N.
\endaligned\right.
\end{equation}
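(Indeed, writing $\Phi_k=\sqrt{\beta_{i,j}}\,\varphi^{i,j}_k$, the system \eqref{eqn0099} becomes $-\Delta\Phi_k+\lambda_k\Phi_k=\beta_{i,j}^{-1}\mu_k\Phi_k^3+\Phi_m^2\Phi_k$ for $\{k,m\}=\{i,j\}$, and letting $\beta_{i,j}\to+\infty$ along the subsequence of Lemma~\ref{lem0009} gives \eqref{eqnewnew0009}.)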
\begin{proposition}\label{prop0005}
Let $\beta_{1,2}>0$, $\beta_{1,3}>0$ and $\beta_{2,3}>0$. Then, there exists $\widehat{\beta}_0>0$ such that if $\min\{\beta_{i,j}\}>\widehat{\beta}_0$ and
\begin{eqnarray}\label{eqn0079}
\widehat{\rho}_{jl,i}<\frac{\beta_{i,l}}{\beta_{j,l}}<\frac{1}{\widehat{\rho}_{il,j}}
\end{eqnarray}
for all $i,j,l=1,2,3$ with $i\not=j$, $i\not=l$ and $l\not=j$, then $\mathcal{C}_{\mathcal{M}}<\min\{\mathcal{C}_{\mathcal{M}_{i,j}}\}$ and consequently there exists a positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{M}$, provided that $|\lambda_i-\lambda_j|\ll1$ for all $i,j=1,2,3$ with $i\not=j$. That is, \eqref{eqnew0001} has a ground state with the Morse index 1.
\end{proposition}
\begin{proof}
Let us first prove that $\widehat{\rho}_{jl,i}<\frac{1}{\widehat{\rho}_{il,j}}$ for all $i,j,l=1,2,3$ with $i\not=j$, $i\not=l$ and $l\not=j$, provided that $|\lambda_i-\lambda_j|\ll1$ for all $i,j=1,2,3$ with $i\not=j$. Without loss of generality, we assume that $\lambda_1\leq\lambda_2\leq\lambda_3$. Testing $\widehat{\rho}_{13,2}$ by $\widetilde{\varphi}^{1,3}_3$ yields that
\begin{eqnarray*}
\widehat{\rho}_{13,2}\leq\frac{\|\widetilde{\varphi}^{1,3}_3\|_{\lambda_2}^2}{\|\widetilde{\varphi}^{1,3}_1\widetilde{\varphi}^{1,3}_3\|_2^2
+\|\widetilde{\varphi}^{1,3}_3\|_4^4}
<\frac{\|\widetilde{\varphi}^{1,3}_3\|_{\lambda_3}^2+(\lambda_2-\lambda_3)\|\widetilde{\varphi}^{1,3}_3\|_{2}^2}
{\|\widetilde{\varphi}^{1,3}_1\widetilde{\varphi}^{1,3}_3\|_2^2}
\leq1.
\end{eqnarray*}
Similarly, testing $\widehat{\rho}_{23,1}$ by $\widetilde{\varphi}^{2,3}_3$ yields that $\widehat{\rho}_{23,1}<1$. For $\widehat{\rho}_{12,3}$, by the Pohozaev identity,
\begin{eqnarray}\label{eqnewnew0010}
\lambda_j\|w_{j}\|_2^2=\frac{(4-N)\mu_j}{4}\|w_{j}\|_4^4,
\end{eqnarray}
where $w_j$ is the unique solution of \eqref{eqnew0013}. On the other hand, it is well known that $\inf_{u\in H^1(\mathbb{R}^N)\backslash\{0\}}\frac{\|u\|_{\lambda_j}^2}{\|u\|_4^2}=\mu_j\|w_{j}\|_4^2$. Thus, by \eqref{eqnewnew0009}, $\mu_1\|w_{1}\|_4^2\leq\|\widetilde{\varphi}^{1,2}_2\|_4^2$. Now, testing $\widehat{\rho}_{12,3}$ by $\widetilde{\varphi}^{1,2}_2$,
\begin{eqnarray*}
\widehat{\rho}_{12,3}\leq\frac{\|\widetilde{\varphi}^{1,2}_2\|_{\lambda_3}^2}{\|\widetilde{\varphi}^{1,2}_1\widetilde{\varphi}^{1,2}_2\|_2^2
+\|\widetilde{\varphi}^{1,2}_2\|_4^4}
\leq1+\frac{(\lambda_3-\lambda_2)\|\widetilde{\varphi}^{1,2}_2\|_{2}^2-\mu_1^2\|w_{1}\|_4^4}
{\|\widetilde{\varphi}^{1,2}_1\widetilde{\varphi}^{1,2}_2\|_2^2+\mu_1^2\|w_{1}\|_4^4}.
\end{eqnarray*}
Since $\|\widetilde{\varphi}^{1,2}_2\|_{2}^2\leq\frac{4}{\lambda_2}\widetilde{\mathcal{D}}_{1,2}$, testing $\widetilde{\mathcal{D}}_{1,2}$ by $(w_1,w_1)$ and using \eqref{eqnewnew0010} yields that
\begin{eqnarray*}
\widetilde{\mathcal{D}}_{1,2}&\leq&\frac{(\|w_{1}\|_{\lambda_1}^2+\|w_{1}\|_{\lambda_2}^2)^2}{8\|w_{1}\|_4^4}\leq\mu_1^2(\frac12
+\frac{C(\lambda_2-\lambda_1)}{\lambda_1}+\frac{C'(\lambda_2-\lambda_1)^2}{\lambda_1^2})\|w_{1}\|_4^4.
\end{eqnarray*}
Thus, there exists $\delta_0>0$, depending only on $\min\{\lambda_i\}$, such that if $|\lambda_i-\lambda_j|\leq\delta_0$, then
$\widehat{\rho}_{jl,i}<\frac{1}{\widehat{\rho}_{il,j}}$ for all $i,j,l=1,2,3$ with $i\not=j$, $i\not=l$ and $l\not=j$. It follows that there exists $\widehat{\beta}_0>0$ such that \eqref{eqn0079} holds for $\beta_{i,j}>\widehat{\beta}_0$ and $|\beta_{i,j}-\beta_{i,l}|\ll1$ for all $i,j,l=1,2,3$ with $i\not=j$, $i\not=l$ and $l\not=j$. Since $\mathcal{C}_{\mathcal{M}}$ can also be expressed variationally as follows:
\begin{eqnarray*}
\mathcal{C}_{\mathcal{M}}=\inf_{\overrightarrow{u}\in\mathcal{H}\backslash\{\overrightarrow{0}\}}
\frac{(\sum_{j=1}^3\|u_j\|_{\lambda_j}^2)^2}
{4(\sum_{j=1}^3\mu_j\|u_j\|_4^4+2\sum_{i,j=1,i<j}^3\beta_{i,j}\|u_iu_j\|_2^2)},
\end{eqnarray*}
testing $\mathcal{C}_{\mathcal{M}}$ by $\overrightarrow{V}_s=(\varphi^{1,2}_1,\varphi^{1,2}_2,su)$ yields that
\begin{eqnarray}
\mathcal{C}_{\mathcal{M}}\leq\mathcal{C}_{\mathcal{M}_{1,2}}+
\frac{s^2}{2}(\|u\|_{\lambda_3}^2-\sum_{j=1}^2\beta_{j,3}\|\varphi^{1,2}_ju\|_2^2)+O(s^4).\label{eq0009}
\end{eqnarray}
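For the reader's convenience, here is a brief sketch of \eqref{eq0009} (a routine expansion in $s$): since $\overrightarrow{\varphi}^{1,2}\in\mathcal{M}_{1,2}$ attains $\mathcal{C}_{\mathcal{M}_{1,2}}$, setting $A=\|\varphi^{1,2}_1\|_{\lambda_1}^2+\|\varphi^{1,2}_2\|_{\lambda_2}^2$ we have $A=\mu_1\|\varphi^{1,2}_1\|_4^4+\mu_2\|\varphi^{1,2}_2\|_4^4+2\beta_{1,2}\|\varphi^{1,2}_1\varphi^{1,2}_2\|_2^2$ and $\mathcal{C}_{\mathcal{M}_{1,2}}=\frac{A}{4}$, so that evaluating the above quotient at $\overrightarrow{V}_s$ gives
\begin{eqnarray*}
\mathcal{C}_{\mathcal{M}}\leq\frac{\big(A+s^2\|u\|_{\lambda_3}^2\big)^2}
{4\big(A+2s^2\sum_{j=1}^2\beta_{j,3}\|\varphi^{1,2}_ju\|_2^2+s^4\mu_3\|u\|_4^4\big)}
=\frac{A}{4}+\frac{s^2}{2}\Big(\|u\|_{\lambda_3}^2-\sum_{j=1}^2\beta_{j,3}\|\varphi^{1,2}_ju\|_2^2\Big)+O(s^4),
\end{eqnarray*}
which is \eqref{eq0009}.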
Let $u=\psi_{12,3}$ be the minimizer of \eqref{eqn0078}. Then, by \eqref{eqn0077}, \eqref{eqn0079} and \eqref{eq0009},
\begin{eqnarray*}
\mathcal{C}_{\mathcal{M}}&\leq&\mathcal{C}_{\mathcal{M}_{1,2}}+\frac{s^2}{2}(\rho_{12,3}\sum_{j=1}^2\|\varphi^{1,2}_j\psi_{12,3}\|_2^2
-\sum_{j=1}^2\beta_{j,3}\|\varphi^{1,2}_j\psi_{12,3}\|_2^2)+O(s^4)\\
&=&\mathcal{C}_{\mathcal{M}_{1,2}}+\frac{s^2}{2}(\beta_{1,2}\widehat{\rho}_{12,3}\sum_{j=1}^2\|\varphi^{1,2}_j\psi_{12,3}\|_2^2
-\sum_{j=1}^2\beta_{j,3}\|\varphi^{1,2}_j\psi_{12,3}\|_2^2)\\
&&+o(s^2)\\
&<&\mathcal{C}_{\mathcal{M}_{1,2}}
\end{eqnarray*}
for $s>0$ sufficiently small by taking $\widehat{\beta}_0>0$ sufficiently large. Similarly,
\begin{eqnarray*}
\mathcal{C}_{\mathcal{M}}<\mathcal{C}_{\mathcal{M}_{1,3}}\quad\text{ for }\beta_{1,3}>\widehat{\beta}_0\quad\text{and}\quad
\mathcal{C}_{\mathcal{M}}<\mathcal{C}_{\mathcal{M}_{2,3}}\quad\text{ for }\beta_{2,3}>\widehat{\beta}_0.
\end{eqnarray*}
Since we have already shown that $\mathcal{C}_{\mathcal{M}}<\min\{\mathcal{C}_{\mathcal{M}_{i,j}}\}$ for $\min\{\beta_{i,j}\}>\widehat{\beta}_0>0$, it is standard to use the Schwarz symmetrization to show that there exists a positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{M}$, which implies that \eqref{eqnew0001} has a ground state with the Morse index 1.
\end{proof}
\subsection{Nonexistence of ground states}
In this section, let us focus our attention on the nonexistence of the ground states of \eqref{eqnew0001}, in the total-mixed case~$(d)$: $\beta_{1,2}>0$, $\beta_{1,3}>0$ and $\beta_{2,3}<0$. We begin with the following observation.
\begin{lemma}\label{lem0002}
Let $\beta_{1,2}=\delta\widehat{\beta}_{1,2}$, $\beta_{1,3}=\delta^t\widehat{\beta}_{1,3}$ and $\beta_{2,3}=-\delta^s\widehat{\beta}_{2,3}$, where $\delta>0$ is a parameter, $0<s<\min\{1,t\}$ and the $\widehat{\beta}_{i,j}$ are positive absolute constants. Suppose that $\overrightarrow{u}_\delta$ is a ground state of \eqref{eqnew0001} and that, for each $j$, $y_{j,\delta}$ is a maximum point of $u_{j,\delta}$. Then, $\widehat{v}_{j,\delta}=u_{j,\delta}(x+y_{j,\delta})\to w_j$ strongly in $H^1(\mathbb{R}^N)\cap L^\infty(\mathbb{R}^N)$ as $\delta\to0$ up to a subsequence. Moreover, either
\begin{enumerate}
\item[$(i)$] $y_{1,\delta}-y_{2,\delta}\to0$ and $|y_{2,\delta}-y_{3,\delta}|\to+\infty$ or
\item[$(ii)$] $y_{1,\delta}-y_{3,\delta}\to0$ and $|y_{2,\delta}-y_{3,\delta}|\to+\infty$.
\end{enumerate}
\end{lemma}
\begin{proof}
We re-denote $\mathcal{C}_{\mathcal{N}}$ and $\mathcal{C}_{\mathcal{N}_{i,j}}$ by $\mathcal{C}_{\mathcal{N}}^\delta$ and $\mathcal{C}_{\mathcal{N}_{i,j}}^\delta$, respectively, for the sake of clarity in this proof, where $\mathcal{C}_{\mathcal{N}_{i,j}}$ is given by \eqref{eqn0110} and $(i,j)$ equals $(1,2)$, $(1,3)$ or $(2,3)$. We also re-denote $\overrightarrow{\varphi}^{i,j}$ by $\overrightarrow{\varphi}_\delta^{i,j}$, where $\overrightarrow{\varphi}^{i,j}=(\varphi^{i,j}_i,\varphi^{i,j}_j)$ is a ground state of \eqref{eqn0099} and $(i,j)$ equals $(1,2)$ or $(1,3)$. As in the proof of Lemma~\ref{lem0001}, using $(\varphi_{1,\delta}^{1,2},\varphi_{2,\delta}^{1,2},w_{3,R})$ as a test function of $\mathcal{C}_{\mathcal{N}}^\delta$ and letting $R\to+\infty$ yields that
\begin{eqnarray}\label{eqnew0002}
\mathcal{C}_{\mathcal{N}}^\delta\leq\mathcal{C}_{\mathcal{N}_{1,2}}^\delta+\mathcal{E}_3(w_3),
\end{eqnarray}
which, together with Remark~\ref{rmk0001}, implies $\mathcal{C}_{\mathcal{N}}^\delta\leq\sum_{j=1}^3\mathcal{E}_j(w_j)-C\delta$ for sufficiently small $\delta>0$. Similarly, if we test $\mathcal{C}_{\mathcal{N}}^\delta$ by $(\varphi_{1,\delta}^{1,3},w_{2,R},\varphi_{3,\delta}^{1,3})$, then we obtain $\mathcal{C}_{\mathcal{N}}^\delta\leq\sum_{j=1}^3\mathcal{E}_j(w_j)-C\delta^t$ for sufficiently small $\delta>0$. Hence, we always have
\begin{eqnarray}\label{eq0004}
\mathcal{C}_{\mathcal{N}}^\delta\leq\sum_{j=1}^3\mathcal{E}_j(w_j)-C\delta^{\min\{1,t\}}\quad\text{for sufficiently small }\delta>0.
\end{eqnarray}
On the other hand, applying the Lions lemma and the Sobolev embedding theorem in a standard way yields that there exist $\{z_{j,\delta}\}\subset\mathbb{R}^N$ such that $\widehat{v}_{j,\delta}=u_{j,\delta}(x+z_{j,\delta})\to w_j$ strongly in $H^1(\mathbb{R}^N)$ as $\delta\to0$ up to a subsequence. Let $v_{j,\delta}=\widehat{v}_{j,\delta}-w_j$, then $v_{j,\delta}$ satisfies the following equation
\begin{eqnarray}
-\Delta v_{j,\delta}+\lambda_jv_{j,\delta}&=&\mu_j[3w_j^2v_{j,\delta}+3w_j(v_{j,\delta})^2+(v_{j,\delta})^3]\notag\\
&&+\beta_{i,j}(\widehat{v}_{i,\delta})^2\widehat{v}_{j,\delta}+\beta_{l,j}(\widehat{v}_{l,\delta})^2\widehat{v}_{j,\delta}\label{eqnew9995}
\end{eqnarray}
in $\mathbb{R}^N$,
where $i,j,l=1,2,3$ with $i\not=l$, $l\not=j$ and $i\not=j$.
Applying the Moser iteration in a standard way yields that $v_{j,\delta}\to0$ strongly in $L^p(\mathbb{R}^N)$ for all $p\geq2$ as $\delta\to0$ up to a subsequence. Using the classical elliptic estimates in a standard way yields that $\widehat{v}_{j,\delta}\to w_j$ strongly in $L^\infty(\mathbb{R}^N)$ as $\delta\to0$ up to a subsequence. In particular, $|\widehat{v}_{j,\delta}(x)|\ll1$ for $|x|\gg1$, uniformly for sufficiently small $\delta>0$. Since $y_{j,\delta}$ is the maximum point of $u_{j,\delta}$, $|y_{j,\delta}-z_{j,\delta}|\lesssim1$ for sufficiently small $\delta>0$. Thus, since $w_j(0)=\max_{x\in\mathbb{R}^N}w_j(x)$ and $y_{j,\delta}$ is the maximum point of $u_{j,\delta}$, we may assume that $z_{j,\delta}=y_{j,\delta}$ for sufficiently small $\delta>0$. That is, $\widehat{v}_{j,\delta}=u_{j,\delta}(x+y_{j,\delta})\to w_j$ strongly in $H^1(\mathbb{R}^N)\cap L^\infty(\mathbb{R}^N)$ as $\delta\to0$ up to a subsequence.
Since, by scaling, the best embedding constant from $\mathcal{H}_j$ to $L^4(\mathbb{R}^N)$ is $\mu_j\|w_j\|_4^2$, we have $\|u_{j,\delta}\|_{\lambda_j}^2\geq\mu_j\|w_j\|_4^2\|u_{j,\delta}\|_4^2$. It follows that
\begin{eqnarray*}
\mu_j\|u_{j,\delta}\|_4^2\geq\mu_j\|w_j\|_4^2-\frac{1}{\|u_{j,\delta}\|_4^2}(\beta_{i,j}\|u_{i,\delta}u_{j,\delta}\|_2^2
+\beta_{l,j}\|u_{l,\delta}u_{j,\delta}\|_2^2),
\end{eqnarray*}
which implies
\begin{eqnarray}\label{eqnewnew0003}
\|u_{j,\delta}\|_{\lambda_j}^2\geq\mu_j\|w_j\|_4^4-\frac{\|w_j\|_4^2}{\|u_{j,\delta}\|_4^2}(\beta_{i,j}\|u_{i,\delta}u_{j,\delta}\|_2^2
+\beta_{l,j}\|u_{l,\delta}u_{j,\delta}\|_2^2).
\end{eqnarray}
Here, $i,j,l=1,2,3$ with $i\not=l$, $l\not=j$ and $i\not=j$. Therefore, we have a lower-bound estimate of $\mathcal{C}_{\mathcal{N}}^\delta$ as follows:
\begin{eqnarray}
\mathcal{C}_{\mathcal{N}}^\delta&\geq&\sum_{j=1}^3\mathcal{E}_j(w_j)
-\frac{1+o_\delta(1)}{2}\sum_{i,j=1,i<j}^3\beta_{i,j}\|u_{i,\delta}u_{j,\delta}\|_2^2\label{eqnew9997}\\
&\geq&\sum_{j=1}^3\mathcal{E}_j(w_j)-\frac{C+o_\delta(1)}{2}(\|u_{1,\delta}u_{2,\delta}\|_2^2\delta+\|u_{1,\delta}u_{3,\delta}\|_2^2\delta^t)\notag\\
&&+\frac{C'+o_\delta(1)}{2}\|u_{2,\delta}u_{3,\delta}\|_2^2\delta^s.\label{eq0005}
\end{eqnarray}
Here, $o_\delta(1)\to0$ as $\delta\to0$.
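We note how the first inequality above, \eqref{eqnew9997}, is obtained (it only rearranges \eqref{eqnewnew0003}): since $\mathcal{E}_j(w_j)=\frac{\mu_j}{4}\|w_j\|_4^4$ and $\|u_{j,\delta}\|_4\to\|w_j\|_4$ as $\delta\to0$, summing \eqref{eqnewnew0003} over $j$ yields
\begin{eqnarray*}
\mathcal{C}_{\mathcal{N}}^\delta=\frac{1}{4}\sum_{j=1}^3\|u_{j,\delta}\|_{\lambda_j}^2
\geq\sum_{j=1}^3\mathcal{E}_j(w_j)-\frac{1+o_\delta(1)}{4}\sum_{j=1}^3\sum_{i\not=j}\beta_{i,j}\|u_{i,\delta}u_{j,\delta}\|_2^2,
\end{eqnarray*}
and the double sum counts each pair $\{i,j\}$ twice, which gives \eqref{eqnew9997}.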
If both $\|u_{1,\delta}u_{2,\delta}\|_2^2$ and $\|u_{1,\delta}u_{3,\delta}\|_2^2$ converge to $0$ as $\delta\to0$, or if $1\lesssim\|u_{2,\delta}u_{3,\delta}\|_2^2$ for sufficiently small $\delta>0$, then \eqref{eq0004} and \eqref{eq0005} cannot hold at the same time for sufficiently small $\delta>0$. Thus, either
\begin{enumerate}
\item[$(1)$] $1\lesssim\|u_{1,\delta}u_{2,\delta}\|_2^2$ and $\|u_{2,\delta}u_{3,\delta}\|_2^2=o_\delta(1)$ or
\item[$(2)$] $1\lesssim\|u_{1,\delta}u_{3,\delta}\|_2^2$ and $\|u_{2,\delta}u_{3,\delta}\|_2^2=o_\delta(1)$
\end{enumerate}
as $\delta\to0$. By the Lebesgue dominated convergence theorem, either
\begin{enumerate}
\item[$(i)$] $|y_{1,\delta}-y_{2,\delta}|\lesssim1$ and $|y_{2,\delta}-y_{3,\delta}|\to+\infty$ or
\item[$(ii)$] $|y_{1,\delta}-y_{3,\delta}|\lesssim1$ and $|y_{2,\delta}-y_{3,\delta}|\to+\infty$
\end{enumerate}
as $\delta\to0$. Without loss of generality, we assume $y_{1,\delta}-y_{2,\delta}\to y_0$ as $\delta\to0$ in the case~$(i)$ and $y_{1,\delta}-y_{3,\delta}\to y_0'$ as $\delta\to0$ in the case~$(ii)$. It remains to show that both $y_0$ and $y_0'$ equal $0$. In what follows, we only give the proof for $y_0$, since that for $y_0'$ is similar. In the case~$(i)$, we also have $|y_{1,\delta}-y_{3,\delta}|\to+\infty$ as $\delta\to0$ and $t\geq1$. It follows from the Lebesgue dominated convergence theorem that $\|u_{1,\delta}u_{3,\delta}\|_2^2=o_\delta(1)$ and
$$
\|u_{1,\delta}u_{2,\delta}\|_2^2=\int_{\mathbb{R}^N}w_1(x)^2w_2(x+y_0)^2dx+o_\delta(1).
$$
Moreover, since $\widehat{v}_{j,\delta}=u_{j,\delta}(x+y_{j,\delta})\to w_j$ strongly in $H^1(\mathbb{R}^N)\cap L^\infty(\mathbb{R}^N)$ as $\delta\to0$ up to a subsequence, it is standard to show that there exist $t_j(\delta)\to1$ and $s(\delta)\to1$ as $\delta\to0$ such that $(t_1(\delta)u_{1,\delta}, t_2(\delta)u_{2,\delta})\in\mathcal{N}_{1,2}$ and $s(\delta)u_{3,\delta}\in\mathcal{N}_{3}$. Thus, by \cite[Theorem~5]{LW05},
\begin{eqnarray}
\mathcal{C}_{\mathcal{N}}&=&\mathcal{E}(\overrightarrow{u}_\delta)\notag\\
&\geq&\mathcal{E}((t_1(\delta)u_{1,\delta}, t_2(\delta)u_{2,\delta},s(\delta)u_{3,\delta}))\notag\\
&\geq&\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_3(w_3)\notag\\
&&-\frac{1+o_\delta(1)}{2}(\beta_{1,3}\|u_{1,\delta}u_{3,\delta}\|_2^2+\beta_{2,3}\|u_{2,\delta}u_{3,\delta}\|_2^2),\label{eqnew9987}
\end{eqnarray}
which, together with \eqref{eqnew0002}, implies $\beta_{1,3}\|u_{1,\delta}u_{3,\delta}\|_2^2+\beta_{2,3}\|u_{2,\delta}u_{3,\delta}\|_2^2\geq0$. Thus,
by Remark~\ref{rmk0001} and \eqref{eqnew9997},
$$
\|u_{1,\delta}u_{2,\delta}\|_2^2\geq\max\{\|w_1w_2\|_2^2, \|w_1w_3\|_2^2\}+o_\delta(1).
$$
It follows that
\begin{eqnarray}\label{eqnew9996}
\int_{\mathbb{R}^N}w_1(x)^2(w_2(x+y_0))^2dx\geq\int_{\mathbb{R}^N}w_1(x)^2w_2(x)^2dx.
\end{eqnarray}
Let $F(z)=\int_{\mathbb{R}^N}w_1(x)^2(w_2(x+z))^2dx$. Then,
\begin{eqnarray}
\nabla F(z)&=&\int_{\mathbb{R}^N}2w_1(|x|)^2w_2(|x+z|)w_2'(|x+z|)\frac{x+z}{|x+z|}dx\notag\\
&=&\int_{\mathbb{R}^N}2w_1(|x-z|)^2w_2(|x|)w_2'(|x|)\frac{x}{|x|}dx.\label{eqnew6666}
\end{eqnarray}
Since $w_1(x)$ and $w_2(x)$ are radially symmetric and strictly decreasing in $|x|$, $\nabla F(z)=0$ if and only if $z=0$. Thus, since $F(z)>0$ and $F(z)\to0$ as $|z|\to+\infty$, $F(0)=\max_{z\in\mathbb{R}^N}F(z)$. It follows from \eqref{eqnew9996} that $y_0=0$.
\end{proof}
Now, we are prepared to prove the following nonexistence result.
\begin{proposition}\label{prop0002}
Let $\beta_{1,2}=\delta\widehat{\beta}_{1,2}$, $\beta_{1,3}=\delta^t\widehat{\beta}_{1,3}$ and $\beta_{2,3}=-\delta^s\widehat{\beta}_{2,3}$, where $\delta>0$ is a parameter, $0<s<\min\{1,t\}$ and the $\widehat{\beta}_{i,j}$ are positive absolute constants. If $\lambda_1\geq\min\{\lambda_2,\lambda_3\}$ then $\mathcal{C}_{\mathcal{N}}$ cannot be attained for sufficiently small $\delta>0$. That is, \eqref{eqnew0001} has no ground states.
\end{proposition}
\begin{proof}
Suppose, to the contrary, that \eqref{eqnew0001} has a ground state $\overrightarrow{u}_\delta$ for sufficiently small $\delta>0$ in the case $\lambda_1\geq\min\{\lambda_2,\lambda_3\}$. For each $j$, let $y_{j,\delta}$ be a maximum point of $u_{j,\delta}$. Then, by Lemma~\ref{lem0002}, $\widehat{v}_{j,\delta}=u_{j,\delta}(x+y_{j,\delta})\to w_j$ strongly in $H^1(\mathbb{R}^N)\cap L^\infty(\mathbb{R}^N)$ as $\delta\to0$ up to a subsequence. Moreover, either
\begin{enumerate}
\item[$(i)$] $y_{1,\delta}-y_{2,\delta}\to0$ and $|y_{2,\delta}-y_{3,\delta}|\to+\infty$ or
\item[$(ii)$] $y_{1,\delta}-y_{3,\delta}\to0$ and $|y_{2,\delta}-y_{3,\delta}|\to+\infty$.
\end{enumerate}
Without loss of generality, we assume that the case~$(i)$ happens. Let $\{\alpha_{j,l}\}_{l=0,1,2,\cdots}$ be the eigenvalues of the following eigenvalue problem:
\begin{eqnarray*}
-\Delta v+\lambda_j v=\alpha w_j^2v,\quad v\in H^1(\mathbb{R}^N).
\end{eqnarray*}
Then, it is well-known that $\alpha_{j,0}=1$, $\alpha_{j,1}=\alpha_{j,2}=\cdots=\alpha_{j,N}=3$ and $\alpha_{j,l}>3$ for $l=N+1,N+2,\cdots$. Let $\upsilon_{j,l}$ be the corresponding eigenfunction of $\alpha_{j,l}$. Then, it is also well-known that $H^1(\mathbb{R}^N)=\bigoplus_{l=1}^\infty\mathbb{R}\upsilon_{j,l}$ and $\upsilon_{j,0}=w_j$ and $\upsilon_{j,l}=\frac{\partial w_j}{\partial x_l}$ for $l=1,2,\cdots,N$.
Moreover, $|\upsilon_{j,l}(x)|\lesssim|\upsilon_{j,0}(x)|$ for $|x|$ sufficiently large. Since $\widehat{v}_{j,\delta}\to w_j$ strongly in $H^1(\mathbb{R}^N)\cap L^\infty(\mathbb{R}^N)$ as $\delta\to0$ up to a subsequence, $v_{j,\delta}=\sum_{l=1}^\infty\gamma_{j,l}^\delta\upsilon_{j,l}$ with $\gamma_{j,l}^\delta\to0$ as $\delta\to0$, where $v_{j,\delta}$ is the solution of \eqref{eqnew9995}. Thus,
\begin{eqnarray*}
\bigg(\frac{v_{j,\delta}}{w_j}\bigg)^2\lesssim\sum_{l=1}^{\infty}(\gamma_{j,l}^\delta)^2=\int_{\mathbb{R}^N}w_j^2v_{j,\delta}^2dx=o_{\delta}(1).
\end{eqnarray*}
Here, without loss of generality, we assume that $\int_{\mathbb{R}^N}w_j^2\upsilon_{j,l}^2dx=1$ for all $j=1,2,3$ and $l=1,2,3,\cdots$.
For the sake of simplicity, we assume $y_{1,\delta}=0$ and denote $w_{j,y}=w_j(x+y)$ in what follows. Thus,
\begin{eqnarray}
&&\int_{\mathbb{R}^N}(u_{1,\delta})^2(u_{3,\delta})^2dx\notag\\
&=&\int_{\mathbb{R}^N}w_1^2w_{3,-y_{3,\delta}}^2dx
+2\int_{\mathbb{R}^N}w_1^2w_{3,-y_{3,\delta}}v_{3,\delta}(x-y_{3,\delta})dx\notag\\
&&+\int_{\mathbb{R}^N}w_1^2(v_{3,\delta}(x-y_{3,\delta}))^2dx+2\int_{\mathbb{R}^N}w_{3,-y_{3,\delta}}^2 w_1v_{1,\delta}dx\notag\\
&&+4\int_{\mathbb{R}^N}w_1v_{1,\delta}w_{3,-y_{3,\delta}}v_{3,\delta}(x-y_{3,\delta})dx\notag\\
&&+2\int_{\mathbb{R}^N}w_1v_{1,\delta}(v_{3,\delta}(x-y_{3,\delta}))^2dx+\int_{\mathbb{R}^N}v_{1,\delta}^2w_{3,-y_{3,\delta}}^2dx\notag\\
&&+2\int_{\mathbb{R}^N}v_{1,\delta}^2w_{3,-y_{3,\delta}}v_{3,\delta}(x-y_{3,\delta})dx+\int_{\mathbb{R}^N}v_{1,\delta}^2(v_{3,\delta}(x-y_{3,\delta}))^2dx
\notag\\
&=&(1+o_\delta(1))\int_{\mathbb{R}^N}w_1^2w_{3,-y_{3,\delta}}^2dx.\label{eqnew9994}
\end{eqnarray}
Similarly,
\begin{eqnarray}
\int_{\mathbb{R}^N}(u_{2,\delta})^2(u_{3,\delta})^2dx&=&(1+o_\delta(1))\int_{\mathbb{R}^N}w_2^2w_{3,-y_{3,\delta}}^2dx.\label{eqnew9993}
\end{eqnarray}
Since $|y_{3,\delta}|\to+\infty$ as $\delta\to0$, by Lemma~\ref{lemn0010},
\begin{eqnarray*}
\int_{\mathbb{R}^N}w_1^2w_{3,-y_{3,\delta}}^2dx\sim\left\{\aligned |y_{3,\delta}|^{1-N}e^{-2\min\{\sqrt{\lambda_1}, \sqrt{\lambda_3}\}|y_{3,\delta}|},\quad \lambda_1\not=\lambda_3;\\
|y_{3,\delta}|^{1+\alpha-N}e^{-2\sqrt{\lambda}|y_{3,\delta}|},\quad \lambda_1=\lambda_3=\lambda\endaligned
\right.
\end{eqnarray*}
and
\begin{eqnarray*}
\int_{\mathbb{R}^N}w_2^2w_{3,-y_{3,\delta}}^2dx\sim\left\{\aligned |y_{3,\delta}|^{1-N}e^{-2\min\{\sqrt{\lambda_2}, \sqrt{\lambda_3}\}|y_{3,\delta}|},\quad \lambda_2\not=\lambda_3;\\
|y_{3,\delta}|^{1+\alpha-N}e^{-2\sqrt{\lambda}|y_{3,\delta}|},\quad \lambda_2=\lambda_3=\lambda\endaligned
\right.
\end{eqnarray*}
as $\delta\to0$, where $\alpha=1$ for $N=1$ and $\alpha=\frac{1}{2}$ for $N=2,3$. Since $s<t$ and $\lambda_1\geq\min\{\lambda_2,\lambda_3\}$, it follows from \eqref{eqnew9994} and \eqref{eqnew9993} that
\begin{eqnarray}
\beta_{1,3}\|u_{1,\delta}u_{3,\delta}\|_2^2+\beta_{2,3}\|u_{2,\delta}u_{3,\delta}\|_2^2&\leq&
\delta^t\widehat{\beta}_{1,3}(1+o_\delta(1))\int_{\mathbb{R}^N}w_1^2w_{3,-y_{3,\delta}}^2dx\notag\\
&&-\delta^s\widehat{\beta}_{2,3}(1+o_\delta(1))\int_{\mathbb{R}^N}w_2^2w_{3,-y_{3,\delta}}^2dx\notag\\
&<&0\label{eqnew9991}
\end{eqnarray}
for sufficiently small $\delta>0$.
On the other hand, since $\widehat{v}_{j,\delta}=u_{j,\delta}(x+y_{j,\delta})\to w_j$ strongly in $H^1(\mathbb{R}^N)$ as $\delta\to0$ up to a subsequence, \eqref{eqnew0002} and \eqref{eqnew9987} imply that $\beta_{1,3}\|u_{1,\delta}u_{3,\delta}\|_2^2+\beta_{2,3}\|u_{2,\delta}u_{3,\delta}\|_2^2\geq0$ for sufficiently small $\delta>0$. This contradicts \eqref{eqnew9991}. Therefore, \eqref{eqnew0001} has no ground states for sufficiently small $\delta>0$.
\end{proof}
\begin{remark}\label{rmk0002}
By the proof of Proposition~\ref{prop0002}, we can obtain a by-product: Suppose $\overrightarrow{u}_\delta$ is a ground state of \eqref{eqnew0001} for sufficiently small $\delta>0$, in the total-mixed case~$(d)$ with $\lambda_1<\min\{\lambda_2,\lambda_3\}$ and $s<\min\{1,t\}$. Then, by \eqref{eqnew0002} and \eqref{eqnew9987},
\begin{eqnarray*}
\beta_{1,3}\|u_{1,\delta}u_{3,\delta}\|_2^2+\beta_{2,3}\|u_{2,\delta}u_{3,\delta}\|_2^2\geq0
\end{eqnarray*}
for sufficiently small $\delta>0$ in the case~$(i)$, which is given by Lemma~\ref{lem0002}. It follows that
\begin{eqnarray*}
\left\{\aligned &C'\delta^te^{-2\sqrt{\lambda_1}|y_{2,\delta}-y_{3,\delta}|}-C\delta^se^{-2\min\{\sqrt{\lambda_2}, \sqrt{\lambda_3}\}|y_{2,\delta}-y_{3,\delta}|}\geq0,\quad\lambda_2\not=\lambda_3;\\
&C'\delta^te^{-2\sqrt{\lambda_1}|y_{2,\delta}-y_{3,\delta}|}-C\delta^s|y_{2,\delta}-y_{3,\delta}|^\alpha e^{-2\sqrt{\lambda_2}
|y_{2,\delta}-y_{3,\delta}|}\geq0,\quad\lambda_2=\lambda_3,\endaligned\right.
\end{eqnarray*}
which implies
$$
|y_{2,\delta}-y_{3,\delta}|\lesssim(\log\frac{1}{\delta})^{\frac{t-s}{2(\min\{\sqrt{\lambda_2}, \sqrt{\lambda_3}\}-\sqrt{\lambda_1})}}
$$
in the case~$(i)$. Similarly, in the case~$(ii)$ which is given by Lemma~\ref{lem0002},
$$
|y_{2,\delta}-y_{3,\delta}|\lesssim(\log\frac{1}{\delta})^{\frac{1-s}{2(\min\{\sqrt{\lambda_2}, \sqrt{\lambda_3}\}-\sqrt{\lambda_1})}}.
$$
\end{remark}
We close this section by
\vskip0.12in
\noindent\textbf{Proof of Theorem~\ref{coro0001}:}\quad The conclusion~$(1)$ follows from Propositions~\ref{prop0014} and \ref{prop0005} and \cite[Theorem~1]{LW05} (see also \cite[Corollary~1.3]{ST16}), the conclusion~$(2)$ follows from \cite[Theorem~3]{LW05} (see also \cite[Theorem~1.6]{ST16}), the conclusion~$(3)$ follows from Propositions~\ref{prop0001} and \ref{prop0003}, and the conclusion~$(4)$ follows from Proposition~\ref{prop0002}.
\hfill$\Box$
\section{$k$-coupled system~\eqref{eqn0001}}
In this section, we will consider the general $k$-coupled system~\eqref{eqn0001} and prove Theorems~\ref{thm0003} and \ref{thm0002}. Since the main ideas are similar to those of Theorem~\ref{coro0001}, we only sketch the proofs.
\noindent\textbf{Proof of Theorem~\ref{thm0003}:}\quad $(1)$
Since the proof of the existence of ground states of \eqref{eqnewnew0001} in the total-mixed case~$(H)$ with Morse index $4$ is similar to the Morse index 3 case of Theorem \ref{coro0001}, we shall only give the proof of the Morse index $3$ case. Let
\begin{eqnarray*}
\mathcal{M}_{12,3,4}=\{\overrightarrow{u}\in\widehat{\mathcal{H}}_{12,3,4}\mid \overrightarrow{\mathcal{\widehat{Q}}}_{12,3,4}(u)=(\mathcal{G}_1(\overrightarrow{u})+\mathcal{G}_2(\overrightarrow{u}), \mathcal{G}_3(\overrightarrow{u}), \mathcal{G}_4(\overrightarrow{u}))=\overrightarrow{0}\},
\end{eqnarray*}
where $\mathcal{G}_j(\overrightarrow{u})=\|u_j\|_{\lambda_j}^2-\mu_j\|u_j\|_4^4-\sum_{i=1,i\not=j}^4\beta_{i,j}\|u_iu_j\|_2^2$ and $\widehat{\mathcal{H}}_{12,3,4}=((\mathcal{H}_1\times\mathcal{H}_2)\backslash\{\overrightarrow{0}\})\times(\mathcal{H}_3\backslash\{0\})
\times(\mathcal{H}_4\backslash\{0\})$.
Let
\begin{eqnarray*}
\mathcal{C}_{\mathcal{M}_{12,3,4}}=\inf_{\mathcal{M}_{12,3,4}}\mathcal{E}(\overrightarrow{u}).
\end{eqnarray*}
Then, $\mathcal{C}_{\mathcal{M}_{12,3,4}}$ is well defined and nonnegative. Since $\beta_{1,2}>\widehat{\beta}_0>0$ and $\beta_{i,j}<\beta_{0}$ for all other $(i,j)\not=(1,2)$, where $\widehat{\beta}_0$ is sufficiently large and $\beta_{0}$ is sufficiently small, it is standard to show that $\mathcal{C}_{\mathcal{M}_{12,3,4}}<\sum_{j=1}^4\mathcal{E}_j(w_j)$. Moreover, by arguments similar to those used in the proof of Lemma~\ref{lemn0002}, we can show that
the matrix $\Xi=[\beta_{i,j}\|u_iu_j\|_2^2]_{i,j=1,2,\cdots,4}$ is strictly diagonally dominant for $\overrightarrow{u}\in\mathcal{M}_{12,3,4}$ with $\sum_{j=1}^4\|u_j\|_{\lambda_j}^2\leq8\sum_{j=1}^4\mathcal{E}_j(w_j)$. Here, $\beta_{j,j}=\mu_j$. It follows that $\Xi$ is
positive definite with $|\det(\Xi)|\geq C$. Thus,
by arguments similar to those in the proof of Lemma~\ref{lemn0002}, there exists a $(PS)$ sequence $\{\overrightarrow{u}_n\}$ at the least energy value $\mathcal{C}_{\mathcal{M}_{12,3,4}}$. Moreover, any positive minimizer is a ground state of \eqref{eqnewnew0001} with the Morse index $3$. Thus, it is sufficient to find a positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{M}_{12,3,4}$. We start by estimating $\mathcal{C}_{\mathcal{M}_{12,3,4}}$. By our assumptions, it is easy to verify that the degrees of the eventual block decompositions of $\mathbf{A}_1$ all equal $1$. Thus, we can further group $\mathbf{A}_1$, given by \eqref{eqnewnew0007}, into $\mathbf{A}_2$, given by \eqref{eqnewnew0008}.
Since the interaction force $\mathfrak{F}_{1,2}^0$, given by \eqref{eqnewnew0002}, is positive, by Lemma~\ref{lem0003}, the least energy value of ground states in the block $C_{1,1}$, denoted by $\mathcal{C}_{\mathcal{M}_{12,3}}$, is strictly less than $\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_3(w_3)$. Under the permutation $(1,2,3,4)\to(1,2,4,3)$, there is another choice of $C_{1,1}$, which consists of $(u_1,u_2)$ and $u_4$. Similarly, the corresponding least energy value of ground states, denoted by $\mathcal{C}_{\mathcal{M}_{12,4}}$, is also strictly less than $\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_4(w_4)$. Thus, by \cite[Theorems~1 and 2]{AC06} and our choice of $\beta_{1,2}>\widehat{\beta}_0$ sufficiently large,
\begin{eqnarray*}
\mathcal{C}=\min\{\mathcal{C}_{\mathcal{M}_{12,3}}+\mathcal{E}_4(w_4), \mathcal{C}_{\mathcal{M}_{12,4}}+\mathcal{E}_3(w_3)\}
\end{eqnarray*}
is the smallest energy value at which the $(PS)$ sequence at the least energy value $\mathcal{C}_{\mathcal{M}_{12,3,4}}$ can split into blocks in passing to the limit in the optimal block decomposition~$\mathbf{A}_1$. Even though there is another optimal block decomposition, consisting of the blocks $(u_1,u_3)$, $u_2$ and $u_4$, by the assumptions $\beta_{1,2}>\widehat{\beta}_0>0$ and $\beta_{i,j}<\beta_{0}$ for all other $(i,j)\not=(1,2)$, the smallest energy value in this optimal block decomposition, defined similarly to $\mathcal{C}$, is strictly larger than $\mathcal{C}$. Thus, $\mathcal{C}$ is the smallest energy value at which the $(PS)$ sequence at the least energy value $\mathcal{C}_{\mathcal{M}_{12,3,4}}$ can split into blocks in passing to the limit. Now, using the fact that the degrees of the eventual block decompositions of $\mathbf{A}_1$ all equal $1$ and arguments similar to those used in the proof of Lemma~\ref{lem0003}, we obtain
$\mathcal{C}_{\mathcal{M}_{12,3,4}}<\mathcal{C}$.
Thus, applying the arguments similar to the proof of Proposition~\ref{prop0003} yields that $\mathcal{E}(\overrightarrow{u})$ has a positive minimizer on $\mathcal{M}_{12,3,4}$.
$(2)$\quad
Since we assume that all $|\beta_{i,j}|$ are sufficiently small, the ground states, if they exist, should be minimizers of $\mathcal{E}(\overrightarrow{u})$ on
\begin{eqnarray*}
\mathcal{N}_{1,2,3,4}=\{\overrightarrow{u}\in\widehat{\mathcal{H}}_{1,2,3,4}\mid \overrightarrow{\mathcal{\widehat{Q}}}_{1,2,3,4}(u)=(\mathcal{G}_1(\overrightarrow{u}), \mathcal{G}_2(\overrightarrow{u}), \mathcal{G}_3(\overrightarrow{u}), \mathcal{G}_4(\overrightarrow{u}))=\overrightarrow{0}\},
\end{eqnarray*}
where $\mathcal{G}_j(\overrightarrow{u})=\|u_j\|_{\lambda_j}^2-\mu_j\|u_j\|_4^4-\sum_{i=1,i\not=j}^4\beta_{i,j}\|u_iu_j\|_2^2$ and $\widehat{\mathcal{H}}_{1,2,3,4}=(\mathcal{H}_1\backslash\{0\})\times(\mathcal{H}_2\backslash\{0\})\times(\mathcal{H}_3\backslash\{0\})
\times(\mathcal{H}_4\backslash\{0\})$. Let
\begin{eqnarray*}
\mathcal{C}_{\mathcal{N}_{1,2,3,4}}=\inf_{\mathcal{N}_{1,2,3,4}}\mathcal{E}(\overrightarrow{u}).
\end{eqnarray*}
Then, by a similar choice of test functions as that in the proof of Lemma~\ref{lem0001},
\begin{eqnarray}\label{eqnewnew0006}
\mathcal{C}_{\mathcal{N}_{1,2,3,4}}\leq\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_{3}(w_3)+\mathcal{E}_4(w_4).
\end{eqnarray}
On the other hand, by similar arguments as used for \eqref{eqnew9987},
\begin{eqnarray}\label{eqnewnew0005}
\mathcal{C}_{\mathcal{N}_{1,2,3,4}}&\geq&\mathcal{C}_{\mathcal{N}_{1,2}}+\mathcal{E}_{3}(w_3)+\mathcal{E}_4(w_4)\notag\\
&&-\frac{1+o_\delta(1)}{2}(\beta_{1,3}\|u_1^\delta u_3^\delta\|_2^2+\beta_{2,3}\|u_2^\delta u_3^\delta\|_2^2+\beta_{1,4}\|u_1^\delta u_4^\delta\|_2^2\notag\\
&&+\beta_{2,4}\|u_2^\delta u_4^\delta\|_2^2+\beta_{3,4}\|u_3^\delta u_4^\delta\|_2^2).
\end{eqnarray}
Thus, since $t_{1,2}<\min\{t_{1,3}, t_{2,4}\}$, we can apply the arguments used in the proof of Lemma~\ref{lem0002} to show that $|y_{1,\delta}-y_{2,\delta}|\lesssim1$ and $|y_{i,\delta}-y_{j,\delta}|\to+\infty$ for $(i,j)\not=(1,2)$, where $y_{i,\delta}$ is the maximum point of $u_i^\delta$. Moreover, computations similar to \eqref{eqnew9996} and \eqref{eqnew6666} yield $y_{1,\delta}-y_{2,\delta}\to0$ as $\delta\to0$. Now, we can use Lemma~\ref{lemn0010} and computations similar to those in the proof of Proposition~\ref{prop0002} to estimate the term $\beta_{1,3}\|u_1^\delta u_3^\delta\|_2^2+\beta_{2,3}\|u_2^\delta u_3^\delta\|_2^2+\beta_{1,4}\|u_1^\delta u_4^\delta\|_2^2+\beta_{2,4}\|u_2^\delta u_4^\delta\|_2^2+\beta_{3,4}\|u_3^\delta u_4^\delta\|_2^2$. Since $\min\{t_{2,3}, t_{1,4}, t_{3,4}\}<t_{1,2}$ and $\min\{\lambda_3,\lambda_4\}<\min\{\lambda_1,\lambda_2\}$,
$$
\beta_{1,3}\|u_1^\delta u_3^\delta\|_2^2+\beta_{2,3}\|u_2^\delta u_3^\delta\|_2^2+\beta_{1,4}\|u_1^\delta u_4^\delta\|_2^2+\beta_{2,4}\|u_2^\delta u_4^\delta\|_2^2+\beta_{3,4}\|u_3^\delta u_4^\delta\|_2^2<0
$$
for $\delta>0$ sufficiently small. This contradicts \eqref{eqnewnew0006} and \eqref{eqnewnew0005}. As a result, ground states of \eqref{eqnewnew0001} do not exist.
\hfill$\Box$
We close this section by
\noindent\textbf{Proof of Theorem~\ref{thm0002}:}\quad $(1)$ In proving this conclusion, we need to further employ the iteration argument. We assume this conclusion is true for $3,4,\cdots,k-1$. Recall that we have assumed that $\{i\not=j,(i,j)\in\mathcal{K}_{s,s,\mathbf{a}_d}\}\not=\emptyset$ for $s=1,2,\cdots,s_0$ and $\{i\not=j,(i,j)\in\mathcal{K}_{s,s,\mathbf{a}_d}\}=\emptyset$ for $s=s_0+1,\cdots,d$ with an $s_0\in\{0,1,2,\cdots,d\}$.
Since $d\leq\gamma\leq k$, there exists a unique $0\leq s^*\leq s_0$ such that $a_{s^*}\leq k-\gamma<a_{s^*+1}$. Now,
we define the following Nehari manifold:
\begin{eqnarray*}
\mathcal{N}_{\gamma}=\bigg\{\overrightarrow{u}\in\widetilde{\mathcal{H}}_{\gamma}&\mid& \sum_{j=a_{s-1}+1}^{a_{s}}\mathcal{G}_{j}(\overrightarrow{u})=0, \quad \sum_{j=a_{s^*}+1}^{k-\gamma+1}\mathcal{G}_{j}(\overrightarrow{u})=0,\quad\mathcal{G}_{t}(\overrightarrow{u})=0\\
&&\mathcal{G}_{a_n}(u)=0,\quad
1\leq s\leq s^*, k-\gamma+2\leq t\leq a_{s_0}, s_0+1\leq n\leq m\bigg\},
\end{eqnarray*}
where $\mathcal{G}_j(\overrightarrow{u})=\|u_j\|_{\lambda_j}^2-\mu_j\|u_j\|_4^4-\sum_{i=1,i\not=j}^k\beta_{i,j}\|u_iu_j\|_2^2$,
$\mathcal{G}_{a_n}(u)=\|u\|_{\lambda_{a_n}}^2-\mu_{a_n}\|u\|_{4}^4$
and
$$
\widetilde{\mathcal{H}}_{\gamma}=\prod_{s=1}^{s^*}\bigg((\prod_{i=a_{s-1}+1}^{a_s}\mathcal{H}_{i})\backslash\{\overrightarrow{0}\}\bigg)
\times\bigg((\prod_{i=a_{s^*}+1}^{k-\gamma+1}\mathcal{H}_i)\backslash\{\overrightarrow{0}\}\bigg)
\times\bigg(\prod_{i=k-\gamma+2}^{k}(\mathcal{H}_{i}\backslash\{0\})\bigg).
$$
Let
\begin{eqnarray*}
\mathcal{C}_{\mathcal{N}_{\gamma}}=\inf_{\mathcal{N}_{\gamma}}\mathcal{E}(\overrightarrow{u}).
\end{eqnarray*}
Then $\mathcal{C}_{\mathcal{N}_{\gamma}}$ is nonnegative and well defined. Since all $s_{th}$ inner-couplings are positive, it is standard to show that $\mathcal{C}_{\mathcal{N}_{\gamma}}\leq\sum_{j=1}^k\mathcal{E}_j(w_j)$.
Thus,
by arguments similar to those in the proof of Lemma~\ref{lemn0002} for $\gamma<k$ and in the proof of Lemma~\ref{lemn0001} for $\gamma=k$, there exists a $(PS)$ sequence $\{\overrightarrow{u}_n\}$ at the least energy value $\mathcal{C}_{\mathcal{N}_{\gamma}}$. Moreover, any positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{N}_{\gamma}$ is a ground state with the Morse index $\gamma$. Thus, it is sufficient to show that there exists a positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{N}_{\gamma}$. Recall that
\begin{eqnarray*}
\mathbf{A}_{d^\varsigma}^\varsigma=[\Theta_{t,s}^\varsigma]_{t,s=1,2,\cdots,d^\varsigma}
\end{eqnarray*}
is the $\varsigma_{th}$ decomposition. Here,
\begin{eqnarray*}
\Theta_{t,s}^\varsigma=[\Theta_{i,j}^{\varsigma-1}]_{(i,j)\in\mathcal{K}_{t,s,\mathbf{a}^\varsigma_{d^\varsigma}}}
\end{eqnarray*}
and $0\leq\varsigma\leq\tau$,
\begin{eqnarray*}
\mathcal{K}_{t,s,\mathbf{a}^\varsigma_{d^\varsigma}}=(a^\varsigma_{t-1}, a^\varsigma_{t}]_{\mathbb{N}}\times(a^\varsigma_{s-1}, a^\varsigma_{s}]_{\mathbb{N}}
\end{eqnarray*}
with $\mathbf{a}^\varsigma_{d^\varsigma}=(a^\varsigma_0,a^\varsigma_1,\cdots,a^\varsigma_{d^\varsigma})$, $(a^\varsigma_{t-1}, a^\varsigma_{t}]_{\mathbb{N}}=(a^\varsigma_{t-1}, a^\varsigma_{t}]\cap\mathbb{N}$ and $0=a^\varsigma_0<a^\varsigma_1<\cdots<a^\varsigma_{d^\varsigma-1}<a^\varsigma_{d^\varsigma}=d^{\varsigma-1}$. Since the eventual block decomposition $\mathbf{A}_{d^\tau}^\tau$ has the degree $m=1$, by the iteration assumptions, in every $\Theta_{s,s}^\varsigma$ there exists a ground state $\overrightarrow{u}_{s,\varsigma}$. Moreover, by estimates similar to those in Lemma~\ref{lem0003}, the least energy value of $\overrightarrow{u}_{s,\varsigma}$ is strictly less than the sum of the least energy values of $\overrightarrow{u}_{i,\varsigma-1}$ for $i\in(a^\varsigma_{s-1}, a^\varsigma_{s}]_{\mathbb{N}}$. Since all eventual block decompositions have the degree $m=1$, this fact also holds for all other eventual block decompositions. Thus, in passing to a limit, if the $(PS)$ sequence $\{\overrightarrow{u}_n\}$ at the least energy value $\mathcal{C}_{\mathcal{N}_{\gamma}}$ splits into several blocks and some of them vanish at infinity, then the smallest energy value is generated by the sum of the least energy values of ground states, denoted by $\overrightarrow{u}_1^*$ and $\overrightarrow{u}_2^*$, in the $(s,s)$ blocks of the following decomposition
\begin{eqnarray*}
\widetilde{\mathbf{A}}=\left(\aligned C_{1,1}\quad C_{1,2}\\
C_{1,2}\quad C_{2,2}\endaligned\right),
\end{eqnarray*}
where $\widetilde{\mathbf{A}}$ is the second-to-last decomposition of an optimal block decomposition. Since all eventual block decompositions have the degree $m=1$, using $\overrightarrow{u}_1^*$ and $\overrightarrow{u}_2^*$ as basic elements to construct test functions, as in Lemma~\ref{lem0003}, yields that $\mathcal{C}_{\mathcal{N}_{\gamma}}$ is strictly less than the sum of the least energy values of $\overrightarrow{u}_1^*$ and $\overrightarrow{u}_2^*$. Thus, applying the Lions lemma and the Sobolev embedding theorem, as in the proofs of Propositions~\ref{prop0001} and \ref{prop0003}, yields that the $(PS)$ sequence $\{\overrightarrow{u}_n\}$ at the least energy value $\mathcal{C}_{\mathcal{N}_{\gamma}}$ does not split in such a way that some blocks vanish at infinity in passing to a limit. It follows that there exists a minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{N}_{\gamma}$. By the Harnack inequality, there exists a positive minimizer of $\mathcal{E}(\overrightarrow{u})$ on $\mathcal{N}_{\gamma}$.
In the purely attractive case, since the $\{\widehat{\rho}_{ij,l}\}$, given by \eqref{eqnew3344}, are nonincreasing in $k$, the existence of ground states can also be obtained by iterating the arguments of Propositions~\ref{prop0014} and \ref{prop0005} from $3$ to $k$, under similar assumptions on $\lambda_j$ and $\beta_{i,j}$.
$(2)$\quad For $(2)$ of Theorem~\ref{thm0002}, as in the proof of Proposition~\ref{prop0002}, we assume, to the contrary, that \eqref{eqn0001} has a ground state $\overrightarrow{u}_\delta$ under the assumptions of $(2)$ of Theorem~\ref{thm0002} for $\delta>0$ sufficiently small.
We define functionals as follows:
\begin{eqnarray*}\label{eqn0004}
\mathcal{E}_{s}(\overrightarrow{u})=\sum_{j=a_{s-1}+1}^{a_s}(\frac{1}{2}\|u_j\|_{\lambda_j}^2
-\frac{1}{4}\mu_j\|u_j\|_{4}^4)-\frac12\sum_{i\not=j,(i,j)\in\mathcal{K}_{s,s,\mathbf{a}_d}}\beta_{i,j}\|u_iu_j\|_2^2
\end{eqnarray*}
for $s=1,2,\cdots,s_0$ and $\mathcal{E}_{a_s}(u)=\frac{1}{2}\|u\|_{\lambda_{a_s}}^2-\frac{\mu_{a_s}}{4}\|u\|_{4}^4$
for $s=s_0+1,\cdots,m$. We define the corresponding Nehari manifolds as follows:
\begin{eqnarray*}
\mathcal{N}_s=\{\overrightarrow{u}\in\prod_{j=a_{s-1}+1}^{a_s}(\mathcal{H}_j\backslash\{0\})\mid (\mathcal{G}_{a_{s-1}+1,s}(\overrightarrow{u}), \cdots, \mathcal{G}_{a_s,s}(\overrightarrow{u}))=\overrightarrow{0}\}
\end{eqnarray*}
with
\begin{eqnarray*}
\mathcal{G}_{j,s}(\overrightarrow{u})=\|u_j\|_{\lambda_j}^2-\mu_j\|u_j\|_{4}^4
-\sum_{i=a_{s-1}+1,i\not=j}^{a_s}\beta_{i,j}\|u_iu_j\|_2^2
\end{eqnarray*}
for $s=1,2,\cdots,s_0$ and
\begin{eqnarray}\label{eqn0034}
\mathcal{M}_{a_s}=\{\overrightarrow{u}\in\mathcal{H}_{a_s}\backslash\{0\}\mid \mathcal{Q}_{a_s}(u):=\|u\|_{\lambda_{a_s}}^2-\mu_{a_s}\|u\|_{4}^4=0\}
\end{eqnarray}
for $s=s_0+1,\cdots,m$. Let
\begin{eqnarray*}
\mathcal{C}_{\mathcal{N}_{s}}=\inf_{\mathcal{N}_{s}}\mathcal{E}_{s}(\overrightarrow{u})
\quad\text{and}\quad
\mathcal{C}_{\mathcal{M}_{a_s}}=\inf_{\mathcal{M}_{a_s}}\mathcal{E}_{a_s}(u).
\end{eqnarray*}
Then $\mathcal{C}_{\mathcal{N}_{s}}$ and $\mathcal{C}_{\mathcal{M}_{a_s}}$ are all well defined and nonnegative.
As in Remark~\ref{rmk0001}, since $\beta_{i,j}>0$ in $\mathcal{E}_{l}(\overrightarrow{u})$ for all $(i,j)\in\{i\not=j,(i,j)\in\mathcal{K}_{l,l,\mathbf{a}_d}\}$ and all $1\leq l\leq s_0$,
\begin{eqnarray}\label{eqn0008}
\mathcal{C}_{\mathcal{N}_l}\leq\sum_{j=a_{l-1}+1}^{a_l}\mathcal{E}_j(w_j)-\frac{(1+o_\delta(1))}{2}
\sum_{(i,j)\in\mathcal{K}_{l,l,\mathbf{a}_d};i\not=j}\beta_{i,j}\|w_iw_j\|_2^2
\end{eqnarray}
for $1\leq l\leq s_0$.
On the other hand, by similar calculations as for \eqref{eqnew9997},
\begin{eqnarray}\label{eqn0009}
\mathcal{C}_{\mathcal{N}}\geq\sum_{j=1}^k\mathcal{E}_j(w_j)-\frac{(1+o_\delta(1))}{2}
\sum_{s,t=1}^{d}\sum_{(i,j)\in\mathcal{K}_{s,t,\mathbf{a}_d};i\not=j}\beta_{i,j}\|u_{i,\delta}u_{j,\delta}\|_2^2.
\end{eqnarray}
It follows from $t_{max,-}<t_{min,+}$ and $t_{0}< t_{min,int,+}$ that
\begin{eqnarray}\label{eqn0012}
\|u_{i,\delta}u_{j,\delta}\|_2^2=o_\delta(1)\quad\text{for all }(i,j)\in\mathcal{K}_{t,s,\mathbf{a}_m}, i\not=j\quad\text{and}\quad t\not=s.
\end{eqnarray}
By Lions' lemma and the Sobolev embedding theorem, there exists $\{y_{j,\delta}\}\subset\mathbb{R}^N$ such that $u_{j,\delta}(x+y_{j,\delta})\to w_j$ strongly in $\mathcal{H}_j$ as $\delta\to0$ up to a subsequence.
Applying the Moser iteration and the elliptic estimates, as that used in the proof of Lemma~\ref{lem0002}, yields that $u_{j,\delta}(x+y_{j,\delta})\to w_j$ strongly in $L^\infty(\mathbb{R}^N)$ as $\delta\to0$ up to a subsequence. Without loss of generality, $y_{j,\delta}$ can be chosen to be the maximum point of $u_{j,\delta}$. By a similar argument as for \eqref{eqnew0002}, it is standard to show that
\begin{eqnarray*}
\mathcal{C}_{\mathcal{N}}\leq\sum_{l=1}^{s_0}\mathcal{C}_{\mathcal{N}_{l}}+\sum_{s=s_0+1}^d\mathcal{C}_{\mathcal{M}_{a_s}}.
\end{eqnarray*}
Thus, by a similar calculation as for \eqref{eqnew9987},
\begin{eqnarray}\label{eqnew3355}
\sum_{s,t=1;s<t}^{d}\sum_{(i,j)\in\mathcal{K}_{s,t,\mathbf{a}_m}}\beta_{i,j}\|u_{i,\delta}u_{j,\delta}\|_2^2\geq0
\end{eqnarray}
for $\delta>0$ sufficiently small. It follows from \eqref{eqn0008} and \eqref{eqn0009} that
\begin{eqnarray}\label{eqn0011}
\sum_{l=1}^{s_0}\sum_{(i,j)\in\mathcal{K}_{l,l,\mathbf{a}_d};i\not=j}\widehat{\beta}_{i,j}\|u_{i,\delta}u_{j,\delta}\|_2^2\geq
\sum_{l=1}^{s_0}\sum_{(i,j)\in\mathcal{K}_{l,l,\mathbf{a}_d};i\not=j}\widehat{\beta}_{i,j}\|w_iw_{j}\|_2^2+o_{\delta}(1).
\end{eqnarray}
Thus, $y_{i,\delta}-y_{j,\delta}=y_{ij}+o_\delta(1)$ and
\begin{eqnarray}\label{eqn0010}
1\lesssim\|u_{i,\delta}u_{j,\delta}\|_2^2\quad\text{for all }(i,j)\in\mathcal{K}_{l,l,\mathbf{a}_d}, i\not=j\text{ and all }l=1,2,\cdots,s_0.
\end{eqnarray}
Let $F(\mathbf{y})=\sum_{s=1}^{d}\sum_{(i,j)\in\mathcal{K}_{s,s,\mathbf{a}_d};i\not=j}\beta_{i,j}\|w_iw_{j,y_{ij}}\|_2^2$. Since $w_j(x)$ is strictly decreasing in $|x|$, by an argument similar to that used for \eqref{eqnew6666}, $\nabla F(\mathbf{y})=0$ if and only if $\mathbf{y}=\mathbf{0}$. Thus, by \eqref{eqn0011}, $y_{ij}=0$ for all $(i,j)\in\mathcal{K}_{l,l,\mathbf{a}_d}$ with $i\not=j$ and all $l=1,2,\cdots,s_0$. Thus, without loss of generality, we assume $y_{i,\delta}=y_{j,\delta}=y_{l,\delta}$ for all $(i,j)\in\mathcal{K}_{l,l,\mathbf{a}_d}$ with $i\not=j$ and all $l=1,2,\cdots,s_0$ for $\delta>0$ sufficiently small. By \eqref{eqn0012} and the Lebesgue dominated convergence theorem, $|y_{l,\delta}-y_{l',\delta}|\to+\infty$ for all $l,l'=1,2,\cdots,d$ with $l\not=l'$. For the sake of simplicity, we denote $y_{l,\delta}-y_{l',\delta}$ by $y_{ll',\delta}$. Then, by arguments similar to those for \eqref{eqnew9994} and \eqref{eqnew9993},
\begin{eqnarray}
&&\sum_{s,t=1;s<t}^{d}\sum_{(i,j)\in\mathcal{K}_{s,t,\mathbf{a}_d}}\beta_{i,j}\|u_{i,\delta}u_{j,\delta}\|_2^2\notag\\
&=&\sum_{s,t=1;s<t}^{d}\sum_{(i,j)\in\mathcal{K}_{s,t,\mathbf{a}_d}}\bigg(\sum_{\lambda_i=\lambda_j}
C_{i,j}\beta_{i,j}(\frac{1}{|y_{st,\delta}|})^{N-1-\alpha}e^{-2\sqrt{\lambda_i}|y_{st,\delta}|}\notag\\
&&+\sum_{\lambda_i\not=\lambda_j}C_{i,j}\beta_{i,j}(\frac{1}{|y_{st,\delta}|})^{N-1}
e^{-2\min\{\sqrt{\lambda_i},\sqrt{\lambda_j}\}|y_{st,\delta}|}\bigg).
\label{eqn0013}
\end{eqnarray}
Since
\begin{eqnarray*}
\min\{\sqrt{\lambda_{i_0}}, \sqrt{\lambda_{j_0}}\}\geq\min\{\sqrt{\lambda_{i_0'}}, \sqrt{\lambda_{j_0'}}\}
\end{eqnarray*}
for all $(i_0,j_0)$ and $(i_0',j_0')$ with $\beta_{i_0,j_0}>0>\beta_{i_0',j_0'}$, by $t_{max,-}<t_{min,+}$ and \eqref{eqn0013},
\begin{eqnarray*}
\sum_{s,t=1;s<t}^{d}\sum_{(i,j)\in\mathcal{K}_{s,t,\mathbf{a}_d}}\beta_{i,j}\|u_{i,\delta}u_{j,\delta}\|_2^2<0
\end{eqnarray*}
for $\delta>0$ sufficiently small, which contradicts \eqref{eqnew3355}. Hence, \eqref{eqn0001} has no ground states for $\delta>0$ sufficiently small under the conditions of $(2)$ of Theorem~\ref{thm0002}.
$(3)$\quad In the purely repulsive case, this result has been proved in \cite{LW05}. For the repulsive-mixed case, by regarding the blocks in optimal block decompositions as a whole, we can follow the argument as used in the proof of \cite[Theorem~3]{LW05} to show that the ground states of \eqref{eqnewnew0001} do not exist.
\hfill$\Box$
\section{Appendix: Proof of Lemma~\ref{lemn0010}}
\begin{proof}
When $\lambda_i\not=\lambda_j$, the Lemma is proved in \cite[Lemma~6]{LW05}. Thus, we assume that $\lambda_i=\lambda_j=\lambda$.
Let $M>0$ be sufficiently large but fixed such that the decay estimate~\eqref{eqnew9998} holds for $w_j$ with $|x|>M$.
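For later use, we record the form in which \eqref{eqnew9998} enters the computations below; up to multiplicative constants it gives, for $|x|>M$,
$$
w_j(|x|)^2\sim|x|^{1-N}e^{-2\sqrt{\lambda_j}|x|},
$$
which for $N=1$ simply reduces to $w_j(|x|)^2\sim e^{-2\sqrt{\lambda_j}|x|}$.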
We first consider the case $N=1$. Without loss of generality, we assume that $\lambda_i=\lambda_j=\lambda_1$ and $w_i=w_j=w_1$. Moreover, we also assume that $e_1=1$. Then, $Re_1=R$ and for $R>0$ sufficiently large,
\begin{eqnarray*}
&&\int_{-\infty}^{+\infty}w_1^2(|x|)w_1^2(|x-R|)dx\\
&=&\int_{-\infty}^{-M}w_1^2(|x|)w_1^2(|x-R|)dx+\int_{-M}^{M}w_1^2(|x|)w_1^2(|x-R|)dx\\
&&+\int_{M}^{R-M}w_1^2(|x|)w_1^2(|x-R|)dx+\int_{R-M}^{R+M}w_1^2(|x|)w_1^2(|x-R|)dx\\
&&+\int_{R+M}^{+\infty}w_1^2(|x|)w_1^2(|x-R|)dx.
\end{eqnarray*}
By symmetry,
\begin{eqnarray*}
\int_{-\infty}^{-M}w_1^2(|x|)w_1^2(|x-R|)dx=\int_{R+M}^{+\infty}w_1^2(|x|)w_1^2(|x-R|)dx
\end{eqnarray*}
and
\begin{eqnarray*}
\int_{-M}^{M}w_1^2(|x|)w_1^2(|x-R|)dx=\int_{R-M}^{R+M}w_1^2(|x|)w_1^2(|x-R|)dx.
\end{eqnarray*}
For $\int_{-M}^{M}w_1^2(|x|)w_1^2(|x-R|)dx$, we estimate by \eqref{eqnew9998} as follows:
\begin{eqnarray*}
\int_{-M}^{M}w_1^2(|x|)w_1^2(|x-R|)dx&\sim&\int_{-M}^{M}w_1^2(|x|)e^{-2\sqrt{\lambda_1}|x-R|}dx\\
&=&\int_{-M}^{M}w_1^2(|x|)e^{-2\sqrt{\lambda_1}(R-x)}dx\\
&=&e^{-2\sqrt{\lambda_1}R}\int_{-M}^{M}w_1^2(|x|)e^{2\sqrt{\lambda_1}x}dx\\
&\sim&e^{-2\sqrt{\lambda_1}R}
\end{eqnarray*}
as $R\to+\infty$.
For $\int_{-\infty}^{-M}w_1^2(|x|)w_1^2(|x-R|)dx$, we estimate by \eqref{eqnew9998} as follows:
\begin{eqnarray*}
\int_{-\infty}^{-M}w_1^2(|x|)w_1^2(|x-R|)dx&\sim&\int_{-\infty}^{-M}e^{-2\sqrt{\lambda_1}|x|}e^{-2\sqrt{\lambda_1}|x-R|}dx\\
&=&\int_{-\infty}^{-M}e^{2\sqrt{\lambda_1}x}e^{-2\sqrt{\lambda_1}(R-x)}dx\\
&=&e^{-2\sqrt{\lambda_1}R}\int_{-\infty}^{-M}e^{4\sqrt{\lambda_1}x}dx\\
&\sim&e^{-2\sqrt{\lambda_1}R}
\end{eqnarray*}
as $R\to+\infty$. For $\int_{M}^{R-M}w_1^2(|x|)w_1^2(|x-R|)dx$, we estimate by \eqref{eqnew9998} as follows:
\begin{eqnarray*}
\int_{M}^{R-M}w_1^2(|x|)w_1^2(|x-R|)dx&\sim&\int_{M}^{R-M}e^{-2\sqrt{\lambda_1}|x|}e^{-2\sqrt{\lambda_1}|x-R|}dx\\
&=&\int_{M}^{R-M}e^{-2\sqrt{\lambda_1}x}e^{-2\sqrt{\lambda_1}(R-x)}dx\\
&=&e^{-2\sqrt{\lambda_1}R}(R-2M)\\
&\sim& Re^{-2\sqrt{\lambda_1}R}
\end{eqnarray*}
as $R\to+\infty$. Thus, $\int_{-\infty}^{+\infty}w_1^2(|x|)w_1^2(|x-R|)dx\sim Re^{-2\sqrt{\lambda_1}R}$ as $R\to+\infty$. Without loss of generality, we assume that $e_1=(0,1)$ for $N=2$ and $e_1=(0,0,1)$ for $N=3$. Thus, for the cases $N=2,3$, by symmetry,
\begin{eqnarray*}
&&\int_{\mathbb{R}^N}w_1^2(|x|)w_1^2(|x-Re_1|)dx\\
&=&\int_{\{|x|\leq M\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx+\int_{\{|x-Re_1|\leq M\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx\\
&&+\int_{\{M<|x|\leq \frac{R}{2}\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx+\int_{\{M<|x-Re_1|\leq \frac{R}{2}\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx\\
&&+\int_{\{|x|>\frac{R}{2}\}\cap\{|x-Re_1|>\frac{R}{2}\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx\\
&=&2\int_{\{|x|\leq M\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx+2\int_{\{M<|x|\leq \frac{R}{2}\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx\\
&&+\int_{\{|x|>\frac{R}{2}\}\cap\{|x-Re_1|>\frac{R}{2}\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx
\end{eqnarray*}
for $R>0$ sufficiently large. For $\int_{\{|x|\leq M\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx$, we estimate by \eqref{eqnew9998} as follows:
\begin{eqnarray*}
\int_{\{|x|\leq M\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx&\sim&\int_{\{|x|\leq M\}}w_1^2(|x|)|x-Re_1|^{1-N}e^{-2\sqrt{\lambda_1}|x-Re_1|}dx\\
&\lesssim&R^{1-N}e^{-2\sqrt{\lambda_1}R}\int_{\{|x|\leq M\}}w_1^2(|x|)e^{2\sqrt{\lambda_1}|x|}dx
\end{eqnarray*}
as $R\to+\infty$. For $\int_{\{|x|>\frac{R}{2}\}\cap\{|x-Re_1|>\frac{R}{2}\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx$, we estimate by \eqref{eqnew9998} as follows:
\begin{eqnarray*}
&&\int_{\{|x|>\frac{R}{2}\}\cap\{|x-Re_1|>\frac{R}{2}\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx\\
&\sim&\int_{\{|x|>\frac{R}{2}\}\cap\{|x-Re_1|>\frac{R}{2}\}}(|x||x-Re_1|)^{1-N}e^{-2\sqrt{\lambda_1}|x|}e^{-2\sqrt{\lambda_1}|x-Re_1|}dx\\
&\lesssim&R^{1-N}e^{-\sqrt{\lambda_1}R}\int_{\{|x|>\frac{R}{2}\}}|x|^{1-N}e^{-2\sqrt{\lambda_1}|x|}dx\\
&=&R^{1-N}e^{-\sqrt{\lambda_1}R}\int_{\frac{R}{2}}^{+\infty}e^{-2\sqrt{\lambda_1}r}dr\\
&\sim&R^{1-N}e^{-2\sqrt{\lambda_1}R}
\end{eqnarray*}
as $R\to+\infty$. For $\int_{\{M<|x|\leq \frac{R}{2}\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx$, we denote $x=(x',x_1)$. Then,
\begin{eqnarray}\label{eqnew9654}
|Re_1-x|-R\sim-x_1+\frac{|x|^2}{2R}\quad\text{uniformly for }M<|x|\leq \frac{R}{2}.
\end{eqnarray}
Thus, by \eqref{eqnew9998} and $\frac{R}{2}\leq|x-Re_1|\leq\frac{3R}{2}$ uniformly for $M<|x|\leq \frac{R}{2}$,
\begin{eqnarray}
&&\int_{\{M<|x|\leq \frac{R}{2}\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx\notag\\
&\sim&\int_{\{M<|x|\leq \frac{R}{2}\}}(|x||x-Re_1|)^{1-N}e^{-2\sqrt{\lambda_1}|x|}e^{-2\sqrt{\lambda_1}|x-Re_1|}dx\notag\\
&\sim&R^{1-N}e^{-2\sqrt{\lambda_1}R}\int_{\{M<|x|\leq \frac{R}{2}\}}|x|^{1-N}e^{-2\sqrt{\lambda_1}(|x|+\frac{|x|^2}{2R}-x_1)}dx\label{eqnew9964}
\end{eqnarray}
We estimate the upper bound as follows:
\begin{eqnarray*}
&&\int_{\{M<|x|\leq \frac{R}{2}\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx\\
&\lesssim&R^{1-N}e^{-2\sqrt{\lambda_1}R}\int_{0}^{\frac{\pi}{2}}\int_{M}^{\frac{R}{2}}e^{-2\sqrt{\lambda_1}(r-r\cos\rho)}drd\rho\\
&\lesssim&R^{1-N}e^{-2\sqrt{\lambda_1}R}\int_{0}^{\frac{\pi}{2}}\int_{M}^{\frac{R}{2}}e^{-2\sqrt{\lambda_1}r(\sin\rho)^2}drd\rho\\
&=&R^{1-N}e^{-2\sqrt{\lambda_1}R}\int_{0}^{\frac{\pi}{2}}\int_{M}^{\frac{R}{2}}e^{-2\sqrt{\lambda_1}r\rho^2(\frac{\sin\rho}{\rho})^2}drd\rho\\
&\sim&R^{1-N}e^{-2\sqrt{\lambda_1}R}\int_{0}^{\frac{\pi}{2}}\int_{M}^{\frac{R}{2}}e^{-2\sqrt{\lambda_1}r\rho^2}drd\rho\\
&\sim&R^{1-N}e^{-2\sqrt{\lambda_1}R}\int_{M}^{\frac{R}{2}}r^{-\frac{1}{2}}dr\int_{0}^{+\infty}e^{-2\sqrt{\lambda_1}y^2}dy\\
&\sim&R^{\frac{3}{2}-N}e^{-2\sqrt{\lambda_1}R}.
\end{eqnarray*}
For the lower bound, we estimate it as follows:
\begin{eqnarray*}
&&\int_{\{M<|x|\leq \frac{R}{2}\}}w_1^2(|x|)w_1^2(|x-Re_1|)dx\notag\\
&\gtrsim&R^{1-N}e^{-2\sqrt{\lambda_1}R}\int_{\{M<|x|\leq \frac{R}{2}\}}|x|^{1-N}e^{-2\sqrt{\lambda_1}(|x|+\frac{|x|^2}{2R}-x_1)}dx\\
&\gtrsim&R^{1-N}e^{-2\sqrt{\lambda_1}R}\int_{0}^{\frac{\pi}{4}}(\sin\rho)^{N-2}\int_{M}^{\frac{R}{2}}e^{-4\sqrt{\lambda_1}r\cos^2\rho}drd\rho\\
&\gtrsim&R^{1-N}e^{-2\sqrt{\lambda_1}R}\int_{0}^{\frac{\pi}{4}}\sin\rho\int_{M}^{\frac{R}{2}}e^{-4\sqrt{\lambda_1}r\cos^2\rho}drd\rho\\
&\sim&R^{1-N}e^{-2\sqrt{\lambda_1}R}\int_{M}^{\frac{R}{2}}r^{-\frac{1}{2}}dr\int_{0}^{+\infty}e^{-2\sqrt{\lambda_1}y^2}dy\\
&\sim&R^{\frac{3}{2}-N}e^{-2\sqrt{\lambda_1}R}.
\end{eqnarray*}
The proof is thus completed.
\end{proof}
\begin{thebibliography}{999}
\bibitem{AC06}
A. Ambrosetti, E. Colorado, Bound and ground states of coupled nonlinear Schr\"odinger equations, {\it C. R. Math. Acad. Sci. Paris,} {\bf342}(2006), 453-458.
\bibitem{AC07}
A. Ambrosetti, E. Colorado, Standing waves of some coupled nonlinear Schr\"odinger equations, {\it J. Lond. Math. Soc.,} {\bf75} (2007), 67-82.
\bibitem{B13}
T. Bartsch, Bifurcation in a multicomponent system of nonlinear Schr\"odinger equations, {\it J. Fixed Point Theory Appl., } {\bf13}(2013), 37-50.
\bibitem{BDW10}
T. Bartsch, N. Dancer, Z.-Q. Wang, A Liouville theorem, a-priori bounds, and bifurcating branches of positive solutions for a nonlinear elliptic system, {\it Calc. Var. PDEs, } {\bf37}(2010), 345-361.
\bibitem{BJS16}
T. Bartsch, L. Jeanjean, N. Soave, Normalized solutions for a system of coupled cubic Schr\"odinger equations on $\mathbb{R}^3$, {\it J. Math. Pures Appl. (9),} {\bf106} (2016), 583-614.
\bibitem{BS17}
T. Bartsch, N. Soave, A natural constraint approach to normalized solutions of nonlinear Schr\"odinger equations and systems, {\it J. Funct. Anal.,} {\bf272} (2017), 4998-5037.
\bibitem{BS19}
T. Bartsch, N. Soave, Multiple normalized solutions for a competing system of Schr\"odinger equations, {\it Calc. Var. PDEs} {\bf58} (2019), Article 22.
\bibitem{BSW16}
J. Byeon, Y. Sato, Z.-Q. Wang, Pattern formation via mixed attractive and repulsive interactions for nonlinear Schr\"odinger systems, {\it J. Math. Pures Appl.,} {\bf106} (2016), 477-511.
\bibitem{BSW161}
J. Byeon, Y. Sato, Z.-Q. Wang, Pattern formation via mixed interactions for coupled Schr\"odinger equations under Neumann boundary condition, {\it J. Fixed Point Theory Appl.,} {\bf19} (2017), 559-583.
\bibitem{BTWW13}
H. Berestycki, S. Terracini, K. Wang, J. Wei, On entire solutions of an elliptic system modeling phase separations, {\it Adv. Math.,} {\bf243} (2013), 102-126.
\bibitem{BW06}
T. Bartsch, Z.-Q. Wang, Note on ground states of nonlinear Schr\"odinger systems, {\it J. Partial Differential Equations,} {\bf19} (2006), 200-207.
\bibitem{BWW07}
T. Bartsch, Z.-Q. Wang, J. Wei, Bound states for a coupled Schr\"odinger system, {\it J. Fixed Point Theory Appl., } {\bf2}(2007), 353-367.
\bibitem{CD10}
E. Crooks, E. Dancer, Highly nonlinear large-competition limits of elliptic systems, {\it Nonlinear Anal.,} {\bf73} (2010), 1447-1457.
\bibitem{CLLL04}
S.-M. Chang, C.-S. Lin, T.-C. Lin, W.-W. Lin, Segregated nodal domains of two-dimensional multispecies Bose-Einstein condensates, {\it Phys. D.,} {\bf196} (2004), 341--361.
\bibitem{CTV05}
M. Conti, S. Terracini, G. Verzini, Asymptotic estimates for the spatial segregation of competitive systems,
{\it Adv. Math.,} {\bf 195} (2005), 524-560.
\bibitem{CZ13}
Z. Chen, W. Zou, An optimal constant for the existence of least energy solutions of a coupled Schr\"odinger system, {\it Calc. Var. PDEs,} {\bf48} (2013), 695-711.
\bibitem{DW12}
E. Dancer, T. Weth, Liouville-type results for non-cooperative elliptic systems in a half-space, {\it J. Lond. Math. Soc.,} {\bf86} (2012), 111-128.
\bibitem{DWW10}
E. Dancer, J. Wei, T. Weth, A priori bounds versus multiple existence of positive solutions for a nonlinear Schr\"odinger system, {\it Ann. Inst. H. Poincar\'e Anal. Non Lin\'eaire,} {\bf27} (2010), 953-969.
\bibitem{GJ16}
T. Gou, L. Jeanjean, Existence and orbital stability of standing waves for nonlinear Schr\"odinger systems, {\it Nonlinear Anal.,} {\bf144} (2016), 10-22.
\bibitem{GJ18}
T. Gou, L. Jeanjean, Multiple positive normalized solutions for nonlinear Schr\"odinger systems, {\it Nonlinearity,} {\bf31} (2018), 2319-2345.
\bibitem{LW05}
T.-C. Lin, J. Wei, Ground state of $N$ coupled nonlinear Schr\"odinger equations in $\mathbb{R}^n$, $n\leq3$, {\it Comm. Math. Phys.,}
{\bf255} (2005), 629-653.
\bibitem{LW051}
T.-C. Lin, J. Wei, Spikes in two coupled nonlinear Schr\"odinger equations, {\it Ann. Inst. H. Poincar\'e Anal. Non Lin\'eaire,} {\bf22} (2005), 403-439.
\bibitem{LW06}
T.-C. Lin, J. Wei, Spikes in two-component systems of nonlinear Schr\"odinger equations with trapping potentials,
{\it J. Differential Equations,} {\bf229} (2006), 538-569.
\bibitem{LW08}
Z. Liu, Z.-Q. Wang, Multiple bound states of nonlinear Schr\"odinger systems, {\it Comm. Math. Phys.,} {\bf282} (2008), 721-731.
\bibitem{LW10}
Z. Liu, Z.-Q. Wang, Ground states and bound states of a nonlinear Schr\"odinger system, {\it Adv. Nonlinear Stud.,} {\bf10}(2010), 175-193.
\bibitem{NTTV10}
B. Noris, H. Tavares, S. Terracini, G. Verzini, Uniform H\"older bounds
for nonlinear Schr\"odinger systems with strong competition, {\it Comm. Pure
Appl. Math., } {\bf63} (2010), 267-302.
\bibitem{NTTV12}
B. Noris, H. Tavares, S. Terracini, G. Verzini, Convergence of minimax structures and continuation of critical points for singularly perturbed systems, {\it J. Eur. Math. Soc.,} {\bf14} (2012), 1245-1273.
\bibitem{PW13}
S. Peng, Z.-Q. Wang, Segregated and synchronized vector solutions for nonlinear Schr\"odinger systems, {\it Arch. Ration. Mech. Anal.,} {\bf208}(2013), 305-339.
\bibitem{R03}
Ch. R\"uegg et al., Bose-Einstein condensation of the triple states in the magnetic insulator TICuCI$_3$, {\it Nature,} {\bf423} (2003), 62-65.
\bibitem{S07}
B. Sirakov, Least energy solitary waves for a system of nonlinear Schr\"odinger equations in $\mathbb{R}^N$, {\it Comm. Math. Phys.,} {\bf 271} (2007), 199-221.
\bibitem{S15}
N. Soave, On existence and phase separation of solitary waves for nonlinear Schr\"odinger systems modelling simultaneous cooperation and competition, {\it Calc. Var. PDEs,} {\bf53} (2015), 689-718.
\bibitem{ST15}
N. Soave, S. Terracini, Liouville theorems and $1$-dimensional symmetry for solutions of an elliptic system modelling phase separation, {\it Adv. Math.,} {\bf279} (2015), 29-66.
\bibitem{ST16}
N. Soave, H. Tavares, New existence and symmetry results for least energy positive solutions of Schr\"odinger systems with mixed
competition and cooperation terms, {\it J. Differential Equations,} {\bf261} (2016), 505-537.
\bibitem{STTZ16}
N. Soave, H. Tavares, S. Terracini, A. Zilio, H\"older bounds and regularity of emerging free boundaries for strongly competing Schr\"odinger equations with nontrivial grouping, {\it Nonlinear Anal.,} {\bf138} (2016), 388-427.
\bibitem{SW15}
Y. Sato, Z.-Q. Wang, Least energy solutions for nonlinear Schr\"odinger systems with mixed attractive and repulsive
couplings, {\it Adv. Nonlinear Stud.,} {\bf15} (2015), 1-22.
\bibitem{SW151}
Y. Sato, Z.-Q. Wang, Multiple positive solutions for Schr\"odinger systems with mixed couplings, {\it Calc. Var. PDEs,} {\bf54} (2015), 1373-1392.
\bibitem{SZ15}
N. Soave, A. Zilio, Uniform bounds for strongly competing systems: the optimal Lipschitz case, {\it Arch. Ration. Mech. Anal.,} {\bf 218} (2015), 647-697.
\bibitem{TT12}
H. Tavares, S. Terracini, Sign-changing solutions of competition-diffusion elliptic systems and optimal partition problems, {\it Ann. Inst. H. Poincar\'e Anal. Non Lin\'eaire,} {\bf29} (2012), 279-300.
\bibitem{TT121}
H. Tavares, S. Terracini, Regularity of the nodal set of segregated critical configurations under a weak reflection law, {\it Calc. Var. PDEs,} {\bf45} (2012), 273-317.
\bibitem{TTVW11}
H. Tavares, S. Terracini, G. Verzini, T. Weth, Existence and nonexistence of entire solutions for non-cooperative cubic elliptic systems, {\it Comm. PDEs,} {\bf36} (2011), 1988-2010.
\bibitem{TV09}
S. Terracini, G. Verzini, Multipulse phases in k-mixtures of Bose-Einstein condensates,
{\it Arch. Ration. Mech. Anal.,} {\bf194} (2009), 717-741.
\bibitem{TW13}
H. Tavares, T. Weth, Existence and symmetry results for competing variational systems, {\it NoDEA,} {\bf20} (2013), 715-740.
\bibitem{W17}
Y. Wu, On a $K$-component elliptic system with the Sobolev critical exponent in high dimensions: the repulsive case, {\it Calc. Var. PDEs,} {\bf56} (2017), article 151.
\bibitem{W18}
Y. Wu, On the semiclassical solutions of a two-component elliptic system in $\mathbb{R}^4$ with trapping potentials and Sobolev critical exponent: the repulsive case, {\it Z. Angew. Math. Phys.,} {\bf69} (2018), article 111.
\bibitem{WW07}
J. Wei, T. Weth, Nonradial symmetric bound states for a system of coupled Schr\"odinger equations, {\it Atti Accad. Naz. Lincei Rend. Lincei Mat. Appl.,} {\bf18} (2007), 279-293.
\bibitem{WW08}
J. Wei, T. Weth, Radial solutions and phase separation in a system of two
coupled Schr\"odinger equations, {\it Arch. Ration. Mech. Anal., } {\bf190} (2008),
83-106.
\bibitem{WW081}
J. Wei, T. Weth, Asymptotic behaviour of solutions of planar elliptic
systems with strong competition, {\it Nonlinearity, } {\bf21} (2008), 305-317.
\bibitem{WWZ17}
Y. Wu, T.-F. Wu, W. Zou, On a two-component Bose-Einstein condensate with steep potential wells, {\it Ann. Math. Pura Appl.,} {\bf196} (2017), 1695-1737.
\bibitem{WZ19}
Y. Wu, W. Zou, Spikes of the two-component elliptic system in $\mathbb{R}^4$ with the critical Sobolev exponent, {\it Calc. Var. PDEs,} {\bf58} (2019), article 24.
\end{thebibliography}
\end{document}
\begin{document}
\title{A general framework for deriving integral preserving numerical methods for PDEs}
\begin{abstract}
A general procedure for constructing conservative numerical integrators for time dependent partial differential equations is presented. In particular,
linearly implicit methods preserving a time discretised version of the invariant are developed for systems of partial differential equations with polynomial nonlinearities. The framework is rather general and allows for an arbitrary number of dependent and independent variables with derivatives of any order. It is proved formally that second order convergence is obtained. The procedure is applied to a test case and numerical experiments are provided.
\end{abstract}
\section{Introduction}
Schemes that conserve geometric structure have been shown to be useful when studying the long time behaviour of dynamical systems.
Such schemes are sometimes called geometric or structure preserving integrators \cite{MR2221614,MR2132573}.
In this paper we shall mostly be concerned with the conservation of first integrals.
Although a presumption in this work is that the development of new and better integral preserving schemes is useful, we would still like to mention some situations where schemes with such properties are of importance. In the literature one finds several examples where stability of a numerical method is proved by directly using its conservative property; one example is the scheme developed for the cubic Schr\"{o}dinger equation in \cite{fei95nso}. Another application where the exact preservation of first integrals plays an important role is in the study of orbital stability of soliton solutions to certain Hamiltonian partial differential equations (PDEs), as discussed by Benjamin and coauthors \cite{benjamin72tso,benjamin72mef}.
For ordinary differential equations (ODEs) it is common to devise relatively general frameworks for structure preservation.
This is somewhat contrary to the usual practice for partial differential equations, where each equation under consideration normally requires a dedicated scheme.
But there exist certain fairly general methodologies that can be used for
developing geometric schemes also for PDEs. For example, through space discretisation of a Hamiltonian PDE one may obtain a system of Hamiltonian ODEs to which a geometric integrator may be applied. Another approach is to formulate the PDE in multi-symplectic form, and then apply a scheme which preserves a discrete version of this form, see \cite{MR2220764} for a review of this approach.
In this paper we consider methods for PDEs that are based on the discrete gradient method for ODEs. The discrete gradient method was perhaps first treated in a systematic way by Gonzalez \cite{MR1411343}, see also
\cite{MR2221614,MR1694701}.
For PDEs one may derive discrete gradients either for the abstract Cauchy problem, where the solution at any time is considered as an element of some infinite dimensional space, or one may semidiscretise the equations in space and then derive the corresponding discrete gradient for the resulting ODE system.
This last procedure has been elegantly presented in several articles by
Furihata, Matsuo and collaborators, see e.g.\ \cite{MR1727636,MR1852556,MR1815731,MR2313820,MR1848726,
MR1795452,MR1933890}, using the concept of discrete variational derivatives. See also the monograph \cite{fumabook}. The first part of this paper develops a similar framework that is rather general and allows for an arbitrary number of dependent and independent variables with derivatives of any order. The suggested approach does not require the equations to be discretised in space.
We consider a class of conservative schemes which are linearly implicit.
By linearly implicit we mean schemes which require the solution of precisely one linear system of equations in each time step. This is opposed to fully implicit schemes for which one typically applies an iterative solver that may require a linear system to be solved in every iteration.
For standard fully implicit schemes one would typically balance the iteration error in solving the nonlinear system with the local truncation error. However, for conservative schemes the situation is different since exact conservation of the invariant requires that the nonlinear system is solved to machine precision. This work can be seen as a generalisation of ideas introduced in \cite[Section 6]{MR1848726}.
It is not, in general, easy to quantify exactly what gain in computational cost, if any, can be expected when replacing a fully implicit scheme with a linearly implicit one. For illustration we present an example where the KdV equation
\begin{equation} \label{eq:KdV}
u_t + u_{xxx}+(u^2)_x = 0
\end{equation}
is solved on a periodic domain using a fully implicit scheme
\begin{equation}\label{eq:kdvimp}
\frac{U^{n+1}-U^{n}}{\Delta t}+\frac{U_{xxx}^{n+1}+U_{xxx}^{n}}2+\left(\frac{(U^{n+1})^2+U^{n+1}U^{n}+(U^{n})^2}3\right)_x=0,
\end{equation}
and a linearly implicit scheme
\begin{equation}\label{eq:kdvlinin}
\frac{U^{n+2}-U^{n}}{2\Delta t}+\frac{U_{xxx}^{n+2}+U_{xxx}^{n}}2+\left(U^{n+1}\frac{U^{n+2}+U^{n+1}+U^{n}}3\right)_x=0,
\end{equation}
where $U^n(x)\approx u(x,t^n)=u(x,t^0+n\Delta t)$.
These schemes are derived in Section \ref{se:num}.
For space discretisation centered differences are used for both schemes.
Note that the linearly implicit scheme \eqref{eq:kdvlinin} has a multistep nature, but should not be confused with standard linear multistep methods. Furihata et al.\ sometimes use the term ``multiple points linearly implicit schemes'' to emphasise this fact.
The schemes are both second order in time, but in our example the linearly implicit multistep scheme has an error constant which is about 3-4 times larger than the fully implicit one-step scheme.
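To make the setting concrete, a minimal sketch of one time step of each scheme is given below. It is not our actual implementation: the domain length, grid size, time step and initial profile are placeholder values, dense linear algebra is used instead of a solver exploiting the penta-diagonal structure of the systems, and the helper function names are ours. The sketch only illustrates that the fully implicit scheme requires one linear solve per Newton iteration, whereas the linearly implicit scheme requires exactly one linear solve per step.
\begin{verbatim}
import numpy as np

# Illustrative parameters (placeholders, not the values used in our experiments)
L, M, dt = 40.0, 200, 0.01
dx = L / M
x = np.arange(M) * dx

def matrix_of(op):
    # dense matrix of a linear operator from its action on the basis vectors
    return np.column_stack([op(e) for e in np.eye(M)])

def Dx(v):      # centered first derivative, periodic
    return (np.roll(v, -1) - np.roll(v, 1)) / (2 * dx)

def Dxxx(v):    # centered third derivative, periodic
    return (np.roll(v, -2) - 2 * np.roll(v, -1)
            + 2 * np.roll(v, 1) - np.roll(v, 2)) / (2 * dx ** 3)

A1, A3, I = matrix_of(Dx), matrix_of(Dxxx), np.eye(M)

def step_fully_implicit(Un, tol=1e-12, maxit=50):
    # one step of the fully implicit scheme, solved by Newton iteration;
    # each iteration requires one linear solve
    Unew = Un.copy()
    for _ in range(maxit):
        F = ((Unew - Un) / dt + 0.5 * (Dxxx(Unew) + Dxxx(Un))
             + Dx((Unew ** 2 + Unew * Un + Un ** 2) / 3))
        J = I / dt + 0.5 * A3 + A1 @ np.diag((2 * Unew + Un) / 3)
        d = np.linalg.solve(J, -F)
        Unew += d
        if np.max(np.abs(d)) < tol:
            break
    return Unew

def step_linearly_implicit(Un, Un1):
    # one step of the linearly implicit scheme: exactly one linear solve
    A = I / (2 * dt) + 0.5 * A3 + A1 @ np.diag(Un1 / 3)
    b = Un / (2 * dt) - 0.5 * Dxxx(Un) - Dx(Un1 * (Un1 + Un) / 3)
    return np.linalg.solve(A, b)

# usage: a soliton-like profile as (placeholder) initial data; one fully
# implicit step is one possible way to bootstrap the two-step scheme
U0 = 6.0 / np.cosh(x - L / 3) ** 2
U1 = step_fully_implicit(U0)
U2 = step_linearly_implicit(U0, U1)
\end{verbatim}
For the fully implicit scheme to preserve the invariant exactly, the Newton iteration must be driven to (near) machine precision, which is what makes a comparison in terms of the number of linear solves meaningful.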
In Figure~\ref{fi:secondpic} we plot the global error versus the number of linear solves for the two
schemes. The linearly implicit scheme solves one linear system in each time step.
The fully implicit scheme, on the other hand, solves a linear system for each Newton iteration which is repeated to machine precision in each time step.
For the largest time step in our experiment this amounts to 561 linear solves per time step. The linear systems in each of the two cases
have the same matrix structure, they are both penta-diagonal, and we therefore assume that the cost of solving the linear system is approximately the same for both methods.
The $x$-axis in Figure~\ref{fi:secondpic} can thus be interpreted as a measure of the computational cost in each scheme.
The plot shows that for a given global error the linearly implicit scheme is computationally cheaper than the fully implicit scheme.
\begin{figure}
\caption{The global error versus the number of linear solves for the two schemes~\eqref{eq:kdvimp} and \eqref{eq:kdvlinin}.}
\label{fi:secondpic}
\end{figure}
There are situations in which the results from this example may be less relevant. For instance, the iteration method used in a fully implicit scheme may use approximate versions of the Jacobian for which faster solvers can be applied; therefore the cost of a linear solve may not be the same for the two types of schemes. For large time steps, both types of schemes are likely to encounter difficulties, but for slightly different reasons. The fully implicit scheme may experience slow or no convergence at all of the iteration scheme, whereas the linearly implicit scheme may become unstable for time steps over a certain threshold \cite{dahlbyowren}. For instance, in the case of the stiff ODE considered by Gonzalez and Simo \cite{gonzalez96ots} one will observe that the stability properties of the linearly implicit schemes are completely lost, whereas fully implicit conservative schemes behave remarkably well.
For large-scale problems, one may have situations in which iterative linear solvers are required and where one cannot afford to solve these systems to machine accuracy; in such cases the linearly implicit schemes are less useful.
In conclusion, we believe that which of the two types of schemes is preferable depends on the PDE and the circumstances under which it is to be solved.
The two schemes used in the example above have slightly different conservation properties.
The first one \eqref{eq:kdvimp} conserves the exact Hamiltonian
\begin{equation*}
\mathcal{H}[U^n]=\int_\Omega\left(\frac12(U^n_x)^2-\frac13(U^n)^3\right)\,\mathrm{d}x,
\end{equation*}
whereas the second scheme \eqref{eq:kdvlinin} conserves what we will define as the polarised Hamiltonian
\begin{equation*}
H[U^n,U^{n+1}]=\int_\Omega\left(\frac14\left((U^n_x)^2+(U^{n+1}_x)^2\right)-\frac16\left((U^n)^2U^{n+1}+(U^{n+1})^2U^n\right)\right)\,\mathrm{d}x.
\end{equation*}
Both of these functions are approximations to the true Hamiltonian: the first is a spatial approximation for a fixed time, and the second also includes an averaging over time.
The intention is that in both cases one can see the methods as exactly preserving a slightly perturbed first integral over very long times.
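As a simple consistency check, which is not needed in the derivation of the schemes, note that the polarised functional reduces to the exact one on the diagonal:
\begin{equation*}
H[u,u]=\int_\Omega\left(\frac14\left(u_x^2+u_x^2\right)-\frac16\left(u^2u+u^2u\right)\right)\,\mathrm{d}x
=\int_\Omega\left(\frac12u_x^2-\frac13u^3\right)\,\mathrm{d}x=\mathcal{H}[u],
\end{equation*}
so $H[U^n,U^{n+1}]$ differs from $\mathcal{H}[U^n]$ only through the difference between $U^n$ and $U^{n+1}$.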
That this seems to work for the chosen example is clearly seen in the first plot in Figure \ref{fi:Hplot} where we plot the error in $\mathcal{H}$ as a function of the solution obtained by the linearly implicit scheme \eqref{eq:kdvlinin}. We integrate to $t=1000$ and the error is plotted from $t=980$ to $t=1000$.
Notice that in this example there is no drift in the energy error.
The corresponding error plots for $\mathcal{H}$ as a function of the solution of \eqref{eq:kdvimp} and $H$ as a function of the solution of \eqref{eq:kdvlinin} are omitted since they are both preserved up to round-off error, and thus not that interesting.
The second plot in Figure \ref{fi:Hplot} shows how the error in $\mathcal{H}$ at the endpoint depends on $\Delta t$.
Empirically, we have the relation
\begin{equation*}
\mathcal{H}[U^n]=\mathcal{H}[U^0]+C(\Delta t)^2,
\end{equation*}
where $C$ is a constant that depends on the solution, but not on $n$.
See Section \ref{se:num} for another example that tests the long time structure preserving properties of these schemes.
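For reference, the discrete energies monitored in Figure~\ref{fi:Hplot} can be evaluated along the numerical solution as in the following sketch; this is one possible realisation on a uniform periodic grid, and the particular quadrature and difference approximations shown here are illustrative rather than a description of the exact choices made in our experiments.
\begin{verbatim}
import numpy as np

def Dx(v, dx):
    # centered first derivative on a uniform periodic grid
    return (np.roll(v, -1) - np.roll(v, 1)) / (2 * dx)

def H_exact(U, dx):
    # discrete analogue of  int( U_x^2/2 - U^3/3 ) dx
    return np.sum(0.5 * Dx(U, dx) ** 2 - U ** 3 / 3) * dx

def H_polarised(U0, U1, dx):
    # discrete analogue of the polarised Hamiltonian H[U^n, U^{n+1}]
    return np.sum(0.25 * (Dx(U0, dx) ** 2 + Dx(U1, dx) ** 2)
                  - (U0 ** 2 * U1 + U1 ** 2 * U0) / 6) * dx
\end{verbatim}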
\begin{figure}
\caption{The error in $\mathcal{H}$ along the solution obtained by the linearly implicit scheme~\eqref{eq:kdvlinin} (first plot), and the dependence of the error in $\mathcal{H}$ at the end point on $\Delta t$ (second plot).}
\label{fi:Hplot}
\end{figure}
This and similar examples show that there are situations where linearly implicit schemes can be a better choice than their fully implicit counterparts. Figure \ref{fi:secondpic} shows that the linearly implicit scheme is cheaper, while Figure \ref{fi:Hplot} shows that both solutions have similar long-term behaviour. Similar favourable behaviour of linearly implicit schemes can be found in the literature.
For the cubic Schr\"{o}dinger equation there are such conservative schemes based on time averaged versions of the Hamiltonian by Fei et al.\ \cite{fei95nso} and by Besse \cite{besse04ars}. Examples of methods for other PDEs can be found in the monograph \cite{fumabook} and the papers \cite{MR1360462} and \cite{MR1239931}.
In the next section we define the PDE framework that we use. Then, in Section~\ref{sec:dg}
we consider discrete gradient methods and how they can be applied to PDEs. We study in particular
the average vector field method by Quispel and McLaren \cite{MR2451073} and the discrete variational derivative method by
Furihata, Matsuo and coauthors
\cite{MR1727636,MR1852556,MR2313820,MR1848726,MR1795452,MR1933890}. We develop a framework that works for a rather general class of equations.
The key tools for developing linearly implicit methods for polynomial Hamiltonians are treated in Section~\ref{sec:linimp}, introducing the concept of polarisation. There is some freedom in this procedure, and we show through a rather general example term how the choice may significantly affect the stability of the scheme.
We defer the introduction of spatial discretisation until Section~\ref{se:spatial}. This is done mostly in order to keep a simpler notation, but also because our approach concerns conservative time discretisations and is essentially independent of the choice of spatial discretisation.
The last section offers some more details on the procedure for constructing schemes and we give some indication through numerical tests on the long term behaviour of the schemes.
\section{Notation and preliminaries}
We consider integral preserving PDEs written in the form
\begin{equation}
u_t=\mathcal{D}\frac{\delta\mathcal{H}}{\delta u},
\label{eq:hamileq}
\end{equation}
where
\begin{equation}
\mathcal{H}[u]=\int_{\Omega} \mathcal{G}[u]\,\mathrm{d} x=
\int_{\Omega} \mathcal{G}((u_J^{\alpha}))\,\mathrm{d} x,\quad \Omega\subseteq\mathbb{R}^d,
\label{eq:h}
\end{equation}
is the preserved quantity and $\mathcal{D}$ is a skew-symmetric operator that may depend on $u$. We write $\mathrm{d}x=\mathrm{d}x^1\cdots\mathrm{d}x^d$. We remark in passing that the class of PDEs which can be written in the form \eqref{eq:hamileq} contains the class of Hamiltonian PDEs, however we do not make the additional assumption that $\mathcal{D}$ satisfies the Jacobi identity \cite{olver93aol}.
By $(u_J^\alpha)$ we mean $u$ itself, which may be a vector
$u=(u^{\alpha})\in\mathbb{R}^m$, and all its partial derivatives with respect to all independent variables, $(x^1,\ldots,x^d)$, up to and including some degree $\nu$. Thus, $J$ is a multi-index; we let $J=(j_1,\ldots,j_{r})$, where $r=|J|$ is the number of components of $J$, and
\[
u_J^{\alpha} = \frac{\partial^{r}u^{\alpha}}{\partial x^{j_1}\cdots\partial x^{j_{r}}},\quad 0\leq r\leq\nu.
\]
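For instance, purely as an illustration of the notation, taking $d=2$ and $J=(1,1,2)$ gives $r=3$ and $u_J^{\alpha}=\frac{\partial^3 u^{\alpha}}{\partial x^1\partial x^1\partial x^2}$.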
As in \cite{olver93aol}, the square brackets in \eqref{eq:h} are used to indicate that a function depends also on the derivatives of its arguments with respect to the independent variables.
For example, in one dimension with $d=1$ and $m=1$ we have
\begin{equation*}
\mathcal{G}[u]=\mathcal{G}\left((u_J)\right)=
\mathcal{G}\left(u,\frac{\partial u}{\partial x},\dots,
\frac{\partial^\nu u}{\partial x^\nu}\right).
\end{equation*}
The variational derivative $\frac{\delta\mathcal{H}}{\delta u}$ is an $m$-vector depending on
$u_J^{\alpha}$ for $|J|\leq \nu'$ where $\nu'\geq \nu$. It may be defined through the relation
\cite[p. 245]{olver93aol}
\begin{equation}\label{eq:varder}
\int_{\Omega} \frac{\delta\mathcal{H}}{\delta u}\cdot \varphi\,\mathrm{d}x
=\left.\frac{\partial}{\partial \epsilon}\right|_{\epsilon=0}\mathcal{H}[u+\epsilon \varphi],
\end{equation}
for any sufficiently smooth $m$-vector of functions $\varphi(x)$. One may calculate
$\frac{\delta\mathcal{H}}{\delta u}$ by applying the Euler operator to $\mathcal{G}[u]$;
the $\alpha$-component is given by
\begin{equation} \label{eq:varderdef}
\left(\frac{\delta\mathcal{H}}{\delta u} \right)^{\alpha} = \mathbf{E}_{\alpha}\mathcal{G}[u],
\end{equation}
where
\begin{equation} \label{eq:euleropdef}
\mathbf{E}_{\alpha} = \sum_{|J|\leq\nu} (-1)^{|J|} D_J \frac{\partial}{\partial u_J^{\alpha}}
\end{equation}
so that the sum ranges over all $J$ corresponding to derivatives $u_J^{\alpha}$ featuring in
$\mathcal{G}$.
We have used total derivative operators,
\[
D_J=D_{j_1}\dots D_{j_r},\quad D_{i}=\sum_{\alpha,J} \frac{\partial u_J^{\alpha}}{\partial x^i}
\frac{\partial}{\partial u_J^{\alpha}}.
\]
In parts of the paper we refer to Hamiltonians as polynomial, or specifically quadratic. By this we mean that $\mathcal{H}$ is of a form such that $\mathcal{G}$ is a multivariate polynomial in the indeterminates $u_J^{\alpha}$, which in the quadratic case is of degree at most two. For example, the KdV equation \eqref{eq:KdV} has a polynomial Hamiltonian of degree 3
\begin{equation*}
\mathcal{H}[u]=\int_\Omega\left(\frac12u_x^2-\frac{1}{3}u^{3}\right)\,\mathrm{d}x.
\end{equation*}
In this case $\mathcal{G}=\mathcal{G}(u,u_{x})$ and thus $m=d=\nu=1$, and we get
\begin{align}
\frac{\delta\mathcal{H}}{\delta u}=\mathbf{E}\mathcal{G}((u_J))&=
\frac{\partial\mathcal{G} }{\partial u}-\frac{\partial}{\partial x}\frac{\partial\mathcal{G} }{\partial u_{x}}\label{eq:euler111}\\
&=-u^2-u_{xx}.
\end{align}
We always assume sufficient regularity in the solution and that the boundary conditions on $\Omega$ are such that the boundary terms vanish when doing integration by parts, for example periodic boundary conditions.
The operator $\mathcal{D}$ should be skew-symmetric with respect to the $L^2$ inner product
\begin{equation}
\int_{\Omega} (\mathcal{D}v)w\,\mathrm{d}x=-\int_{\Omega} v(\mathcal{D}w)\,\mathrm{d}x\quad
\forall\; v,w.
\label{eq:skew}
\end{equation}
For the KdV case we simply have $\mathcal{D}=\frac{\partial}{\partial x}$.
Furthermore, for \eqref{eq:hamileq} to be a true Hamiltonian system, $\mathcal{D}$ should induce a Poisson bracket on the space of functionals as described e.g.\ in \cite[Ch 7.1]{olver93aol}, meaning that the Jacobi identity must be satisfied. However, the approach presented here only requires $\mathcal{D}$ to be skew-symmetric so that the functional $\mathcal{H}$ is a conserved quantity. In the case that the PDE has more than one Hamiltonian formulation, we may choose which of the integrals to preserve. Our approach does not in general allow for the preservation of more than one Hamiltonian at the same time; for this, see the upcoming paper \cite{daowya2010}.
PDEs such as the wave equation are typically written with $u_{tt}$ appearing on the left hand side; in such cases we double the dimension of $u$ in order to apply the stated framework.
For complex equations one may do something similar, splitting either into a real and an imaginary part, or adding in the complex conjugate as a separate variable.
\section{Discrete gradient and variational derivative methods}\label{sec:dg}
Discrete gradient methods for ODEs were introduced by Gonzalez \cite{MR1411343}.
See also \cite{cell092}, \cite{cell093}, \cite{MR1694701}, and \cite[Chapter V.5]{MR2221614}.
Recently this idea has been applied to PDEs in the form of
the average vector field (AVF) method \cite{cell09} and in a somewhat more general setting, the discrete variational derivative (DVD) method.
We recall the definition of a discrete gradient as presented for ODEs. If $H:\mathbb{R}^M \rightarrow\mathbb{R}$, a discrete gradient is a continuous map $\overline{\nabla}:
\mathbb{R}^M\times \mathbb{R}^M\rightarrow\mathbb{R}^M$ such that for every $\bf u$ and
$\bf v$ in $\mathbb{R}^M$
\begin{align*}
H({\bf u})-H({\bf v})&= \overline{\nabla}H({\bf v},{\bf u})\cdot ({\bf u}-{\bf v}),\\
\overline{\nabla}H({\bf u},{\bf u}) &= \nabla H({\bf u}).
\end{align*}
Since an ODE system preserving $H$ can be written in the form
\[
\frac{\mathrm{d}\bf y}{\mathrm{d}t} = S({\bf y})\,\nabla H({\bf y})
\]
for some skew-symmetric matrix $S({\bf y})$, one obtains a conservative method simply by defining approximations ${\bf y}^n \approx {\bf y}(t^n)={\bf y}(t^0+n\Delta t)$ through the formula
\[
\frac{{\bf y}^{n+1}-{\bf y}^n}{\Delta t} = \tilde{S}\,\overline{\nabla}H({\bf y}^n,{\bf y}^{n+1}),
\]
where $\tilde{S}$, typically allowed to depend on ${\bf y}^n$ and ${\bf y}^{n+1}$, is some skew-symmetric matrix approximating the original $S$.
There are many possible choices of discrete gradients for a function $H$, see for instance
\cite{MR2221614,MR1694701}.
A particular example is the one used in the AVF method defined as
\[
\overline{\nabla}_{\mathrm{AVF}}H({\bf v},{\bf u}) = \int_0^1 \nabla H(\xi {\bf u}+(1-\xi){\bf v})\,\mathrm{d}\xi.
\]
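For concreteness, the two defining properties of a discrete gradient are easily checked numerically for the AVF choice. The following minimal Python sketch (ours, purely illustrative; the toy Hamiltonian and function names are not from the cited works) approximates the AVF integral by Gauss--Legendre quadrature, which is exact for the polynomial gradient used here, and verifies the discrete gradient property.
\begin{verbatim}
import numpy as np

def H(y):
    # toy Hamiltonian H(y) = 0.5*y[0]**2 + 0.25*y[1]**4
    return 0.5*y[0]**2 + 0.25*y[1]**4

def grad_H(y):
    return np.array([y[0], y[1]**3])

def avf_discrete_gradient(v, u, n_quad=5):
    # approximate int_0^1 grad_H(xi*u + (1-xi)*v) dxi by Gauss-Legendre quadrature
    nodes, weights = np.polynomial.legendre.leggauss(n_quad)
    xi, w = 0.5*(nodes + 1.0), 0.5*weights        # map from [-1,1] to [0,1]
    return sum(wi*grad_H(x*u + (1 - x)*v) for wi, x in zip(w, xi))

u, v = np.array([0.3, 1.2]), np.array([-0.7, 0.4])
g = avf_discrete_gradient(v, u)
print(H(u) - H(v), g @ (u - v))   # the two numbers agree: discrete gradient property
\end{verbatim}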
When applying this approach to PDEs the obvious strategy is to discretise the Hamiltonian
$\mathcal{H}[u]$ in space, replacing each derivative by a suitable approximation like e.g.\ finite differences,
to obtain a Hamiltonian $\mathcal{H}_d({\bf u})$ as for ODEs. Similarly, the skew-symmetric operator $\mathcal{D}$ is replaced by a skew-symmetric $M\times M$-matrix $\mathcal{D}_d$ to yield the scheme
\begin{equation} \label{eq:metdg}
\frac{{\bf u}^{n+1}-{\bf u}^n}{\Delta t} = \mathcal{D}_d\,\overline{\nabla}\mathcal{H}_d({\bf u}^n,{\bf u}^{n+1})
\end{equation}
for advancing the numerical solution ${\bf u}^n$ at time $t^n$ to ${\bf u}^{n+1}$ at time $t^{n+1}$.
Examples are worked out for several PDEs in \cite{cell09}.
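As a small illustration of how \eqref{eq:metdg} is advanced in practice (our own sketch, not taken from \cite{cell09}; the two-dimensional toy system, the structure matrix and the step size are illustrative choices), one implicit step can be solved with a standard nonlinear solver, after which the Hamiltonian is conserved up to the solver tolerance.
\begin{verbatim}
import numpy as np
from scipy.optimize import fsolve

def grad_H(y):                     # toy Hamiltonian H(y) = 0.5*y[0]**2 + 0.25*y[1]**4
    return np.array([y[0], y[1]**3])

def avf_grad(v, u, n_quad=5):      # AVF discrete gradient via Gauss-Legendre quadrature
    nodes, weights = np.polynomial.legendre.leggauss(n_quad)
    xi, w = 0.5*(nodes + 1.0), 0.5*weights
    return sum(wi*grad_H(x*u + (1 - x)*v) for wi, x in zip(w, xi))

S = np.array([[0.0, 1.0], [-1.0, 0.0]])          # constant skew-symmetric structure matrix
dt, y0 = 0.1, np.array([1.0, 0.5])

def residual(y1):                  # implicit equation for one step of the scheme
    return (y1 - y0)/dt - S @ avf_grad(y0, y1)

y1 = fsolve(residual, y0)
H = lambda y: 0.5*y[0]**2 + 0.25*y[1]**4
print(H(y0), H(y1))                # equal up to the solver tolerance
\end{verbatim}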
Furihata, Matsuo and coauthors present a whole framework for discretising PDEs in the variational setting in a series of papers, providing a discrete analogue of the continuous calculus, see for instance \cite{MR1727636}.
They discretise $\mathcal{G}$ to obtain $\mathcal{G}_d$ using difference operators,
and then the integral in $\mathcal{H}$ is approximated by a sum to yield $\mathcal{H}_d$.
Then they derive a discrete counterpart to the variational derivative, and finally state the difference scheme in a form which is a perfect
analogue to the Hamiltonian PDE system \eqref{eq:hamileq}, letting
\[
\frac{\mathbf{u}^{n+1}-\mathbf{u}^n}{\Delta t} = \mathcal{D}_d\frac{\delta\mathcal{H}_d}{\delta({\bf u}^n,{\bf u}^{n+1})}.
\]
The use of integration by parts in deriving the Euler operator is mimicked by similar summation by parts formulas in the discrete case.
Their discrete variational derivative is in fact rather similar to a discrete gradient, as it satisfies
the relation
\begin{equation} \label{eq:dvdprop}
\mathcal{H}_d({\bf u})-\mathcal{H}_d({\bf v}) =
\langle\frac{\delta\mathcal{H}_d}{\delta({\bf v},{\bf u})},{\bf u}-{\bf v}\rangle
\end{equation}
for the discrete $L^2$ inner product.
In the present paper, we focus on the time dimension in most of what follows; thus we shall defer the steps in which $\mathcal{H}$ and thereby
$\mathcal{G}$ are discretised in space. But \eqref{eq:dvdprop} makes perfect sense after removing the subscript $d$, replacing $\mathbf{u}$ and $\mathbf{v}$ by functions $u$ and $v$, and the discrete $L^2$ inner product by the continuous one.
A discrete variational derivative (DVD) is here defined to be any continuous function $\frac{\delta \mathcal{H}}{\delta (v,u)}$ of $(u^{(\nu)},v^{(\nu)})$ satisfying
\begin{align}
\mathcal{H}[u]-\mathcal{H}[v]&=\int_\Omega \frac{\delta \mathcal{H}}{\delta (v,u)}(u-v)\,\mathrm{d}x,\label{eq:dg1}\\
\frac{\delta \mathcal{H}}{\delta (u,u)}&=\frac{\delta\mathcal{H}}{\delta u}.\label{eq:dg2}
\end{align}
The integrator yields a continuous function $U^n:=U^n(x)\approx u(x,t^n)$ for each $t^n$ through
\begin{equation}\label{eq:dgm}
\frac{U^{n+1}-U^{n}}{\Delta t}=\mathcal{D}\frac{\delta \mathcal{H}}{\delta (U^n,U^{n+1})}.
\end{equation}
By combining \eqref{eq:dg1} and \eqref{eq:dgm} we see that the method preserves $\mathcal{H}$.
The AVF scheme can of course also be interpreted as a discrete variational derivative method where
\begin{equation}\label{eq:dgavf}
\frac{\delta \ensuremath{\mathcal{H}_{\mathrm{\scriptscriptstyle{AVF}}}} }{\delta (v,u)}=\int_{0}^{1}\frac{\delta\mathcal{H}}{\delta u}[\xi u+(1-\xi)v]\,\mathrm{d}\xi.
\end{equation}
The fact that \eqref{eq:dgavf} verifies the condition \eqref{eq:dg1} is seen from the elementary identity
\begin{equation} \label{eq:diffH}
\mathcal{H}[u]-\mathcal{H}[v] = \int_0^1 \frac{\rm d}{\mathrm{d}\xi}
\mathcal{H}[\xi u + (1-\xi) v]\,\mathrm{d}\xi.
\end{equation}
The derivative under the integral is written
\begin{align*}
\frac{\rm d}{\mathrm{d}\xi}\mathcal{H}[\xi u + (1-\xi) v]&=
\left.\frac{\rm d}{\mathrm{d}\varepsilon}\right|_{\varepsilon=0}\mathcal{H}[v+(\xi+\varepsilon) (u - v)]
\\[2mm]
& = \int_{\Omega} \frac{\delta \mathcal{H}}{\delta u}[\xi u + (1-\xi) v] (u-v)\,\mathrm{d}\mathbf{x}.
\end{align*}
Now substitute this into \eqref{eq:diffH} and interchange the integrals to obtain \eqref{eq:dg1}.
In most of the cited papers by Furihata, Matsuo and coauthors, the notion of a DVD method is less general than what we just presented, in the sense that the relation \eqref{eq:dvdprop} is not actually used as the defining equation for a discrete variational derivative. Instead the authors present a relatively general format that can be used for discretising $\mathcal{H}$; this format depends on the class of PDEs under consideration, and they work out the explicit expression for a particular discrete variational derivative. To give an idea of what the format may look like, we briefly review some points from \cite{MR1727636}, where PDEs of the form \eqref{eq:h} are considered with $d=m=\nu=1$ such that $\mathcal{G}=\mathcal{G}(u,u_x)$. Here $\mathcal{G}$ is assumed to be written as a finite sum
\begin{equation} \label{eq:japform}
\mathcal{G}(u,u_x) = \sum_{\ell} \alpha_{\ell} f_{\ell}(u)g_{\ell}(u_x),
\end{equation}
where $f_{\ell}$ and $g_{\ell}$ are differentiable functions.\footnote{In \cite{MR1727636} the expression is discretised in space and $g_{\ell}(u_x)$
is replaced by a product $g_{\ell}^+(\delta_k^+U_k)g_{\ell}^-(\delta_k^-U_k)$ where
$\delta_k^+$ and $\delta_k^-$ are forward and backward divided differences respectively.}
The form \eqref{eq:dg1} is then derived through
\begin{align*}
f_{\ell}(u)g_{\ell}(u_x)- f_{\ell}(v)g_{\ell}(v_x)
&= \frac{f_\ell(u)-f_\ell(v)}{u-v}\frac{g_\ell(u_x)+g_\ell(v_x)}{2}(u-v)\\&
+ \frac{g_\ell(u_x)-g_\ell(v_x)}{u_x-v_x}\frac{f_\ell(u)+f_\ell(v)}{2}(u_x-v_x)
\end{align*}
followed by an integration by parts on the second term. This technique can be extended in any number of ways to allow for more general classes of PDEs. For instance, one may allow for more factors in \eqref{eq:japform}, like
\[
\mathcal{G}[u]=\sum_{\ell}\alpha_{\ell}\prod_{J} g_{\ell,J}(\partial_J u)
\]
and repeated application of the formula $ab-cd=\frac{a+c}{2}(b-d)+\frac{b+d}{2}(a-c)$ to this equation combined with integration by parts will result in a discrete variational derivative.
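The algebraic identity invoked here is elementary; a one-line symbolic check (ours, purely illustrative) reads as follows.
\begin{verbatim}
import sympy as sp
a, b, c, d = sp.symbols('a b c d')
# verify ab - cd = (a+c)/2*(b-d) + (b+d)/2*(a-c)
print(sp.simplify((a + c)/2*(b - d) + (b + d)/2*(a - c) - (a*b - c*d)))  # prints 0
\end{verbatim}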
Schemes which are built on this particular type of discrete variational derivative will be called Furihata methods in the sequel, since this construction was first introduced
in~\cite{MR1727636}. Matsuo et al.\ extend the method
to complex equations in \cite{MR1848726}, while~\cite{MR1852556,MR2313820} derive methods for equations with second order time derivatives.
Other papers using the discrete variational derivative approach include \cite{MR1815731}, \cite{MR1933890}, and \cite{Yaguchi2009}.
The lack of a general formalism in the papers just mentioned makes it somewhat difficult to compare the approach to the AVF method and to characterise in which cases they lead to the same scheme. Taking for instance the KdV equation \eqref{eq:KdV} one easily finds that both approaches lead to the scheme
\eqref{eq:kdvimp}, however, considering for instance the Hamiltonian
\[
\mathcal{H}[u] = \int_\Omega u u_{x}^2\,\mathrm{d}x
\]
one would obtain two different types of discrete variational derivative in the Furihata method and the AVF method, that is
$\frac{\delta \mathcal{H}_{F}}{\delta (v,u)}\neq\frac{\delta \ensuremath{\mathcal{H}_{\mathrm{\scriptscriptstyle{AVF}}}}}{\delta (v,u)}$.
In some important cases, the Furihata method and the AVF method lead to the same scheme.
\begin{theorem}\label{thm:avfeqfur}
Suppose that the Hamiltonian $\mathcal{H}[u]$ is a linear combination of terms of either of the types
\begin{enumerate}
\item
$\int_\Omega\partial_{J}u\,\cdot\,\partial_{K}u\,\mathrm{d}x$ for multi-indices $J$ and $K$, or
\item
$\int_\Omega g(\partial_J u)\,\mathrm{d}x$ for differentiable $g: \mathbb{R}\rightarrow \mathbb{R}$.
\end{enumerate}
Then the AVF and the Furihata methods yield the same scheme.
\end{theorem}
\begin{proof} It suffices to check one general term of each type.
\begin{enumerate}
\item We find the variational derivative using~\eqref{eq:varder}
\begin{equation*}
\frac{\delta\mathcal{H}}{\delta u}=\left((-1)^{|J|}+(-1)^{|K|}\right)\partial_{J+K}u.
\end{equation*}
Inserting the variational derivative into~\eqref{eq:dgavf} gives
\begin{equation*}
\frac{\delta \ensuremath{\mathcal{H}_{\mathrm{\scriptscriptstyle{AVF}}}}}{\delta (v,u)}=\left((-1)^{|J|}+(-1)^{|K|}\right)\partial_{J+K}\left(\frac{u+v}{2}\right).
\end{equation*}
To find the discrete variational derivative of the Furihata method we compute
\begin{align*}
\mathcal{H}[u]-\mathcal{H}[v]&=\int_\Omega\partial_{J}u\cdot\partial_{K}u-\partial_{J}v\cdot\partial_{K}v\,\mathrm{d}x\\
&=\frac12\int_\Omega\left(\partial_{J}u-\partial_{J}v\right)\cdot\left(\partial_{K}u+\partial_{K}v\right)
+\left(\partial_{J}u+\partial_{J}v\right)\cdot\left(\partial_{K}u-\partial_{K}v\right)\,\mathrm{d}x.
\end{align*}
After integration by parts we get
\begin{equation*}
\frac{\delta \mathcal{H}_{F}}{\delta (v,u)}=\left((-1)^{|J|}+(-1)^{|K|}\right)\partial_{J+K}\left(\frac{u+v}{2}\right),
\end{equation*}
and we see that $\displaystyle{\frac{\delta \ensuremath{\mathcal{H}_{\mathrm{\scriptscriptstyle{AVF}}}}}{\delta (v,u)}=\frac{\delta \mathcal{H}_{F}}{\delta (v,u)}}$.
\item In this case we get
\begin{equation*}
\frac{\delta\mathcal{H}}{\delta u}=(-1)^{|J|}\partial_{J} g'(\partial_{J}u),
\end{equation*}
so that
\begin{align*}
\frac{\delta \ensuremath{\mathcal{H}_{\mathrm{\scriptscriptstyle{AVF}}}}}{\delta (v,u)}&=
(-1)^{|J|}\int_0^1 \partial_{J}g'(\partial_{J}(\xi u+(1-\xi)v))\,\mathrm{d}\xi
\\&=(-1)^{|J|}\partial_J\left(\frac{g(\partial_J u)-g(\partial_J v)}
{\partial_Ju - \partial_Jv}
\right).
\end{align*}
For the Furihata method one would here just compute
\begin{align*}
\mathcal{H}[u]-\mathcal{H}[v]=\int_\Omega \frac{g(\partial_J u)-g(\partial_J v)}
{\partial_J u-\partial_J v}\left(\partial_J u-\partial_Jv
\right)\,\mathrm{d}x
\end{align*}
and integration by parts yields $\displaystyle{\frac{\delta \mathcal{H}_{F}}{\delta (v,u)}=\frac{\delta \ensuremath{\mathcal{H}_{\mathrm{\scriptscriptstyle{AVF}}}}}{\delta (v,u)}}$.
\end{enumerate}
\end{proof}
\section{Linearly implicit difference schemes} \label{sec:linimp}
\subsection{Polarisation}\label{se:pol}
The key to constructing conservative linearly implicit schemes will be to portion out the nonlinearity over consecutive time steps. In effect, this means that we replace the original Hamiltonian $\mathcal{H}$ with an approximate one $H$. We shall call $H$ a polarisation
of $\mathcal{H}$ since its definition resembles the way an inner product is derived from a quadratic form. We shall see that the difference scheme resulting from such a polarised Hamiltonian will be a multistep method. This method will now preserve exactly $H$, as opposed to $\mathcal{H}$ for the methods in the previous section.
The requirements on $H$ are given in the following definition.
\begin{definition}[The polarised Hamiltonian]\label{de:polar}
Given a Hamiltonian $\mathcal{H}[u]$ the polarised Hamiltonian $H$ depends on $k$ arguments, and
is:
\begin{itemize}
\item Consistent
\begin{equation} \label{eq:consist}
H[u,u,\dots,u]=\mathcal{H}[u].
\end{equation}
\item Invariant under any cyclic permutation of the arguments
\begin{equation} \label{eq:cyclic}
H[w_1,w_2,\dots,w_{k}]=H[w_2,\dots,w_{k},w_1].
\end{equation}
\end{itemize}
\end{definition}
Polarisations exist for any Hamiltonian, as demonstrated by the example
\begin{equation*}
H[w_1,w_2,\dots,w_{k}] = \frac{1}{k}\left(\mathcal{H}[w_1]+\mathcal{H}[w_2]+\cdots+\mathcal{H}[w_{k}]\right).
\end{equation*}
We may impose the polarisation directly on the density $\mathcal{G}((u_J^{\alpha}))$, letting
\[
H[w_1,w_2,\ldots,w_{k}] = \int_\Omega G[w_1,w_2,\ldots,w_{k}]\,\mathrm{d}x.
\]
The conditions \eqref{eq:consist}, \eqref{eq:cyclic} are then inherited as
\[
G(u,u,\ldots,u)=\mathcal{G}(u),\qquad G[w_1,w_2,\dots,w_{k}]=G[w_2,\dots,w_{k},w_1].
\]
In Section \ref{se:lin} we will discuss the local order of consistency; it will then be convenient to make the stronger assumption that $\mathcal{H}$ and $H$ are at least twice Fr\'{e}chet differentiable.
To distinguish from the weaker notion of the variational (G\^{a}teaux) derivative, we replace
$\delta$ by $\partial$, noting that the first derivatives in the two definitions coincide when both exist.
We then find from \eqref{eq:consist} and \eqref{eq:cyclic} that the Fr\'{e}chet derivatives satisfy the relation
\begin{equation} \label{eq:firstder}
\frac{\partial\mathcal{H}}{\partial u}[u] = k\,\frac{\partial H}{\partial w_1}[u,\dots,u].
\end{equation}
For the second derivatives, we find the identity
\begin{equation} \label{eq:2dpolar}
\frac{\partial^2 H}{\partial w_1\partial w_j}
[u,\ldots,u] = \frac{\partial^2 H}{\partial w_1\partial w_{k+2-j}}[u,\ldots,u],\quad
j=2,\ldots, \lfloor k/2\rfloor + 1,
\end{equation}
which is used to compute
\begin{equation} \label{eq:secder}
\frac{\partial^2 \cal H}{\partial u^2}[u]=
\left\{
\begin{array}{ll}
\displaystyle{k\left(\frac{\partial^2 H}{\partial w_1^2}
+2\sum_{\ell=2}^{\frac{k+1}{2}}\frac{\partial^2 H}{\partial w_1\partial w_\ell}\right)},
&k\ \mbox{odd,} \\
\displaystyle{k\left(\frac{\partial^2 H}{\partial w_1^2}
+2\sum_{\ell=2}^{\frac{k}{2}}\frac{\partial^2 H}{\partial w_1\partial w_\ell}
+ \frac{\partial^2H}{\partial w_1\partial w_{\frac{k}{2}+1}} \right)},
&k\ \mbox{even,}
\end{array}
\right.
\end{equation}
all second derivatives on the right being evaluated at $[u,\dots,u]$.
\subsubsection{Polynomial Hamiltonians}
The polarisation of polynomial Hamiltonians will be key to constructing linearly implicit schemes.
We will now explain in detail how to do this, and we begin with an example term in the
integrand $\mathcal{G}[u]=\mathcal{G}(\partial_J u^{\alpha})$ depending on just one scalar indeterminate, namely $\mathcal{G}(z)=z^p$ where $z=\partial_J u^\alpha$ for some $(J, \alpha)$
and where $p\leq 4$.
This example is important not only as a simple illustration of the procedure, but also because terms of this type are common in many of the Hamiltonians found in physics. As we will see in the next section, it will be natural to use two arguments, $k=2$, in the polarised Hamiltonian. In fact, we need to restrict ourselves to the case of polynomial Hamiltonians for our technique to yield linearly implicit schemes. Then, by using $k \geq\lceil p/2\rceil$, we can obtain polarised Hamiltonians which are at most quadratic in each argument. We call these quadratic polarisations. We see that if $k=2$ then cyclic invariance is the same as symmetry, $G(u,v)=G(v,u)$, and
the possible quadratic polarisations for $p=2,3,4$ are respectively,
\begin{align}
p&=2:& G(u,v)&=\theta\frac{{u}^2+{v}^2}2+(1-\theta){u}{v} \,\label{eq:hh1},\quad\theta\in[0,1], \\
p&=3:& G(u,v)&={u}{v}\frac{{u}+{v}}{2},\label{eq:hh2}\\
p&=4:& G(u,v)&={u}^2{v}^2.\label{eq:hh3}
\end{align}
Note that for these monomials
both the third and the fourth degree cases are uniquely given, but the second degree case is not. In Section~\ref{se:stability}
we will consider how the choice of $\theta$ influences the stability of the scheme for a term which appears frequently in PDEs.
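The consistency and symmetry requirements for \eqref{eq:hh1}--\eqref{eq:hh3} are easily verified symbolically; the following small sketch (ours, purely illustrative) checks $G(u,u)=u^p$ and $G(u,v)=G(v,u)$ for the three cases.
\begin{verbatim}
import sympy as sp
u, v, theta = sp.symbols('u v theta')

G = {2: theta*(u**2 + v**2)/2 + (1 - theta)*u*v,
     3: u*v*(u + v)/2,
     4: u**2*v**2}

for p, g in G.items():
    consistent = sp.simplify(g.subs(v, u) - u**p) == 0
    symmetric  = sp.simplify(g - g.subs({u: v, v: u}, simultaneous=True)) == 0
    print(p, consistent, symmetric)          # prints True True for p = 2, 3, 4
\end{verbatim}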
We now consider the general case when $\mathcal{G}[u]$ is a multivariate polynomial of degree $p$ in the $N_\nu$ indeterminates $u_J^{\alpha}$. It suffices in fact to let $\mathcal{G}((u_J^{\alpha}))$ be a monomial, since each term can be treated separately. For a convenient notation, we rename the vector of indeterminates $(u_J^\alpha)$ using a single index, i.e.\ $u=(u_1,\ldots,u_{N_\nu})\in\mathbb{R}^{N_\nu}$, and write
\begin{equation} \label{eq:polar}
\mathcal{G}(u) = u_{i_1}u_{i_2}\dots u_{i_p}.
\end{equation}
One may use the following procedure for obtaining a quadratic polarisation from \eqref{eq:polar}
\begin{enumerate}
\item Group the factors of the right hand side of \eqref{eq:polar} into pairs
$z_r = u_{i_{2r-1}}u_{i_{2r}}$ and if $p$ is odd $z_{k}=u_{i_p}$. Set
\[
K(z_1,\ldots,z_{k})=z_1\cdots z_{k}.
\]
Note that there are potentially many ways of ordering the factors in \eqref{eq:polar} which give rise to different polarisations.
\item Symmetrise $K$ with respect to the cyclic subgroup of permutations. Letting the left shift permutation $\sigma$ be defined through
$\sigma K(z_1,\ldots,z_{k}) = K(z_2,\ldots,z_{k},z_1)$, we set
\[
G(z_1,\ldots,z_{k})=\frac{1}{k}\sum_{j=1}^{k} \sigma^{j-1}K(z_1,\ldots,z_{k}).
\]
The resulting $G$ is now both consistent \eqref{eq:consist} and cyclic \eqref{eq:cyclic}.
\end{enumerate}
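A direct transcription of this two-step procedure into computer algebra might look as follows (our own sketch; the pairing follows the order in which the factors are listed, and the symbol names are illustrative only).
\begin{verbatim}
import sympy as sp

def quadratic_polarisation(factors):
    """Polarise a monomial given as a list of sympy symbols [u_{i_1}, ..., u_{i_p}]."""
    p = len(factors)
    k = -(-p // 2)                                   # k = ceil(p/2)
    # one set of argument symbols per time level; factor i at level j is args[j][i]
    args = [[sp.Symbol(f'{f}_{j}') for f in factors] for j in range(k)]
    def K(perm):                                     # product of the pairs, pair r read at level perm[r]
        out = 1
        for r in range(k):
            out *= args[perm[r]][2*r]
            if 2*r + 1 < p:
                out *= args[perm[r]][2*r + 1]
        return out
    # average over the cyclic permutations of the k levels
    return sp.expand(sum(K([(j + s) % k for j in range(k)]) for s in range(k)) / k)

u, ux = sp.symbols('u u_x')
print(quadratic_polarisation([u, u, ux]))            # polarisation of u^2*u_x with k = 2
\end{verbatim}
The printed result, $\tfrac12(u_0^2\,u_{x,1}+u_1^2\,u_{x,0})$ in an obvious notation, is consistent and cyclic (here symmetric) as required.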
\subsection{Linearly implicit methods}\label{se:lin}
We may now define the discrete variational derivative for this polarised Hamiltonian as a generalisation of \eqref{eq:dg1} and \eqref{eq:dg2}. We let
\[
\frac{\delta H}{\delta(w_1,\dots,w_{k + 1})}
\]
be a continuous function of $k+1$ arguments, satisfying
\begin{equation} \label{eq:mdg1}
H[w_2,\dots,w_{k +1}]-H[w_1,\dots,w_{k}] =
\int_\Omega \frac{\delta H}{\delta(w_1,\dots,w_{k + 1})}(w_{k +1}-w_1)\,\mathrm{d}x,
\end{equation}
\begin{equation} \label{eq:mdg2}
k\,\frac{\delta H}{\delta(u,\dots,u)} = \frac{\delta\mathcal{H}}{\delta u}.
\end{equation}
Our standard example will be a generalisation of the AVF discrete variational derivative, which we define as
\begin{equation}\label{eq:tja}
\frac{\delta \ensuremath{H_{\mathrm{\scriptscriptstyle{AVF}}}} }{\delta(w_1,\ldots,w_{k +1})}=
\int_0^1 \frac{\delta H}{\delta w_1}[\xi w_{k + 1}+(1-\xi) w_1,w_2,\ldots,w_{k}]
\,\mathrm{d}\xi.
\end{equation}
Here the variational derivative on the right hand side, $\frac{\delta H}{\delta w_1}$, is defined as before, considering $H$ as a function of
its first argument only, leaving the others fixed.
Similar discrete variational derivatives could be derived in a number of different ways. In particular one finds that when the function $H$ is quadratic in all its arguments, the approach used in deriving the Furihata methods would lead to a discrete variational derivative which is identical to that of the AVF-method.
Now we define the polarised discrete variational derivative scheme and prove that, under some assumptions, this scheme is conservative, linearly implicit and has formal order of consistency two.
\begin{definition}
For a Hamiltonian PDE of the form \eqref{eq:hamileq}, let $H$ be a polarised Hamiltonian
of $k$ arguments, satisfying \eqref{eq:consist} and \eqref{eq:cyclic}, and suppose that approximations
$U^j$ to $u(j\Delta t,\cdot)$ are given for $j=0,\ldots,k-1$.
\begin{itemize}
\item
The polarised DVD (PDVD) scheme is given as
\begin{equation}\label{eq:scheme}
\frac{U^{n+k}-U^{n}}{k\Delta t}=k
D\frac{\delta H}{\delta(U^{n},\dots,U^{n+k})},\quad n\geq 0.
\end{equation}
\item
$D$ is a skew-symmetric operator approximating $\mathcal{D}$.
In \eqref{eq:scheme}, $D$ may depend on $U^{n+j}$, $1\leq j \leq k-1$,\footnote{$D$ should not depend on $U^{n+k}$ since otherwise the method would no longer be linearly implicit.}
and must be consistent,
\begin{equation}\label{eq:Dconsistent}
D[u,\ldots,u]=\mathcal{D}[u].
\end{equation}
$D$ is called cyclic if
\begin{equation} \label{eq:Dcyclic}
D[w_1,w_2,\ldots,w_{k-1}]=D[w_{2},\ldots,w_{k-1},w_1].
\end{equation}
\item
If the discrete variational derivative is given by
\eqref{eq:tja}, then the scheme is called the polarised AVF (PAVF) scheme.
\end{itemize}
\end{definition}
\begin{theorem}\label{th:cons}
The scheme \eqref{eq:scheme} is conservative
in the sense that
\begin{equation*}
H[U^{n+1},\ldots,U^{n+k}]=H[U^{0},\ldots,U^{k-1}],\quad\forall\, n\geq 0,
\end{equation*}
for any polarised Hamiltonian function $H$.
\end{theorem}
\begin{proof}
By induction, this is an immediate consequence of \eqref{eq:mdg1} together with the skew-symmetry of $D$.
\end{proof}
In a framework as general as the one presented here, it is not possible to present a general analysis for convergence or the order of the truncation error.
However, it seems plausible that a necessary condition to obtain a prescribed order of convergence can be derived through a formal Taylor expansion of the local truncation error; we refer to the resulting order as \emph{the formal order of consistency}.
\begin{theorem}\label{th:order}\hspace*{0 cm}
\begin{itemize}
\item The PAVF scheme has formal order of consistency one
for any polarised Hamiltonian, and skew-symmetric operator $D$ satisfying \eqref{eq:Dconsistent}.
\item If in addition \eqref{eq:Dcyclic} is satisfied, the scheme has formal order of consistency two.
\end{itemize}
\end{theorem}
\begin{proof}
We show that when the exact solution is substituted into \eqref{eq:scheme} where the
discrete variational derivative is given by \eqref{eq:tja}, then the residual is $\mathcal{O}(\Delta t^2)$. Throughout the proof we assume the existence of Fr\'{e}chet derivatives.
Writing, for any $j$, $u^j=u(\cdot,t^j)$ for the exact local solution at $t=t^j$, we get for the
left hand side
\begin{multline} \label{eq:ordlhs}
\frac{u^{n+k}-u^n}{k\Delta t}=\partial_t u^n + \frac{k\,\Delta t}{2}\partial_t^2 u^n +\mathcal{O}(\Delta t^2)
= \left.\mathcal{D}
\frac{\partial\mathcal H}{\partial u}\right|_{u^n}\\
+\frac{k\Delta t}{2}\left.\left(\frac{\partial\cal D}{\partial u}(\partial_t u^n)
\frac{\partial\cal H}{\partial u}+\mathcal{D}\frac{\partial^2\cal H}{\partial u^2}
(\cdot,\partial_t u^n)\right)\right|_{u^n} + \mathcal{O}(\Delta t^2).
\end{multline}
Next we expand \eqref{eq:tja} to get
\begin{multline*}
\frac{\delta \ensuremath{H_{\mathrm{\scriptscriptstyle{AVF}}}} }{\delta(u^n,\ldots,u^{n+k})} =
\int_0^1 \frac{\delta H}{\delta w_1}(\xi u^{n+k}+(1-\xi)u^n,
u^{n+1},\ldots,u^{n+k-1})\,\mathrm{d}\xi \\
=\left.\frac{\partial H}{\partial w_1}\right|_{\bf u}+
\Delta t\left(\frac{k}{2}\left.\frac{\partial^2 H}{\partial w_1^2}\right|_{\bf u}
+\sum_{j=2}^k (j-1)\left.\frac{\partial^2 H}{\partial w_1\partial w_j}\right|_{\bf u}
\right) (\cdot,\partial_t u^n) +\mathcal{O}(\Delta t^2)
\end{multline*}
where $\mathbf{u}=(u^n,\ldots,u^n)$.
Using first \eqref{eq:2dpolar} and then \eqref{eq:firstder}, \eqref{eq:secder} we find
\begin{equation} \label{eq:avfexpanded}
\frac{\delta \ensuremath{H_{\mathrm{\scriptscriptstyle{AVF}}}} }{\delta(u^n,\ldots,u^{n+k})} =
\frac{1}{k}\left.\frac{\partial\mathcal{H}}{\partial u}\right|_{u^n}+
\frac{\Delta t}{2}\left.\frac{\partial^2\mathcal{H}}{\partial u^2}\right|_{u^n} (\cdot,\partial_t u^n)+\mathcal{O}(\Delta t^2).
\end{equation}
Expanding $D$ we get
\begin{equation} \label{eq:Dexpand}
D[u^{n+1},\ldots,u^{n+k-1}]=\mathcal{D}[u^n] +
\Delta t\sum_{j=1}^{k-1} j \left.\frac{\partial D}{\partial w_j}\right|_{\mathbf{u}}(\partial_tu^n)+\mathcal{O}(\Delta t^2).
\end{equation}
If the cyclicity condition \eqref{eq:Dcyclic} holds for $D$, we can simplify \eqref{eq:Dexpand}
to obtain
\begin{multline} \label{eq:Dexpanded}
D[u^{n+1},\ldots,u^{n+k-1}]=\mathcal{D}[u^n] + \frac{k(k-1)\Delta t}{2}\left.\frac{\partial D}{\partial w_1}\right|_{\bf u}(\partial_t u^n)+ \mathcal{O}(\Delta t^2)\\
= \mathcal{D}[u^n] + \frac{k\Delta t}{2}
\left.\frac{\partial\cal D}{\partial u}\right|_{u^n}(\partial_t u^n)+\mathcal{O}(\Delta t^2).
\end{multline}
By substituting into \eqref{eq:scheme} the expressions \eqref{eq:ordlhs}, \eqref{eq:avfexpanded} and \eqref{eq:Dexpanded}, all terms of zeroth and first order cancel and we are left with a residual of $\mathcal{O}(\Delta t^2)$.
\end{proof}
\begin{theorem}\label{th:linimp}
Suppose that the polarised Hamiltonian $H$ is a quadratic polynomial in each of its arguments, then the PAVF scheme
is linearly implicit.
\end{theorem}
\begin{proof}
Since $H$ is at most quadratic in the first argument, it follows from
\eqref{eq:euleropdef} that $\frac{\delta H}{\delta w_1}$ is of degree at most 1 in its first argument,
and so we see from \eqref{eq:tja} that
\[
\frac{\delta H}{\delta(U^n,\ldots,U^{n+k})}
\]
is linear in $U^{n+k}$.
Since $D$ does not depend on $U^{n+k}$ we conclude that the scheme \eqref{eq:scheme} is linearly implicit.
\end{proof}
In some cases one wishes to have time-symmetric numerical schemes, see for example~\cite{MR2221614}. The numerical
scheme~\eqref{eq:scheme} will in general not be symmetric; however, it is not hard to modify the procedure to yield
symmetric schemes. One needs to polarise $\mathcal{H}$ such that $H$ is
invariant also when the order of its arguments is reversed; it turns out that this can be achieved by symmetrising over the dihedral group rather than just the cyclic one.
A similar adjustment must be made for
$D$.
We remark that one can construct explicit schemes by using $p$ time steps (as opposed to $k$) in $H$
such that $H$ becomes $p$-linear (as opposed to $k$-quadratic).
The rest of the procedure for the explicit case is the same as for the linearly implicit case.
Clearly, one expects that explicit schemes will have more severe stability restrictions
than the linearly implicit ones.
Since these multistep schemes need the $k$ previous values, they are
not self-starting. We have to provide the starting values $U^{1},\dots,U^{k-1}$ in addition to the initial value $U^0$. Usually these are computed using another sufficiently accurate conservative scheme, such as for example the AVF scheme. Another possibility is to use any standard integrator with a step size small enough that the starting values are accurate to machine precision.
\subsection{Stability}\label{se:stability}
In~\cite{dahlbyowren} we studied linearly implicit schemes for the cubic Schr\"{o}dinger equation, and found that two-step schemes can develop a two-periodic instability in time.
We also saw that this can be remedied by choosing a different polarisation of the Hamiltonian.
As it turns out, a common case is when the Hamiltonian is a univariate polynomial
of degree 4 or less. If we polarise this Hamiltonian using two time steps, its monomials give rise to the polarised terms~\eqref{eq:hh1}, \eqref{eq:hh2}, and~\eqref{eq:hh3}.
The third and fourth degree Hamiltonians are uniquely given.
However, in the second degree case we can choose $\theta\in[0,1]$
such that the scheme becomes unstable.
Since Hamiltonians of the type~\eqref{eq:hh1} appear in many important PDEs
it may be useful to
determine which $\theta\in[0,1]$ lead to unstable schemes.
We choose to study the test equation with Hamiltonian
\begin{equation*}
\mathcal{H}[u]=\frac12\int_\Omega u_x^2\,\mathrm{d}x,
\end{equation*}
and a skew-symmetric operator $\mathcal{D}$ which satisfies the eigenvalue equation
\begin{equation*}
\mathcal{D}\mathrm{e}^{\mathrm{i}kx}=\mathrm{i}\lambda_k\mathrm{e}^{\mathrm{i}kx}, \quad \lambda_k\in\mathbb{R}
\end{equation*}
for all integers $k$. The Airy equation
\begin{equation*}
u_t+u_{xxx}=0
\end{equation*}
is of this type with $\mathcal{D}=-\partial_x$ and $\lambda_k=-k$.
Other equations which have such terms in the
Hamiltonian include the nonlinear Schr\"{o}dinger equation, the linear wave equation, the KdV equation, and the Kadomtsev-Petviashvili equation.
Rewriting~\eqref{eq:hh1} gives
\begin{equation*}
H[v,u]=\frac12\int\left(\theta\frac{u_x^2+v_x^2}2+(1-\theta)u_{x}v_{x}\right) \,\mathrm{d}x.
\end{equation*}
The numerical scheme is then
\begin{equation}\label{eq:numairy}
\frac{U^{n+2}-U^{n}}{2\Delta t}=-\mathcal{D}\left(\theta\frac{U_{xx}^{n+2}+U_{xx}^{n}}2+(1-\theta)U_{xx}^{n+1}\right).
\end{equation}
Since this is a linear equation we can use von Neumann stability analysis~\cite{MR0042799}.
We insert the ansatz
\begin{equation*}
U^n(x)=\zeta^n\mathrm{e}^{\mathrm{i}kx}
\end{equation*}
to obtain the quadratic equation
\begin{equation}\label{eq:stabpol}
(1-\theta\tau\mathrm{i})\zeta^2-2(1-\theta)\tau\mathrm{i}\zeta-(1+\theta\tau\mathrm{i})=0,\quad\tau=\lambda_k\Delta t k^2.
\end{equation}
A necessary condition for stability is $|\zeta|\leq1$ which implies
\begin{equation*}
\theta\geq\frac12-\frac1{2\tau^2}.
\end{equation*}
Assuming that $\{\lambda_k k^2\}_{k\in\mathbb{Z}}$ is unbounded, we must require that
$\theta$ is chosen greater than or equal to $\frac12$.
This is exactly the condition found in~\cite{dahlbyowren} for the cubic Schr\"odinger equation. When $\theta\geq\frac12$
the roots of \eqref{eq:stabpol} satisfy $|\zeta_1|=|\zeta_2|=1$.
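The stability boundary can also be explored numerically; the short sketch below (ours, purely illustrative) computes the roots of \eqref{eq:stabpol} and their maximal modulus over a range of $\tau$ for the two values of $\theta$ used next.
\begin{verbatim}
import numpy as np

def max_root_modulus(theta, tau):
    # roots of (1 - i*theta*tau) z^2 - 2i*(1-theta)*tau z - (1 + i*theta*tau) = 0
    coeffs = [1 - 1j*theta*tau, -2j*(1 - theta)*tau, -(1 + 1j*theta*tau)]
    return max(abs(z) for z in np.roots(coeffs))

taus = np.linspace(0.1, 50.0, 500)
for theta in (0.5, 0.49):
    print(theta, max(max_root_modulus(theta, t) for t in taus))
# theta = 0.5 stays at modulus 1 (up to rounding); theta = 0.49 exceeds 1 for large tau
\end{verbatim}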
In Figure~\ref{fi:airystab} we solve the Airy equation with the scheme~\eqref{eq:numairy} using $\theta=0.5$ and $\theta=0.49$.
We use the initial value $u(x,0)=\sin(x)$, which, in the exact case, yields the traveling wave solution $u(x,t)=\sin(x+t)$.
The $\theta=0.49$ solution blows up in a few time steps, while the $\theta=0.50$ solution shows no signs of instability.
A discrete Fourier transform of the unstable solution shows that the instability starts at high frequencies, that is at large $k$, which
is consistent with the analysis above.
\begin{figure}
\caption{The numerical solution of the Airy equation with two different values of $\theta$. The two solutions are shown after $n=10^6$ time steps ($\theta=0.5$) and $n=115$ time steps ($\theta=0.49$). }
\label{fi:airystab}
\end{figure}
There might be cases where the scheme develops instabilities due to spurious modes no matter what polarisation one chooses. A full stability analysis of either the fully or the linearly implicit schemes has, to our knowledge, not been done. Standard linearisation techniques will usually lead to the conclusion that the schemes are neutrally stable. The nonlinear effects, however small, may still cause the scheme to be unstable. The tests we have done on a wide range of PDEs seem to indicate that the stability usually is very good; future work may shed light on this issue.
\section{Space discretisation}\label{se:spatial}
Until now we have mostly considered the situation where the PDE is discretised in time while remaining continuous in space. The methodology developed in the previous sections applies equally well to systems of ODEs.
Arguably, the most straightforward approach is simply to discretise the space derivatives in the Hamiltonian, for instance by finite differences. This leads to
\[
\mathcal{H}(u) \longrightarrow \mathcal{H}_d(\mathbf{u}).
\]
One also needs to replace the skew-symmetric operator $\mathcal{D}$ by a skew-symmetric matrix $\mathcal{D}_d$.
The fully implicit method reviewed in Section~\ref{sec:dg} is then just the discrete gradient method
\eqref{eq:metdg}, which conserves the discretised Hamiltonian $\mathcal{H}_d(\mathbf{u})$ in every time step.
We consider now finite difference approximations.
The function space to which the solution $u$ belongs, is replaced by a finite dimensional space with
functions on a grid indexed by $I_g\subset \mathbb{Z}^d$.
We use boldface symbols for these functions. Let there be $N_r$ grid points in the space direction $r$ so that
$\mathbf{N}=N_1\cdots N_d$ is the total number of grid points. We denote by $\mathbf{u}^{\alpha}$ the approximation
to $u^{\alpha}$ on such a grid, and by $\mathbf{u}$ the vector consisting of $(\mathbf{u}^1,\ldots,\mathbf{u}^m)$.
We will
replace each derivative $u_J^{\alpha}$ by a finite difference approximation $\delta_J\mathbf{u}^{\alpha}$, and replace the integral by a quadrature rule.
We then let
\begin{equation} \label{eq:discH}
\mathcal{H}_d(\mathbf{u}) = \sum_{\mathbf{i}\in I_g} b_{\mathbf{i}} (\mathcal{G}_d( (\delta_J {\bf u})))_{\mathbf{i}}\, \Delta x.
\end{equation}
Here $\Delta x$ is the volume (length, area) of a grid cell and ${\bf b}=(b_{\mathbf{i}})_{\mathbf{i}\in I_g}$ are the weights in the quadrature rule. The discretised $\mathcal{G}_d$ has the same number of arguments as $\mathcal{G}$, and each input argument as well as the output are vectors in $\mathbb{R}^{\mathbf{N}}$. We have here approximated the function $u_J^\alpha$ by a difference approximation
$\delta_J \mathbf{u}^{\alpha}$, where $\delta_J:\mathbb{R}^{\bf N}\rightarrow\mathbb{R}^{\bf N}$ is a linear map. As in the continuous case, we use square brackets, say $F[\mathbf{u}]$, as shorthand for a list of arguments involving difference operators $F[\mathbf{u}]=F(\mathbf{u},\delta_{J_1}\mathbf{u},\ldots,\delta_{J_q}\mathbf{u})$.
We compute
\begin{multline}
\mathcal{H}_d[\mathbf{u}] -\mathcal{H}_d[\mathbf{v}] =\\
\sum_{{\bf i}\in I_g} b_{\bf i} \sum_{J,\alpha}\int_0^1
\left(\frac{\partial\mathcal{G}_d}{\partial\delta_J\mathbf{u}^{\alpha}}\right)_{\bf i}[
\xi\mathbf{u}
+(1-\xi)\mathbf{v}] {\rm d}\xi
(\delta_J(\mathbf{u}^{\alpha}-\mathbf{v}^{\alpha}))_{\bf i}\;\Delta x\\=
\langle \frac{\delta\mathcal{H}_d}{\delta(\mathbf{v},\mathbf{u})}, \mathbf{u}-\mathbf{v}\rangle
\label{eq:discip}
\end{multline}
where
\begin{equation*}
\frac{\delta\mathcal{H}_d}{\delta(\mathbf{v},\mathbf{u})}
= \sum_{J,\alpha} \delta_J^T\,B\, \left(\int_0^1\frac{\partial\mathcal{G}_d}{\partial\mathbf{u}_J}
[\xi\mathbf{u}^{\alpha}+(1-\xi)\mathbf{v}^{\alpha}]\,\mathrm{d}\xi\right),
\end{equation*}
$B$ is the diagonal linear map $B=\mathrm{diag}(b_{\mathbf{i}}),\ \mathbf{i}\in I_g$, and
the discrete inner product used in \eqref{eq:discip} is
\[
\langle \mathbf{u}, \mathbf{v} \rangle = \sum_{\alpha,\mathbf{i}\in I_g} \mathbf{u}^{\alpha}_{\mathbf{i}} \mathbf{v}^{\alpha}_{\mathbf{i}}.
\]
Notice the resemblance between the operator acting on $\mathcal{G}_d$ in \eqref{eq:discip} and the continuous Euler operator in \eqref{eq:euleropdef}. Alternatively, suppose that
\begin{enumerate}
\item The spatially continuous method \eqref{eq:dgm} (using \eqref{eq:dgavf}) is discretised in space, using
a skew-symmetric $\mathcal{D}_d$ and a selected set of difference quotients $\delta_J$ for each derivative $\partial_J$.
\item Considering \eqref{eq:varderdef} and \eqref{eq:euleropdef}, the choice of discretisation operators $\delta_J$ used in
$\partial\mathcal{G}/\partial u_J^{\alpha}[u]$ is arbitrary, but the corresponding $D_J$ is replaced by the transpose
$\delta_J^T$.
\end{enumerate}
In this case, using the same $\mathcal{D}_d$, an identical set of difference operators in discretising $\mathcal{H}$ in \eqref{eq:discH}, and choosing all the quadrature weights $b_{\mathbf{i}}=1$, the resulting scheme is the same as that given by the procedure outlined in the two points above. That is, one gets the same scheme by either discretising the Hamiltonian in space first (and then deriving the scheme) or discretising the scheme in space first (and then deriving the conserved Hamiltonian).
Letting the $r$th canonical unit vector in $\mathbb{R}^d$ be denoted $\mathbf{e}_r$, we define the most used first order difference operators
\begin{align*}
(\delta_{r}^+\mathbf{u})_{\mathbf{i}}&=\frac{\mathbf{u}_{\mathbf{i}+\mathbf{e}_r}-\mathbf{u}_{\mathbf{i}}}{\Delta x_r}, \\
(\delta_{r}^-\mathbf{u})_{\mathbf{i}}&=\frac{\mathbf{u}_{\mathbf{i}}-\mathbf{u}_{\mathbf{i}-\mathbf{e}_r}}{\Delta x_r}, \\
(\delta_{r}^{\langle 1\rangle}\mathbf{u})_{\mathbf{i}}&=\frac{\mathbf{u}_{\mathbf{i}+\mathbf{e}_r}
- \mathbf{u}_{\mathbf{i}-\mathbf{e}_r}}{2\Delta x_r}.
\end{align*}
These difference operators are all commuting, but only the last one is skew-symmetric. However, for the first two one has the useful identities
\begin{equation*}
(\delta_{r}^+)^T = -\delta_{r}^ -,\qquad (\delta_{r}^-)^T = -\delta_{r}^ +.
\end{equation*}
Higher order difference operators $\delta_J$ can generally be defined by taking compositions of these operators, in particular we shall consider examples in the next section using the second and third derivative approximations
\[
\delta_r^{\langle 2\rangle} = \delta_r^+\circ\delta_r^-,\quad \delta_r^{\langle 3\rangle}=
\delta_r^{\langle 1\rangle}\circ
\delta_r^{\langle 2\rangle}.
\]
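For periodic boundary conditions these difference operators are circulant matrices; the small Python sketch below (ours, with illustrative grid parameters) constructs them and checks the transpose identities and the skew-symmetry of $\delta^{\langle 1\rangle}$ and $\delta^{\langle 3\rangle}$.
\begin{verbatim}
import numpy as np

def periodic_diffs(N, dx):
    I = np.eye(N)
    S = np.roll(I, 1, axis=1)                 # (S u)_i = u_{i+1} on a periodic grid
    d_plus  = (S - I)/dx                      # forward difference
    d_minus = (I - S.T)/dx                    # backward difference
    d1 = (S - S.T)/(2*dx)                     # central (skew-symmetric) first derivative
    d2 = d_plus @ d_minus                     # second derivative
    d3 = d1 @ d2                              # third derivative
    return d_plus, d_minus, d1, d2, d3

dp, dm, d1, d2, d3 = periodic_diffs(32, 0.1)
print(np.allclose(dp.T, -dm), np.allclose(d1.T, -d1), np.allclose(d3.T, -d3))
\end{verbatim}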
We may now introduce numerical approximations $\mathbf{U}^n$ representing the fully discretised system; the scheme reads
\[
\frac{\mathbf{U}^{n+1}-\mathbf{U}^n}{\Delta t} = \mathcal{D}_d\frac{\delta\mathcal{H}_d}{\delta(\mathbf{U}^n,\mathbf{U}^{n+1})}.
\]
The conservative schemes based on polarisation are adapted in a straightforward manner, introducing a function
$H_d[\ensuremath{\bf w}_1,\ldots,\ensuremath{\bf w}_{k}]$ which is consistent and cyclic as in \eqref{eq:consist}, \eqref{eq:cyclic}, and
a skew-symmetric map $D_d$ depending on at most $k-1$ arguments.
The scheme is then
\begin{equation}\label{eq:discscheme}
\frac{\mathbf{U}^{n+k}-\mathbf{U}^{n}}{k\Delta t}=k D_d\frac{\delta H_d}
{\delta(\mathbf{U}^{n},\dots,\mathbf{U}^{n+k})}.
\end{equation}
This scheme conserves the function $H_d$ in the sense that
\[
H_d[\mathbf{U}^{n+1},\ldots,\mathbf{U}^{n+k}]= H_d[\mathbf{U}^{0},\ldots,\mathbf{U}^{k-1}],\quad n\geq 0.
\]
\section{Examples}\label{se:num}
To illustrate the procedures for constructing conservative schemes presented in this paper we consider as an example the generalised Korteweg-de Vries (gKdV) equation
\begin{equation*}
u_t+u_{xxx}+(u^{p-1})_x=0
\end{equation*}
for an integer $p\geq3$, see for example~\cite{MR2457070}.
The case $p=3$ is the KdV equation \eqref{eq:KdV}, the case $p=4$ is known as the modified KdV equation, and
$p=6$ is sometimes referred to as the mass critical generalised KdV equation. The gKdV can be written as \eqref{eq:hamileq} with
\begin{equation*}
\mathcal{H}[u]=\int_\Omega\left(\frac12u_x^2-\frac{1}{p}u^{p}\right)\,\mathrm{d}x,\quad
\mathcal{D}=\frac{\partial}{\partial x}.
\end{equation*}
The AVF discrete variational derivative \eqref{eq:dgavf}
gives rise to the fully implicit scheme \eqref{eq:dgm}
\begin{equation}\label{eq:genkdvimp}
\frac{U^{n+1}-U^n}{\Delta t}+\frac{U^{n+1}_{xxx}+U^{n}_{xxx}}2+\frac1{p}\left(\sum_{i=0}^{p-1}(U^{n+1})^{p-1-i}(U^{n})^i\right)_x=0.
\end{equation}
After applying the polarising procedure of Section \ref{se:pol} to $\mathcal{H}=\int_\Omega\mathcal{G}\,\mathrm{d}x$ we get $H=\int_\Omega G\,\mathrm{d}x$ which depends on $k=\lceil p/2\rceil$ arguments
\begin{equation*}
G[w_1,\dots,w_k]=
\frac1{2k}\sum_{i=1}^k(w_i)_x^2-
\begin{cases}
\frac1{pk}\left(\prod_{j=1}^kw_j^2\right)\left(\sum_{i=1}^k\frac{1}{w_i}\right), & p\text{ odd},\\
\frac1{p}\prod_{j=1}^kw_j^2, & p\text{ even}.
\end{cases}
\end{equation*}
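The consistency requirement \eqref{eq:consist} is easily verified; for instance, the following small symbolic check (ours, purely illustrative) confirms for one odd and one even $p$ that the nonlinear part of $G$ collapses to $u^p/p$ when all arguments coincide.
\begin{verbatim}
import sympy as sp

def nonlinear_part(p):
    k = -(-p // 2)                            # k = ceil(p/2)
    w = sp.symbols(f'w1:{k+1}')               # w1, ..., wk
    prod = sp.Mul(*[wi**2 for wi in w])
    if p % 2:                                 # p odd
        return prod*sum(1/wi for wi in w)/(p*k)
    return prod/p                             # p even

u = sp.Symbol('u')
for p in (5, 6):
    G = nonlinear_part(p)
    print(p, sp.simplify(G.subs({wi: u for wi in G.free_symbols}) - u**p/p))  # prints 0
\end{verbatim}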
After finding the
AVF discrete variational derivative from \eqref{eq:tja} we get the linearly implicit PAVF scheme \eqref{eq:scheme}
\begin{multline}\label{eq:genkdvlinin}
\frac{U^{n+k}-U^n}{k\Delta t}+\frac{U^{n+k}_{xxx}+U^{n}_{xxx}}2\\+
\begin{cases}
\frac1p\left[\left(\prod_{j=1}^{k-1}(U^{n+j})^2\right)\left(\sum_{i=1}^{k-1}\frac{U^{n+k}+U^{n}}{U^{n+i}}+1\right)\right]_x,& p\text{ odd},\\
\frac12\left[\left(\prod_{j=1}^{k-1}(U^{n+j})^2\right)\left(U^{n+k}+U^{n}\right)\right]_x, & p\text{ even},
\end{cases}
=0.
\end{multline}
Notice that $U^{n+k}$ indeed appears only linearly in this scheme.
The schemes \eqref{eq:kdvimp} and \eqref{eq:kdvlinin} are found by setting $p=3$ ($k=2$) in \eqref{eq:genkdvimp} and \eqref{eq:genkdvlinin}, respectively. Following the procedure of Section \ref{se:spatial} one can get a fully discretised scheme by replacing $U$ by $\mathbf{U}$ and the first and third derivative operators by $\delta^{\langle 1\rangle}$ and $\delta^{\langle 3\rangle}$ respectively.
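To make the last remark concrete, one step of the fully discretised, linearly implicit scheme for the KdV case $p=3$, $k=2$ amounts to a single linear solve. The Python sketch below is our own illustration (grid sizes, starting data and all names are illustrative choices; periodic boundary conditions are assumed) and should not be read as the code behind the reported experiments.
\begin{verbatim}
import numpy as np

def periodic_ops(N, dx):
    I = np.eye(N)
    S = np.roll(I, 1, axis=1)                  # (S u)_i = u_{i+1}, periodic
    d1 = (S - S.T)/(2*dx)                      # central first derivative
    d2 = ((S - I) @ (I - S.T))/dx**2           # second derivative
    return I, d1, d1 @ d2                      # identity, delta^<1>, delta^<3>

def pavf_kdv_step(U0, U1, dt, dx):
    """One step of the linearly implicit PAVF scheme for KdV (p = 3, k = 2):
    solve a linear system for U2 given the two previous levels U0 and U1."""
    I, d1, d3 = periodic_ops(len(U0), dx)
    A = I/(2*dt) + d3/2 + d1 @ np.diag(U1)/3
    b = U0/(2*dt) - d3 @ U0/2 - d1 @ (U1*U0 + U1**2)/3
    return np.linalg.solve(A, b)

# toy usage: soliton initial data; the second starting level is taken from the exact solution
N, L, dt, c = 128, 10.0, 0.01, 1.0
x = np.linspace(-L/2, L/2, N, endpoint=False)
U0 = 1.5*c/np.cosh(0.5*np.sqrt(c)*x)**2
U1 = 1.5*c/np.cosh(0.5*np.sqrt(c)*(x - c*dt))**2
U2 = pavf_kdv_step(U0, U1, dt, dx=L/N)
\end{verbatim}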
In the Figures \ref{fi:solitonerror} and \ref{fi:faseerror} we compare the conservative methods \eqref{eq:kdvimp} and \eqref{eq:kdvlinin} with the fully implicit midpoint method
\begin{equation}\label{eq:fi}
\frac{U^{n+1}-U^{n}}{\Delta t}+\frac{U_{xxx}^{n+1}+U_{xxx}^{n}}2+\left(\left(\frac{U^{n+1}+U^{n}}2\right)^2\right)_x=0
\end{equation}
and a naive linearly implicit method
\begin{equation}\label{eq:li}
\frac{U^{n+1}-U^{n}}{\Delta t}+\frac{U_{xxx}^{n+1}+U_{xxx}^{n}}2+\left(U^nU^{n+1}\right)_x=0.
\end{equation}
We test the four methods on a traveling wave solution
\begin{equation*}
\Phi(x-ct)=\frac{3c}{2}\mathrm{sech}^2\left(\frac{\sqrt{c}}{2}(x-ct)\right),\quad c>0
\end{equation*}
using the parameters $c=1$, $x\in(-5,5)$, $\Delta x=\frac{10}{32}$ and $\Delta t=0.1$.
As an indication of the long time behaviour of the presented schemes we consider to which extent the methods are able to preserve the shape and propagation speed of a traveling wave solution.
We define the two quantities
\begin{align}
\varepsilon_{\mathrm{shape}}&=\min_{\tau} \lVert U^n-\Phi(\cdot-\tau)\rVert_2^2\label{eq:shape}
\intertext{and}
\varepsilon_{\mathrm{distance}}&=|\underset{\tau}{\mathrm{argmin}} \lVert U^n-\Phi(\cdot-\tau)\rVert_2^2-ct^n|.\label{eq:distance}
\end{align}
Thus $\varepsilon_{\mathrm{shape}}$ measures the shape error of the numerical solution, and $\varepsilon_{\mathrm{distance}}$ measures the error in the travelled distance of the numerical solution.
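A discrete version of \eqref{eq:shape} and \eqref{eq:distance} may be sketched as follows (our own simplification, minimising only over shifts by whole grid cells on a periodic domain; a finer minimisation is presumably used for the figures).
\begin{verbatim}
import numpy as np

def shape_and_distance_error(U, Phi, x, c, t):
    """Discrete analogues of eq:shape and eq:distance on a periodic grid.
    U: numerical solution at time t; Phi: callable exact profile Phi(x)."""
    dx = x[1] - x[0]
    shifts = np.arange(len(x))*dx
    errors = [np.sum((U - Phi(x - s))**2)*dx for s in shifts]
    j = int(np.argmin(errors))
    L = len(x)*dx
    dist = (shifts[j] - c*t + L/2) % L - L/2   # wrap the travelled-distance error periodically
    return errors[j], abs(dist)
\end{verbatim}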
We see in Figure \ref{fi:solitonerror} that the fully implicit schemes preserve the shape better than the linearly implicit ones, and that the conservative schemes perform better than the non-conservative ones. In Figure \ref{fi:faseerror} we see that the linearly implicit schemes have a more accurate phase speed than the fully implicit ones.
Figure \ref{fi:orderplot} shows the global error as a function of the time step. As expected, the plot shows that all four methods are of second order and that the linearly implicit schemes are inaccurate for large $\Delta t$.
In conclusion we see that the linearly implicit conservative scheme performs comparably to the other methods while being more efficient (the latter is shown in Figure \ref{fi:secondpic}).
\begin{figure}
\caption{The shape error $\varepsilon_{\mathrm{shape}}$ defined in \eqref{eq:shape}.}
\label{fi:solitonerror}
\end{figure}
\begin{figure}
\caption{The distance error $\varepsilon_{\mathrm{distance}}$ defined in \eqref{eq:distance}.}
\label{fi:faseerror}
\end{figure}
\begin{figure}
\caption{The global error at $t=8$ for the schemes \eqref{eq:kdvimp}, \eqref{eq:kdvlinin}, \eqref{eq:fi} and \eqref{eq:li}.}
\label{fi:orderplot}
\end{figure}
\end{document}
\begin{document}
\title{Theory of near-field matter wave interference beyond the eikonal approximation}
\author{Stefan Nimmrichter}
\affiliation{
Arnold Sommerfeld Center for Theoretical Physics,
Ludwig-Maximilians-Universit\"{a}t M\"{u}nchen,\\
Theresienstra{\ss}e 37, 80333 Munich, Germany
}
\affiliation{Faculty of Physics, University of Vienna, Boltzmanngasse 5, 1090 Vienna, Austria}
\author{Klaus Hornberger}
\homepage{www.klaus-hornberger.de}
\affiliation{
Arnold Sommerfeld Center for Theoretical Physics,
Ludwig-Maximilians-Universit\"{a}t M\"{u}nchen,\\
Theresienstra{\ss}e 37, 80333 Munich, Germany
}
\preprint{\sf published in: Phys.~Rev.~A \textbf{78}, 023612 (2008)}
\begin{abstract}
A generalized description of Talbot-Lau interference with matter waves is presented, which accounts for arbitrary grating interactions and realistic beam characteristics.
The dispersion interaction between the beam particles and the optical elements strongly influences the interference pattern in this near-field effect, and it is known to dominate the fringe visibility if increasingly massive and complex particles are used.
We provide a general description of the grating interaction process by combining semiclassical scattering theory with a phase space formulation. It serves to systematically improve the eikonal approximation used so far, and to assess its regime of validity.
\end{abstract}
\pacs{03.75.-b, 03.75.Dg, 34.35.+a}
\maketitle
\section{Introduction}
The ability of material particles to show wave-like
interference is one of the central predictions of quantum mechanics. While the early experimental tests worked with elementary particles \cite{Davisson1927a,Halban1936a}, interferometry of atoms is by
now a matured field of physics \cite{Berman1997a,Miffre2006a,Cronin2008a}.
It is in particular the ability to cool and to control atoms using laser techniques
that has propelled atom interferometry into a versatile tool for
precision measurements, e.g. \cite{Weiss1993a,Gustavson1997a,Peters1999a,Muller2007a}.
As far as more complex and more massive objects are concerned, we are
currently witnessing this transition from the proof-of-principle
demonstration of their wave nature to the use of interferometry for
quantitative measurements. Specifically, the static
\cite{Berninger2007a} and the dynamic \cite{Hackermuller2007a} bulk
polarizability of fullerene molecules was measured recently with a
molecule interferometer. Also the controlled observation of
decoherence, due to collisions with gas particles
\cite{colldecoboth} or due to the emission of
thermal radiation \cite{thermodecoboth}, has been
used to characterize the interaction strength (or cross-section)
of fullerenes with external degrees of freedom. Another motivation for
studying interference with large molecules is to test quantum
mechanics in unprecedented regimes by establishing the wave nature of ever more massive objects \cite{Arndt2005a}.
The above-mentioned interference experiments with large molecules
are based on the near-field Talbot-Lau effect, where three
gratings are used which serve, in turn, to produce coherence in
the beam, to bring it to interference, and to resolve the fringe
pattern. This is an established technique in atom and electron
interferometry
\cite{Clauser1994both,Cahn1997a,Deng1999a,Cronin2006a,Kohno2007a}, and it is the
method of choice for massive and bulky molecules (such as fullerenes
\cite{Brezger2002a}, meso-tetraphenylporphyrins \cite{Hackermuller2003a},
or functionalized azobenzenes \cite{Gerlich2007a}). The main reason is that a
Talbot-Lau interferometer (TLI) tolerates beams which are relatively weakly
collimated, thus alleviating the increasing difficulty in producing
brilliant beams if the particles get more complex.
Another important advantage compared to far-field setups, which is
essential if one wants to increase the particle mass by several orders
of magnitude, is the favorable scaling behavior of Talbot-Lau
interferometers with respect to the de Broglie wavelength \cite{Berman1997a}.
As a specific feature of Talbot-Lau interferometers, the forces between the particles in the beam and the diffraction grating
influence the interference pattern much more strongly than in a far-field
setup. This is due to the fact that the different diffraction orders
do not get spatially separated in a TLI. Rather, all the orders
interfere among each other, producing a resonant recurrence of the
pattern whenever the so-called Talbot condition is met. As a
consequence, even tiny distortions of the matter wave may lead to
significant changes of the fringe visibility---requiring, for example,
the modification of the van der Waals force due to retardation effects
\cite{Casimir1948a} to be taken into account when describing the
diffraction of fullerenes at gold gratings.
This effect of the dispersion force between the particle and the
grating wall gets more important as the mass and structure of the
molecule grows. In particular, it sets increasingly strict
requirements on the monochromaticity, i.e., the velocity spread permissible in the
molecular beam. A very recent development, undertaken to reduce this
influence, is the Kapitza-Dirac Talbot-Lau Interferometer (KDTLI),
where the second material grating is replaced by the pure phase grating
produced by a standing light field \cite{Gerlich2007a}.
The available theoretical descriptions of molecular Talbot-Lau
interference account for the grating forces in terms of a simple
eikonal phase shift \cite{Patorski1989a,Brezger2003a,Hornberger2004a}
(an expression originally derived by nuclear physicists as an
asymptotic high-energy approximation to the Lippmann-Schwinger
equation in scattering theory \cite{Moliere1947a,Glauber1959a}).
This approximation ceases to be valid with a growing influence of the
particle-grating interaction, and, due to its non-perturbative nature,
its range of validity is not easy to assess. Therefore, given the quest
for testing quantum mechanics with ever larger particles and given the
increased precision required in metrological applications, there is a
clear need to extend the theoretical description of near-field matter
wave interference beyond the eikonal approximation.
The main purpose of this paper is therefore to develop a generalized
formulation of the coherent Talbot-Lau effect. By combining a
scattering theory formulation with semiclassical approximations we
incorporate the effect of the grating interaction systematically
beyond the eikonal approximation.
As a prerequisite for its implementation, the established theory of near
field interference first needs to be extended to account for the
effects of finite angular dispersion in the molecular beam. We show
how this can be done transparently by using the phase space formulation
of quantum mechanics. As a by-product, this formulation permits us to
quantify the adjustment precision required in realistic experiments.
The structure of the article is as follows. In Section
\ref{sec:phasespace} we develop a generalized theory of Talbot-Lau
interference, which is formulated independently of the particular
choice of how to incorporate the grating interaction. We also establish the relation to
the previous treatments by applying the
eikonal approximation. In Section \ref{sec:potential},
we review relevant realistic descriptions of the grating interaction,
and numerically illustrate their effect in the eikonal approximation.
Section \ref{sec:semiclass} is devoted to the development of a
semiclassical formalism to go beyond the elementary eikonal approximation.
Its predictions are numerically evaluated and compared in Section
\ref{sec:numerics}, using the experimental parameters of the
molecular interference experiments carried out in Vienna
\cite{Brezger2002a,Gerlich2007a}. Finally, we present our conclusions in
Section \ref{sec:conclusions}.
\section{Phase space description of the Talbot-Lau effect} \label{sec:phasespace}
The effect of Talbot-Lau interference can be described in a
particularly transparent and accessible fashion by using the phase
space representation of quantum mechanics
\cite{Wigner1932a,Ozorio1998a,Schleich2001a,Zachos2005a}. This is demonstrated in
\cite{Hornberger2004a}, where both the coherent effect and the
consequences of environmental interactions are formulated in terms of
the Wigner function of the matter wave beam. As also shown there, the
analogy of the Wigner function with the classical phase space
distribution allows one to evaluate the predictions of classical and
quantum mechanics in the same framework, a necessary step if one wants
to distinguish unambiguously quantum interference from a possible
classical shadow effect.
However, the treatment in \cite{Hornberger2004a} is based on a number
of idealizations, which must be reconsidered in view of a more refined
description of the particle-grating interaction. The least problematic
approximation is to disregard the motion in the
direction parallel to the grating slits, which is permissible due
to the translational symmetry of the setup in this direction. It
follows that a two-dimensional description involving the
longitudinal motion (denoted by $z$) and the transverse motion (denoted by $x$) of the beam is required in principle. In front of the interferometer these two degrees of freedom
are well approximated by a separable state involving transverse momenta $|p|\ll p_z$.
If the grating interaction is treated in eikonal approximation,
as done in \cite{Hornberger2004a}, this implies that the transverse
and the longitudinal motion remain separable throughout. One can then
resort to an effectively one-dimensional description, characterized by
a fixed longitudinal momentum $p_z= h /\lambda$. The $z$ coordinate
then represents a time $t=m z /p_z$ and the longitudinal propagation
of the beam along $z$ effectively evolves the one-dimensional
transverse beam state during the time $t$. The finite distribution
$\mu(p_z)$ of the longitudinal momenta is then accounted for only in
the end by averaging the results obtained with sharp values of $p_z$.
A priori, such a treatment is no longer valid for a general grating
interaction where different longitudinal momentum components of the
beam get correlated. We will accordingly use a two-dimensional
scattering formulation to describe the grating interaction in
Sect.~\ref{sec:semiclass}. However, we will see that for the
parameters of typical experiments the main effect of taking the
grating interaction beyond the eikonal approximation is on the
transverse degrees of freedom. Since the effect on the longitudinal
motion is much weaker we will retain the effectively one-dimensional
description outlined above, postponing the physical discussion why
this is permissible to Sect.~\ref{sec:semiclass}.
Another idealization found in the basic treatments of the Talbot-Lau
effect is to assume the transverse motion in front of the interferometer to be in a completely incoherent state. This would correspond to a constant distribution of the transverse momenta $p$, and for general grating transformations, which depend on $p$, it is no longer a valid approximation. We will therefore present a formulation that takes into account a realistic beam profile and that allows for a general state transformation at the diffraction grating. Going beyond the idealization of a perfectly incoherent state will also permit us to assess the adjustment precisions required in an experimental implementation.
\subsection{The Wigner function and its transformations}
We start by briefly outlining how to describe matter wave interference by means of the phase space representation of quantum mechanics \cite{Hornberger2004a,Hornberger2006a}. As discussed above, one may restrict the dynamics to the transverse beam state, which is most generally specified by its density matrix $\rho$. This state is equivalently described by the Wigner function
\begin{equation}
w \left(x,p \right) = \frac{1}{2 \pi \hbar} \int \mathrm{d} s \, e^{i p s / \hbar} \langle x - \frac{s}{2}|\rho|x + \frac{s}{2} \rangle,
\end{equation}
which is a function of the transverse phase space coordinates $\left(x,p \right)$.
Like $\rho$ it depends parametrically on the longitudinal momentum $p_z$. Since we assume the beam to be collimated $w(x,p)$ is non-zero only for $|p|\ll p_z$.
The great advantage of the phase space formulation is that it permits a straightforward, yet realistic, description of the beam and its propagation through
the interferometer. Most importantly, the free time evolution of a state
during the time $t$ is given by the same shearing transformation
as in the case of the classical phase space density,
\begin{equation}
w_t \left(x,p \right) = w_0 \left( x-\frac{t}{m} p ,p \right),
\label{eq:wt}
\end{equation}
with $m$ the particle mass.
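As an aside, this shearing transformation is also convenient numerically. The following minimal Python sketch (a hypothetical illustration, not part of the treatment above) applies Eq.~(\ref{eq:wt}) to a Wigner function sampled on an $(x,p)$ grid by interpolating each momentum column:
\begin{verbatim}
import numpy as np

def shear(w0, x, p, t, m):
    # Free evolution (eq:wt): shift each momentum column in x by t*p/m.
    # w0[i, j] samples the Wigner function at (x[i], p[j]); values outside
    # the grid are clamped, which is harmless for a localized beam state.
    wt = np.empty_like(w0)
    for j, pj in enumerate(p):
        wt[:, j] = np.interp(x - t * pj / m, x, w0[:, j])
    return wt
\end{verbatim}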
On the other hand, a quantum state transformation of the form $\rho' = \mathsf U \rho
\mathsf U ^{\dagger}$, such as the effect of passing a grating, reads in phase space
\begin{equation}
w' \left(x,p \right) = \iint \mathrm{d} x_0 \mathrm{d} p_0 \, K \left(x,p;x_0,p_0 \right) w \left(x_0,p_0 \right).
\end{equation}
Here the integral kernel is given by the propagator
\begin{eqnarray}
K \left( x, p; x_0 , p_0 \right) &=& \frac{1}{2\pi \hbar} \iint \mathrm{d} s \mathrm{d} s_0 \, e^{i \left(p s + p_0 s_0 \right)/\hbar} \nonumber \\
&&\times \langle x - \frac{s}{2} | \mathsf U | x_0 + \frac{s_0}{2} \rangle \langle x + \frac{s}{2} | \mathsf U | x_0 - \frac{s_0}{2} \rangle^{*} .
\nonumber \\ &&
\label{eqn:generalprop}
\end{eqnarray}
Specifically, an ideal
grating is characterized by a grating transmission function $t(x)$, with $|t(x)|\leq 1$, describing the multiplicative modification of an incoming plane wave. (The factor $t(x)$ is non-zero only within the slit openings of the grating and it may there imprint a complex phase to account for the interaction potential between the grating walls and
the beam particle, see Sect.~\ref{sec:potential}.) For such gratings the transformation reads $\langle x | \mathsf U |x_0 \rangle = t \left(x \right) \delta\left(x-x_0 \right)$, so that the grating propagator reduces to a convolution kernel
\begin{eqnarray}
K \left( x, p; x_0 , p_0 \right) &=& \delta \left(x-x_0 \right) \frac{1}{2\pi \hbar} \int \mathrm{d} s \, e^{i \left(p - p_0\right) s/\hbar} \nonumber \\
&&\times t \left(x- \frac{s}{2} \right) t^{*} \left(x+ \frac{s}{2} \right) ,
\label{eqn:eikonalkernel}
\end{eqnarray}
which is local in position \cite{Hornberger2004a}. This choice of $K$ will be required below to reduce the generalized formulation of Talbot-Lau interference to the eikonal approximation.
The convolution kernel provides a descriptive picture
of the diffraction process. Suppose the incoming beam is perfectly coherent, i.e. a plane wave characterized by the longitudinal momentum $p_z$ and transverse momentum $p_0$, corresponding to the (unnormalized) transverse Wigner function $w_0 \left(x,p\right) = \delta \left(p-p_0\right)$. The diffracted transverse state is then given by
\begin{equation}
w_1 \left(x,p \right) = \frac{1}{2\pi \hbar} \int \mathrm{d} s \, e^{i \left(p - p_0\right) s/\hbar} t \left(x- \frac{s}{2} \right) t^{*} \left(x+ \frac{s}{2} \right).
\end{equation}
In case of a periodic grating with period $d$, the transmission function can be
decomposed into a Fourier series,
\begin{equation}
t (x) = \sum_{j=-\infty}^\infty b_j \exp \left( 2\pi i j \frac{x}{d} \right),
\label{eq:bj}
\end{equation}
so that, after a free propagation over the longitudinal distance $L$, the
transverse spatial density of the beam state is given by the marginal distribution
\begin{eqnarray}
w_2 \left(x \right) &=& \int \mathrm{d} p \, w_1 \left( x- \frac{p}{p_z}L,p\right) \nonumber \\
&=& \sum_{m=-\infty}^{\infty}
B_m\left(m \frac{ L}{L_\textrm{T}}\right)
\exp \left( 2\pi i m \frac{x}{d} \right).
\label{eq:TE}
\end{eqnarray}
Here, we introduced the basic \textit{Talbot-Lau coefficients},
\begin{equation}
B_{m} \left(\xi \right) = \sum_{j=-\infty}^{\infty} \, b_j b_{j-m}^{*} \exp \left(i \pi \xi \left( m - 2j\right) \right),
\label{eqn:Beik}
\end{equation}
and the characteristic length scale $L_{\rm T} = d^2 / \lambda$ is called the Talbot length.
Compare (\ref{eqn:Beik}) to the Fourier coefficients of the transmission probability $|t(x)|^2$, which are given by the convolution $A_{m}=\sum_j b_j b_{j-m}^{*}$. Equation (\ref{eq:TE}) thus implies that the density distribution takes the form of the grating transmission profile whenever the distance $L$ is an integer multiple of the Talbot length, $L=k
L_{\textrm{T}}$,
\begin{equation}
w_2 \left(x \right) = \left| t \left( x + k \frac{d}{2}\right)\right|^2.
\label{eqn:Talboteffect}
\end{equation}
This is the elementary Talbot effect \cite{Talbot1836a}, and it is the backbone of the Talbot-Lau interferometer, which however does not require a coherent illumination.
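To make the above formulas concrete, the following minimal Python sketch evaluates the Talbot-Lau coefficients (\ref{eqn:Beik}) and the density pattern (\ref{eq:TE}) for an idealized binary grating; the open fraction and the truncation orders are hypothetical illustration values, not taken from a specific experiment.
\begin{verbatim}
import numpy as np

f = 0.48        # open fraction of an idealized binary grating
jmax = 40       # truncation of the Fourier series (eq:bj)

def b(j):
    # Fourier coefficients of a binary transmission function
    return f * np.sinc(j * f)

def B(m, xi):
    # Basic Talbot-Lau coefficients (eqn:Beik), truncated at |j| <= jmax
    j = np.arange(-jmax, jmax + 1)
    return np.sum(b(j) * np.conj(b(j - m))
                  * np.exp(1j * np.pi * xi * (m - 2 * j)))

def w2(x_over_d, L_over_LT, mmax=25):
    # Density pattern behind the grating, Eq. (eq:TE);
    # the sum is real by symmetry, np.real drops numerical residue
    return np.real(sum(B(m, m * L_over_LT)
                       * np.exp(2j * np.pi * m * x_over_d)
                       for m in range(-mmax, mmax + 1)))

x = np.linspace(-0.5, 0.5, 201)
profile = w2(x, L_over_LT=1.0)
\end{verbatim}
At $L = L_{\rm T}$ the computed profile reproduces the slit profile shifted by half a period, in accordance with (\ref{eqn:Talboteffect}).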
\subsection{The general Talbot-Lau effect} \langlebel{sec:generalTL}
\begin{figure*}
\caption{\label{fig:TLsetup} Schematic setup of the Talbot-Lau interferometer with the collimated beam, the three gratings of periods $d_1$, $d$, $d_3$ separated by the distances $L$ and $\eta L$, and the relevant parameters discussed in the text.}
\end{figure*}
The general setup of a Talbot-Lau interferometer is described in Fig.~\ref{fig:TLsetup}, along with the most relevant parameters. An incoherent particle beam represented by the shaded area
passes a preliminary collimation slit on the left side of the figure and enters the Talbot-Lau interferometer through the first grating.
The angular distribution of those particles in the beam which are finally detected is characterized by the spread $\alpha$,
determined by the collimation slit and the finite area of the detector.
The first grating can be understood as an array of collimation slits of period $d_1$ preparing, after a distance $L$, the transverse coherence required for diffraction at the second grating. In this region,
the distortion of a matter wave front due to the grating interaction may significantly affect the interference pattern, so that the grating thickness $b$ enters the calculation. (It is replaced by the laser waist $w_z$ in case of a light grating.)
The different diffraction orders still overlap further downstream if the distance is on the order of the Talbot length $L_{\textrm{T}} = d^2 / \lambda$, and they may thus interfere among each other, producing a near field fringe pattern. This can be explained qualitatively from the elementary Talbot effect (\ref{eqn:Talboteffect}) by considering each of the slits in the first grating as independent point sources. The Talbot patterns due to the individual slits will add up constructively for appropriate choices of the grating periods $d_1$, $d$ and of the distance factor $\eta$, so that a distinct density pattern is created. It is verified without the need for a spatially resolving detector by superposing a
third grating whose period $d_3$ equals that of the density pattern, and by measuring the total flux through it as a function of its transverse position $x_S$.
We will now present a quantitative formulation of the successive propagation of the beam through the interferometer. As discussed above, one can take the longitudinal motion of the beam particles to remain unaffected by the gratings. We may therefore assume the longitudinal momentum to have a definite value $p_z$ for the time being, so that the longitudinal position $z$ plays the role of a time coordinate $t= z m /p_z$ for the transverse motion.
\subsubsection{Sequential calculation}
The transverse state of the beam entering the interferometer is far from pure. It is confined in position by the orifice of the first grating, whose size $G$ typically covers thousands of grating slits. The momenta $p$ are characterized by the angular distribution $D \left( p/p_z \right)$ whose characteristic spread is denoted as $\alpha$.
In a typical experimental situation, $\alpha$ rarely exceeds $1$\,mrad corresponding to a fairly well-collimated beam. However, this still covers a range of transverse momenta that is by orders of magnitude larger than the grating momentum $h/d_1$ which separates the different diffraction orders. This explains why diffraction does not need to be taken into account at the first grating. The transverse Wigner function merely gets modulated by the grating profile, so that behind the first grating it reads
\begin{equation}
w_1 \left(x,p \right) = \frac{1}{G p_z} \left| t_1 \left(x \right) \right|^2 D \left( \frac{p}{p_z}\right).
\end{equation}
Here $t_1 \left(x \right)$ is the transmission function which is confined to the grating orifice, in principle.
However, since the size $G$ is typically larger than
the sensitive region $W$ of the final detector
one may equally take it to be an unconstricted periodic function.
The free propagation of the beam over the longitudinal distance $L$ yields $w_2(x,p)=w_1 (x-L p/p_z,p)$.
The effect of passing the second grating is in general described by an operator $ \mathsf U $ to be specified below. The corresponding phase space transformation is given by (\ref{eqn:generalprop}) so that, after a second free propagation over the distance $\eta L$, the transverse beam state in front of the third grating reads
\begin{eqnarray}
w_3 \left(x,p \right) &=& \frac{1}{G p_z} \iint \mathrm{d} x_0 \mathrm{d} p_0 \, D \left( \frac{p_0}{p_z} \right) \, \left| t_1 \left( x_0\right)\right|^2 \nonumber \\
&&\times \, K \left( x - \frac{p}{p_z} \eta L, p; x_0+\frac{p_0}{p_z}L, p_0 \right) .
\label{eq:w3eins}
\end{eqnarray}
The corresponding spatial density distribution, given by
$ w_3 \left(x \right) = \int \mathrm{d} p \, w_3 \left(x,p \right)$,
is now modulated by the third grating as a function of the grating shift $x_S$. The detection signal is thus obtained as
\begin{eqnarray}
S \left(x_S \right) &=& \int_{-W/2}^{W/2} \mathrm{d} x \, w_3 \left(x \right) \left| t_3 \left(x-x_S \right) \right|^2
\nonumber\\
&=& \sum_{n=-\infty}^\infty S_n \exp \left( 2\pi i n\frac{ x_S}{d_3}\right),
\label{eqn:signal}
\end{eqnarray}
where $\left| t_3 \left(x\right) \right|^2$ is the spatial transmission probability of the grating and $W$ the size of the detector. The latter typically covers hundreds of grating periods so that
one may disregard the finiteness of $W$ when evaluating the basic interference effect. However, as shown below, it does play a role when the experimental adjustment precision needs to be evaluated.
The resulting interference pattern is most easily characterized by the contrast of the modulation signal,
conventionally defined as the ratio between the amplitude of the signal variation and its mean value.
However, since the experimental signal is usually very close to a sinusoidal curve, it is in practice more convenient and more robust to use the \emph{sinusoidal visibility} defined in terms of the first to the zeroth Fourier expansion coefficient of the $d_3$-periodic signal \cite{Brezger2003a},
\begin{equation}
{\cal V} = \left| \frac{2 S_1}{S_0} \right| .
\label{eqn:Vexp}
\end{equation}
It can be easily obtained from noisy experimental data by fitting a sine curve and it coincides with the conventional definition in case of a sinusoidal signal.
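In a numerical analysis the sinusoidal visibility (\ref{eqn:Vexp}) can be read off directly from the discrete Fourier transform of the sampled signal; the following Python fragment is a minimal sketch using a synthetic signal for illustration.
\begin{verbatim}
import numpy as np

def sinusoidal_visibility(signal):
    # Sinusoidal visibility (eqn:Vexp) from a signal sampled
    # over exactly one period d3
    S = np.fft.fft(signal) / len(signal)   # Fourier coefficients S_n
    return np.abs(2 * S[1] / S[0])

x = np.linspace(0, 1, 256, endpoint=False)
S_test = 1.0 + 0.3 * np.cos(2 * np.pi * x)  # synthetic 30% contrast
print(sinusoidal_visibility(S_test))        # -> 0.3
\end{verbatim}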
\subsubsection{Decomposing the grating propagator}
We proceed to evaluate the interference pattern (\ref{eq:w3eins}) by noting a general property of the grating transformation $ \mathsf U $. The periodicity of the grating implies that the position representation $\langle x| \mathsf U |x_0 \rangle$ is $d$-periodic with respect to the center position $(x+x_0)/2$.
This admits the series expansion
\begin{equation}
\langle x| \mathsf U |x_0 \rangle = \sum_{n=-\infty}^{\infty} \, \exp \left(i \pi n\frac{x+x_0}{d} \right) \, {\cal U} _n \left( x- x_0\right)
\end{equation}
where the transformation within a single slit is now characterized by the corresponding Fourier coefficients
\begin{equation}
{\cal U} _n \left( \tilde{x} \right) = \frac{1}{d} \int_{-d/2}^{d/2} \mathrm{d} \bar{x} \, \exp \left(-2 \pi i n\frac{ \bar{x}}{d} \right) \langle \bar{x} + \frac{\tilde{x}}{2} | \mathsf U | \bar{x} - \frac{\tilde{x}}{2} \rangle .
\label{eq:Undef}
\end{equation}
It is now convenient to specify these transformation functions in terms of their \emph{shifted} Fourier transformation, which serves to generalize the grating coefficients introduced for the eikonal case in (\ref{eq:bj}).
\begin{equation}
b_n \left( {p} \right) = \int \mathrm{d} {x} \, \exp \left(-\frac{i}{\hbar} \left({p} + \frac{\pi \hbar}{d} n \right) {x} \right) \, {\cal U} _n \left( {x}\right)
\label{eqn:gratingcoeff}
\end{equation}
Indeed, for position-diagonal operators $ \mathsf U $, these functions drop their
momentum-dependence and reduce to the coefficients in (\ref{eq:bj}).
The generalized grating coefficients (\ref{eqn:gratingcoeff}) can now be used to construct the generalized Talbot-Lau coefficients, which are the central quantities for describing the interference effect.
\begin{eqnarray}
B_{m} \left(\xi; p, \nu \right) &=& \sum_{j=-\infty}^{\infty} \, b_j \left( p +\nu\frac{\pi \hbar}{d} \right) b_{j-m}^{*} \left( p - \nu\frac{\pi \hbar}{d} \right) \nonumber \\
&&\times \, \exp \left(i \pi \xi \left( m - 2j\right) \right) .
\label{eqn:TLcoeff}
\end{eqnarray}
We note that at integer values of the argument $\xi$ the phase factor in (\ref{eqn:TLcoeff}) reduces to a constant sign, while $p$ has the meaning of an incident momentum and $\nu$ that of a scale factor, as will be seen below.
By using the generalized Talbot-Lau coefficients the density distribution at the third grating takes the explicit form
\begin{eqnarray}
w_3 \left(x \right) &=& \frac{1}{G} \sum_{m,n = -\infty}^{\infty}\, A_{n} \, \exp\left(2 \pi i \left( m + n \frac{d}{d_1} \right)\frac{ x}{d} \right) \nonumber \\
&& \times \, \int \frac{\mathrm{d} p}{p_z} \, \exp \left(-2 \pi i\frac{p L}{p_z d} \left( \eta m + (\eta + 1) n \frac{d}{d_1} \right) \right) \nonumber \\
&& \times \, D \left( \frac{p}{p_z} \right) B_{m} \left( \eta \left( m+ n \frac{d}{d_1} \right) \frac{L}{L_{\textrm{T}}}; p, n \frac{d}{d_1} \right) .\nonumber
\\ & & \label{eqn:interferencepattern1}
\end{eqnarray}
This expression for the interference pattern is now further simplified by identifying the Talbot-Lau resonance condition.
\subsubsection{The resonance condition}
Only a part of the double summation in (\ref{eqn:interferencepattern1}) contributes appreciably to the interference pattern. This follows from the fact that the ratio $L/d$ of grating distance to grating period is a large number, typically on the order of $10^5$. As a result, the momentum dependence of the phase factor in (\ref{eqn:interferencepattern1}) occurs on a very different scale compared to the variation of the momentum distribution $D \left( {p}/{p_z} \right)$ and of the Talbot-Lau coefficients $B_m(\xi;p,\nu)$. In fact, in the idealized case of both a completely incoherent illumination and an eikonal interaction the latter two functions are independent of $p$, so that the momentum integral is finite only if the phase vanishes identically, i.e., for
\begin{equation}
\eta m + (\eta + 1) \frac{d}{d_1}n=0.
\end{equation}
This would imply that an interference pattern is observed only if $(\eta+1)d/(\eta d_1)$ is a rational number.
This strong resonance condition gets relaxed if we account for the weak momentum dependence of the remaining integrand in (\ref{eqn:interferencepattern1}). We assume that those index pairs $(m,n)$ of the double sum contribute appreciably where the phase variation in the momentum integration does not exceed about $\pi$. Since the value of $|p|/p_z$ is bounded by the angular spread $\alpha$ this leads to the relaxed condition
\begin{equation}
\left| \frac{\eta+1}{\eta} \frac{d}{d_1} - \frac{r}{s} \right| \leq
\frac{1}{2\alpha}\frac{d}{L}.
\label{eqn:TLresbound}
\end{equation}
Here $r$ and $s$ are natural numbers without common divisor. They indicate the type of resonance and specify the set of index pairs $(m,n)\in\{(r\ell,s\ell):\ell\in\mathbb{Z}\}$ contributing to the sum.
If $\alpha$ is about $1$\,mrad and $L/d=10^{5}$ the right hand side of (\ref{eqn:TLresbound}) is on the order of $10^{-2}$, and in practice only a single resonance dominates
for each set of parameters $d$, $d_1$, $\eta$. The expression for the interference pattern thus simplifies to
\begin{eqnarray}
w_3 \left(x \right) &=& \frac{1}{G} \sum_{\ell=-\infty}^{\infty} A_{s\ell}^{*} \exp\left(2 \pi i\frac{ \ell x}{d_3} \right)
\int \frac{\mathrm{d} p}{p_z} \, D \left( \frac{p}{p_z} \right) \nonumber \\
&& \times \, B_{r \ell } \left( s \ell \frac{d}{d_1}\frac{L}{L_{\textrm{T}}}; p, - s \ell \frac{d}{d_1} \right) .
\label{eqn:interferencepattern2}
\end{eqnarray}
Here, the period of the interference pattern is given by
\begin{equation}
d_3 = \left( \frac{r}{d} - \frac{s}{d_1}\right)^{-1} .
\end{equation}
Note that a large interference contrast is obtained for the low order resonances (with small values of the integers $r$ and $s$) because the Fourier and Talbot-Lau coefficients $A_m, B_m$ generically decrease with their order $m$.
The standard choice in experiments is to take equal gratings, $d=d_1$, in an equidistant configuration, $\eta=1$. This corresponds to the $r:s = 2:1$ resonance. One may obtain even larger contrasts with $\eta=1$ by using the basic $r:s=1:1$ resonance, at the expense of dealing with different gratings, $d_1=2d$.
Also magnifying or demagnifying interferometers can be realized \cite{Brezger2003a}. Consider, for example, the case where the first two gratings are equal, $d=d_1$. The period of the interference signal is then given by $d_3 = d / (r-s)$ and cannot thus be greater than $d$.
The condition $r/s= \left( \eta+1 \right) / \eta$ with $\eta >0$ implies that $r$ must be strictly greater than $s$. If one wishes to decrease $d_3$, one must choose $r-s \geq 2$ which requires $ \eta < 1$, i.e., a compressed setup. However, the values of $r$ and $s$ grow with decreasing $d_3$ so that the visibility of the resulting interference pattern decreases significantly. A magnifying interferometer, on the other hand, can only be realized with different gratings $d \neq d_1$.
If the aim is to create a high contrast interference pattern with a specific period $d_3$ a low-order resonance should be taken, such as $r:s=2:1$ or $r:s=1:1$. The experimental setup parameters $\left(\eta,d,d_1 \right)$ must then satisfy the equations
\begin{eqnarray}
\left(\eta +1 \right) d &=& r d_3 \\
\eta d_1 &=& s d_3
\end{eqnarray}
with desired resonance parameters $\left(r,s,d_3 \right)$. The solution is not unique, in general, so that one has a certain freedom to account for experimental limitations.
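A minimal sketch of this parameter choice, taking the period $d$ of the second grating as the free parameter (the numerical values below are merely illustrative):
\begin{verbatim}
def setup_for_resonance(r, s, d3, d):
    # Solve (eta + 1) d = r d3 and eta d1 = s d3 for eta and d1,
    # with the second-grating period d chosen freely.
    eta = r * d3 / d - 1.0
    d1 = s * d3 / eta
    return eta, d1

print(setup_for_resonance(r=2, s=1, d3=1.0, d=1.0))
# -> (1.0, 1.0): equal gratings, equidistant configuration
print(setup_for_resonance(r=1, s=1, d3=2.0, d=1.0))
# -> (1.0, 2.0): the 1:1 resonance with d1 = 2 d
\end{verbatim}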
\subsection{The Talbot-Lau effect in eikonal approximation}
\label{sec:TLeikonal}
The expression for the interference pattern can be further simplified if the grating interaction is treated in eikonal approximation. The grating coefficients (\ref{eqn:gratingcoeff}) turn then into the momentum-independent Fourier coefficients $b_j$ of the transmission function (\ref{eq:bj}). Similarly, the generalized Talbot-Lau coefficients (\ref{eqn:TLcoeff}) then reduce to the basic coefficients given in Eq.~(\ref{eqn:Beik}) so that the prediction for the density pattern (\ref{eqn:interferencepattern1}) simplifies to the series
\begin{eqnarray}
w_3 \left(x \right) &=& \frac{1}{G} \sum_{\ell,m = -\infty}^{\infty}\, A_{\ell} \, B_{m} \left( \eta \left(m + \ell \frac{d}{d_1} \right) \frac{L}{L_{\textrm{T}}}\right) \nonumber \\
&& \times \, \widetilde{D} \left( \frac{2 \pi L}{d} \left( \eta m + (\eta + 1) \ell \frac{d}{d_1} \right) \right) \nonumber \\
&&\times \, \exp\left(2 \pi i \left( m + \ell \frac{d}{d_1} \right)\frac{ x}{d} \right) .
\label{eqn:w3_eik1}
\end{eqnarray}
It involves the Fourier transformation of the angular distribution,
\begin{equation}
\widetilde{D} \left( \omega \right) = \int \frac{\mathrm{d} p}{p_z} D \left( \frac{p}{p_z}\right) e^{-i \omega p / p_z} .
\label{eqn:Dtilde}
\end{equation}
In the case of a very broad momentum distribution this characteristic function $\widetilde{D} $ can be replaced by a Kronecker-$\delta$ function, which yields the basic result \cite{Brezger2003a,Hornberger2004a}
\begin{eqnarray}
w_3 \left(x \right) &=& \frac{1}{G} \sum_{\ell=-\infty}^{\infty} A_{s\ell}^{*} B_{r \ell } \left( s \ell \frac{d}{d_1} \frac{L}{L_{\textrm{T}}} \right) \nonumber \\
&& \times \, \exp\left(2 \pi i \ell\frac{ x}{d_3} \right) .
\label{eqn:w3_eik2}
\end{eqnarray}
The comparison with Eq.~(\ref{eq:TE}) shows clearly that Talbot-Lau interference is based on the Talbot effect, both described by the Talbot-Lau coefficients defined in (\ref{eqn:Beik}).
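As a minimal numerical illustration (assuming ideal binary gratings with a hypothetical open fraction and $d_1=d$), the sinusoidal visibility of the density pattern (\ref{eqn:w3_eik2}) for the $r:s=2:1$ resonance can be evaluated as follows; note that the detected signal additionally involves the coefficients $A'_n$ of the third grating.
\begin{verbatim}
import numpy as np

def A(m, f):
    # Fourier coefficients of the binary transmission probability
    return f * np.sinc(m * f)

def B(m, xi, f, jmax=40):
    # Basic Talbot-Lau coefficients (eqn:Beik) of the same grating
    j = np.arange(-jmax, jmax + 1)
    b = f * np.sinc(j * f)
    bm = f * np.sinc((j - m) * f)
    return np.sum(b * bm * np.exp(1j * np.pi * xi * (m - 2 * j)))

def visibility_21(L_over_LT, f=0.48):
    # visibility of (eqn:w3_eik2) for r:s = 2:1 and d = d_1
    S0 = A(0, f) * B(0, 0.0, f)
    S1 = np.conj(A(1, f)) * B(2, L_over_LT, f)
    return abs(2 * S1 / S0)
\end{verbatim}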
It should be emphasized, though, that in order to establish that an observed fringe pattern is really due to a quantum effect one must compare the quantum prediction with the result of the corresponding classical calculation. This is necessary since a classical moir\'{e}-type shadow pattern might give rise to a similar observation, even though the classical contrast is typically much smaller. It is shown in the appendix how the classical treatment can be formulated in the same framework as the quantum case using a phase-space description.
Finally, since Eq.~(\ref{eqn:w3_eik2}) assumes the resonance condition to be exactly met, it cannot be used to assess the adjustment precision required in the experiment.
As shown in the next section, one can evaluate the necessary precision in the eikonal approximation by explicitly taking into account the finite transverse momentum spread and the finite size of the signal detector.
\subsection{Adjustment requirements}
In order to allow for deviations from the exact resonance condition we use Eq.~(\ref{eqn:w3_eik1}) when evaluating the expression for the detection signal.
According to Eq.~(\ref{eqn:signal}) the detection signal $S(x_S)$ is then characterized by the Fourier coefficients
\begin{eqnarray}
S_n &=& \frac{W}{G}A^{\prime *}_{n} \sum_{\ell,m = -\infty}^{\infty}\, A_{\ell} \, B_{m } \left( \eta \left(m + \ell \frac{d}{d_1} \right) \frac{L}{L_{\textrm{T}}}\right) \nonumber \\
&& \times \, \widetilde{D} \left( 2 \pi L \left( \frac{\eta}{d} m + \frac{\eta + 1}{d_1} \ell \right) \right) \nonumber \\
&&\times \, \text{\rm sinc} \left( \pi W \left( \frac{m}{d} + \frac{\ell}{d_1} - \frac{n}{d_3}\right)\right).
\label{eqn:signal2}
\end{eqnarray}
Here, the $A'_n$ are the Fourier expansion coefficients of the third grating profile $\left|t_3 \left(x \right) \right|^2$ with the period $d_3$. Consider now small deviations $\left(\delta L, \delta \eta , \delta d_1, \delta d, \delta d_3 \right)$ from the setup parameters $\left(L, \eta , d_1, d, d_3 \right)$ satisfying a particular Talbot-Lau resonance condition $r:s$. The distances $L_1=L$ and $L_2=\eta L$ can thus vary independently.
Instead of approximating the characteristic function $\widetilde{D}$ by a Kronecker-$\delta$, we account for the small deviations by the refined approximation $\widetilde{D} \left(\omega + \varepsilon \right) \approx \delta_{\omega,0} \widetilde{D} \left(\varepsilon \right)$. It holds as long as the main argument $\omega$ is either zero or much greater in modulus than the width $1/\alpha$, and as long as $\varepsilon < 1/\alpha$.
One can thus split the argument of $\widetilde{D} $ in (\ref{eqn:signal2}) into the ideal resonance part $\omega$ and in the part $\varepsilon$ containing the parameter deviations.
The same procedure may be done with the $\text{\rm sinc}$ term, which is sharply peaked if the detector size $W$ exceeds the period $d$ by orders of magnitude.
If the small parameter deviations are taken into account to first order one obtains the same resonance relation
for the summation indices in (\ref{eqn:signal2}) as in the ideal case, while the $n$th Fourier expansion coefficient of the signal (with respect to the period $d_3$) is multiplied by the reduction factor
\begin{eqnarray}
R_{n} &=& \widetilde{D} \left( \frac{2 \pi s n L}{d_1} \left[ \frac{\delta L}{L} - \frac{\delta \eta }{\eta } + \left(\eta +1 \right) \left( \frac{\delta d}{d} - \frac{\delta d_1}{d_1} \right) \right]\right) \nonumber \\
&&\times \, \text{\rm sinc} \left(\frac{\pi s n W}{d_1} \left[ \frac{\delta d_1}{d_1} - \frac{\delta d}{d} + \frac{1}{\eta} \left( \frac{\delta d_3}{d_3} - \frac{\delta d}{d} \right) \right] \right) . \nonumber\\
&&
\label{eqn:reductionfactor}
\end{eqnarray}
The absolute value of this factor is less than $1$ for $n \neq 0$ so that the contrast of the detection signal is effectively reduced by the deviations. In particular, the sinusoidal visibility (\ref{eqn:Vexp}) of the signal is reduced by the factor $R_1$.
Typical experiments \cite{Brezger2002a,Brezger2003a,Hackermuller2003a,Gerlich2007a} are characterized by $W/d \approx 10^2$, $L/d \approx 10^5$ and $\alpha \approx 10^{-3}$ so that already relative parameter deviations on the order of $0.1 \%$ strongly affect the interference contrast.
To obtain a simple and conservative estimate we treat all imprecisions as independent, specializing to the standard case $r:s=2:1$, where the grating distances and grating periods are equal. Bounds for the adjustment precisions are then obtained from Eq.~(\ref{eqn:reductionfactor}) by requiring the arguments to be smaller than the widths of the momentum distribution and of the $\text{\rm sinc}$ function, respectively,
\begin{eqnarray}
\frac{\delta L}{L} + 2\frac{ \delta d}{d} &<& \frac{d}{4 \pi \alpha L}
\end{eqnarray}
and
\begin{eqnarray}
\frac{\delta d}{d} &<& \frac{d}{4W}.
\end{eqnarray}
This quantifies to what degree a better collimation of the beam and a smaller detector size relax the required adjustment precision, albeit at the expense of a loss of signal.
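For orientation, inserting hypothetical but typical values ($d=1\,\mu$m, $L=0.22\,$m, $\alpha=1\,$mrad, $W=100\,\mu$m) into these bounds yields relative tolerances on the order of $10^{-4}$ to $10^{-3}$:
\begin{verbatim}
import numpy as np

# Hypothetical but typical values:
# d = 1 um, L = 0.22 m, alpha = 1 mrad, W = 100 um
d, L, alpha, W = 1e-6, 0.22, 1e-3, 100e-6
print(d / (4 * np.pi * alpha * L))  # delta L/L + 2 delta d/d  (~3.6e-4)
print(d / (4 * W))                  # delta d/d                (~2.5e-3)
\end{verbatim}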
\section{Effect of the interaction potential} \label{sec:potential}
The preceding section showed how the general coherent state transformation effected by a diffraction grating enters the Talbot-Lau calculation via the generalized grating coefficients (\ref{eqn:gratingcoeff}).
In general, this transformation is determined by the potential $V \left(x,z \right)$ due to the long-range dispersion forces acting on the beam particles while they pass the grating structure.
Before we present a general way to account for the presence of this potential in Sect.~\ref{sec:semiclass} it is helpful to discuss the most important grating-particle interactions in a simpler form, by using the \emph{eikonal approximation}. It treats the grating as the combination of an absorption mask and a phase modification, and it can be characterized by a grating transmission function
\begin{equation}
t \left(x \right) = \left|t\left(x \right) \right| \exp \left(- \frac{i m}{\hbar p_z} \int \mathrm{d} z \, V \left(x,z \right) \right),
\label{eqn:simpleeiko}
\end{equation}
whose amplitude $\left|t\left(x \right) \right| \in \{0,1\}$ describes the grating structure.
Interaction-free gratings are modeled by a grating transmission function without phase, $t(x)=|t(x)|$, while pure phase gratings, such as a standing laser light wave, are characterized by $|t(x)|=1$.
The corresponding eikonal interference pattern is then obtained immediately
as described in Sect.~\ref{sec:TLeikonal}.
We proceed with a short overview of the typical grating potentials, discussing how they affect the interference contrast in the eikonal approximation. By convention, the diffraction grating is located at the longitudinal position $z=0$, as indicated in Fig.~\ref{fig:TLsetup}.
\subsection{Material gratings} \label{sec:potential_mat}
A neutral particle located within the slit of a material grating experiences a potential determined mainly by the attractive dispersion forces due to the grating walls. Other forces, such as the exchange interaction or the electro-static attraction due to a permanent dipole, are much less important for interferometry because they either occur only at very close distances or because they are diminished by rotational averaging.
In any case, for all positions within a slit the walls are well approximated by an infinite surface in the $yz$-plane \cite{Grisenti1999a,Bruhl2002a}. The grating potential is thus set to be solely $x$-dependent and acting only within the time of passage $t=m b /p_z$ throughout the grating of thickness $b$, while it is set to be zero outside.
In general, the dispersion force between a polarizable particle and a material plane is described by the expression of Casimir and Polder \cite{Casimir1948a} and its generalizations, e.g.~\cite{Wylieboth,Buhmann2005a}. In the close distance limit it reduces to the van der Waals potential $V (x) = -C_3/x^{3}$, with $x$ the distance to the surface, and at large distances, where retardation plays a role, one has the asymptotic form $V(x) = -C_4/x^{4}$.
The interaction constants $C_3,C_4 > 0$ are determined by the frequency dependent polarizability of the particle (as well as the dielectric function of the grating material), and the regime of validity of the limiting forms is delimited by the wavelengths corresponding to the strong electronic transitions \cite{Derevianko1999a,Bruhl2002a,Madronero2007a}.
In any case, the dispersion force diverges on the grating walls, rendering
the eikonal approximation invalid in close vicinity to the walls.
Since the contributions of these regions do not alter the interference contrast appreciably, but lead to
numerical noise,
we will discuss a reasonable criterion to blind out the beam close to the grating walls in Section \ref{sec:semiclass}.
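As a minimal illustration (with a hypothetical longitudinal velocity and the slit parameters quoted in the following paragraph), the eikonal phase of (\ref{eqn:simpleeiko}) accumulated in a single slit under the van der Waals wall potential can be evaluated as follows; the 2\% cutoff near the walls is an arbitrary illustration choice for blinding out the divergent region.
\begin{verbatim}
import numpy as np

hbar = 1.054571817e-34
amu = 1.66053906660e-27

m = 840 * amu              # C70 mass
v_z = 150.0                # longitudinal velocity in m/s (hypothetical)
p_z = m * v_z
b = 500e-9                 # grating thickness
a = 476e-9                 # slit width
C3 = 10e-3 * 1.602176634e-19 * 1e-27   # 10 meV nm^3 in J m^3

# distance xi to one wall; blind out 2% of the slit near each wall
xi = np.linspace(0.02 * a, 0.98 * a, 400)
V = -C3 * (1.0 / xi**3 + 1.0 / (a - xi)**3)   # both walls
phase = -(m / (hbar * p_z)) * b * V           # eikonal phase over b
t_slit = np.exp(1j * phase)                   # transmission in the slit
\end{verbatim}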
\begin{figure}
\caption{\label{fig:lambda_mat} Talbot-Lau visibility (\ref{eqn:Vexp}) of C$_{70}$ fullerenes versus their de Broglie wavelength for material gratings: eikonal approximation with the retarded asymptotic ($C_4$) potential (solid), with the van der Waals ($C_3$) potential (dashed), and without intra-slit potential (dotted); the dash-dotted line shows the classical calculation (see text for the parameters).}
\end{figure}
Figure \ref{fig:lambda_mat} shows the interference visibility (\ref{eqn:Vexp}) for a typical Talbot-Lau experiment with C$_{70}$ fullerene molecules of mass $m=840 \, \text{amu}$ \cite{Brezger2002a},
plotted versus their de Broglie wavelength. All gratings are assumed to be separated by the distance $L=22 \, \text{cm}$ and to be made of gold with a period of $d=991 \, \text{nm}$, a slit width of $476 \, \text{nm}$, and a thickness of $b=500 \, \text{nm}$. One can see two peaks corresponding to the first and second Talbot order, at $\lambda=4.5\,\text{pm}$ and at $\lambda=9.0\,\text{pm}$, respectively. The solid line represents the eikonal approximation with a retarded asymptotic potential ($C_4=3\hbar c \alpha_0 / 8 \pi$ with the static polarizability $\alpha_0 = 96.7\,$\AA$^3$ obtained via the Clausius-Mossotti relation \cite{Compagnon2001a}),
while the dashed and the dotted lines correspond to the van der Waals potential ($C_3=10\, \text{meV}\,\text{nm}^3$) and to the absence of an intra-slit potential, respectively. One observes that the presence of the dispersion forces changes the interference characteristics significantly. The asymmetry in the double-peak structure compared to the interaction-free case is due to the fact that the particles with a larger velocity, i.e., with a smaller wavelength $\lambda$, receive a smaller eikonal phase than the slower particles.
Moreover, note that the effect of retardation has a small but visible influence on the fringe contrast.
The dash-dotted line in Fig.~\ref{fig:lambda_mat} represents the moir\'{e}-like effect as expected from classical mechanics. It was calculated in a phase space formulation as described in the appendix,
using the classical correspondence of the eikonal approximation with the $C_4$-potential. As one expects, the dependence of the classical result on the fictitious ``wavelength'' $\lambda=h/(m v_z)$, which is due to the velocity dependence of the classical deflection, differs strongly from the quantum results.
We emphasize that all numerical results presented in this paper are obtained for a fixed longitudinal velocity $v_z = h / (\lambda m)$ of the particles. A comparison with the experiment still requires the results to be averaged with respect to the velocity distribution in the beam. This may pose a severe restriction when using particles with larger polarizability because the increased dispersive interaction decreases the width of the double-peaks seen in Fig.~\ref{fig:lambda_mat} significantly \cite{Gerlich2007a}. One way to avoid this is to replace the material diffraction grating by a standing laser wave.
\subsection{Laser gratings}
Recently, a Kapitza-Dirac-Talbot-Lau interferometer (KDTLI) was demonstrated, where the central grating is formed by a standing light wave \cite{Gerlich2007a}.
In the eikonal approximation the phase of an incoming plane wave is modulated according to the potential created by the off-resonant interaction with the standing laser beam. It is determined by the energy of the induced electric dipole in the oscillating field, and is therefore proportional to the laser power $P_L$ and to the dynamic polarizability $\alpha_{\omega}$ of the beam particles at the laser frequency,
\begin{equation}
V \left( x,z \right) = - \frac{4 P_L \alpha_{\omega}}{\pi \varepsilon_0 c w_y w_z} \sin^2 \left( \pi \frac{x}{d}\right) e^{-2 z^2 / w_z^2} .
\label{eqn:laserpot}
\end{equation}
Here, $w_y$ and $w_z$ are the waists of the Gaussian mode, and $w_y$ is chosen large compared to the detector size in order to guarantee a regular grating structure. We therefore disregard the $y$-dependence by setting $y=0$. Moreover, for sufficiently small $w_z$ an effective one-dimensional treatment of the potential is permissible, where one replaces the $z$-dependence by a parametric time dependence $z=p_z t / m$, as discussed below in Sect.~\ref{sec:pertexp}.
The $z$-integration in (\ref{eqn:simpleeiko}) renders the eikonal phase independent of $w_z$, which already indicates that the elementary eikonal approximation will cease to be valid if the laser waist is increased. Nonetheless, it yields an interference contrast that fits well to the measured data of the recent fullerene experiments \cite{Gerlich2007a}. Moreover, it admits a simple, closed expression for the Talbot-Lau coefficients (\ref{eqn:Beik}),
\begin{equation}
B_m \left(\xi \right) = J_m \left( - \frac{4 M P_L \alpha_{\omega} }{\sqrt{2\pi} \hbar c \varepsilon_0 w_y p_z} \sin \pi \xi \right),
\label{eqn:Beiklaser}
\end{equation}
where $M$ is the mass of the beam particles and $J_m$ stands for the $m$th order Bessel function of the first kind \cite{Abramowitz1965a}.
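A minimal Python sketch of Eq.~(\ref{eqn:Beiklaser}), with the full prefactor lumped into a single hypothetical dimensionless phase \verb|phi0|, illustrates why the contrast vanishes at integer Talbot orders:
\begin{verbatim}
import numpy as np
from scipy.special import jv

def B_laser(m, xi, phi0):
    # Eikonal Talbot-Lau coefficients (eqn:Beiklaser); phi0 lumps
    # 4 M P_L alpha_w / (sqrt(2 pi) hbar c eps0 w_y p_z) into one number
    return jv(m, -phi0 * np.sin(np.pi * xi))

print(B_laser(1, 1.0, phi0=3.0))  # ~0: no contrast at integer xi
print(B_laser(1, 0.5, phi0=3.0))  # nonzero between the Talbot orders
\end{verbatim}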
\begin{figure}
\caption{\label{fig:lambda_laser} Sinusoidal visibility (\ref{eqn:Vexp}) of the Kapitza-Dirac-Talbot-Lau setup with fullerenes versus the de Broglie wavelength: eikonal approximation (solid) and the corresponding classical calculation (dashed), see text.}
\end{figure}
The solid line in Figure \ref{fig:lambda_laser} shows the visibility (\ref{eqn:Vexp}) for the Viennese KDTLI with fullerenes \cite{Gerlich2007a} as obtained from (\ref{eqn:Beiklaser}). In contrast to Figure \ref{fig:lambda_mat}, the visibility drops to zero whenever the wavelength meets the Talbot condition $L = k L_{\textrm{T}}$ at an integer order $k$. This is explained by the fact that in
the elementary Talbot effect (\ref{eqn:Talboteffect}) a pure phase grating with no absorptive walls leads to a constant density at multiples of the Talbot length.
At the same time, there are broad regions of high contrast. They render the KDTLI setup superior to a material grating interferometer for particles with a high polarizability and a substantial velocity spread. Moreover, the interaction strength and the grating period can be tuned in a KDTLI rendering the laser grating the preferable choice to explore the validity regime of the eikonal approximation, as done in Section \ref{sec:numerics}.
The dashed line in Figure \ref{fig:lambda_laser} represents the classical version of the eikonal calculation as described in the appendix.
Note that it differs significantly from the quantum result in the experimentally relevant wavelength regime, while the curves become indistinguishable for $\lambda \rightarrow 0$.
Finally, it should be emphasized that a realistic description of the laser grating must also account for the possibility of photon absorption. The effect of the resulting transverse momentum kicks
can be incorporated into the eikonal approximation by replacing the grating transformation kernel by a probabilistic sum of such kernels \cite{Gerlich2007a}.
\section{Semiclassical approach to the grating interaction} \label{sec:semiclass}
The de Broglie wavelength is by far the smallest length scale in the matter wave interference experiments considered in this paper.
It seems therefore natural to use a semiclassical approach for calculating the grating transformation used in (\ref{eq:Undef}). We will show how the elementary eikonal approximation (\ref{eqn:simpleeiko}) can be derived from the appropriate semiclassical formulation if a high-energy limit is taken. Moreover, one can obtain a refined eikonal approximation, where an incoming plane wave is still merely multiplied by a factor. Since this factor depends on the transverse momentum of the incoming wave it renders the numerical implementation more elaborate.
However, the main result of this section is the expression (\ref{eqn:semiclassresult})--(\ref{eqn:semiclassphase}) for the scattering factor which approximates the scattering transformation of a transverse plane wave in the semiclassical high-energy regime. As such it permits a straightforward evaluation of the generalized grating coefficients (\ref{eqn:gratingcoeff}), see Eq.~(\ref{eqn:gratingcoeff2}). As shown in Section \ref{sec:numerics}, this result outperforms the eikonal approximation and it may serve to characterize its regime of validity.
The derivation is based on the semiclassical approximation of the two-dimensional time evolution operator $ \mathsf U _t$ determined by the grating interaction potential $V \left(\boldsymbol{r} \right)=V \left(x,z \right)$.
\subsection{The scattering factor}
We proceed to incorporate the grating interaction
by means of the formalism of scattering theory \cite{Taylor1972a}. Its basic tool is the scattering operator defined as
\begin{equation}
\mathsf S = \lim_{t \rightarrow \infty} \mathsf U _{-t}^{(0)} \mathsf U _{2t} \mathsf U _{-t}^{(0)},
\label{eqn:Soperator}
\end{equation}
with $ \mathsf U _{t}^{(0)}=\exp \left(-i {\mathsf p}^2 t /2 m \hbar \right)$ the free time evolution operator
for the motion in the $xz$-plane
and $ \mathsf U _{t}$ the complete time evolution operator, which includes the grating potential $V(\boldsymbol{r})$. It is pertinent to use $ \mathsf S $ instead of $ \mathsf U _t$ since the scattering operator transforms the state instantaneously leaving the asymptotic dynamics to be described by the free time evolution. Since the latter is easily incorporated using Wigner functions this fits to the phase space description of Sect.~\ref{sec:generalTL}, where the initial beam state entering a Talbot-Lau interferometer is propagated freely to the second grating before the scattering transformation is applied. In the expression (\ref{eqn:generalprop}) for the grating propagator, which serves to calculate the interference pattern (\ref{eqn:interferencepattern2}), one may thus use the S-matrix (\ref{eqn:Soperator}) in place of the general unitary operator $ \mathsf U $.
In the basis of the improper plane wave states $\langle \boldsymbol{r}|\boldsymbol{p} \rangle=(2\pi\hbar)^{-1}\exp(i\boldsymbol{r}\cdot\boldsymbol{p}/\hbar)$ the scattering operator (\ref{eqn:Soperator}) is conveniently described by the scattering factor
\begin{equation}
\Phi \left( \boldsymbol{r}, \boldsymbol{p} \right) = \frac{\langle \boldsymbol{r} | \mathsf S | \boldsymbol{p} \rangle}{\langle \boldsymbol{r} | \boldsymbol{p} \rangle}
\label{eqn:Sfactor}
\end{equation}
Notice that $ \mathsf S $ acts in the Hilbert space defined on the two-dimensional plane $\boldsymbol{r} = \left(x,z \right)$. However, as will be justified below, the longitudinal motion may be separated and treated classically
so that the transformation is confined to the transverse dimension, while the $z$-coordinate turns into an effective time coordinate for a given longitudinal momentum $p_z$. This brings about the
reduced scattering factor
\begin{equation}
\phi \left(x,p \right)=\Phi \left( x\boldsymbol{e}_x, p\boldsymbol{e}_x + p_z \boldsymbol{e}_z \right),
\label{eqn:Sfactor_1D}
\end{equation}
which depends parametrically on $p_z$ and which is evaluated at the position $z=0$ of the center of the diffraction grating.
The reduced scattering factor $\phi \left(x,p \right)$ describes the phase and amplitude modification of a transverse plane wave with momentum $p$ due to the grating interaction. It enters the Talbot-Lau calculation via the Fourier coefficients (\ref{eq:Undef}) after an expansion in the plane wave basis. It follows that the generalized grating coefficients (\ref{eqn:gratingcoeff}) are directly related to $\phi\left(x,p \right) $ by a Fourier transformation,
\begin{equation}
b_n \left( p\right) = \frac{1}{d} \int_{-d/2}^{d/2} \mathrm{d} x \, e^{-2 \pi i n x /d} \phi \left( x, p \right).
\label{eqn:gratingcoeff2}
\end{equation}
The calculation of the Talbot-Lau interference thus reduces to evaluating the scattering factor (\ref{eqn:Sfactor_1D}).
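Once the reduced scattering factor is available on a grid, Eq.~(\ref{eqn:gratingcoeff2}) amounts to a single quadrature per coefficient; a minimal Python sketch (assuming the sampled factor is supplied, e.g. from the semiclassical expressions below):
\begin{verbatim}
import numpy as np

def grating_coefficient(n, x, phi_x, d):
    # Generalized grating coefficient (eqn:gratingcoeff2) by direct
    # quadrature; x covers [-d/2, d/2] and phi_x samples phi(x, p)
    # at a fixed transverse momentum p.
    dx = x[1] - x[0]
    return np.sum(np.exp(-2j * np.pi * n * x / d) * phi_x) * dx / d
\end{verbatim}
In the eikonal case, where $\phi(x,p)$ reduces to $t(x)$, this quadrature reproduces the Fourier coefficients of Eq.~(\ref{eq:bj}).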
\subsection{Semiclassical calculation}
We proceed to calculate the scattering factor (\ref{eqn:Sfactor_1D}) by means of the semiclassical asymptotic approximation. It assumes the action of those trajectories through the interaction region, which contribute to the path integral for its position representation, to be much larger than $\hbar$, and the particle wavelength to be much smaller than the scale where the interaction potential changes appreciably, $\left| \lambda \nabla V \right| \ll V$.
These conditions are well satisfied in the typical experimental situation if we disregard the regions very close to the gratings where the potential exceeds the kinetic energy.
We may thus approximate the time evolution operator in the position representation by the semiclassical van Vleck-Gutzwiller propagator \cite{Gutzwiller1967a}
\begin{eqnarray}
\langle \boldsymbol{r} | \mathsf U _t |\boldsymbol{r}_0 \rangle &=& \frac{1}{2 \pi i \hbar} \sqrt{\left| \det \left( \frac{\partial^2 S_{t} \left( \boldsymbol{r}, \boldsymbol{r}_0\right)}{\partial \boldsymbol{r} \partial \boldsymbol{r}_0}\right)\right|} \nonumber \\
&&\times \, \exp \left(\frac{i}{\hbar} S_{t} \left( \boldsymbol{r}, \boldsymbol{r}_0\right)\right) .
\label{eqn:VVG1}
\end{eqnarray}
Here, $S_{t}$ is the action of the classical trajectory travelling during time $t$ from the position $\boldsymbol{r}_0$ to $\boldsymbol{r}$. In general, there might be more than one such trajectory, which would require taking special care of almost coalescing trajectories and of the associated Morse index. However, in the interferometric setup we are in the high-energy regime, where the interaction potential is much weaker than the energy of the incoming particles, $|V| \ll E$.
All relevant contributions are therefore characterized by a single, slightly deflected classical trajectory passing the interaction region.
It is now convenient to specify this trajectory in terms of the deviation from the undeflected straight line, as specified by the initial position $\boldsymbol{r}_0$ and momentum $\boldsymbol{p}_0$. The momentum change after time $t$ is given by
\begin{eqnarray}
\Delta \boldsymbol{p}_t \left(\boldsymbol{r}_0, \boldsymbol{p}_0 \right) &=& -\int_0^t \mathrm{d} \tau \, \nabla V \left( \boldsymbol{r}_{\tau} \left(\boldsymbol{r}_0, \boldsymbol{p}_0 \right) \right),
\label{eq:Deltap}
\end{eqnarray}
so that the deflected trajectory reads
\begin{eqnarray}
\boldsymbol{r}_{t} \left(\boldsymbol{r}_0, \boldsymbol{p}_0 \right) &=& \boldsymbol{r}_0 + \frac{t}{m}\boldsymbol{p}_0+\int_0^t \frac{\mathrm{d} \tau}{m} \, \Delta\boldsymbol{p}_{\tau} \left(\boldsymbol{r}_0, \boldsymbol{p}_0 \right) \label{eqn:trajx} \\
\boldsymbol{p}_{t} \left(\boldsymbol{r}_0, \boldsymbol{p}_0 \right) &=& \boldsymbol{p}_0 + \Delta \boldsymbol{p}_t \left(\boldsymbol{r}_0, \boldsymbol{p}_0 \right). \label{eqn:trajp}
\end{eqnarray}
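For a numerical estimate of the deflection, Eq.~(\ref{eq:Deltap}) may be evaluated along the undeflected straight-line path, which is the lowest-order approximation used further below; a minimal sketch, where the potential gradient \verb|dVdx| is a placeholder function to be supplied:
\begin{verbatim}
import numpy as np

def delta_p_transverse(x0, p, p_z, m, dVdx, t_max, nt=400):
    # Transverse momentum deflection (eq:Deltap), evaluated along the
    # undeflected straight-line path (lowest order in the deflection);
    # dVdx(x, z) must return the transverse gradient dV/dx.
    t = np.linspace(0.0, t_max, nt)
    x_t = x0 + (p / m) * t        # free transverse motion
    z_t = (p_z / m) * t           # free longitudinal motion
    grad = np.array([dVdx(xi, zi) for xi, zi in zip(x_t, z_t)])
    return -np.sum(grad) * (t[1] - t[0])
\end{verbatim}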
In the van Vleck-Gutzwiller propagator (\ref{eqn:VVG1}) the contributing trajectory is specified by the boundary values $\boldsymbol{r}_0$ and $\boldsymbol{r}$. For the following calculation it is important to rewrite it as an initial value problem, specified by the initial phase space point $\left(\boldsymbol{r}_0, \boldsymbol{p}_0 \right)$ of the trajectory \cite{Heller1991a}.
\begin{eqnarray}
\langle \boldsymbol{r} | \mathsf U _t |\boldsymbol{r}_0 \rangle &=& \frac{1}{2\pi i \hbar} \int \mathrm{d}^2 p_0 \, \sqrt{\left| \det \left( \frac{\partial \boldsymbol{r}_{t} \left(\boldsymbol{r}_0, \boldsymbol{p}_0 \right)}{\partial \boldsymbol{p}_0} \right) \right|}
\nonumber\\
&&\times \delta \left( \boldsymbol{r} - \boldsymbol{r}_{t} \left(\boldsymbol{r}_0, \boldsymbol{p}_0 \right) \right) \exp \left( \frac{i}{\hbar} S_{t} \left( \boldsymbol{r}_0, \boldsymbol{p}_0\right) \right)
\nonumber\\&& \label{eqn:VVG2}
\end{eqnarray}
Plugging this into the expression (\ref{eqn:Soperator}) for the 2d scattering operator yields a semiclassical approximation for the scattering factor (\ref{eqn:Sfactor}).
\begin{align}
\Phi \left( \boldsymbol{r}, \boldsymbol{p} \right) =& \lim_{T\rightarrow \infty} \frac{m}{T} \iint \frac{\mathrm{d}^2 r_0 \mathrm{d}^2 p_0}{\left(2 \pi \hbar \right)^2} \, \sqrt{\left| \det \left( \frac{\partial \boldsymbol{r}_{2T} \left(\boldsymbol{r}_0, \boldsymbol{p}_0 \right)}{\partial \boldsymbol{p}_0} \right) \right|}
\nonumber\\
&\times\exp \left( \frac{i}{\hbar} \Theta_{2T} \left( \boldsymbol{r}_0, \boldsymbol{p}_0 \right) \right)
\label{eqn:Sfactor2}
\end{align}
The phase of the integrand is given by the action-valued function
\begin{eqnarray}
\Theta_{2T} \left( \boldsymbol{r}_0, \boldsymbol{p}_0 \right) &=& S_{2T} \left( \boldsymbol{r}_0, \boldsymbol{p}_0\right) - \frac{m}{2T} \left( \boldsymbol{r} - \boldsymbol{r}_{2T} \left( \boldsymbol{r}_0, \boldsymbol{p}_0 \right) \right)^2
\nonumber\\
&&- \boldsymbol{p} \cdot \left( \boldsymbol{r} - \boldsymbol{r}_0 - \frac{\boldsymbol{p}}{2m} T \right),
\label{eqn:Sphase1}
\end{eqnarray}
while the amplitude is determined by the stability determinant of the associated trajectory.
The semiclassical van Vleck-Gutzwiller propagator (\ref{eqn:VVG1}) used here is a stationary phase approximation of the time evolution operator in path integral formulation \cite{Gutzwiller1967a}. To remain at a consistent level of approximation it is therefore necessary to evaluate the phase space integral in (\ref{eqn:Sfactor2}) in the stationary phase approximation as well \cite{Bleistein1975a}.
The stationary point of the phase $\Theta_{2T}$ is determined by the condition
\begin{equation}
\left( \left. \frac{\partial \Theta_{2T}}{\partial \boldsymbol{r}_0} \right|_{\boldsymbol{p}_0}, \left. \frac{\partial \Theta_{2T}}{\partial \boldsymbol{p}_0} \right|_{\boldsymbol{r}_0} \right) = 0 .
\end{equation}
Noting the initial value derivatives of the classical action
\begin{eqnarray}
\left(\left. \frac{\partial S_{t}}{\partial \boldsymbol{r}_0} \right|_{\boldsymbol{p}_0}\right)^{\rm T} &=& - \boldsymbol{p}_0 +\left( \left. \frac{\partial \boldsymbol{r}_t}{\partial \boldsymbol{r}_0} \right|_{\boldsymbol{p}_0}\right)^{\rm T} \boldsymbol{p}_{t} \\
\left(\left. \frac{\partial S_{t}}{\partial \boldsymbol{p}_0} \right|_{\boldsymbol{r}_0}\right)^{\rm T} &=& \left(\left. \frac{\partial \boldsymbol{r}_t}{\partial \boldsymbol{p}_0} \right|_{\boldsymbol{r}_0} \right)^{\rm T}\boldsymbol{p}_{t},
\end{eqnarray}
and using the fact that the matrix $\partial\boldsymbol{r}_{2T}/\partial\boldsymbol{p}_0$ is invertible for our trajectories,
the stationary phase condition leads to the equations
\begin{eqnarray}
\boldsymbol{r} &=& \boldsymbol{r}_{2T} \left( \boldsymbol{r}_0, \boldsymbol{p}_0 \right) - \frac{T}{m} \boldsymbol{p}_{2T} \left( \boldsymbol{r}_0, \boldsymbol{p}_0 \right) \langlebel{eqn:statx}\\
\boldsymbol{p} &=& \boldsymbol{p}_0 \langlebel{eqn:statp}.
\end{eqnarray}
They serve to determine the initial position $\boldsymbol{r}_0(\boldsymbol{r},\boldsymbol{p})$ of the trajectory implicitly. Using the general formula for a four-dimensional integral, e.g. \cite[Eq. (A.30)]{Hornberger2002b}, the 2d scattering factor (\ref{eqn:Sfactor2}) then takes the form
\begin{equation}
\Phi \left( \boldsymbol{r}, \boldsymbol{p} \right) = \lim_{T\rightarrow \infty} A_{2T} \left( \boldsymbol{r}_0 \left( \boldsymbol{r}, \boldsymbol{p} \right), \boldsymbol{p} \right) \exp \left( \frac{i}{\hbar} \Theta_{2T} \left( \boldsymbol{r}_0 \left( \boldsymbol{r}, \boldsymbol{p} \right),\boldsymbol{p} \right) \right).
\label{eqn:Sfactor3}
\end{equation}
The amplitude modification
\begin{equation}
A_{2T} \left( \boldsymbol{r}_0, \boldsymbol{p}_0 \right) = \left| \det \left( \frac{\partial \boldsymbol{r}_{2T} \left( \boldsymbol{r}_0, \boldsymbol{p}_0 \right)}{\partial \boldsymbol{r}_0} - \frac{m}{T} \frac{\partial \boldsymbol{p}_{2T} \left( \boldsymbol{r}_0, \boldsymbol{p}_0 \right)}{\partial \boldsymbol{r}_0} \right) \right|^{-\frac{1}{2}}
\label{eqn:Samp1}
\end{equation}
is obtained, in a tedious but straightforward calculation, by using the Poisson relation between the conjugate variables $\boldsymbol{r}_t$ and $\boldsymbol{p}_t$ of the trajectory \cite{LandauM} when evaluating the product of two determinants. The matrix of derivatives in (\ref{eqn:Samp1}) can be computed by taking the initial value derivative of the equation of motion for the trajectory and solving the resulting ordinary differential equation.
\begin{figure}
\caption{\label{fig:scattskizze} Stationary point of the semiclassical phase interpreted as a classical scattering trajectory; the deflection by the grating potential is strongly exaggerated (see text).}
\end{figure}
Figure \ref{fig:scattskizze} shows how the stationary point can be understood from the point of view of a classical scattering trajectory. Given the phase space coordinates $\boldsymbol{r}, \boldsymbol{p}$
determining the matrix element of the scattered plane wave,
the momentum $\boldsymbol{p}$ fixes the initial momentum of the classical trajectory passing through the interaction region. The position $\boldsymbol{r}$ determines the final position $\boldsymbol{r}_{2T}$ of the deflected trajectory, which is obtained after a free motion during time $T$ in a direction given by the final momentum $\boldsymbol{p}_{2T}$ of the trajectory. This free evolution ensures that the associated action (\ref{eqn:Sphase1}) and stability amplitude (\ref{eqn:Samp1}) is independent of $T$ in the limit $T \rightarrow \infty$.
Note that Fig.~\ref{fig:scattskizze} overemphasizes the effect of the potential, since in all interferometrically relevant situations the deflection of the classical trajectories will be only a
small correction to the free rectilinear path.
The limit in the semiclassical result (\ref{eqn:Sfactor3}) is obtained already at a finite time $T$ if the interaction potential may be considered to have a finite range. The relevant scale is the grating passage time, given by $t=m b /p_z$ for material gratings and $t = m w_z / p_z$ for laser gratings, respectively. This follows from the fact that outside of the grating potential the classical trajectories remain rectilinear, so that neither the stationary phase condition (\ref{eqn:statx}) nor the stationary phase $\Theta_{2T}$ and amplitude $A_{2T}$ are affected by a further increase in $T$.
\subsection{Iterative solution in the momentum deflection}
\label{sec:pertexp}
We will now give explicit approximate solutions for the semiclassical scattering factor (\ref{eqn:Sfactor3}) which are valid in the high-energy limit, i.e., whenever the longitudinal kinetic energy $p_z^2/2 m$ of the incoming particles is large compared to the interaction potential $|V(\boldsymbol{r})|$.
In general, one has to solve Eq.~(\ref{eqn:statx}) for the initial value $\boldsymbol{r}_0$. Rewriting it in terms of the expressions (\ref{eqn:trajx}) and (\ref{eqn:trajp}) for the positions and momenta of the trajectory, one obtains an implicit equation for $\boldsymbol{r}_0(\boldsymbol{r},\boldsymbol{p})$ which may be solved iteratively.
\begin{align}
\boldsymbol{r}_0 \left( \boldsymbol{r}, \boldsymbol{p} \right) =& \boldsymbol{r} - \frac{T}{m} \boldsymbol{p} + \frac{T}{m} \Delta \boldsymbol{p}_{2T} \left( \boldsymbol{r}_0\left( \boldsymbol{r}, \boldsymbol{p} \right), \boldsymbol{p} \right)
\nonumber\\
& - \int_{0}^{2T} \frac{\mathrm{d} t}{m} \, \Delta \boldsymbol{p}_t \left( \boldsymbol{r}_0\left( \boldsymbol{r}, \boldsymbol{p} \right), \boldsymbol{p} \right)
\label{eqn:trajexpansion}
\end{align}
\subsubsection{The Glauber eikonal approximation}
In the semiclassical short interaction time and high-energy limit, which applies to typical interference experiments, the transverse momentum deflection $\Delta \boldsymbol{p}_{2T}$ is so weak that its contribution
can be neglected.
This corresponds to the zeroth order solution of Eq.~(\ref{eqn:trajexpansion}),
\begin{equation}
\boldsymbol{r}_0^{(0)} \left( \boldsymbol{r}, \boldsymbol{p} \right) = \boldsymbol{r} - \frac{T}{m} \boldsymbol{p}.
\label{eqn:reihe0}
\end{equation}
In order to keep the approximation consistent
one has to neglect the deflection in the expressions for the phase modification (\ref{eqn:Sphase1}) and for the amplitude modification (\ref{eqn:Samp1}) as well, by setting the classical trajectory to be the free rectilinear path. Consequently, there is no amplitude modification and the scattering factor (\ref{eqn:Sfactor3}) reads
\begin{equation}
\Phi \left( \boldsymbol{r}, \boldsymbol{p} \right) = \exp \left( - \frac{i}{\hbar} \int_{-\infty}^{\infty} \mathrm{d} t \, V \left( \boldsymbol{r} + \frac{\boldsymbol{p}}{m}t \right) \right).
\end{equation}
This is the \textit{Glauber eikonal approximation} obtained by R.~J.~Glauber from the Lippmann-Schwinger equation by a rather different line of argument \cite{Glauber1959a}. The reduced scattering factor (\ref{eqn:Sfactor_1D}) for the transverse dimension then reads
\begin{equation}
\phi \left(x,p \right) = \exp \left( - \frac{im}{\hbar p_z} \int_{-\infty}^{\infty} \mathrm{d} z \, V \left(x + \frac{p}{p_z}z , z \right)\right).
\label{eqn:glauber}
\end{equation}
The elementary eikonal approximation (\ref{eqn:simpleeiko}) follows from this expression in the limit of vanishing transverse momentum, $p \rightarrow 0$. It applies in the case of a well-collimated beam and a small grating thickness $b$,
i.e., $\left| p b / p_z \right| \ll d$.
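A minimal numerical sketch of the Glauber factor (\ref{eqn:glauber}) for the standing-wave potential (\ref{eqn:laserpot}), with the prefactor lumped into a hypothetical depth \verb|V0| and the transverse laser profile evaluated at $y=0$:
\begin{verbatim}
import numpy as np

hbar = 1.054571817e-34

def phi_glauber(x, p, p_z, m, d, V0, w_z, nz=400):
    # Glauber eikonal factor (eqn:glauber) for the potential
    # (eqn:laserpot); V0 stands for 4 P_L alpha_w/(pi eps0 c w_y w_z)
    # and is treated as a hypothetical input.  The integration runs
    # along the tilted straight line x(z) = x + (p/p_z) z.
    z = np.linspace(-4 * w_z, 4 * w_z, nz)
    xz = x + (p / p_z) * z
    V = -V0 * np.sin(np.pi * xz / d)**2 * np.exp(-2 * z**2 / w_z**2)
    integral = np.sum(V) * (z[1] - z[0])
    return np.exp(-1j * (m / (hbar * p_z)) * integral)
\end{verbatim}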
\subsubsection{The deflection approximation}
The Glauber eikonal approximation ceases to be valid as the interaction strength or interaction time increases, and one has to go to the first order in the momentum deflection $\Delta \boldsymbol{p}_{t}$ when evaluating the stationary initial value (\ref{eqn:trajexpansion}),
\begin{eqnarray}
\boldsymbol{r}_0^{(1)} \left( \boldsymbol{r}, \boldsymbol{p} \right)& = &\boldsymbol{r} - \frac{T}{m} \boldsymbol{p} + \frac{T}{m} \Delta \boldsymbol{p}_{2T} \left( \boldsymbol{r} - \frac{T}{m} \boldsymbol{p}, \boldsymbol{p} \right)
\nonumber\\
&& - \int_{0}^{2T} \frac{\mathrm{d} t}{m} \, \Delta \boldsymbol{p}_t \left( \boldsymbol{r} - \frac{T}{m} \boldsymbol{p}, \boldsymbol{p} \right).
\label{eqn:reihe1}
\end{eqnarray}
The higher order terms, which are neglected here, involve derivatives of the momentum deflection $ \Delta \boldsymbol{p}_{2T}$.
The time-evolved trajectory starting from this initial value is approximated, again to first order in $\Delta \boldsymbol{p}_{t}$, by
\begin{eqnarray}
\label{eqn:1stordertraj}
\boldsymbol{r}_t \left( \boldsymbol{r}_0^{(1)} (\boldsymbol{r}, \boldsymbol{p}), \boldsymbol{p} \right)& \approx& \boldsymbol{r}_t \left( \boldsymbol{r} - \frac{T}{m} \boldsymbol{p}, \boldsymbol{p} \right)
\nonumber\\
&& + \frac{T}{m} \Delta \boldsymbol{p}_{2T} \left( \boldsymbol{r} - \frac{T}{m} \boldsymbol{p}, \boldsymbol{p} \right)
\\
&& - \int_{0}^{2T} \frac{\mathrm{d} \tau }{m}\, \Delta \boldsymbol{p}_\tau \left( \boldsymbol{r} - \frac{T}{m} \boldsymbol{p}, \boldsymbol{p} \right).
\nonumber
\end{eqnarray}
This trajectory must be used when calculating the action (\ref{eqn:Sphase1}) in order to ensure that all expressions are evaluated to the same order in the deflection $\Delta \boldsymbol{p}_{t}$. Since this holds also for the time integral $\int \mathrm{d} t \, V \left( \boldsymbol{r}_t \left( \boldsymbol{r}_0, \boldsymbol{p} \right) \right)$, a first-order Taylor expansion of the potential is required. In total this yields
\begin{widetext}
\begin{eqnarray}
\Theta_{2T} \left( \boldsymbol{r}, \boldsymbol{p} \right) &\approx& -\int_0^{2T} \mathrm{d} t \,V \left( \boldsymbol{r}_t \left( \boldsymbol{r} - \frac{T}{m} \boldsymbol{p}, \boldsymbol{p} \right) \right) + \frac{1}{2m} \int_0^{2T} \mathrm{d} t \, \Delta \boldsymbol{p}_t^2 \left( \boldsymbol{r} - \frac{T}{m} \boldsymbol{p}, \boldsymbol{p} \right) \nonumber \\
&&- \Delta \boldsymbol{p}_{2T} \left( \boldsymbol{r} - \frac{T}{m} \boldsymbol{p}, \boldsymbol{p} \right) \cdot \int_0^{2T} \frac{\mathrm{d} t}{m} \, \Delta \boldsymbol{p}_t \left( \boldsymbol{r} - \frac{T}{m} \boldsymbol{p}, \boldsymbol{p} \right) + \frac{T}{2m} \Delta \boldsymbol{p}_{2T}^2 \left( \boldsymbol{r} - \frac{T}{m} \boldsymbol{p}, \boldsymbol{p} \right).
\label{eqn:Sphase2}
\end{eqnarray}
On the other hand, when evaluating the amplitude (\ref{eqn:Samp1}) the zeroth order solution of the initial value (\ref{eqn:reihe0}) must be used instead of (\ref{eqn:reihe1}) because the classical equation of motion for the matrix of initial value derivatives $(\partial \boldsymbol{r}_{t} / \partial \boldsymbol{r}_0, \partial \boldsymbol{p}_{t} / \partial \boldsymbol{r}_0 )$ is governed by the derivative of the interaction force rather than by the force itself.
Accounting for the modification (\ref{eqn:reihe1}) in calculating the stability determinant would therefore amount to a higher order correction in the momentum deflection. It follows that the amplitude factor is consistently approximated by
\begin{equation}
A_{2T} \left( \boldsymbol{r}, \boldsymbol{p} \right) \approx \left| \det \left( \frac{\partial \boldsymbol{r}_{2T} \left( \boldsymbol{r} - \boldsymbol{p} T / m, \boldsymbol{p} \right)}{\partial \boldsymbol{r}_0} - \frac{T}{m} \frac{\partial \boldsymbol{p}_{2T} \left( \boldsymbol{r} - \boldsymbol{p} T / m, \boldsymbol{p} \right)}{\partial \boldsymbol{r}_0} \right) \right|^{-1/2}.
\label{eqn:Samp2}
\end{equation}
\end{widetext}
\subsubsection{Separation of the longitudinal motion} \label{sec:seplongmotion}
Based on the
deflection approximation of the stationary scattering factor we may now derive a reduced scattering factor (\ref{eqn:Sfactor_1D}) that can be incorporated into the one-dimensional Talbot-Lau calculation and that significantly extends the validity regime of the eikonal approximation.
It is necessary for this purpose that the longitudinal motion remains unaltered. To a very good approximation this is indeed the case for the small trajectory deflections required above in the
deflection approximation. This is due to energy conservation during the scattering process, which ensures $(\boldsymbol{p} + \Delta \boldsymbol{p}_{2T})^2=\boldsymbol{p}^2$, where $\boldsymbol{p}= \left(p,p_z \right)$ is the incoming momentum and $\Delta \boldsymbol{p}_{2T}$ the total deflection (\ref{eq:Deltap}). It follows that, for a well-collimated beam with $|p| \ll p_z$, a small transverse deflection $\Delta p_{2T} \ll p_z$ produces a longitudinal deflection of magnitude $|\Delta p_{z,2T}| \approx (|p|/p_z)\, \Delta p_{2T}$, which is much smaller than $\Delta p_{2T}$.
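Explicitly, writing the energy conservation condition in components and expanding in the small deflections gives
\begin{equation*}
2 p \, \Delta p_{2T} + 2 p_z \, \Delta p_{z,2T} + \Delta p_{2T}^2 + \Delta p_{z,2T}^2 = 0
\quad \Rightarrow \quad
\Delta p_{z,2T} \approx - \frac{p}{p_z}\, \Delta p_{2T} - \frac{\Delta p_{2T}^2}{2 p_z},
\end{equation*}
so that, up to the quadratic correction $\Delta p_{2T}^2/(2 p_z)$, which is likewise small compared to $\Delta p_{2T}$, the longitudinal deflection is indeed of order $(|p|/p_z)\,\Delta p_{2T}$.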
The longitudinal part of the classical trajectory (\ref{eqn:1stordertraj}) may thus be treated as a free motion with constant momentum $p_z$,
\begin{equation}
z_t \left( \boldsymbol{r}_0^{(1)} \left( \boldsymbol{r}, \boldsymbol{p} \right), \boldsymbol{p} \right) = z + \frac{p_z}{m} \left(t-T \right) +
{\cal O} \left( \Delta p_{2T}\frac{p}{p_z} \right).
\label{eqn:zfree}
\end{equation}
This reduces all the vectorial quantities in the
semiclassical phase and amplitude modification to the transverse scalar quantities.
In addition, one can now remove the longitudinal motion altogether from the scattering description by switching into a comoving frame with velocity $v_z=p_z/m$. It is convenient to redefine the transverse components of the classical trajectory so that they start at $-T$,
\begin{eqnarray}
\Delta \bar{p}_t \left( x,p \right) &=& - \int_{-T}^t \mathrm{d} \tau \, \partial_x V \left( \bar{x}_{\tau} \left( x,p \right), \frac{p_z}{m}\tau \right) \\
\bar{x}_t \left( x, p \right) &=& x + \frac{p}{m}t - \int_{-T}^t \frac{\mathrm{d} \tau}{m} \, \Delta \bar{p}_{\tau} \left(x,p \right) \\
\bar{p}_t \left(x,p \right) &=& p + \Delta \bar{p}_t \left(x,p \right).
\end{eqnarray}
These are the scalar analogues of (\ref{eq:Deltap})--(\ref{eqn:trajp}) but for the shift in the time coordinate by $-T$, which accounts for the asymptotic initial condition of the free longitudinal trajectory (\ref{eqn:zfree}).
We can now state the result for the reduced scattering factor $\phi \left(x,p \right)=\Phi \left( x\boldsymbol{e}_x, p\boldsymbol{e}_x + p_z \boldsymbol{e}_z \right)$ to first order in the momentum deflection.
\begin{equation}
\phi \left(x,p \right) = a \left(x,p \right) \exp \left(\frac{i}{\hbar} \theta \left(x,p \right) \right)
\label{eqn:semiclassresult}
\end{equation}
The amplitude is given by
\begin{equation}
a \left( x, p \right) = \lim_{T \rightarrow \infty} \left| \partial_x\bar{x}_T \left(x,p \right) - \frac{T}{m} \partial_x\bar{p}_T\left(x,p \right) \right|^{-\frac{1}{2}},
\label{eqn:semiclassamp}
\end{equation}
while the phase can be simplified as
\begin{widetext}
\begin{eqnarray}
\theta \left( x, p \right) = -\lim_{T\to\infty} \int_{-T}^{T} \mathrm{d} t \left[ V \left( \bar{x}_t(x,p) , \frac{p_z}{m}t \right) - \frac{t}{m} \partial_x V \left( \bar{x}_t(x,p) , \frac{p_z}{m}t \right) \int_{t}^{T} \mathrm{d} \tau \, \partial_x V \left( \bar{x}_\tau(x,p) , \frac{p_z}{m}\tau \right) \right] .
\label{eqn:semiclassphase}
\end{eqnarray}
\end{widetext}
Here $\bar{x}_t(x,p)$ is the classical one-dimensional trajectory of a particle starting, at $t=-T$, with momentum $p$ at the position $x-p T/m$. It evolves in the effectively time-dependent potential $U_t(x)=V \left( x , t p_z/m \right)$. The associated momentum is $\bar{p}_t(x,p)$.
It is easy to see that the limits in (\ref{eqn:semiclassamp}) and (\ref{eqn:semiclassphase}) exist for sufficiently short-ranged potentials.
In particular, if the scattering potential has a finite extension, the limits are already reached at finite times $T$, once the longitudinal distance $2T v_z$ exceeds the size of the interaction region.
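As an illustration of how Eqs.~(\ref{eqn:semiclassresult})--(\ref{eqn:semiclassphase}) can be evaluated, the following Python sketch integrates the one-dimensional classical trajectory in the effectively time-dependent potential and assembles the phase and the amplitude. The model potential \texttt{V}, the parameter values, the naive leapfrog integrator, and the finite-difference stability derivatives are hypothetical placeholders; this is not the implementation used for the numerical results below.
\begin{verbatim}
import numpy as np

# hypothetical reduced units (illustration only)
hbar, m, p_z, T = 1.0, 1.0, 50.0, 10.0

def V(x, z):
    # short-ranged model grating potential (placeholder)
    return -0.05 * np.exp(-(x**2 + z**2))

def dVdx(x, z, h=1e-6):
    return (V(x + h, z) - V(x - h, z)) / (2 * h)

def trajectory(x, p, n=4000):
    # classical trajectory in U_t(x) = V(x, p_z t/m), started at t = -T
    # from x - p*T/m with momentum p (leapfrog steps)
    ts = np.linspace(-T, T, n)
    dt = ts[1] - ts[0]
    xs, ps = np.empty(n), np.empty(n)
    xs[0], ps[0] = x - p * T / m, p
    for k in range(n - 1):
        ph = ps[k] - 0.5 * dt * dVdx(xs[k], p_z * ts[k] / m)
        xs[k + 1] = xs[k] + dt * ph / m
        ps[k + 1] = ph - 0.5 * dt * dVdx(xs[k + 1], p_z * ts[k + 1] / m)
    return ts, xs, ps

def phi(x, p):
    # reduced scattering factor in the deflection approximation
    ts, xs, ps = trajectory(x, p)
    dV = dVdx(xs, p_z * ts / m)
    # inner integral int_t^T dtau dV/dx for every t (reversed cumulative sum)
    inner = np.cumsum((dV * np.gradient(ts))[::-1])[::-1]
    theta = -np.trapz(V(xs, p_z * ts / m) - (ts / m) * dV * inner, ts)
    # amplitude from finite-difference stability derivatives
    eps = 1e-5
    _, xp, pp = trajectory(x + eps, p)
    _, xm, pm = trajectory(x - eps, p)
    dxT = (xp[-1] - xm[-1]) / (2 * eps)
    dpT = (pp[-1] - pm[-1]) / (2 * eps)
    a = abs(dxT - (T / m) * dpT) ** -0.5
    return a * np.exp(1j * theta / hbar)

print(phi(0.3, 0.0))
\end{verbatim}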
The reduced scattering factor (\ref{eqn:semiclassresult})
constitutes a significant improvement over the eikonal approximation used so far, as will be demonstrated in the next section. Compared to the elementary eikonal approximation (\ref{eqn:simpleeiko}) and to the Glauber eikonal result (\ref{eqn:glauber}), this expression not only provides the consistent incorporation of the deflection into the phase, but it also introduces an amplitude modification of the scattered wave.
\section{Numerical Analysis} \langlebel{sec:numerics}
We proceed to analyze the numerical performance of the semiclassical scattering factor (\ref{eqn:semiclassresult}) as
compared to the Glauber eikonal approximation (\ref{eqn:glauber}) and to the elementary eikonal approximation used so far in evaluations of the Talbot-Lau effect. We will see that the elementary eikonal approximation was appropriate in the molecular matter wave experiments performed to date \cite{Brezger2002a,Gerlich2007a}. At the same time, both the Glauber and the semiclassical approximation significantly improve the treatment of laser gratings if the particles have
larger polarizabilities or smaller velocities (as required if their mass is increased).
According to the general theory from Sect.~\ref{sec:generalTL} the interference pattern (\ref{eqn:interferencepattern2}) is determined by the generalized, momentum dependent grating coefficients (\ref{eqn:gratingcoeff2}) via the scattering factor (\ref{eqn:Sfactor_1D}).
In order to assess the validity
of the different approximations it is therefore pertinent to evaluate
the scattering factors directly and compare them to a numerical implementation of the exact propagation of a plane wave through the grating interaction
region.
\subsection{The scattering factor}
The exact transverse scattering factor (\ref{eqn:Sfactor_1D}) for the grating can be obtained by a numerical evaluation of the limit
\begin{equation}
\phi \left(x,p \right) = \lim_{T\to\infty} \frac{\langle x | \mathsf U _{-T}^{(0)} \mathsf U _{2T} \mathsf U _{-T}^{(0)} |p \rangle}{\langle x |p \rangle}.
\end{equation}
In practice, this is done by computing the propagator matrix elements by means of a split operator technique \cite{Feit1982a,Chambers2000a}, making sure that the propagation time $T$ is sufficiently large (much larger than the grating passage time) so that the result is converged. In the following examples we use periodic boundary conditions for the position coordinate $x$ and a fixed transverse momentum $p$ not larger than the typical beam spread $10^{-3} p_z$.
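For illustration, a minimal split-operator propagation of this kind may be sketched as follows. The grating potential, the grid, and all parameter values are hypothetical placeholders, and the sketch assumes a transverse momentum commensurate with the periodic grid; it is not the implementation used for the figures.
\begin{verbatim}
import numpy as np

# hypothetical reduced units and model grating (illustration only)
hbar, m, p_z, d = 1.0, 1.0, 50.0, 1.0
N = 512
x = np.linspace(0.0, d, N, endpoint=False)        # periodic transverse grid
k = 2.0 * np.pi * np.fft.fftfreq(N, d=d / N)      # wave numbers on that grid

def U(xg, t):
    # laser-type model potential; z = p_z t / m supplies the envelope
    z = p_z * t / m
    return -0.05 * np.exp(-(z / 5.0)**2) * np.cos(2.0 * np.pi * xg / d)**2

def propagate(psi, T, steps=4000):
    # symmetric split-operator propagation U_{2T} through U(x, t)
    dt = 2.0 * T / steps
    kin = np.exp(-1j * hbar * k**2 * dt / (2.0 * m))
    for n in range(steps):
        t = -T + (n + 0.5) * dt
        psi = psi * np.exp(-0.5j * U(x, t) * dt / hbar)   # half potential step
        psi = np.fft.ifft(kin * np.fft.fft(psi))          # kinetic step
        psi = psi * np.exp(-0.5j * U(x, t) * dt / hbar)   # half potential step
    return psi

def free_back(psi, T):
    # backward free propagation U_0^{-T}, applied in momentum space
    return np.fft.ifft(np.exp(1j * hbar * k**2 * T / (2.0 * m)) * np.fft.fft(psi))

# transverse scattering factor for an incoming plane wave |p> (here p = 0)
p, T = 0.0, 20.0
plane = np.exp(1j * p * x / hbar)
psi_out = free_back(propagate(free_back(plane, T), T), T)
phi = psi_out / plane
\end{verbatim}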
\subsubsection{Material grating}
\begin{figure}
\caption{\label{fig:scatt_mat}}
\end{figure}
We first take the grating to be of material type with a retarded Casimir-Polder interaction potential. Figure~\ref{fig:scatt_mat} shows the phase \emph{differences} between the approximate scattering factors and the exact scattering factor as a function of the distance to the wall. The calculations were done for a de Broglie wavelength of $\lambda = 4\,$pm. Moreover, we choose an orthogonally incident plane wave, $p=0$, so that the elementary and the Glauber eikonal approximations coincide. They are given by the dotted line, while the solid curve corresponds to the semiclassical approximation (\ref{eqn:semiclassresult}).
As one observes in Figure~\ref{fig:scatt_mat}, the divergence of the wall potentials invalidates the approximations for the scattering factor close to the walls. However, the contributions from this small vicinity of the slit walls do not affect the interference visibility appreciably, since it corresponds only to a small fraction of the semiclassical trajectories.
This suggests that a reasonable cut-off criterion is given by the critical distance $x_c$ to the wall, where a classical beam particle would hit the wall within the grating passage time $t=b/v_z$. Disregarding the initial transverse momentum $p$ and the potential of the opposite wall, the time for a particle starting at the distance $x_0$ to hit the wall is given by
\begin{equation}
T = \sqrt{\frac{m}{2}} \int_0^{x_0} \mathrm{d} x \, \frac{1}{\sqrt{- V \left(x \right)}} .
\end{equation}
For a wall potential $V(x) = -C_4 x^{-4}$ this leads to the critical distance
\begin{equation}
x_c = \left( \frac{18m C_4 b^2}{p_z^2} \right)^{1/6}.
\end{equation}
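The intermediate step, not spelled out above, is elementary: inserting $V(x) = -C_4 x^{-4}$ into the fall-time integral gives
\begin{equation*}
T = \sqrt{\frac{m}{2 C_4}} \int_0^{x_0} \mathrm{d} x \, x^2 = \sqrt{\frac{m}{2 C_4}}\, \frac{x_0^3}{3},
\end{equation*}
and equating this to the grating passage time $m b / p_z$ and solving for $x_0 = x_c$ yields the stated expression.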
The parameters used for Fig.~\ref{fig:scatt_mat} yield $x_c=21\,$nm, and this value indeed corresponds to the position
where the semiclassical and the exact phase deviate by about $2\pi$.
The fact that the eikonal approximation is virtually identical to the exact phase factor over most of the slit width explains why it is well justified for thin material gratings.
In fact, our numerical results indicate that in this case the eikonal approximation remains valid even for
particles with a stronger particle-wall interaction, breaking down only in a regime where the interference visibility is already strongly diminished by the interaction effect. We therefore focus on the KDTLI setup in the following.
As a final point, we note that the opening width of the slits is effectively reduced by twice the critical distance $x_c$. For large and slow particles it may be necessary to take this into account even at the first and at the third grating, since the fringe visibility depends quite sensitively on the corresponding effective open fractions.
\subsubsection{Laser grating}
\begin{figure*}
\caption{\label{fig:scatt_laser1}}
\end{figure*}
Replacing the diffraction grating by a standing laser wave leads to the smooth and bounded interaction potential (\ref{eqn:laserpot}).
For the numerical evaluation of the scattering factor, shown in Fig.~\ref{fig:scatt_laser1}, we choose the same parameters as for Fig.~\ref{fig:lambda_laser}
(motivated by the experiment \cite{Gerlich2007a}) and set $\lambda=3\,$pm. The longitudinal laser waist $w_z = 20\,\mu$m is much larger than the grating period $d=266\,$nm. It follows that non-zero transverse momenta $p\neq 0$ now have to be considered separately, since the free transverse motion over the distance $w_z p/p_z$ must not be neglected.
Figure~\ref{fig:scatt_laser1} shows how the phases of the approximate scattering factors deviate from the exact phase. Panel (a) corresponds to perpendicular incidence of the incoming plane wave, $p=0$, while (b) is evaluated for $p = 10^{-3} p_z$, as found for a beam spread of $1\,$mrad.
The Glauber and the elementary eikonal approximation coincide in the case (a) of perpendicular incidence, and they deviate from the exact phase by about 1\,mrad. The error of the semiclassical phase is smaller by three orders of magnitude, and it is not resolved in the plot. On the other hand, in the case (b) of a non-zero transverse momentum the elementary eikonal approximation deviates substantially from the exact result, while the error for the Glauber approximation remains on the order of $10^{-3}$ and the semiclassical one on the order of $10^{-6}$ (not resolved in the plot).
As for the corresponding amplitude of the incident plane waves, the exact calculation yields deviations from the incident amplitude $1$ on the order of $10^{-6}$ in case (a) and deviations on the order of $10^{-4}$ in case (b), respectively (not shown). While the eikonal approximations cannot account for this effect, the semiclassical amplitude (\ref{eqn:semiclassamp}) reproduces the exact result with an error
of less than $10^{-8}$, and $10^{-6}$, respectively.
The semiclassical expression of the scattering factor is thus demonstrated to be superior by orders of magnitude compared to the eikonal approximations.
While the Glauber eikonal approximation already improves the elementary eikonal approximation significantly, it still does not take the amplitude modification into account. However, the overall corrections to the eikonal approximations are so small in the present parameter regime that the Talbot-Lau interference contrast is hardly affected, as demonstrated below. The elementary eikonal approximation thus remains valid for the considered experiment \cite{Gerlich2007a}.
\begin{figure*}
\caption{\label{fig:scatt_laser2}}
\end{figure*}
The situation changes markedly if the de Broglie wavelength is increased by a factor of ten, to $\lambda = 30\,$pm. The resulting phase difference and amplitude plots are shown in Fig.~\ref{fig:scatt_laser2} for the case of a transverse momentum $p=10^{-3}p_z$. Now both the Glauber and the elementary eikonal phase strongly differ from the exact result, as demonstrated by the dashed and the dotted curves in Fig.~\ref{fig:scatt_laser2}(a). At the same time, the semiclassical result (solid line) deviates by less than $100\,$mrad from the exact phase, and also the corresponding amplitude, seen in Fig.~\ref{fig:scatt_laser2}(b), faithfully approximates the exact one.
The semiclassical expression (\ref{eqn:semiclassresult}) starts to fail only if we decrease the beam velocity to such an extent that the trajectories get strongly deflected during the increased passage time. This is expected since the derivation assumes the corrections due to deflection to be small. Equation (\ref{eqn:semiclassresult}) thus extends the eikonal approximation to the smaller beam velocities required for more massive particles, but it does not cover the whole semiclassical wavelength regime.
\subsection{The Talbot-Lau visibility}
We can now discuss how the improved treatment of the grating interaction effect affects the Talbot-Lau interference visibility. We focus again on the laser grating setup demonstrated in \cite{Gerlich2007a}.
In Figs.~\ref{fig:vis_wz}--\ref{fig:vis_lambda}
the eikonal results are obtained by calculating the visibility (\ref{eqn:Vexp})
by means of the Talbot-Lau coefficients (\ref{eqn:Beiklaser}). The semiclassical calculation implements the generalized formula for the interference pattern (\ref{eqn:interferencepattern2}), where the semiclassical scattering factor (\ref{eqn:semiclassresult}) enters by means of the generalized grating coefficients (\ref{eqn:gratingcoeff2}). Note that, unlike in the elementary eikonal approximation, it is now essential to incorporate the angular distribution $D \left( p / p_z \right)$ into the calculation.
It is set here to be a Gaussian $D \left( p / p_z \right) \propto \exp \left( -(p/p_z)^2 / (2 \alpha^2) \right)$ with a realistic width of $\alpha = 1\,$mrad.
\begin{figure}
\caption{\label{fig:vis_wz}}
\end{figure}
One immediate consequence of the dependence of the semiclassical and the Glauber approximations on the transverse momentum is demonstrated in Fig.~\ref{fig:vis_wz}, where the interference visibility is plotted versus the longitudinal laser waist $w_z$, starting from the experimental value $w_z = 20 \, \mu$m. The elementary eikonal approximation (dotted line) is independent of $w_z$ due to the longitudinal integration in (\ref{eqn:simpleeiko}). The semiclassical result (solid line), which takes into account the transverse motion through the laser field, decreases with growing $w_z$. While the difference between the approximations is negligible at the experimental value, it becomes significant for a larger laser focus. The Glauber approximation reproduces the semiclassical result up to a precision of $10^{-4}$ and is therefore indistinguishable from the solid line in Fig.~\ref{fig:vis_wz}. This implies that the visibility loss with growing waist is due to the free transverse motion through the laser grating, rather than due to a considerable deflection of the trajectories.
\begin{figure}
\caption{\label{fig:vis_P}}
\end{figure}
A similar result is presented in Fig.~\ref{fig:vis_P}, where we increase the laser power starting from the experimental value $P_L=6\,$W. This is equivalent to increasing the polarizability of the particles, see (\ref{eqn:laserpot}). One observes that the semiclassical result (solid curve) decreases more rapidly than the eikonal visibility (dotted curve) as the strength of the phase grating is increased. The Glauber approximation again matches the semiclassical curve.
\begin{figure}
\caption{\label{fig:vis_lambda}}
\end{figure}
Finally, in Fig.~\ref{fig:vis_lambda} we present the interference visibility as a function of the de Broglie wavelength $\lambda$ around $30\,$pm, corresponding to a tenfold smaller beam velocity than in the experiment. In this case one observes at some wavelengths a considerable difference between the
Glauber eikonal approximation (dotted line) and the semiclassical approximation (solid line).
Note that the sharp dips, where the visibility drops to zero in Fig.~\ref{fig:vis_lambda} and Fig.~\ref{fig:vis_P}, indicate a shift of the interference pattern by half its period. One has to take this into account when averaging the visibility over the velocity distribution of the particle beam. For example, if this distribution were so broad that it amounted to averaging over two subsequent peaks in Fig.~\ref{fig:vis_lambda}, the result would be almost zero visibility.
We have seen that in general the validity of the eikonal approximation depends on the grating interaction strength, the passage time, the longitudinal velocity, and the distribution of the transverse momenta in the particle beam. For the specific experiment \cite{Gerlich2007a} the elementary eikonal approximation breaks down if the longitudinal velocity is decreased by a factor of $10$, or similarly if the laser power $P_L$, the longitudinal waist $w_z$, or the particle polarizability is increased by an order of magnitude. The latter are effects mainly due to the transverse motion of the beam particles, which is not taken into account by the elementary eikonal approximation. Here the Glauber eikonal approximation (\ref{eqn:glauber}), i.e., the semiclassical scattering factor without deflection, would already be sufficient, at least in the experimentally accessible regime. This is no longer the case
if one increases the wavelength, since the deflection effect is more sensitive to the wavelength than to the grating parameters, limiting the validity of both the elementary and the Glauber eikonal approximation.
\section{Conclusions} \label{sec:conclusions}
We presented a general theory of the coherent Talbot-Lau interference effect.
It allows one to incorporate the interaction between the particle and the grating structure, a dominant effect in near-field interference, at various degrees of approximation. Our treatment shows that it is necessary to account for detailed beam characteristics, such as the angular distribution, whenever one is required to go beyond the elementary eikonal approximation, or if one wants to quantify the experimental adjustment requirements.
Using the phase space formulation of quantum mechanics, we identify the appropriate generalization of the Talbot-Lau coefficients.
They serve to incorporate the most general coherent grating transformation and to describe the various near field interference effects in a transparent fashion. The general effect of the passage through a grating can thus be formulated in terms of scattering theory, providing a starting point for the numerically exact evaluation of the interference pattern.
Moreover, the semiclassical approximation of the S-matrix yields a systematic and non-perturbative improvement over the elementary eikonal approximation.
An additional high-energy approximation of the semiclassical trajectories then yields the Glauber eikonal approximation and the semiclassical deflection approximation as systematic corrections to the standard treatment.
A comparison with the numerically exact calculation verifies the high quality of the semiclassical deflection approximation. It suggests that a Kapitza-Dirac Talbot-Lau interferometer, where the center grating is replaced by a standing light wave, will be able to demonstrate the wave nature even of particles which are so large that the eikonal approximation is no longer valid.
\begin{acknowledgments}
We thank M. Arndt and H. Ulbricht for helpful discussions.
This work was supported by the FWF doctoral program `Complex Quantum Systems' (W1210) and by the DFG Emmy Noether program.
\end{acknowledgments}
\appendix*
\section{Classical description} \label{app:class}
If the Talbot-Lau experiment is to prove the quantum nature
of particles one clearly needs to be able to distinguish between the quantum
interference effect and the moir\'{e}-type shadow effect that may occur with classical particles. A formulation is therefore required that yields the classical shadow contrast by using the same assumptions and approximations as in the quantum case. We present this classical theory in the following by assuming a material grating of thickness $b$ with a transverse interaction potential $V(x)$ of the
grating slit walls. The case of an explicitly $z$-dependent
interaction potential $V \left(x,z \right)$, such as a laser grating \cite{Gerlich2007a}, can be treated in the
one-dimensional model by an effectively time-dependent potential
$\widetilde{V} \left(x,t \right) = V \left(x, p_z t /m \right)$, where
the longitudinal motion provides the time coordinate for a given
momentum $p_z$.
The classical formulation is based on the phase space density rather than the Wigner function. The corresponding classical propagator through a diffraction
grating is given by the expression
\begin{eqnarray}
K_{\text{cl}} \left(x,p;x_0,p_0 \right) &=& \left|t\left(x_0\right) \right|^2 \delta \left( x_0 - x_0^{\text{cl}} \left(x,p,\frac{mb}{p_z} \right) \right) \nonumber \\
&&\times \, \delta \left( p_0 - p_0^{\text{cl}} \left(x,p,\frac{mb}{p_z} \right) \right),
\label{eqn:klassprop}
\end{eqnarray}
where the hard grating wall cutoff of the particle beam is taken into
account at the entrance into the grating. The phase space coordinates
$x_0^{\text{cl}},p_0^{\text{cl}}$ are the starting point of the
classical trajectory evolving to $x,p$ under the influence of the
interaction potential $V(x)$ within the grating passage time
$t=mb/p_z$.
Since the free evolution of the phase space density
is given by the same transformation (\ref{eq:wt}) as in the case of the Wigner function,
one
ends up with the general classical shadow pattern, denoted by $f_3$
instead of $w_3$, after
substituting the classical propagator (\ref{eqn:klassprop}) into
the general Talbot-Lau calculation from Sect.~\ref{sec:generalTL}.
\begin{widetext}
\begin{eqnarray}
f_3 \left( x \right) &=& \frac{1}{G} \int \frac{\mathrm{d} p}{p_z} \, D \left( \frac{p_0^{\text{cl}} \left( x- \frac{p}{p_z}\eta L,p,\frac{mb}{p_z}\right)}{p_z}\right) \left|t \left( x_0^{\text{cl}} \left( x- \frac{p}{p_z}\eta L,p,\frac{mb}{p_z}\right)\right) \right|^2 \nonumber \\
&& \times \, \left|t_1 \left( x_0^{\text{cl}} \left( x- \frac{p}{p_z}\eta L,p,\frac{mb}{p_z}\right) - \frac{p_0^{\text{cl}} \left( x- \frac{p}{p_z}\eta L,p,\frac{mb}{p_z}\right)}{p_z} L\right) \right|^2
\label{eqn:shadow1}
\end{eqnarray}
\end{widetext}
One can numerically implement this formula directly, rather than performing a Fourier decomposition with respect to the argument $x-p \eta L /p_z$ of the trajectory terms.
The ideal moir\'{e} shadow pattern is obtained if one disregards both the interaction potential and the grating thickness by setting $x_0^{\text{cl}} \left(x,p,t\right)=x$ and
$p_0^{\text{cl}} \left(x,p,t\right)=p$.
If a particle-wall interaction is present, the classical analogue to the eikonal approximation (\ref{eqn:simpleeiko}) is to approximate the deflection of the trajectory due to the grating by an instantaneous momentum kick \cite{Hornberger2004a},
\begin{eqnarray}
x_0^{\text{cl}} \left( x, p, \frac{m b}{p_z} \right) &=& x \nonumber \\
p_0^{\text{cl}} \left( x, p, \frac{m b}{p_z} \right) &=& p + \frac{m b}{p_z} V'(x).
\end{eqnarray}
Putting this into the classical formula (\ref{eqn:shadow1}), performing all the Fourier decompositions, and focusing on a particular $r:s$ Talbot-Lau resonance, as done in the eikonal quantum case (\ref{eqn:w3_eik2}), one obtains the classical shadow pattern in eikonal approximation,
\begin{eqnarray}
f_3 \left( x \right) &=& \frac{1}{G} \sum_{\ell=-\infty}^{\infty} A_{s \ell}^{*} \exp \left(2 \pi i \ell\frac{ x}{d_3} \right) \nonumber \\
&&\times \, \sum_{k=-\infty}^{\infty} B_{r\ell-k} (0) c_k \left( s \ell \frac{d}{d_1}\frac{L}{L_\textrm{T}} \right). \label{eqn:f3_eik}
\end{eqnarray}
The classical momentum kick coefficients read as
\begin{eqnarray}
c_n \left( \xi \right) &=& \frac{1}{d} \int_{-d/2}^{d/2} \mathrm{d} x \, e^{-2 \pi i n x / d} \exp \left( i \xi \frac{m b V' (x) / p_z}{\hbar / d} \right) .
\nonumber\\&&
\end{eqnarray}
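These coefficients are straightforward to evaluate numerically for a given model of the wall potential. The following sketch uses a simple Riemann sum with a hypothetical regularized two-wall potential and illustrative parameter values (not those of an actual grating).
\begin{verbatim}
import numpy as np

# hypothetical reduced units and wall potential (illustration only)
hbar, m, b, p_z, d = 1.0, 1.0, 0.1, 50.0, 1.0

def Vprime(x):
    # derivative of a model two-wall potential -C4/r^4, regularized at the walls
    C4, eps = 1e-5, 0.05 * d
    return 4 * C4 * ((x + d / 2 + eps)**-5 - (d / 2 - x + eps)**-5)

def c(n, xi, N=4096):
    # c_n(xi) = (1/d) int dx exp(-2 pi i n x/d) exp(i xi (m b V'(x)/p_z)/(hbar/d))
    x = np.linspace(-d / 2, d / 2, N, endpoint=False)
    kick = m * b * Vprime(x) / p_z
    return np.mean(np.exp(-2j * np.pi * n * x / d)
                   * np.exp(1j * xi * kick / (hbar / d)))

print([abs(c(n, 0.5)) for n in range(4)])
\end{verbatim}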
If the second grating is implemented by a standing laser beam the
classical calculation yields an analytical expression for the
Talbot-Lau coefficients $B_m^{\text{cl}} \left( \xi \right) = \sum_k
B_{m-k} \left(0 \right) c_k \left( \xi \right)$. They are related to the
quantum expression (\ref{eqn:Beiklaser}) by replacing the sine function in the argument with its linear expansion,
\begin{equation}
B_m^{\text{cl}} \left(\xi \right) = J_m \left( - \frac{4 M P_L \alpha_{\omega} }{\sqrt{2\pi} \hbar c \varepsilon_0 w_y p_z} \pi \xi \right).
\label{eqn:Beiklasercl}
\end{equation}
Since the argument of the $B_m$ is proportional to the de Broglie
wavelength $\lambda$ in the Talbot-Lau interference effect, this means
that the quantum
interference and the classical shadow effect become indistinguishable in the naive classical limit of a vanishing wavelength, $\lambda \rightarrow 0$.
\end{document}
\begin{document}
\title[The geometry of Hrushovski constructions, II.]{The geometry of Hrushovski constructions, II. \\ The strongly minimal case.\footnote{Version: 18 March 2011}}
\author{David M. Evans}
\address{School of Mathematics, UEA, Norwich NR4 7TJ, UK.}
\email{[email protected]}
\author{Marco S. Ferreira}
\address{School of Mathematics, UEA, Norwich NR4 7TJ, UK.}
\email{[email protected]}
\begin{abstract}
We investigate the isomorphism types of combinatorial geometries arising from Hrushovski's flat strongly minimal structures and answer some questions from Hrushovski's original paper.
\noindent\textit{Keywords:\/} Strongly minimal set, Hrushovski construction, predimension\newline
\textit{MSC(2010):\/} 03C45, 03C30, 03C13
\end{abstract}
\maketitle
\section{Introduction}
In this paper, we investigate the isomorphism types of combinatorial geometries arising from Hrushovski's flat strongly minimal structures and answer some questions from Hrushovski's original paper \cite{EH}. It is a sequel to \cite{MFDE1}, but can be read independently of it. In order to describe the main results it will be convenient to summarise some of the results from the previous paper.
Suppose $L$ is a relational language with, for convenience, all relation symbols of arity at least 3 and at most one relation symbol of each arity. Denote by $k(L)$ the maximum of the arities of the relation symbols in $L$ (allowing $k(L)$ to be $\infty$ if this is unbounded). The basic Hrushovski construction defines the \textit{predimension} of a finite $L$-structure to be its size minus the number of atomic relations on the structure. The class $\mathcal{C}_0(L)$ consists of the finite $L$-structures in which this is non-negative on all substructures. There is then an associated notion of \textit{dimension} $d$ and the notion of \textit{self-sufficiency} (denoted by $\leq$) of a substructure. All of this is reviewed in detail in Section 2 below. The class $(\mathcal{C}_0, \leq)$ has an associated \textit{generic structure} $\mathcal{M}_0(L)$ which also carries a dimension function $d$ giving it the structure of an infinite-dimensional pregeometry. The associated (combinatorial) geometry is denoted by $G(\mathcal{M}_0(L))$.
In \cite{MFDE1} we showed that:
\begin{enumerate}
\item[(1)] The collection of finite subgeometries of $G(\mathcal{M}_0(L))$ does not depend on $L$ (Theorem 3.8 of \cite{MFDE1}).
\item[(2)] For languages $L, L'$, the geometries $G(\mathcal{M}_0(L))$ and $G(\mathcal{M}_0(L'))$ are isomorphic iff the maximum arities $k(L)$ and $k(L')$ are equal. (Theorem 3.1 of \cite{MFDE1} for $\Leftarrow$ and see also Section 4.2 here; Theorem 4.3 of \cite{MFDE1} gives $\Rightarrow$.)
\item[(3)] The localization of $G(\mathcal{M}_0(L))$ over any finite set is isomorphic to $G(\mathcal{M}_0(L))$ (Theorem 5.5 of \cite{MFDE1}).
\end{enumerate}
For the strongly minimal set construction of \cite{EH}, one takes a certain function $\mu$ (see section 2 here) and considers a subclass $\mathcal{C}_\mu(L)$ of $\mathcal{C}_0(L)$. For appropriate $\mu$ there is a generic structure $\mathcal{M}_\mu(L)$ for the class $(\mathcal{C}_\mu(L), \leq)$ which is strongly minimal. The dimension function given by the predimension is the same as the dimension in the strongly minimal set and we are interested in the geometry of this. Our main result here is that this process of `collapse' is irrelevant to the geometry: under rather general conditions on $\mu$ we prove:
\begin{enumerate}
\item[(4)] The geometry $G(\mathcal{M}_\mu(L))$ of the strongly minimal set is isomorphic to the geometry $G(\mathcal{M}_0(L))$ (Theorem \ref{main}).
\end{enumerate}
Sections 5.1 and 5.2 of Hrushovski's paper \cite{EH} give variations on the construction which produce strongly minimal sets with geometries different from the $G(\mathcal{M}_0(L))$. However, we show, answering a question from \cite{EH} (see also Section 3 of \cite{Hasson}):
\begin{enumerate}
\item[(5)] the geometries of the strongly minimal sets in Sections 5.1 and 5.2 of \cite{EH} have localizations (over a finite set) which are isomorphic to one of the geometries $G(\mathcal{M}_0(L))$ (for appropriate $L$) (see Section 4.1 here).
\end{enumerate}
The first version of the result in (4) was proved by the second Author in his thesis \cite{MFThesis}: this was for the case where $L$ has a single 3-ary relation symbol (as in the original paper \cite{EH}). The somewhat different method of proof used in Sections 3 and 4 here was found later. It has the advantage of being simpler and more readily adaptable to generalization and to proving the result in (5); however, the class of $\mu$-functions to which it is applicable is slightly more restricted than in the result from \cite{MFThesis}: Theorem 6.2.1 of \cite{MFThesis} assumes only that $\mu \geq 1$.
In summary, for each $k = 3, 4, \ldots, \infty$ we have a countably-infinite dimensional geometry $\mathcal{G}_k$ isomorphic to $G(\mathcal{M}_0(L))$ where $L$ has maximum arity $k$, and these are pairwise non-isomorphic. The geometry of each of the new (countable, saturated) strongly minimal sets in \cite{EH} has a localization isomorphic to one of these $\mathcal{G}_k$. Thus, whilst there is some diversity amongst the strongly minimal structures which can be produced by these constructions, the range of geometries which can be produced appears to be rather limited. It would therefore be very interesting to have a characterization of the geometries $\mathcal{G}_k$ in terms of a `geometric' condition (such as flatness, as in 4.2 of \cite{EH}, for example) and a condition on the automorphism group (such as homogeneity, but possibly with a stronger assumption).
\noindent\textit{Acknowledgement:\/} Some of the results of this paper were produced whilst the second Author was supported as an Early Stage Researcher by the Marie Curie Research Training Network MODNET, funded by grant MRTN-CT-2004-512234 MODNET from the CEC. We thank the Referee for drawing our attention to the need to consider local isomorphisms in Theorem \ref{main}.
\section{Hrushovski constructions}\label{sec2}
We give a brief description of Hrushovski's constructions from \cite{EH}. Other presentations can be found in \cite{FW} and \cite{B&S}. The book \cite{AP2} of Pillay contains all necessary background material on pregeometries and model theory. The notation, terminology and level of generality is mostly consistent with that used in \cite{MFDE1}.
\subsection{Predimension and pregeometries}
Let $L$ be a relational language consisting of relation symbols $(R_i : i \in I)$ with $R_i$ of arity $n_i \geq 3$ (and $\vert I \vert \geq 1$). We suppose there are only finitely many relations of each arity here.
We work with $L$-structures $A$ where each $R_i$ is symmetric: so we regard the interpretation $R_i^A$ of $R_i$ in $A$ as a set of $n_i$-sets. (By modifying the language, the arguments we give below can be adapted to deal with the case of $n_i$-tuples of not-necessarily-distinct elements: see Section 4.3 here.)
For finite $A$ we let the predimension of $A$ be $\delta(A) = \vert A \vert - \sum_{i \in I} \vert {R_i}^A\vert$ (of course this depends on $L$ but this will be clear from the context).
We let
$\mathcal{C}_0(L)$ be the set of finite $L$-structures $A$ such that $\delta(A') \geq 0$ for all $A' \subseteq A$.
Suppose $A \subseteq B \in \mathcal{C}_0(L)$. We write $A \leq B$ and say that $A$ is \textit{self-sufficient} in $B$ if for all $B'$ with $A \subseteq B' \subseteq B$ we have $\delta(A) \leq \delta(B')$. We will assume that the reader is familiar with the basic properties (such as transitivity) of this notion.
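Since the predimension and self-sufficiency are finite combinatorial notions, they can be checked mechanically on small examples. The following Python sketch is an informal illustration only, not part of the formal development; it represents each relation $R_i^A$ as a set of $n_i$-sets, as above, and uses a toy structure on four points.
\begin{verbatim}
from itertools import combinations

def delta(points, relations):
    # predimension: |A| minus the number of relation instances contained in A
    A = set(points)
    return len(A) - sum(1 for rel in relations for w in rel if set(w) <= A)

def in_C0(points, relations):
    # A lies in C_0(L) iff delta(A') >= 0 for every A' contained in A
    return all(delta(s, relations) >= 0
               for r in range(len(points) + 1)
               for s in combinations(points, r))

def self_sufficient(A, B, relations):
    # A <= B iff delta(A) <= delta(B') for every B' with A subset B' subset B
    extra = [x for x in B if x not in A]
    return all(delta(list(A) + list(s), relations) >= delta(A, relations)
               for r in range(len(extra) + 1)
               for s in combinations(extra, r))

# toy example: one ternary relation R with instances {1,2,3} and {2,3,4}
R = [[{1, 2, 3}, {2, 3, 4}]]
print(delta([1, 2, 3, 4], R))                 # 4 - 2 = 2
print(in_C0([1, 2, 3, 4], R))                 # True
print(self_sufficient([1], [1, 2, 3, 4], R))  # True
\end{verbatim}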
Let $\bar{\mathcal{C}}_0(L)$ be the class of $L$-structures all of whose finite substructures lie in $\mathcal{C}_0(L)$. We can extend the notion of self-sufficiency to this class in a natural way.
Note that if $A \subseteq B \in \bar{\mathcal{C}}_0(L)$ is finite then there is a finite $A'$ with $A \subseteq A' \subseteq B$ and $\delta(A')$ as small as possible. In this case $A' \leq B$ and it can be shown that there is a smallest finite set $C \leq B$ with $A \subseteq C$. We define the \textit{dimension} $d_B(A)$ of $A$ (in $B$) to be the minimum value of $\delta(A')$ for all finite subsets $A'$ of $B$ which contain $A$.
We define the $d$-\textit{closure} of $A$ in $B$ to be:
$${\rm cl}_B(A)=\{c\in B:d_{B}(Ac)=d_{B}(A)\}$$
where, as usual, $Ac$ is shorthand for $A\cup \{c\}$.
These notions can be relativized. If $A, C \subseteq B \in \mathcal{C}_0(L)$ define $\delta(A/C)$ to be $\delta(A\cup C)- \delta(C)$ and $d_B(A/C) = d_B(A\cup C) - d_B(C)$: the (pre)dimension \textit{over} $C$. In all of this notation, we will suppress the subscript for the ambient structure $B$ if it is clear from the context.
We can coherently extend the definition of $d$-closure to infinite subsets $A$ of $B$ by saying that the $d$-closure of $A$ is the union of the $d$-closures of finite subsets of $A$.
It can be shown that $(B,{\rm cl}_B)$ is a pregeometry and the dimension function (as cardinality of a basis) equals $d_{B}$ on finite subsets of $B$. We use the notation $PG(B)$ instead of $(B,{\rm cl}_B)$, and denote by $G(B)$ the associated (combinatorial) geometry: so the elements of $G(B)$ are the sets ${\rm cl}_B(x)\setminus{\rm cl}_B(\emptyset)$ for $x \in B \setminus {\rm cl}_B(\emptyset)$ and the closure on $G(B)$ is that induced by ${\rm cl}_B$.
Note that if $A \leq B \in \bar{\mathcal{C}}_0(L)$ then for $X \subseteq A$ we have $d_A(X) = d_B(X)$. Thus $G$ can be regarded as a functor from $(\bar{\mathcal{C}}_0(L), \leq)$ to the category of geometries (with embeddings of geometries as morphisms).
If $Y \subseteq B \in \bar{\mathcal{C}}_0(L)$ then the \textit{localization} of $PG(B)$ over $Y$ is the pregeometry with closure operation ${\rm cl}_B^Y(Z) = {\rm cl}_B(Y\cup Z)$. The corresponding geometry is denoted by $G_Y(B)$. Note that the dimension function here is given by the relative dimension $d_B(./Y)$.
It will be convenient to fix a first order language for the class of pregeometries. A reasonable choice for this is the language $LPI=\{I_n:n\geq 1\}$ where each $I_n$ is an $n$-ary relational symbol. A pregeometry $(P,{\rm cl})$ will be seen as a structure in this language by taking $I_n^P$ to be the set of independent $n$-tuples in $P$. Notice that we can recover a pregeometry just by knowing its finite independent sets. Note also that the isomorphism type of a pregeometry is determined by the isomorphism type of its associated geometry and the size of the equivalence classes of interdependence. In the case where these are all countably infinite, it therefore makes no difference whether we consider the geometry or the pregeometry.
\subsection{Self-sufficient amalgamation classes}
If $B_1, B_2 \in \bar{\mathcal{C}}_0(L)$ have a common substructure $A$ then the \textit{free amalgam} $E$ of $B_1$ and $B_2$ over $A$ consists of the disjoint union of $B_1$ and $B_2$ over $A$ and $R_i^E = R_i^{B_1} \cup R_i^{B_2}$ for each $i \in I$. It is well known that if $A \leq B_1$ then $B_2 \leq E$, so $E \in \bar{\mathcal{C}}_0(L)$, and $(\mathcal{C}_0, \leq)$ is an \textit{amalgamation class}. It can also be shown that if $A$ is $d$-closed in $B_1$ then $B_2$ is $d$-closed in $E$.
Suppose $Y \leq Z \in \mathcal{C}_0(L)$ and $Y \neq Z$. Following \cite{EH}, we say that this is an \textit{algebraic extension} if $\delta(Y) = \delta(Z)$. It is a simply algebraic extension if also $\delta(Z') > \delta(Y)$ whenever $Y \subset Z' \subset Z$. It is a minimally simply algebraic (msa) extension if additionally $Y' \subseteq Y'\cup (Z\setminus Y)$ is not simply algebraic whenever $Y' \subset Y$.
It can be shown that for each $n \geq 0$ there are arbitrarily large msa extensions $Y \leq Z$ in $\mathcal{C}_0(L)$ with $\delta(Y) = n$ (this makes use of the fact that at least one of the relations $R_i$ has arity at least 3, which is certainly covered by our assumptions on $L$).
The following is trivial, but crucial for us:
\begin{lem}\label{21}
Suppose $Y \leq Z$ is a msa extension. Then for every $y \in Y$ there is some $w \in \bigcup_{i \in I} R_i^Z$ and $z \in Z\setminus Y$ such that $y, z \in w$. Moreover, if $Z\setminus Y$ is not a singleton and $z \in Z\setminus Y$, then there are at least two elements of $\bigcup_{i \in I} R_i^Z$ which contain $z$.
\end{lem}
\begin{proof}
Suppose this does not hold for some $y \in Y$. Let $Y' = Y\setminus \{y\}$. Then for every $U \subseteq Z\setminus Y$ we have $\delta(Y'\cup U) - \delta(Y') = \delta(Y \cup U) - \delta(Y)$. So $Y' \subseteq Y'\cup (Z\setminus Y)$ is simply algebraic: contradiction. Similarly, for the `moreover' part, if $z$ is in at most one relation in $\bigcup_{i \in I} R_i^Z$, then $\delta(Z\setminus\{z\}) \leq \delta(Z)$, which contradicts the simple algebraicity.
\end{proof}
We let $\mu$ be a function from the set of isomorphism types of minimally simply algebraic extensions in $\mathcal{C}_0(L)$ to the non-negative integers. The subclass $\mathcal{C}_\mu(L)$ consists of structures in $\mathcal{C}_0(L)$ which, for each msa $Y\leq Z$ in $\mathcal{C}_0(L)$, omit the atomic type consisting of $\mu(Y,Z)+1$ disjoint copies of $Z$ over $Y$.
We will work with $\mu$ where the following holds:
\begin{ass}[Assumed Amalgamation Lemma]\label{aalemma}
\begin{enumerate}
\item[(i)] If $A \leq B_1, B_2 \in \mathcal{C}_\mu(L)$ and the free amalgam of $B_1$ and $B_2$ over $A$ is not in $\mathcal{C}_\mu(L)$, then there exist $Y \subseteq A$ and minimally simply algebraic extensions $Y \leq Z_i \subseteq B_i$ (for $i = 1,2$) which are isomorphic over $Y$ and satisfy $Z_i\setminus Y \subseteq B_i\setminus A$.
\item[(ii)] The class $(\mathcal{C}_\mu(L), \leq)$ is an amalgamation class (see below).
\end{enumerate}
\end{ass}
Note that (ii) here follows from (i) (cf. the proof of Lemma 4 in \cite{EH}), and by Section 2 of \cite{EH}, (i) holds if $\mu(Y,Z) \geq \delta(Y)$ for all msa $Y\leq Z$ in $\mathcal{C}_0(L)$.
\subsection{Generic structures and their geometries}
Suppose $\mathcal{A}$ is a subclass of $\mathcal{C}_0(L)$ such that $(\mathcal{A}, \leq)$ is an amalgamation class: meaning that if $B \in \mathcal{A}$ and $A \leq B$ then $A \in \mathcal{A}$, and if $A \leq B_1, B_2 \in \mathcal{A}$ then there is $C \in \mathcal{A}$ and embeddings $f_i : B_i \to C$ with $f_i(B_i) \leq C$ and $f_1 \vert A = f_2 \vert A$. Then there is a countable structure $\mathcal{M} \in \bar{\mathcal{C}}_0(L)$ satisfying the following conditions:\begin{enumerate}
\item [(G1)] $\mathcal{M}$ is the union of a chain $A_0 \leq A_1 \leq A_2 \leq \cdots$ of structures in $\mathcal{A}$.
\item [(G2)] (extension property) If $A\leq\mathcal M$ and $A\leq B\in\mathcal A$ then there exists an embedding $g:B\to\mathcal M$ such that $g(B)\leq\mathcal M$ and $g(a) = a$ for all $a \in A$.
\end{enumerate}
We refer to $\mathcal{M}$ as the \textit{generic structure} of the amalgamation class $(\mathcal{A}, \leq)$: it is determined up to isomorphism by the properties G1 and G2 (and G1 is automatic for countable structures in $\bar{\mathcal{C}}_0(L)$). Of course, Hrushovski's strongly minimal sets are the generic structures $\mathcal{M}_\mu(L)$ for the amalgamation classes $(\mathcal{C}_\mu(L), \leq)$. We will compare the geometries of these with that of the generic structure $\mathcal{M}_0(L)$ for the amalgamation class $(\mathcal{C}_0(L), \leq)$.
Suppose $(\mathcal{A}, \leq)$ and $(\mathcal{A}', \leq)$ are amalgamation classes, as above. We refer to the following as the Isomorphism Extension Property, and denote it by $\mathcal{A} \rightsquigarrow \mathcal{A}'$.
\begin{enumerate}
\item[(*)] Suppose $A \in \mathcal{A}$, $A' \in \mathcal{A}'$ and $f : G(A) \to G(A')$ is an isomorphism of geometries, and $A \leq B \in \mathcal{A}$. Then there is $B' \in \mathcal{A}'$ with $A' \leq B'$ and an isomorphism $f' : G(B) \to G(B')$ which extends $f$.
\end{enumerate}
\begin{lem}\label{isolemma}
Suppose $(\mathcal{A}, \leq)$ and $(\mathcal{A}', \leq)$ are amalgamation classes with generic structures $\mathcal{M}$, $\mathcal{M}'$ respectively. Suppose that both extension properties $\mathcal{A} \rightsquigarrow \mathcal{A}'$ and $\mathcal{A}' \rightsquigarrow \mathcal{A}$ hold. Then the geometries $G(\mathcal{M})$ and $G(\mathcal{M}')$ are isomorphic.
\end{lem}
\begin{proof}
We have already remarked that if $A \leq \mathcal{M}$, then the dimension of a subset of $A$ is the same whether computed in $A$ or in $\mathcal{M}$. Thus $G(A)$ is naturally a substructure of $G(\mathcal{M})$. We claim that the set $\mathcal{S}$ of geometry-isomorphisms
\[ f : G(A) \to G(A') \]
where $A \leq \mathcal{M}, A' \leq \mathcal{M}'$ are finite
is a back-and-forth system between $G(\mathcal{M})$ and $G(\mathcal{M}')$. Indeed (for the `forth'), given such an $f : G(A) \to G(A')$ and $A \leq B \leq \mathcal{M}$, there is $A' \leq B' \in \mathcal{A}'$ and an isomorphism $f' : G(B) \to G(B')$ extending $f$, by our assumption $\mathcal{A} \rightsquigarrow \mathcal{A}'$. The extension property G2 in $\mathcal{M}'$ means that we can take $B' \leq \mathcal{M}'$, as required. Similarly we obtain the `back' part from $\mathcal{A}' \rightsquigarrow \mathcal{A}$ and G2 in $\mathcal{M}$.
It follows that $G(\mathcal{M})$ and $G(\mathcal{M}')$ are isomorphic.
\end{proof}
\begin{rem} \label{isorem}\rm
We can adapt this slightly to give a criterion for local isomorphism of $G(\mathcal{M})$ and $G(\mathcal{M}')$. This is really just about adding parameters to the language, but we make it explicit. Suppose $X \in \mathcal{A}$ and $X' \in \mathcal{A}'$ are fixed finite structures with $X \leq \mathcal{M}$ and $X' \leq \mathcal{M}'$. We write $\mathcal{A}(X) \rightsquigarrow \mathcal{A}'(X')$ for the statement:
\begin{enumerate}
\item[]
Suppose $X \leq A \in \mathcal{A}$, $X' \leq A' \in \mathcal{A}'$ and $f : G_X(A) \to G_{X'}(A')$ is an isomorphism of geometries, and $A \leq B \in \mathcal{A}$. Then there is $B' \in \mathcal{A}'$ with $A' \leq B'$ and an isomorphism $f' : G_X(B) \to G_{X'}(B')$ which extends $f$.
\end{enumerate}
Here, recall that $G_X(A)$ is the localization of $G(A)$ over $X$, as defined in Section 2.1. It follows as in Lemma \ref{isolemma} that if $\mathcal{A}(X) \rightsquigarrow \mathcal{A}'(X')$ and $\mathcal{A}'(X') \rightsquigarrow \mathcal{A}(X)$ hold, then $G_X(\mathcal{M})$ and $G_{X'}(\mathcal{M}')$ are isomorphic.
\end{rem}
\section{Isomorphism of the strongly minimal set geometries}
Throughout, $(\mathcal{C}_0(L), \leq)$ and $(\mathcal{C}_\mu(L), \leq)$ are the amalgamation classes from the previous section. Note that $(\mathcal{C}_0(L), \leq)$ is an amalgamation class and we are \textit{assuming} that the amalgamation lemma \ref{aalemma} holds for $\mathcal{C}_\mu(L)$. We denote the generic structures by $\mathcal{M}_0(L)$ and $\mathcal{M}_\mu(L)$ respectively: so the latter is Hrushovski's strongly minimal set $D(L,\mu)$. The geometries are denoted by $G(\mathcal{M}_0(L))$ and $G(\mathcal{M}_\mu(L))$. We have already remarked that there are arbitrarily large msa extensions $\emptyset \leq Z$ in $\mathcal{C}_0(L)$, so if $\mu(\emptyset, Z) > 0$ for infinitely many of these, then ${\rm cl}_{\mathcal{M}_\mu(L)}(\emptyset)$ is infinite as it contains a copy of each such $Z$. Similarly, there are infinitely many msa extensions $Y \leq Z \in \mathcal{C}_0(L)$ with $\delta(Y) = 1$, so if $\mu(Y, Z) > 0$ for all of these, and $X \leq \mathcal{M}_\mu(L)$ is a singleton, then ${\rm cl}_{\mathcal{M}_\mu(L)}(X)$ is infinite. Our main result is:
\begin{thm}\label{main} Suppose \ref{aalemma} holds and $\mu(Y,Z) \geq 2$ for all msa $Y \leq Z \in \mathcal{C}_0(L)$ with $\delta(Y) \geq 2$ and $\mu(Y,Z) \geq 1$ when $\delta(Y) = 1$. Suppose that $X \leq \mathcal{M}_\mu(L)$ is finite and ${\rm cl}_{\mathcal{M}_\mu(L)}(X)$ is infinite. Then $G_X(\mathcal{M}_\mu(L))$ and $G(\mathcal{M}_0(L))$ are isomorphic geometries. In particular, $G(\mathcal{M}_\mu(L))$ and $G(\mathcal{M}_0(L))$ are locally isomorphic.
\end{thm}
\begin{proof}
Note that $X$ can be taken as $\emptyset$ or a singleton, by the remarks preceding the theorem.
We need to verify that the isomorphism extension property of Lemma \ref{isolemma} and Remarks \ref{isorem} holds in both directions. The main part will be to show that
$\mathcal{C}_0(L) \rightsquigarrow \mathcal{C}_\mu(L)(X)$. In fact, because of the symmetry of the argument, it will be convenient to take an arbitrary finite $W \leq \mathcal{M}_0(L)$ and show that $\mathcal{C}_0(L)(W) \rightsquigarrow \mathcal{C}_\mu(L)(X)$.
So suppose we are given $W \leq A \leq B \in \mathcal{C}_0(L)$ and $X \leq A' \in \mathcal{C}_\mu(L)$ with an isomorphism $f : G_W(A) \to G_X(A')$. We want to find $B' \in \mathcal{C}_\mu(L)$ with $A' \leq B'$ and an isomorphism $f' : G_W(B) \to G_X(B')$ extending $f$. The main point will be to ensure that each point of $B'\setminus (A' \cup {\rm cl}_{B'}(X))$ is involved in only a small number of relations, and this gives us control over the msa extensions in $B'$.
Let $A_0 = {\rm cl}_A(W)$ and let $A_1,\ldots, A_r$ be the $d$-dependence classes (over $W$) on $A\setminus A_0$: the latter are the points of $G_W(A)$. Similarly let $B_0 = {\rm cl}_B(W)$ and $B_1, \ldots, B_s$ the $d$-dependence classes (over $W$) on $B\setminus B_0$, with $A_i \subseteq B_i$ for $i = 1,\ldots, r$. List the relations on $B$ which are not contained in $A$ or some $B_0\cup B_j$ as $\rho_1,\ldots, \rho_t$. So these are finite sets. Let $A_0' = {\rm cl}_{A'}(X)$ and $A_1',\ldots, A_r'$ be the classes of $d$-dependence (over $X$) on $A'\setminus A_0'$, labelled so that $f(A_i) = A_i'$. We construct $B_0', B_1', \ldots, B_s'$ with $A_i' \subseteq B_i'$ for $i = 0, \ldots, r$, and $B' = \bigcup_{i = 0}^s B_i'$ in the steps below.
\noindent\textit{Terminology:\/} If $u, v \in E \in \mathcal{C}_0(L)$, say that $u,v$ are \textit{adjacent in $E$} if there exists $w \in \bigcup_i R_i^E$ such that $u,v \in w$.
\noindent\textit{Step 1:\/} Construction of $A'' = A' \cup B_0' \in \mathcal{C}_\mu(L)$. \newline
Take $A_0' \leq V \in \mathcal{C}_\mu(L)$ with $\delta(V) = \delta(X)$ and $\vert V \setminus A_0' \vert $ sufficiently large. For example, by our assumption on $X$, $\mathcal{C}_\mu(L)$ contains arbitrarily large finite algebraic extensions of $X$, so we can take $V \in \mathcal{C}_\mu(L)$ to be an amalgamation of $A_0'$ and one of these over $X$. Let $A''$ be the free amalgam of $A'$ and $V$ over $A_0'$ and let $B_0'$ be the copy of $V$ inside this. As $A_0'$ is $d$-closed in $A'$ and $A_0' \leq V$ it follows from \ref{aalemma} that $A'' \in \mathcal{C}_\mu(L)$, $B_0'$ is $d$-closed in $A''$ and $A' \leq A''$. Note that $\delta(A'') = \delta(A')$.
\noindent\textit{Step 2:\/} Construction of $B_0' \cup B_i' \in \mathcal{C}_\mu(L)$.\newline
We do this so that $B_0'$ is $d$-closed in $B_0' \cup B_i'$ and $\delta(B_i'\cup B_0'/B_0') = 1$. (As $\delta(B_0' \cup A_i'/B_0') = 1$, it then follows that $B_0' \cup A_i' \leq B_0'\cup B_i'$ when $1 \leq i \leq r$.) Let $m$ be sufficiently large. Choose some $R_i$: for example $R_1$ of arity $n \geq 3$. \newline
\textit{Case 1:\/} Suppose $i \leq r$. Pick $b_{i0} \in A_i'$ and let $s_{i1},\ldots, s_{im}$ be disjoint $(n-2)$-subsets of $B_0'\setminus A_0'$ (we adjust the choice of $V$ in step 1 to accommodate this). Let $B_i' = A_i' \cup \{b_{i1},\ldots, b_{im}\}$ and include as new $R_1$-relations on $B_0' \cup B_i'$ the $n$-sets $\{b_{i0}, b_{ij}\}\cup s_{ij}$ for $1\leq j\leq m$. We need to show that this has the required properties.
First, note that $B_0'\cup A_i' \leq B_0' \cup A_i' \cup \{b_{ij}\}$, so $B_0' \cup A_i' \cup \{b_{ij}\} \in \mathcal{C}_0(L)$.
Suppose $Y \leq Z$ is a msa extension in $B_0' \cup A_i' \cup \{b_{ij}\}$ not contained in $B_0'\cup A_i'$. So $s_{ij} \cup \{b_{i0},b_{ij}\} \subseteq Z$. If $b_{ij} \not\in Y$ then $Y \leq Z\setminus\{b_{ij}\} < Z$ is algebraic, so $Z\setminus \{b_{ij}\} = Y$ and $Y = s_{ij}\cup \{b_{i0}\}.$ As the elements of $s_{ij}$ are non-adjacent to $b_{i0}$ in $B_0'\cup A_i'$, it follows that there is only one copy of $Z$ over $Y$ in $B_0' \cup A_i' \cup \{b_{ij}\}$. If $b_{ij} \in Y$, then by Lemma \ref{21} $s_{ij}\cup \{b_{i0}\} \not\subseteq Y$. But then there is at most one copy of $Z$ over $Y$ in $B_0' \cup A_i' \cup \{b_{ij}\}$. Note that $\delta(Y) = \delta(Z) \geq \delta(Z\cap (A_i' \cup B_0')) \geq 1$. So in both cases we meet the requirements for $B_0' \cup A_i' \cup \{b_{ij}\} \in \mathcal{C}_\mu(L)$.
Now note that $B_0' \cup B_i'$ is the free amalgam over $B'_0\cup A_i'$ of the structures $B_0'\cup A_i' \cup \{b_{ij}\}$ (for $j = 1, \ldots, m$). Each $B_0'\cup A_i' \subseteq B_0'\cup A_i' \cup \{b_{ij}\}$ is an algebraic extension and the only msa extension in this with base in $B_0' \cup A_i'$ and which is not contained in $B_0'\cup A_i'$ is $s_{ij}\cup \{b_{i0}\} \leq s_{ij}\cup \{b_{i0}, b_{ij}\}$. So the amalgamation lemma \ref{aalemma} implies that $B_0' \cup A_i' \leq B_0'\cup B_i' \in \mathcal{C}_\mu(L)$. It is clear that $\delta(B_i'/B_0' \cup A_i') = 0$ so $\delta(B_0' \cup B_i') = \delta(B_0' \cup A_i') = \delta(B_0') + 1$.
Finally, note that as $B_0'$ is $d$-closed in $B_0' \cup A_i'$ and $B_0' \cup A_i' \leq B_0' \cup B_i'$, the $d$-closure of $B_0'$ in $B_0'\cup B_i'$ does not contain $b_{i0}$. It then follows easily that $B_0'$ is $d$-closed in $B_0'\cup B_i'$.
\noindent\textit{Case 2:\/} $i > r$. As in Case 1, let $s_{i1},\ldots, s_{im}$ be $(n-2)$-subsets of $B_0'\setminus A_0'$ with no relations on them. Let $B_i' = \{b_{i1},\ldots, b_{im}\}$ and include as new $R_1$-relations on $B_0' \cup B_i'$ the $n$-sets $\{b_{ij}, b_{i(j+1)}\}\cup s_{ij}$ for $1\leq j\leq m-1$. In this version of the construction we take the $s_{ij}$ to be $s_i$, independent of $j$.
It is clear that $\delta(B_0'\cup B_i'/B_0') = 1$ and if $\emptyset\neq Y \subseteq B_i'$ then $\delta(B_0' \cup Y/B_0') \geq 1$. So $B_0' \leq B_0'\cup B_i'$ and therefore $B_0'\cup B_i' \in \mathcal{C}_0(L)$, and $B_0'$ is $d$-closed in $B_0' \cup B_i'$. It remains to show that $B_0'\cup B_i' \in \mathcal{C}_\mu(L)$, so suppose $Y \leq Z_1$ is a msa extension in $B_0'\cup B_i'$. If $\delta(Y) = 0$ then $Z_1\subseteq B_0'$ and the same is true of any copy of $Z_1$ over $Y$. Similarly, if $Y \subseteq B_0'$ then all copies of $Z_1$ over $Y$ are contained in $B_0'$ as this is $d$-closed in $B_0'\cup B_i'$. So we can assume that $\delta(Y) \geq 1$ and $b_{ij} \in Y$ for some $j$. By Lemma \ref{21}, we can assume that one of the relations $s_i \cup \{b_{ij},b_{i(j\pm1)}\}$ is a subset of $Z_1$.
If $Z_1 \setminus Y$ is a singleton then there is at most one other copy $Z_2$ of $Z_1$ over $Y$, and in this case $Y = Z_1 \cap Z_2 = s_i\cup \{b_{ij}\}$. Note that $\delta(Y) \geq 2$ here, so $\mu(Y, Z_1) \geq 2$, by hypothesis.
Now suppose that $Z_1 \setminus Y$ has at least two elements. It will suffice to prove that there is no other copy $Z_2$ of $Z_1$ over $Y$ in $B_0'\cup B_i'$, so suppose there is such a $Z_2$. Take $j$ maximal such that $b_{ij} \in Z_1\cup Z_2$. By Lemma \ref{21}, $b_{ij}$ is in at least two relations in $Z_1\cup Z_2$; but $b_{ij}$ is only in two relations in $B_0'\cup B_i'$ and one of these also involves $b_{i(j+1)}$, so this is in $Z_1\cup Z_2$. This contradicts the choice of $j$.
\noindent\textit{Step 3:\/} Other relations on $B'$. \newline
The relations on $B'$ not contained in $A'$ or some $B_0' \cup B_i'$ are $\rho_1', \ldots, \rho_t'$. We can choose these to be subsets of $B'\setminus A''$ with $\rho_i' \cap \rho_j' = \emptyset$ if $i\neq j$, and $\rho_i' \cap B_j' \neq \emptyset$ iff $\rho_i \cap B_j \neq \emptyset$ (for $j \geq 1$). Note that this is possible if $m$ is sufficiently large. We make $\rho_i'$ of the same type as $\rho_i$ (that is, in the same $R_j$).
This completes the construction of $B'$. We now make a series of claims about it.
\noindent\textit{Claim 1:\/} Let $U \subseteq \{1,\ldots, s\}$, and $Y = \bigcup_{i\in U} (B_i\cup B_0)$, $Y' = \bigcup_{i\in U} (B_i' \cup B_0')$. Then $Y \cap A$ is $d$-closed in $A$ iff $Y' \cap A'$ is $d$-closed in $A'$, and in this case we have $\delta(Y/B_0) = \delta(Y'/B_0').$
Let $U_0 = \{i\in U : i \leq r\}$ and $U_1 = \{i \in U : i > r\}$. Then $Y\cap A = \bigcup_{i \in U_0} (A_0\cup A_i)$ and $Y' \cap A' = \bigcup_{i \in U_0} (A_0'\cup A_i')$. Because $f$ is an isomorphism of geometries, one of these is $d$-closed (in $A$ or $A'$) iff the other is (remembering that a subset of a geometry is $d$-closed iff any set properly containing it has bigger dimension). So suppose this is the case. We compute that:
\[\delta(Y'/B_0') = \delta(A''\cap Y'/B_0') + \delta(Y'/A''\cap Y') = \delta(A''\cap Y'/B_0') + \vert U_1\vert - \vert J\vert\]
where $J = \{ j : \rho_j' \subseteq Y'\}$. This follows from the fact that $Y'$ consists of $\vert U_0\vert$ sets of $\delta$-value $0$ over $A''\cap Y'$ and $\vert U_1\vert$ sets of $\delta$-value $1$ over $A''\cap Y'$, and an extra $\vert J\vert$ relations $\rho'_j$ between them. Moreover
\[\delta(A'' \cap Y'/B_0') = \delta ((A'\cap Y')\cup B_0'/B_0') = \delta(A' \cap Y'/A_0'),\]
using, for example, the construction of $A''$ as a free amalgam in step 1. Thus we have
\[\delta(Y'/B_0') = \delta(A'\cap Y'/A_0') + \vert U_1\vert - \vert J\vert.\]
Now, by construction (step 3) we have $\rho_j \subseteq Y$ iff $\rho'_j \subseteq Y'$. So an identical calculation shows that
\[\delta(Y/B_0) = \delta(A\cap Y/A_0) + \vert U_1\vert - \vert J\vert.\]
(This uses the fact that $A, B_0$ are freely amalgamated over $A_0$, which follows from the definitions of $A_0$, $B_0$ and the assumption that $A\leq B$.)
By the isomorphism $f$, we have $d(A'\cap Y'/X) = d(A\cap Y/W)$. If $A'\cap Y'$, $A\cap Y$ are $d$-closed (in $A'$, $A$ respectively) then $d(A'\cap Y') = \delta(A'\cap Y')$ and $d(A\cap Y) = \delta(A \cap Y)$. So in this case $\delta(A\cap Y/ A_0) = \delta(A' \cap Y' / A_0')$, as $d(W) = \delta(A_0)$ and $d(X) =\delta(A_0')$. Thus we have $\delta(Y/B_0) = \delta(Y'/B_0')$, as required.
$\Box_{Claim}$
\noindent\textit{Claim 2:\/} $B' \in \mathcal{C}_0(L)$, $B_0'$ and $B_0'\cup B_i'$ are $d$-closed in $B'$ and $A'' \leq B'$. The map $f' : G_W(B) \to G_X(B')$ given by $f'(B_i) = B_i'$ is an isomorphism of geometries which extends $f$.
First, note that (by construction step 2) if $B_0'\subseteq C \subseteq B'$ and $Y' = \bigcup\{ B_0'\cup B_i' : C \cap B_i' \neq \emptyset\}$, then $\delta(C) \geq \delta(Y')$. If $A'' \subseteq C$ then by Claim 1, $\delta(Y'/B_0') = \delta(Y/B_0)$, where $Y = \bigcup\{ B_0\cup B_i : C \cap B_i' \neq \emptyset\}$. This contains $A$, so as $A \leq B$ we have $\delta(Y) \geq \delta(A)$. By the isomorphism, $\delta(A/A_0) = \delta(A'/A_0') = \delta(A''/B_0')$ (by step 1). Thus $\delta(C/B_0') \geq \delta(Y'/B_0') = \delta(Y/B_0) \geq \delta(A/A_0) = \delta(A''/B_0')$ (using that $\delta(A_0) = \delta(B_0)$). This shows $A''\leq B'$ and therefore (as $\emptyset \leq A''$) we also have $\emptyset \leq B'$.
Now suppose $Y' \geq X$ is $d$-closed in $B'$. Then $B_0' \subseteq Y'$ and as above, $Y'$ is of the form $\bigcup_{i \in U} (B_0'\cup B_i')$ for some $U$. Moreover $Y'\cap A'$ is $d$-closed in $A'$ and so we can apply Claim 1. It follows from this that $B_0'$ is $d$-closed in $B'$ and the $d$-closed sets of dimension 1 over $B_0'$ are the $B_0'\cup B_i'$, by using the fact that the corresponding statements hold in $B$.
It remains to show that $f'$ is an isomorphism of geometries. Let $Y$, $Y'$ be as in Claim 1. We need to show that $Y$ is $d$-closed in $B$ iff $Y'$ is $d$-closed in $B'$. So suppose $Y$ is $d$-closed in $B$. Then $Y\cap A$ is $d$-closed in $A$ and so we can apply Claim 1 to get that $\delta(Y/B_0) = \delta(Y'/B_0')$. Suppose for a contradiction that $Y'$ is not $d$-closed in $B'$. Let $Z'$ be its $d$-closure in $B'$. So $Z' = \bigcup_{i \in Q} (B_0'\cup B_i')$ for some $Q$ with $U \subset Q \subseteq \{1,\ldots, s\}$ and $\delta(Z') \leq \delta(Y')$. Let $Z = \bigcup_{i \in Q} (B_0\cup B_i)$. So $Y \subset Z$. Because $Z'$ is $d$-closed in $B'$ and therefore $Z'\cap A'$ is $d$-closed in $A'$, we can apply Claim 1 to get that $\delta(Z/B_0) = \delta(Z'/B_0')$. So we have
\[\delta(Z/B_0) = \delta(Z'/B_0') \leq \delta(Y'/B_0') = \delta(Y/B_0)\]
and this contradicts the fact that $Y$ is $d$-closed and $Y \subset Z$. Thus $Y'$ is $d$-closed in $B'$. The argument for the converse implication is the same.
$\Box_{Claim}$
\noindent\textit{Claim 3:\/} $B' \in \mathcal{C}_\mu(L)$.
Suppose that $Y \leq Z$ is a minimally simply algebraic extension in $B'$. First suppose $\delta(Y) = \delta(Z) \leq 1$. Then $d(Y) \leq 1$, so $Y \subseteq B_0'\cup B_i'$ for some $i$ and as $B_0'\cup B_i'$ is $d$-closed in $B'$, any copies of $Z$ over $Y$ in $B'$ are contained in $B_0'\cup B_i'$. So there are at most $\mu(Y,Z)$ of these as $B_0'\cup B_i' \in \mathcal{C}_\mu(L)$.
Now suppose that $\delta(Y) \geq 2$ and suppose for a contradiction that $Z_i$ (for $i = 1,\ldots, \mu(Y,Z)+1$) are disjoint copies in $B'$ of $Z$ over $Y \subseteq B'$ (meaning that the sets $Z_i \setminus Y$ are disjoint, of course).
If $y \in Y$ then $y$ is in some relation in $R_k^Z\setminus R_k^Y$ (for some $k \in I$) by Lemma \ref{21}. Thus $y$ is in at least three relations in $R_k^{B'}$ (one in each $R_k^{Z_i}\setminus R_k^Y$). By inspection of the construction one therefore sees that $y \in A''$ or $y = b_{ij}$ for some $i > r$. In the latter case, two of the (at most) three relations in $B'$ which involve $b_{ij}$ are $s_{i}\cup \{b_{i(j-1)}, b_{ij}\}$ and $s_{i}\cup \{b_{i(j+1)}, b_{ij}\}$. So we can assume that the first is a subset of $Z_1$ (and not a subset of $Y$) and the second is a subset of $Z_2$. But this implies that $s_{i} \subseteq Y$. However, there is no other relation which contains $\{b_{ij}\} \cup s_{i}$: contradicting the fact that $Z_3$ is a copy of $Z_1$ over $Y$.
Thus $Y \subseteq A''$. As $A'' \in \mathcal{C}_\mu(L)$ not all of the $Z_i$ are subsets of $A''$, so we can assume that $Z_1 \not\subseteq A''$. As $A'' \leq B'$ we have $Y \subseteq A'' \cap Z_1 \leq Z_1$ so (by the simplicity of the extension) $Z_1 \cap A'' = Y$.
Note that $Z_1$ is in the $d$-closure of $Y$ so we cannot have $Y \subseteq B_0'$. Let $y \in Y \setminus B_0'$. This is adjacent in $Z_1$ to some $z \in Z_1\setminus Y$. So $y \in A'' \setminus B_0'$ is adjacent in $B'$ to $z \in B'\setminus A''$. Inspection of the construction shows that $y = b_{i0}$ (for some $i \leq r$) and $z = b_{ij}$. Then the adjacency of $y$ and $z$ in $Z_1$ forces $s_{ij} \subseteq Z_1$, and so $s_{ij} \subseteq Y$ (as $A''\cap Z_1 = Y$). But then $Y \leq Y \cup \{b_{ij}\}$ is a simply algebraic extension in $Z_1$. As $Y \leq Z_1$ is a minimally simply algebraic extension, this implies $Y = \{b_{i0}\}\cup s_{ij}$ and $Z_1 = \{b_{i0}, b_{ij}\}\cup s_{ij}$. However, there is no other relation in $B'$ which contains this $Y$ (by construction), so we have a contradiction.
$\Box_{Claim}$
Claims 2 and 3 finish the proof of the isomorphism extension property $\mathcal{C}_0(L) (W)\rightsquigarrow \mathcal{C}_\mu(L)(X)$.
For the other direction, we can use the same construction (it is a special case of the above as $\mathcal{C}_\mu(L) \subseteq \mathcal{C}_0(L)$). Of course, in this case we do not need Claim 3.
\end{proof}
\section{Further isomorphisms}
\subsection{Localization of non-isomorphic geometries}
In this subsection the language $L$ has just a single $3$-ary relation $R$. We often suppress $L$ in the notation.
In 5.2 of \cite{EH} Hrushovski varies his strongly minimal set construction to produce examples where the model-theoretic structure of the strongly minimal set can be read off from the geometry: lines of the geometry have three points, and colinear points correspond to instances of the ternary relation. He thereby produces continuum-many non-isomorphic geometries of (countable, saturated) strongly minimal structures, but asks whether these examples are \textit{locally} isomorphic. We show that this is the case: in fact, localizing any of them over a 3-dimensional set gives a geometry isomorphic to $G(\mathcal{M}_0(L))$, the geometry of the generic structure for $(\mathcal{C}_0(L), \leq)$.
In 5.2 of \cite{EH}, Hrushovski considers
\[\mathcal{K}_0 = \{A \in \mathcal{C}_0(L) : B \leq A \mbox{ for all $B \subseteq A$ with $\vert B \vert \leq 3$}\}.\]
The class $(\mathcal{K}_0, \leq)$ is an amalgamation class: one shows that if $A \leq B_1, B_2 \in \mathcal{K}_0$ and the free amalgam of $B_1$ and $B_2$ over $A$ is not in $\mathcal{K}_0$, then there exist $a, a' \in A$ and $b_i \in B_i\setminus A$ with $R(a,a', b_i)$ holding in $B_i$ (for $i=1,2$).
More generally, given a function $\mu$ as before, we can consider $\mathcal{K}_\mu = \mathcal{K}_0 \cap \mathcal{C}_\mu(L)$ and for appropriate $\mu$, the class $(\mathcal{K}_\mu, \leq)$ will satisfy Assumption \ref{aalemma}. In fact, we only need to define $\mu(Y,Z)$ for $\delta(Y) \geq 3$. For suppose $Y \leq Z$ is a minimally simply algebraic extension in $\mathcal{K}_0$ and $\delta(Y) = \delta(Z) \leq 2$. Then $Z$ has at most 3 elements: otherwise there is a subset $W\subseteq Z$ of size 3 with $W \not\in R^Z$, and then $W$ is not self-sufficient in $Z$, contradicting the definition of $\mathcal{K}_0$. It follows that the value of $\mu(Y,Z)$ is irrelevant for such $Y \leq Z$: the multiplicity is already controlled by the definition of $\mathcal{K}_0$.
So we shall assume:
\begin{ass}\label{HAmalgLemma} With the above notation:
\begin{enumerate}
\item[(i)] If $A \leq B_1, B_2 \in \mathcal{K}_\mu$ and the free amalgam of $B_1$ and $B_2$ over $A$ is not in $\mathcal{K}_\mu$, then either there exist $a, a' \in A$ and $b_i \in B_i\setminus A$ with $R(a,a', b_i)$ holding in $B_i$ (for $i=1,2$), or there exists $Y \subseteq A$ with $\delta(Y) \geq 3$ and msa extensions $Y \leq Z_i \in B_i$ (for $i= 1,2$) which are isomorphic over $Y$ and $Z_i\setminus Y \subseteq B_i\setminus A$.
\item[(ii)] The class $(\mathcal{K}_\mu, \leq)$ is an amalgamation class.
\end{enumerate}
\end{ass}
Again, (ii) follows from (i) here and the condition $\mu(Y,Z) \geq \delta(Y)$ (for $\delta(Y) \geq 3$) guarantees that (i) holds.
Denote the generic structure of $(\mathcal{K}_\mu, \leq)$ by $\mathcal{N}_\mu$. The $d$-closure of two points in $\mathcal{N}_\mu$ has size $3$ (as above), so certainly $G(\mathcal{N}_\mu)$ and $G(\mathcal{M}_0(L))$ are non-isomorphic. In fact, we can recover the relation $R$ from the geometry $G(\mathcal{N}_\mu)$ as the $3$-sets with dimension 2. Thus different $\mu$ give different geometries. It can be shown that there are infinitely many msa extensions $Y \leq Z \in \mathcal{K}_0$ with $\delta(Y) = 3$, so if $\mu(Y, Z) \geq 1$ for infinitely many of these, then ${\rm cl}_{\mathcal{N}_\mu}(X)$ is infinite whenever $X \leq \mathcal{N}_\mu$ consists of 3 independent points.
We show:
\begin{thm}\label{surprise}
Suppose $\mu(Y,Z) \geq 3$ for all msa $Y \leq Z$ in $\mathcal{K}_0$ with $\delta(Y) \geq 3$ and Assumption \ref{HAmalgLemma} holds. Let $X \leq \mathcal{N}_\mu$ and $d(X) = 3$. Then the localization $G_X(\mathcal{N}_\mu)$ is isomorphic to $G(\mathcal{M}_0(L))$.
\end{thm}
\begin{proof}
We may assume that $X$ consists of 3 points (and no relations). By the remarks preceding the theorem, $X$ has arbitrarily large finite algebraic extensions in $\mathcal{K}_\mu$. We show that the isomorphism extension property $\mathcal{C}_0(L) \rightsquigarrow \mathcal{K}_\mu(X)$ holds.
Suppose we are given $A \leq B \in \mathcal{C}_0(L)$ and $X \leq A' \in \mathcal{K}_\mu$ and an isomorphism $f : G(A) \to G_X(A')$. We want to find $B' \in \mathcal{K}_\mu$ with $A' \leq B'$ and an isomorphism $f' : G(B) \to G_X(B')$ extending $f$. This is very similar to the construction of $B'$ in the proof of Theorem \ref{main} and we will only indicate what needs to be modified and provide extra argument as required.
Let $A_0 = {\rm cl}_A(\emptyset)$ and let $A_1,\ldots, A_r$ be the $d$-dependence classes on $A\setminus A_0$: the latter are the points of $G(A)$. Similarly let $B_0 = {\rm cl}_B(\emptyset)$ and $B_1, \ldots, B_s$ the $d$-dependence classes on $B\setminus B_0$, with $A_i \subseteq B_i$ for $i = 1,\ldots, r$. List the relations on $B$ which are not contained in $A$ or some $B_0\cup B_j$ as $\rho_1,\ldots, \rho_t$. So these are 3-sets and note that each of them intersects three different $B_i$. Let $A_0' = {\rm cl}_{A'}(X)$ and $A_1',\ldots, A_r'$ be the classes of $d$-dependence over $X$ on $A'\setminus A_0'$, labelled so that $f(A_i) = A_i'$. We construct $B_0', B_1', \ldots, B_s'$ with $A_i' \subseteq B_i'$ for $i = 0, \ldots, r$, and $B' = \bigcup_{i = 0}^s B_i'$ in the following way.
\noindent\textit{Step 1:\/} Construction of $A'' = A' \cup B_0' \in \mathcal{K}_\mu$. \newline
This is as before, but we need to take $V \in \mathcal{K}_\mu$: we can do this because algebraic extensions of $X$ can be arbitrarily large.
\noindent\textit{Step 2:\/} Construction of $B_0' \cup B_i' \in \mathcal{K}_\mu$.\newline
The construction is as in Theorem \ref{main} for $i \leq r$. In the case $i > r$ we vary the construction by taking the $s_{ij}$ to be distinct. The proofs that $B_0'$ is $d$-closed in $B_0'\cup B_i'$ are as before; as are the arguments which show that if $Y \leq Z$ is a msa extension in $\mathcal{K}_0$ with $\delta(Y) \geq 3$ then there are at most $\mu(Y,Z)$ copies of $Z$ over $Y$ in $B_0'\cup B_i'$. So it remains to show that $B_0' \cup B_i' \in \mathcal{K}_0$.
If $i \leq r$, then using the amalgamation lemma \ref{HAmalgLemma} as in Step 2, Case 1 of Theorem \ref{main}, it will suffice to show that $B_0'\cup A_i' \cup \{b_{ij}\} \in \mathcal{K}_0$. This is the free amalgam of $\{s_{ij}, b_{i0}, b_{ij}\}$ and $B_0'\cup A_i'$ over $\{s_{ij},b_{i0}\}$. So we can apply \ref{HAmalgLemma} (because $\{s_{ij},b_{i0}\}$ is in no relation in $B_0'\cup A_i'$).
Now suppose $i > r$. We analyse the possibilities for $\delta(Y)$ when $Y \subseteq B_0'\cup B_i'$, $Y \not\subseteq B_0'$ and $\vert Y \vert > 1$. As $Y \cap B_0'$ is $d$-closed in $Y$ we have $\delta(Y) > \delta(Y \cap B_0')$. If $Y \cap B_0' = \emptyset$ then by the construction, $\delta(Y) = \vert Y \vert$. If $Y \cap B_0'$ is a singleton then $Y$ has at most one relation (because the $s_{ij}$ are distinct), so $\delta(Y) > 1$ and $\delta(Y) \geq \vert Y \vert -1$. In the remaining case, $\delta(Y \cap B_0') \geq 2$ (as $B_0' \in \mathcal{K}_0$), so $\delta(Y) \geq 3$. Thus, $Y$ consists of 2 points, or is 3 points in a relation, or has $\delta(Y) \geq 3$. It follows that $B_0'\cup B_i' \in \mathcal{K}_0$.
\noindent\textit{Step 3:\/} Other relations on $B'$. \newline
As before.
\noindent\textit{Claim 1:\/} Let $U \subseteq \{1,\ldots, s\}$, and $Y = \bigcup_{i\in U} (B_i\cup B_0)$, $Y' = \bigcup_{i\in U} (B_i' \cup B_0')$. Then $Y \cap A$ is $d$-closed in $A$ iff $Y' \cap A'$ is $d$-closed in $A'$, and in this case we have $\delta(Y) = \delta(Y'/B_0').$
As before.
\noindent\textit{Claim 2:\/}
$B' \in \mathcal{C}_0(L)$, $B_0'$ and $B_0'\cup B_i'$ are $d$-closed in $B'$ and $A'' \leq B'$. The map $f' : G(B) \to G_X(B')$ given by $f'(B_i) = B_i'$ is an isomorphism of geometries which extends $f$.
As before, using Claim 1.
\noindent\textit{Claim 3:\/} If $i\neq j$ then $B_0'\cup B_i' \cup B_j' \in \mathcal{K}_\mu$ and $B_0' \cup B_i'\cup B_j' \leq B'$.
By construction $B_0'\cup B_i' \cup B_j'$ is the free amalgam of $B_0'\cup B_i'$ and $B_0' \cup B_j'$ over $B_0'$, and $B_0'$ is $d$-closed in each. So the first statement follows from the assumed amalgamation lemma \ref{HAmalgLemma}. We have $\delta(B_0' \cup B_i'\cup B_j'/ B_0') = 2$. Moreover, as $B_0'\cup B_i'$ is $d$-closed in $B'$ (Claim 2), if $B_0' \cup B_i'\cup B_j'\subseteq Z$ then $\delta(Z/B_0') \geq 2$. This gives the second statement.
$\Box_{Claim}$
\noindent\textit{Claim 4:\/} $B' \in \mathcal{K}_0$.
We need to show that if $D \subseteq B'$ has size at most 3 then $D \leq B'$. If $\vert D \vert \leq 2$ then $D \subseteq B_0' \cup B_i' \cup B_j'$ for some $i, j$ and it follows from Claim 3 that $D \leq B_0'\cup B_i'\cup B_j' \leq B'$. So suppose $D$ has size 3 and $D \subseteq C$ with $\delta(C) < \delta(D)$. We must have $\delta(C) = 2$ (as any two points of $D$ are self-sufficient in $C$ and have $\delta$-value $2$). As $A'' \in \mathcal{K}_0$ there is an $i$ such that $C \cap (B_i'\setminus A'') \neq \emptyset$. Note that $C$ is not contained in $B_0'\cup B_i'$ (because this is in $\mathcal{K}_\mu$), so as $B_0'$ is $d$-closed in $B_0' \cup B_i'$ and the latter is $d$-closed in $B'$, we have
\[0 \leq \delta(C\cap B_0') < \delta(C \cap (B_0'\cup B_i')) < \delta(C) = 2.\]
Thus $\delta(C\cap B_0') = 0$, so $C \cap B_0' = \emptyset$.
It then follows from Step 2 of the construction that there is no adjacency in $C$ between points of $C\cap A''$ and points of $C \setminus A''$. Let $q = \vert\{j : \rho_j' \subseteq C\}\vert$. Then (using $A'' \leq B'$; so $C \cap A'' \leq C$)
\[2 \geq \delta(C/C\cap A'') = \vert C \setminus A''\vert - q \geq 2q\]
as the $\rho_j'$ are disjoint. If $q = 0$ then $C$ is $C\cap A''$ together with some isolated points, and this is in $\mathcal{K}_\mu$ (so not possible in this situation). If $q = 1$ then $C$ consists of 3 points in a single relation and this has no subset of the form required for $D$.
$\Box_{Claim}$
\noindent\textit{Claim 5:\/} $B' \in \mathcal{K}_\mu$\newline
We already know that $B' \in \mathcal{K}_0$, so we need to show that $B' \in \mathcal{C}_\mu(L)$, at least as far as msa extensions $Y \leq Z$ with $\delta(Y) \geq 3$ are concerned. So suppose $Z_1,\ldots, Z_4$ are disjoint copies of $Z$ over $Y$ in $B'$. Then each element $y \in Y$ is in at least $4$ relations (using \ref{21}, as before), so by construction, $y \in A''$. Thus $Y \subseteq A''$ and the rest of the proof is just as in Claim 3 of Theorem \ref{main}.
$\Box_{Claim}$
Claims 2 and 5 finish the proof of one direction of the isomorphism extension property. The direction $\mathcal{K}_\mu(X) \rightsquigarrow \mathcal{C}_0(L)$ follows from the property $\mathcal{C}_\mu(L)(X) \rightsquigarrow \mathcal{C}_0(L)$ proved in Theorem \ref{main}.
\end{proof}
\begin{rem}\rm Note that $\mathcal{K}_0$ can be considered as $\mathcal{K}_\mu$ where $\mu(Y,Z)$ is formally given the value $\infty$ for all msa $Y \leq Z \in \mathcal{K}_0$. Thus the above argument also shows that the geometry of $\mathcal{N}_0$, the generic for $(\mathcal{K}_0, \leq)$, is locally isomorphic to $G(\mathcal{M}_0(L))$.
\end{rem}
\begin{rem} \rm Another variation is given in 5.1 of \cite{EH}. Let $k \geq 2$ and consider the language $L$ with a single $(k+1)$-ary relation symbol $R$. Let
\[\mathcal{C}_0'(L) = \{A \in \mathcal{C}_0(L): \delta(B) \geq\min(\vert B\vert , k)\,\, \forall B \subseteq A\}.\]
So if $C \subseteq A \in \mathcal{C}_0'(L)$ and $\vert C \vert \leq k$ then $C \leq A$. Hrushovski observes that $(\mathcal{C}_0'(L), \leq)$ is a free amalgamation class and that the assumed amalgamation lemma \ref{aalemma} holds for $(\mathcal{C}_\mu', \leq)$, for suitable $\mu \geq 2$. The generic structures here are strongly minimal and any $k$ points are independent. So the geometries are again different from that of $\mathcal{M}_0(L)$. However, they are again locally isomorphic. To see this we proceed as in Theorem \ref{surprise}, but take $X$ to be a set of size $k$. The construction and proof are essentially the same as before, except in Claim 4, where to show that $B' \in \mathcal{C}_0'(L)$ we modify the argument as follows.
Suppose $C \subseteq B'$ has $\delta(C) < k$ and $\vert C \vert \geq k+1$. Then for some $i$ we have:
\[0 \leq \delta(C\cap B_0') < \delta(C\cap (B_0' \cup B_i')) <\delta(C) \leq k-1.\]
So $\delta(C\cap B_0') \leq k-3$ and therefore $\vert C \cap B_0'\vert \leq k-3$. Then by construction there is no adjacency in $C$ between points of $C \cap A''$ and points of $C\setminus A''$. So (with $q$ as before):
\[k-1 \geq \delta(C/C\cap A'') = \vert C \setminus A''\vert - q\geq kq.\]
Thus $q = 0$ and we have a contradiction.
\end{rem}
\subsection{Changing the language and predimension}
Recall that the language $L$ consists of relation symbols $\{R_i: i \in I\}$ with $R_i$ of arity $n_i$ (and only finitely many symbols of each arity). Suppose that $L_0 = \{ R_i : i \in I_0\}$ is a sublanguage with the property that for every $i \in I$ there is $j \in I_0$ such that $n_i \leq n_j$. For example, if $I$ is finite we can take $L_0$ to consist of a relation symbol of maximal arity in $L$. The following is essentially Theorem 3.1 of \cite{MFDE1}, but working with sets rather than tuples: we omit most of the details of the proof.
\begin{thm}
The geometries $G(\mathcal{M}_0(L))$ and $G(\mathcal{M}_0(L_0))$ are isomorphic.
\end{thm}
\begin{proof}
We can use the construction in Theorem \ref{main} to show that $\mathcal{C}_0(L) \rightsquigarrow \mathcal{C}_0(L_0)$ holds. In step 3 of the construction, if $\rho_i$ is a $k$-set then we take $\rho_i'$ to be a $k'$-set with $k' \geq k$: the condition on $L_0$ allows us to do this. Claims 1 and 2 of Theorem \ref{main} then go through exactly as previously. The direction $\mathcal{C}_0(L_0) \rightsquigarrow \mathcal{C}_0(L)$ follows as in Theorem \ref{main}.
\end{proof}
\begin{rem}\rm
Theorem 3.1 of \cite{MFDE1} works with a predimension of the form:
\[\delta_\alpha(A) = \vert A \vert - \sum_{i \in I} \alpha_i\vert R_i^A\vert,\]
where the $\alpha_i$ are natural numbers. We can adapt the argument here to deal with such predimensions. For example, suppose $I$ is finite and $R_1$ is of maximal arity and $\alpha_1 = 1$. Let $L_0$ consist of $R_1$. Then, as in Theorem 3.1 of \cite{MFDE1}, $G(\mathcal{M}_0(L))$ is isomorphic to $G(\mathcal{M}_0(L_0))$. To show that $\mathcal{C}_0^\alpha(L) \rightsquigarrow \mathcal{C}_0(L_0)$ (where $\mathcal{C}_0^\alpha(L)$ is defined using the predimension $\delta_\alpha$) we perform the same construction except that in step 3, if $\rho_j$ is of type $R_i$ then we add $\alpha_i$ corresponding $\rho_j'$ (but still disjoint etc).
\end{rem}
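For readers who want to experiment, the following Python sketch computes the weighted predimension $\delta_\alpha(A) = \vert A \vert - \sum_{i} \alpha_i\vert R_i^A\vert$ of the remark above for a small finite structure, and checks self-sufficiency by brute force. The data representation (relations as sets of frozensets) and the function names are our own choices and are not prescribed by the construction.
\begin{verbatim}
# Illustrative sketch (ours, not from the paper): the weighted predimension
# delta_alpha(A) = |A| - sum_i alpha_i * |R_i^A|, with each relation R_i^A
# stored as a set of frozensets (n_i-subsets of A).
from itertools import combinations

def delta_alpha(points, relations, alpha):
    return len(points) - sum(alpha[i] * len(rels) for i, rels in relations.items())

def restrict(points, relations, subset):
    """Induced substructure on `subset`: keep only the relations inside it."""
    sub = set(subset)
    return sub, {i: {r for r in rels if r <= sub} for i, rels in relations.items()}

def is_self_sufficient(points, relations, alpha, subset):
    """Naive check of A <= B: delta_alpha(C) >= delta_alpha(A) for all A in C in B.
    Exponential in the number of extra points, so only for tiny examples."""
    a = set(subset)
    base = delta_alpha(*restrict(points, relations, a), alpha)
    extra = list(points - a)
    for k in range(len(extra) + 1):
        for comb in combinations(extra, k):
            c = a | set(comb)
            if delta_alpha(*restrict(points, relations, c), alpha) < base:
                return False
    return True

# One ternary relation R_1 with weight alpha_1 = 1 on four points.
pts = {1, 2, 3, 4}
rels = {1: {frozenset({1, 2, 3}), frozenset({2, 3, 4})}}
print(delta_alpha(pts, rels, {1: 1}))              # 4 - 2 = 2
print(is_self_sufficient(pts, rels, {1: 1}, {1}))  # True
\end{verbatim}
For the classes appearing in the proofs above one would of course take all $\alpha_i = 1$.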
\subsection{Sets versus tuples} We have chosen to work with structures $A$ where the relations $R_i^A$ are sets of $n_i$-sets. As was done in \cite{MFDE1} we could also have worked more generally with structures $A$ where the $R_i^A$ are sets of $n_i$-tuples and the predimension is still given by $\vert A \vert - \sum _i \vert R_i^A\vert$. Let $\hat{\mathcal{C}}_0(L)$ denote the class of these finite structures with $\emptyset \leq A$.
\begin{thm}
The geometries of the generic structures of the amalgamation classes $(\mathcal{C}_0(L), \leq)$ and $(\hat{\mathcal{C}}_0(L), \leq)$ are isomorphic.
\end{thm}
\begin{proof}
This is the usual sort of proof using the construction. For example, to show $\hat{\mathcal{C}}_0(L) \rightsquigarrow \mathcal{C}_0(L)$ we replace an $n_i$-tuple $\rho_j$ (in $R_i^B\setminus R_i^{A}$) by an $n_i$-set, using the new $d$-dependent points to eliminate repetitions of points in the tuple or different enumerations of the same set.
\end{proof}
\end{document}
\begin{document}
\title{Trimming a Tree and the Two-Sided Skorohod Reflection.}
\begin{abstract}
The $h$-trimming of a tree is a natural regularization procedure
which consists in
pruning the
small branches of a tree: given $h\geq0$,
it is obtained by only keeping the vertices
having at least one leaf above them at a distance greater than or equal to $h$.
The $h$-cut of a function $f$ is the function
of minimal total variation uniformly approximating the increments of $f$ with accuracy $h$, and
can be explicitly constructed via
the two-sided Skorohod reflection of $f$ on the interval $[0,h]$.
In this work, we show that the contour path of the $h$-trimming
of a rooted real tree is given by the $h$-cut of its original
contour path. We provide two applications of this result. First,
we recover a famous result of
Neveu and Pitman \cite{NP89}, which states
that the $h$-trimming of a tree coded by a Brownian excursion
is distributed as a standard binary tree. In addition,
we provide the joint distribution of this Brownian tree and its trimmed version
in terms of the local time of the two-sided reflection of its contour path.
As a second application,
we relate the maximum of a sticky Brownian motion
to the local time of its driving process.
\end{abstract}
\section{Introduction and Main Results.}
\label{Intro}
In a rooted tree, there is a natural partial ordering
on the set of vertices -- $x \preceq y$ iff the unique path
from the root to vertex $y$ passes through vertex $x$.
Under this ordering, the children of a given node are not ordered.
However, one can always specify some arbitrary
ordering of the children of each vertex of the tree (from left to right)
and by doing so,
one defines an object called a rooted {\it plane} tree -- see Le Gall \cite{LG05} for a
formal definition.
Every rooted plane tree can be encoded
by its contour path, where the contour path
can be loosely understood
by envisioning the tree as embedded in
the plane,
with each of its edges
having unit length.
We can then imagine
a particle starting
from the root,
traveling
along the edges of the tree
at speed $1$ and exploring
the tree from left to right --- see Fig \ref{fig1}.
The contour path of the tree
is simply defined as the current distance
of the exploration particle to the root --- see Fig \ref{fig2}.
In this paper, we show that the contour path of the
$h$-trimming of a rooted plane tree
(and more generally the $h$-trimming of rooted real trees)
is given by the $h$-cut of the
original contour path; where the $h$-cut is constructed from the two-sided Skorohod reflection
of the original contour path -- see (\ref{h-truncation}).
{\bf Real rooted trees.} As already discussed, every rooted plane tree can be encoded by
its contour path which is a function in $C_0^+({\mathbb R}^+)$
-- the set of continuous non-negative functions on ${\mathbb R}^+$ with $f(0)=0$ and compact support.
Conversely, it is now well established that any $f\in C_0^+({\mathbb R}^+)$
encodes a {\it real rooted tree} in the following natural way -- see again \cite{LG05}
for more details.
Define
$$
\forall s,t \in {\mathbb R}^+, \ \ d_f(s,t) = f(s) + f(t) - 2 \inf_{[s\wedge t,s\vee t]} f,
$$
and the equivalence relation $\sim$ on ${\mathbb R}^+$ as follows
$$
s \sim t \Longleftrightarrow d_f(s,t) = 0.
$$
The equivalence relation $\sim$ defines a quotient space
$$
{\cal T}_f \ = \ {\mathbb R}^+ / \sim
$$
referred to as the tree encoded by $f$.
The function $d_f$ induces a distance on ${\cal T}_f$, and we keep the notation
$d_f$ for this distance. In \cite{LG05}, it is shown that the pair $({\cal T}_f,d_f)$
defines a real tree in the sense
that the two following properties are satisfied. For every $a,b\in {\cal T}_f$:
\begin{enumerate}
\item[(i)]{\bf (Unique geodesics.)} There is a unique isometric map $\psi^{a,b}$ from $[0,d_f(a,b)]$ into ${\cal T}_f$ such that $\psi^{a,b}(0) = a$ and $\psi^{a,b}({d_f(a, b)}) = b$.
\item[(ii)]{\bf (Loop free.)} If $q$ is a continuous injective map from $[0, 1]$ into ${\cal T}_f$ , such that $q(0) = a$ and $q(1) = b$, we have
$q({[0, 1]}) = \psi^{a,b}({[0, d_f(a, b)]})$.
\end{enumerate}
In the following, for any $x,y\in{\cal T}_f$, $[x,y]$
will denote the geodesic from $x$ to $y$, i.e., $[x,y]$
is the image of $[0,d_f(x,y)]$ by $\psi^{x,y}$. We will denote by $p_f$ the canonical projection from ${\mathbb R}^+$
to ${\cal T}_f$; $p_f(t)$ can be thought of
as the position of the exploration particle
at time $t$. In the following,
$\rho_f=p_f(0)$ will be referred to as the root of the tree ${\cal T}_f$.
In what follows, real trees will always be rooted, even if this is not mentioned explicitly.
$d_f$ induces a natural partial ordering on the rooted tree ${\cal T}_f$ :
$v' \partialeceq v$ ($v'$ is an ancestor of $v$) iff
$$
d_f(v,v') \ = \ d_f(\rho_f,v) - d_f(\rho_f,v').
$$
We note that this partial ordering is directly related to the sub-excursions nested in the function $f$. Indeed,
for any $s,t\geq0$,
$p_f(t) \preceq p_f(s)$ if and only if $\inf_{[t\wedge s, t\vee s]} f = f(t)$, which is equivalent
to saying that $t$ is the ending time or starting time of a sub-excursion of $f$ starting from level $f(t)$
and straddling time $s$ -- see Fig \ref{fig1} and \ref{fig2}.
Finally,
for any $x,y\in {\cal T}_f$,
the most recent common ancestor of $x$ and $y$ -- denoted by $x\wedge y$ --
is defined as $\sup\{ z\in {\cal T}_f : z \preceq x,y \}$. From the definition of our genealogy,
for any $t_1,t_2\in{\mathbb R}^+$, we must have
\begin{equation}\label{ionf}
p_f(t_1)\wedge p_f(t_2) = p_f(s), \ \ \mbox{for any $s\in\mbox{argmin}_{[t_1\wedge t_2,t_1 \vee t_2]} f$,}
\end{equation}
with the height of the most recent common ancestor being given by $f(s)=\min_{[t_1\wedge t_2, t_1\vee t_2]} f$.
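As a concrete illustration of the coding, the short Python sketch below evaluates $d_f$ and the height of the most recent common ancestor for a contour path sampled at integer times; the discretization is our own simplification and plays no role in the arguments below.
\begin{verbatim}
import numpy as np

# A small sketch (ours, not from the paper): the tree distance
# d_f(s,t) = f(s) + f(t) - 2 * min of f on [s^t, svt] for a sampled
# contour function f, and the height of the most recent common ancestor.

def d_f(f, i, j):
    """f: 1d array of samples of the contour path; i, j: grid indices."""
    lo, hi = min(i, j), max(i, j)
    return f[i] + f[j] - 2.0 * f[lo:hi + 1].min()

def mrca_height(f, i, j):
    """Height of the most recent common ancestor of p_f(i) and p_f(j)."""
    lo, hi = min(i, j), max(i, j)
    return f[lo:hi + 1].min()

# Toy contour of a plane tree with unit-length edges, sampled at integer times.
f = np.array([0, 1, 2, 1, 2, 1, 0, 1, 0], dtype=float)
print(d_f(f, 2, 4))          # 2.0: the two leaves at height 2 are at distance 2
print(mrca_height(f, 2, 4))  # 1.0: they branch off at height 1
print(d_f(f, 2, 7))          # 3.0
\end{verbatim}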
\begin{figure}[ht]
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[scale=0.2]{tree1}
\caption{{\it Exploration of a plane tree. The exploration particle travels
along each branch twice: first on the left and away from the root, and then on the right
and towards the root. The root of the red sub-tree belongs to the $2$-trimming of the tree.}}
\label{fig1}
\end{minipage}
\hspace{0.5cm}
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[scale=0.2]{contour1}
\caption{{\it Contour path. The red portion of the curve
is a sub-excursion of height 2 corresponding to the exploration of the red sub-tree
on the left panel.}}
\label{fig2}
\end{minipage}
\end{figure}
{\bf Trimming and the two-sided Skorohod reflection.} As in Evans \cite{E05}, for every $h>0$,
we define the $h$-trimming of $({\cal T}_f,d_f)$
as the (possibly empty) sub-tree
\begin{equation}\label{def-tr}
\mbox{Tr}^h( {\cal T}_f ) := \{ x\in{\cal T}_f \ : \ \sup_{y\in {\cal T}_f \ : \ y \succeq x} d_f(x,y) \geq h \},
\end{equation}
which consists of all the points in ${\cal T}_f$
having at least one leaf above them at distance greater than or equal to $h$. (Note that
$\mbox{Tr}^h( {\cal T}_f )$ is not empty if and only if $\sup_{[0,\infty)} f\geq h$.)
As already mentioned, one of the main results of this paper is the relation between
the $h$-trimming of a real rooted tree and
the two-sided Skorohod reflection of its contour path.
The one-sided Skorohod reflection is well known
among probabilists. Given
a continuous function $f$ starting from $x\geq0$,
it is simply defined as
the following transformation
\begin{equation}
\Gamma^{0}(f)(t) \ = \ f(t) \ - \ \inf_{[0,t]} f \wedge 0. \label{one-sided}
\end{equation}
The resulting path obviously remains non-negative and
the function
$c(t)\equiv-(\inf_{[0,t]} f \wedge 0)$ is easily
seen
to be the unique solution
of the so-called (one-sided) Skorohod equation, i.e., the unique continuous function $c$ on ${\mathbb R}^+$ such that $c(0)=0$
and
\begin{enumerate}
\item $\Gamma^0(f)(t) := f(t) + c(t)$ is non-negative.
\item $c$ is non-decreasing.
\item $c$ does not vary off the set $\{t \ : \ \Gamma^0(f)(t) = 0 \}$, i.e.,
the support
of the measure $d c$ is contained in $\Gamma^{0}(f)^{-1}(\{0\})$.
\end{enumerate}
See Lemma 6.17 in \cite{KS91} for a proof of this statement.
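For a sampled path the one-sided reflection is a one-line computation; the following Python sketch (ours) implements (\ref{one-sided}) together with the compensator $c$ via a running infimum.
\begin{verbatim}
import numpy as np

# Sketch (ours) of the one-sided Skorohod reflection at 0 for a sampled path:
# Gamma^0(f)(t) = f(t) - (inf_{[0,t]} f) ^ 0, with compensator
# c(t) = -((inf_{[0,t]} f) ^ 0).

def one_sided_reflection(f):
    """f: 1d array with f[0] >= 0.  Returns (reflected path, compensator c)."""
    running_inf = np.minimum.accumulate(f)
    c = -np.minimum(running_inf, 0.0)   # non-decreasing, flat off {Gamma^0(f) = 0}
    return f + c, c

f = np.array([0.0, 0.5, -0.3, 0.2, -1.0, 0.4])
g, c = one_sided_reflection(f)
print(g)  # stays non-negative
print(c)  # minimal cumulative upward push
\end{verbatim}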
Intuitively,
the solution $c$,
which will be
referred to as the {\it compensator}
of the reflection in the rest of this paper, can be thought
of as the minimal
amount of upward push that one needs to
exert on the path $f$
to keep it away from negative values.
The Skorohod equation states that the reflected path is completely driven
by $f$ when it is away from the origin, while it is repelled from negative values
by the compensator upon reaching level $0$.
The following theorem
is a generalization of the Skorohod
equation to the two-sided case.
\begin{theorem}[{\bf Two-Sided Skorohod Reflection}]
\label{Skor}
Let $h\geq0$ and let $f$ be a continuous function with $f(0)\in[0,h]$.
There exists a unique
pair of
continuous functions $(c^0(f),c^h(f))$ with $c^0(f)(0)=c^h(f)(0)=0$ satisfying the three following properties.
\begin{enumerate}
\item $\Lambda_{0,h}(f)(t) \ := \ f(t) + c^h(f)(t) + c^0(f)(t)$ is valued in $[0,h]$.
\item $c^0(f)$ (resp., $c^h(f)$) is a non-decreasing (resp., non-increasing) function.
\item $c^0(f)$ (resp., $c^h(f)$) does not vary off the set $\Lambda_{0,h}(f)^{-1}(\{0\})$ (resp., $\Lambda_{0,h}(f)^{-1}(\{h\})$).
\end{enumerate}
\end{theorem}
As noted by
Kruk, Lehoczky, Ramanan, and Shreve \cite{KLRS07}, existence and uniqueness for this two-sided Skorohod problem
follow directly from Lemmas 2.1, 2.3 and 2.6 in Tanaka \cite{T79}.
In the rest of this paper,
$\Lambda_{0,h}(f)$ will be referred to as
the two-sided
Skorohod
reflection of the path $f$ on $[0,h]$,
while
the pair of functions $(c^0(f),c^h(f))$ will be referred to
as the compensators associated with the function $f$.
In the same spirit as the one-sided
reflection,
the compensator $c^h(f)$ (resp., $c^0(f)$) can be thought of as
the minimal amount
of downward (resp., upward) push at level $h$ (resp., $0$) that one has to exert on $f$ to keep the path
$\Lambda_{0,h}(f)$ inside the interval $[0,h]$. In other words,
adding the compensators $c^0(f)$ and $c^h(f)$ to $f$
is the ``laziest way'' of keeping $f$ in the interval $[0,h]$.
Let $f$ be a continuous function on ${\mathbb R}^+$
with $f(0)=0$ (with no restriction on the support and on the sign of $f$).
For such a function, define the $h$-cut of the function $f$ as
\begin{equation}\label{h-truncation}
f_h \ := \ f-\Lambda_{0,h}(f) = -c^0(f) - c^h(f).
\end{equation}
$f_h$ is
also characterized by an interesting variational property. Indeed,
combining Proposition 2 in Mi{\l}o\'s \cite{M13} and Corollary 3.12 in
{\L}ochowski \cite{L13}, we get that
for every interval $[0,t]$,
$f_h$ is the unique solution of the minimization problem
\begin{equation}\label{h-truncation2}
\mbox{argmin}_{g \ : \ g(0)=0, ||f-g||_{osc,[0,t]} < h} \ TV(g,[0,t]),
\end{equation}
where $||g||_{osc,[0,t]} \ = \ \sup_{x,y\in[0,t]} \ |g(x)-g(y)|$
and $TV(g,[0,t])$ is the total variation of $g$ on the interval
$[0,t]$. In other words,
$f_h$ is the function of minimal total variation
uniformly approximating the
increments of $f$ with accuracy $h$.
\footnote{Again following \cite{M13}, the $h$-cut of the function $f$ is a translation of the so-called
$h$-truncation
of $f$, as introduced and studied by {\L}ochowski \cite{L11}, \cite{L13} -- see also {\L}ochowski and Mi{\l}o\'s \cite{LM13}.}
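Although the paper works with continuous paths, a crude discrete-time approximation of $\Lambda_{0,h}$ and of the $h$-cut (\ref{h-truncation}) is easy to write down: clip the increments of a sampled path to $[0,h]$ and accumulate the two pushes. The Python sketch below is ours and is only meant as an illustration; it is not the construction used in the proofs.
\begin{verbatim}
import numpy as np

# Rough discrete-time sketch (ours): the two-sided Skorohod reflection of a
# sampled path f on [0,h], obtained by clipping the increments, together with
# the compensators c^0 (push up at 0) and c^h (push down at h).
# The h-cut is then f_h = f - Lambda_{0,h}(f).

def two_sided_reflection(f, h):
    """f: 1d array with f[0] in [0,h].  Returns (Lambda, c0, ch)."""
    lam = np.empty_like(f)
    c0 = np.zeros_like(f)
    ch = np.zeros_like(f)
    lam[0] = f[0]
    for k in range(1, len(f)):
        free = lam[k - 1] + (f[k] - f[k - 1])      # unreflected move
        lam[k] = min(max(free, 0.0), h)            # clip to [0,h]
        c0[k] = c0[k - 1] + max(0.0 - free, 0.0)   # cumulative push up at 0
        ch[k] = ch[k - 1] - max(free - h, 0.0)     # cumulative push down at h
    return lam, c0, ch

def h_cut(f, h):
    lam, _, _ = two_sided_reflection(f, h)
    return f - lam                                  # equals -c0 - ch

f = np.cumsum([0.0, 0.4, 0.5, 0.4, -0.6, -0.9, 0.3])
lam, c0, ch = two_sided_reflection(f, h=1.0)
print(np.allclose(lam, f + c0 + ch))                # True: Skorohod decomposition
print(h_cut(f, 1.0))                                # piecewise monotone
\end{verbatim}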
Our main theorem states that the contour path
of the $h$-trimming of a tree is simply given by
the $h$-cut of its original contour path.
\begin{theorem}\label{main1}
Let $f\in C_0^+({\mathbb R}^+)$ and let us assume that the $h$-trimming
of ${\cal T}_f$ is not empty.
\begin{enumerate}
\item The $h$-cut $f_h$
belongs to $C_0^+({\mathbb R}^+)$.
\item
The $h$-trimming of the
real tree $({\cal T}_f,d_f)$ is identical to the real tree $({\cal T}_{f_h},d_{f_h})$
(up to a root preserving isometry).
\end{enumerate}
\end{theorem}
To state our next result, we need to introduce some extra notations.
For a continuous function $f$ with $f(0)=0$, define $t_n(f)\equiv t_n$ (resp., $T_n(f)\equiv T_n$) to be the $n^{th}$ returning time at level $0$ (resp., $h$) of $\Lambda_{0,h}(f)$
and $s_n(f)\equiv s_n$ to be the $n^{th}$ exit time at $0$ of $\Lambda_{0,h}(f)$ as follows. $t_0=0$, and for $n\geq1$
\begin{eqnarray}
T_n & := & \inf\{ u> t_{n-1} \ : \ \Lambda_{0,h}(f)(u)= h \} \nonumber \\
t_n & := & \inf\{ u> T_{n} \ : \ \Lambda_{0,h}(f)(u)= 0 \} \nonumber \\
s_n & := & \sup\{u\in[t_{n-1},t_{n}) \ : \ \Lambda_{0,h}(f)(u)=0 \} \label{def:T}
\end{eqnarray}
with the convention that $\sup\{\emptyset\}, \inf \{\emptyset\}=\infty$.
Let $N_h(f)$ be the number of returns of $\Lambda_{0,h}(f)$ to $0$, i.e.
\begin{equation}
N_h(f) \ = \ \sup\{n: \ t_n < \infty\}. \nonumber
\end{equation}
Finally, define
\begin{eqnarray}
\forall n\geq1, & X_n(f) = & f_{h}(t_n) - f_h(s_{n}), \nonumber \\
& Y_n(f) = & f_h(t_{n-1}) - f_h(s_{n}). \label{xy}
\end{eqnarray}
As we shall see below (see Theorem \ref{PN}(2)), when $f$ is a Brownian excursion,
the quantity $X_n(f)$ (resp., $Y_n(f)$) simply
coincides with the amount of Brownian local time
accumulated by the reflected path $\Lambda_{0,h}(f)$ at $h$ (resp., $0$) on the interval $[t_{n-1},t_n]$.
\begin{proposition}\label{algo} Let $f\in C_0^+({\mathbb R}^+)$
and let us assume that the $h$-trimming
of ${\cal T}_f$ is not empty.
The $h$-trimming of ${\cal T}_f$
is equal (up to a root preserving isometry) to the tree
generated inductively according to the following algorithm -- see Fig \ref{my-tree}.
\begin{enumerate}
\item[(Step 1.)] Start with a single branch of length $X_1$.
\item[(Step n, $n\geq2$)] If $n=N_h(f)$ stop. Otherwise, let $z_{n-1}$ be the tip of the $(n-1)^{th}$ branch. On the ancestral line $[\rho,z_{n-1}]$, graft a branch of length $X_{n}$
at a distance $Y_{n}$ from the leaf $z_{n-1}$.
\end{enumerate}
\end{proposition}
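To make the algorithm concrete, the Python sketch below (ours) carries out the grafting steps for given sequences $X_n$, $Y_n$ and a given value of $N_h(f)$, recording each branch by the heights of its graft point and of its leaf along the previous leaf's ancestral line. The toy values at the end are purely illustrative and are not computed from an actual contour path.
\begin{verbatim}
# Sketch (ours) of the grafting algorithm of Proposition \ref{algo}: each
# branch is recorded as (graft height, leaf height), heights being distances
# from the root measured along the ancestral line of the previous leaf.

def trim_from_XY(X, Y, N):
    """X[n], Y[n] indexed from 1; returns a list of (graft_height, leaf_height)."""
    branches = [(0.0, X[1])]          # step 1: a single branch of length X_1
    for n in range(2, N):             # steps 2, ..., stopping when n = N_h(f)
        prev_leaf = branches[-1][1]
        graft = prev_leaf - Y[n]      # graft point on the line [root, z_{n-1}]
        branches.append((graft, graft + X[n]))
    return branches

# Toy values, purely illustrative:
X = [None, 3.0, 2.0, 1.5]
Y = [None, None, 1.0, 2.0]
print(trim_from_XY(X, Y, N=4))
# [(0.0, 3.0), (2.0, 4.0), (2.0, 3.5)]
\end{verbatim}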
\begin{figure}[ht]
\centering
\includegraphics[scale=0.3]{my-tree}
\caption{{\it Schematic representation of the algorithm generating the $h$-trimming of a tree
from the two-sided reflection of its contour path.}}
\label{my-tree}
\end{figure}
{\bf Relation with standard binary trees.}
Recall that standard binary trees have branches
(1) that have i.i.d. exponential lifetimes with mean $\alpha$, and
(2) when they die, they either give birth to two new branches, or have no offspring, with equal probability $1/2$.
The algorithm described in Proposition \ref{algo} is reminiscent of a classical construction of standard
binary trees (see e.g., \cite{LG89}),
for which $\{(X_n(f),Y_n(f))\}$ are replaced with an infinite sequence of independent exponential r.v.'s $\tilde X_1, \tilde Y_2, \tilde X_2, \tilde Y_3,\cdots$
with parameter $\alpha$ and the algorithm stops at step $\tilde N$, with
\begin{equation}
\tilde N := \inf\{n \ : \ \sum_{i=1}^{n} (\tilde X_i-\tilde Y_{i+1}) < 0 \}.
\end{equation}
(Note that this stopping condition is quite natural:
the quantity
$\sum_{i=1}^{n} (\tilde X_i-\tilde Y_{i+1})$
is the height of the $n^{th}$ intercalated branching point.
We stop the algorithm once the branching point has negative height.)
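The randomised input of this classical construction is equally easy to sample; the short Python sketch below (ours) draws the i.i.d. exponential lengths with mean $\alpha$ and applies the stopping rule defining $\tilde N$.
\begin{verbatim}
import numpy as np

# Sketch (ours): sample the exponential lengths and the stopping step
# N~ = inf{ n : sum_{i<=n} (X_i - Y_{i+1}) < 0 }.

rng = np.random.default_rng(0)

def sample_binary_tree_input(alpha, max_steps=10_000):
    X, Y, height = [None], [None, None], 0.0
    for n in range(1, max_steps):
        X.append(rng.exponential(alpha))   # X_n, mean alpha
        Y.append(rng.exponential(alpha))   # Y_{n+1}, mean alpha
        height += X[n] - Y[n + 1]
        if height < 0:                     # n-th branching point below the root
            return X, Y, n                 # stop: N~ = n
    raise RuntimeError("increase max_steps")

X, Y, N = sample_binary_tree_input(alpha=1.0)
print(N, X[1], Y[2])
\end{verbatim}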
Using Proposition \ref{algo},
we easily recover a result due to
Neveu and Pitman \cite{NP89}, relating
the $h$-trimming of the tree encoded by a Brownian excursion
with standard binary trees (see item 1. in the following theorem).
Further, the next theorem provides the joint distribution of the tree ${\cal T}_e$
and its trimmed version $\mbox{Tr}^h({\cal T}_e)$ (see item 2).
In the following, we
define
\begin{eqnarray}\label{local-time}
l^h(w)(t) & := & \lim_{\epsilon\downarrow0}\frac{1}{2\epsilon} |\{s\in[0,t] \ : \ \Lambda_{0,h}(w)(s)\in[h-\epsilon,h] \}| \nonumber \\
l^0(w)(t) & := & \lim_{\epsilon\downarrow0}\frac{1}{2\epsilon} |\{s\in[0,t] \ : \ \Lambda_{0,h}(w)(s)\in[0,\epsilon] \}|,
\end{eqnarray}
provided that those limits exist. $l^h(w)$ (resp., $l^0(w)$) will be
referred to as the local time of $\Lambda_{0,h}(w)$ at $h$ (resp., at $0$).
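Numerically, the limits in (\ref{local-time}) can be approximated by the occupation time of a thin band below level $h$, divided by $2\epsilon$. The Python sketch below (ours) does this for a sampled path; as a stand-in for $\Lambda_{0,h}(w)$ it uses the standard triangle-wave reflection of a simulated Brownian path, which is only a convenient substitute for this illustration.
\begin{verbatim}
import numpy as np

# Crude occupation-time approximation (ours) of the local time at h:
# l^h(w)(t) ~ (1 / 2 eps) * |{ s <= t : Lambda_{0,h}(w)(s) in [h - eps, h] }|,
# for a path sampled on a grid of mesh dt (the measure becomes dt * #points).

def local_time_at_h(lam, h, dt, eps):
    """lam: sampled reflected path; returns the approximation at the final time."""
    occupation = dt * np.count_nonzero((lam >= h - eps) & (lam <= h))
    return occupation / (2.0 * eps)

dt, h = 1e-4, 1.0
w = np.cumsum(np.sqrt(dt) * np.random.default_rng(1).standard_normal(200_000))
lam = h - np.abs(h - np.mod(w, 2.0 * h))   # triangle-wave reflection onto [0,h]
print(local_time_at_h(lam, h, dt, eps=0.01))
\end{verbatim}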
\begin{theorem}\label{PN}
Let $e$ be a Brownian excursion conditioned on having a height larger
than $h$.
\begin{enumerate}
\item The $h$-trimming of the tree $({\cal T}_e,d_e)$
is a standard binary tree with parameter $\alpha=h/2$.
\item For $1 \leq i\leq N_h(e)$,
\begin{itemize}
\item $X_i(e)$ a.s. coincides with the local time of $\Lambda_{0,h}(e)$ at $h$ accumulated on $[t_{i-1}(e),t_{i}(e)]$, i.e.,
$X_i(e)=l^h(e)(t_i)-l^h(e)(t_{i-1})$.
\item $Y_i(e)$ a.s. coincides with the local time of $\Lambda_{0,h}(e)$ at $0$ accumulated on $[t_{i-1}(e),t_{i}(e)]$.
\end{itemize}
\end{itemize}
\end{enumerate}
\end{theorem}
{\bf The maximum of a sticky Brownian motion.}
Our final application of Theorem \ref{main1}
relates to the sticky Brownian motion.
Given a filtered probability space
$(\Omega,{\cal G}, \{{\cal G}_t\}_{t\geq0},{\mathbb P})$,
a sticky Brownian motion, with parameter
$\theta>0$,
is defined as the adapted process
taking value on $[0,\infty)$
solving the following stochastic
differential equation (SDE):
\begin{eqnarray}
\label{sticky-bm}
d z^\theta(t) \ = \ 1_{z^\theta(t)>0} d w(t) \ + \ \theta \ 1_{z^\theta(t)=0} d t,
\end{eqnarray}
where $(w(t); \ t \geq0)$ is a standard ${\cal G}_t$-Brownian motion.
Intuitively, $z^\theta$ is driven by $w$ away from level $0$,
and gets an upward push upon reaching this level,
keeping the process away from negative values.
Sticky Brownian motion was first investigated by Feller \cite{F57}
in his study of strong Markov processes taking
values in $[0,\infty)$ that behave like Brownian motion away from 0.
We refer the reader to Varadhan
\cite{V01} for a good introduction to this object.
Ikeda and Watanabe showed that (\ref{sticky-bm})
admits a unique weak solution. The result was later
strengthened by Chitashvili \cite{C89} and
Warren \cite{W99}
who showed that
$z^\theta$ is not measurable with respect to $w$ and that, in order to construct
the process $z^\theta$, one needs to add some extra randomness
to the driving Brownian motion $w$. In \cite{W02},
Warren did exhibit this extra randomness
and showed that it can be expressed
in terms of a certain marking procedure
of the random tree induced by the reflection of the
driving Brownian motion $w$ (more on that in Section \ref{proof-sect}).
Among the first applications related to
sticky Brownian motions, we cite Yamada \cite{Y94} and Harrison and Lemoine \cite{HL81}
who
studied sticky random walks as the limit of storage processes.
More recently, Sun and Swart \cite{SS08}
introduced a new object called the Brownian net
which can be thought of as an infinite
family of one-dimensional coalescing branching Brownian motions
and in which sticky Brownian motions play an essential role (see also Newman, Ravishankar and Schertzer \cite{NRS10}).
Building on the approach of Warren \cite{W02},
and using Theorem \ref{main1}, we will show that
the law of the maximum of a sticky Brownian motion can be expressed in
terms of the local time of the two-sided reflection of its driving Brownian motion $w$ on the interval $[0,h]$.
In the following,
$\lambda_{0,h}(\cdot)$ will refer to the linear function reflected at $0$ and $h$, i.e., the function obtained by a linear interpolation of
the points
$\{(2n \cdot h,0)\}_{n\in{\mathbb Z}}$ and $\{((2n+1) \cdot h,h)\}_{n\in{\mathbb Z}}$. For any
continuous function $f$,
the {\it standard} reflection of $f$ on $[0,h]$ (as opposed to the two-sided Skorohod reflection) will refer to the transformation $\lambda_{0,h}(f)$. In \cite{W99}, the one-dimensional
distribution
of a sticky Brownian motion
conditionally on its driving process was given.
The following theorem
provides the one-dimensional distribution
of the maximum of a sticky Brownian motion
conditionally on its driving process.
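For completeness, the standard reflection $\lambda_{0,h}$ is just the triangle wave interpolating these points; a short Python version (ours) reads:
\begin{verbatim}
import numpy as np

# Standard (triangle-wave) reflection of a real number x onto [0,h]:
# linear interpolation of the points (2nh, 0) and ((2n+1)h, h).
def lambda_0h(x, h):
    return h - np.abs(h - np.mod(x, 2.0 * h))

print(lambda_0h(np.array([-0.5, 0.3, 1.2, 2.6]), 1.0))  # [0.5 0.3 0.8 0.6]
\end{verbatim}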
\begin{theorem}
\label{teo1}
Let $h>0$ and let $(z^\theta(t),w(t); t\geq0)$ be a weak solution of equation (\ref{sticky-bm})
starting at $(0,0)$.
$\Lambda_{0,h}(w)$ is distributed as a Brownian motion reflected (in the standard way) on $[0,h]$.
Furthermore,
\begin{eqnarray*}
{\mathbb P}\left( \max_{[0,t]} z^\theta\ \leq \ h \ | \ \sigma(w) \right)
\ = \ \exp\left( -2\theta \ l^h(w)(t) \right),
\end{eqnarray*}
where $l^h(w)$ is the local time at $h$ for the path $\Lambda_{0,h}(w)$ (see (\ref{local-time})).
\end{theorem}
\section{Proof of Theorem \ref{main1} and Proposition \ref{algo}}
In the following, a nested sub-excursion of the function $f\in C_0^+({\mathbb R}^+)$
will refer to any section of the path $f$ on an interval $[t_-,t_+]$
such that
$\forall t\in[t_-,t_+], \inf_{[t_-,t]} f = f(t_-)=f(t_+)$ -- see Fig \ref{fig2}. The height of such a sub-excursion is defined as $\max_{[t_-,t_+]} f - f(t_+)$.
Let $p_f$ be the canonical projection from ${\mathbb R}^+$
to ${\cal T}_f$; $p_f(t)$ can be thought of as
the position
of the exploration particle at time $t$.
By definition, $p_f(t)$ belongs to $\mbox{Tr}^h( {\cal T}_f )$ if and only if there exists
$s$ such that $\inf_{[t\wedge s, t\vee s]} f = f(t)$ and $f(s)-f(t)\geq h$, which is equivalent
to saying that $t$ is the ending time or starting time of a sub-excursion (nested in $f$) of height at least $h$ starting from level $f(t)$.
We claim that the extreme points
of $\mbox{Tr}^h({\cal T}_f)$ -- or {\it leaves} -- are contained in the set of points of the form $z=p_f(t)$, where $t$
is the time extremity of a sub-excursion of height {\it exactly} $h$.
In order to see that,
let $t$ be the time extremity of a sub-excursion of height strictly larger
than $h$. By continuity, this sub-excursion must contain a sub-excursion
of height exactly $h$. Thus, $p_f(t)$ must have at least one descendant and can not be a leaf.
As claimed earlier, this
shows that the leaves must be visited at
the time extremities of some sub-excursion of height {\it exactly} $h$.
\footnote{
Note that we only have an inclusion. For example, let $t$ be the starting time
of a sub-excursion of height exactly $h$,
and let $t_e$ be the ending time of this excursion. If $t_e$
is the starting time of another sub-excursion of height $>h$, then
$p_f(t)$ is not a leaf of the $h$-trimming of the tree.}
Let us now define
inductively $\{\tau_n(f)\}_{n\geq 0}\equiv\{\tau_n\}_{n\geq 0}$,
$\{\theta_{n}(f)\}_{n\geq1}\equiv \{\theta_{n}\}_{n\geq1}$ and $\{\sigma_{n}(f)\}_{n\geq0}\equiv \{\sigma_{n}\}_{n\geq0}$
as follows : $\tau_0=0$, $\sigma_0=0$ and
\begin{eqnarray}
\theta_{n+1} & = & \inf\{s > \tau_n: \ f(s) -h = \inf_{[\tau_n,s]} f\}, \nonumber \\
\tau_{n+1} & = & \inf\{s > \theta_{n+1} : \sup_{[\theta_{n+1},s]} f = f(s)+h\}, \nonumber \\
\sigma_{n+1} & = & \sup\{s\in[\tau_n,\tau_{n+1}] : f(s) = \inf_{[\tau_n,s]} f\}, \label{t-s}
\end{eqnarray}
with the convention that $\inf\{\emptyset\},\sup\{\emptyset\}=\infty$. As already noted in Neveu and Pitman \cite{NP89} (although under a slightly different form),
the sequences $\{\tau_n\}_{n\geq1}$ and $\{\sigma_{n}\}_{n\geq1}$ play a key role in the tree $\mbox{Tr}^h({\cal T}_f)$, being respectively related to the exploration times for the leaves and the branching
points.
First,
the reader can easily convince herself that the set of finite $\{\tau_n\}$ coincides
with
the completion times of all the sub-excursions nested in the function $f$ which are exactly of height $h$
(see \cite{NP89} for more details).
As already discussed,
this implies that $\{p_f(\tau_n)\}_{n\geq1}$
contains the set of leaves of the tree $\mbox{Tr}^h({\cal T}_f)$.
Secondly, the very definition of the $\sigma_i$'s implies that for every $m<n$, $\inf_{[\tau_m,\tau_{n}]} f = f(\sigma_{k})$
for some $k\in\{m,\cdots,n\}$. From
(\ref{ionf}), this implies that
$p_f(\tau_n) \wedge p_f(\tau_{m})$ -- the most recent common ancestor of $p_f(\tau_n)$ and $p_f(\tau_{m})$ -- is given by
some $p_f(\sigma_{k})$.
We now show that the times $\sigma_n(f)$ and $\tau_n(f)$ also
appear quite naturally in the two-sided Skorohod reflection. Recall from the introduction that $t_n$ (resp., $s_n$)
refers to the $n^{th}$ returning time (resp., exit time) of $\Lambda_{0,h}(f)$ at level $0$ (see (\ref{def:T})).
\begin{proposition}
\label{rtt} For every continuous function $f$ with $f(0)=0$
and
for every $n\geq 1$,
\begin{enumerate}
\item
$\tau_n(f)$ is the $n^{th}$ returning time to level 0 of $\Lambda_{0,h}(f)$, i.e., $\tau_n(f)=t_n(f)$.
\item
$\sigma_n(f)$ is the $n^{th}$ exit time at level 0 of $\Lambda_{0,h}(f)$, i.e., $\sigma_n(f)=s_n(f)$.
\item The function $f_h=f-\Lambda_{0,h}(f)$ is non-decreasing (resp., non-increasing) on $[\sigma_{n+1}(f),\tau_{n+1}(f)]$ (resp., on $[\tau_n(f),\sigma_{n+1}(f)]$).
In particular, $\{f_h(\sigma_i)\}$ (resp., $\{f_h(\tau_i)\}$) coincide with the local minima (resp., local maxima) of
$f_h$.
\end{enumerate}
\end{proposition}
As we shall see, this proposition is a consequence of elementary results on the two-sided
Skorohod reflection that we now present.
We start by introducing some notation:
$$
\forall f, \ \forall t>0, \ R^T(f)(t) \ := \ 1_{t\geq T} \left(f(t) - f(T)\right).
$$
In other words, $R^T(f)$ is constant on $[0,T]$
and follows the variation of $f$ afterwards.
The next elementary lemma states that
the reflection of a path
can be obtained by successively reflecting the path up to some $T$
and then reflecting the remaining portion of the path from $T$ to $\infty$.
\begin{lemma}\label{two-steps-ref}
For any continuous function $f$ with $f(0)\in[0,h]$ and $T\geq0$,
\begin{eqnarray*}
\forall t < T , \ \ \Lambda_{0,h}(f)(t) & = & \Lambda_{0,h}\left(f(\cdot\wedge T)\right)(t), \\
\forall t\geq T, \ \ \Lambda_{0,h}(f)(t) & = & \Lambda_{0,h}\left(R^T(f) + \Lambda_{0,h}(f)(T) \right)(t). \label{wws}
\end{eqnarray*}
\end{lemma}
\begin{proof}
In the following, we write
\begin{eqnarray*}
L^T(f)(t) & := & R^T(f)(t) + \Lambda_{0,h}(f)(T),
\end{eqnarray*}
and for any continuous function $F$ with $F(0)\in[0,h]$, we denote by $(c^0(F),c^h(F))$ the pair
of compensators solving the Skorohod equation for the two-sided reflection of $F$
on the interval $[0,h]$.
For $y=0,h$, define
$$
\forall t\geq0, \ \ \ \tilde c^y(t) \ := \ c^y(f({\cdot\wedge T}))({t}) \ + \ c^y(L^T(f))(t).
$$
We will show that
$(\tilde c^0,\tilde c^h)$ solves the two-sided Skorohod equation for $f$.
We first need to prove that the function $G(t):=f(t) \ + \ \tilde c^h(t) + \tilde c^0(t)$
is valued in $[0,h]$. First
\begin{eqnarray*}
G(t) & = & \left(f({t\wedge T}) + \sum_{y=0,h} c^y(f({\cdot \wedge T}))(t)\right)
\ + \ \left( L^T(f)(t) + \sum_{y=0,h} c^y(L^T(f))(t) \right) - \Lambda_{0,h}(f)(T)\\
& = & \Lambda_{0,h}(f({\cdot\wedge T}))(t) \ + \ \Lambda_{0,h}\circ L^T(f)(t) \ - \Lambda_{0,h}(f)(T) \\
& = & \ \Lambda_{0,h}(f)({t\wedge T}) \ + \ \Lambda_{0,h}\circ L^T(f)(t) - \Lambda_{0,h}(f)(T)
\end{eqnarray*}
where the first equality follows from the fact that $f(t) = f({t\wedge T}) + L^T(f)(t) - \Lambda_{0,h}(f)(T)$
and the last equality only states that the reflection of the function
$f({\cdot\wedge T})$ (the function $f$ ``stopped'' at $T$)
is the reflection of $f$ stopped at $T$ (this can directly be checked
from the definition
of the two-sided Skorohod reflection).
The function $L^T(f)$ is constant and equal to $\Lambda_{0,h}(f)(T)$
on the interval $[0,T]$. This easily implies that its reflection
is also identically $\Lambda_{0,h}(f)(T)$ on the same interval.
Thus, the latter equality implies that
\begin{eqnarray}\label{htt}
G(t) := \left\{ \begin{array}{ll}
\Lambda_{0,h}(f)({t}) & \mbox{if $t < T $},\\
\Lambda_{0,h}\circ L^T(f)(t)& \mbox{otherwise,}\end{array} \right. \label{barxi}
\end{eqnarray}
(\ref{htt}) implies that $G(t)$
belongs to $[0,h]$, hence proving that the first
requirement of the Skorohod equation (see Theorem \ref{Skor}) is satisfied.
The second requirement -- the function $\tilde c^{h}$ (resp., $\tilde c^0$) is non-increasing (resp., non-decreasing) --
is obviously satisfied since the function $\tilde c^h$ (resp., $\tilde c^0$)
is constructed out of a compensator at $h$ (resp., at $0$).
Finally, for $y=0,h$, we need to show that the support of the measure
$d \tilde c^y$ is included in the set $G^{-1}(\{y\})$. In order to see
that,
we use the fact that
the supports of the compensators $dc^{y}(L^T(f))$
and $d c^y(f({\cdot \wedge T}))$
are respectively included in $[T,\infty]$
and $[0,T]$ --- using the fact that if a function $g$ is constant
on some interval, its compensator does not vary on this interval.
As a consequence, for $y=0,h$
$$
\forall t\in [0,T], \ \ d \tilde c^y(t) \neq 0 \Longleftrightarrow d c^y(f({\cdot\wedge T}))(t) \neq 0.
$$
Further, $d c^y(f({\cdot\wedge T}))(t) \neq 0$ only for $t$ such that $\Lambda_{0,h}(f({\cdot\wedge T}))(t) = y$.
Since $\Lambda_{0,h}(f({\cdot\wedge T}))$ and $G$ coincide on $[0,T]$ (by (\ref{barxi})),
we get that on $[0,T]$ the compensator $\tilde c^y$ only varies on $G^{-1}(\{y\})$.
By an analogous argument, one can show that the same holds on the interval $[T,\infty]$.
Hence,
the third and final requirement of the Skorohod equation holds for $\tildelde c^y$, $y=0,h$.
This shows that $(\tilde c^0,\tilde c^h)$
solves the two-sided Skorohod equation for $f$. Combining this with (\ref{barxi})
ends the proof of our lemma.
\end{proof}
Let $h\geq0$.
For any continuous function $f$ with $f(0)\leq h$, let us define the one-sided reflection (with downward push) at $h$ -- denoted by $\Gamma^h(f)$ -- as
$$
\Gamma^h(f)(t) := f(t)-(\sup_{[0,t]} f - h )\vee0.
$$
Along the same lines as the one-sided reflection at $0$
(as introduced in (\ref{one-sided})), the function $c(t)=-(\sup_{[0,t]} f - h )\vee0$
can be interpreted as
the minimal amount of downward push necessary to keep
the path $f$ below level $h$. More precisely,
this function is easily seen to be the only continuous
function $c$ with $c(0)=0$ satisfying the following requirements: (1) $f+c\leq h$, (2) $c$ is non-increasing, and (3) $c$
does not vary off the set $\{t \ : \ f(t)+c(t) =h \}$.
\begin{lemma}\label{pr-s}
For every $T\geq0$ and every continuous function $F$
with $F(0)\in[0,h]$
such that $\Gamma^h(F)\geq 0$ (resp., $\Gamma^0(F)\leq h$) on $[0,T]$, we must have
$$
\forall t\in[0,T], \ \ \Lambda_{0,h}(F)(t) \ = \ \Gamma^h(F)(t) \ \ (\mbox{resp., } \Lambda_{0,h}(F)(t) \ = \ \Gamma^0(F)(t)).
$$
$$
\end{lemma}
\begin{proof}
Let us consider a continuous $F$ with $F(0)\in[0,h]$ and
such that $\Gamma^h(F)\geq 0$ on $[0,T]$.
Let us prove that $\Lambda_{0,h}(F)=\Gamma^{h}(F)$ on $[0,T]$.
We aim at showing that $(0,-(\sup_{[0,t]}F-h)^+)$
coincides with the pair of compensators of $F$ on the time interval $[0,T]$.
First,
$$
\Gamma^h(F) \ = \ F \ + \ 0 \ + (-(\sup_{[0,t]}F-h)^+)
$$
belongs to $[0,h]$ since $\Gamma^h(F)\leq h$ and under the conditions of our lemma $\Gamma^h(F)\geq0$. Secondly,
using the fact that $-(\sup_{[0,t]}F-h)^+$
is the compensator for the one-sided case (at $h$), this function is non-increasing and only decreases
when $\Gamma^h(F)$ is at level $h$. This shows that $\Gamma^h(F)$ coincides with
the two-sided reflection of $F$
on the interval $[0,h]$. The case $\Gamma^0(F)\leq h$ can be handled similarly.
\end{proof}
\begin{proof}[Proof of Proposition \ref{rtt}] We will proceed by induction on $n$.
{\bf Step 1.}
We first claim that $\sigma_1\leq \theta_1$. When $\theta_1=\infty$, this is obvious. Let us assume that $\theta_1<\infty$
and, by contradiction, that $\sigma_1>\theta_1$. The definition
of $\theta_1$ implies that
$\Gamma^0(f)(\theta_1)=h$ and thus $\theta_1$
belongs to an excursion of $\Gamma^0(f)$ away from $0$ (of height at least $h$),
whose interval we denote by $[t_-,t_+]$.
Since $\sigma_1$ was defined as the last visit at $0$
of $\Gamma^0(f)$ before time $\tau_1$ (see (\ref{t-s})) and $\sigma_1$
is assumed to be greater than $\theta_1$,
$\sigma_1\geq t_+$ and the excursion of $\Gamma^0(f)$ on $[t_-,t_+]$
must be completed before $\tau_1$.
On the other hand,
\begin{eqnarray*}
h & = & \left(f(\theta_1) - \inf_{[0,\theta_1]} f\right) \ - \ \left(f(t_+)-\inf_{[0,t_+]} f\right) \\
& = & f(\theta_1) - f(t_+) \\
& \leq & \sup_{[\theta_1,t_+]} f - f(t_+),
\end{eqnarray*}
where we used the fact that $\inf_{[0,t]} f$ must be constant during an excursion of $\Gamma^0(f)$ away
from $0$ in the second equality.
By continuity of $f$, there must exist $s\in[\theta_1 ,t_+]$
such that $\sup_{[\theta_1,s]}f -f(s)=h$, which implies that $\tau_1 \leq t_+$, thus yielding a contradiction
and proving that
$\sigma_1\leq \theta_1$.
Next,
the strategy for proving our proposition consists in breaking
the intervals $[0,\tau_1]$ into three pieces: $[0,\ensuremath{\sigma}gmama_1]$, $[\ensuremath{\sigma}gmama_1,\theta_1]$ and $[\theta_1,\tau_1]$.
First, on $[0,\ensuremath{\sigma}gmama_1]$, we must have $\Gamma^{0}(f) < h$ since $\ensuremath{\sigma}gmama_1< \theta_1$, and $\theta_1$
was defined as the first time $\Gamma^0(f)(t)=h$.
By Lemma \ref{pr-s},
this implies that
\begin{eqnarray}
\forall t\in [0,\sigma_1],\ \Lambda_{0,h}(f)(t)=\Gamma^0(f)(t) = f(t)-\inf_{[0,t]} f, \ \ \ \mbox{and} \ \Lambda_{0,h}(f)(\sigma_1)=0, \label{6-1}
\end{eqnarray}
where the latter equality follows directly
from the definition of $\sigma_1$.
Next, by Lemma \ref{two-steps-ref}, we must have
\begin{eqnarray*}
\forall t\in[\sigma_1,\infty), \ \Lambda_{0,h}(f)(t) & = & \Lambda_{0,h}\big(1_{\cdot\geq\sigma_1}(f - f(\sigma_1))\big)(t).
\end{eqnarray*}
Using the fact that
$\inf_{[0,t]} f$
remains constant during an excursion of $\Gamma^0(f)$ away from $0$
and the fact that
$f-\inf_{[0,\cdot]}f<h$ on $[0,\sigma_1]$,
it is easy to see that
$\theta_1$
coincides with the first visit of $1_{\cdot\geq\sigma_1}(f(\cdot)-f(\sigma_1))$ at $h$.
Furthermore, since $\sigma_1$ is the {\it last} visit at $0$
of $\Gamma^0(f)$ before $\tau_1$, we must have
$$
\forall t\in (\sigma_1,\tau_1), \ \ \ f(t)-\inf_{[0,t]}f = f(t)-f(\sigma_1)>0.
$$
In particular, $f-f(\sigma_1)\in(0,h]$ on the interval
$(\sigma_1,\theta_1]$ and thus $(0,0)$ solves the Skorohod equation for $1_{\cdot\geq\sigma_1}(f-f(\sigma_1))$ on this interval. This yields
\begin{equation}
\forall t\in(\sigma_1,\theta_1), \ \Lambda_{0,h}(f)(t) \ = f(t) - f(\sigma_1) >0.
\end{equation}
Finally, using $\Lambda_{0,h}(f)(\theta_1)=h$, Lemma \ref{two-steps-ref} implies that
\begin{eqnarray}
\forall t\in[\theta_1,\tau_1], \ \ \Lambda_{0,h}(f)(t)
& = & \Lambda_{0,h}\big(h + 1_{\cdot\geq \theta_1} (f(\cdot) - f(\theta_1))\big)(t). \label{bordel}
\end{eqnarray}
A straightforward computation yields
\begin{eqnarray*}
\forall t\in[\theta_1,\tau_1], \ \Gamma^h\left(h+1_{\cdot\geq \theta_1} (f(\cdot) - f(\theta_1))\right)(t) & = & f(t) + h - \sup_{[\theta_1,t]} f.
\end{eqnarray*}
By definition
of $\tau_1$, the RHS of this equality must remain positive on $[\theta_1,\tau_1)$. Using Lemma \ref{pr-s},
we get that
\begin{eqnarray}
\forall t\in[\theta_1,\tau_1), \ \ \Lambda_{0,h}(f)(t) \ = \ \Gamma^h(f)(t) \ = f(t) +h - \sup_{[\theta_1,t]} f >0, \nonumber \\
\mbox{and} \ \ \Lambda_{0,h}(f)(\tau_1) \ = 0,
\label{6-3}
\end{eqnarray}
where the second equality follows from the very definition of $\tau_1$.
Finally, combining (\ref{6-1})--(\ref{6-3}) yields
$$
\Lambda_{0,h}(f)(t) = f(t) \ - \ \left(1_{t\in[0,\sigma_1]} \cdot \inf_{[0,t]} f \ + \ 1_{t\in(\sigma_1,\theta_1]} \cdot f(\sigma_1) \ + \ 1_{t\in(\theta_1,\tau_1]}\cdot (\sup_{[\theta_1,t]}f -h) \right).
$$
As a consequence, $f_h=f-\Lambda_{0,h}(f)$ is non-increasing (resp., non-decreasing) on $[0,\sigma_1]$ (resp., $[\sigma_1,\tau_1]$). Furthermore,
the argument above also shows that $\tau_1$ (resp., $\sigma_1$) is the first returning time (resp., exit time) at level $0$. Indeed, piecing together the previous results, we proved that
\begin{eqnarray*}
\Lambda_{0,h}(f)(t)<h \ \mbox{ on $[0,\sigma_1)$, and } \Lambda_{0,h}(f)(\sigma_1)=0, \\
\Lambda_{0,h}(f)>0 \ \ \mbox{on $(\sigma_1,\tau_1)$}, \ \ \mbox{and} \ \ \Lambda_{0,h}(f)(\theta_1)=h, \ \Lambda_{0,h}(f)(\tau_1)=0.
\end{eqnarray*}
{\bf Step n+1.} Let us assume that Proposition \ref{rtt} is valid up to rank $n$. Recall that $R^{\tau_n}(f)=1_{t\geq\tau_n}(f(t)-f(\tau_n))$.
By Lemma \ref{two-steps-ref},
$$
\forall t\in[\tau_n, \infty), \ \Lambda_{0,h}(f)(t) = \Lambda_{0,h}(R^{\tau_n}(f))(t),
$$
where we used the induction
hypothesis to write $\Lambda_{0,h}(f)(\tau_n)=0$. On the other hand, it is straightforward to check from the
definitions of $\tau_{n+1},\theta_{n+1}$ and $\sigma_{n+1}$ in (\ref{t-s}) that
$$
\sigma_{n+1}(f) \ = \ \sigma_1(R^{\tau_n}(f)),\ \ \tau_{n+1}(f) \ = \ \tau_{1}(R^{\tau_n}(f)), \ \ \theta_{n+1}(f) = \theta_{1}(R^{\tau_n}(f)).
$$
Applying the case $n=1$ to the function $R^{\tau_{n}}(f)$ immediately implies that our proposition is valid at step $n+1$.
\end{proof}
In order to prove Theorem \ref{main1},
we will combine Proposition \ref{rtt} with the following lemma.
\begin{lemma}\label{isot}
Let $(\cT_1,d_1)$ and $(\cT_2,d_2)$ be two rooted real trees
with only finitely many leaves. For $k=1,2$, let $S_k=(z_1^k,\cdots,z_N^k)$ be a finite sequence of points of $\cT_k$
such that the two following conditions hold.
\begin{enumerate}
\item For $k=1,2$, $S_k$ contains the leaves of $\cT_k$.
\item
$$
\forall i\leq N, \ d_1(\rho^1,z_i^1)=d_2(\rho^2,z_i^2) \ \mbox{and} \ \forall i,j \leq N, \ d_1(\rho^1, z_i^1\wedge z_j^1)=d_2(\rho^2, z_i^2 \wedge z_j^2),
$$ where $\rho^k$ is the root of $\cT_k$.
\end{enumerate}
Under those conditions, there exists a root preserving isometry
from $\cT_1$ onto $\cT_2$.
\end{lemma}
\begin{proof}
For $k=1,2$ and $m\leq N$, let $I_m^k:=[\rho^k,z_m^k]$.
Using the second assumption of our lemma, the two ancestral lines
$I_m^1$ and $I_m^2$ must have the same length.
From there, it is easy to construct an isometry from $I_m^1$ onto $I_m^2$ as follows. Since $\cT_k$ is a tree,
there exists a unique isometric map $\psi^{k}_m$ from
$[0,d_k(\rho^k,z_m^k)]$ onto $I_m^k$,
such that $\psi^{k}_m(0)=\rho^k$ and $\psi^{k}_m(d_k(\rho^k,z_m^k))=z^k_m$.
Define
$$
\forall a\in I^1_m, \ \ \phi_m(a) \ := \ \psi^{2}_m\circ( \psi^{1}_m )^{-1}(a).
$$
Since $d_1(\rho^1,z_m^1)= d_2(\rho^2,z_m^2)$, it is straightforward to show that
$\phi_m$ defines an isometric isomorphism from $I^1_m$ to $I^2_m$.
Furthermore,
$\phi_m$ preserves the root and $\phi_m(z^1_m)=z^2_m$.
Next, we claim that if $a\in I_m^1\cap I_l^1$, then $\phi_m(a)=\phi_l(a)$.
We first show the property for $a=z_m^1\wedge z_l^1$.
Using the isometry of $\phi_m$ and the root preserving property,
we have
\begin{equation}qnn
d_2(\rho^2, \phi_m(z_l^1\wedge z_m^1)) & = & d_1(\rho^1, z_l^1 \wedge z_m^1) \\
& = & d_2(\rho^2, z_l^2 \wedge z_m^2),
\end{equation}qnn
where the second equality follows from the second assumption of our lemma.
Since $\phi_m(a)\in I_m^2$, it follows that $\phi_m(z_l^1\wedge z_m^1) = z_l^2\wedge z_m^2$ -- on a segment $I_m^k$, a point is uniquely determined by its distance to the root. By the same reasoning, we get that
$\phi_l(z_l^1\wedge z_m^1) = z_l^2\wedge z_m^2$. Let us now take any point $a\in I_m^1\cap I_l^1$.
Under this assumption, we must have $a\preceq z_m^1\wedge z_l^1$.
Using the isometry property, this implies
$$
d_2(\rho^2,\phi_m(a)) \ =\ d_2(\rho^2,\phi_l(a))
$$
and
$$
\phi_l(a), \phi_m(a) \ \preceq \ z^2_m\wedge z^2_l,
$$
since we showed that $\phi_m( z^1_m\wedge z^1_l) = \phi_l( z^1_m\wedge z^1_l) = z^2_m\wedge z^2_l$.
It easily follows that $\phi_m(a)=\phi_l(a)$, as claimed earlier.
We are now ready to construct the isometry from $\cT_1$ onto $\cT_2$.
First, for $k=1,2$, any point $a_k\in \cT_k$ must belong to some ancestral line of the form $[\rho^k,l]$,
for some leaf $l$ in the tree $\cT_k$.
By what we just proved, and since $S_1$ contains all the leaves
of $\cT_1$,
we can define the map $\phi$ from $\cT_1$ into $\cT_2$ as follows
$$
\forall a\in \cT_1, \ \ \phi(a) \ := \ \phi_m(a) \ \ \mbox{if $a\in I_m^1$}.
$$
Since $S_2$ contains all the leaves of $\cT_2$, and
any ancestral line of the form $[\rho^1,z^1_m]$
is mapped onto $[\rho^2,z_m^2]$, the map $\phi$ is onto.
It remains to show that $\phi$ is isometric.
Let $a,b\in \cT_1$ and let us distinguish between two cases. First, let us assume that $a$ and $b$ belong to the same
ancestral line $I_m^1$ for some $m\leq N$. Under this assumption, the property simply follows from the isometry of $\phi_m$.
Let us now consider the case
where $a$ and $b$ belong to two distinct ancestral lines:
$a\in I_m^1$ but $a\notin I_l^1 $,
and $b\in I_l^1$ but $b\notin I_m^1$, in such a way that $a\wedge b=z_l^1\wedge z_m^1$. Using the fact that both
$\phi_m$ and $\phi_l$ are isometric,
that $\phi(z_l^1\wedge z_m^1)=z_l^2\wedge z_m^2$, and that $\phi_m(a)\in I_m^2,\ \phi_l(b)\in I_l^2$, we get that
$\phi(a) \wedge \phi(b)=z_l^2\wedge z_m^2 $.
We can then write $[\phi(a),\phi(b)]$ as the union $[z_l^2\wedge z_m^2, \phi(a)]\cup[z_l^2\wedge z_m^2, \phi(b)]$ and
write
\begin{equation}qnn
d_2(\phi(a),\phi(b)) & = & d_2(z_l^2\wedge z_m^2,\phi(a))+d_2(z_l^2\wedge z_m^2,\phi(b)) \\
& = & d_1(z_l^1\wedge z_m^1,a)+d_1(z_l^1\wedge z_m^1,b) \\
& = & d_1(a,b)
\end{equation}qnn
where the second equality follows by applying the previous case to the pairs of
points $(z_l^1\wedge z_m^1,a)$ and
$(z_l^1\wedge z_m^1,b)$.
\end{proof}
In the following,
we make the assumption that the $h$-trimming
of the tree $\cT_f$
is non-empty,
i.e., that $\sup_{[0,\infty)} f \geq h$.
\begin{proof}[Proof of Theorem \ref{main1}]
Recall that $C_0^+({\mathbb R}^+)$
denotes the set of continuous non-negative functions
with $f(0)=0$
and compact support.
We start by showing the first item of our theorem, i.e., that
$f\in C_0^+({\mathbb R}^+)$
implies that $f_h\in C_0^+({\mathbb R}^+)$.
First, as an easy corollary of Proposition \ref{rtt}, we get that for every $f\in C_0^+({\mathbb R}^+)$,
the function $f_h=f-\Lambda_{0,h}(f)$ is non-negative. This simply follows from the fact
that
the local minima of $f_h$ are attained on the set $\{\sigma_i\}$, on which
$f(\sigma_i)=f_h(\sigma_i)$ since $\Lambda_{0,h}(f)(\sigma_i)=0$. Since $f(\sigma_i)\geq0$, the function
$f_h$ is non-negative. Secondly, the function $f_h$ must have compact support. In order to see
that, let us take $K$ such that $\forall t\geq K, f(t)=0$. For $t\geq K$,
$f_h(t)=-\Lambda_{0,h}(f)(t)$ and, since $f_h\geq0$ and $-\Lambda_{0,h}(f)\leq0$, it
follows that $f_h\equiv0$ after time $K$.
Next, let us show that
$f_h$
is the contour function of the $h$-trimming of the tree $\cT_f$ (up to
an isometric isomorphism preserving the root).
For $k<N_h(f)$, Proposition \ref{rtt}
immediately implies that the maximum of $f_h$ on $[\sigma_{k},\sigma_{k+1})$
is attained at time $\tau_k$ and that
the set
$$
I_k:=\{t \in [\sigma_k,\sigma_{k+1}] \ : \ f_h(t)=f_h(\tau_k) \ \}
$$
is a closed interval. On the one hand, any time $t\in[\sigma_k,\sigma_{k+1}]$
outside of this interval
is the starting or ending time of a sub-excursion with (strictly) positive height, and for such $t$, $p_{f_h}(t)$ cannot be a leaf. On the other hand,
we have $p_{f_h}(t)=p_{f_h}(t')$ for $t,t'\in I_k$. This implies that the only possible leaf visited during the time interval $[\sigma_{k},\sigma_{k+1}]$
is given by $p_{f_h}(\tau_k)$ and thus that
the set of leaves of $\cT_{f_h}$ is included in the finite set of points $\{p_{f_h}(\tau_n)\}_{n\leq N_h(f)}$.
\medskip
As explained at the beginning of this section, any leaf of the tree ${\mathbb T}r(\cT_f)$
must be explored at some $\tau_n$, i.e., the set of leaves
of ${\mathbb T}r(\cT_f)$
is a subset of $\{p_f(\tau_n)\}_{n\leq N_h(f)}$.
In order to prove our result, we use Lemma \ref{isot}
with $z^1_i=p_f(\tau_i)$ and $z_i^2=p_{f_h}(\tau_i)$
and $N=N_h(f)$.
First,
item 1 of Proposition \ref{rtt} implies that the heights
of the vertices $p_f(\tau_i)$ and $p_{f_h}(\tau_i)$ are identical, i.e., that $f(\tau_i)=f_h(\tau_i)$,
since $\Lambda_{0,h}(f)(\tau_i)=0$.
In order to show that ${\mathbb T}r(\cT_f)$
and $\cT_{f_h}$ are identical (up to a root preserving isomorphism),
it is sufficient to check that
$$
\forall i < j, \ \ \inf_{[\tau_i,\tau_j]} f = \inf_{[\tau_i,\tau_j]} f_h,
$$
i.e., that the height of the most recent common ancestor of the vertices visited at $\tau_i$ and $\tau_j$ -- respectively
the $i^{th}$ and $j^{th}$ leaf --
is the same in both trees.
To justify the latter relation, we first note that
the definition of the $\sigma_m$'s (see (\ref{t-s})) implies that
$\inf_{[\tau_i,\tau_j]} f$ must be attained at some $\sigma_k$ (for some
$k\in\{i+1,\cdots,j\}$). On the other hand, the third item of
Proposition \ref{rtt} implies that
the same must hold for $f_h$, since the set of local minima of $f_h$ coincides with $\{f_h(\sigma_i)\}$.
Since
$
f(\sigma_i)=f_h(\sigma_i)
$
($s_i=\sigma_i$ by the second item of Proposition \ref{rtt} and $\Lambda_{0,h}(f)(s_i)=0$), Theorem \ref{main1} follows.
\end{proof}
\begin{proof}[Proof of Proposition \ref{algo}]
Let us define
$$
\cT_{f_h}^n \ := \ \{z\in\cT_{f_h}: \ \exists t\leq \tau_n, \ p_{f_h}(t)=z\},
$$
the set of vertices in $\cT_{f_h}$ visited up to time $\tau_n$.
$\cT_{f_h}$ can be constructed recursively by adding to $\cT^n_{f_h}$
all the vertices in $\cT^{n+1}_{f_h}\setminus \cT^{n}_{f_h}$ for every $n<N$,
where $N\equiv N_h(f)$.
(Indeed, by definition of a real tree from its contour path,
if a point is visited at a given time, its ancestral line must have been explored before
that time. Thus, if all the leaves have been explored at a given time -- e.g., at $\tau_N$-- every vertex has been visited
at least once before that.)
For every $n<N$, let us show that the set $\cT^{n+1}_{f_h}\setminus \cT^{n}_{f_h}$ is a branch (i.e., a segment $[a,b]$
with $a,b\in{\cT_{f_h}}$ and $a\preceq b$) such that
\begin{enumerate}
\item[(i)] its tip is $b=p_{f_h}(\tau_{n+1})$ -- i.e.,
$\forall z\in \cT_{f_h}^{n+1}\setminus \cT^{n}_{f_h}, \ z\preceq p_{f_h}(\tau_{n+1})$;
\item[(ii)] it is attached to $a=p_{f_h}(\sigma_{n+1})$ -- i.e., $\forall z\in \cT_{f_h}^{n+1}\setminus \cT^{n}_{f_h}, \ p_{f_h}(\sigma_{n+1}) \preceq z$;
\item[(iii)] $p_{f_h}(\sigma_{n+1})$ belongs
to the ancestral line
$[\rho_{f_h},p_{f_h}(\tau_n)]$.
\end{enumerate}
Let $n<N$ and let $t\in[\tau_n,\tau_{n+1}]$.
The definition of the real tree ${\cal T}_{f_h}$ implies
that
the point $p_{f_h}(t)$
has been visited before $\tau_n$ if and only if there exists $s\leq\tau_n$ such that
\begin{equation}\label{gt-gs}
\inf_{[s,t]} f_h = f_h(t) = f_h(s),
\end{equation}
i.e., $t$ must be the ending time of a sub-excursion straddling $\tau_{n}$.
On the one hand,
since $f_h$ is non-increasing on
$[\tau_n,\sigma_{n+1}]$, we have
\begin{equation}\label{anc}
\forall t\in[\tau_n,\sigma_{n+1}],\ \inf_{[\tau_{n},t]} f_h = f_h(t).
\end{equation}
Since $f_h(0)=0$ and $f_h(t)\geq 0$,
one can find $s\leq \tau_n$ such that (\ref{gt-gs}) is satisfied (using the continuity of $f_h$).
Thus, every point visited on the time interval $[\tau_n,\sigma_{n+1}]$ has already been visited before $\tau_{n}$ and thus does not belong to $\cT_{f_h}^{n+1}\setminus \cT_{f_h}^{n}$.
On the other hand,
the function $f_h$ is non-decreasing on $[\sigma_{n+1},\tau_{n+1}]$. Let us define
$$
\bar \theta_{n+1}=\sup\{t\in[\sigma_{n+1},\tau_{n+1}] \ : \ f_h(t)=f_h(\sigma_{n+1}) \}
$$
(with the convention $\sup\{\emptyset\}=\tau_{n+1}$). First,
the definition of our real tree ${\cal T}_{f_h}$ implies that, for
any $t\in[\sigma_{n+1}, \bar \theta_{n+1}]$, the point $p_{f_h}(t)$
coincides with $p_{f_h}(\sigma_{n+1})$. Secondly, for any $t\in (\bar \theta_{n+1},\tau_{n+1}]$ and any $s\leq\tau_{n}$,
$$
\inf_{[s,t]} f_h \leq f_h(\sigma_{n+1})<f_h(t),
$$
which implies that any point visited during the interval $(\bar \theta_{n+1},\tau_{n+1}]$ belongs to ${\cal T}_{f_h}^{n+1}\setminus {\cal T}_{f_h}^n$. Furthermore,
the previous inequality implies that
\begin{equation}\label{deux}
\forall t\in(\bar \theta_{n+1},\tau_{n+1}], \ \ p_{f_h}(t)\succ p_{f_h}(\sigma_{n+1}).
\end{equation}
Finally,
$$
\forall t\in[\bar \theta_{n+1},\tau_{n+1}],\ \inf_{[t,\tau_{n+1}]} f_h = f_h(t),
$$
which implies that
\begin{equation}\label{trois}
\forall t\in[\bar \theta_{n+1},\tau_{n+1}], \ \ p_{f_h}(t) \preceq p_{f_h}(\tau_{n+1}).
\end{equation}
Combining the results above, we showed the claims (i)--(iii) made earlier:
$\cT_{f_h}^{n+1}\setminus \cT_{f_h}^{n}$ is a branch
with tip $p_{f_h}(\tau_{n+1})$ (see (\ref{trois})) attached at
the point $p_{f_h}(\sigma_{n+1})$ (see (\ref{deux})), which belongs to $[\rho_{f_h},p_{f_h}(\tau_n)]$ (see (\ref{anc})
applied to $t=\sigma_{n+1}$).
Furthermore,
the length of the branch is given by
\begin{equation}\label{xn}
X_{n+1} := f_h(\tau_{n+1}) - f_h(\sigma_{n+1}),
\end{equation}
(height of the $(n+1)^{th}$ leaf $-$ height of the attachment point) and the distance of the attachment point from the leaf $p_{f_h}(\tau_n)$
is given by
\begin{equation}\label{yn}
Y_{n+1} := f_h(\tau_{n}) - f_h(\sigma_{n+1}).
\end{equation}
(Height of the $n^{th}$ leaf $-$ height of the attachment point.)
\end{proof}
\section{Proof of Theorem \ref{PN}}
Let $e$ be a Brownian excursion conditioned on having a height greater than $h$
and let $\{(X_n(e),Y_n(e))\}_{n\leq N_h(e)}$ be defined as in (\ref{xy}), i.e.,
\begin{eqnarray*}
\forall n\geq1, \ \ X_n(e) & = & e_h(t_{n}) - e_h(s_{n}), \nonumber \\
Y_n(e) & = & e_h(t_{n-1})-e_h(s_{n}),
\end{eqnarray*}
with $t_n\equiv t_n(e)$, $s_n\equiv s_n(e)$, and let $N_h(e)$ be the number of returns of $e$ to level $0$.
As discussed in the introduction (see the discussion preceding Theorem \ref{PN}), in order to prove that the trimmed tree ${\mathbb T}r({\cal T}_e)$
is a binary tree, we need to show that
$\{(X_i(e),Y_i(e))\}_{i\leq N_h(e)}$ is identical in law with a sequence $\{(\tilde X_i,\tilde Y_i)\}_{i\leq \tilde N}$,
where $\{(\tilde X_i,\tilde Y_i)\}_{i\in{\mathbb N}}$
is an infinite sequence of independent exponential random variables
with mean $h/2$ and
$$
\tilde N \ := \ \inf\{n\geq0 \ : \ \sum_{i=1}^{n} (\tilde X_{i}- \tilde Y_{i+1}) < 0\}.
$$
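As an aside (not used in the proof below), the stopping rule defining $\tilde N$ is easy to
explore by simulation: the increments $\tilde X_i - \tilde Y_{i+1}$ form a mean-zero random walk
built from independent exponential variables with mean $h/2$. The following minimal Python
sketch samples $\tilde N$; the function name and the use of Python's standard random module are
illustrative choices only.
\begin{verbatim}
import random

def sample_N(h, seed=None):
    """Sample N = inf{ n >= 1 : sum_{i<=n} (X_i - Y_{i+1}) < 0 },
    where the X_i, Y_i are i.i.d. exponential with mean h/2
    (illustrative sketch of the stopping rule above)."""
    rng = random.Random(seed)
    partial_sum, n = 0.0, 0
    while True:
        X = rng.expovariate(2.0 / h)       # X_n, mean h/2
        Y_next = rng.expovariate(2.0 / h)  # Y_{n+1}, mean h/2
        partial_sum += X - Y_next
        n += 1
        if partial_sum < 0.0:
            return n
\end{verbatim}
By the coupling constructed below, the output of \texttt{sample\_N(h)} has the same law as
$N_h(e)$.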
The idea of the proof consists in constructing a coupling
between $\{(X_i(e),Y_i(e))\}_{i\leq N_h(e)}$
and $(\{(\tilde X_i, \tilde Y_i)\}_{i\geq 1}, \tilde N)$ as follows.
Let $w$ be a Brownian motion with $w(0)=0$, independent of
the excursion $e$, and define
\begin{equation}\label{K(e)}
\tilde w(t) \ := \ e(t) + w\left((t-K(e))\vee0\right) \ \ \mbox{where $K(e):=\sup\{t>0: \ e(t)>0\}$},
\end{equation}
obtained by pasting the process $w$ at the end of the excursion $e$.
Finally,
define $\tilde X_n:=X_n(\tilde w)$ and $\tilde Y_n:=Y_n(\tilde w)$.
It is easy to show
that the support of $\Lambda_{0,h}(e)$
is included in the support of $e$, which we denote by $[0,K(e)]$. (This was established in the course of proving Theorem \ref{main1}.)
As a consequence,
for every $n\leq N_h(e)$,
we must have
$t_n(e),s_n(e)\leq K(e)$ (recall that for $n\leq N_h(e)$, $t_n(e)$ and $s_{n}(e)$ coincide with the $n^{th}$ {\it finite}
returning and exit times at $0$). Since
$e$ and $\tilde w$ (and their reflections)
coincide up to $K(e)$, this implies that
$s_n(e)=s_n(\tilde w), t_n(e)=t_n(\tilde w)$ and that
$X_n(e)=\tilde X_n,Y_n(e)=\tilde Y_n$ for $n\leq N_h(e)$.
Theorem \ref{PN}
is a direct consequence of our coupling and the two following lemmas.
\begin{lemma}\label{l1}
\begin{enumerate}
\item $\tilde X_1,\tilde Y_2, \tilde X_2, \tilde Y_3,\cdots$ is a sequence of i.i.d. exponential variables with mean $h/2$.
Further, $\tilde Y_1=0$.
\item Under our coupling, for $1\leq i\leq N_h(e)$,
\begin{itemize}
\item $\tilde X_i \ = \ l^h(\tilde w)(\tilde t_i)-l^h(\tilde w)(\tilde t_{i-1})$,
\item $\tilde Y_i\ = \ l^0(\tilde w)(\tilde t_i)-l^0(\tilde w)(\tilde t_{i-1})$,
\end{itemize}
where $\tilde t_i := t_i(\tilde w)$.
\end{enumerate}
\end{lemma}
\begin{lemma}\label{l2}
Under our coupling,
$$
N_h(e) \ = \ \inf\{n : \sum_{i=1}^{n} (\tilde X_{i}- \tilde Y_{i+1}) < 0\} \ \ \mbox{a.s.}
$$
\end{lemma}
\begin{proof}[Proof of Lemma \ref{l1}]
Let us first prove that $\tilde Y_1=0$ and that $\tilde Y_1=l^0(\tilde w)(\tilde t_{1})-l^0(\tilde w)(\tilde t_0)$.
Let
$$
\bar T_1:=\inf\{t: \ \tilde w(t)=e(t)=h\}.
$$
Since $e$ is a Brownian
excursion with height larger than $h$,
$\bar T_1<\infty$ and
$\tilde w\in(0,h]$ on $(0,\bar T_1]$. From there, it immediately
follows that $\Lambda_{0,h}(\tilde w)=\tilde w$ on $[0,\bar T_1]$
and that $\bar T_1$ is the first returning time at level $h$
for the reflected process, i.e., $\bar T_1 = T_1(\tilde w)$ (see (\ref{def:T}) for a definition
of $T_1(\tilde w)$).
Further,
$\tilde s_1$ -- the first exit time of $\Lambda_{0,h}(\tilde w)$ at level $0$ --
is equal to $0$.
Since $c^h(\tilde w)$ does not vary off the set $\{t \ : \ \Lambda_{0,h}(\tilde w)(t)=h\}$, we have
\begin{eqnarray*}
\tilde Y_1 & = & -(c^0+c^h)(\tilde w)(0)+(c^0+c^h)(\tilde w)(\tilde s_1) \\
& = & -c^0(\tilde w)(0)+c^0(\tilde w)(\tilde s_1),
\end{eqnarray*}
implying that $\tilde Y_1=0$.
Finally, we also get that $l^0(\tilde w)(\tilde t_{1})-l^0(\tilde w)(\tilde t_0)=0$,
since
$\tilde t_1$ coincides with the first returning time of the
reflected process at $0$, and this process
never hits $0$ on the interval $(0,\tilde t_1)$.
Before proceeding with
the rest of the proof,
we start with a preliminary discussion.
Let $w'$ be a one-dimensional Brownian motion starting at $x\in[0,h]$.
Recall that the one-sided Skorohod reflection $\Gamma^0(w')$ is distributed as the absolute value
of a standard Brownian motion,
and the compensator
$c(w')(t):=(-\inf_{[0,t]} w')^+$ is the local time
at $0$ of $\Gamma^0(w')$. A proof of this statement can be found in \cite{KS91}. By following the exact same steps,
one can prove an analogous statement for the two-sided case, i.e., that for any Brownian motion $w'$ starting at some $x\in[0,h]$,
$\Lambda_{0,h}(w')$ is identical in law with $\lambda_{0,h}(w')$ (the standard reflection of the Brownian motion $w'$ -- see Section \ref{Intro}, in the discussion preceding
Theorem \ref{teo1}, for a description of $\lambda_{0,h}(w')$)
and that $-c^h(w')$ and $c^0(w')$ are respectively the local times at $h$ and $0$ of this process.
Next, let us define $\tilde t_n=t_n(\tilde w)$
and $\tilde s_n=s_{n}(\tilde w)$
and recall that the $h$-cut $\tilde w_h$ is defined as
$$
\tilde w_h = \tilde w - \Lambda_{0,h}(\tilde w) = -c^0(\tilde w) - c^h(\tilde w).
$$
By definition of $\tilde X_n$, we have
\begin{eqnarray}
\tilde X_{n}
& = & \tilde w_h(\tilde t_{n}) - \tilde w_h(\tilde s_{n}) \nonumber \\
& = & c^h(\tilde w)(\tilde s_{n}) - c^h(\tilde w)(\tilde t_{n}) \nonumber \\
& = & c^h(\tilde w)(\tilde t_{n-1}) - c^h(\tilde w)(\tilde t_{n}). \label{local-x}
\end{eqnarray}
The second line follows from the fact that
$c^0(\tilde w)$ does not vary off the set $\Lambda_{0,h}(\tilde w)^{-1}(\{0\})$ and $\Lambda_{0,h}(\tilde w)>0$ on $(\tilde s_{n},\tilde t_n)$;
the third line is a consequence of the fact that
$c^h(\tilde w)$ does not vary off the set $\Lambda_{0,h}(\tilde w)^{-1}(\{h\})$ and
$\Lambda_{0,h}(\tilde w)<h$ on $(\tilde t_{n-1},\tilde s_n)$.
By an analogous argument, one can prove that
\begin{eqnarray}
\tilde Y_n & = & c^0(\tilde w)(\tilde t_n) -c^0(\tilde w)(\tilde t_{n-1}). \label{local-y}
\end{eqnarray}
With those results at hand, we are now ready to prove our lemma.
In the first paragraph, we already argued that
$\Lambda_{0,h}(\tilde w)=\tilde w$ on $[0,\bar T_1]$.
By Lemma \ref{two-steps-ref},
\begin{eqnarray*}
\forall t\geq0, \ w'(t) & := & \Lambda_{0,h}(\tilde w)(t+\bar T_1) \\
& = & \Lambda_{0,h}(\tilde w(\cdot+\bar T_1))(t).
\end{eqnarray*}
The strong Markov property and the discussion above imply
that the path $w'$
is identical in law with a reflected
Brownian motion (where the reflection is a two-sided ``standard reflection'')
starting at level $h$. Further, the compensators $c^0(\tilde w)(\bar T_1+\cdot)$ and $-c^h(\tilde w)(\bar T_1+\cdot)$
are the local times at $0$ and $h$ for the process $w'$. Using (\ref{local-x})--(\ref{local-y}), we easily obtain that
$\tilde X_n$ (resp., $\tilde Y_{n}$) is the local time
accumulated at $h$ (resp., $0$), for $1\leq n \leq N_h(e)$ (resp., $2\leq n \leq N_h(e)$), on $[\tilde t_{n-1},\tilde t_n]$.
This completes the proof of the second part of our lemma.
Finally,
by standard excursion theory, $\tilde X_1, \tilde Y_2, \tilde X_2, \tilde Y_3,\cdots$ are i.i.d. exponential random variables with mean $h/2$. Independence follows from the strong Markov property, whereas $\tilde X_i$ and $\tilde Y_{i+1}$ are distributed as the amount of Brownian local time accumulated at $0$ before the occurrence of an excursion of height larger than or equal to $h$.
\end{proof}
\begin{proof}[Proof of Lemma \ref{l2}]
Recall that
$$
\tilde X_n = \tilde w_h(\tilde t_n)-\tilde w_h(\tilde s_n) \
\mbox{and} \ \tilde Y_n = \tilde w_h(\tilde t_{n-1}) - \tilde w_h(\tilde s_n),
$$
where we wrote $\tilde t_n=t_n(\tilde w)$, $\tilde s_k = s_k(\tilde w)$. Thus,
\begin{eqnarray*}
\tilde w_h(\tilde s_{n+1})
& = &
[\sum_{i=1}^n (\tilde w_h(\tilde t_i)- \tilde w_h(\tilde t_{i-1}))] \ - \ (\tilde w_h(\tilde t_{n}) - \tilde w_h(\tilde s_{n+1})) \\
& = &
[(-\tilde Y_1 + \tilde X_1) + (-\tilde Y_2+\tilde X_2) + \cdots +(-\tilde Y_{n}+\tilde X_{n})] \ - \tilde Y_{n+1} \\
& = &
-\tilde Y_1 + \sum_{i=1}^{n} (\tilde X_i-\tilde Y_{i+1})\\
& = & \sum_{i=1}^{n} (\tilde X_i-\tilde Y_{i+1}),
\end{eqnarray*}
where the last equality follows from the fact that $\tilde Y_1=0$ (see the first item of the previous lemma).
Let us now show that $N_h(e)=\inf\{n : \sum_{i=1}^{n} (\tilde X_{i}- \tilde Y_{i+1}) < 0\}$ a.s.
First, let us take $n< N_h(e)$.
On the one hand, we already argued that $\tilde s_{n+1}\leq K(e)$. On the other hand,
$\tilde w_h\geq0$ on $[0,K(e)]$, since $e_h\geq0$ (by Theorem \ref{main1}) and $e_h$ and $\tilde w_h$
coincide up to $K(e)$. Thus,
$$
\tilde w_h(\tilde s_{n+1}) = \sum_{i=1}^{n} (\tilde X_i-\tilde Y_{i+1}) \geq 0.
$$
Conversely,
let us take $n= N_h(e)$. By Proposition \ref{rtt},
$\tilde w_h$ attains a minimum at $\tilde s_{N_h(e)+1}$ on the interval $[\tilde t_{N_h(e)}, \tilde t_{N_h(e)+1}]$.
Using the fact that
$$
\sup \ \mbox{Supp}(e) \geq \sup \ \mbox{Supp}(\Lambda_{0,h}(e))
$$
(see the proof of Theorem \ref{main1}(1)),
it is easy to show that $K(e)\in[\tilde t_{N_h(e)}, \tilde t_{N_h(e)+1}]$, which implies that
\begin{eqnarray*}
0 = \tilde w_h(K(e)) & \geq & \tilde w_h(\tilde s_{N_{h}(e)+1}) \\
& = & \sum_{i=1}^{N_h(e)} (\tilde X_i - \tilde Y_{i+1}).
\end{eqnarray*}
Since $\tilde X_i$ and $\tilde Y_i$ are exponential
random variables, this inequality is strict almost surely. This completes the proof of the lemma.
\end{proof}
\section{Proof of Theorem \ranglef{teo1}}
\lambdabel{proof-sect}
Let $(z^\theta, w)$
be a weak solution of (\ref{sticky-bm}).
Our proof builds on the approach of Warren \cite{W02}. In this work, it is proved that the pair $(z^\theta,w)$ can be constructed by adding some extra noise to
the reflected process
$$
\xi(t):= w(t)-\inf_{[0,t]} w
$$
as follows. First, there exists a unique $\sigma$-finite measure -- here denoted by ${\cal L}_\xi$ and
referred to as the branch length measure -- on the metric space $(\cT_\xi,d_\xi)$
such that
$$
\forall a,b\in\cT_\xi, \ \ {\cal L}_\xi([a,b]) \ = \ d_\xi(a,b).
$$
(See \cite{E05} for more details).
Conditioned on a realization of $\xi$,
let us now consider
the Poisson point process on $(\cT_\xi,d_\xi)$ with intensity measure
$2\theta {\cal L}_\xi$
and define the pruned tree
$$
\cT^\theta_\xi \ := \ \{z\in\cT_\xi \ : \ [\rho_\xi,z] \ \mbox{is unmarked}\},
$$
obtained after removal of every vertex with a marked ancestor along its ancestral line.
Finally, define $z^\theta(t)$ as the distance of the point $p_\xi(t)$
from
the subset $\cT^\theta_\xi$, i.e.,
\begin{equation}
z^\theta(t) \ := \ \left\{ \begin{array}{l}
\mbox{$0$ if $p_\xi(t)\in \cT^\theta_\xi$,}
\\
\mbox{$\xi(t) - A(t)$ otherwise,} \end{array} \right. \label{ztheta}
\end{equation}
where $A(t)=0$ if there is no mark along the ancestral line $[\rho_\xi,p_\xi(t)]$, and is equal to the height of the first mark (counted from the root) on $[\rho_\xi,p_\xi(t)]$ otherwise. Informally,
$(z^\theta(t); t\geq0)$
can be thought of as the exploration process ``above''
the pruned tree $\cT^\theta_\xi$ -- see Fig.~\ref{sticky-fig}.
\begin{figure}[ht]
\centering
\includegraphics[width=0.3\textwidth]{sticky-fig}
\caption{{\it The top panel displays a reflected random walk $\xi$ with marking of the underlying tree $\cT_\xi$.
$\cT_\xi^\theta$ is the black subset of the tree.
The bottom panel
displays the sticky path $(z^\theta(t); t\geq0)$ obtained by concatenating
the contour paths
of the red subtrees attached to $\cT_\xi^\theta$.}}\label{sticky-fig}
\end{figure}
\begin{theorem}[\cite{W02}]\label{wae}
The process
$(z^\theta(t),w(t); \ t\geq 0)$ is a weak solution of the SDE (\ref{sticky-bm}).
\end{theorem}
Using this result, we proceed with the proof of Theorem \ref{teo1}.
Let $\cT^{(t)}_\xi:=\cT_\xi \cap \{p_\xi(s) : \ s\leq t\}$ be the sub-tree consisting of all the vertices in $\cT_\xi$
visited up to time $t$.
For every $s$, the set
$$
\{x\in \cT_\xi \ : \ x\preceq p_\xi(s) \mbox{ and } d_\xi(x,p_{\xi}(s))\geq h\}
$$
is totally ordered.
We define $a_h(s)$ as the $\sup$
of this set,
with the convention that $\sup\{\emptyset\}=\rho_\xi$. Informally,
$a_h(s)$ is the ancestor of $p_\xi(s)$ at distance $h$.
Following the construction of the pair $(z^\theta,\xi)$ described earlier,
$\sup_{[0,t]} z^\theta\leq h$ if and only if
$$
\forall s\leq t, \ \ [\rho_\xi, a_h(s)] \ \ \mbox{is unmarked},
$$
which is easily seen to be equivalent to not finding any mark
on the $h$-trimming of the
tree $\cT^{(t)}_\xi$.
By a standard result about Poisson point processes, we have
\begin{eqnarray}
{\mathbb P}(\sup_{[0,t]} z^\theta \leq h \ | \ \sigma(w)) & = & {\mathbb P}\left( {\mathbb T}r(\cT_\xi^{(t)}) \ \ \mbox{is unmarked} \ | \ \sigma(w)\right) \nonumber \\
& = & \exp\left[-2\theta \cdot {\cal L}_\xi( {\mathbb T}r(\cT_\xi^{(t)}) )\right]. \label{eqp}
\end{eqnarray}
Let us define
$$
\xi^{(t)}(s) \ := \ \left\{ \begin{array}{l} \xi(s) \ \mbox{if $s\leq t$,} \\ (\xi(t) - (s-t))^+ \ \ \mbox{otherwise,} \end{array} \right.
$$
which is a function in $C_0^+({\mathbb R}^+)$ from which we can construct the real rooted
tree $(\cT_{\xi^{(t)}},d_{\xi^{(t)}})$.
\begin{lemma}
There exists an isometric isomorphism preserving the root from
$(\cT_\xi^{(t)},d_{\cT_\xi})$ onto $(\cT_{\xi^{(t)}}, d_{\cT_{\xi^{(t)}}})$.
\end{lemma}
\begin{proof}
To simplify the notation, we write ${\cal T}_1=\cT_\xi^{(t)}$ and ${\cal T}_2=\cT_{\xi^{(t)}}$.
For every $y\in\cT_1$, define $t_y$ to be the minimal element of the fiber $\{s \ : \ p_{\xi}(s)=y\}$
(i.e., the first exploration time for $y$)
and define
the mapping
\begin{eqnarray*}
\phi \ : \ \cT_1 & \rightarrow & \cT_2 \\
y & \rightarrow & p_{\xi^{(t)}}(t_y).
\end{eqnarray*}
It is straightforward to show that $\phi$ defines a mapping from ${\cT_1}$ to $\cT_2$ preserving the root.
We first show that $\phi$ is surjective.
In order to do so, we start by showing that
\begin{equation}\label{chj}
\forall s\leq t, \ \phi(p_{\xi}(s)) = p_{\xi^{(t)}}(s).
\end{equation}
For
$s\leq t$, we have
\begin{eqnarray*}
t_{p_\xi(s)} & = & \inf\{ u \ : \ p_\xi(u) = p_\xi(s) \} \\
& = & \inf\{u\leq s \ : \ \xi(u)=\xi(s)=\inf_{[u,s]} \xi \} \\
& = & \inf\{u\leq s \ : \ \xi^{(t)}(u)=\xi^{(t)}(s)=\inf_{[u,s]} \xi^{(t)} \},
\end{eqnarray*}
where the last equality follows from the fact that $\xi$ and $\xi^{(t)}$
coincide before time $t$. The latter identity implies
that
$
t_{p_\xi(s)} \in \{u\leq s \ : \ \xi^{(t)}(u)=\xi^{(t)}(s)=\inf_{[u,s]} \xi^{(t)} \}
$
or equivalently that
$$
p_{\xi^{(t)}}(t_{p_\xi(s)}) = p_{\xi^{(t)}}(s),
$$
which can be rewritten as
(\ranglef{chj}),
as claimed earlier. In order to show surjectivity,
let us take $v\in\cT_2$ and
$s$ such that $v=p_{\xi^{(t)}}(s)$. We distinguish between two cases.
(1) If $s\leq t$,
the previous result immediately implies that $v\in\phi(\cT_1)$.
(2) If $s> t$, then,
since $\xi^{(t)}$
is continuous and non-increasing on $[t,\infty)$,
one can find
$s' \leq t$ such that
$$
\xi^{(t)}(s)=\xi^{(t)}(s')=\inf_{[s',s]} \xi^{(t)},
$$
implying that $v=p_{\xi^{(t)}}(s')$, and we are back to case (1).
\medskip
It remains to show that $\phi$ is an isometry.
Let $x_1,x_2\in\cT_1$. We have
$
\phi(x_i)=p_{\xi^{(t)}}(t_{x_i})
$
with $t_{x_i}\leq t$. Since $\xi$ and $\xi^{(t)}$
coincide up to time $t$,
we must have
\begin{eqnarray*}
d_{\cT_2}(\phi(x_1),\phi(x_2)) & = & \xi^{(t)}(t_{x_1}) \ + \ \xi^{(t)}(t_{x_2}) \ - \ 2 \inf_{[t_{x_1}\wedge t_{x_2}, t_{x_1}\vee t_{x_2}]} \xi^{(t)} \\
& = & \xi(t_{x_1})\ +\ \xi(t_{x_2}) \ - \ 2 \inf_{[t_{x_1}\wedge t_{x_2}, t_{x_1}\vee t_{x_2}]} \xi \\
& =& d_{\cT_1}(x_1,x_2).
\end{eqnarray*}
\end{proof}
The branch length ${\cal L}_\xi( {\mathbb T}r(\cT_{\xi^{(t)}}))$
is obtained by adding up
all the branch lengths of the trimmed tree ${\mathbb T}r(\cT_{\xi^{(t)}})$.
Following the algorithm described in Proposition \ref{algo}, the total branch length
is given by the sum of the $X_n(\xi^{(t)})$'s,
or equivalently
\begin{eqnarray}
{\cal L}_\xi( {\mathbb T}r(\cT_{\xi^{(t)}}))
& = & \sum_{n\geq1} \left( \xi_h^{(t)}(t_{n}^{(t)}) - \xi_h^{(t)}(s_{n}^{(t)}) \right) \nonumber \\
& = & \sum_{n\geq1} \left( c^h(\xi^{(t)})(t_{n-1}^{(t)}) - c^h(\xi^{(t)})(t_{n}^{(t)}) \right) \nonumber \\
& = & \lim_{s\uparrow\infty} -c^h(\xi^{(t)})(s), \label{ch}
\end{eqnarray}
where we wrote $t_{n}^{(t)}:=t_{n}(\xi^{(t)}), s_{n}^{(t)}:=s_{n}(\xi^{(t)})$, and the second line
can be shown as in (\ref{local-x}).
\begin{lemma}
$$
\lim_{s\uparrow\infty} c^h(\xi^{(t)})(s)= c^h(\xi)(t).
$$
\end{lemma}
\begin{proof}
Since $\xi^{(t)}$ and $\xi$ coincide up to $t$, we have
\begin{equation}\label{mono}
c^h(\xi^{(t)})(t)=c^h(\xi)(t).
\end{equation}
Furthermore, by Lemma \ref{two-steps-ref},
$$
\forall s\geq t, \ \Lambda_{0,h}(\xi^{(t)})(s) \ = \ \Lambda_{0,h} ( m )(s), \
\mbox{where $m(s)=\Lambda_{0,h}(\xi)(t) + 1_{s\geq t}\left(\left(\xi(t) - (s-t)\right)^+ \ - \ \xi(t)\right).$}
$$
The function $m$ is non-increasing on $[t,\infty)$. From this observation,
we easily get from the definition of the one-sided reflection $\Gamma^0(\cdot)$ that
$$
\forall s\geq0, \ \ \Gamma^0(m)(s)\leq \Gamma^0(m)(t)\leq h.
$$
Lemma \ref{pr-s} then
implies that $\Lambda_{0,h} (m)=\Gamma^0(m)$
and that
$c^h(m)=0$ (in other words, no compensator is needed to keep $m$ below level $h$). Finally,
since
$dc^h(m)=dc^h(\xi^{(t)})$ on $[t,\infty)$,
we have
$$
\lim_{s\uparrow\infty} c^h(\xi^{(t)})(s) \ = \ c^h(\xi^{(t)})(t).
$$
The latter equation combined with (\ref{mono}) completes the proof of the lemma.
\end{proof}
Combining (\ref{eqp}) and (\ref{ch}) with the previous lemma,
we get
\begin{equation}
{\mathbb P}(\sup_{[0,t]} z^\theta \leq h \ | \ \sigma(w)) \ = \ \exp\left[2\theta \cdot c^h(\xi)(t)\right]. \label{last1}
\end{equation}
In order to prove our theorem, it remains to show that
$\Lambda_{0,h}(w)$ is identical in law with a Brownian motion reflected (in a ``standard way'')
on $[0,h]$, and that $-c^h(\xi)$ is the local time of $\Lambda_{0,h}(w)$ at $h$.
\begin{lemma}\label{reflection-inv}
For every continuous function $f$ with $f(0)=0$, $c^h(\Gamma^0(f)) = c^h(f)$.
\end{lemma}
\begin{proof}
In Theorem \ref{main1}, we showed that if $g\geq0$ then $g_h\geq0$,
or equivalently
$$
\Lambda_{0,h}(g) \leq g.
$$
This implies that for every continuous
non-negative function $g$,
every zero of the function $g$,
is also a zero of the function $\Lambda_{0,h}(g)$. On the other hand,
for any continuous function $f$ with $f(0)=0$,
the definition of the one-sided Skorohod reflection at $0$ (see (\ref{one-sided}))
implies that
$\Gamma^0(f)$ can be written as $f+c$ where $c$ is a non-decreasing continuous
function, only increasing at the zeros of the reflected path $\Gamma^0(f)$.
Taking $g=\Gamma^0(f)$ in the previous discussion,
the set of zeros of $\Gamma^0(f)$ is included in its $\Lambda_{0,h}(\Gamma^0(f))$ counterpart,
and
we get that the compensator $c(t):=-\inf_{[0,t]} f$
(for the one-sided reflection) only increases on the set of zeros of the doubly reflected path
$\Lambda_{0,h}(\Gamma^0(f))$.
Next,
let
$\bar c^{0}$ and $\bar c^h$ be the compensators associated
with the function $\Gamma^0(f)$, i.e., $\Lambda_{0,h}(\Gamma^0(f)) = (f + c) + \bar c^{0} + \bar c^h$,
where $\bar c^{0}$ and $\bar c^h$ satisfy the hypothesis of Theorem \ref{Skor} for the function $f+c$.
Since $\bar c^{0}$ only increases at the zeroes of $\Lambda_{0,h}(\Gamma^0(f))$,
the functions
$(c+\bar c^{0},\bar c^h)$ must solve the Skorohod equation for $f$ on the interval $[0,h]$.
By uniqueness of the solution,
this readily implies that $\Lambda_{0,h}(f)=\Lambda_{0,h}(\Gamma^0(f))$ and $\bar c^h\equiv c^h(\Gamma^0(f))= c^h(f)$.
\end{proof}
The previous lemma and (\ref{last1}) yield
\begin{eqnarray}
{\mathbb P}(\sup_{[0,t]} z^\theta \leq h \ | \ \sigma(w)) & = & \exp\left[2\theta \cdot c^h(w)(t)\right]. \label{last}
\end{eqnarray}
As already explained in the previous section (see the proof of Lemma \ref{l1}), $\Lambda_{0,h}(w)$ is identical in law with a Brownian motion reflected (in a ``standard way'')
on $[0,h]$, and $-c^h(w)$ is the local time of this process at $h$ (see again the proof of Lemma \ref{l1}
for more details). This completes
the proof of Theorem \ref{teo1}.
{\bf Acknowledgments.} I thank P. Hosheit for helpful discussions and his careful reading of an early version of the present paper.
\begin{thebibliography}{SSS}
\bibitem[C89]{C89} R.J. Chitashvili.
On the existence
of a strong solution in the boundary problem for a sticky Brownian motion.
{\em Technical Report BS-R8901, Center for Mathematics and
Computer Science, Amsterdam}, (1989).
\bibitem[E05]{E05} S. Evans. Probability and Real Trees.
{\em \'Ecole d'\'Et\'e de Probabilit\'es de Saint-Flour XXXV}, (2005).
\bibitem[F57]{F57} W. Feller.
On boundaries and lateral conditions for the Kolmogorov equations.
{\em Ann. of Math., Series 2}, {\bf 65}, (1957), 527--570.
\bibitem[HL81]{HL81} J.M. Harrison, A.J. Lemoine.
Sticky Brownian motion as the limit of storage processes.
{\em Journal of App. Proba.}, {\bf 18}, (1981), 216--226.
\bibitem[KLRS07]{KLRS07}
L. Kruk, J. Lehoczky, K. Ramanan, S. Shreve.
An explicit formula for the Skorokhod map on $[0, a]$.
{Ann. Probab.}, {\bf 35}, (2007), 1740--1768.
\bibitem[KS91]{KS91}
I.~Karatzas, E.~Shreve.
{\em Brownian Motion and Stochastic Calculus}, Springer-Verlag,
Berlin-Heidelberg-New York, (1991).
\bibitem[L11]{L11} R. {\L}ochowski.
Truncated variation, upward truncated variation and downward truncated variation of Brownian motion with drift
- their characteristics and applications.
{\em Stoch Proc. Appl.}, {\bf 121}, (2011), 378--393.
\bibitem[L13]{L13} R. {\L}ochowski.
On a generalisation of the Hahn--Jordan decomposition for real c\`adl\`ag functions.
{\em Colloq. Math.}, {\bf 132}, (2013), 121--138.
\bibitem[LG05]{LG05} J.F. Le Gall.
Random trees and their applications.
{\em Proba. Surv}, {\bf 2}, (2005), 245--311.
\bibitem[LG89]{LG89}
J.F. Le Gall.
Marches al\'eatoires, mouvement brownien et processus de branchement.
{\em S\'em. de Probab. XXIII}, {\bf 1372}, (1989), 258--274.
\bibitem[LM13]{LM13} R. {\L}ochowski, P. Mi{\l}o\'s.
On truncated variation, upward truncated variation and downward truncated variation for diffusion.
{\em Stoch. Proc. Appl.}, {\bf 123}, (2013), 446--474.
\bibitem[M13]{M13} P. Mi{\l}\'os.
Exact representation of truncated variation of Brownian motion. (2013), {\em arXiv: 1311.2415.}
\bibitem[NP89]{NP89}
J. Neveu, J. Pitman.
Renewal property of the extrema and tree property of a one-dimensional Brownian motion.
{\em S\'em. de Proba. XXIII}, {\bf 1372}, (1989), 239--247.
\bibitem[NRS10]{NRS10}
C.M.~Newman, K.~Ravishankar, E.~Schertzer. Marking the $(1,2)$ points
of the Brownian web and applications. {\em Ann. Inst. H. Poincar\'e},
{\bf 46}, (2010), 537--574.
\bibitem[SS08]{SS08}
R.~Sun, J.M.~Swart.
The Brownian net.
{\em Ann. Probab.}, {\bf 36}, (2008), 1153--1208.
\bibitem[T79]{T79}
H. Tanaka.
Stochastic differential equations with reflecting boundary conditions in convex regions,
{\em Hiroshima Math}, {\bf 9.}, (1979), 16--177.
\bibitem[V01]{V01} S.R.S. Varadhan.
Probability theory. {\em Courant lecture notes}. {\bf 7}, American Mathematical Society, (2001).
\bibitem[W97]{W97}
J.~Warren.
Branching processes, the Ray-Knight theorem, and sticky Brownian motion.
{\em S\'em. de Proba. de Strasbourg}, {\bf 31}, (1997), 1--15.
\bibitem[W99]{W99} J. Warren.
On the joining of sticky Brownian motion.
{\em S\'em. de Proba. XXXIII.}, {\bf 1709}, (1999), 257--266.
\bibitem[W02]{W02}
J.~Warren.
The noise made by a Poisson snake.
{\em Elec. J. Probab}, {\bf 7}, (2002), 1--21.
\bibitem[Y94]{Y94} K. Yamada.
Reflecting or sticky Markov processes with Levy generators as the limit of storage processes.
{\em Stoch. Proc. and Appl.}, {\bf 52}, (1994), 135--164.
\end{thebibliography}
\end{document}
\begin{document}
\title{Efficient discontinuous Galerkin finite element methods via Bernstein polynomials}
\pagestyle{myheadings}
\thispagestyle{plain}
\markboth{R.C. KIRBY}{Efficient DGFEM via Bernstein polynomials}
\renewcommand{\thefootnote}{\fnsymbol{footnote}}
\footnotetext[2]{
Department of Mathematics, Baylor University; One Bear Place \#97328; Waco, TX 76798-7328. This work is supported by NSF grant CCF-1325480.}
\renewcommand{\thefootnote}{\arabic{footnote}}
\begin{abstract}
We consider the discontinuous Galerkin method for hyperbolic
conservation laws, with some particular attention to the linear
acoustic equation, using Bernstein polynomials as local bases.
Adapting existing techniques leads to optimal-complexity
computation of the element and boundary flux terms. The element mass
matrix, however, requires special care. In particular, we give an explicit
formula for its eigenvalues and exact characterization of the
eigenspaces in terms of the Bernstein representation of orthogonal
polynomials. We also show a fast algorithm for solving linear systems
involving the element mass matrix to preserve the overall complexity
of the DG method. Finally, we present numerical results investigating
the accuracy of the mass inversion algorithms and the scaling of total
run-time for the function evaluation needed in DG time-stepping.
\end{abstract}
\begin{keywords}
Bernstein polynomials, discontinuous Galerkin methods,
\end{keywords}
\begin{AMS}
65N30
\end{AMS}
\section{Introduction}
Bernstein polynomials, which are ``geometrically decomposed'' in the
sense of~\citep{arnold2009geometric} and rotationally symmetric,
provide a flexible and general-purpose set of simplicial finite
element shape functions.
Moreover, recent research has demonstrated distinct algorithmic
advantages over other simplicial shape functions, as many essential
elementwise finite element computations can be performed
with optimal complexity using Bernstein polynomials.
In~\citep{kirby2011fast}, we showed
how, with constant coefficients, elementwise mass and stiffness
matrices could each be applied to vectors in $\mathcal{O}(n^{d+1})$ operations,
where $n$ is the degree of the local basis and $d$ is the spatial
dimension. Similar blockwise linear algebraic structure enabled
quadrature-based algorithms in~\citep{kirby2012fast}. Around the same time,
Ainsworth \emph{et al}~\citep{ainsworth2011bernstein} showed that the Duffy
transform~\citep{duffy1982quadrature} reveals a tensorial structure in
the Bernstein
basis itself, leading to sum-factored algorithms for polynomial
evaluation and moment computation. Moreover, they provide an
algorithm that assembles element matrices with $\mathcal{O}(1)$ work
per entry that utilizes their fast moment algorithm together with a
very special property of the Bernstein polynomials. Work
in~\citep{kirbyssec,brown_sc_2012_20} extends these techniques to
$H(\mathrm{div})$ and $H(\mathrm{curl})$.
In this paper, we consider Bernstein polynomial techniques in a
different context -- discontinuous Galerkin methods for hyperbolic
conservation laws
\begin{equation}
\label{eq:conslaw}
q_t + \nabla \cdot F(q) = 0,
\end{equation}
posed on a domain
$\Omega \times [0,T) \subset \mathbb{R}^d \times\mathbb{R}$, together
with suitable initial and boundary conditions. As a
particular example, we consider the linear acoustic model
\begin{equation}
\begin{split}
p_{t} + \nabla \cdot u & = 0,\\
u_{t} + \nabla p & = 0.
\end{split}
\end{equation}
Here, $q = [u,p]^T$ where the pressure variable $p$ is a scalar-valued function on
$\Omega \times [0,T]$ and the velocity $u$ maps the same space-time
domain into $\mathbb{R}^d$.
Discontinuous Galerkin (DG) methods for such problems place finite
volume methods in a variational
framework and extend them to higher orders of polynomial
approximation~\cite{cockburn1991runge}, but fully realizing the potential
efficiencies of high-order methods requires careful consideration of
algorithmic issues.
Simplicial orthogonal polynomials~\cite{dubiner1991spectral,KarShe05}
provide one existing mechanism for achieving low operation counts. Their
orthogonality gives diagonal local mass
matrices. Optimality then requires special quadrature that reflects the
tensorial nature of the basis under the Duffy transform or
collapsed-coordinate mapping from the $d$-simplex to the $d$-cube and
also includes appropriate points to incorporate contributions
from both volume and boundary flux terms.
Hesthaven and Warburton~\citep{hesthaven2002nodal,hesthaven2007nodal}
propose an alternate approach,
using dense linear algebra in conjunction with Lagrange polynomials.
While of greater algorithmic complexity, highly-tuned
matrix multiplication can make this approach
competitive or even superior at practical polynomial orders.
Additional extensions of this idea include the so-called ``strong DG''
forms and also a pre-elimination of the elementwise mass matrix giving
rise to a simple ODE system. With care, this approach can give very
high performance on both CPU and GPU systems~\citep{klockner2009nodal}.
In this paper, we will show how each term in the DG
formulation with Bernstein polynomials as the local basis can be
handled with optimal complexity. For the element
and boundary flux terms, this requires only an adaptation of existing
techniques, but inverting the element mass matrix turns out to be a
challenge lest it dominate the complexity of the entire process.
We rely on the recursive block structure
described in~\citep{kirby2011fast} to give an $\mathcal{O}(n^{d+1})$
algorithm for solving linear systems with the constant-coefficient
mass matrix.
We may view our approach as sharing certain
important features of both collapsed-coordinate
and Lagrange bases. Like collapsed-coordinate methods, we seek
to use specialized structure to optimize algorithmic complexity.
Like Lagrange polynomials, we seek to do this using a relatively
discretization-neutral basis.
\section{Discontinuous Galerkin methods}
We let $\mathcal{T}_h$ be a triangulation of $\Omega$ in the sense of
~\citep{BreSco} into affine simplices. For curved-sided elements, we could
adapt the techniques of~\cite{warburton2013low} to incorporate the Jacobian into our local basis functions to recover the reference mass matrix on each
cell at the expense of having variable coefficients in other
operators, but this does not affect the overall order of complexity.
We let $\mathcal{E}_h$ denote the set of all edges in the triangulation.
For $T \in \mathcal{T}_h$, let $P_n(T)$ be the space of polynomials of
degree no greater than $n$ on $T$. This is a vector space of
dimension $P^d_n \equiv \binom{n+d}{n}$. We define the global finite
element space
\begin{equation}
V_h = \left\{ f : \Omega \rightarrow \mathbb{R} : f|_T \in P_n(T), \ T
\in \mathcal{T}_h \right\},
\end{equation}
with no continuity
enforced between cells.
Let $\left( \cdot , \cdot \right)_T$ denote the standard $L^2$ inner
product over $T \in \mathcal{T}_h$ and
$\langle \cdot , \cdot \rangle_\gamma$ the $L^2$
inner product over an edge $\gamma \in \mathcal{E}_h$.
After multiplying~\eqref{eq:conslaw} by a test function and integrating by parts elementwise, a DG method seeks $u_h$ in $V_h$ such that
\begin{equation}
\label{eq:dg}
\sum_{T \in \mathcal{T}_h}
\left[
\left( u_{h,t} , v_h \right)_T
- \left( F(u_h) , \nabla v_h \right)_T \right]
+\sum_{\gamma \in \mathcal{E}_h} \langle \hat{F} \cdot n , v_h \rangle_\gamma = 0
\end{equation}
for all $v_h \in V_h$.
Fully specifying the DG method requires defining a numerical flux
function $\hat{F}$ on each $\gamma$. On internal edges, it takes
values from either side of the
edge and produces a suitable approximation to the flux $F$. Many
Riemann solvers from the finite volume literature have
been adapted for DG
methods~\cite{cockburn1991runge,dumbser2008unified,toro1999riemann}.
The particular choice of numerical flux does not matter for our
purposes. On external edges,
we choose $\hat{F}$ to appropriately enforce boundary conditions.
This discretization gives rise to a system of ordinary differential
equations
\begin{equation}
\mathrm{M} \mathrm{u}_t + \mathrm{F}(\mathrm{u}) = 0,
\end{equation}
where $\mathrm{M}$ is the block-diagonal mass matrix and
$\mathrm{F}(\mathrm{u})$ includes the cell and boundary flux terms.
Because of the hyperbolic nature of the system, explicit methods are
frequently preferred. A forward Euler method, for example, gives
\begin{equation}
\mathrm{u}^{n+1} = \mathrm{u}^n - \Delta t \mathrm{M}^{-1}
\mathrm{F}(\mathrm{u^n}) \equiv \mathrm{u}^n + \Delta t L(\mathrm{u}^n),
\end{equation}
which requires the application of $\mathrm{M}^{-1}$ at each time step.
The SSP methods~\cite{gottlieb2001strong,shu1988total} give stable
higher-order in time methods. For example, the well-known third order
scheme is \begin{equation}
\begin{split}
\mathrm{u}^{n,1} & = \mathrm{u}^n + \Delta t L(\mathrm{u}^n), \\
\mathrm{u}^{n,2} & = \frac{3}{4} \mathrm{u}^n + \frac{1}{4}
\mathrm{u}^{n,1} + \frac{1}{4} \Delta t L(\mathrm{u}^{n,1}),\\
\mathrm{u}^{n+1} & = \frac{1}{3} \mathrm{u}^n + \frac{2}{3}
\mathrm{u}^{n,2} + \frac{2}{3} \Delta t L( \mathrm{u}^{n,2}).
\end{split}
\end{equation}
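For concreteness, the following minimal Python sketch performs one step of the scheme above;
the callable \texttt{apply\_L}, standing for $L(\mathrm{u}) = -\mathrm{M}^{-1}\mathrm{F}(\mathrm{u})$,
is a hypothetical interface rather than part of any particular library.
\begin{verbatim}
def ssp_rk3_step(u, dt, apply_L):
    """One step of the third-order SSP Runge-Kutta scheme.

    apply_L(u) is assumed to return L(u) = -M^{-1} F(u) as a NumPy array
    (placeholder interface; sketch only)."""
    u1 = u + dt * apply_L(u)
    u2 = 0.75 * u + 0.25 * u1 + 0.25 * dt * apply_L(u1)
    return u / 3.0 + (2.0 / 3.0) * u2 + (2.0 / 3.0) * dt * apply_L(u2)
\end{verbatim}
The three stages reuse a single right-hand-side routine, so each step costs three flux
evaluations and three mass solves.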
Since the Bernstein polynomials give a dense element mass
matrix, applying $\mathrm{M}^{-1}$ efficiently will
require some care. It turns out that $\mathrm{M}$ possesses many
fascinating properties that we shall survey in Section~\ref{sec:mass}.
Among these, we will give an $\mathcal{O}(n^{d+1})$ algorithm for
applying the elementwise inverse.
DG methods yield reasonable solutions to acoustic or Maxwell's
equations without slope limiters, although most nonlinear problems will
require them to suppress oscillations. Even linear transport can
require limiting when a discrete maximum principle is
required. Limiting high-order polynomials on simplicial domains
remains quite a challenge. It may be possible to utilize properties
of the Bernstein polynomials to design new limiters or conveniently
implement existing ones.
For example, the convex hull property (i.e. that polynomials in the
Bernstein basis lie in the convex hull of their control points) gives
sufficient conditions for enforcing extremal bounds.
We will not offer further contributions in this direction, but refer
the reader to other works on higher order limiting such
as~\citep{hoteit2004new,zhu2008runge,zhu2013runge}.
\section{Bernstein-basis finite element algorithms}
\label{BBFEA}
\subsection{Notation for Bernstein polynomials}
We formulate Bernstein polynomials on the $d$-simplex
using barycentric coordinates and multiindex notation. For a
nondegenerate simplex $T \subset \mathbb{R}^{d}$ with vertices
$\{x_i\}_{i=0}^{d}$, let $\{b_i\}_{i=0}^{d}$ denote the
barycentric coordinates. Each $b_i$ affinely maps
$\mathbb{R}^d$ into $\mathbb{R}$ with $b_i(x_j) = \delta_{ij}$ for
$0 \leq i,j \leq d$. It follows that $b_i(x) \geq 0$ for all $x \in T$.
We will use common multiindex notation, denoting multiindices
with Greek letters, although we will begin the indexing with 0 rather than 1.
So,
$\alpha=(\alpha_0,\alpha_1,\dots,\alpha_{d})$ is a tuple of $d+1$
nonnegative integers. We define the \emph{order} of a multiindex $\alpha$
by $\left| \alpha \right| \equiv \sum_{i=0}^{d} \alpha_i$. We say
that $\alpha \geq \beta$ provided that the inequality
$\alpha_i \geq \beta_i$ holds componentwise for $0 \leq i \leq d$.
Factorials and binomial coefficients over multiindices have implied multiplication. That is,
\[
\alpha! \equiv \prod_{i=0}^{d} \alpha_i!
\]
and, provided that $\alpha \geq \beta$,
\[
\binom{\alpha}{\beta} = \prod_{i=0}^d \binom{\alpha_i}{\beta_i}.
\]
Without ambiguity of notation, we also define a binomial coefficient
with a whole number for the upper argument and a multiindex for the lower
by
\[
\binom{n}{\alpha} = \frac{n!}{\alpha!} = \frac{n!}{\prod_{i=0}^{d} \alpha_i!}.
\]
We also define $e_i$ to be the multiindex consisting of zeros in all but the $i^{\mathrm{th}}$ entry, where it is one.
Let $\mathbf{b} \equiv \left( b_0 , b_1 , \dots , b_d \right) $ be
a tuple of barycentric coordinates on a simplex. For multiindex
$\alpha$, we define a \emph{barycentric monomial} by
\[
\mathbf{b}^\alpha = \prod_{i=0}^{d} b_i^{\alpha_i}.
\]
We obtain the Bernstein polynomials by scaling these by certain
binomial coefficients
\begin{equation}
B^n_\alpha = \frac{n!}{\alpha!} \mathbf{b}^\alpha.
\end{equation}
For all spatial dimensions and degrees $n$, the Bernstein
polynomials of degree $n$
\[
\left\{
B^n_{\alpha}
\right\}_{\left| \alpha \right| = n},
\]
form a nonnegative partition of unity and a basis for the vector space of
polynomials of degree $n$. They are suitable for
assembly in a $C^0$ fashion or even into smoother splines~\cite{LaiSch07}.
While DG methods do not require assembly, the geometric decomposition
does make handling the boundary terms straightforward.
Crucial to fast algorithms using the Bernstein basis, as
originally applied to $C^0$
elements~\citep{ainsworth2011bernstein,kirby2011fast}, is the
sparsity of differentiation. That is, it takes no more than $d+1$
Bernstein polynomials of degree $n-1$ to represent the derivative of a
Bernstein polynomial of degree $n$.
For some coordinate direction $s$, we use the general product rule to write
\[
\frac{\partial B^n_\alpha}{\partial s}
= \frac{\partial}{\partial s} \left( \frac{n!}{\alpha!} \mathbf{b}^\alpha \right)
= \frac{n!}{\alpha!} \sum_{i=0}^{d} \left( \alpha_i \frac{\partial b_i}{\partial s} b_i^{\alpha_i-1} \prod_{j \neq i} b_j^{\alpha_j} \right),
\]
with the understanding that a term in the sum vanishes if $\alpha_i = 0$. This can readily be rewritten as
\begin{equation}
\frac{\partial B^n_\alpha}{\partial s}
= n \sum_{i=0}^{d} B_{\alpha-e_i}^{n-1} \frac{\partial b_i}{\partial s},
\end{equation}
again with the terms vanishing if any $\alpha_i = 0$,
so that the derivative of each Bernstein polynomial is a short linear combination of lower-degree Bernstein polynomials.
Iterating over spatial directions, the gradient of each Bernstein polynomial can be written as
\begin{equation}
\label{eq:berngrad}
\nabla B^n_\alpha
= n \sum_{i=0}^{d} B_{\alpha-e_i}^{n-1} \nabla b_i.
\end{equation}
Note that each $\nabla b_i$ is a fixed vector in $\mathbb{R}^d$ for a
given simplex $T$. In~\cite{kirbyssec}, we provide a data structure
called a \emph{pattern} for representing gradients as well as exterior
calculus basis functions. For implementation details, we refer the
reader back to~\cite{kirbyssec}.
The \emph{degree elevation} operator will also play a crucial role in
our algorithms. This operator expresses a B-form polynomial of degree $n-1$
as a degree $n$ polynomial in B-form. For the orthogonal and
hierarchical bases in~\cite{KarShe05}, this operation would be trivial
-- appending the requisite number of zeros in a vector, while for
Lagrange bases it is typically quite dense.
While not trivial, degree elevation for Bernstein polynomials is still
efficient. Take any Bernstein polynomial and multiply it by
$\sum_{i=0}^db_i = 1$ to find
\begin{equation}
\begin{split}
B^{n-1}_\alpha & = \left( \sum_{i=0}^{d} b_i \right) B^{n-1}_\alpha
= \sum_{i=0}^d b_i B^{n-1}_\alpha \\
& = \sum_{i=0}^d \frac{(n-1)!}{\alpha!} \mathbf{b}^{\alpha+e_i}
= \sum_{i=0}^d \frac{\alpha_i+1}{n} \frac{n!}{\left(\alpha+e_i\right)!}
\mathbf{b}^{\alpha+e_i} \\
& = \sum_{i=0}^d \frac{\alpha_i+1}{n} B^n_{\alpha+e_i}.
\end{split}
\end{equation}
We could encode this operation as a $P^d_n \times P^d_{n-1}$
matrix with exactly $d+1$ nonzero entries per column, but it can also
be applied with a simple nested loop. At any rate, we denote this
linear operator as $E^{d,n}$, where $n$ is the degree of the
resulting polynomial. We also denote by
$E^{d,n_1,n_2}$ the operation that successively raises a polynomial
from degree $n_1$ to degree $n_2$. This is just the product of $n_2 - n_1$
(sparse) operators:
\begin{equation}
E^{d,n_1,n_2} = E^{d,n_2} \dots E^{d,n_1+1}.
\end{equation}
We have that $E^{d,n} = E^{d,n-1,n}$ as a special case.
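As a concrete illustration, the following short Python sketch (our own illustration; the routine names are ours and NumPy is assumed) applies the elevation formula above in one space dimension, first as a single step $E^{1,n}$ and then as the multi-step operator $E^{1,n_1,n_2}$:
\begin{verbatim}
import numpy as np

def elevate_1d(c):
    # Single-step elevation E^{1,n}: c[i] multiplies B^{n-1}_i; the output
    # multiplies B^n_j, via B^{n-1}_alpha = sum_i (alpha_i+1)/n B^n_{alpha+e_i}.
    n = len(c)                            # the new degree
    out = np.zeros(n + 1)
    for i, ci in enumerate(c):
        out[i]     += (n - i) / n * ci    # raise the (1-t) exponent
        out[i + 1] += (i + 1) / n * ci    # raise the t exponent
    return out

def elevate_1d_to(c, n2):
    # Multi-step elevation E^{1,n1,n2}: apply single steps until degree n2.
    c = np.asarray(c, dtype=float)
    while len(c) - 1 < n2:
        c = elevate_1d(c)
    return c

# Example: the linear polynomial 2*(1-t) + 5*t written in degree-3 B-form.
print(elevate_1d_to([2.0, 5.0], 3))       # [2., 3., 4., 5.]
\end{verbatim}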
\subsection{Stroud conical rules and the Duffy transform}
The Duffy transform~\cite{duffy1982quadrature} tensorializes the
Bernstein polynomials, so sum factorization can be used for
evaluating and integrating these polynomials with Stroud conical
quadrature. We used similar quadrature rules in our own work on
Bernstein-Vandermonde-Gauss matrices~\cite{kirby2012fast}, but the connection to
the Duffy transform and decomposition of Bernstein polynomials was
quite cleanly presented by Ainsworth \emph{et al} in~\cite{ainsworth2011bernstein}.
The Duffy transform maps any point $\mathbf{t} = (t_1,t_2,\dots,t_d)$
in the $d$-cube $[0,1]^d$ into the barycentric coordinates for a
$d$-simplex by first defining
\begin{equation}
\lambda_0 = t_1
\end{equation}
and then inductively by
\begin{equation}
\lambda_{i} = t_{i+1} \left( 1 - \sum_{j=0}^{i-1} \lambda_j \right)
\end{equation}
for $1 \leq i \leq d-1$, and then finally
\begin{equation}
\lambda_{d} = 1 - \sum_{j=0}^{d-1} \lambda_j.
\end{equation}
If a simplex $T$ has vertices $\{ \mathbf{x}_i \}_{i=0}^{d}$, then the mapping
\begin{equation}
\mathbf{x}(\mathbf{t}) = \sum_{i=0}^{d} \mathbf{x}_i \lambda_i(\mathbf{t})
\end{equation}
maps the unit $d$-cube onto $T$.
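A minimal Python sketch of this map (our own illustration; NumPy is assumed and the simplex vertices are stored as the rows of a $(d+1)\times d$ array) reads:
\begin{verbatim}
import numpy as np

def duffy_barycentric(t):
    # Map t in the unit d-cube to barycentric coordinates (lambda_0,...,lambda_d)
    # using the recursion displayed above.
    d = len(t)
    lam = np.zeros(d + 1)
    lam[0] = t[0]
    acc = lam[0]
    for i in range(1, d):
        lam[i] = t[i] * (1.0 - acc)
        acc += lam[i]
    lam[d] = 1.0 - acc
    return lam

def duffy_map(t, vertices):
    # Physical point x(t) = sum_i x_i lambda_i(t), vertices given row-wise.
    return duffy_barycentric(t) @ np.asarray(vertices)
\end{verbatim}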
This mapping can be used to write integrals over $T$ as iterated
weighted integrals over $[0,1]^d$
\begin{equation}
\int_T f(\mathbf{x}) d\mathbf{x} = d!\,|T| \int_0^1
dt_1 (1-t_1)^{d-1} \int_0^1 dt_2 (1-t_2)^{d-2} \dots \int_0^1
dt_d\, f(\mathbf{x}(\mathbf{t})).
\end{equation}
The \emph{Stroud conical rule}~\cite{stroud} is based on this observation
and consists of tensor products of certain Gauss-Jacobi quadrature
weights in each $t_i$ variable, where the weights are chosen to absorb
the factors of $(1-t_i)^{d-i}$. These rules play an important role in
the collapsed-coordinate framework of~\cite{KarShe05} among many other
places.
As proven in~\cite{ainsworth2011bernstein}, pulling the Bernstein
basis back to $[0,1]^d$ under the Duffy transform reveals a
tensor-like structure.
It is shown there that, with $B^n_i(t) = \binom{n}{i} t^i (1-t)^{n-i}$ the
one-dimensional Bernstein polynomial,
\begin{equation}
B^n_\alpha(\mathbf{x}(\mathbf{t})) = B_{\alpha_0}^n(t_1) B_{\alpha_1}^{n-\alpha_0}(t_2) \cdots
B_{\alpha_{d-1}}^{n-\sum_{i=0}^{d-2}\alpha_i}(t_d).
\end{equation}
This is a ``ragged'' rather than true tensor product, much as the
collapsed coordinate simplicial bases~\cite{KarShe05}, but entirely
sufficient to enable sum-factored algorithms.
\subsection{Basic algorithms}
The Stroud conical rule and tensorialization of Bernstein polynomials
under the Duffy transformation lead to highly efficient algorithms for
evaluating B-form polynomials and approximating moments of functions
against sets of Bernstein polynomials.
Three algorithms based on this decomposition turn out to be fundamental for
optimal assembly and application of Bernstein-basis bilinear forms.
First, any polynomial
$u(\mathbf{x})=\sum_{|\alpha|=n} \mathrm{u}_\alpha
B^n_\alpha(\mathbf{x})$
may be evaluated at the Stroud conical points in
$\mathcal{O}(n^{d+1})$ operations. In~\cite{kirby2012fast}, this
result is presented as exploiting certain block structure in the
matrix tabulating the Bernstein polynomials at quadrature points.
In~\cite{ainsworth2011bernstein}, it is done by explicitly factoring
the sums.
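To make the sum factorization concrete, the following Python sketch (our own code, illustrative only) evaluates a B-form polynomial for $d=2$ at a tensor grid of points in the Duffy coordinates, using the ragged tensor-product structure from the previous subsection; the coefficient layout \texttt{coeffs[a0][a1]} is our own convention and quadrature weights are omitted:
\begin{verbatim}
import numpy as np
from math import comb

def bernstein_1d(n, t):
    # Values B^n_i(t), i = 0..n, at the points in t; shape (n+1, len(t)).
    t = np.asarray(t, dtype=float)
    return np.array([comb(n, i) * t**i * (1 - t)**(n - i) for i in range(n + 1)])

def evaluate_bform_2d(coeffs, t1, t2):
    # coeffs[a0][a1] multiplies B^n_{(a0, a1, n-a0-a1)}; t1, t2 are the 1D
    # point sets of a tensor grid in the Duffy coordinates.  The two factored
    # sums below cost O(n^3) = O(n^{d+1}) for d = 2.
    n = len(coeffs) - 1
    w = np.zeros((n + 1, len(t2)))
    for a0 in range(n + 1):                          # inner sums over a1
        w[a0] = bernstein_1d(n - a0, t2).T @ np.asarray(coeffs[a0])
    return bernstein_1d(n, t1).T @ w                 # outer sums over a0
\end{verbatim}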
Second, given some function $f(\mathbf{x})$ tabulated at the Stroud points, it
is possible to approximate the set of Bernstein moments
\[
\mu^n_\alpha(f) = \int_T f(\mathbf{x}) B^n_\alpha d\mathbf{x}
\]
for all $|\alpha|=n$
via Stroud quadrature in $\mathcal{O}(n^{d+1})$ operations.
In the case where $f$ is constant on
$T$, we may also use the algorithm for applying a mass matrix
in~\cite{kirby2011fast} to bypass numerical integration.
Finally, it is shown in~\cite{ainsworth2011bernstein} that the moment
calculation can be adapted to the evaluation of element mass and hence
stiffness and convection matrices utilizing another remarkable
property of the Bernstein polynomials. Namely, the product of two
Bernstein polynomials of any degrees is, up to scaling, a Bernstein
polynomial of higher degree:
\begin{equation}
B_\alpha^{n_1} B_\beta^{n_2} =
\frac{\binom{\alpha+\beta}{\alpha}}{\binom{n_1+n_2}{n_1}}
B^{n_1+n_2}_{\alpha+\beta}.
\end{equation}
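As a quick illustration of this product rule (our own spot check, in one space dimension, where $\alpha = (i, n_1 - i)$ and $\beta = (j, n_2 - j)$):
\begin{verbatim}
import numpy as np
from math import comb

n1, n2, i, j = 4, 3, 2, 1
t = np.linspace(0.0, 1.0, 7)
B = lambda n, k: comb(n, k) * t**k * (1 - t)**(n - k)
# Multiindex binomial C(alpha+beta, alpha) = C(i+j, i) * C(n1+n2-i-j, n1-i).
scale = comb(i + j, i) * comb(n1 + n2 - i - j, n1 - i) / comb(n1 + n2, n1)
assert np.allclose(B(n1, i) * B(n2, j), scale * B(n1 + n2, i + j))
\end{verbatim}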
Also, the first two algorithms described above for evaluation and
moment calculations demonstrate that the element mass matrix $M$ may be applied to a vector,
without explicitly forming its entries, in only $\mathcal{O}(n^{d+1})$
operations. In~\cite{kirbyssec}, we show how to adapt these algorithms to short linear
combinations of Bernstein polynomials so that stiffness and convection
matrices require the same order of complexity as the mass.
\subsection{Application to DG methods}
As part of each explicit time stepping stage, we must evaluate
$\mathrm{M}^{-1} \mathrm{F(u)}$. Evaluating $\mathrm{F(u)}$ requires
handling the two flux terms in~\eqref{eq:dg}. To handle
\[
\left( F(u_h) , \nabla v_h \right)_T,
\]
we simply evaluate $u_h$ at the Stroud points on $T$, which requires
$\mathcal{O}(n^{d+1})$ operations. Then, evaluating $F$ at each of
these points is purely pointwise and so requires but
$\mathcal{O}(n^{d})$. Finally, the moments against gradients of
Bernstein polynomials also require $\mathcal{O}(n^{d+1})$
operations. This term, then, is readily handled by existing Bernstein
polynomial techniques.
Second, we must address, on each interface $\gamma \in \mathcal{E}$,
\[
\langle \hat{F} \cdot n , v_h \rangle_\gamma.
\]
The numerical flux $\hat{F} \cdot n$ requires the values of $u_h$ on
each side of the interface and is evaluated pointwise at each facet
quadrature point. Because of the Bernstein polynomials'
geometric decomposition, only $P^{d-1}_n$ basis functions are nonzero
on that facet, and their traces are in fact exactly the Bernstein
polynomials on the facet. So we have to evaluate two polynomials (the
traces from each side) of
degree $n$ in $d-1$ variables at the facet Stroud points. This
requires $\mathcal{O}(n^d)$ operations. The numerical flux is
computed pointwise at the $\mathcal{O}(n^{d-1})$ points, and then the
moment integration is performed on facets for an overall cost of
$\mathcal{O}(n^d)$ for the facet flux term.
In fact, the geometric decomposition makes this term much easier to
handle optimally with Bernstein polynomials than collapsed-coordinate
bases, although, through specially adapted Radau-like quadrature rules,
the boundary sums may be lifted into the volumetric
integration~\citep{tim}.
The mass matrix, on the other hand, presents a much deeper challenge
for Bernstein polynomials than for collapsed-coordinate ones. Since
it is dense with
$\mathcal{O}(n^d)$ rows and columns, a standard matrix
Cholesky decomposition requires $\mathcal{O}(n^{3d})$
operations as a startup cost, followed by a pair of triangular solves
on each solve at $\mathcal{O}(n^{2d})$ each.
For $d>1$, this complexity clearly dominates the steps
above, although an optimized Cholesky routine might very well win at
practical orders. In the next section, we turn to a careful study of
the mass matrix, deriving an algorithm of optimal complexity.
\section{The Bernstein mass matrix}
\label{sec:mass}
We begin by defining the rectangular
Bernstein mass matrix on a $d$-simplex $T$ by
\begin{equation}
M^{T,m,n}_{\alpha\beta} = \int_T B^m_{\alpha} B^n_{\beta} dx,
\end{equation}
where $m,n \geq 0$.
By a change of variables, we can write
\begin{equation}
M^{T,m,n} = M^{d,m,n} |T| d!,
\end{equation}
where $M^{d,m,n}$ is the mass matrix on the unit right simplex $S_d$ in
$d$-space and $|T|$ is the $d$-dimensional measure of $T$.
When $m=n$, we suppress the third superscript and write $M^{T,m}$ or $M^{d,m}$.
We include the more general case of a
rectangular matrix because such matrices will appear later in our discussion of the
block structure.
This mass matrix has many beautiful properties. Besides the block-recursive structure developed in~\citep{kirby2011fast},
it is related to the Bernstein-Durrmeyer
operator~\cite{derriennic1985multivariate,farouki2003construction} of approximation theory. Via this connection, we
provide an exact characterization of its eigenvalues and
associated eigenspaces in the square case $m=n$. Finally, and most
pertinent to the case of discontinuous Galerkin methods, we describe
algorithms for solving linear systems involving the mass matrix.
Before proceeding, we recall from~\cite{kirby2011fast} that, using
formulae for integrals of products of powers of barycentric
coordinates, the mass matrix is given exactly by
\begin{equation}
\label{eq:Mdef}
M^{d,m,n}_{\alpha,\beta} = \frac{m!n!\left( \alpha +
\beta\right)!}{\left(m+n+d\right)!\alpha!\beta!}
\end{equation}
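For reference, a direct Python transcription of~\eqref{eq:Mdef} (our own helper functions, illustrative only) is:
\begin{verbatim}
import numpy as np
from math import factorial, prod

def multiindices(d, n):
    # All multiindices alpha of length d+1 with |alpha| = n.
    if d == 0:
        return [(n,)]
    return [(i,) + rest for i in range(n + 1) for rest in multiindices(d - 1, n - i)]

def mass_matrix(d, m, n):
    # Rectangular Bernstein mass matrix on the unit right d-simplex, eq. (Mdef).
    mf = lambda a: prod(factorial(x) for x in a)
    rows, cols = multiindices(d, m), multiindices(d, n)
    return np.array([[factorial(m) * factorial(n) * mf(tuple(r + c for r, c in zip(a, b)))
                      / (factorial(m + n + d) * mf(a) * mf(b)) for b in cols]
                     for a in rows])

# Sanity check against the hand computation int_0^1 B^1_i B^1_j dx on [0,1]:
assert np.allclose(mass_matrix(1, 1, 1), [[1/3, 1/6], [1/6, 1/3]])
\end{verbatim}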
\subsection{Spectrum}
The Bernstein-Durrmeyer
operator~\cite{derriennic1985multivariate} is defined on $L^2$ by
\begin{equation}
D_n(f) = \frac{\left(n+d\right)!}{n!} \sum_{|\alpha|=n} \left( f , B^n_\alpha \right) B^n_\alpha.
\end{equation}
This has a structure similar to a discrete Fourier series, although
the Bernstein polynomials are not orthogonal. The original Bernstein
operator~\citep{LaiSch07} has the form of a Lagrange interpolant,
although the basis is not interpolatory.
For $i\geq 0$, we let $Q_i$ denote the space of $d$-variate
polynomials of degree $i$ that are $L^2$ orthogonal to all polynomials of degree $i-1$ on the simplex, so that $Q_0$ consists of the constants.
The following result is given in~\cite{derriennic1985multivariate},
and also referenced in~\cite{farouki2003construction} to generate the
B-form of simplicial orthogonal polynomials.
\begin{theorem}[Derriennic]
For each $0 \leq i \leq n$, each
\[
\lambda_{i,n} = \frac{\left(n+d\right)! n!}{\left( n + i + d \right)!
\left( n - i \right)!}
\]
is an eigenvalue of $D_n$ corresponding to the eigenspace $Q_i$.
\end{theorem}
This gives a sequence of eigenvalues
$\lambda_{0,n} > \lambda_{1,n} > \dots > \lambda_{n,n} > 0$, each
corresponding to polynomial eigenfunctions of increasing degree.
Up to scaling, the Bernstein-Durrmeyer operator restricted to
polynomials $P_n$ exactly corresponds to the action of the mass
matrix. To see this, suppose that $P_n \ni p = \sum_{|\alpha|=n}
\mathrm{p}_\alpha B^n_\alpha$. Then
\begin{equation}
\begin{split}
\frac{n!}{\left(n+d\right)!} D_n(p) & = \sum_{|\alpha|=n} \left( p ,
B^n_\alpha \right) B^n_\alpha \\
& = \sum_{|\alpha|=n} \left( \sum_{|\beta|=n} p_\beta B^n_\beta ,
B^n_\alpha \right) B^n_\alpha \\
& = \sum_{|\alpha|=n} \sum_{|\beta|=n} p_\beta \left( B^n_\beta ,
B^n_\alpha \right) B^n_\alpha \\
& = \sum_{|\alpha|=n} \left( \sum_{|\beta|=n} M^n_{\alpha,\beta}
p_\beta \right) B^n_\alpha \\
\end{split}
\end{equation}
This shows that the coefficients of the B-form of
$\frac{n!}{\left(n+d\right)!} D_n(p)$ are just the entries of the
Bernstein mass matrix times the coefficients of $p$. Consequently,
\begin{theorem}
\label{thm:eigs}
For each $0 \leq i \leq n$, each
\[
\lambda_{i,n} = \frac{\left(n!\right)^2}{\left( n + i + d \right)!
\left( n - i \right)!}
\]
is an eigenvalue of $M^n$ of multiplicity $\binom{d+i-1}{d-1}$, and
the eigenspace is spanned by the B-form of any basis for $Q_i$.
\end{theorem}
This also implies that the Bernstein mass matrices are quite
ill-conditioned in the two norm, using the characterization in terms
of extremal eigenvalues for SPD matrices.
\begin{corollary}
\label{cor:badguy}
The 2-norm condition number of $M^{d,n}$ is
\begin{equation}
\frac{\lambda_{0,n}}{\lambda_{n,n}}
= \frac{(2n+d)!}{(n+d)!n!}
\end{equation}
\end{corollary}
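A small numerical experiment (our own check, reusing the \texttt{mass\_matrix} helper from the sketch following~\eqref{eq:Mdef}) confirms both the eigenvalues of Theorem~\ref{thm:eigs} and their multiplicities:
\begin{verbatim}
import numpy as np
from math import comb, factorial

# Reuses mass_matrix() from the sketch following eq. (Mdef) above.
d, n = 2, 4
computed = np.sort(np.linalg.eigvalsh(mass_matrix(d, n, n)))
predicted = sorted(
    factorial(n)**2 / (factorial(n + i + d) * factorial(n - i))
    for i in range(n + 1) for _ in range(comb(d + i - 1, d - 1)))
assert np.allclose(computed, predicted)
\end{verbatim}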
However, the spread in eigenvalues does not tell the whole story.
We have exactly $n+1$ distinct eigenvalues,
independent of the spatial dimension. This shows significant
clustering of eigenvalues when $d > 1$.
\begin{corollary}
\label{cor:wonthappen}
In exact arithmetic, unpreconditioned conjugate gradient iteration
will solve a linear system of the form $M^{d,n} x = y$ in at most
$n+1$ iterations, independent of $d$.
\end{corollary}
If the fast matrix-vector algorithms
in~\citep{ainsworth2011bernstein,kirby2011fast}
are used to compute the matrix-vector product, this gives a total
operation count of $\mathcal{O}(n^{d+2})$. Interestingly, this ties
the per-element cost of the triangular solves with a pre-computed Cholesky factorization when $d=2$, but without
the startup or storage cost. It even beats the pre-factored solves when
$d \geq 3$, but still loses asymptotically to the cost of
evaluating $\mathrm{F(u)}$.
However, in light of the large condition number given by
Corollary~\ref{cor:badguy}, it is doubtful whether this iteration
count can be realized in actual floating point arithmetic.
The high condition number also suggests an additional source of error
beyond discretization error. Suppose that we commit an error of order
$\epsilon$ in solving $M x = y$, computing instead some $\hat{x}$ such
that $\left\| x - \hat{x} \right\| = \epsilon$ in the $\infty$ norm.
Let $u$ and $\hat{u}$ be the polynomial with B-form coefficients $x$
and $\hat{x}$, respectively. Because a polynomial in B-form lies in
the convex hull of its control points~\citep{LaiSch07}, we also know
that $u$ and $\hat{u}$ differ by at most this same $\epsilon$ in the
max-norm. Consequently, the roundoff error in mass inversion can
conceivably pollute the finite element approximation at high order,
although ten-digit accuracy, say, will still only give a maximum of $10^{-10}$
additional pointwise error in the finite element solution -- typically
well below discretization error.
\subsection{Block structure and a fast solution algorithm}
Here, we recall several facts proved in~\citep{kirby2011fast} related
to the block structure of $M^{d,m,n}$, which we will apply now for
solving square systems.
We consider partitioning the mass matrix formula~(\ref{eq:Mdef}) by
freezing the first entry in $\alpha$ and $\beta$. Since there are
$m+1$ possible values for
$\alpha_0$ and $n+1$ for $\beta_0$, this partitions
$M^{d,m,n}$ into an $(m+1) \times (n+1)$ array, with blocks
of varying size. In fact, each block
$M^{d,m,n}_{\alpha_0,\beta_0}$ is
$P^{d-1}_{m-\alpha_0} \times P^{d-1}_{n-\beta_0}$.
These blocks are themselves, up to scaling,
Bernstein mass matrices of lower dimension. In particular, we showed that
\begin{equation}
\label{eq:Mblock}
M^{d,m,n}_{\alpha_0,\beta_0} =
\frac{\binom{m}{\alpha_0}
\binom{n}{\beta_0}}{\binom{m+n+d-1}{\alpha_0+\beta_0} \left( m + n +
d \right)}
M^{d-1,m-\alpha_0,n-\beta_0}.
\end{equation}
We introduce the $(m+1)\times (n+1)$ array consisting of the scalars
multiplying the lower-dimensional mass matrices as
\begin{equation}
\label{eq:nu}
\nu^{d,m,n}_{\alpha_0,\beta_0} =
\frac{\binom{m}{\alpha_0}
\binom{n}{\beta_0}}{\binom{m+n+d-1}{\alpha_0+\beta_0} \left( m + n +
d \right)}
\end{equation}
so that $M^{d,m,n}$ satisfies the block structure, with superscripts
on $\nu$ terms dropped for clarity
\begin{equation}
M^{d,m,n} =
\begin{pmatrix}
\nu_{0,0} M^{d-1,m,n} & \nu_{0,1} M^{d-1,m,n-1} & \dots & \nu_{0,n} M^{d-1,m,0} \\
\nu_{1,0} M^{d-1,m-1,n} & \nu_{1,1} M^{d-1,m-1,n-1} & \dots &
\nu_{1,n} M^{d-1,m-1,0} \\
\vdots & \vdots & \ddots & \vdots \\
\nu_{m,0} M^{d-1,0,n} & \nu_{m,1} M^{d-1,0,n-1} & \dots & \nu_{m,n} M^{d-1,0,0} \\
\end{pmatrix}.
\end{equation}
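An entrywise spot check of~\eqref{eq:Mblock} and~\eqref{eq:nu} directly from~\eqref{eq:Mdef} (our own illustration; the particular indices below are arbitrary choices):
\begin{verbatim}
from math import comb, factorial, prod, isclose

def mass_entry(d, m, n, alpha, beta):
    # One entry of M^{d,m,n} from eq. (Mdef); alpha, beta are (d+1)-tuples.
    mf = lambda a: prod(factorial(x) for x in a)
    ab = tuple(x + y for x, y in zip(alpha, beta))
    return factorial(m) * factorial(n) * mf(ab) / (
        factorial(m + n + d) * mf(alpha) * mf(beta))

d, m, n = 3, 4, 3
a0, b0 = 1, 2
alpha_rest, beta_rest = (2, 0, 1), (0, 1, 0)   # |alpha_rest| = m - a0, etc.
nu = comb(m, a0) * comb(n, b0) / (comb(m + n + d - 1, a0 + b0) * (m + n + d))
lhs = mass_entry(d, m, n, (a0,) + alpha_rest, (b0,) + beta_rest)
rhs = nu * mass_entry(d - 1, m - a0, n - b0, alpha_rest, beta_rest)
assert isclose(lhs, rhs)
\end{verbatim}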
We partition the right-hand side and solution vectors $y$ and $x$
conformally to $M$, so that the block $y_j$ is of dimension
$P^{d-1}_{n-j}$ and corresponds to a polynomial's $B$-form
coefficients with first indices equal to $j$.
We write the linear system in an augmented block matrix as
\begin{equation}
\label{eq:aug}
\left(
\begin{array}{cccc|c}
\nu_{0,0} M^{d-1,n,n} & \nu_{0,1} M^{d-1,n,n-1} & \dots & \nu_{0,n} M^{d-1,n,0} & y_0 \\
\nu_{1,0} M^{d-1,n-1,n} & \nu_{1,1} M^{d-1,n-1,n-1} & \dots & \nu_{1,n} M^{d-1,n-1,0} &
y_1 \\
\vdots & \vdots & \ddots & \vdots & \vdots \\
\nu_{n,0} M^{d-1,0,n} & \nu_{n,1} M^{d-1,0,n-1} & \dots & \nu_{n,n} M^{d-1,0,0} & y_n \\
\end{array}
\right).
\end{equation}
From~\citep{kirby2011fast}, we also know that mass matrices of the
same dimension but differing degrees are related via degree elevation
operators by
\begin{equation}
\label{eq:elTM}
M^{d,m-1,n} = \left( E^{d,m} \right)^t M^{d,m,n}.
\end{equation}
and
\begin{equation}
\label{eq:Mel}
M^{d,m,n-1} = M^{d,m,n} E^{d,n}.
\end{equation}
Iteratively, these results give
\begin{equation}
\label{eq:elevateforward}
M^{d,m-i,n} = \left( E^{d,m-i,m} \right)^T M^{d,m,n}.
\end{equation}
for $1 \leq i \leq m$ and
\begin{equation}
\label{eq:elevatebackward}
M^{d,m,n-j} = M^{d,m,n} E^{d,n-j,n}
\end{equation}
for $1 \leq j \leq n$.
In~\citep{kirby2011fast}, we used these features to provide a fast
algorithm for matrix multiplication, but here we use them to
efficiently solve linear systems.
Carrying out blockwise Gaussian elimination in~\eqref{eq:aug}, we multiply the first row,
labeled with 0 rather than 1, by
$\frac{\nu_{1,0}}{\nu_{0,0}} M^{d-1,n-1,n} \left( M^{d-1,n,n} \right)^{-1}$
and subtract from row 1 to introduce a zero
block below the diagonal. However, this simplifies, as~\eqref{eq:elTM} tells us that
\begin{equation}
\label{eq:elimMtrick}
M^{d-1,n-1,n}
\left(M^{d-1,n,n} \right)^{-1}
= \left( E^{d-1,n} \right)^t M^{d-1,n,n} \left(M^{d-1,n,n}
\right)^{-1}
= \left( E^{d-1,n} \right)^t.
\end{equation}
Because of this, along row 1 for $j \geq 1$, the elimination step gives entries of the form
\[
\nu_{1j} M^{d-1,n-1,n-j} - \frac{\nu_{10}\nu_{0j}}{\nu_{00}} \left( E^{d-1,n} \right)^t M^{d-1,n,n-j},
\]
but~\eqref{eq:elTM} renders this as simply
\begin{equation}
\nu_{1j} M^{d-1,n-1,n-j} - \frac{\nu_{10}\nu_{0j}}{\nu_{00}} M^{d-1,n-1,n-j}
= \left( \nu_{1j} - \frac{\nu_{10}\nu_{0j}}{\nu_{00}} \right) M^{d-1,n-1,n-j}.
\end{equation}
That is, the row obtained by block Gaussian elimination is the same as
one would obtain simply by performing a step of Gaussian elimination on the
matrix of coefficients $N^{d,n}$ containing the $\nu$ values above, as the
matrices those coefficients scale do not change under the row
operations. Hence, performing elimination on the $(n+1)\times(n+1)$
matrix, independent of the dimension $d$, forms a critical step in the
elimination process. After the block upper triangularization, we arrive at
a system of the form
\begin{equation}
\label{eq:augut}
\left(
\begin{array}{cccc|c}
\widetilde{\nu}_{0,0} M^{d-1,n,n} & \widetilde{\nu}_{0,1} M^{d-1,n,n-1} & \dots & \widetilde{\nu}_{0,n} M^{d-1,n,0} & \widetilde{y}_0 \\
0 & \widetilde{\nu}_{1,1} M^{d-1,n-1,n-1} & \dots & \widetilde{\nu}_{1,n} M^{d-1,n-1,0} &
\widetilde{y}_1 \\
\vdots & \vdots & \ddots & \vdots & \vdots \\
0 & 0 & \dots & \widetilde{\nu}_{n,n} M^{d-1,0,0} & \widetilde{y}_n \\
\end{array}
\right),
\end{equation}
where the tildes denote quantities updated through elimination.
The backward substitution proceeds along similar lines, though it
requires the solution of linear systems with mass matrices in
dimension $d-1$. Multiplying through each block row by
$\frac{1}{\widetilde{\nu}_{i,i}} (M^{d-1,n-i})^{-1}$ then gives, using~\eqref{eq:elevatebackward}
\begin{equation}
\label{eq:auguut}
\left(
\begin{array}{cccc|c}
I & \widetilde{\nu}^\prime_{0,1} E^{d-1,n-1,n} & \dots
& \widetilde{\nu}^\prime_{0,n} E^{d-1,0,n} & \widetilde{y}^\prime_0 \\
0 & I & \dots & \widetilde{\nu}^\prime_{1,n} E^{d-1,0,n-1} &
\widetilde{y}^\prime_1 \\
\vdots & \vdots & \ddots & \vdots & \vdots \\
0 & 0 & \dots & I & \widetilde{y}^\prime_n \\
\end{array}
\right),
\end{equation}
where the primes denote quantities updated in the process. We reflect
this in the updated $N$ matrix by scaling each row by its diagonal
entry as we proceed.
At this point, the last block of the solution is revealed,
and can be successively elevated, scaled, and subtracted from the right-hand
side to eliminate it from previous blocks. This reveals the next-to last
block, and so-on. We summarize this discussion in
Algorithm~\ref{alg:elim}.
\begin{algorithm}
\label{alg:elim}
\caption{Block-wise Gaussian elimination for solving $M^{d,n} x = y$}
\begin{algorithmic}
\REQUIRE Input vector $y$
\ENSURE On output, $y$ is overwritten with $(M^{d,n})^{-1} y$
\STATE Initialize coefficient matrix $N_{a,b} := \frac{\binom{n}{a}\binom{n}{b}}{\binom{2n+d-1}{a+b}\left(2n+d\right)}$
\FOR[Forward elimination]{$a := 0$ \TO $n$}
\STATE $z \gets y_a$
\FOR{$b := a+1$ \TO $n$}
\STATE $z \gets (E^{d-1,n-b+1})^T z$
\STATE $y_b \gets y_b - \frac{N_{b,a}}{N_{a,a}} z$
\FOR[Elimination on $N$]{$c := a$ \TO $n$}
\STATE $N_{b,c} \gets N_{b,c} - \frac{N_{b,a}N_{a,c}}{N_{a,a}}$
\ENDFOR
\ENDFOR
\ENDFOR
\FOR[Lower-dimensional inversion]{$a := 0$ \TO $n$}
\STATE $y_a \gets \frac{1}{N_{a,a}} \left( M^{d-1,n-a,n-a}
\right)^{-1} y_a$
\FOR{$b:= a$ \TO $n$}
\STATE $N_{a,b} \gets \frac{N_{a,b}}{N_{a,a}}$
\ENDFOR
\ENDFOR
\FOR[Backward elimination]{$a:= n$ \TO $0$}
\STATE $z \gets y_a$
\FOR{$b:= a-1$ \TO $0$}
\STATE $z \gets E^{d-1,n-b} z$
\STATE $y_b \gets y_b - N_{b,a} z$
\ENDFOR
\ENDFOR
\end{algorithmic}
\end{algorithm}
Since we will need to solve many linear systems with the same element
mass matrix, it makes sense to extend our elimination algorithm into
a reusable factorization. We will derive a blockwise $L D L^T$
factorization of the element matrix, very much along the lines of the
standard factorization~\cite{strang}.
Let $N^{d,n}$ be the matrix of coefficients given in~\eqref{eq:nu}.
Suppose that we have its $LDL^T$ factorization
\begin{equation}
N^{d,n} = L_N^{d,n} D_N^{d,n} \left( L_N^{d,n} \right)^t,
\end{equation}
with
$\ell_{ij}$ and $d_{ii}$ the entries of $L_N^{d,n}$ and $D_N^{d,n}$, respectively.
We also define $U_N^{d,n} = D_N^{d,n} \left( L_N^{d,n} \right)^t$ with
$u_{ij} = d_{ii} \ell_{ji}$.
Then, we can use the block matrix
\[
\widetilde{L}^{0}
=
\begin{pmatrix}
I & 0 & \dots & 0 \\
-\ell_{10} \left( E^{d-1,n-1,n} \right)^T & I & \dots & 0 \\
-\ell_{20} \left( E^{d-1,n-2,n} \right)^T & 0 & \dots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
-\ell_{n0} \left( E^{d-1,0,n} \right)^T & 0 & \dots & I
\end{pmatrix}
\]
to act on $M^{d,n}$ to produce zeros below the diagonal in the first
block of columns. Similarly, we act on $\widetilde{L}^0 M^{d,n}$ with
\[
\widetilde{L}^1
= \begin{pmatrix}
I & 0 &\dots & 0 \\
0 & I & \dots & 0 \\
0 & -\ell_{21} \left( E^{d-1,n-2,n-1} \right)^T & \dots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & -\ell_{n1} \left( E^{d-1,0,n-1} \right)^T & \dots & I
\end{pmatrix}
\]
to introduce zeros below the diagonal in the second block of columns.
Indeed, we have a sequence of block matrices $\widetilde{L}^{k}$ for
$0 \leq k < n$ such that $\widetilde{L}^k_{ij}$ is
$P^{d-1}_{n-i} \times P^{d-1}_{n-j}$ with
\[
\widetilde{L}^k_{ij} =
\begin{cases}
I & \text{for $i = j$} \\
0 & \text{for $i \neq j$ and $j \neq k$} \\
0 & \text{for $i < j$ and $j = k$} \\
-\ell_{ij} \left( E^{d-1,n-i,n-j} \right)^T & \text{for $i > j$ and
$j=k$} \\
\end{cases}
\]
Then, in fact, we have that
\[
\widetilde{L}^{n-1} \widetilde{L}^{n-2} \dots
\widetilde{L}^0 M^{d,n}
=
\begin{pmatrix}
u_{00} M^{d-1,n,n} & u_{01} M^{d-1,n,n-1} & \dots & u_{0n} M^{d-1,n,0}
\\
0 & u_{11} M^{d-1,n-1,n-1} & \dots & u_{1n} M^{d-1,n-1,0} \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \dots & u_{nn} M^{d-1,0,0}
\end{pmatrix}
\]
Much as with elementary row matrices for classic $LU$ factorization,
we can invert each of these
$\widetilde{L}^k$ matrices simply by flipping the sign of the
multiplier, so that
\[
\left( \widetilde{L}^k \right)^{-1}_{ij} =
\begin{cases}
I & \text{for $i = j$} \\
0 & \text{for $i \neq j$ and $j \neq k$} \\
0 & \text{for $i < j$ and $j = k$} \\
\ell_{ij} \left( E^{d-1,n-i,n-j} \right)^T & \text{for $i > j$ and
$j=k$} \\
\end{cases}.
\]
Then, we define $L^{d,n}$ to be the inverse of these products
\begin{equation}
\label{eq:Ldn}
L^{d,n} =
\left(
\widetilde{L}^{n-1} \widetilde{L}^{n-2} \dots \widetilde{L}^0
\right)^{-1}
= \left( \widetilde{L}^0 \right)^{-1}
\left( \widetilde{L}^1 \right)^{-1}
\dots
\left( \widetilde{L}^{n-1} \right)^{-1}
\end{equation}
so that $\left( L^{d,n} \right)^{-1} M^{d,n} \equiv U^{d,n}$ is block
upper triangular. Like standard factorization, we can
also multiply the elimination matrices together so that
\begin{equation}
\label{eq:Ldinv}
\left( L^{d,n} \right)^{-1}
=
\begin{pmatrix}
I & 0 &\dots & 0 \\
-\ell_{10} \left(E^{d-1,n-1,n}\right)^t & I & \dots & 0 \\
-\ell_{20} \left(E^{d-1,n-2,n}\right)^t & -\ell_{21} \left(E^{d-1,n-2,n-1}\right)^t & \dots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
-\ell_{n0} \left(E^{d-1,0,n}\right)^t & -\ell_{n1} \left(E^{d-1,0,n-1}\right)^t & \dots & I
\end{pmatrix}.
\end{equation}
Moreover, we can turn the block upper triangular matrix into a block
diagonal one times the transpose of $L^{d,n}$ giving a kind of block
$LDL^T$ factorization. We factor out the pivot blocks from
each row, using~\eqref{eq:elimMtrick} so that
\[
U^{d,n} =
\begin{pmatrix}
d_{00} M^{d-1,n,n} & 0 & \dots & 0 \\
0 & d_{11} M^{d-1,n-1,n-1} & \dots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \dots & d_{nn} M^{d-1,0,0}
\end{pmatrix}
\begin{pmatrix}
I & \ell_{10} E^{d-1,n-1,n} & \dots & \ell_{n0} E^{d-1,0,n} \\
0 & I & \dots & \ell_{n1} E^{d-1,0,n-1} \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \dots & I
\end{pmatrix}.
\]
The factor on the right is just $\left( L^{d,n} \right)^T$.
We introduce the block-diagonal matrix $\Delta^{d,n}$ by
\begin{equation}
\label{eq:Delta}
\Delta_{ii} = d_{ii} M^{d-1,n-i}.
\end{equation}
Our discussion has established:
\begin{theorem}
\label{thm:ldlt}
The Bernstein mass matrix $M^{d,n}$ admits the block factorization
\begin{equation}
M^{d,n} =
L^{d,n} \Delta^{d,n} \left( L^{d,n} \right)^T.
\end{equation}
\end{theorem}
We can apply the decomposition inductively down spatial dimension, so
that each of
the blocks in $\Delta^{d,n}$ can be also factored according to
Theorem~\ref{thm:ldlt}. This fully expresses any mass matrix as a
diagonal matrix sandwiched in between sequences of sparse unit
triangular matrices.
So, computing the $LDL^T$ factorization of $M^{d,n}$ requires
computing the $LDL^T$ factorization of the one-dimensional coefficient
matrix $N^{d,n}$. Supposing we use a standard direct method such as
Cholesky factorization to solve
the one-dimensional mass matrices in the base case, we will have a
start-up cost of factoring $n+1$ matrices of size no larger than
$n+1$. With Cholesky, this is a $\mathcal{O}(n^4)$ process, although
since the one-dimensional matrices factor
into Hankel matrices pre- and post-multiplied by diagonal matrices,
one could use Levinson's or Bareiss'
algorithm~\cite{bareiss1969numerical,levinson1947wiener} to obtain a
merely $\mathcal{O}(n^3)$ startup phase.
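The start-up computation is small enough to state concretely. The sketch below (our own code, illustrative only; an unpivoted factorization is assumed to exist, as the block elimination above requires) forms $N^{d,n}$ from~\eqref{eq:nu} with $m=n$ and computes its $LDL^T$ factorization:
\begin{verbatim}
import numpy as np
from math import comb

def coefficient_matrix(d, n):
    # The (n+1) x (n+1) matrix N^{d,n} of nu-values from eq. (nu) with m = n.
    K = 2 * n + d
    return np.array([[comb(n, a) * comb(n, b) / (comb(K - 1, a + b) * K)
                      for b in range(n + 1)] for a in range(n + 1)])

def ldlt(A):
    # Unpivoted LDL^T of a symmetric matrix with nonzero pivots.
    A = np.array(A, dtype=float)
    m = A.shape[0]
    L, D = np.eye(m), np.zeros(m)
    for k in range(m):
        D[k] = A[k, k]
        L[k+1:, k] = A[k+1:, k] / D[k]
        A[k+1:, k+1:] -= np.outer(L[k+1:, k], A[k, k+1:])
    return L, D

N = coefficient_matrix(2, 4)
L, D = ldlt(N)
assert np.allclose(L @ np.diag(D) @ L.T, N)
\end{verbatim}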
\begin{algorithm}
\label{alg:ldlt}
\caption{Mass inversion via block-recursive $LDL^T$ factorization
for $d \geq 2$}
\begin{algorithmic}
\REQUIRE $N^{d,n}$ factored as $N^{d,n} = L D L^T$
\REQUIRE Input vector $y$
\ENSURE On output, $x = (M^{d,n})^{-1} y$
\STATE Initialize vector $x \gets y$
\FOR[Apply $(L^{d,n})^{-1}$ to $y$, store in $x$]{$a := 0$ \TO $n$}
\STATE $z \gets y_a$
\FOR{$b := a+1$ \TO $n$}
\STATE $z \gets \left( E^{d-1,n-b+1} \right)^t z$
\STATE $x_b \gets x_b - L_{b,a} z$
\ENDFOR
\ENDFOR
\FOR[Overwrite $x$ with $(\Delta^{d,n})^{-1} x$]{$a := 0$ \TO $n$}
\STATE $x_a \gets \frac{1}{D_{a,a}} \left( M^{d-1,n-a} \right)^{-1} x_a$
\ENDFOR
\FOR[Overwrite $x$ with $(L^{d,n})^{-T}x$]{$a := n$ \TO $0$}
\STATE $z \gets x_a$
\FOR {$b := a-1$ \TO $0$}
\STATE $z \gets E^{d-1,n-b} z$
\STATE $x_b \gets x_b - L_{a, b} z$
\ENDFOR
\ENDFOR
\end{algorithmic}
\end{algorithm}
Now, we also consider the cost of solving a linear system using the
block factorization, pseudocode for which is presented in Algorithm~\ref{alg:ldlt}. In two dimensions, one must apply the inverse of
$L^{2,n}$, followed by the inverse of $\Delta^{2,n}$, accomplished by
triangular solves using pre-factored one-dimensional mass matrices,
and the inverse of $(L^{2,n})^T$. In fact, the action of applying
$(L^{2,n})^{-1}$ requires exactly the same process as described above
for block Gaussian elimination, except the arithmetic on the $\nu$
values is handled in preprocessing.
That is, for each block $y_j$, we will need to compute
$\ell_{ij} (E^{1,n-i,n-j})^T y_j$ for $j < i \leq n$ and
accumulate scalings of these vectors into corresponding blocks of the
result. Since each $(E^{1,n-i,n-j})^T$ is a product of single-step
transposed elevations, it is helpful to build these vectors
incrementally and reuse them as $i$ increases. Applying
$(L^{2,n})^{-1}$ then requires applying single-step transposed
elevations for all relevant degrees, together with all of the
axpy operations. Since the one-dimensional elevation into degree $i$
has $2(i+1)$ nonzeros in it, the required elevations cost
\begin{equation}
\label{eq:elev2count}
\sum_{i=1}^n \sum_{j=1}^{i-1} 2(j+1) = \frac{n(n^2+3n-4)}{3},
\end{equation}
operations, which is $\mathcal{O}(n^3)$,
and we also have a comparable number of operations for the axpy-like
operations to accumulate the result. A similar discussion shows that
applying $(L^{2,n})^{-T}$ requires the same number of operations.
Between these stages, one must invert the lower-dimensional mass
matrices using the pre-computed Cholesky factorizations and perform
the scalings to apply $\Delta^{-1}$. Since a
pair of $m \times m$ triangular solves costs $m(m+1)$
operations, the total cost of the one-dimensional mass inversions is
\[
\sum_{i=0}^{n} (i+1)(i+2) = \frac{(n+1)(n+2)(n+3)}{3},
\]
together with the lower-order term for scalings
\[
\sum_{i=0}^{n} P^1_i = \sum_{i=0}^{n} (i+1) = \frac{(n+1)(n+2)}{2}.
\]
So, the whole three-stage process is $\mathcal{O}(n^3)$ per element.
In dimension $d > 2$, we may proceed inductively in space dimension to
show that Algorithm~\ref{alg:ldlt} requires, after start-up,
$\mathcal{O}(n^{d+1})$ operations. The application of $\Delta^{-1}$
will always require $n+1$ inversions of $(d-1)$-dimensional mass
matrices, each of which costs $\mathcal{O}(n^{d})$ operations by the
induction hypothesis. Inverting $\Delta^{d,n}$ onto a vector will cost
$\mathcal{O}(n^{d+1})$ operations for all $n$ and $d$. To see that a
similar complexity holds for applying the inverses of $L^{d,n}$ and
its transpose, one can simply replace the summand
in~\eqref{eq:elev2count} with $2 P^{d-1}_j$ and execute the sum. To
conclude,
\begin{theorem}
Algorithm~\ref{alg:ldlt} applies the inverse of $M^{d,n}$ to an
arbitrary vector in $\mathcal{O}(n^{d+1})$ operations.
\end{theorem}
\section{Numerical results}
\subsection{Mass inversion}
Because of Corollary~\ref{cor:badguy}, we must pay special attention
to the accuracy with which linear systems involving the mass matrix
are computed. We began with Cholesky decomposition as a baseline.
For degrees one through twenty in one, two, and three space dimension,
we explicitly formed the reference mass matrix in Python and used the
\texttt{scipy}~\citep{jones2001scipy} interface to LAPACK to form
the Cholesky decomposition. Then, we chose several
random vectors to be sample solutions and formed the right-hand side by
direct matrix-vector multiplication. In Figure~\ref{fig:cholacc}, we
plot the relative accuracy as a function of degree in each space
dimension. Although we observe exponential growth in the error (fully
expected in light of Corollary~\ref{cor:badguy}), we see that we still
obtain at least ten digits of relative accuracy up to degree ten.
\begin{figure}
\caption{Relative accuracy of solving linear systems with mass
matrices of various degrees using Cholesky decomposition.}
\label{fig:cholacc}
\end{figure}
Second, we also attempted to solve the linear system using conjugate
gradients. We again used systems with random solutions,
both letting CG run to a relative residual tolerance of
$10^{-12}$ and stopping after $n+1$ iterations in light of
Corollary~\ref{cor:wonthappen}. We display the results with a fixed
tolerance in Figure~\ref{fig:cgtol}: Figure~\ref{acc}
shows the actual accuracy obtained for each polynomial degree and
Figure~\ref{its} gives the actual iteration count required. Like
Cholesky factorization, this approach gives nearly ten-digit accuracy
up to degree ten polynomials. On the other hand,
Figure~\ref{fig:cgits} shows that accuracy degrades markedly when
only $n+1$ iterations are used.
Finally, our block algorithm gives accuracy comparable to that of
Cholesky factorization. Our two-dimensional implementation of
Algorithm~\ref{alg:ldlt} uses Cholesky factorizations of the
one-dimensional mass matrices. Rather than full recursion, our
three-dimensional implementation uses Cholesky factorization of the
two-dimensional matrices. At any rate, Figure~\ref{fig:blockacc}
shows, when compared to Figure~\ref{fig:cholacc}, that we lose very
little additional accuracy over Cholesky factorization. Whether
replacing the one-dimensional solver with a specialized method
for totally positive matrices~\cite{koev2007accurate} would also give
high accuracy for the higher-dimensional problems will be the subject
of future investigation.
\begin{figure}
\caption{
Accuracy obtained solving mass matrix system using conjugate gradient
iteration in one, two, and three space dimensions.
}
\label{acc}
\label{its}
\label{fig:cgtol}
\end{figure}
\begin{figure}
\caption{Relative accuracy of solving $M^{d,n} x = y$ when the conjugate gradient iteration is stopped after $n+1$ iterations.}
\label{fig:cgits}
\end{figure}
\begin{figure}
\caption{Relative accuracy of solving linear systems with mass
matrices of various degrees using one level of the block
algorithm with Cholesky factorization for lower-dimensional
matrices.}
\label{fig:blockacc}
\end{figure}
\subsection{Timing for first-order acoustics}
We fixed a $32 \times 32$ square mesh subdivided into right triangles
and computed the time to perform the DG function evaluation (including
mass matrix inversion) at various polynomial degrees. We used the
mesh from DOLFIN~\citep{dolfin} and wrote the Bernstein polynomial algorithms
in Cython~\citep{behnelcython}.
With an $\mathcal{O}(n^3)$ complexity for two-dimensional problems, we
expect a doubling of the polynomial degree to produce an eightfold
increase in run-time. In Figure~\ref{fig:2dtimings}, though, we see
even better results. In fact, a least-squares fit of the log-log data
in this figure from degrees five to fifteen gives a very near fit with
a slope of less than two (about 1.7) rather than three. Since small
calculations tend to run at lower flop rates, it is possible that we
are far from the asymptotic regime predicted by our operation counts.
\begin{figure}
\caption{Timing of a DG function evaluation for various polynomial
degrees on a $32 \times 32$ mesh.}
\label{fig:2dtimings}
\end{figure}
\section{Conclusions and Future Work}
Bernstein polynomials admit optimal-complexity algorithms for
discontinuous Galerkin methods for conservation laws. The dense
element mass matrices might, at first blush, seem to prevent this,
but their dimensionally recursive block structure and other interesting properties lead to an efficient blockwise factorization. Despite the large
condition numbers, our current algorithms seem sufficient to deliver
reasonable accuracy at moderate polynomial orders.
On the other hand, these results still leave much room for future
investigation. First, it makes sense to explore the possibilities of
slope limiting in the Bernstein basis. Second, while our
mass inversion algorithm is sufficient for moderate order, it may be
possible to construct a different algorithm that maintains the low
complexity while giving higher relative accuracy, enabling very high
approximation orders. Perhaps such algorithms will either utilize the techniques in~\citep{koev2007accurate} internally, or else extend them somehow.
Finally, our new algorithm, while of optimal complexity, is quite
intricate to implement and still is not well-tuned for high
performance. Finding ways to make these algorithms more performant
will have important practical benefits.
\end{document}
|
\begin{document}
\thispagestyle{plain}
\centerline{\Large \bf AN APPLICATION OF {\boldmath $\lambda$}-METHOD}
\centerline{\Large \bf ON INEQUALITIES OF SHAFER-FINK'S TYPE}
\footnotetext{Research partially supported by the MNTRS, Serbia, Grant No. 144020.}
\footnotetext{Keywords: Inverse sine; Upper and lower bounds; Shafer-Fink type inequality.}
\vspace*{6.00 mm}
\centerline{\large \it Branko J. Male\v sevi\' c}
\vspace*{4.00 mm}
\begin{center}
\parbox{25.0cc}{\scriptsize \textbf{Abstract. In this article $\lambda$-method of Mitrinovi\' c-Vasi\' c
\cite{MitrinovicVasic70} is applied to improve the upper bound for the arc$\,$sin function of L.~Zhu \cite{Zhu05}.}}
\end{center}
\noindent
\section{\Large \bf \boldmath \hspace*{-7.0 mm}
1. Inequalities of \textbf{\textsc{Shafer}}-\textbf{\textsc{Fink}}'s type}
\noindent
{\rm D.$\,$S. Mitrinovi\' c} in \cite{MitrinovicVasic70} considered the lower bound
of the arc$\,$sin function, which belongs to {\rm R.$\,$E. Shafer}. Namely,
the following statement is true.
\begin{th}
For $0 \leq x \leq 1$ the following inequalities are true$:$
\begin{equation}
\displaystyle\frac{3x}{2 + \sqrt{1-x^2}}
\leq
\displaystyle\frac{6(\sqrt{1+x} - \sqrt{1-x})}{4 + \sqrt{1+x} + \sqrt{1-x}}
\leq
\mbox{\rm arc$\,$sin} \, x \, .
\end{equation}
\end{th}
\noindent
{\rm A.$\,$M. Fink} proved the following statement in \cite{Fink95}.
\begin{th}
For $0 \leq x \leq 1$ the following inequalities are true$:$
\begin{equation}
\label{Ineq_Fink95}
\displaystyle\frac{3x}{2 + \sqrt{1-x^2}}
\leq
\mbox{\rm arc$\,$sin} \, x
\leq
\displaystyle\frac{\pi x}{2 + \sqrt{1-x^2}}.
\end{equation}
\end{th}
\noindent
{\rm B.$\,$J. Male\v sevi\' c} proved the following statement in \cite{Malesevic97}.
\begin{th}
For $0 \leq x \leq 1$ the following inequalities are true$:$
\begin{equation}
\label{Ineq_Malesevic97}
\displaystyle\frac{3x}{2 + \sqrt{1-x^2}}
\leq
\mbox{\rm arc$\,$sin} \, x
\leq
\displaystyle\frac{\mbox{\small $\displaystyle\frac{\pi}{\pi-2}$} x}{
\mbox{\small $\displaystyle\frac{2}{\pi-2}$} + \sqrt{1-x^2}}
\leq
\displaystyle\frac{\pi x}{2 + \sqrt{1-x^2}}.
\end{equation}
\end{th}
\noindent
The main result of the article \cite{Malesevic97} can be formulated with the next statement.
\begin{pr}
\label{Prop_01} In the family of the functions$:$
\begin{equation}
\label{Prop_Family_01}
f_{b}(x) = \displaystyle\frac{(b+1)x}{b + \sqrt{1-x^2}}
\quad (0 \leq x \leq 1),
\end{equation}
according to the parameter $b > 0$, the function $f_{2}(x)$ is the greatest lower bound of the
$\mbox{arc$\,$sin}\,x$ function and the function $f_{2/(\pi-2)}(x)$ is the least upper bound of the
$\mbox{arc$\,$sin}\,x$ function.
\end{pr}
\noindent
{\rm L. Zhu} proved the following statement in \cite{Zhu05}.
\begin{th}
For $x \in [0,1]$ the following inequalities are true$:$
\begin{equation}
\label{Ineq_Zhu05}
\begin{array}{rcl}
\displaystyle\frac{3x}{2 + \sqrt{1-x^2}}
&\!\!\!\leq\!\!\!&
\displaystyle\frac{6(\sqrt{1+x} - \sqrt{1-x})}{4 + \sqrt{1+x} + \sqrt{1-x}}
\,\leq\,
\mbox{\rm arc$\,$sin} \, x \\[3.0 ex]
&\!\!\!\leq\!\!\!&
\displaystyle\frac{\mbox{\small $\pi(\sqrt{2}+\displaystyle\frac{1}{2})$}
(\sqrt{1+x} - \sqrt{1-x})}{4 + \sqrt{1+x} + \sqrt{1-x}}
\,\leq\,
\displaystyle\frac{\pi x}{2 + \sqrt{1-x^2}}.
\end{array}
\end{equation}
\end{th}
\noindent
In this article we further improve the upper bound of the arc$\,$sin function. Namely, in the next
section we will give proof of the following theorem:
\begin{th}
\label{Th_second}
For $x \in [0,1]$ the following inequalities are true$:$
\begin{equation}
\label{Ineq_Malesevic06}
\begin{array}{rcl}
\displaystyle\frac{3x}{2 + \sqrt{1-x^2}}
&\!\!\!\leq\!\!\!&
\displaystyle\frac{6(\sqrt{1+x} - \sqrt{1-x})}{4 + \sqrt{1+x} + \sqrt{1-x}}
\,\leq\,
\mbox{\rm arc$\,$sin} \, x \\[3.0 ex]
&\!\!\!\leq\!\!\!&
\displaystyle\frac{\mbox{\small $\displaystyle\frac{\pi(2-\sqrt{2})}{\pi-2\sqrt{2}}$}( \sqrt{1+x} - \sqrt{1-x} )}{
\mbox{\small $\displaystyle\frac{\sqrt{2}(4-\pi)}{\pi-2\sqrt{2}}$} + \sqrt{1+x} + \sqrt{1-x}}
\\[5.0 ex]
&\!\!\!\leq\!\!\!&
\displaystyle\frac{\mbox{\small $\pi(\sqrt{2}+\displaystyle\frac{1}{2})$}
(\sqrt{1+x} - \sqrt{1-x})}{4 + \sqrt{1+x} + \sqrt{1-x}}
\,\leq\,
\displaystyle\frac{\pi x}{2 + \sqrt{1-x^2}}.
\end{array}
\end{equation}
\end{th}
\begin{re}
Using numerical method from {\rm \cite{Malesevic06c}} we have the following conclusions$:$
\noindent
{\boldmath $1^{0}.$} For values $x \in (0,0.387 \, 266 \, 274 \ldots)$ the following inequality is true$:$
\begin{equation}
\mbox{\rm arc$\,$sin}\,x < \displaystyle\frac{\mbox{\small
$\displaystyle\frac{\pi}{\pi-2}$} x}{ \mbox{\small
$\displaystyle\frac{2}{\pi-2}$} + \sqrt{1-x^2}} <
\displaystyle\frac{\mbox{\small
$\pi(\sqrt{2}+\displaystyle\frac{1}{2})$} (\sqrt{1+x} -
\sqrt{1-x})}{4 + \sqrt{1+x} + \sqrt{1-x}},
\end{equation}
and for values $x \in (0.387 \, 266 \, 274 \ldots,1)$ the following inequality is true$:$
\begin{equation}
\mbox{\rm arc$\,$sin}\,x < \displaystyle\frac{\mbox{\small
$\pi(\sqrt{2}+\displaystyle\frac{1}{2})$} (\sqrt{1+x} -
\sqrt{1-x})}{4 + \sqrt{1+x} + \sqrt{1-x}} <
\displaystyle\frac{\mbox{\small $\displaystyle\frac{\pi}{\pi-2}$}
x}{ \mbox{\small $\displaystyle\frac{2}{\pi-2}$} + \sqrt{1-x^2}}.
\end{equation}
Numerically determined constant $c = 0.387 \, 266 \, 274 \ldots$ is the unique number where
the previous bounds have the same values over $(0,1)$.
\noindent {\boldmath $2^{0}.$} For values $x \in (0, 1)$ the following inequality is true$:$
\begin{equation}
\mbox{\rm arc$\,$sin}\,x < \displaystyle\frac{\mbox{\small
$\displaystyle\frac{\pi(2-\sqrt{2})}{\pi-2\sqrt{2}}$}( \sqrt{1+x} -
\sqrt{1-x} )}{ \mbox{\small
$\displaystyle\frac{\sqrt{2}(4-\pi)}{\pi-2\sqrt{2}}$} + \sqrt{1+x} +
\sqrt{1-x}} < \displaystyle\frac{\mbox{\small
$\displaystyle\frac{\pi}{\pi-2}$} x}{ \mbox{\small
$\displaystyle\frac{2}{\pi-2}$} + \sqrt{1-x^2}}.
\end{equation}
\end{re}
\break
\noindent
\section{\Large \bf \boldmath \hspace*{-7.0 mm}
2. The main results}
\noindent
In this article, using $\lambda$-method of Mitrinovi\' c-Vasi\' c we give an analogous statement
to Proposition \ref{Prop_01}. Let us notice that from inequality given by {\rm L. Zhu} \cite{Zhu05}:
\begin{equation}
\displaystyle\frac{6(\sqrt{1+x} - \sqrt{1-x})}{4 + \sqrt{1+x} +
\sqrt{1-x}} \,\leq\, \mbox{\rm arc$\,$sin} \, x \,\leq\,
\displaystyle\frac{\mbox{\small
$\pi(\sqrt{2}+\displaystyle\frac{1}{2})$} (\sqrt{1+x} -
\sqrt{1-x})}{4 + \sqrt{1+x} + \sqrt{1-x}},
\end{equation}
for $x \in [0,1]$, we can conclude that the function $\varphi(x) = \mbox{arc$\,$sin}\,x$ has a lower bound
and upper bound in the family of the functions:
\begin{equation}
\Phi_{\alpha,\beta}(x)
=
\displaystyle\frac{\alpha(\sqrt{1+x} - \sqrt{1-x})}{\beta + \sqrt{1+x} + \sqrt{1-x}}
\quad (0 \leq x \leq 1),
\end{equation}
for some values of parameters $\alpha, \beta > 0$. Next for $x = 0$ it is true that $\Phi_{\alpha,\beta}(0) =0$,
for $\alpha, \beta > 0$. On the other hand, for values $x \in (0, 1]$ it is true:
\begin{equation}
\label{Ineq_Comp_1}
\Phi_{\alpha_{1},\beta_{1}}(x) >
\Phi_{\alpha_{2},\beta_{2}}(x) \Longleftrightarrow \alpha_{1}
\beta_{2} - \alpha_{2} \beta_{1}
>
(\alpha_{2} - \alpha_{1}) (\sqrt{1\!+\!x} + \sqrt{1\!-\!x}),
\end{equation}
for $\alpha_{1,2}, \beta_{1,2} > 0$. Let us apply $\lambda$-method of Mitrinovi\' c-Vasi\' c on the considered
two-parameters family $\Phi_{\alpha,\beta}(x)$ in order to determine the bounds of the function $\varphi(x)$
under the following conditions:
\begin{equation}
\label{Cond_Ph}
\Phi_{\alpha,\beta}(0) = \varphi(0)
\quad \mbox{and} \quad
\displaystyle\frac{d}{dx} \Phi_{\alpha,\beta}(0) = \displaystyle\frac{d}{dx} \varphi(0).
\end{equation}
It follows that $\alpha = \beta + 2$. In that way we get one-parameter subfamily:
\begin{equation}
\label{Family_b}
f_{\beta}(x) = \Phi_{\beta+2,\beta}(x) =
\displaystyle\frac{(\beta+2)(\sqrt{1+x} - \sqrt{1-x})}{\beta +
\sqrt{1+x} + \sqrt{1-x}} \qquad (0 \leq x \leq 1),
\end{equation}
according to the parameter $\beta > 0$. For that family the condition (\ref{Cond_Ph}) is true:
\begin{equation}
\label{Cond_f1}
f_{\beta}(0) = \varphi(0)
\;\; \mbox{and} \;\;
\displaystyle\frac{d}{dx} f_{\beta}(0) = \displaystyle\frac{d}{dx} \varphi(0).
\end{equation}
Additionally, we have:
\begin{equation}
\label{Cond_f2}
\displaystyle\frac{d^{2}}{dx^{2}} f_{\beta}(0)
=
\displaystyle\frac{d^{2}}{dx^{2}} \varphi(0)
\;\; \mbox{and} \;\;
\displaystyle\frac{d^{3}}{dx^{3}} f_{\beta}(0)
=
\displaystyle\frac{d^{3}}{dx^{3}} \varphi(0) + \displaystyle\frac{4 \!-\! \beta}{4{\big (}2 \!+\! \beta{\big )}}
\end{equation}
and
\begin{equation}
\label{Cond_f3}
\;\;
\displaystyle\frac{d^{4}}{dx^{4}} f_{\beta}(0)
=
\displaystyle\frac{d^{4}}{dx^{4}} \varphi(0)
\;\; \mbox{and} \;\;
\displaystyle\frac{d^{5}}{dx^{5}} f_{\beta}(0)
=
\displaystyle\frac{d^{5}}{dx^{5}} \varphi(0) + \displaystyle\frac{3{\big (}128 \!+\! 18 \beta \!-\! 13\beta^2{\big )}}{
16 {\big (}2 \!+\! \beta{\big )}^2}.
\end{equation}
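These expansions are easily verified with a computer algebra system. For instance, the following short Python/SymPy sketch (illustrative only; it is not part of the original argument) recovers the condition $\alpha = \beta + 2$ and the coefficient of $x^{3}$ in $f_{\beta}(x) - \mbox{\rm arc$\,$sin}\,x$:
\begin{verbatim}
import sympy as sp

x = sp.Symbol('x')
alpha, beta = sp.symbols('alpha beta', positive=True)
Phi = alpha*(sp.sqrt(1 + x) - sp.sqrt(1 - x))/(beta + sp.sqrt(1 + x) + sp.sqrt(1 - x))

# Matching d/dx Phi(0) with d/dx arcsin(0) = 1 forces alpha = beta + 2:
print(sp.solve(sp.Eq(sp.diff(Phi, x).subs(x, 0), 1), alpha))      # [beta + 2]

# Coefficient of x**3 in f_beta - arcsin; this should equal
# (4 - beta)/(24*(beta + 2)), up to an equivalent form, in agreement
# with the third-derivative formula displayed above.
f = Phi.subs(alpha, beta + 2)
print(sp.simplify(sp.diff(f - sp.asin(x), x, 3).subs(x, 0) / 6))
\end{verbatim}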
Let us notice that for the family of the functions $f_{\beta}(x)$, on the basis of (\ref{Ineq_Comp_1}),
for values $x \in (0,1]$ the following equivalence is true:
\begin{equation}
\label{Ineq_Comp_2} f_{\beta_{1}}(x) > f_{\beta_{2}}(x)
\Longleftrightarrow \beta_{1} < \beta_{2},
\end{equation}
for $\beta_{1,2} > 0$. Let us emphasize that there is a better upper bound $f_{b_{1}}(x)$
than the upper bound $\Phi_{\pi(\sqrt{2}+1/2), 4}(x)$ of the function $\varphi(x)$ over $(0,1]$.
It is true that the parameter $\beta = b_{1}$ fulfils:
\begin{equation}
f_{b_{1}}(1) = \varphi(1) = \displaystyle\frac{\pi}{2},
\end{equation}
hence:
\begin{equation}
b_{1} = \displaystyle\frac{\sqrt{2}(4 - \pi)}{\pi - 2\sqrt{2}} =
3.876 \, 452 \, 527 \, \ldots \, < 4 .
\end{equation}
Let us prove that the function $f_{b_{1}}(x)$ is the upper bound of the function $\varphi(x)$ over $[0,1]$.
Let us define the function:
\begin{equation}
h(x)
=
f_{b_{1}}(x) - \varphi(x)
\end{equation}
for $0 \leq x \leq 1$. For the function $h(x)$ we introduce two substitutions
$x = \cos t$ ${\big (}t \!\in\! [0, \mbox{\small $\displaystyle\frac{\pi}{2}$}]{\big )}$ and
$t = 4 \, \mbox{\rm arc}\,\mbox{\rm tg} \, u$ ${\big (}u \!\in\! [0, \mbox{\rm tg} \mbox{\small
$\displaystyle\frac{\pi}{8}$}]{\big )}$ respectively, and we get a new function:
\begin{equation}
\;\;
\mbox{\newsit{w}}(u)
\!=\!
h{\big (}\mbox{\rm cos}(4 \, \mbox{\rm arc}\,\mbox{\rm tg} \, u){\big )}
\!=\!
\displaystyle\frac{\sqrt{2}(b_{1}\!+\!2)(u^2\!+\!2u\!-\!1)}{
(\sqrt{2}\!-\!b_{1})u^2\!-\!2\sqrt{2}u\!-\!b_{1}\!-\!\sqrt{2}}
-
\displaystyle\frac{\pi}{2}
+
4 \, \mbox{\rm arc}\,\mbox{\rm tg} \, u
\end{equation}
for $0 \leq u \leq \mbox{\rm tg} \mbox{\small $\displaystyle\frac{\pi}{8}$} = \sqrt{2}\!-\!1$. Then:
\begin{equation}
\quad
\mbox{\small $\begin{array}{rcl}
\mbox{\normalsize $\displaystyle\frac{d}{d u} \mbox{\newsit{w}}(u)$}
\!&\!\!\!=\!\!\!&\!
{\Big (}
{\big (} \mbox{\normalsize $4 b_{1}^2$} \!+\! \mbox{\normalsize $2\sqrt{2}b_{1}^2$} \!-\! \mbox{\normalsize $8 b_{1}$}
\!-\! \mbox{\normalsize $4 \sqrt{2} b_{1}$} \!-\! \mbox{\normalsize $8$} {\big )} \mbox{\normalsize $u^4$} \!+\!
{\big (} \! \!-\!\mbox{\normalsize $4 \sqrt{2} b_{1}^2$} \!+\! \mbox{\normalsize $8 \sqrt{2} b_{1}$}
\!-\! \mbox{\normalsize $32$} {\big )} \mbox{\normalsize $u^3$} \!+\! \\[1.5 ex]
\!&\!\!\! \!\!\!&\!
\;\;
{\big (} \mbox{\normalsize $8 b_{1}^2$} \!-\! \mbox{\normalsize $16 b_{1}$} \!-\! \mbox{\normalsize $16$} {\big )}
\mbox{\normalsize $u^2$} \!+\!
{\big (} \! \!-\!\mbox{\normalsize $4 \sqrt{2} b_{1}^2$} \!+\! \mbox{\normalsize $8 \sqrt{2} b_{1}$}
\!+\! \mbox{\normalsize $32$} {\big )} \mbox{\normalsize $u$} \, + \\[1.5 ex]
\!&\!\!\! \!\!\!&\!
\;\;
{\big (} \mbox{\normalsize $4 b_{1}^2$} \!-\! \mbox{\normalsize $2 \sqrt{2} b_{1}^2$} \!-\! \mbox{\normalsize $8 b_{1}$}
\!+\! \mbox{\normalsize $4 \sqrt{2} b_{1}$} \!-\! \mbox{\normalsize $8$} {\big )} {\Big )} {\Big /} \\[1.5 ex]
\!&\!\!\! \!\!\!&\!
\;\;
{\Big (}
{\big (} \mbox{\normalsize $u^2$} \!+\! \mbox{\normalsize $1$} {\big )}
{\big (} \mbox{\normalsize $b_{1} u^2$} \!-\! \mbox{\normalsize $\sqrt{2} u^2$}
\!+\! \mbox{\normalsize $2 \sqrt{2} u$} \!+\! \mbox{\normalsize $b_{1}$} \!+\! \mbox{\normalsize $\sqrt{2}$} {\big )}^2 {\Big )}.
\end{array}$}
\end{equation}
All solutions of the equation $\displaystyle\frac{d}{d u} \mbox{\newsit{w}}(u) = 0$ are determined by terms:
\begin{equation}
\begin{array}{rcl}
u_{1,4} \!&\!\!=\!\!&\!
\displaystyle\frac{2 \sqrt{2} \mp \sqrt{-b_{1}^4 + 4 b_{1}^3 + 4 b_{1}^2 - 16 b_{1}}}{
b_{1}^2 - 2 b_{1} + 2 \sqrt{2} - 4 } \, , \\[1.5 ex]
u_{2,3} \!&\!\!=\!\!&\!
\sqrt{2} - 1 \, ;
\end{array}
\end{equation}
or by numerical values: $u_{1} \!=\! 0.0869 \ldots \,$, $u_{2,3} \!=\! 0.4142 \ldots \,$,
$u_{4} \!=\! 0.8400 \ldots \;$. The function $\mbox{\newsit{w}}(u)$ has a local maximum at the point $u_{1}$, which is the only stationary point in the interior of $[0, \sqrt{2} \!-\! 1]$, and
$\mbox{\newsit{w}}(0) \!=\! \mbox{\newsit{w}}(\sqrt{2} \!-\! 1) \!=\! 0$. Hence $\mbox{\newsit{w}}(u) \geq 0$
for $u \in [0, \sqrt{2} \!-\! 1]$. Therefore the function:
\begin{equation}
f_{b_{1}}(x)
=
\displaystyle\frac{\mbox{\small
$\displaystyle\frac{\pi(2-\sqrt{2})}{\pi-2\sqrt{2}}$}( \sqrt{1+x} -
\sqrt{1-x} )}{ \mbox{\small
$\displaystyle\frac{\sqrt{2}(4-\pi)}{\pi-2\sqrt{2}}$} + \sqrt{1+x} +
\sqrt{1-x}}
\end{equation}
is the upper bound of $\varphi(x)$ over $[0,1]$. Let us notice that, for values $x \in (0,1]$,
on the basis (\ref{Ineq_Comp_1}), the following inequalities are true:
\begin{equation}
\varphi(x)
<
f_{b_{1}}(x) = \Phi_{b_{1}+2,b_{1}}(x)
<
\Phi_{\pi(\sqrt{2}+1/2), 4}(x).
\end{equation}
Let us prove that the function $f_{b_{1}}(x)$ is the least upper bound of the function $\varphi(x)$ from
the family (\ref{Family_b}). The following implication is true:
\begin{equation}
\label{Impl_2} b_{1} < b \; \Longrightarrow \; f_{b}(1) <
f_{b_{1}}(1) = \varphi(1) = \displaystyle\frac{\pi}{2}.
\end{equation}
Hence for $b > b_{1}$ the function $f_{b}(x)$ is not the upper bound for the function $\varphi(x)$ over $[0,1]$.
According to the previous consideration we can conclude that the function $f_{b_{1}}(x)$ is the least upper bound
of the function $\varphi(x)$ over $[0,1]$.
\noindent
The lower bound $f_{4}(x)$ of the function $\varphi(x)$ over $[0,1]$, which belongs to
{\rm R.$\,$E. Shafer}, is such that, according to formulas (\ref{Cond_f1})--(\ref{Cond_f3}), the difference $f_{4}(x)-\varphi(x)$ has a root
of the fifth order at $x=0$. Let us prove that the function $f_{4}(x)$ is the greatest lower bound of the function
$\varphi(x)$ from the family (\ref{Family_b}). For fixed $b \in (b_{1},4)$ let us define the function:
\begin{equation}
g(x) = \left\{
\begin{array}{ccc}
\alpha & : & x = 0, \\[2.0 ex]
\displaystyle\frac{f_{b}(x) - \varphi(x)}{x^3} & : & x \in (0,1];
\end{array}
\right.
\end{equation}
with the constant:
\begin{equation}
\alpha
=
\displaystyle\frac{\mbox{\small $\displaystyle\frac{d^3}{d x^3}$} f_{b}(0)
- \mbox{\small $\displaystyle\frac{d^3}{d x^3}$} \varphi(0)}{6}
=
\displaystyle\frac{4-b}{24{\big (}2+b{\big )}}
>
0.
\end{equation}
The function $g(x)$ is continuous over $[0,1]$ and the following is true:
\begin{equation}
g(0) > 0 \quad\mbox{and}\quad g(1) < 0.
\end{equation}
Therefore we can conclude that there is $c_{b} \in (0,1)$ such that $g(c_{b}) = 0$. Let us notice that
$g(0) > 0$ and $g(c_{b}) = 0$. Then, there is some point $\xi_{b} \in (0,c_{b})$ such that
$g(\xi_{b}) \!>\! 0$ {\big (}$g \!\in\! \mbox{\rm C}[0,c_{b}]${\big )}. This is sufficient
for conclusion that, for each $b \in (b_{1},4)$, the function $f_{b}(x)$ is not the lower bound of
the function $\varphi(x)$ over $[0,1]$. According to the previous consideration we can conclude
that the function $f_{4}(x)$ is the greatest lower bound of the function $\varphi(x)$ over $[0,1]$.
\noindent
On the basis of the previous consideration the following statement is true.
\begin{pr}
\label{Prop_02} In the family of the functions$:$
\begin{equation}
\label{Prop_Family_02}
f_{b}(x) = \Phi_{b+2,b}(x) = \displaystyle\frac{(b+2)(\sqrt{1+x} -
\sqrt{1-x})}{b + \sqrt{1+x} + \sqrt{1-x}} \qquad (0 \leq x \leq 1),
\end{equation}
according to the parameter $b > 0$, the function $f_{4}(x)$ is the greatest lower bound of the
$\mbox{arc$\,$sin}\,x$ function and the function $f_{\sqrt{2}(4 - \pi)/(\pi - 2\sqrt{2})}(x)$ is
the least upper bound of the $\mbox{arc$\,$sin}\,x$ function.
\end{pr}
\begin{re}
Let us emphasize that Theorem {\rm \ref{Th_second}} has been recently considered
in \mbox{\rm \cite{Zhu07}}~and~\mbox{\rm \cite{Zhu07b}}. In the article
{\rm \cite{Zhu07b}} a simple proof of Theorem {\rm \ref{Th_second}}
based on ``L'Hospital ru\-le for mono\-tonicity'' is obtained.
\end{re}
{\small
\noindent University of Belgrade,
(Received 09/30/2006) \break
\noindent Faculty of Electrical Engineering,
$($Revised 05/08/2007$)\,$ \break
\noindent P.O.Box 35-54, $11120$ Belgrade, Serbia
\break
\noindent {\footnotesize \bf [email protected]}, {\footnotesize \bf [email protected]}
}
\end{document}
|
\begin{document}
\date{}
\title{The Freiheitssatz for Novikov algebras}
\begin{center}
{\bf Leonid Makar-Limanov}\footnote{Supported
by an NSA grant H98230-09-1-0008, by an NSF grant DMS-0904713, and a Fulbright fellowship awarded by the United States--Israel Educational Foundation; The Weizmann Institute of Science, Rehovot, Israel, University of Michigan, Ann Arbor, and
Wayne State University, Detroit, MI 48202, USA,
e-mail: {\em [email protected]}}
and
{\bf Ualbai Umirbaev}\footnote{Supported by an NSF grant DMS-0904713 and by a grant of Kazakhstan; Eurasian National University,
Astana, Kazakhstan and
Wayne State University,
Detroit, MI 48202, USA,
e-mail: {\em [email protected]}}
\end{center}
\begin{abstract}
We prove the Freiheitssatz for Novikov algebras in characteristic zero. It is also proved that
the variety of Novikov algebras is generated by a Novikov algebra on the space of polynomials $k[x]$ in a single variable $x$ over a field $k$ with respect to the multiplication $f\circ g=\partial(f)g$.
It follows that the base rank of the variety of Novikov algebras equals 1.
\end{abstract}
\noindent {\bf Mathematics Subject Classification (2010):} Primary
17A50, 17D25; Secondary 16R10, 17A36.
\noindent
{\bf Key words:} Novikov algebras, Freiheitssatz, identities.
\section{Introduction}
\hspace*{\parindent}
In 1930 W.\, Magnus proved
one of the most important theorems of the combinatorial group
theory (see \cite{Magnus}): \textsl{Let $G=\langle x_1,x_2,\ldots,
x_n | r=1\rangle$ be a group defined by a single cyclically
reduced relator $r$. If $x_n$ appears in $r$, then the subgroup of
$G$ generated by $x_1,\ldots, x_{n-1}$ is a free group, freely
generated by $x_1,\ldots, x_{n-1}$.} He called it
\textit{the Freiheitssatz} (``freedom/independence theorem'' in German). In the same paper W.\, Magnus proved the decidability of the word problem for
groups with a single defining relation. The Freiheitssatz for solvable and nilpotent
groups was researched by many authors (see, for example \cite{Romanovskii}).
In 1962 A.\,I.\,Shirshov \cite{Shir2} established the Freiheitssatz for Lie algebras
and proved the decidability of the word problem for Lie algebras
with a single defining relation. These results recently were generalized in \cite{KMLU}
for right-symmetric
algebras. In 1985 L.\,Makar-Limanov \cite{Makar2} proved the Freiheitssatz for
associative algebras of characteristic zero and in \cite{MLU2010} it was also proved for
Poisson algebras of characteristic zero. Note that the question of decidability of the word problem for
associative algebras and Poisson algebras with a single defining
relation and the Freiheitssatz for associative algebras in a
positive characteristic remain open. The Freiheitssatz for Poisson algebras in a
positive characteristic is not true \circite{MLU2010}.
In this paper we prove the Freiheitssatz for Novikov algebras over fields of characteristic zero.
There are two principal methods of proving the Freiheitssatz: one, employing
the combinatorics of free algebras, applied in \circite{Magnus,Romanovskii,Shir2,KMLU},
and the other, related to the study of algebraic and differential equations,
applied in \circite{Makar2,MLU2010}. The latter is used here.
Recall that an algebra $A$ over a field $k$ is called {\em
right-symmetric} if it satisfies the identity
\begin{eqnarray}\label{f1}
(xy)z-x(yz)=(xz)y-x(zy).
\end{eqnarray}
In other words, the associator
$(x,y,z)=(xy)z-x(yz)$ is symmetric in $y$ and $z$. The variety of
right-symmetric algebras is Lie-admissible, i.e., each
right-symmetric algebra $A$ with the operation $[x,y]=xy-yx$ is a
Lie algebra.
A right-symmetric algebra $A$ is called {\it Novikov}
(\cite{Novikov}, \cite{Osborn}, \cite{Gelfand})
if it also satisfies the identity
\begin{eqnarray}\label{f2}
x(yz)=y(xz).
\end{eqnarray}
Let $k[x]$ be the polynomial algebra in a single variable $x$ over a field $k$ of characteristic $0$.
There are two interesting multiplications on $k[x]$ (see, for example \cite{Dzhuma02,Dzhuma05,Dzhuma09}):
\begin{eqnarray*}
f*g=f\int_0^x g dx
\end{eqnarray*}
and
\begin{eqnarray*}
f\circ g=\partial(f)g, \ \ \partial=\frac{d}{d x}.
\end{eqnarray*}
The algebra $\langle k[x],*\rangle$ is a free dual Leibniz algebra freely generated by $1$, and it was proved in \cite{NU} that
the variety of dual Leibniz algebras is generated by $\langle k[x],*\rangle$. The algebra $A=\langle k[x],\circ\rangle$ is a Novikov algebra \cite{Dzhuma02} and it is the main object of this paper. We prove that the variety of Novikov algebras is generated by $A$. It follows that the base rank of the variety of Novikov algebras is equal to $1$.
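Although this is well known, we record for the reader's convenience the direct verification (a one-line computation) that $\langle k[x],\circ\rangle$ satisfies (\ref{f1}) and (\ref{f2}): for $f,g,h\in k[x]$,
\begin{eqnarray*}
(f\circ g)\circ h-f\circ(g\circ h)=(\partial^2(f)g+\partial(f)\partial(g))h-\partial(f)\partial(g)h=\partial^2(f)gh,
\end{eqnarray*}
which is symmetric in $g$ and $h$, and $f\circ(g\circ h)=\partial(f)\partial(g)h=g\circ(f\circ h)$.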
The paper is organized as follows. In Section 2 we prove that all identities of $A$ are corollaries of (\ref{f1})--(\ref{f2}).
In Section 3, using the homomorphisms of free Novikov algebras into $A$ and some results on differential equations from \cite{MLU2010}, we prove the Freiheitssatz.
\section{Identities}
\hspace*{\parindent}
Let $k$ be a field of characteristic $0$. Denote by $\mathfrak{N}$ the variety of Novikov algebras over $k$ and
denote by $\mathrm{N}\langle X\rangle$ the free Novikov algebra freely generated by $X=\{x_1,x_2,\ldots,x_n\}$. Put $x_1<x_2<\ldots<x_n$. In \cite{Dzhuma02,Dzhuma09} several constructions of a linear basis of $\mathrm{N}\langle X\rangle$ are given. We use a linear basis of $\mathrm{N}\langle X\rangle$ given in \cite{Dzhuma09} in
terms of Young diagrams.
Recall that a Young diagram is a set of boxes (we denote them by
bullets) with non-increasing numbers of boxes in each row. Rows
and columns are numbered from the top to the bottom and from the left to the
right. Let $k$ be the number of rows and $r_i$ be the number of
boxes in the $i$th row. The total number of boxes, $r_1+\cdots+r_k,$
is called the {\it degree} of the Young diagram.
To get a Novikov diagram, we need to add
one box (call it ``a nose'') to a Young diagram.
Namely, we need to add one more box to the first row, i.e.,
$$\begin{array}{ccccc} \bullet&\cdots&\bullet&\bullet&\bullet\\
\bullet&\cdots&\bullet&\bullet&\\ \vdots& \cdots&\vdots &\vdots&\\
\bullet&\cdots&\bullet&&\\
\end{array}
\mapsto
\begin{array}{cccccc}
\bullet&\cdots&\bullet&\bullet&\bullet&*\\
\bullet&\cdots&\bullet&\bullet&\\ \vdots&
\cdots&\vdots&\vdots&&\\\bullet&\cdots&\bullet&&&\\
\end{array}
$$ The number of boxes in a Novikov diagram is also called its {\it degree}. So, the
difference between the degrees of a Novikov diagram and the corresponding
Young diagram is $1$.
To construct
Novikov tableaux on $X$ we need to fill Novikov diagrams by
elements of $X$. Denote by $a_{i,j}$ the element of $X$
in the box $(i,j)$, that is the cross of the $i$-th row and the $j$-th
column. The {\it filling rules} are
\begin{itemize}
\item[(F1)] $a_{i,1}\ge a_{i+1,1},$ if $r_i=r_{i+1},
i=1,2,\ldots,k-1$;
\item[(F2)] the sequence of elements $a_{k,2},\ldots,a_{k,r_k},
a_{k-1,2},\ldots,a_{k-1,r_{k-1}},\ldots,a_{1,2},\ldots,
a_{1,r_1},a_{1,r_1+1}$ is non-decreasing.
\end{itemize}
In particular, all boxes beginning from the second place in each
row are labeled by non-decreasing elements of $X$.
To any Novikov tableau
\begin{eqnarray}\label{f3}
T=
\begin{array}{cccccc}
a_{1,1}&\cdots&\cdots&a_{1,r_1-1}&a_{1,r_1}&a_{1,r_1+1}\\
a_{2,1}&\cdots&a_{2,r_2-1}&a_{2,r_2}&\\ \vdots&
\cdots&\vdots&\vdots&&\\ a_{k,1}&\cdots&a_{k,r_k}&&&\\
\end{array}
\end{eqnarray}
associate a non-associative word
\begin{eqnarray}\label{f4}
W_T= W_k(W_{k-1}(\ldots (W_2
W_1)\ldots )),
\end{eqnarray}
in the alphabet $X$ where
\begin{eqnarray*}
W_1=(\ldots
((a_{1,1} a_{1,2}) a_{1,3})\ldots a_{1,r_1})
a_{1,r_1+1},
\end{eqnarray*}
\begin{eqnarray*}
W_i=(\ldots
((a_{i,1} a_{i,2}) a_{i,3})\ldots
a_{i,r_i-1}) a_{i,r_i},\quad 1<i\leq k.
\end{eqnarray*}
The set of all non-associative words associated with Novikov tableaux constitutes a linear basis of the free Novikov algebra $\mathrm{N}\langle X\rangle$ \cite{Dzhuma09}.
Recall that $A=\langle k[x], \circ\rangle$ is the Novikov algebra on the space of the polynomial algebra $k[x]$ with respect to
multiplication $\circ$. For any $s=(s_1,\ldots,s_n)\in \mathbb{Z}_{+}^n$, where $\mathbb{Z}_{+}$ is the set of all nonnegative integers, we define a homomorphism
\begin{eqnarray*}
\overline{s} : \mathrm{N}\langle X\rangle \longrightarrow A=\langle k[x], \circ\rangle
\end{eqnarray*}
given by $\overline{s}(x_i)=x^{s_i}$ for all $1\leq i\leq n$.
Consider the polynomial algebra $k[\lambda_1,\ldots,\lambda_n]$ in the variables $\lambda_1,\ldots,\lambda_n$. Put
$\lambda=(\lambda_1,\ldots,\lambda_n)$ and $k[\lambda]=k[\lambda_1,\ldots,\lambda_n]$. Put also
$x^{k[\lambda]}=\{x^{f(\lambda)} | f(\lambda)\in k[\lambda]\}$. Define a multiplication on $x^{k[\lambda]}$ by
\begin{eqnarray*}
x^{f(\lambda)}x^{g(\lambda)}=x^{f(\lambda)+g(\lambda)}.
\end{eqnarray*}
Obviously, $x^{k[\lambda]}$ is a multiplicative copy of the additive group of $k[\lambda]$. Denote by $G$ the group algebra of $x^{k[\lambda]}$ over $k[\lambda]$. It is easy to check that there exists a unique $k[\lambda]$-linear derivation
\begin{eqnarray*}
D : G\longrightarrow G
\end{eqnarray*}
such that $D(x^{f(\lambda)})=f(\lambda)x^{f(\lambda)-1}$ for all $f(\lambda)\in k[\lambda]$. With respect to
\begin{eqnarray*}
a\circ b= D(a)b, \ \ \ a,b\in G,
\end{eqnarray*}
$G$ is a Novikov algebra again. Denote by $A(\lambda)$ the Novikov $k$-subalgebra of $G$ generated by
$x^{\lambda_1},\ldots,x^{\lambda_n}$. The algebra $A(\lambda)$ looks like an algebra of generic matrices (see, for example, \cite{Drensky00}).
Let
\begin{eqnarray*}
\overline{\lambda} : \mathrm{N}\langle X\rangle \longrightarrow A(\lambda)
\end{eqnarray*}
be an epimorphism of Novikov algebras defined by $\overline{\lambda}(x_i)=x^{\lambda_i}$ for all $1\leq i\leq n$. Note that $\overline{\lambda}$ is a ``general'' element for the set of all homomorphisms $\overline{s}$, where $s\in \mathbb{Z}_{+}^n$. A homomorphism $\overline{s}$ is called a {\em specialization} of $\overline{\lambda}$.
Now we fix a Novikov tableau $T$ and its associated non-associative
word $W_T$ from (\ref{f3})--(\ref{f4}). Denote by $\deg$ the standard degree function on $\mathrm{N}\langle X\rangle$ and by $\deg_{x_i}$ the degree function with respect to $x_i$ for all $1\leq i\leq n$. Denote by $d$ the degree of $T$ and by $d_i$ the number of occurrences of $x_i$ in $T$. Obviously, $d=\deg\,W_T$, $d_i=\deg_{x_i}W_T$, and
\begin{eqnarray*}
\overline{\lambda}(W_T)=f_T(\lambda) x^{g_T(\lambda)}
\end{eqnarray*}
for some $f_T(\lambda), g_T(\lambda)\in k[\lambda]=k[\lambda_1,\ldots,\lambda_n]$.
Our first aim is to calculate the polynomials $f_T(\lambda)$ and $g_T(\lambda)$. To this end we change the tableau $T$ from (\ref{f3}) by substituting $\lambda_i$ for $x_i$ for all $1\leq i\leq n$. Denote the new tableau by $T(\lambda)$. Then denote by $\lambda_{i,j}$ the element
in the box $(i,j)$ of $T(\lambda)$. In fact, we have just changed all $a_{i,j}$ to $\lambda_{i,j}$ in (\ref{f3}).
\begin{lm}\label{l1} The following statements are true:
\begin{itemize}
\item[(a)] $g_T(\lambda)=(d_1\lambda_1+\ldots+d_n\lambda_n-d+1)$;
\item[(b)] $f_T(\lambda)=f_1f_2\ldots f_k$
where
\begin{eqnarray*}
f_i=\lambda_{i,1}(\lambda_{i,1}+\lambda_{i,2}-1)\ldots(\lambda_{i,1}+\ldots+\lambda_{i,r_i}-r_i+1), \ 1\leq i\leq k.
\end{eqnarray*}
\end{itemize}
\end{lm}
{\bf Proof.}
Direct calculation gives that
\begin{eqnarray*}
\overline{\lambda}(W_1)=\overline{\lambda}((\cdots
((a_{1,1} a_{1,2}) a_{1,3})\cdots a_{1,r_1})
a_{1,r_1+1})\\
=\overline{\lambda}((\cdots
((x^{\lambda_{1,1}}\circ x^{\lambda_{1,2}})\circ x^{\lambda_{1,3}})\circ\cdots \circ x^{\lambda_{1,r_1}})\circ
x^{\lambda_{1,r_1+1}})\\
=\lambda_{1,1}(\lambda_{1,1}+\lambda_{1,2}-1)\ldots(\lambda_{1,1}+\ldots+\lambda_{1,r_1}-r_1+1)
x^{(\lambda_{1,1}+\ldots+\lambda_{1,r_1}+\lambda_{1,r_1+1}-r_1)}.
\end{eqnarray*}
Using this and induction on $k$ we get
\begin{eqnarray*}
\overline{\lambda}(W_k)=\lambda_{k,1}(\lambda_{k,1}+\lambda_{k,2}-1)\ldots(\lambda_{k,1}+\ldots+\lambda_{k,r_k-1}-r_k+2)
x^{(\lambda_{k,1}+\ldots+\lambda_{k,r_k}-r_k+1)}
\end{eqnarray*}
and
\begin{eqnarray*}
\overline{\lambda}(W_{k-1}(W_{k-2}\cdots (W_2
W_1)\cdots ))=f_1f_2\ldots f_{k-1}x^s,
\end{eqnarray*}
where
$s=\sum_{i<k,j} \lambda_{i,j}-d+r_k+1$. Consequently,
\begin{eqnarray*}
\overline{\lambda}(W_T)=\overline{\lambda}(W_k)\circ \overline{\lambda}(W_{k-1}(W_{k-2}\cdots (W_2
W_1)\cdots ))\\
=\partial(\overline{\lambda}(W_k)) \overline{\lambda}(W_{k-1}(W_{k-2}\cdots (W_2
W_1)\cdots ))\\
=f_k x^{(\lambda_{k,1}+\ldots+\lambda_{k,r_k}-r_k)}f_1f_2\ldots f_{k-1}x^s=f_T x^t,
\end{eqnarray*}
where $t=\lambda_{k,1}+\ldots+\lambda_{k,r_k}-r_k+s=\sum_{i,j} \lambda_{i,j}-d+1=g_T(\lambda)$.
$\Box$
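To illustrate Lemma \ref{l1} on the smallest nontrivial example (added here only for orientation), let $T$ be the one-row Novikov tableau with $a_{1,1}=x_1$, $a_{1,2}=x_2$, $a_{1,3}=x_3$, so that $k=1$, $r_1=2$, $d=3$, $d_1=d_2=d_3=1$ and $W_T=(x_1x_2)x_3$. Then
\begin{eqnarray*}
\overline{\lambda}(W_T)=(x^{\lambda_1}\circ x^{\lambda_2})\circ x^{\lambda_3}=\lambda_1(\lambda_1+\lambda_2-1)x^{\lambda_1+\lambda_2+\lambda_3-2},
\end{eqnarray*}
in agreement with $f_T(\lambda)=\lambda_1(\lambda_1+\lambda_2-1)$ and $g_T(\lambda)=\lambda_1+\lambda_2+\lambda_3-d+1$.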
\begin{lm}\label{l2}
A Novikov tableau $T$ is uniquely defined by the polynomials $f_T(\lambda)$ and $g_T(\lambda)$.
\end{lm}
{\bf Proof.} For any linear form $l$ of the type
\begin{eqnarray}\label{f5}
l=t_1\lambda_1+\ldots+t_n\lambda_n-t_1-\ldots-t_n+1
\end{eqnarray}
we put
$\alpha(l)=t_1+\ldots+t_n$ and $\widehat{l}=t_1\lambda_1+\ldots +t_n\lambda_n$. Let $s_i$ be the number of boxes in the $i$-th column of the Young diagram corresponding to $T$. It follows from Lemma \ref{l1}(b) that $s_i$ is equal to the number of all divisors $l$ of $f_T$ of the form (\ref{f5}) with $\alpha(l)=i$, counted with multiplicity. So, the Young diagram and the Novikov diagram corresponding to $T$ are uniquely defined.
By Lemma \ref{l1}(a), the degree of $T$ and the number of occurrences of $x_i$ in $T$ are also uniquely defined by $g_T(\lambda)$. It follows from Lemma \ref{l1}(b) that $x_i$ occurs $m$ times in the first column of $T$ if and only if $\lambda_i^m | f_T$ and $\lambda_i^{m+1} \nmid f_T$. Consequently, the elements of all columns of $T$, except the first one, are uniquely defined by the filling rule (F2).
So, the only question to answer is how to arrange the elements of the first column. Let $l_1,\ldots,l_s$ be all divisors of $f_T$ of the form (\ref{f5}) with maximal $\alpha=\alpha(l_1)=\ldots=\alpha(l_s)$. By Lemma \ref{l1}(b), $l_1,\ldots,l_s$ correspond to the first $s$ rows of $T$ and the first $s$ rows of the Young diagram corresponding to $T$ have lengths $r_1=\ldots=r_s=\alpha$. We have
\begin{eqnarray*}
\sum_{1\leq i\leq s} \sum_{1\leq j\leq r_i} \lambda_{i,j}=\widehat{l_1}+\ldots+\widehat{l_s}.
\end{eqnarray*}
Suppose that
\begin{eqnarray*}
\sum_{1\leq i\leq s} \lambda_{i,1}=\widehat{l_1}+\ldots+\widehat{l_s}-\sum_{1\leq i\leq s} \sum_{2\leq j\leq r_i} \lambda_{i,j}=\sum_{i=1}^n t_i\lambda_i.
\end{eqnarray*}
Obviously $t_i\geq 0$, $t_1+\ldots+t_n=s$, and
\begin{eqnarray*}
(a_{1,1},\ldots,a_{s,1})=(\underbrace{x_n,\ldots,x_n}_{t_n},\ldots,\underbrace{x_1,\ldots,x_1}_{t_1})
\end{eqnarray*}
by the filling rule (F1). So, the first $s$ rows of the Novikov tableau $T$ are uniquely determined. Consequently,
the polynomials $f_1,\ldots,f_s$ are also uniquely determined. Using the polynomial $f_T/(f_1\ldots f_s)$ and repeating the same argument, we can uniquely determine $T$.
$\Box$
Denote by $\mathbb{T}_n$ the set of all Novikov tableaux of degree $n$ on $X=\{x_1,\ldots,x_n\}$ without repeated elements. Then $\{W_T | T\in \mathbb{T}_n\}$ is a linear basis of the space of all multilinear homogeneous elements of degree $n$ of the free Novikov algebra $\mathrm{N}\langle X\rangle$ \cite{Dzhuma09}.
\begin{co}\label{c1} Suppose that $T\in \mathbb{T}_n$. Then $T$ is uniquely defined by $f_T$.
\end{co}
Let $u=\lambda_1^{k_1}\ldots \lambda_n^{k_n}$ be an arbitrary monomial in $k[\lambda]=k[\lambda_1,\ldots,\lambda_n]$. Put $|u|=k_1+\ldots+k_n$. Put also $\gamma(u)=(s_1,\ldots,s_n)$ if $u=\lambda_{\sigma(1)}^{s_1}\ldots \lambda_{\sigma(n)}^{s_n}$ where $\sigma$ is a permutation on $\{1,\ldots,n\}$ and $s_1\geq s_2\geq \ldots\geq s_n$. We define a linear order $\preceq$ on the set of all monomials of $k[\lambda]$. If $u$ and $v$ are two monomials then put $u\preceq v$ if $|u|<|v|$ or $|u|=|v|$ and $\gamma(u)$ precedes $\gamma(v)$ with respect to the lexicographical order (from left to right) on $\mathbb{Z}_+^n$. If $|u|=|v|$ and $\gamma(u)=\gamma(v)$ then
$u\preceq v$ is defined arbitrarily.
For any $f\in k[\lambda]$ denote by $\widetilde{f}$ its highest term with respect to $\preceq$.
The statement of the next corollary follows immediately from Lemma \ref{l1}(b).
\begin{co}\label{c2} Suppose that $T\in \mathbb{T}_n$ and $(a_{1,1},a_{2,1},\ldots,a_{k,1})=(x_{i_1},x_{i_2},\ldots,x_{i_k})$ in (\ref{f3}). Then,
\begin{eqnarray*}
\widetilde{f_T}= \lambda_{i_1}^{r_1}\lambda_{i_2}^{r_2}\ldots \lambda_{i_k}^{r_k} \ \ {\it and} \ \
\gamma(\widetilde{f_T})= (r_1,r_2,\ldots,r_k).
\end{eqnarray*}
\end{co}
\begin{co}\label{c3} The set of polynomials $f_T\in k[\lambda]$, where $T$ runs over $\mathbb{T}_n$, is linearly independent over $k$.
\end{co}
{\bf Proof.}
Suppose that $(a_{1,1},a_{2,1},\ldots,a_{k,1})=(x_{i_1},x_{i_2},\ldots,x_{i_k})$ in (\ref{f3}). Then,
$\gamma(\widetilde{f_T})= (r_1,r_2,\ldots,r_k)$ by Corollary \ref{c2}. It follows that the Novikov diagram corresponding to $T$ is uniquely determined by $\widetilde{f_T}$. Moreover, $x_{i_s}$ is the first element of the row with length $r_s$. Then the filling rule (F1) uniquely determines the elements of the first column of $T$. The filling rule (F2) uniquely determines the rest of $T$.
So, the mapping $T\mapsto \widetilde{f_T}$ sends different tableaux to different monomials of $k[\lambda]$. Consequently, the leading terms $\widetilde{f_T}$, where $T$ runs over $\mathbb{T}_n$, are pairwise distinct monomials and the polynomials $f_T$, $T\in \mathbb{T}_n$, are linearly independent. This proves the corollary. $\Box$
In characteristic $0$ any identity is equivalent to a set of multilinear homogeneous identities \cite{KBKA}.
Any nontrivial multilinear homogeneous Novikov identity of degree $n$ can be written as
\begin{eqnarray}\label{f6}
\sum_{T\in \mathbb{T}_n} \alpha_T W_T=0
\end{eqnarray}
where $\alpha_T\in k$ and at least one of $\alpha_T$ is nonzero.
\begin{theor}\label{t1}
The Novikov algebra $A=\langle k[x],\circ\rangle$ does not satisfy any nontrivial Novikov identity.
\end{theor}
{\bf Proof.} Suppose that $A$ satisfies a nontrivial identity of the form (\ref{f6}). Consider the homomorphism $\overline{\lambda}$. Applying $\overline{\lambda}$ to the left hand side of (\ref{f6}) we get
\begin{eqnarray*}
\overline{\lambda}(\sum_{T\in \mathbb{T}_n} \alpha_T W_T)=\sum_{T\in \mathbb{T}_n} \alpha_T f_T x^{g_T}
=(\sum_{T\in \mathbb{T}_n} \alpha_T f_T)x^{\lambda_1+\ldots+\lambda_n-n+1}
\end{eqnarray*}
since $g_T(\lambda)=\lambda_1+\ldots+\lambda_n-n+1$ for all $T$.
By Corollary \ref{c3}, $\sum_{T} \alpha_T f_T$ is a nonzero polynomial in $k[\lambda]$.
Then it is not difficult to find $s=(s_1,\ldots,s_n)\in \mathbb{Z}_{+}^n$ such that $\sum_{T} \alpha_T f_T(s_1,\ldots,s_n)\neq 0$. This means that the image of the left hand side of (\ref{f6}) under the homomorphism $\overline{s}$ is not equal to $0$. Consequently, (\ref{f6}) is not an identity of $A$. $\Box$
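To see the mechanism in the simplest case (an illustration we add for concreteness), take $n=2$: the multilinear words of degree $2$ are $W_{T_1}=x_1x_2$ and $W_{T_2}=x_2x_1$, with
\begin{eqnarray*}
\overline{\lambda}(x_1x_2)=\lambda_1 x^{\lambda_1+\lambda_2-1}, \qquad \overline{\lambda}(x_2x_1)=\lambda_2 x^{\lambda_1+\lambda_2-1},
\end{eqnarray*}
so a relation $\alpha_1 x_1x_2+\alpha_2 x_2x_1=0$ holding in $A$ would force $\alpha_1\lambda_1+\alpha_2\lambda_2$ to vanish identically, i.e. $\alpha_1=\alpha_2=0$; specializing, for instance, $s=(1,2)$ and $s=(2,1)$ already suffices.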
\begin{co}\label{c4} The variety of Novikov algebras $\mathfrak{N}$ is generated by $A=\langle k[x],\circ\rangle$, i.e.,
$\mathfrak{N}=\mathrm{Var}\,A$.
\end{co}
Recall that the least natural number $n$ such that the variety $\mathrm{Var}(\mathrm{N}\langle x_1,x_2,\ldots,x_n\rangle)$ of algebras generated by $\mathrm{N}\langle x_1,x_2,\ldots,x_n\rangle$ is equal to $\mathfrak{N}$ is called the {\em base rank} $rb(\mathfrak{N})$ of the variety $\mathfrak{N}$ (see, for example, \cite{NU}).
\begin{co}\label{c5}
The base rank of the variety of Novikov algebras is equal to one.
\end{co}
{\bf Proof.} Consider the ideal $I$ of the polynomial algebra $k[x]$ generated by $x^2$. It is easy to check that $\langle I,\circ\rangle$ is a Novikov algebra generated by $x^2$. In the proof of Theorem \ref{t1}, we can easily choose $s=(s_1,\ldots,s_n)$ such that $s_i\geq 2$ for all $i$. Consequently, $\langle I,\circ\rangle$ does not satisfy any nontrivial Novikov identity. Then, $\mathfrak{N}=\mathrm{Var}\,\langle I,\circ\rangle$. We have $\mathrm{Var}(\mathrm{N}\langle x_1\rangle)\supseteq \mathrm{Var}\,\langle I,\circ\rangle$ since $\langle I,\circ\rangle$ is a homomorphic image of $\mathrm{N}\langle x_1\rangle$. Therefore, $\mathfrak{N}=\mathrm{Var}(\mathrm{N}\langle x_1\rangle)$. $\Box$
\section{The Freiheitssatz}
\hspace*{\parindent}
To prove the Freiheitssatz we need the following corollary of Proposition 1 from \cite{MLU2010}.
\begin{co}\label{c6}\cite{MLU2010}
Let $f(x,t_{\alpha_1},t_{\alpha_2},\ldots,t_{\alpha_m})\in k[x,t_{\alpha_1},t_{\alpha_2},\ldots,t_{\alpha_m}]$, where $\alpha_1< \alpha_2< \ldots < \alpha_m$ are nonnegative integers.
Suppose that there exists $(c,c_{\alpha_1},c_{\alpha_2},\ldots,c_{\alpha_m})\in k^{1+m}$ so that
$f(c,c_{\alpha_1},c_{\alpha_2},\ldots,c_{\alpha_m})=0$ and $\frac{\partial f}{\partial t_{\alpha_m}}(c,c_{\alpha_1},c_{\alpha_2},\ldots,c_{\alpha_m})\neq 0$. Then the differential equation
\begin{eqnarray*}
f(x,\partial^{\alpha_1}(T),\partial^{\alpha_2}(T),\ldots,\partial^{\alpha_m}(T))=0
\end{eqnarray*}
has a solution in the formal power series algebra $k[[x-c]]$.
\end{co}
Note that in the formulation of this corollary, the variables $x,t_{\alpha_1},t_{\alpha_2},\ldots,t_{\alpha_m}$ are independent variables, $\partial$ is the standard derivation $\frac{d}{d x}$ of $k[[x-c]]\supseteq k[x]$, and $\partial^{\alpha_i}$ is the $\alpha_i$th power of $\partial$.
If $f\in \mathrm{N}\langle x_1,\ldots,x_n\rangle$, then we denote by $\mathrm{id}(f)$ the ideal of $\mathrm{N}\langle x_1,\ldots,x_n\rangle$ generated by $f$.
\begin{theor}\label{t2}{\bf (Freiheitssatz)} Let $\mathrm{N}\langle x_1,\ldots,x_n\rangle$ be the free Novikov algebra over a field
$k$ of characteristic $0$ in the variables $x_1,\ldots,x_n$. If $f \in \mathrm{N}\langle x_1,\ldots,x_n\rangle$ and $f\notin
\mathrm{N}\langle x_1,\ldots,x_{n-1}\rangle$, then $\mathrm{id}(f)\cap \mathrm{N}\langle x_1,\ldots,x_{n-1}\rangle=0$.
\end{theor}
{\bf Proof.} Without loss of generality we may assume that $k$ is algebraically closed and that
$f(x_1,\ldots,x_{n-1}, 0) \neq 0$.
The theorem will be proved if for $f$ and any nonzero $g\in \mathrm{N}\langle x_1,\ldots,x_{n-1}\rangle$ there exist a Novikov algebra $B$ and
a homomorphism $\theta : \mathrm{N}\langle x_1,\ldots,x_n\rangle\rightarrow B$ of Novikov algebras such that
$\theta(g)\neq 0, \theta(f)=0$.
Let $\hat{f}$ be the highest homogeneous part of $f$ with respect to $x_n$.
By Theorem \ref{t1}, there exists a homomorphism
$\phi : \mathrm{N}\langle x_1,\ldots,x_n\rangle\rightarrow A=\langle k[x],\circ\rangle$ such that $\phi((gf)\hat{f})\neq 0$. Denote by $Z_1,Z_2,\ldots,Z_{n-1}$ the images of $x_1,x_2,\ldots,x_{n-1}$ under $\phi$,
by $Z$ a general element of $A$, and consider the equation
\begin{eqnarray*}
f(Z_1,Z_2,\ldots,Z_{n-1},Z)=0
\end{eqnarray*}
in $A$. Using the definition of the multiplication in $A$, we can rewrite the last equation in the form
\begin{eqnarray}\label{f7}
h(x,\partial^{\alpha_1}(Z),\partial^{\alpha_2}(Z),\ldots,\partial^{\alpha_r}(Z))=0
\end{eqnarray}
where $h=h(x,t_{\alpha_1},\ldots,t_{\alpha_r})$ is a polynomial in the variables
$x,t_{\alpha_1},\ldots,t_{\alpha_r}$. Since $f\notin
\mathrm{N}\langle x_1,\ldots,x_{n-1}\rangle$ the polynomial $h$ essentially depends on $t_{\alpha_1},\ldots,t_{\alpha_r}$, i.e.
$r>0$ in (\ref{f7}).
Assume that $\alpha_1< \ldots <\alpha_r$ and that $h$ is irreducible. If $h$ is not irreducible we can replace it with its irreducible factor which contains $t_{\alpha_r}$.
We assert that there exists $L=(c,c_{\alpha_1},\ldots,c_{\alpha_r})\in k^{1+r}$
such that $h(L)=0$ and $\frac{\partial h}{\partial t_{\alpha_r}}(L)\neq 0$. If this is not true then by Hilbert's Nullstellensatz $h$ divides $(\frac{\partial h}{\partial t_{\alpha_r}})^s$ for some $s>0$. But then, since $h$ is irreducible, $h$ divides $(\frac{\partial h}{\partial t_{\alpha_r}})$, which is clearly impossible.
Therefore we can use Corollary \ref{c6} and find a solution $Z_n$ of the differential equation (\ref{f7}) in the formal power series algebra $k[[x-c]]$. Note that $B=\langle k[[x-c]],\circ\rangle$ is a Novikov algebra and $A$ is a subalgebra of $B$.
Take a homomorphism of Novikov algebras $\theta : \mathrm{N}\langle x_1,\ldots,x_n\rangle\rightarrow B$ defined by
\begin{eqnarray*}
\theta(x_1)=Z_1,\theta(x_2)=Z_2,\ldots,\theta(x_{n-1})=Z_{n-1},\theta(x_n)=Z_n.
\end{eqnarray*}
Then $\theta_{|\mathrm{N}\langle x_1,\ldots,x_{n-1}\rangle}=\phi_{|\mathrm{N}\langle x_1,\ldots,x_{n-1}\rangle}$ and $\theta(f)=0$.
$\Box$
In many cases the Freiheitssatz is formulated directly in the language of freeness.
\begin{co}\label{c7}{\bf (Freiheitssatz)} Let $\mathrm{N}\langle x_1,\ldots,x_n\rangle$ be the free Novikov algebra over a field $k$ of characteristic $0$ in the variables $x_1,\ldots,x_n$. Suppose that $f \in \mathrm{N}\langle x_1,\ldots,x_n\rangle$ and $f\notin
\mathrm{N}\langle x_1,\ldots,x_{n-1}\rangle$. Then the subalgebra of the quotient algebra $\mathrm{N}\langle x_1,\ldots,x_n\rangle/\mathrm{id}(f)$ generated by $x_1+\mathrm{id}(f),\ldots,x_{n-1}+\mathrm{id}(f)$ is a free Novikov algebra with free generators $x_1+\mathrm{id}(f),\ldots,x_{n-1}+\mathrm{id}(f)$.
\end{co}
\bigskip
\begin{center}
{\bf\large Acknowledgments}
\end{center}
\hspace*{\parindent}
The authors are grateful to Professor Askar Dzhumadil'daev for interesting discussions.
\begin{thebibliography}{99}
\bibitem{Drensky00}
Drensky, V., Free algebras and PI-algebras,
Graduate course in algebra, Springer-Verlag Singapore, Singapore, 2000.
\bibitem{Novikov} Balinskii, A.A., Novikov, S.P., Poisson bracket
of Hamiltonian type, Frobenius algebras and Lie algebras. Doklady
AN SSSR 283 (1985), No. 5, 1036--1039.
\bibitem{Dzhuma02} Dzhumadil'daev, A.S., Lofwall, C.,
Trees, free right-symmetric algebras, free Novikov algebras
and identities. Homology, Homotopy and Appl. 4 (2002),
No. 2(1), 165--190.
\bibitem{Dzhuma05} Dzhumadil'daev, A.S., Tulenbaev, K.M., Nilpotency of Zinbiel algebras. J. Dyn. Control Syst. 11 (2005), no. 2, 195--213.
\bibitem{Dzhuma09} Dzhumadil'daev, A.S., Codimension growth and non-Koszulity of Novikov operad. arXiv:0902.3187, 7 pages.
\bibitem{Gelfand} Gelfand, I.M., Dorfman, I.Ya., Hamiltonian operators and related algebraic structures. Funct. Anal. Appl. 13 (1979), 248--262.
\bibitem{KMLU}
Kozybaev, D., Makar-Limanov, L., Umirbaev, U., The Freiheitssatz and the automorphisms
of free right-symmetric algebras. Asian-European Journal of Mathematics, 1 (2008), No. 2, 243--254.
\bibitem{Magnus} Magnus, W., \"Uber diskontinuierliche Gruppen mit einer definierenden
Relation (Der Freiheitssatz). J. Reine Angew. Math. 163 (1930), 141--165.
\bibitem{Makar2} Makar-Limanov, L., Algebraically closed skew fields. J. Algebra 93 (1985), no. 1, 117--135.
\bibitem{MLU2010} Makar-Limanov, L., Umirbaev, U., The Freiheitssatz
for Poisson algebras. J. Algebra 328 (2011), 495--503.
\bibitem{NU}
Naurazbekova, A., Umirbaev, U., Identities of dual Leibniz algebras.
TWMS J. Pure Appl. Math. 1 (2010), No. 1, 86--91.
\bibitem{Osborn} Osborn, J.M., Infinite-dimensional Novikov algebras of
characteristic $0$. J. Algebra 167 (1994), 146--167.
\bibitem{Romanovskii}
Romanovskii, N.S., A theorem on freeness for groups with one defining relation in varieties of solvable and nilpotent groups of given degrees. (Russian) Mat. Sb. (N.S.) 89(131) (1972), 93--99.
\bibitem{Shir2}
Shirshov, A.I., Some algorithm problems for Lie algebras.
Sibirsk. Mat. Z. 3 (1962), 292--296.
\bibitem{KBKA} Zhevlakov, K.A., Slinko, A.M., Shestakov, I.P., Shirshov, A.I.,
Rings that are nearly associative, Academic Press, Inc. [Harcourt Brace Jovanovich, Publishers], New York-London, 1982.
\end{thebibliography}
\end{document}
|
\begin{document}
\newcommand{\abs}[1]{\left|#1\right|}
\newcommand{\ket}[1]{\left|#1\right>}
\newcommand{\bra}[1]{\left<#1\right|}
\title{Heralded generation of entanglement with coupled cavities}
\author{Jaeyoon Cho}
\affiliation{Department of Physics and Astronomy, University College London, Gower St., London WC1E 6BT, UK}
\author{Dimitris G. Angelakis}
\affiliation{ Centre for Quantum Technologies, National University
of Singapore, 2 Science Drive 3, Singapore 117542}
\affiliation{Science Department, Technical University of Crete,
Chania, Crete, Greece, 73100}
\author{Sougato Bose}
\affiliation{Department of Physics and Astronomy, University College London, Gower St., London WC1E 6BT, UK}
\date{\today}
\begin{abstract}
We propose a scheme to generate two-photon, two-atom, or atom-photon entangled states with a coupled system of two cavities. In our scheme, two cavity photons are exchanged by the direct inter-cavity coupling, while atoms in the cavities simply play the role of generating and probing them. By virtue of the high efficiency of atomic state measurement, this method enables the realization of efficient heralded entanglement generation robust against photon loss, which greatly facilitates applications in quantum information processing.
\end{abstract}
\pacs{}
\maketitle
Entanglement is one of the essential ingredients of quantum information science. While various physical systems have exhibited entanglement, entangled photons have found unique applications in quantum communication between distant parties, such as quantum cryptography \cite{e91} and quantum teleportation \cite{bbc93}, thanks to their high portability. Because of its relative ease of implementation, spontaneous parametric down-conversion has long been a conventional source of entangled photons \cite{s03}, and has allowed us to carry out various proof-of-principle experiments, such as the test of quantum nonlocality \cite{b64} and demonstration of two-qubit gates \cite{pittman03} and multipartite entanglement \cite{lu07}. The long-lived electronic ground states of single atoms, on the other hand, are favored for storing and manipulating local quantum information. There have been numerous proposals and experiments to implement conditional gates and generate entanglement of single atoms. To name a few, entanglement of single atoms was generated in ion traps by exploiting the collective vibrational mode as a quantum bus \cite{haffner05} and in optical lattices by exploiting collisional phase shifts \cite{mandel03}.
Another line of effort has been directed toward interfacing between single photons and single atoms to take advantage of both systems. A reasonable choice for doing this is to exploit cavity QED to amplify the otherwise weak coupling between them. For instance, cavity QED allows one to entangle a photon with an atom, and to map an atomic state into a photon \cite{raimond01}. Recently, these operations were combined to generate two-photon entangled states with an atom in a cavity \cite{wwk07}. One of the ultimate goals in this direction is to connect multiple cavities so that they can communicate with each other through intermediate photons. Such coupled-cavity systems have been considered as basic building blocks toward a scalable architecture for quantum information processing \cite{p97,asy07,amb07}, and recently also considered for quantum simulation \cite{hbp06}. The simplest case of coupled-cavity systems, i.e., that of two cavities each having a single atom, would be ideal for the generation of Bell-type entanglement, which is of great use in various modes of quantum information processing. However, most of the two-atom entanglement schemes suffer from loss of the intermediate photons leading to degradation of the final quality of entanglement. It would thus be desirable to have a heralded method for generating entanglement, where the cases of photon loss can be eliminated by heralding. This could be achieved by performing (non-deterministic) Bell-state measurement of two photons leaking out of cavities \cite{duan03,lim06}, or by performing polarization measurement of a photon reflected sequentially from cavities \cite{cho05}. These schemes, however, rely on single-photon measurement, which is still far from efficient. Moreover, a scheme for generating two-photon entanglement in such a system is still missing, except for a trivial extension of two-atom entanglement schemes, i.e., mapping entanglement of two atoms into two photons. In contrast to the case of a single cavity \cite{wwk07}, this system would emit entangled photons into different spatial modes, facilitating their use for other applications.
In this paper, we introduce a conceptually different mechanism for generating two-photon, two-atom, or atom-photon entangled states with a coupled system of two cavities each having a single atom. In brief, the idea is as follows. We first load the two cavities each with a single photon, the two photons having orthogonal polarizations. Both photons are then subjected to free inter-cavity hopping, which can be described by a beamsplitter-like transformation \cite{hbp06}. After a specific time, both photons are distributed over the two cavities in a superposition of four possibilities: in two of them both photons occupy a single cavity, and in the other two there is one photon per cavity. At this instant, each cavity mode is probed by the atom in such a way that the resulting atomic state indicates whether the cavity mode was empty or not. Only when the ensuing measurement of the atomic states reveals that neither of the cavities is empty do we keep the state, whereby the polarization entanglement is extracted from the superposition state of the cavities. Depending on the way of probing, we end up with two-photon, two-atom, or atom-photon entanglement.
A remarkable property of this approach is that the success of entanglement generation is heralded by measurement of atoms, which is known to be efficient. Moreover, the quality of the generated entanglement heavily relies on that of the measurement and any dissipation prior to the measurement can be detected. The heralded entangled state is thus guaranteed to be of high quality. This kind of entanglement source is actually preferable, especially in quantum cryptography \cite{grt02} and linear optics quantum computation \cite{dhn06}. Moreover, a redundant array of heralded entanglement sources accompanied with appropriate feedforward will asymptotically serve as a deterministic source of entangled states.
\begin{figure}
\caption{Coupled systems of two cavities. Two cavity modes are coupled either (a) directly or (b) via a fiber. $J$ and $\kappa$ denote the inter-cavity hopping rate of photons and decay rate of photons into the output channel, respectively. By tailoring both the fiber length $L$ and the resonant frequency of the cavity, system (b) reduces to (a).}
\label{fig:setup}
\end{figure}
The system at hand consists of two single-mode cavities having the same resonant frequency, which are coupled to each other. There are two cases to consider. FIG.~\ref{fig:setup}(a) shows the first case in which the coupling is achieved by the overlap of evanescent fields out of the intermediate cavity mirror. In this case, the interaction Hamiltonian for cavity photons is simply given by (without cavity decay)
\begin{equation}
H_I=J(a^\dagger b + b^\dagger a),
\label{eq:hamiltonian}
\end{equation}
where $a$ and $b$ are the annihilation operators for the two cavity modes, respectively, and $J$ is the rate of inter-cavity hopping of photons \cite{hbp06}. The other case is where the coupling is mediated by a quantum channel such as a fiber (or simply vacuum), as shown in FIG.~\ref{fig:setup}(b). In this case, the interaction Hamiltonian in the rotating frame can be written as
$H_F=\sum_n \nu_n[a^\dagger f_n + (-1)^n b^\dagger f_n + h.c.] + \sum_n \Delta_n f_n^\dagger f_n$,
where $f_n$ is the annihilation operator for the $n$th fiber mode, $\nu_n$ is the coupling rate of the cavity mode to the $n$th fiber mode, and $\Delta_n$ is the frequency difference between the $n$th fiber mode and the cavity mode \cite{p97}. Here, the factor $(-1)^n$ accounts for the phase difference between adjacent modes at the fiber end. We confine our interest to the case of a short fiber, wherein the fiber mode is highly discrete. Moreover, we assume that no fiber mode is resonant to the cavity mode and the nearest mode is far-detuned, that is, the minimum of $\abs{\Delta_n}$ is much larger than the coupling rate $\nu_n$. In this regime, excitation to the fiber mode is highly suppressed and the Hamiltonian reduces to Eq.~(\ref{eq:hamiltonian}) by adiabatic elimination, with an effective inter-cavity coupling rate given by $J=\sum_n(-1)^n\nu_n^2/\Delta_n$. Note that this regime is within the reach of current technology. For example, if we take $L=1~\text{cm}$ and $\kappa_c/2\pi=10~\text{MHz}$, where $L$ is the length of the fiber and $\kappa_c$ is the decay rate of the cavity into continuum modes, the mode spacing of the fiber $(\Delta_{n+1}-\Delta_n)/2\pi=c/2L=15~\text{GHz}$, where $c$ is the speed of light, is found to be much larger than the cavity-fiber mode coupling rate $\nu_n/2\pi\sim\sqrt{(\kappa_c/2\pi)(c/L)}=0.55~\text{GHz}$. In what follows, we shall consider Eq.~(\ref{eq:hamiltonian}) as our model Hamiltonian.
\begin{figure}
\caption{Involved atomic levels and transitions. Two transitions are coupled, respectively, to two orthogonally polarized modes of the cavity with coupling rate $g$ and detuning $\Delta$. The amount of $\Delta$ is adjusted by an additional laser. Ground levels $\ket{g_{L}}$ and $\ket{g_{R}}$ are decoupled from the cavity modes.}
\label{fig:atom}
\end{figure}
We first consider the generation of two-photon polarization entangled states.
The first step toward the entanglement generation is to load two cavities, respectively, with orthogonally polarized single photons $\ket{L}$ (left-circular polarization) and $\ket{R}$ (right-circular polarization). For this, two single atoms are introduced, one into each cavity. The present scheme can be applied to both cases of trapped atoms and flying atoms \cite{yvk99,raimond01,wsb04}. For convenience, we explain the scheme assuming the atoms are trapped in the cavities, which is more straightforward to understand. FIG.~\ref{fig:atom} depicts the atomic level structure we consider. The transition between ground state $\ket{g_{0}}$ and excited state $\ket{e_L}$ ($\ket{e_R}$) is coupled to the left-circularly (right-circularly) polarized mode of the cavity with coupling rate $g$ and detuning $\Delta$. Other ground states $\ket{g_{L}}$ and $\ket{g_{R}}$ are decoupled from the cavity mode. We assume that although the atom is initially far-detuned ($\Delta\gg g$), the detuning can be controlled by ac Stark shift induced by strong classical fields. In the case of generation of entangled photons, the Rabi frequency $\Omega$ of the classical field is not taken into account. In order to generate single photons $\ket{L}$ and $\ket{R}$ in the cavities, two atoms are initialized, respectively, into excited states $\ket{e_L}$ and $\ket{e_R}$, and the detuning $\Delta$ is adjusted to zero for a period of time $\pi/2g$, allowing resonant interaction between the atom and the cavity. For this process not to be disturbed by other transition channels of the cavity photon, we require the strong atom-cavity coupling regime $g\gg J, \kappa, \gamma$, where $\kappa$ is the decay rate of the cavity photon into the output channel (see FIG.~\ref{fig:setup}) and $\gamma$ is the spontaneous decay rate of the atom.
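As a brief aside (a standard resonant Jaynes--Cummings computation, added here only for orientation, with $\ket{1_L}$ denoting a single left-circularly polarized cavity photon), the initial state $\ket{e_L}\ket{0}$ evolves under the resonant coupling $g$ as
\begin{equation}
\ket{e_L}\ket{0}\rightarrow\cos(gt)\ket{e_L}\ket{0}-i\sin(gt)\ket{g_0}\ket{1_L},
\end{equation}
so that after the interaction time $\pi/2g$ the atomic excitation has been converted, up to a phase, into the single photon $\ket{g_0}\ket{1_L}$; the same holds for the right-circular transition.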
Once the cavity photons are prepared, the second step is to turn off the classical fields applied for the ac Stark shift in the first step, so that the detuning $\Delta$ is returned to the initial value. Since the atomic transition is far-detuned, the Hamiltonian conditional on no cavity decay is now expressed by the summation of the inter-cavity hopping terms [Eq.~(\ref{eq:hamiltonian})] and the cavity decay terms:
\begin{equation}
H=J\sum_{p=L,R}(a_p^\dagger b_p+b_p^\dagger a_p)-i\frac{\kappa}{2}\sum_{p=L,R}(a_p^\dagger a_p+b_p^\dagger b_p),
\end{equation}
where the subscripts represent the polarization. By changing the basis as $x_p=\frac{1}{\sqrt2}(a_p+b_p)$ and $y_p=\frac{1}{\sqrt2}(a_p-b_p)$, this Hamiltonian takes a simple form:
\begin{equation}
H=J\sum_{p=L,R}(x_p^\dagger x_p-y_p^\dagger y_p)-i\frac{\kappa}{2}\sum_{p=L,R}(x_p^\dagger x_p+y_p^\dagger y_p),
\label{eq:hamil2}
\end{equation}
and the photonic state $a_L^\dagger b_R^\dagger \ket0$ is written as
\begin{equation}
\ket{\Psi(0)}=\frac{1}{\sqrt2}(\ket{\Phi^-}+\ket{\Psi^-}),
\end{equation}
where $\ket{\Phi^-}=\frac{1}{\sqrt2}(x_L^\dagger x_R^\dagger - y_L^\dagger y_R^\dagger)\ket0=\frac{1}{\sqrt2}(a_L^\dagger b_R^\dagger+a_R^\dagger b_L^\dagger)\ket0$ and $\ket{\Psi^-}=-\frac{1}{\sqrt2}(x_L^\dagger y_R^\dagger - x_R^\dagger y_L^\dagger)\ket0=\frac{1}{\sqrt2}(a_L^\dagger b_R^\dagger-a_R^\dagger b_L^\dagger)\ket0$.
Note that the state $\ket{\Psi^-}$, which is a two-photon polarization entangled state, is invariant under the inter-cavity hopping part in Hamiltonian~(\ref{eq:hamil2}). Consequently, once this state is prepared in the cavity, the output photons are guaranteed to remain in the same entangled state with a definite pulse shape. To this end, we are interested in the (unnormalized) conditional state at time $\pi/4J$:
\begin{equation}
\ket{\Psi\left(\frac{\pi}{4J}\right)}_C=\frac{e^{-\frac{\pi\kappa}{4J}}}{\sqrt2}(-i\ket{\Phi^+}+\ket{\Psi^-}),
\label{eq:state}
\end{equation}
where $\ket{\Phi^+}=\frac{1}{\sqrt2}(x_L^\dagger x_R^\dagger + y_L^\dagger y_R^\dagger)\ket0=\frac{1}{\sqrt2}(a_L^\dagger a_R^\dagger+b_L^\dagger b_R^\dagger)\ket0$.
Note that the state $\ket{\Phi^+}$ is such that only one cavity has both the photons while the other cavity is empty. This is clearly distinguished from the state $\ket{\Psi^-}$ characterized by one photon per cavity. Consequently, if we perform a non-demolition measurement distinguishing between zero and one photon at each cavity and take the state only when both cavities have one photon, we can extract the state $\ket{\Psi^-}$. If the measurement is ideal, it succeeds with probability $P=\frac{1}{2}\exp\left(-\frac{\pi\kappa}{2J}\right)$, which approaches $\frac12$ as $J/\kappa$ increases. Remarkably, regardless of the success probability, the resulting state has in principle unit fidelity.
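For completeness, here is a brief check of Eq.~(\ref{eq:state}) using only Hamiltonian~(\ref{eq:hamil2}): each $x$ ($y$) excitation acquires the conditional factor $e^{(\mp iJ-\kappa/2)t}$, so that at $t=\pi/4J$
\begin{equation}
x_L^\dagger x_R^\dagger\ket0\rightarrow -ie^{-\frac{\pi\kappa}{4J}}x_L^\dagger x_R^\dagger\ket0,\qquad
y_L^\dagger y_R^\dagger\ket0\rightarrow ie^{-\frac{\pi\kappa}{4J}}y_L^\dagger y_R^\dagger\ket0,
\end{equation}
while the mixed states $x_p^\dagger y_{p'}^\dagger\ket0$ only acquire the damping factor $e^{-\frac{\pi\kappa}{4J}}$. Hence $\ket{\Phi^-}\rightarrow -ie^{-\frac{\pi\kappa}{4J}}\ket{\Phi^+}$ and $\ket{\Psi^-}\rightarrow e^{-\frac{\pi\kappa}{4J}}\ket{\Psi^-}$, reproducing Eq.~(\ref{eq:state}) and the success probability quoted above.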
The remaining question is how to perform the non-demolition measurement distinguishing between zero and one photon. We also require the measurement to be achieved without distinguishing between polarizations and to be fast enough for the photonic state~(\ref{eq:state}) not to evolve during the measurement. Such a measurement is again aided by the atoms inside the cavities \cite{raimond01,devitt07}. Before starting the measurement, each atom should be prepared in state $\ket+\equiv\frac{1}{\sqrt2}(\ket{g_L}+\ket{g_0})$. Recalling that the atoms are in state $\ket{g_{0}}$ as a result of the single-photon generation in the first step, this can be easily done by classical Raman pulses. Note that the preparation can be performed without disturbing the evolution of photons during the second step, since the atoms are far-detuned from the cavity mode. The detuning $\Delta$ is now adjusted to zero, allowing resonant atom-cavity interaction, for a period of time $\pi/g$. From FIG.~\ref{fig:atom}, it is easily seen that if there were one photon in a cavity, the resulting state would be $\ket-\equiv\frac{1}{\sqrt2}(\ket{g_L}-\ket{g_0})$ with a remaining single photon having the same polarization. If there were no photon, however, the atomic state would not be changed. We can thus distinguish between the two cases by measuring the resulting atoms in the $\ket\pm$ basis. In the case of two photons, the resulting atomic state contains excited-state components as well, thus the atom could be measured in both states. This does not arise as a problem, however, since in that case the other cavity should be empty. To sum up, the generation of two-photon entanglement succeeds only when both atoms are measured in state $\ket-$. Note that this measurement is fast because we are assuming the strong atom-cavity coupling regime.
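Explicitly (a short check with the same resonant dynamics as above, writing $\ket{1_p}$ for a single cavity photon of polarization $p$), the component $\ket{g_0}\ket{1_p}$ undergoes a full Rabi cycle,
\begin{equation}
\ket{g_0}\ket{1_p}\rightarrow\cos(gt)\ket{g_0}\ket{1_p}-i\sin(gt)\ket{e_p}\ket{0}\rightarrow-\ket{g_0}\ket{1_p}\quad(t=\pi/g),
\end{equation}
while $\ket{g_L}$ is decoupled and unchanged; hence $\ket{+}\ket{1_p}\rightarrow\ket{-}\ket{1_p}$, whereas for an empty cavity nothing evolves and the atom remains in $\ket+$.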
The two-atom entangled state can also be generated in a similar manner just by modifying the above non-demolition measurement step as follows. In this case, we start from the state $\ket{g_{0}}$, which is automatically prepared by the single-photon generation step. Right after the second step, the detuning $\Delta$ is adjusted to zero for a period of time $\pi/\sqrt{2}g$, during which the classical field with Rabi frequency $\Omega$ in FIG.~\ref{fig:atom} is also applied with $\Omega=g$. It is easily seen that if the cavity was in the single-photon state $\ket{L}$ ($\ket{R}$), this operation coherently transfers the atomic state completely into state $\ket{g_{L}}$ ($\ket{g_{R}}$). On the other hand, if the cavity was empty, this operation does not change the atomic state. Consequently, if the resulting atomic state is measured by observing the resonance fluorescence on a transition between $\ket{g_{0}}$ and an excited state, the zero-photon state can be distinguished from the single-photon state, while at the same time the photonic state is mapped into the atom. By discarding the state with an empty cavity, the atoms thus remain in an entangled state $\frac{1}{\sqrt{2}}(\ket{g_{L}}\ket{g_{R}}-\ket{g_{R}}\ket{g_{L}})$. Note that by replacing the state mapping with the previous non-demolition measurement in either of the cavities, this scheme is straightforwardly extended to the case of atom-photon entanglement generation. A further extension would be combining entangled atom pairs to generate a multi-qubit cluster state \cite{lim06}.
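The transfer time $\pi/\sqrt2g$ can be verified as follows, under the assumption (suggested by the level scheme and adopted here only for this estimate) that the classical field resonantly couples $\ket{e_L}$ to $\ket{g_L}$ (and $\ket{e_R}$ to $\ket{g_R}$) with interaction strength $\Omega$. In the three-state basis $\{\ket{g_0}\ket{1_L},\ket{e_L}\ket{0},\ket{g_L}\ket{0}\}$ the amplitude $c_{g_L}(t)$ of $\ket{g_L}\ket{0}$ obeys
\begin{equation}
c_{g_L}(t)=\frac{g\Omega}{g^2+\Omega^2}\left[\cos\left(\sqrt{g^2+\Omega^2}\,t\right)-1\right],
\end{equation}
which for $\Omega=g$ equals $-1$ at $t=\pi/\sqrt2g$, i.e., complete transfer up to a sign; an empty cavity, in contrast, leaves $\ket{g_0}\ket{0}$ untouched.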
In the remainder of this paper, we discuss the effects of decays on the performance of the scheme. Since the Hilbert space of the system at hand is not small enough to be dealt with by exact analytic calculations, we take the perturbation approach and obtain the involved states up to first order in small constants $J/g$, $\kappa/g$, and $\gamma/g$. Let us first consider the generation of cavity photon $\ket{L}$, which is achieved by having an atom in state $\ket{e_L}$ interact with the resonant cavity mode for a period of time $\pi/2g$. This process is disturbed by the atomic spontaneous decay, the cavity decay, and hopping of the photon into the other cavity. Among them, the first two, which result in photon loss, do not affect the fidelity of the final entangled state, since loss of photons can be detected by the measurement. Conditional on having one photon, the final state of this process is found to be $(a_L^\dagger-i\frac{J}{g}b_L^\dagger)\ket{0}\ket{g_1}$ up to first order. Here, we neglected the effect occurring when the photon having hopped into the other cavity is absorbed by the other atom, since it is a higher-order contribution. This state is approximately the same as the state we get when the second step of the scheme proceeds for a time $\tau$ given by $J\tau=J/g$. This amount of time can be thus compensated by decreasing the interaction time for the second step. Assuming the pulse timing is exact up to first order, we can regard the state of the cavity photons just before the final step as being nearly perfect. Let us first consider the case of the two-photon entanglement generation. The non-demolition measurement succeeds only when both atoms prepared in state $\ket+$ are measured in state $\ket-$. Up to first order, this process completely filters out the case of having two photons in one cavity and no photon in the other cavity, since the probability that the photon hops to the empty cavity and flips the atomic state is of higher order. In case each cavity has one photon, however, the photon loss could lead to an erroneous measurement result due to the loss of atomic coherence. If both atoms are measured in state $\ket-$, the final state of the photons $\rho_P$ is proportional to $(1-2P_{L})\ket{\Psi^-}\bra{\Psi^-}+P_L\cdot\frac14 I\otimes I$, where $P_{L}=\frac{\pi}{4g}(\gamma+\kappa)$ and $I$ is the identity operator. Here, for simplicity, we have assumed that when photons are lost, the state is fully mixed. The fidelity of the final state $\bra{\Psi^-}\rho_P\ket{\Psi^-}$ is thus given by
$F_{P}=1-(3\pi/16)\cdot(\gamma/g+\kappa/g)$.
Note that inaccurate control of the detuning, i.e., a nonzero $\Delta$, rather increases the fidelity by suppressing the spontaneous decay, although it decreases the success probability.
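As a cross-check of the fidelity formula above (keeping only terms linear in $P_L$ and using the stated form of $\rho_P$), normalization gives
\begin{equation}
F_P=\frac{(1-2P_L)+P_L/4}{1-P_L}=1-\frac34P_L+O(P_L^2),
\end{equation}
which with $P_L=\frac{\pi}{4g}(\gamma+\kappa)$ indeed yields $F_P=1-(3\pi/16)(\gamma/g+\kappa/g)$.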
In the case of two-atom entanglement generation, cavity decay during the final step can be detected, since it leaves the atom in state $\ket{g_{0}}$. Some portion of the atomic spontaneous decay, i.e., decay into state $\ket{g_{0}}$, is also detected, according to its branching ratio. For simplicity, we assume the branching ratio of the decay to $\ket{g_{0}}$ and $\ket{g_{L,R}}$ is 50:50. A similar calculation yields the fidelity of the two-atom entangled state as
$F_{A}=1-(3\pi/16\sqrt2)\cdot(\gamma/g)$.
Recalling that we are assuming the strong atom-cavity coupling regime $g\gg\gamma,\kappa$, these fidelities are reasonably high. There are several cavity models which are expected to exhibit very strong coupling regimes \cite{skv05}. Even in a moderately strong coupling regime $g/10\sim\gamma,\kappa$ \cite{yvk99}, if we take the state only when both cavities output a photon, as in most current experiments based on single photons, the fidelity of the two-photon entangled state becomes $F_{P}=1$ up to first order.
This work has been supported by the Korea Research Foundation Grant (KRF-2007-357-C00016) funded by the Korean Government (MOEHRD), QIP IRC (GR/S821176/01), and the
European Union through the Integrated Projects SCALA (CT-015714).
SB would like to thank the Engineering and Physical Sciences Research
Council (EPSRC) UK for an Advanced Research Fellowship, and the Royal
Society and the Wolfson Foundation for their support.
\begin{references}
\bibitem{e91} T. Jennewein, C. Simon, G. Weihs, H. Weinfurter, and A. Zeilinger, Phys. Rev. Lett. \textbf{84}, 4729 (2000).
\bibitem{bbc93} Y.-H. Kim, S. P. Kulik, and Y. Shih, Phys. Rev. Lett. \textbf{86}, 1370 (2001).
\bibitem{s03} Y. Shih, Rep. Prog. Phys. \textbf{66}, 1009 (2003).
\bibitem{b64} G. Weihs, T. Jennewein, C. Simon, H. Weinfurter, and A. Zeilinger, Phys. Rev. Lett. \textbf{81}, 5039 (1998).
\bibitem{pittman03} T. B. Pittman, M. J. Fitch, B. C. Jacobs, and J. D. Franson, Phys. Rev. A \textbf{68}, 032316 (2003).
\bibitem{lu07} C.-Y. Lu \textit{et al.}, Nature Phys. \textbf{3}, 91 (2007).
\bibitem{haffner05} H. Haffner \textit{et al.}, Nature \textbf{438}, 643 (2005).
\bibitem{mandel03} O. Mandel \textit{et al.}, Nature \textbf{425}, 937 (2003).
\bibitem{raimond01} J. M. Raimond, M. Brune, and S. Haroche, Rev. Mod. Phys. \textbf{73}, 565 (2001).
\bibitem{wwk07} T. Wilk, S. C. Webster, A. Kuhn, and G. Rempe, Science \textbf{317}, 488 (2007).
\bibitem{p97} T. Pellizzari, Phys. Rev. Lett. \textbf{79}, 5242 (1997); J. Cho and H.-W. Lee, Phys. Rev. A \textbf{72}, 052309 (2005); A. Serafini, S. Mancini, and S. Bose, Phys. Rev. Lett. \textbf{96}, 010503 (2006).
\bibitem{asy07} D. G. Angelakis, M. F. Santos, V. Yannopapas, and A. Ekert, Phys. Lett. A \textbf{362}, 377 (2007).
\bibitem{amb07} D. G. Angelakis and S. Bose, J. Opt. Soc. Am. B \textbf{24}, 266 (2007); D. G. Angelakis, S. Mancini, and S. Bose, arXiv:0711.1830.
\bibitem{hbp06} M. J. Hartmann, F. G. S. L. Brand\~ao, and M. B. Plenio, Nature Phys. \textbf{2}, 849 (2006); A. D. Greentree, C. Tahan, J. H. Cole, and L. C. L. Hollenberg, Nature Phys. \textbf{2}, 856 (2006); D. G. Angelakis, M. F. Santos, and S. Bose, Phys. Rev. A \textbf{76}, 031805(R) (2007); M. Paternostro, G. S. Agarwal, and M. S. Kim, arXiv:0707.0846.
\bibitem{duan03} L.-M. Duan and H. J. Kimble, Phys. Rev. Lett. \textbf{90}, 253601 (2003); C. Simon and W. T. M. Irvine, Phys. Rev. Lett. \textbf{91}, 110405 (2003).
\bibitem{lim06} Y. L. Lim, S. D. Barrett, A. Beige, P. Kok, and L. C. Kwek, Phys. Rev. A \textbf{73}, 012304 (2006).
\bibitem{cho05} J. Cho and H.-W. Lee, Phys. Rev. Lett. \textbf{95}, 160501 (2005).
\bibitem{grt02} N. Gisin, G. Ribordy, W. Tittel, and H. Zbinden, Rev. Mod. Phys. \textbf{74}, 145 (2002).
\bibitem{dhn06} D. E. Browne and T. Rudolph, Phys. Rev. Lett. \textbf{95}, 010501 (2005).
\bibitem{yvk99} J. Ye, D. W. Vernooy, and H. J. Kimble, Phys. Rev. Lett. \textbf{83}, 4987 (1999).
\bibitem{wsb04} A. Wallraff \textit{et al.}, Nature \textbf{431}, 162 (2004); T. Aoki \textit{et al.}, Nature \textbf{443}, 671 (2006); M. Trupke \textit{et al.}, Phys. Rev. Lett. \textbf{99}, 063601 (2007).
\bibitem{devitt07} S. J. Devitt \textit{et al.}, Phys. Rev. A \textbf{76}, 052312 (2007).
\bibitem{skv05} S. M. Spillane \textit{et al.}, Phys. Rev. A \textbf{71}, 013817 (2005).
\end{references}
\end{document}
|
\begin{document}
\author{{Shiqiu Zheng$^{1, 2}$\thanks{Corresponding author, E-mail: [email protected](S. Zheng).}\ , \ \ Shoumei Li$^{1}$\thanks{E-mail: [email protected](S.Li).}}
\\
\small(1, College of Applied Sciences, Beijing University of Technology, Beijing 100124, China)\\
\small(2, College of Sciences, Hebei United University, Tangshan 063009, China)\\
}
\date{}
\title{\textbf{Representation for filtration-consistent nonlinear expectations under a general domination condition}\thanks{This work is supported by the National Natural Science Foundation of China (No. 11171010) and the Science and Technology Program of Tangshan (No.
13130203z).}}\maketitle
\textbf{Abstract:}\quad In this paper, we consider filtration-consistent nonlinear expectations which satisfy a general domination condition (dominated by ${\cal{E}}^{\phi}$). We show that this kind of nonlinear expectation can be represented by $g$-expectations defined by the solutions of backward stochastic differential equations, whose generators are independent of $y$ and uniformly continuous in $z$.
\\
\textbf{Keywords:}\quad Filtration-consistent expectation; $g$-expectation; Backward stochastic differential equation; Doob-Meyer decomposition\\
\textbf{AMS Subject Classification:} \quad 60H10.
\section{Introduction}
The $g$-expectation, initiated by Peng [17] in 1997, is a kind of nonlinear expectation defined by the solution of a backward stochastic differential equation (BSDE) and can be considered as a nonlinear extension of the Girsanov transformation. The original motivation for studying $g$-expectation comes from the theory of expected utility, which is challenged by the famous Allais paradox and Ellsberg paradox. As a nonlinear expectation, $g$-expectation preserves many properties of classical linear expectations except linearity. In particular, it is time-consistent. For properties of $g$-expectation and its applications in utility and risk measures, one can see Briand et al. [1], Chen et al. [2], Chen and Epstein [3], Cohen [4], Coquet et al. [5], Delbaen et al. [6], Jia [10, 11], Jiang [12, 13], Ma and Yao [16], Peng [17, 18, 19], Royer [20] and Rosazza Gianin [21], among many others.
Time-consistency is one of the important properties of $g$-expectation, which changes based on new observations as time goes on. As a natural extension of $g$-expectation, the notion of filtration-consistent nonlinear expectation was first introduced in Coquet et al. [5]. An axiomatic system for this dynamic nonlinear expectation was further introduced in Peng [19]. A very important and interesting result in Coquet et al. [5] shows that a filtration-consistent nonlinear expectation ${\cal{E}}$ can be represented by a $g$-expectation defined by the solution of a BSDE whose generator $g$ is independent of $y$ and Lipschitz in $z$, when it is translation invariant and satisfies the following domination condition:
$${\cal{E}}[X]-{\cal{E}}[Y]\leq{\cal{E}}^\mu[X-Y],\eqno(1)$$
where ${\cal{E}}^\mu$ is a $g$-expectation defined by the solution of a BSDE whose generator $g=\mu|z|$ for some constant $\mu>0.$
As extensions of the representation theorem in Coquet et al. [5], Royer [20] obtains a result based on a BSDE with jumps whose generator $g$ is Lipschitz continuous. Cohen [4] obtains a result based on BSDEs in general probability spaces, whose generator $g$ is also Lipschitz continuous. In fact, the domination conditions in Royer [20] and Cohen [4] are both similar to domination condition (1). Hu et al. [9] obtains a result based on a BSDE whose generator $g$ has quadratic growth, under three domination conditions (see Hu et al. [9, Definition 3.8]) and some other extra conditions. In fact, filtration-consistent nonlinear expectations have a direct correspondence to a fairly large class of risk measures in finance (see Peng [19]). Furthermore, in Hu et al. [9, Page 1519], the authors give the following consequence in finance:
\begin{center}
\emph{Any time-consistent risk measure satisfying the required domination condition can be represented by the solution of a simple BSDE!}
\end{center}
In this topic, an interesting problem is whether we can represent a filtration-consistent nonlinear expectation by a $g$-expectation under the following domination condition (2):
$${\cal{E}}[X|{\cal{F}}_t]-{\cal{E}}[Y|{\cal{F}}_t]\leq{\cal{E}}^{\phi}[X-Y|{\cal{F}}_t],\ \ \forall t\in[0,T],\eqno(2)$$
where ${\cal{E}}^\phi$ is a $g$-expectation defined by the solution of a BSDE whose generator is $g=\phi(|z|)$, and $\phi(\cdot):{\mathbf{R_+}}\rightarrow{\mathbf{R_+}}$ is subadditive and increasing with $\phi(0)=0$ and has linear growth. This problem was posed as an unsolved problem by Jia in 2010 (see Jia [11, Remark 4.6]).
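A typical choice satisfying these requirements (given here only as an illustration, not taken from [11]) is $\phi(x)=\sqrt{x}$: it is increasing, subadditive since $\sqrt{a+b}\leq\sqrt{a}+\sqrt{b}$, satisfies $\phi(0)=0$ and $\phi(x)\leq x+1$, while the corresponding generator $\phi(|z|)=\sqrt{|z|}$ is uniformly continuous but not Lipschitz in $z$, so it is not of the form $\mu|z|$ appearing in domination condition (1).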
In this paper, we solve this problem, using some methods derived from Coquet et al. [5], Hu et al. [9] and Peng [19]. In solving it, we come across some new difficulties, one of which is the most fundamental: the estimate $E|{\cal{E}}^\mu[X-Y]|^2\leq CE|X-Y|^2,$ which holds true for ${\cal{E}}^\mu$, is not always true for ${\cal{E}}^{\phi}$, where $C>0$ is a constant. As a result, we cannot find a fixed point method that can be used directly to solve the BSDE considered in Coquet et al. [5, Theorem 6.1], when such a BSDE has an $L^2$ terminal variable and the filtration-consistent expectation ${\cal{E}}$ is dominated by ${\cal{E}}^{\phi}.$ In fact, solving such BSDEs under domination condition (2) is crucial in our paper. Inspired by Hu et al. [9], we use the following strategy. Under domination condition (2), we consider a class of special BSDEs under the filtration-consistent expectation ${\cal{E}}$ with bounded terminal variables. Such BSDEs help us obtain a Doob-Meyer decomposition for ${\cal{E}}$-supermartingales of a special form. Finally, this special Doob-Meyer decomposition is sufficient to establish our representation theorem under domination condition (2). Conversely, from our representation theorem we obtain the existence and uniqueness of BSDEs under ${\cal{E}}$ with $L^2$ terminal variables and a general Doob-Meyer decomposition for ${\cal{E}}$-supermartingales.
This paper is organized as follows. In the next section, we recall the definitions of $g$-expectation and $g$-martingale and some important results. In Section 3, we recall the definitions of the filtration-consistent expectation ${\cal{E}}$ and ${\cal{E}}$-martingale, and prove some useful properties. In Section 4, we give a Doob-Meyer decomposition for ${\cal{E}}$-supermartingales of a special form. In Section 5, a representation theorem for filtration-consistent expectations is obtained under domination condition (2).
\section{Preliminaries}
Let $(\Omega,\cal{F},\mathit{P})$ be a complete probability space
carrying a $d$-dimensional standard Brownian motion ${{(B_t)}_{t\geq
0}}$, let $({\cal{F}}_t)_{t\geq 0}$ denote
the natural filtration generated by ${{(B_t)}_{t\geq 0}}$, augmented
by the $\mathit{P}$-null sets of ${\cal{F}}$. For $\mathit{z}\in {\mathbf{R}}^d$, let $|z|$ denote its Euclidean norm, and let $T>0$ be a given time horizon. For stopping times $\tau_1$ and $\tau_2$ satisfying $\tau_1\leq \tau_2,$ let ${\cal{T}}_{\tau_1,\tau_2}$ be the set of all stopping times $\tau$ satisfying $\tau_1\leq \tau\leq \tau_2.$ For $\tau\in{\cal{T}}_{0,T},$ we define the following usual
spaces:
$L^2({\mathcal {F}}_\tau;{\mathbf{R}}^d)=\{\xi:\ {\cal{F}}_\tau$-measurable
${\mathbf{R}}^d$-valued random variable; ${\mathbf{E}}\left[|\xi|^2\right]<\infty\};$
$L^\infty({\mathcal {F}}_\tau;{\mathbf{R}}^d)=\{\xi:\ {\cal{F}}_\tau$-measurable
${\mathbf{R}}^d$-valued random variable; $\|\xi\|_{L^\infty}=\textrm{esssup}_{\omega\in\Omega}|\xi|<\infty\};$
$L^2_{\cal{F}}(0,\tau;{\mathbf{R}}^d)=\{\psi:\ {\mathbf{R}}^d$-valued predictable
process; $E\left[\int_0^\tau|\psi_t|^2dt\right]
<\infty \};$
$L^\infty_{\cal{F}}(0,\tau;{\mathbf{R}}^d)=\{\psi:\ {\mathbf{R}}^d$-valued predictable
process; $\|\psi\|_{L^\infty_{\cal{F}}}=\textrm{esssup}_{(\omega,t)\in\Omega\times [0,T]}|\psi _t| <\infty\};$
${\mathcal{D}}^2_{\cal{F}}(0,\tau;{\mathbf{R}}^d)=\{\psi:\ $ RCLL
process in $L^2_{\cal{F}}(0,\tau;{\mathbf{R}}^d)$;\ $E
[{\mathrm{sup}}_{0\leq t\leq \tau} |\psi _t|^2] <\infty \};$
${\mathcal{D}}^\infty_{\cal{F}}(0,\tau;{\mathbf{R}}^d)=\{\psi:\ $ RCLL
process in $L^\infty_{\cal{F}}(0,\tau;{\mathbf{R}}^d) \};$
${\mathcal{S}}^2_{\cal{F}}(0,\tau;{\mathbf{R}}^d)=\{\psi:\ $ continuous
process in ${\mathcal{D}}^2_{\cal{F}}(0,\tau;{\mathbf{R}}^d)\};$
${\mathcal{S}}^\infty_{\cal{F}}(0,\tau;{\mathbf{R}}^d)=\{\psi:\ $ continuous
process in ${\mathcal{D}}^\infty_{\cal{F}}(0,\tau;{\mathbf{R}}^d)\}.$\\
Note that when $d=1,$ we always denote $L^2({\mathcal {F}}_\tau;{\mathbf{R}}^d)$ by $L^2({\mathcal {F}}_\tau)$ by convention, and the same convention applies to the other spaces above.\\
Let us consider a function $g$
$${g}\left( \omega ,t,y,z\right) : \Omega \times [0,T]\times \mathbf{
R\times R}^{\mathit{d}}\longmapsto \mathbf{R},$$ such that
$\left(g(t,y,z)\right)_{t\in [0,T]}$ is progressively measurable for
each $(y,z)\in\mathbf{
R\times R}^{\mathit{d}}$. For the function $g$, in this paper, we make the following assumptions:
\begin{itemize}
\item (A1). There exists a constant
$K\geq0$ and a continuous function $\phi(\cdot)$, such that $P$-$a.s.,\ \forall t\in[0,T],\
\forall (y_i,z_i)\in {\mathbf{ R\times R}}^{\mathit{d}},\ \
(i=1,2):$
$$|{g}( t,y_{1},z_{1})-{g}( t,y_{2},z_{2}) |\leq K|y_{\mathrm 1}
-y_{2}|+\phi(|z_{\mathrm1}-z_{2}|),$$
where $\phi(\cdot):{\mathbf{R_+}}\rightarrow{\mathbf{R_+}}$ is subadditive and increasing with $\phi(0)=0$ and has linear growth with constant $\nu$, i.e., $\forall x\in {\mathbf{R}}^d, \ \phi(|x|)\leq \nu(|x|+1);$
\item (A2). $\forall (y,z)\in{\mathbf{ R\times R}}^{\mathit{d}},\ g(t,y,z)\in L^2_{\cal{F}}(0,T);$
\item (A3). $P$-$a.s.$, $\forall (t,y)\in[0,T]\times{\mathbf{R}},\ g(t,y,0)=0.$
\item (A1)$^*$. There exists a constant
$\mu\geq0$, such that $P$-$a.s.,\ \forall t\in[0,T],\
\forall (y_i,z_i)\in {\mathbf{ R\times R}}^{\mathit{d}},\ \
(i=1,2):$
$$|{g}( t,y_{1},z_{1})-{g}( t,y_{2},z_{2}) |\leq \mu(|y_{\mathrm 1}
-y_{2}|+|z_{\mathrm1}-z_{2}|).$$
\end{itemize}
We consider the following BSDEs with parameter $(g,\xi,T):$
$$Y_t=\xi +\int_t^Tg\left(s,Y_s,Z_s\right)
ds-\int_t^TZ_sdB_s,\ \ \forall t\in[0,T].$$
If the generator $g$ satisfies (A1) and (A2) and $\xi\in L^2({\mathcal {F}}_T)$, then the BSDE has a unique solution $(Y_t,Z_t)\in{\mathcal{S}}^2_{\cal{F}}(0,T)\times L^2_{\cal{F}}(0,T;{\mathbf{R}}^d)$ (see Jia [10, Theorem 3.3.6], Jia [11, Theorem 2.3] or Fan and Jiang [8, Theorem 2]). Note that since $\phi$ given in (A1) is subadditive and increasing, we have $|\phi(|z_1|)-\phi(|z_2|)|\leq\phi(|z_1-z_2|)$ for all $z_1,z_2\in{\mathbf{R}}^d$, so the generator $\phi(|z|)$ (resp. $-\phi(|z|)$) itself satisfies (A1); hence the BSDE with parameter $(\phi(|z|),\xi,T)$ (resp. $(-\phi(|z|),\xi,T)$) has a unique solution. If $g$ satisfies (A1), (A2) and (A3), a generalized $g$-expectation and the corresponding $g$-martingale are introduced in Jia [10, 11]; they extend the standard $g$-expectation and $g$-martingale introduced by Peng [17, 18, 19] under (A1)$^*$, (A2) and (A3).\\\\
\
\textbf{Definition 2.1} Let $g$ satisfy (A1), (A2) and (A3), let $\xi\in L^2({\mathcal {F}}_T)$, and let $(Y_t,Z_t)\in{\mathcal{S}}^2_{\cal{F}}(0,T)\times L^2_{\cal{F}}(0,T;{\mathbf{R}}^d)$ be the solution of the BSDE with parameter $(g,\xi,T)$. The conditional $g$-expectation of $\xi$ is defined by
$${\cal{E}}^g[\xi|{\cal{F}}_t]:=Y_t$$
for $t\in[0,T]$ and $g$-expectation of $\xi$ is defined by
$${\cal{E}}^g[\xi]:=Y_0.$$
\textbf{Definition 2.2} Let $g$ satisfy (A1), (A2) and (A3). A process $Y_t$ with $Y_t\in L^2({\cal{F}}_t)$ for $t\in [0,T],$ is called a $g$-martingale (resp. $g$-supermartingale, $g$-submartingale), if, for each $s\leq t\leq T,$ we have
\begin{center}
${\cal{E}}^g[Y_t|{\cal{F}}_s]=Y_s,$\ \ \ (resp. $\leq,\ \geq$).
\end{center}
Note that we denote ${\cal{E}}^{g}$ by ${\cal{E}}^{\phi}$ (resp. ${\cal{E}}^{-\phi}$) if $g=\phi(|z|)$ (resp. $g=-\phi(|z|)$) for a function $\phi(\cdot)$, and we denote ${\cal{E}}^{g}$ by ${\cal{E}}^{\mu}$ (resp. ${\cal{E}}^{-\mu}$) if $g=\mu|z|$ (resp. $g=-\mu|z|$) for a constant $\mu>0$. In fact, following Peng [19], we can also define $g$-martingales (resp. $g$-supermartingales, $g$-submartingales) without (A3), only under (A1) (or (A1)$^*$) and (A2).\\
The following Lemma 2.1, which comes from Jia [10, Theorem 3.6.11], is the Doob-Meyer decomposition of $g$-supermartingales under (A1) and (A2). \\\\
\
\textbf{Lemma 2.1} Let $g$ satisfy (A1) and (A2), and let $Y_t$ be a $g$-supermartingale with right-continuous paths. Then there exists an RCLL process $A_t$, which is increasing with $A_0=0$ and $A_T\in L^2({\cal{F}}_T),$ such that $(Y_t, Z_t)$ is the solution of the following BSDE (with $\xi=Y_T$):
$$Y_t=\xi +\int_t^Tg\left(s,Y_s,Z_s\right)
ds+A_T-A_t-\int_t^TZ_sdB_s,\ \ t\in[0,T].$$
\section{Filtration-consistent nonlinear expectation}
In this section, we will recall the definitions of the filtration-consistent expectation ${\cal{E}}$ and ${\cal{E}}$-martingale introduced in Peng [19], and prove some important properties which are useful in the proof of our main result. \\\\
\
\textbf{Definition 3.1} Define a system of operators:
$${\cal{E}}[\cdot|{\cal{F}}_t]:\ L^2({\cal{F}}_T)\longrightarrow L^2({\cal{F}}_t), \ t\in[0,T].$$
The operator ${\cal{E}}[\cdot|{\cal{F}}_t]$ is called a filtration-consistent conditional expectation (${\cal{F}}$-expectation for short) if it satisfies the following axioms:
(i) Monotonicity: ${\cal{E}}[\xi|{\cal{F}}_t]\geq{\cal{E}}[\eta|{\cal{F}}_t], P-a.s., $ if $\xi\geq \eta,\ P-a.s.;$
(ii) Constant preservation: ${\cal{E}}[\xi|{\cal{F}}_t]=\xi, P-a.s., $ if $\xi\in L^2({\mathcal {F}}_t);$
(iii) Consistency: ${\cal{E}}[{\cal{E}}[\xi|{\cal{F}}_t]|{\cal{F}}_s]={\cal{E}}[\xi|{\cal{F}}_s], P-a.s.,$ if $s\leq t\leq T;$
(iv) "0-1 Law": ${\cal{E}}[1_A\xi|{\cal{F}}_t]=1_A{\cal{E}}[\xi|{\cal{F}}_t], P-a.s., $ if $A\in{\mathcal {F}}_t.$\\\\
\
\textbf{Definition 3.2} A process $Y_t$ with $Y_t\in L^2({\cal{F}}_t)$ for $t\in [0,T]$ is called an ${\cal{E}}$-martingale (resp. ${\cal{E}}$-supermartingale, ${\cal{E}}$-submartingale) if, for each $s\leq t\leq T,$ we have
\begin{center}
${\cal{E}}[Y_t|{\cal{F}}_s]=Y_s,$\ \ \ (resp. $\leq,\ \geq$).
\end{center}
Note that the $g$-expectation defined in Section 2 is an ${\cal{F}}$-expectation (see Jia [11, Theorem 4.3]). Thus the corresponding $g$-martingale (resp. $g$-supermartingale, $g$-submartingale) is also an ${\cal{E}}$-martingale (resp. ${\cal{E}}$-supermartingale, ${\cal{E}}$-submartingale).\\
Now we give some conditions for ${\cal{F}}$-expectation ${\cal{E}}$:
\begin{itemize}
\item (H1). For each $X,\ Y$ in $L^2({\cal{F}}_T),$ we have
\begin{center}
${\cal{E}}[X|{\cal{F}}_t]-{\cal{E}}[Y|{\cal{F}}_t]\leq{\cal{E}}^{\phi}[X-Y|{\cal{F}}_t], \ \ \forall t \in[0,T],$
\end{center}
where $\phi(\cdot)$ is the function given in (A1).
\item (H2). (Translation invariance) For each $X$ in $L^2({\cal{F}}_T)$ and $t$ in $[0,T],$ we have,
$${\cal{E}}[X+Y|{\cal{F}}_t]={\cal{E}}[X|{\cal{F}}_t]+Y, \ \ \forall Y\in L^2({\cal{F}}_t);$$
\item (H1)$^*$. For each $X,\ Y$ in $L^2({\cal{F}}_T),$ we have
\begin{center}
${\cal{E}}[X|{\cal{F}}_t]-{\cal{E}}[Y|{\cal{F}}_t]\leq{\cal{E}}^\mu[X-Y|{\cal{F}}_t], \ \ \forall t \in[0,T],$
\end{center}
where $\mu>0$ is a constant.
\end{itemize}
We list some properties of ${\cal{F}}$-expectation ${\cal{E}},$ which are useful in this paper.\\\\
\
\textbf{Lemma 3.1} Let ${\cal{F}}$-expectation ${\cal{E}}$ satisfy (H1). Then for each $X,\ Y, \ X_n$ in $L^2({\cal{F}}_T),\ n\geq1,$ we have, $\forall t\in[0,T]$,
(i) $-{\cal{E}}^{-\phi}[X|{\cal{F}}_t]={\cal{E}}^{\phi}[-X|{\cal{F}}_t];$
(ii) ${\cal{E}}^{-\phi}[X-Y|{\cal{F}}_t]\leq{\cal{E}}[X|{\cal{F}}_t]-{\cal{E}}[Y|{\cal{F}}_t]\leq{\cal{E}}^{\phi}[X-Y|{\cal{F}}_t];$
(iii) ${\cal{E}}^{-\phi}[X|{\cal{F}}_t]\leq{\cal{E}}[X|{\cal{F}}_t]\leq{\cal{E}}^{\phi}[X|{\cal{F}}_t];$
(iv) $|{\cal{E}}[X|{\cal{F}}_t]-{\cal{E}}[Y|{\cal{F}}_t]|\leq{\cal{E}}^{\phi}[|X-Y||{\cal{F}}_t];$
(v) $\lim_{n\rightarrow\infty}E[|{\cal{E}}[X_n|{\cal{F}}_t]-{\cal{E}}[X|{\cal{F}}_t]|^2]=0,$ if $\lim_{n\rightarrow\infty}E[|X_n-X|^2]=0.$
\\\\
\
\emph{Proof.} (i) can be checked immediately. By (i) and (H1), we have
$${\cal{E}}^{-\phi}[X-Y|{\cal{F}}_t]=-{\cal{E}}^{\phi}[Y-X|{\cal{F}}_t]\leq{\cal{E}}[X|{\cal{F}}_t]-{\cal{E}}[Y|{\cal{F}}_t]
\leq{\cal{E}}^{\phi}[X-Y|{\cal{F}}_t],$$
then (ii) holds true. (iii) is a consequence of (ii) and "Constant preservation" of ${\cal{E}}$. By (i), (ii) and "Monotonicity" of ${\cal{E}}$, we have
$$-{\cal{E}}^{\phi}[|X-Y||{\cal{F}}_t]={\cal{E}}^{-\phi}[-|X-Y||{\cal{F}}_t]\leq{\cal{E}}[X|{\cal{F}}_t]-{\cal{E}}[Y|{\cal{F}}_t]\leq{\cal{E}}^{\phi}[|X-Y||{\cal{F}}_t],$$
then (iv) holds true. If $\lim_{n\rightarrow\infty}E[|X_n-X|^2]=0,$ by the "Constant preservation" of ${\cal{E}}^{\phi}$ and Jia [11, Theorem 3.11], we can get
$$\lim_{n\rightarrow\infty}E\left[\left|{\cal{E}}^{\phi}[|X_n-X||{\cal{F}}_t]\right|^2\right]=
\lim_{n\rightarrow\infty}E\left[\left|{\cal{E}}^{\phi}[|X_n-X||{\cal{F}}_t]-{\cal{E}}^{\phi}[0|{\cal{F}}_t]\right|^2\right]=0.$$
Then combining above equality and (iv), we obtain (v). \ \ $\Box$ \\\\
\textbf{Remark 3.1} \begin{itemize}
\item Let ${\cal{F}}$-expectation ${\cal{E}}$ satisfy (H1). By (H1) and (ii) in Lemma 3.1, for each $X$ in $L^2({\cal{F}}_T)$ and $t$ in $[0,T],$ we have,
$$Y={\cal{E}}^{-\phi}[Y|{\cal{F}}_t]\leq{\cal{E}}[X+Y|{\cal{F}}_t]-{\cal{E}}[X|{\cal{F}}_t]\leq{\cal{E}}^{\phi}[Y|{\cal{F}}_t]=Y,\ \ \forall Y\in L^2({\cal{F}}_t).$$
Then we can get that (H1) implies (H2). Consequently, (H1)$^*$ implies (H2).
\item Coquet et al. [5] show that an ${\cal{F}}$-expectation is a $g$-expectation defined by the solution of a BSDE whose generator $g$ is independent of $y$ and satisfies (A1)$^*$, (A2) and (A3), when ${\cal{E}}$ satisfies (H2), domination condition (1) and a strict monotonicity condition. In fact, by Coquet et al. [5, Lemma 4.3 and Lemma 4.4] and the fact that (H1)$^*$ implies (H2), we can easily check that under the strict monotonicity condition for ${\cal{E}}$ in Coquet et al. [5], (H1)$^*$ is equivalent to (H2) plus domination condition (1).
\end{itemize}
\textbf{Lemma 3.2} Let ${\cal{F}}$-expectation ${\cal{E}}$ satisfy (H1). Then for each $X$ in $L^2({\cal{F}}_T),$ ${\cal{E}}[X|{\cal{F}}_t]$ admits an RCLL version.\\\\
\
\emph{Proof.} Since $\phi(\cdot)$ has linear growth, by Lepeltier and San Martin [15, Lemme 1] we can find a function $\varphi: {\mathbf{R}}_+\rightarrow \mathbf{R}$ such that the generator $\varphi(|z|)$ satisfies (A1)$^*$ and (A2) and $\varphi(|z|)\leq {-\phi(|z|)}$ for each $z\in{\mathbf{R}}^{\mathit{d}}$. By (iii) in Lemma 3.1 and the comparison theorem (see Jia [11, Theorem 3.1]), we have $${\cal{E}}[X|{\cal{F}}_t]\geq {\cal{E}}^{-\phi}[X|{\cal{F}}_t]\geq {\cal{E}}^{\varphi}[X|{\cal{F}}_t].$$
Consequently, we can easily check that ${\cal{E}}[X|{\cal{F}}_t]$ is a $\varphi$-supermartingale. By Peng [19, Theorem 3.7], for a denumerable dense subset ${{D}}$ of $[0,T]$, for almost all $\omega$ and all $t\in[0,T],$ the limits $\lim_{s\in{{D}},\ s\searrow t}{\cal{E}}[X|{\cal{F}}_s]$ and $\lim_{s\in{{D}},\ s\nearrow t}{\cal{E}}[X|{\cal{F}}_s]$ both exist and are finite. For each $t\in[0,T]$, we set
$$Y_t:=\lim_{s\in{{D}},\ s\searrow t}{\cal{E}}[X|{\cal{F}}_s],\eqno(3)$$
then $Y_t$ is RCLL. By (iv) in Lemma 3.1 and "Constant preservation" of ${\cal{E}}$, we have
$$|{\cal{E}}[X|{\cal{F}}_t]|\leq{\cal{E}}^{\phi}[|X||{\cal{F}}_t].\eqno(4)$$
By Jia [11, Theorem 2.3], we also have
$$ E[\sup_{t\in[0,T]}|{\cal{E}}^{\phi}[|X||{\cal{F}}_t]|^2]<+\infty.\eqno(5)$$
By (3)-(5) and Lebesgue dominated convergence theorem, we have
$$\lim_{s\in{{D}},\ s\searrow t}{\cal{E}}[X|{\cal{F}}_s]=Y_t, \ \ \forall t\in[0,T], \eqno(6)$$
in the $L^2({\cal{F}}_T)$ sense.
By the "Constant preservation" of ${\cal{E}}$, we have
$$
{\cal{E}}[{\cal{E}}[X|{\cal{F}}_s]|{\cal{F}}_t]-Y_t= {\cal{E}}[{\cal{E}}[X|{\cal{F}}_s]|{\cal{F}}_t]-{\cal{E}}[Y_t|{\cal{F}}_t].
$$
Then by (6) and (v) in Lemma 3.1, we get
$$\lim_{s\in{{D}},\ s\searrow t}{\cal{E}}[{\cal{E}}[X|{\cal{F}}_s]|{\cal{F}}_t]=Y_t,\ \ \forall t\in[0,T], \eqno(7)$$
in the $L^2({\cal{F}}_T)$ sense. On the other hand, by "Consistency" of ${\cal{E}}$, we have
$$\lim_{s\in{{D}},\ s\searrow t}{\cal{E}}[{\cal{E}}[X|{\cal{F}}_s]|{\cal{F}}_t]={\cal{E}}[X|{\cal{F}}_t],\ \ \forall t\in[0,T].\eqno(8)$$
By (7) and (8), we have $P-a.s.,\ {\cal{E}}[X|{\cal{F}}_t]=Y_t.$ The proof is complete.\ \ $\Box$ \\
Note that, in the sequel, we always take the RCLL version of ${\cal{E}}[X|{\cal{F}}_t]$. If for each $t\in[0,T]$, $Y_t={\cal{E}}[X|{\cal{F}}_t]$, then for each stopping time $\sigma\in{\cal{T}}_{0,T},$ we set
${\cal{E}}[X|{\cal{F}}_\sigma]:=Y_\sigma.$ Then we have the following optional stopping theorem, which can be proved directly by Lemma 3.1 and the same arguments as in Peng [19, Theorem 7.4]. We omit its proof here.\\\\
\textbf{Lemma 3.3} Let ${\cal{F}}$-expectation ${\cal{E}}$ satisfy (H1). If $Y_t\in {\cal{D}}^2_{{\cal{F}}}(0,T)$ is an ${\cal{E}}$-supermartingale (resp. ${\cal{E}}$-submartingale), then for each stopping time $\sigma,\ \tau\in{\cal{T}}_{0,T},$ we have
$${\cal{E}}[Y_\tau|{\cal{F}}_\sigma]\leq Y_{\sigma\wedge\tau},\ (\textrm{resp.} \geq Y_{\sigma\wedge\tau}),\ \ P-a.s.$$
The following Lemma 3.4 can be considered as a representation theorem for ${\cal{E}}$-martingale.\\\\
\textbf{Lemma 3.4} Let ${\cal{F}}$-expectation ${\cal{E}}$ satisfy (H1) and for each $X\in L^2({\cal{F}}_T),$ set $$y_t^X:={\cal{E}}[X|{\cal{F}}_t],\ \forall t\in[0,T].$$ Then there exists a pair $(g_t^X, Z_t^X)$ in $L^2_{{\cal{F}}}(0,T)\times L^2_{{\cal{F}}}(0,T;{\textbf{R}}^d)$ such that $$|g_t^X|\leq \phi(|Z_t^X|),\eqno(9)$$ and
$$y_t^X=X+\int_t^Tg_s^Xds-\int_t^TZ_s^XdB_s.\eqno(10)$$
Moreover, for $Y$ in $L^2({\cal{F}}_T),$ we have $$|g_t^X-g_t^Y|\leq \phi(|Z_t^X-Z_t^Y|).\eqno(11)$$
\emph{Proof.} By (iii) in Lemma 3.1 and Lemma 3.2, we can easily check that ${\cal{E}}[X|{\cal{F}}_t], t\in[0,T]$ is a right-continuous ${-\phi}$-supermartingale (resp. ${\phi}$-submartingale). Then by Lemma 2.1, we get that there exists $(Z^{X,-\phi}_t,A^{X,-\phi}_t)$ (resp. $(Z^{X,\phi}_t,A^{X,\phi}_t)$) in $L^2_{{\cal{F}}}(0,T;{\textbf{R}}^d)\times D^2_{{\cal{F}}}(0,T)$ with $A^{X,-\phi}_t$ (resp. $A^{X,\phi}_t$) RCLL, increasing and $A^{X,-\phi}_0=0$ (resp. $A^{X,\phi}_0=0$), such that
$$y_t^X=X-\int_t^T\phi(|Z_s^{X,-\phi}|)ds+A^{X,-\phi}_T-A^{X,-\phi}_t-\int_t^TZ_s^{X,-\phi}dB_s,\eqno(12)$$
and
$$y_t^X=X+\int_t^T\phi(|Z_s^{X,\phi}|)ds-A^{X,\phi}_T+A^{X,\phi}_t-\int_t^TZ_s^{X,\phi}dB_s.\eqno(13)$$
Comparing the martingale parts and the bounded variation parts of (12) and (13), we get
\begin{eqnarray*}
Z_s^{X,-\phi}&\equiv& Z_s^{X,\phi},\\
-\phi(|Z_s^{X,-\phi}|)ds+dA^{X,-\phi}_s&\equiv&\phi(|Z_s^{X,\phi}|)ds-dA^{X,\phi}_s.
\end{eqnarray*}
Thus we have
$$2\phi(|Z_s^{X,\phi}|)ds\equiv dA^{X,-\phi}_s+dA^{X,\phi}_s.$$
Consequently, $A^{X,\phi}_s$ and $A^{X,-\phi}_s$ are both absolutely continuous. Thus there exist $a^{X,\phi}_s\geq0$ and $a^{X,-\phi}_s\geq0$ such that
$$dA^{X,\phi}_s=a^{X,\phi}_sds,\ \ dA^{X,-\phi}_s=a^{X,-\phi}_sds.$$
Then we have
$$a^{X,\phi}_s+a^{X,-\phi}_s\equiv 2\phi(|Z_s^{X,\phi}|).$$
By setting
$$Z^X_s:=Z_s^{X,\phi},\ \ \ g^X_s:=\phi(|Z_s^{X}|)-a^{X,\phi}_s,$$
we get (9) and (10).
By (10), for $Y$ in $L^2({\cal{F}}_T),$ there exists a pair $(g_t^Y, Z_t^Y)$ in $L^2_{{\cal{F}}}(0,T)\times L^2_{{\cal{F}}}(0,T;{\textbf{R}}^d)$ such that
$$y_t^Y=Y+\int_t^Tg_s^Yds-\int_t^TZ_s^YdB_s.\eqno(14)$$
By (10) and (14), we have
$$y_t^X-y_t^Y=X-Y+\int_t^T(g_s^X-g_s^Y)ds-\int_t^T(Z_s^X-Z_s^Y)dB_s.\eqno(15)$$
By (ii) in Lemma 3.1, we have, for each $s\leq t\leq T,$
$${\cal{E}}^{-\phi}[y_t^X-y_t^Y|{\cal{F}}_s]\leq{\cal{E}}[y_t^X|{\cal{F}}_s]-{\cal{E}}[y_t^Y|{\cal{F}}_s]
=y_s^X-y_s^Y\leq {\cal{E}}^{\phi}[y_t^X-y_t^Y|{\cal{F}}_s].$$
Thus $y_t^X-y_t^Y$ is a ${\phi}$-submartingale and a ${-\phi}$-supermartingale. Then by Lemma 2.1 again, there exist $(Z^1_t,A^1_t)$ (resp. $(Z^2_t,A^2_t)$) in $L^2_{{\cal{F}}}(0,T;{\textbf{R}}^d)\times D^2_{{\cal{F}}}(0,T)$ with $A^1_t$ (resp. $A^2_t$) RCLL, increasing and $A^1_0=0$ (resp. $A^2_0=0$), such that
$$y_t^X-y_t^Y=X-Y+\int_t^T\phi(|Z_s^1|)ds-A_T^1+A_t^1-\int_t^TZ_s^1dB_s,\eqno(16)$$
and
$$y_t^X-y_t^Y=X-Y-\int_t^T\phi(|Z_s^2|)ds+A_T^2-A_t^2-\int_t^TZ_s^2dB_s.\eqno(17)$$
Comparing the martingale parts and the bounded variation parts of (15) and (16), we get
\begin{eqnarray*}
Z_s^X-Z_s^Y&\equiv& Z_s^1,\\
(g_s^X-g_s^Y)ds&\equiv&\phi(|Z_s^1|)ds-dA_s^1.
\end{eqnarray*}
Then we have
$$g_t^X-g_t^Y\leq \phi(|Z_t^X-Z_t^Y|).\eqno(18)$$
Comparing the martingale parts and the bounded variation parts of (15) and (17), we get
\begin{eqnarray*}
Z_s^X-Z_s^Y&\equiv& Z_s^2,\\
(g_s^X-g_s^Y)ds&\equiv&-\phi(|Z_s^2|)ds+dA_s^2.
\end{eqnarray*}
Then we have
$$g_t^X-g_t^Y\geq -\phi(|Z_t^X-Z_t^Y|).\eqno(19)$$
Thus by (18) and (19), we obtain (11). The proof is complete. \ \ $\Box$\\\\
\
\textbf{Remark 3.2} Let ${\cal{F}}$-expectation ${\cal{E}}$ satisfy (H1), $X\in L^2({\cal{F}}_T)$ and $\eta_t\in L^2_{{\cal{F}}}(0,T).$ By (10), ${\cal{E}}[X|{\cal{F}}_t]$ is a continuous process. By (H2), we can further get that ${\cal{E}}[X+\int_t^T\eta_sds|{\cal{F}}_t]$ is continuous, from the fact that $${\cal{E}}[X+\int_t^T\eta_sds|{\cal{F}}_t]={\cal{E}}[X+\int_0^T\eta_sds|{\cal{F}}_t]-\int_0^t\eta_sds.$$
The following Lemma 3.5 describes a property of ${\cal{F}}$-expectation, which plays an important role in this paper.\\\\
\textbf{Lemma 3.5} Let ${\cal{F}}$-expectation ${\cal{E}}$ satisfy (H1). Then for each $z\in \textbf{R}^d$ and each $X\in L^\infty({\cal{F}}_T),$ there exists a process $\eta_t\in {\cal{S}}^\infty_{{\cal{F}}}(0,T)$ such that for each $t\in[0,T],$
$${\cal{E}}[X+zB_T|{\cal{F}}_t]=\eta_t+zB_t,\ \ P-a.s.$$
\emph{Proof.} For $z\in \textbf{R}^d$ and $X\in L^\infty({\cal{F}}_T),$ by Lemma 3.4, there exists a pair $(g_t, Z_t)$ in $L^2_{{\cal{F}}}(0,T)\times L^2_{{\cal{F}}}(0,T;{\textbf{R}}^d)$ such that
$${\cal{E}}[X+zB_T|{\cal{F}}_t]=X+zB_T+\int_t^Tg_sds-\int_t^TZ_sdB_s.$$
Then we have
$${\cal{E}}[X+zB_T|{\cal{F}}_t]-zB_t=X+\int_t^Tg_sds-\int_t^T(Z_s-z)dB_s.\eqno(20)$$
Set $$(\tilde{Y}_t,\tilde{Z}_t):=({\cal{E}}[X+zB_T|{\cal{F}}_t]-zB_t,Z_t-z).\eqno(21)$$
Then by (20) and (21), $(\tilde{Y}_t,\tilde{Z}_t)$ is the unique solution of BSDE with parameter $(g_s,X,T).$
Now, we consider the following two BSDEs:
$$\overline{{Y}}_t=X+\int_t^T(\phi (|\overline{{Z}}_s|)+\phi (|z|))ds-\int_t^T\overline{Z}_sdB_s.\eqno(22)$$
$$\underline{{Y}}_t=X-\int_t^T(\phi (|\underline{{Z}}_s|)+\phi (|z|))ds-\int_t^T\underline{Z}_sdB_s.\eqno(23)$$
By (9), (21) and the fact that $\phi$ is increasing and subadditive, we have
$$|g_t|\leq \phi(|Z_t|)=\phi(|\tilde{Z}_t+z|)\leq \phi(|\tilde{Z}_t|)+\phi(|z|).\eqno(24)$$
Then by (20)-(24) and comparison theorem (see Jia [11, Theorem 3.1]), we can get that for each $t\in[0,T],$
$$\underline{{Y}}_t\leq\tilde{Y}_t\leq\overline{{Y}}_t, \ \ P-a.s.\eqno(25)$$
Since $X\in L^\infty({\cal{F}}_T)$ and $\phi$ has linear growth, applying the boundedness of solutions of quadratic BSDEs (whose generators have at most quadratic growth in $z$) with bounded terminal variables (see Kobylanski [14, Theorem
2.3]) to (22) and (23), we get that $\overline{{Y}}_t$ and $\underline{{Y}}_t$ both belong to ${\cal{S}}^\infty_{{\cal{F}}}(0,T).$ Then by (25), we have
$$\|\tilde{Y}_t\|_{L^\infty_{\cal{F}}}\leq\|\underline{{Y}}_t\|_{L^\infty_{\cal{F}}}\vee\|\overline{{Y}}_t\|_{L^\infty_{\cal{F}}}< \infty.$$
From (21) and the above inequality, the proof is complete. \ \ $\Box$\\\\
\
\textbf{Lemma 3.6} Let ${\cal{F}}$-expectation ${\cal{E}}$ satisfy (H1), $\sigma\in{\cal{T}}_{0,T}$ and $X,\ Y\in L^2({\cal{F}}_T).$ Then we have
$$1_A{\cal{E}}[X+Y|{\cal{F}}_\sigma]=1_A{\cal{E}}[1_AX+Y|{\cal{F}}_\sigma],\ \ P-a.s. \ \ \forall A\in {\cal{F}}_{\sigma}.$$
\emph{Proof.} The proof can be completed by Lemma 3.2 and an argument similar to that of Hu et al. [9, Proposition 4.2(iii)]. We omit it here.\ \ $\Box$\\\\
\
\textbf{Lemma 3.7} Let ${\cal{F}}$-expectation ${\cal{E}}$ satisfy (H1), $\sigma\in{\cal{T}}_{0,T}$ and $X\in L^2({\cal{F}}_T).$ Then we have
$${\cal{E}}[X+Y|{\cal{F}}_\sigma]={\cal{E}}[X|{\cal{F}}_\sigma]+Y,\ \ P-a.s.,\ \ \forall Y\in L^2({\cal{F}}_\sigma).$$
\emph{Proof.} The proof can be completed by (H2), Lemma 3.2 and an argument similar to that of Hu et al. [9, Proposition 4.2(iv)]. We omit it here.\ \ $\Box$
\section{Doob-Meyer decomposition of ${\cal{E}}$-supermartingale}
In this section, we will study the Doob-Meyer decomposition of ${\cal{E}}$-supermartingales. First, we consider a BSDE under ${\cal{F}}$-expectation ${\cal{E}}.$\\
Given a function $f: \Omega \times [0,T]\times \mathbf{R}\longmapsto \mathbf{R},$ in this paper we always suppose that $f$ satisfies the following Lipschitz condition:
$$\exists \lambda\geq0,\ s.t.\ |f(t,y_1)-f(t,y_2)|\leq \lambda|y_1-y_2|, \ \forall y_1,\ y_2\in{\mathbf{R}},\ \forall t\in[0,T].$$
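As a simple illustration (this remark is ours, added only for orientation), the generator $f(s,y)=n(Y_s-y)$, with $Y\in{\cal{S}}^\infty_{\cal{F}}(0,T)$ fixed and $n\geq1$, which appears in the BSDE (31) later in this section, satisfies this condition with $\lambda=n$ and has $f(\cdot,0)=nY_\cdot\in L^\infty_{{\cal{F}}}(0,T).$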
Now, we consider the following BSDE under ${\cal{F}}$-expectation ${\cal{E}}:$
$$y_t+zB_t={\cal{E}}\left[X+zB_T+\int_t^Tf(s,y_s)ds|{\cal{F}}_t\right],\ \ t\in[0,T]$$
which has been studied in Coquet et al. [5] for the case $z=0$, $X\in L^2({\cal{F}}_T)$ and $f(\cdot,0)\in L^2_{{\cal{F}}}(0,T),$ and in Hu et al. [9] for the case $z\in\textbf{R}^d$, $X\in L^\infty({\cal{F}}_T)$ and $f(\cdot,0)\in L^\infty_{{\cal{F}}}(0,T).$ We denote this BSDE by ${\cal{E}}(f,T,X,z).$ The following Theorem 4.1 shows that it has a unique solution under (H1). \\\\
\textbf{Theorem 4.1} Let ${\cal{F}}$-expectation ${\cal{E}}$ satisfy (H1), $z\in\textbf{R}^d,$ $X\in L^\infty({\cal{F}}_T)$ and $f(\cdot,0)\in L^\infty_{{\cal{F}}}(0,T).$ Then ${\cal{E}}(f,T,X,z)$ has a unique solution $y_t\in {\cal{S}}_{\cal{F}}^\infty (0,T).$ \\\\
\emph{Proof.} For $y_t\in {\cal{S}}^\infty_{{\cal{F}}}(0,T),$ set $$I(y_t):={\cal{E}}\left[X+zB_T+\int_t^Tf(s,y_s)ds|{\cal{F}}_t\right]-zB_t.$$
By (H2), we have
$$I(y_t)={\cal{E}}\left[X+zB_T+\int_0^Tf(s,y_s)ds|{\cal{F}}_t\right]-zB_t-\int_0^tf(s,y_s)ds.\eqno(26)$$
Since $f$ satisfies the Lipschitz condition, $y_t\in {\cal{S}}^\infty_{{\cal{F}}}(0,T)$ and $f(\cdot,0)\in L^\infty_{{\cal{F}}}(0,T)$, we have
\begin{eqnarray*}
\left\|\int_0^tf(s,y_s)ds\right\|_{L^\infty_{\cal{F}}}\leq\int_0^T\|f(s,y_s)\|_{L^\infty_{\cal{F}}} ds&\leq&\int_0^T\|f(s,0)\|_{L^\infty_{\cal{F}}}ds+\lambda\int_0^T\|y_s\|_{L^\infty_{\cal{F}}}ds\\
&=&T\|f(s,0)\|_{L^\infty_{\cal{F}}}+\lambda T\|y_s\|_{L^\infty_{\cal{F}}}\\
&<&\infty.
\end{eqnarray*}
With the help of $X\in L^\infty({\cal{F}}_T),$ the above inequality, Remark 3.2 and Lemma 3.5, we can get $I(y_t)\in {\cal{S}}_{\cal{F}}^\infty (0,T)$ from (26). Thus
$$I(\cdot):{\cal{S}}_{\cal{F}}^\infty (0,T)\longmapsto {\cal{S}}_{\cal{F}}^\infty (0,T).$$
By (iv) in Lemma 3.1, "Monotonicity" and "Constant preservation" of ${\cal{E}}^{\phi},$ for each $t\in[0,T]$ and $y_t^1,\ y_t^2\in {\cal{S}}_{\cal{F}}^\infty (0,T),$ we have
\begin{eqnarray*}
|I(y_t^1)-I(y_t^2)|&=&\left|{\cal{E}}\left[X+zB_T+\int_t^Tf(s,y_s^1)ds|{\cal{F}}_t\right]
-{\cal{E}}\left[X+zB_T+\int_t^Tf(s,y_s^2)ds|{\cal{F}}_t\right]\right|\\
&\leq&{\cal{E}}^{\phi}\left[\left|\int_t^Tf(s,y_s^1)ds
-\int_t^Tf(s,y_s^2)ds\right||{\cal{F}}_t\right]\\
&\leq&{\cal{E}}^{\phi}\left[\int_t^T\left|f(s,y_s^1)-f(s,y_s^2)\right|ds|{\cal{F}}_t\right]\\
&\leq&\lambda T\|y_t^1-y_t^2\|_{L^\infty_{\cal{F}}}.
\end{eqnarray*}
\textbf{Case 1: } $T\leq\frac{1}{2\lambda}.$
In this case, we have $\|I(y_t^1)-I(y_t^2)\|_{L^\infty_{\cal{F}}}\leq\frac{1}{2}\|y_t^1-y_t^2\|_{L^\infty_{\cal{F}}}.$ Thus $I(\cdot)$ is a strict contraction and has a unique fixed point in ${\cal{S}}_{\cal{F}}^\infty (0,T)$. The proof is complete.
\textbf{Case 2: } $T>\frac{1}{2\lambda}.$
In this case, we can complete the proof using a "patching-up" method given in Hu et al. [9, Proposition 4.4]. We omit it here.\ \ $\Box$\\\\
\
\textbf{Remark 4.1 } Let ${\cal{F}}$-expectation ${\cal{E}}$ satisfy (H1) and let $y_t$ be the solution of ${\cal{E}}(f,T,X,z)$; then by (H2), the process $y_t+zB_t+\int_0^tf(s,y_s)ds$ is an ${\cal{E}}$-martingale.\\\\
\
\textbf{Theorem 4.2} Let ${\cal{F}}$-expectation ${\cal{E}}$ satisfy (H1), $z\in \textbf{R}^d$, $X\in L^\infty({\cal{F}}_T)$ and $f(\cdot,0)\in L^\infty_{{\cal{F}}}(0,T),$ let $y_t$ be the solution of ${\cal{E}}(f,T,X,z)$ and let $\bar{y}_t$ be the solution of the following ${\cal{E}}(f+\eta_t,T,\bar{X},z)$:
$$\bar{y}_t+zB_t={\cal{E}}\left[\bar{X}+zB_T+\int_t^T(f(s,\bar{y}_s)+\eta_s)ds|{\cal{F}}_t\right],\ \ t\in[0,T],\eqno(27)$$
where $\bar{X}$ in $L^\infty({\cal{F}}_T)$ and $\eta_t\in L_{\cal{F}}^\infty (0,T)$ satisfy
$$\bar{X}\geq X,\ \ \ \eta_t\geq0,\ \ dP\times dt-a.e.$$
Then $\forall t\in[0,T],$ we have
$$\bar{y}_t\geq y_t,\ \ P-a.s.\eqno(28)$$
\emph{Proof.} \textbf{Case 1:} $\eta_t\equiv0.$
For constant $n\geq1$, we define the stopping time
$$\tau_1^n:=\inf\left\{t\geq0;\ \bar{y}_t\leq y_t-\frac{1}{n}\right\}\wedge T.$$
Clearly, if (28) is not true, then there exists an integer $k\geq1$ such that $P(\{\tau_1^k<T\})>0.$
Set $A:=\{\tau_1^k<T\}$. By the continuity of $\bar{y}_t$ and $y_t$ and the fact that $\bar{y}_T=\bar{X}\geq X={y}_T,$ we have
$$P(A)>0\ \ \textrm{and} \ \ A=\left\{\bar{y}_{\tau_1^k}\leq y_{\tau_1^k}-\frac{1}{k}\right\}.\eqno(29)$$
Now, we define the stopping time
$$\tau_2:=\inf\{t\geq\tau_1^k;\ \bar{y}_t\geq y_t\}\wedge T.$$
By the continuity of $\bar{y}_t$ and $y_t$, and $\bar{y}_T=\bar{X}\geq X={y}_T,$ we have
$$1_A\bar{y}_{\tau_2}=1_A{y}_{\tau_2}.$$
Clearly, $A\in{\cal{F}}_{\tau_1^k}.$ Then, for each stopping time $\tau\in{\cal{T}}_{\tau_1^k,\tau_2},$ we have
\begin{eqnarray*}
&&1_A{\cal{E}}\left[1_A\bar{y}_{\tau_2}+zB_{\tau_2}+\int_{\tau}^{{\tau_2}}1_Af(s,1_A\bar{y}_s)ds|{\cal{F}}_{\tau}\right]\\
&=& 1_A{\cal{E}}\left[1_A\bar{y}_{\tau_2}+zB_{\tau_2}+1_A\int_{\tau}^{{\tau_2}}f(s,\bar{y}_s)ds|{\cal{F}}_{\tau}\right]\\
&=& 1_A{\cal{E}}\left[\bar{y}_{\tau_2}+zB_{\tau_2}+\int_{\tau}^{{\tau_2}}f(s,\bar{y}_s)ds|{\cal{F}}_{\tau}\right]\\
&=& 1_A{\cal{E}}\left[\bar{y}_{\tau_2}+zB_{\tau_2}+\int_{0}^{{\tau_2}}f(s,\bar{y}_s)ds|{\cal{F}}_{\tau}\right]
-1_A\int_{0}^{{\tau}}f(s,\bar{y}_s)ds\\
&=& 1_A\left(\bar{y}_{\tau}+zB_{\tau}+\int_{0}^{{\tau}}f(s,\bar{y}_s)ds\right)
-1_A\int_{0}^{{\tau}}f(s,\bar{y}_s)ds\\
&=& 1_A(\bar{y}_{\tau}+zB_{\tau}).
\end{eqnarray*}
In the above, the second equality is due to Lemma 3.6, the third equality is due to Lemma 3.7, and the fourth equality is due to Remark 4.1 and Lemma 3.3.
By the same argument as above, for each stopping time $\tau\in{\cal{T}}_{\tau_1^k,\tau_2},$ we have
$$1_A{\cal{E}}\left[1_Ay_{\tau_2}+zB_{\tau_2}+\int_{\tau}^{\tau_2}1_Af(s,1_Ay_s)ds
|{\cal{F}}_{\tau}\right]= 1_A(y_{\tau}+zB_{\tau}).$$
For each $t\in[0,T]$, we set the stopping time $\hat{t}:=(t\vee\tau_1^k)\wedge\tau_2.$ Then by the above three equalities, (iv) in Lemma 3.1, "Monotonicity" and "Constant preservation" of ${\cal{E}}^{\phi},$ we can get
\begin{eqnarray*}
|1_A\bar{y}_{\hat{t}}-1_Ay_{\hat{t}}|&=&|1_A(\bar{y}_{\hat{t}}+zB_{\hat{t}})-1_A(y_{\hat{t}}+zB_{\hat{t}})|\\
&\leq&1_A{\cal{E}}^{\phi}\left[\left|\int_{\hat{t}}^{{\tau_2}}1_Af(s,1_A\bar{y}_s)ds
-\int_{\hat{t}}^{\tau_2}1_Af(s,1_Ay_s)ds\right||{\cal{F}}_{\hat{t}}\right]\\
&\leq&{\cal{E}}^{\phi}\left[\int_{\hat{t}}^{{\tau_2}}\left|f(s,1_A\bar{y}_s)-f(s,1_Ay_s)\right|ds|{\cal{F}}_{\hat{t}}\right]\\
&\leq&{\cal{E}}^{\phi}\left[\left\|\int_{\hat{t}}^T\lambda|1_A\bar{y}_s-1_Ay_s|ds\right\|_{L^\infty}|{\cal{F}}_{\hat{t}}\right]\\
&\leq&\left\|\int_t^T\lambda|1_A\bar{y}_{\hat{s}}-1_Ay_{\hat{s}}|ds\right\|_{L^\infty}\\
&\leq&\lambda\int_t^T\left\|1_A\bar{y}_{\hat{s}}-1_Ay_{\hat{s}}\right\|_{L^\infty} ds.
\end{eqnarray*}
Then we have, for each $t\in[0,T]$,
$$\|1_A\bar{y}_{\hat{t}}-1_Ay_{\hat{t}}\|_{L^\infty} \leq\lambda\int_t^T\left\|1_A\bar{y}_{\hat{s}}-1_Ay_{\hat{s}}\right\|_{L^\infty} ds.$$
By Gronwall's inequality, we have, for each $t\in[0,T]$, $1_A\bar{y}_{\hat{t}}=1_Ay_{\hat{t}},\ P-a.s.$ By setting $t=0,$ we have
$$1_A\bar{y}_{\tau_1^k}=1_Ay_{\tau_1^k},\ \ P-a.s.$$
which contradicts (29). Thus (28) holds true.
\textbf{Case 2:} $\eta_t\equiv 0$ is not true.
For $n\geq1,$ set $t^n_i:=\frac{i}{n}T,\ 0\leq i\leq n.$ As in Coquet et al. [5] and Hu et al. [9], we define the following BSDEs recursively:
$$y_t^{i,n}+zB_t={\cal{E}}\left[\left(X^n_i+\int_{t^n_{i-1}}^{t^n_i}\eta_sds\right)+zB_{t^n_i}+\int_{t}^{t^n_i}f(s,y_s^{i,n})ds|{\cal{F}}_t\right],\ t\in[0,t^n_i],$$
where $X^n_n=\bar{X}$ and $X^n_{i-1}=y_{t^n_{i-1}}^{i,n},$ for $1\leq i\leq n.$ By the result of Case 1, we have $y_t^{n,n}\geq y_t,\ t\in[t^n_{n-1},{t^n_n}].$ Thus $X^n_{n-1}+\int_{t^n_{n-2}}^{t^n_{n-1}}\eta_sds\geq y_{t^n_{n-1}}.$ Then by the result of Case 1 again, we have $y_t^{n-1,n}\geq y_t,\ t\in[{t^n_{n-2}},{t^n_{n-1}}].$ Similarly, we also have $y_t^{i,n}\geq y_t,\ t\in[{t^n_{i-1}},{t^n_{i}}],\ 1\leq i\leq n-2.$ We define the process $y_t^n=y_t^{i,n},\ t\in[{t^n_{i-1}},{t^n_i}),\ 1\leq i\leq n,$ and $y_T^n=\bar{X}.$ Then we can check that
$$y_t^{n}+zB_t={\cal{E}}\left[\bar{X}+\int_{t^n_{i-1}}^{T}\eta_sds+zB_{T}+\int_{t}^{T}f(s,y_s^n)ds|{\cal{F}}_t\right],\ t\in[{t^n_{i-1}},{t^n_i}),\ \ 1\leq i\leq n.\eqno(30)$$
By (27), (30), (iv) in Lemma 3.1, "Monotonicity" and "Constant preservation" of ${\cal{E}}^{\phi},$ for $t\in[{t^n_{i-1}},{t^n_i}),\ 1\leq i\leq n,$ we have
\begin{eqnarray*}
&&|y_t^{n}-\bar{y}_t|\\&=&\left|{\cal{E}}\left[\bar{X}+\int_{t^n_{i-1}}^{T}\eta_sds
+zB_T+\int_{t}^{T}f(s,y_s^n)ds|{\cal{F}}_t\right]-{\cal{E}}\left[\bar{X}+zB_T
+\int_t^T(f(s,\bar{y}_s)+\eta_s)ds|{\cal{F}}_t\right]\right|\\
&\leq&{\cal{E}}^{\phi}\left[\left|\int_{t^n_{i-1}}^{t}\eta_sds
+\int_{t}^{T}(f(s,y_s^n)-f(s,\bar{y}_s))ds\right||{\cal{F}}_t\right]\\
&\leq&{\cal{E}}^{\phi}\left[\int_{t^n_{i-1}}^{t}|\eta_s|ds
+\int_{t}^{T}|f(s,y_s^n)-f(s,\bar{y}_s)|ds|{\cal{F}}_t\right]\\
&\leq&\int_{t^n_{i-1}}^{t}\left\|\eta_s\right\|_{L^\infty} ds
+\int_{t}^{T}\left\|(f(s,y_s^n)-f(s,\bar{y}_s))\right\|_{L^\infty} ds\\
&\leq&\frac{T}{n}\left\|\eta_s\right\|_{L_{\cal{F}}^\infty}+\lambda\int_{t}^T\left\|\bar{y}_s-y_s^n\right\|_{L^\infty} ds.
\end{eqnarray*}
By Gronwall's inequality, we get that for $t\in[0,T],$ $y_t^{n}\rightarrow\bar{y}_t$ in the $L^\infty({\cal{F}}_t)$ sense, as $n\rightarrow\infty.$ Consequently, $\forall t\in[0,T],\ \bar{y}_t\geq y_t,\ P-a.s.$ The proof is complete.\ \ $\Box$\\
Now, we give the following Doob-Meyer decomposition of ${\cal{E}}$-supermartingale.\\\\
\textbf{Theorem 4.3} Let ${\cal{F}}$-expectation ${\cal{E}}$ satisfy (H1), $z\in \textbf{R}^d$, $Y_t\in S^\infty_{{\cal{F}}}(0,T)$, and suppose that $Y_t+zB_t$ is an ${\cal{E}}$-supermartingale. Then there exists an increasing process $A_t\in S^2_{{\cal{F}}}(0,T)$ with $A_0=0$ such that $\forall t\in [0,T],$
\begin{center}
${\cal{E}}[Y_T+zB_T+A_T|{\cal{F}}_t]=Y_t+zB_t+A_t,\ \ P-a.s.$
\end{center}
\emph{Proof.} For $n\geq1,$ we consider the following BSDEs under ${\cal{F}}$-expectation ${\cal{E}}$:
$$y_t^n+zB_t={\cal{E}}\left[Y_T+zB_T+\int_t^Tn(Y_s-y_s^n)ds|{\cal{F}}_t\right],\ \ \ t\in[0,T].\eqno(31)$$
By Theorem 4.1, the above BSDE (31) has a unique solution $y_t^n\in S^\infty_{{\cal{F}}}(0,T).$ Then we have the following Proposition 4.1. \\\\
\textbf{Proposition 4.1} For $n\geq1$ and $t\in[0,T],$ we have $$Y_t\geq y_t^{n+1}\geq y_t^n,\ \ \ P-a.s.$$
\emph{Proof.} With the help of Lemma 3.7, Remark 4.1, Lemma 3.3 and Theorem 4.2, we can obtain this proposition immediately from the arguments of Coquet et al. [5, Lemma 6.2] or Hu et al. [9, Lemma 5.3]. \ \ $\Box$\\
Set $$A_t^n:=\int_0^tn(Y_s-y_s^n)ds.\eqno(32)$$
Clearly, $A_t^n$ belongs to $S^\infty_{{\cal{F}}}(0,T)$ and is increasing with $A_0^n=0.$ By (31) and (32), we get that $\forall t\in [0,T],$
$${\cal{E}}[Y_T+zB_T+A_T^n-A_t^n|{\cal{F}}_t]=y_t^n+zB_t.\eqno(33)$$
By (H2), we have $\forall t\in [0,T],$
$${\cal{E}}[Y_T+zB_T+A_T^n|{\cal{F}}_t]=y_t^n+zB_t+A_t^n.$$
Thus $y_t^n+zB_t+A_t^n$ is an ${\cal{E}}$-martingale. By Lemma 3.4, there exists a pair $(g^n_s, Z^n_s)$ in $L^2_{{\cal{F}}}(0,T)\times L^2_{{\cal{F}}}(0,T;{\textbf{R}}^d)$ such that
$$|g_s^n-g_s^m|\leq\phi(|Z^n_s-Z^m_s|),\ \ \forall m\geq1,\eqno(34)$$
and
$$y_t^n+zB_t+A_t^n=Y_T+zB_T+A_T^n+\int_t^Tg^n_sds-\int_t^TZ^n_sdB_s.$$
Then
$$y_t^n=Y_T+A_T^n-A_t^n+\int_t^Tg^n_sds-\int_t^T(Z^n_s-z)dB_s.\eqno(35)$$
Now, we can get\\\\
\textbf{Proposition 4.2} There exists a constant $C$ independent of $n$ such that
$$\textrm{(i)}\ \ E\int_0^T|Z^n_s-z|^2ds\leq C\ \ \textrm{ and }\ \ \textrm{(ii)} \ \ E|A^n_T|^2\leq C.$$
\emph{Proof.} In this proof, $C$ denotes a constant independent of $n$, whose value may change from line to line. By Proposition 4.1, we get that $y_t^1\leq y_t^n\leq y_t^{n+1}\leq Y_t.$ Thus, we have $$\|y^n_t\|_{L_{\cal{F}}^\infty}\leq C,\ \ n\geq1.\eqno(36)$$
By (35), (36), (9) and the fact that $\phi$ is increasing, subadditive and has linear growth, we have
\begin{eqnarray*}
E|A_T^n|^2&\leq& 3E|y^n_0-y^n_T|^2+3TE\int_0^T|g^n_s|^2ds+3E\int_0^T|Z^n_s-z|^2ds\\
&\leq&C+ 3TE\int_0^T|\phi(|Z_s^n-z|)+\phi(|z|)|^2ds+3E\int_0^T|Z^n_s-z|^2ds\\
&\leq&C+ 3TE\int_0^T(4\nu^2|Z_s^n-z|^2+4\nu^2+2(\phi(|z|))^2)ds+3E\int_0^T|Z^n_s-z|^2ds\\
&\leq&C+ 3(4\nu^2T+1)E\int_0^T|Z_s^n-z|^2ds.\\
\end{eqnarray*}
Applying It\^{o}'s formula to $|y^n_t|^2,$ and using (36), (9), the fact that $\phi$ is increasing, subadditive and has linear growth, and the inequality $2ab\leq \beta a^2+\frac{b^2}{\beta},\ \beta>0,$ we have
\begin{eqnarray*}|y_0^n|^2+E\int_0^T|Z^n_s-z|^2ds&=&E|Y_T|^2+2E\int_0^Ty_s^ng^n_sds
+2E\int_0^Ty_s^ndA_s^n\\
&\leq& C+2E\int_0^T|y_s^n||\phi(|Z_s^n-z|)+\phi(|z|)|ds
+2E\int_0^T|y_s^n|dA_s^n\\
&\leq& C+2E\int_0^T|y_s^n|(\nu|Z_s^n-z|+\nu+\phi(|z|))ds
+C[E|A_T^n|^2]^\frac{1}{2}\\
&\leq& C+\frac{1}{4}E\int_0^T|Z_s^n-z|^2ds+\frac{1}{6(4\nu^2T+1)}E|A_T^n|^2.\\
\end{eqnarray*}
Combining the above two inequalities completes the proof. \ \ $\Box$\\\\
By (32), Proposition 4.1 and (ii) in Proposition 4.2, we get that as $n\rightarrow\infty,$
$$y^n_t\nearrow Y_t,\ \ dP\times dt-a.e.\eqno(37)$$
Then by (36) and the Lebesgue dominated convergence theorem, we have
$$y^n_t\rightarrow Y_t,\ \ dt-a.e.\eqno(38)$$
in the $L^2({\cal{F}}_T)$ sense.
By (9), (i) in Proposition 4.2 and the linear growth of $\phi$, there exists a constant $C$ independent of $n$ such that
$$E\int_0^T|g^n_s|^2ds\leq C.\eqno(39)$$
With the help of (36)-(39) and Proposition 4.2, we can apply the monotonic limit theorem (see Peng [18, Theorem 2.1] or Peng [19, Theorem 7.2]) to (35), and we obtain
$$Y_t=Y_T+A_T-A_t+\int_t^Tg_sds-\int_t^T(Z_s-z)dB_s,\eqno(40)$$
where $(Z_s-z)\in L_{\cal{F}}^2(0,T,\textbf{R}^d)$ and $g_s\in L_{\cal{F}}^2(0,T)$ are the weak limits of $Z_s^n-z$ and $g_s^n$ in $L_{\cal{F}}^2(0,T,\textbf{R}^d)$ and $L_{\cal{F}}^2(0,T),$ respectively. For $t\in[0,T],\ A_t$ is the weak limit of $A_t^n$ in $L^2({\cal{F}}_T)$, and $A_t\in{\mathcal{D}}^2_{\cal{F}}(0,T)$ is increasing with $A_0=0$. Since $Y_t$ is continuous, by (40), $A_t$ is a continuous process, and by the monotonic limit theorem in Peng [18, 19] again, we further have
$$Z_t^n-z\rightarrow Z_t-z,\eqno(41)$$
in the $L_{\cal{F}}^2$ sense, as $n\rightarrow \infty$. Then by (34), (41) and the fact that $\phi$ is continuous with $\phi(0)=0,$ we can deduce that there exists a subsequence, still denoted by ${n},$ such that the limit of $g_t^n$ exists $dP\times dt-a.e.$ Thus by (39), the Lebesgue dominated convergence theorem and the fact that $g_s\in L_{\cal{F}}^2(0,T)$ is the weak limit of $g_s^n$ in $L_{\cal{F}}^2(0,T),$ we can get
$$g_t^n\rightarrow g_t,\eqno(42)$$
in the $L_{\cal{F}}^2$ sense, as $n\rightarrow \infty$. Thanks to (38), (41) and (42), by (35) and (40), we can deduce that
$$A_T^n-A_t^n\rightarrow A_T-A_t,\ \ dt-a.e.\eqno(43)$$
in the $L^2({\cal{F}}_T)$ sense, as $n\rightarrow \infty$. Then by (33), (38), (43) and (v) in Lemma 3.1, we can deduce that
$${\cal{E}}[Y_T+zB_T+A_T-A_t|{\cal{F}}_t]=Y_t+zB_t,\ \ dP\times dt-a.e.$$
By the continuity of $Y_t$ and (H2), we have $\forall t\in[0,T],$
$${\cal{E}}[Y_T+zB_T+A_T|{\cal{F}}_t]=Y_t+zB_t+A_t,\ \ P-a.s.$$
The proof is complete. \ \ $\Box$
\section{Representation for ${\cal{F}}$-expectation by $g$-expectation}
The following representation theorem is the main result of this paper.\\\\
\textbf{Theorem 5.1} Let ${\cal{F}}$-expectation ${\cal{E}}$ satisfy (H1). Then there exists a function $g(t,z): \Omega \times [0,T]\times {\mathbf{
R}}^d\longmapsto \mathbf{R}$ satisfying (A1), (A2) and (A3), such that, for each $X\in L^2({\cal{F}}_T)$ and $t\in[0,T],$ we have
\begin{center}
${\cal{E}}[X|{\cal{F}}_t]={\cal{E}}^g[X|{\cal{F}}_t],\ \ P-a.s.$
\end{center}
\emph{Proof.}
For $z\in \textbf{R}^d$, we consider the following SDE:
$$dY_t^z=-\phi(|z|)dt+zdB_t,\ \ Y_0^z=0.$$
Then
$$Y_t^z=Y_T^z+\int_t^T\phi(|z|)ds-\int_t^TzdB_s,\ \ Y_0^z=0.\eqno(44)$$
Clearly, $-\phi(|z|)t+zB_t=Y_t^z$ is a $\phi$-martingale and $-\phi(|z|)t\in S^\infty_{{\cal{F}}}(0,T).$ Then by (iii) in Lemma 3.1, we can check that $-\phi(|z|)t+zB_t$ is an ${\cal{E}}$-supermartingale. Then by Theorem 4.3, there exists a process $A_t^z\in S^2_{{\cal{F}}}(0,T)$, which is increasing with $A_0^z=0$, such that
$${\cal{E}}[-\phi(|z|)T+zB_T+A_T^z|{\cal{F}}_t]=-\phi(|z|)t+zB_t+A_t^z,\ \ \ \forall t\in [0,T].\eqno(45)$$
Then by Lemma 3.4, there exists a pair $(g(s,z), Z^z_s)$ such that
$$-\phi(|z|)t+zB_t+A_t^z=-\phi(|z|)T+zB_T+A_T^z+\int_t^Tg(s,z)ds-\int_t^TZ_s^zdB_s,\eqno(46)$$
and
$$|g(s,z)|\leq\phi(|Z_s^z|) \ \ \textrm{and}\ \ |g(s,z)-g(s,\bar{z})|\leq \phi(|Z_s^z-Z_s^{\bar{z}}|),\ \ \textrm{for}\ \ \bar{z}\in \textbf{R}^d.\eqno(47)$$
Comparing the bounded variation parts and the martingale parts of (44) and (46), we get
\begin{eqnarray*}
\phi(|z|)t&\equiv&A_t^z+\int_0^tg(s,z)ds,\\
Z_s^z&\equiv& z.
\end{eqnarray*}
Then combining the above equalities with (46) and (47), we have
$$-\phi(|z|)t+zB_t+A_t^z=-\phi(|z|)T+zB_T+A_T^z+\int_t^Tg(s,z)ds-\int_t^TzdB_s,\eqno(48)$$
and
$$|g(s,z)|\leq\phi(|z|)\ \ \textrm{ and}\ \ |g(s,z)-g(s,\bar{z})|\leq \phi(|z-\bar{z}|).\eqno(49)$$
Thus $g(t,z)$ satisfies (A1), (A2) and (A3).
By (48), (H2) and (45), we can get, for $0\leq r \leq t\leq T,$
\begin{eqnarray*}
{\cal{E}}\left[-\int_r^tg(s,z)ds+\int_r^tzdB_s|{\cal{F}}_r\right]
&=&{\cal{E}}\left[-\phi(|z|)t+zB_t+A_t^z-(-\phi(|z|)r+zB_r+A_r^z)|{\cal{F}}_r\right]\\
&=&{\cal{E}}\left[-\phi(|z|)t+zB_t+A_t^z|{\cal{F}}_r\right]-(-\phi(|z|)r+zB_r+A_r^z)\\
&=&0.
\end{eqnarray*}
Thanks to the above equality, (49), the fact that $\phi$ is continuous with $\phi(0)=0$ and has linear growth, and (v) in Lemma 3.1, using the same argument as for (7.4) in Coquet et al. [5], we can get, for $0\leq r \leq t\leq T$ and $\forall \eta_s\in L^2_{{\cal{F}}}(0,T;\textbf{R}^d),$
$${\cal{E}}\left[-\int_r^tg(s,\eta_s)ds+\int_r^t\eta_sdB_s|{\cal{F}}_r\right]=0.\eqno(50)$$
For $X \in L^2({\cal{F}}_T),$ we consider the BSDE
$$Y_t=X+\int_t^Tg(s,Z_s)ds-\int_t^TZ_sdB_s.\eqno(51)$$
By (49), BSDE (51) has a unique solution $(Y_t,Z_t)$ in ${\mathcal{S}}^2_{\cal{F}}(0,T)\times L^2_{\cal{F}}(0,T;{\mathbf{R}}^d).$ By (49) and Definition 2.1, we have
$${\cal{E}}^g\left[X|{\cal{F}}_t\right]=Y_t.$$
By (H2), (51) and (50), we get
$${\cal{E}}\left[X|{\cal{F}}_t\right]-Y_t={\cal{E}}\left[X-Y_t|{\cal{F}}_t\right]\\
={\cal{E}}\left[-\int_t^Tg(s,Z_s)ds+\int_t^TZ_sdB_s|{\cal{F}}_t\right]=0.$$
From above two equalities, we have
$${\cal{E}}[X|{\cal{F}}_t]={\cal{E}}^g[X|{\cal{F}}_t],\ \ P-a.s.$$
Now, we prove the uniqueness of $g$. Suppose there exists another function $\bar{g}(t,z): \Omega \times [0,T]\times {\mathbf{
R}}^d\longmapsto \mathbf{R}$ satisfying (A1), (A2) and (A3), such that for each $X\in L^2({\cal{F}}_T)$ and $t\in[0,T],$ we have
$${\cal{E}}^g[X|{\cal{F}}_t]={\cal{E}}^{\bar{g}}[X|{\cal{F}}_t],\ \ P-a.s.$$
For each $z\in \textbf{R}^d$, $t\in[0,T]$ and $\varepsilon\in[0,T-t]$, let $y_s^{t+\varepsilon}$ and $\bar{y}_s^{t+\varepsilon}$ be the solutions of the BSDEs with parameters $(g,z(B_{t+\varepsilon}-B_t),t+\varepsilon)$ and $(\bar{g},z(B_{t+\varepsilon}-B_t),t+\varepsilon),$ respectively. By (A3), we can check that
$$y_s^{t+\varepsilon}={\cal{E}}^g[z(B_{t+\varepsilon}-B_t)|{\cal{F}}_s]\ \ \textrm{and}\ \ \bar{y}_s^{t+\varepsilon}={\cal{E}}^{\bar{g}}[z(B_{t+\varepsilon}-B_t)|{\cal{F}}_s],\ \ s\in[0,t+\varepsilon].$$
Thus we have
$$y_s^{t+\varepsilon}=\bar{y}_s^{t+\varepsilon},\ \ P-a.s.,\ \ s\in[0,t+\varepsilon].$$
Then by the representation theorem for generators of BSDEs with continuous generators of linear growth (see Jia [11, Theorem 3.4] or Fan and Jiang [7, Theorem 2]) and some simple arguments, we can get that $\forall z\in\textbf{R}^d,$
$$g(t,z)=\bar{g}(t,z),\ \ dP\times dt-a.e.$$
By (A1) (which implies the continuity of $g$ and $\bar{g}$ in $z$), we have $dP\times dt-a.e.,$
$$g(t,z)=\bar{g}(t,z),\ \ z\in\textbf{R}^d.$$
The proof is complete. \ \ $\Box$\\
Theorem 4.1 and Theorem 4.2 are, respectively, the existence and uniqueness theorem and the comparison theorem for ${\cal{E}}(f,T,X,z)$ with $X\in L^\infty({\cal{F}}_T)$ and $f(\cdot,0)\in L^\infty_{{\cal{F}}}(0,T)$. From Theorem 5.1, we can get the following more general result.\\\\
\
\textbf{Corollary 5.1} Let ${\cal{F}}$-expectation ${\cal{E}}$ satisfy (H1), $z\in\textbf{R}^d,$ $X\in L^2({\cal{F}}_T)$ and $f(\cdot,0)\in L^2_{{\cal{F}}}(0,T).$ Then ${\cal{E}}(f,T,X,z)$ has a unique solution $y_t\in {\cal{S}}_{\cal{F}}^2(0,T).$ Moreover if $\bar{y}_t$ is the solution of the following ${\cal{E}}(f+\eta_t,T,\bar{X},z)$:
$$\bar{y}_t+zB_t={\cal{E}}\left[\bar{X}+zB_T+\int_t^T(f(s,\bar{y}_s)+\eta_s)ds|{\cal{F}}_t\right],\ \ t\in[0,T],$$
where $\bar{X}$ in $L^2({\cal{F}}_T)$ and $\eta_t\in L^2_{{\cal{F}}}(0,T)$ satisfy
$$\bar{X}\geq X,\ \ \ \eta_t\geq0,\ \ dP\times dt-a.e.,$$
then $\forall t\in[0,T],$ we have
$$\bar{y}_t\geq y_t,\ \ \ P-a.s.$$
\emph{Proof.} By the proof of Theorem 5.1, there exists a function $g: \Omega \times [0,T]\times {\mathbf{
R}}^d\longmapsto \mathbf{R}$ satisfying (A1), (A2) and (A3) such that $g$ satisfies (50) and, for $\xi$ in $L^2({\cal{F}}_T),$ $${\cal{E}}[\xi|{\cal{F}}_t]={\cal{E}}^g[\xi|{\cal{F}}_t],\ \ P-a.s.\eqno(52)$$
Let $z\in\textbf{R}^d,$ $X\in L^2({\cal{F}}_T)$ and $f(\cdot,0)\in L^2_{{\cal{F}}}(0,T).$ Set $\hat{g}(t,\hat{y},\hat{z}):=f(t,\hat{y}-zB_t)+g(t,\hat{z}).$ Clearly, $\hat{g}(t,\hat{y},\hat{z})$ satisfies (A1) and (A2). Thus, the BSDE
$$Y_t=X+zB_T+\int_t^Tf(s,Y_s-zB_s)ds+\int_t^Tg(s,Z_s)ds-\int_t^TZ_sdB_s.\eqno(53)$$
has a unique solution $(Y_t,Z_t)\in{\mathcal{S}}^2_{\cal{F}}(0,T)\times L^2_{\cal{F}}(0,T;{\mathbf{R}}^d)$.
By (53), (50) and (H2), we can get
$$Y_t={\cal{E}}\left[X+zB_T+\int_t^Tf(s,Y_s-zB_s)ds|{\cal{F}}_t\right].$$
Then by setting $y_t:={Y}_t-zB_t$, we get that ${\cal{E}}(f,T,X,z)$ has a solution $y_t\in S^2_{{\cal{F}}}(0,T).$ Moreover, by (52) and the uniqueness of the solution of BSDE (53), we can also deduce that $y_t$ is the unique solution. In fact, if ${\cal{E}}(f,T,X,z)$ has another solution $\hat{y}_t\in S^2_{{\cal{F}}}(0,T),$ then there exists a process $\hat{Z}_t\in L^2_{\cal{F}}(0,T;{\mathbf{R}}^d)$ such that
\begin{eqnarray*}
\hat{y}_t+zB_t&=&{\cal{E}}\left[X+zB_T+\int_t^Tf(s,\hat{y}_s)ds|{\cal{F}}_t\right]\\
&=&{\cal{E}}\left[X+zB_T+\int_0^Tf(s,\hat{y}_s)ds|{\cal{F}}_t\right]-\int_0^tf(s,\hat{y}_s)ds\\
&=&{\cal{E}}^g\left[X+zB_T+\int_0^Tf(s,\hat{y}_s)ds|{\cal{F}}_t\right]-\int_0^tf(s,\hat{y}_s)ds\\
&=&X+zB_T+\int_t^Tf(s,\hat{y}_s)ds+\int_t^Tg(s,\hat{Z}_s)ds-\int_t^T\hat{Z}_sdB_s.
\end{eqnarray*}
Thus, by the uniqueness of the solution of BSDE (53), we can get $dP\times dt-a.e.,\ y_t=\hat{y}_t.$
Set $\bar{g}(t,\bar{y},\bar{z}):=f(t,\bar{y}-zB_t)+\eta_t+g(t,\bar{z}).$ Clearly, $\bar{g}(t,\bar{y},\bar{z})$ also satisfies (A1) and (A2). Thus, the BSDE
$$\bar{Y}_t=\bar{X}+zB_T+\int_t^T(f(s,\bar{Y}_s-zB_s)+\eta_s)ds+\int_t^Tg(s,\bar{Z}_s)ds-\int_t^T\bar{Z}_sdB_s.\eqno(54)$$
has a unique solution $(\bar{Y}_t,\bar{Z}_t)\in{\mathcal{S}}^2_{\cal{F}}(0,T)\times L^2_{\cal{F}}(0,T;{\mathbf{R}}^d)$. By the same argument as above, we get that $\bar{y}_t:={\bar{Y}}_t-zB_t$ is the unique solution of ${\cal{E}}(f+\eta_t,T,\bar{X},z).$ Then by (53), (54) and the comparison theorem for BSDEs under (A1) and (A2) (see Jia [11, Theorem 3.1]), we can get $\forall t\in[0,T],$ $\bar{Y}_t\geq Y_t, \ P-a.s.$ Thus $\forall t\in[0,T],$ $\bar{y}_t\geq y_t, \ P-a.s.$ The proof is complete. \ \ $\Box$
In fact, if ${\cal{F}}$-expectation ${\cal{E}}$ satisfies (H1), then by Theorem 5.1, Lemma 2.1 and an argument similar to that of Corollary 5.1, we can also obtain a Doob-Meyer decomposition for ${\cal{E}}$-supermartingales $y_t+zB_t$ with $y_t\in S^2_{{\cal{F}}}(0,T).$ We leave the details to the interested reader.
\end{document}
\begin{document}
\rule{0cm}{1cm}
\begin{center}
{\Large\bf The matching energy of graphs with given edge connectivity\footnote{The first author is supported by NNSFC (Nos. 11326216 and 11301306);
the second author is supported by NNSFC (Nos. 11101351 and 11171288) and
NSF of the Jiangsu Higher Education Institutions (No. 11KJB110014)
.}
}
\end{center}
\vskip 2mm \centerline{ Shengjin Ji$^{1}$, Hongping Ma$^{2}$
\footnote{Corresponding author.\\
E-mail addresses: [email protected](S.Ji), [email protected](H.Ma)}}
\begin{center}
$^{1}$ School of Science, Shandong University of Technology,
\\ Zibo, Shandong 255049, China\\
$^{2}$ School of Mathematics and Statistics, Jiangsu Normal
University,\\
Xuzhou, Jiangsu 221116, China
\end{center}
\begin{center}
{\bf Abstract}
\end{center}
{\small Let $G$ be a simple graph of order $n$ and
$\mu_1,\mu_2,\ldots,\mu_n$ the roots of its matching polynomial. The
matching energy of $G$ is defined as the sum $\sum_{i=1}^n|\mu_i|$.
Let $K_{n-1,1}^k$ be the graph obtained from $K_1\cup K_{n-1}$ by
adding $k$ edges between $V(K_1)$ and $V(K_{n-1})$. In this paper,
we show that $K_{n-1,1}^k$ has maximum matching energy among all
connected graphs of order $n$ with edge connectivity $k$.}
\vskip 3mm
\noindent {Keywords: Matching energy, Edge connectivity, Graph
energy, Matching} \vskip 3mm
\vskip 3mm \noindent {\bf AMS Classification:} 05C50, 05C35
\section{Introduction}
We use Bondy and Murty \cite{graphBondy2008} for terminology and
notations not defined in this paper and consider undirected and
simple graphs only. Let $G=(V, E)$ be such a graph with order $n$.
Denote by $m(G,t)$ the number of $t$-matchings of $G$. Clearly,
$m(G,1)=e(G)$, the size of $G$, and $m(G,t)=0$ for $t > \lfloor n/2\rfloor$. It is both
consistent and convenient to define $m(G,0)=1$.
Recall that the \emph{matching polynomial} of a graph $G$ is defined as
$$\alpha(G)=\alpha(G,\lambda)=\sum\limits_{t\geq 0}(-1)^t m(G,t)\lambda^{n-2t}$$
and its theory is well elaborated \cite{CDGT,Farrell1979,gutmanmatch1979}.
The eigenvalues $\lambda_{1}, \lambda_{2},\ldots, \lambda_{n}$ of the adjacency matrix $A(G)$ of $G$ are said to be the eigenvalues of the graph $G$.
The \emph{energy} of $G$ is defined as
\begin{equation}
E(G)=\sum_{i=1}^{n}|\lambda_{i}|.
\end{equation}
The theory of graph energy is well developed nowadays, for details see \cite{gutman2001,GLZ,li&shi&gutman2012}.
The Coulson integral formula \cite{gutman&polansky1986} plays an important role in the study of graph energy; its version for an acyclic graph $T$ is as follows:
\begin{equation}\label{cformula}
E(T)=\frac{2}{\pi}\int_0^{+\infty}\frac{1}{x^2}\ln\Big[\sum\limits_{t\geq
0} m(T,t)x^{2t}\Big]dx.
\end{equation}
Motivated by formula \eqref{cformula}, Gutman and Wagner \cite{gutman&Wagner2012} defined the \emph{matching
energy} of a graph $G$ as
\begin{equation}\label{matchenergyformula}
ME=ME(G)=\frac{2}{\pi}\int^{+\infty}_{0}\frac{1}{x^2}\ln\Big[
\sum\limits_{t\geq 0}m(G,t) x^{2t}\Big]dx. \qquad\qquad
\end{equation}
Energy and matching energy of graphs are closely related, and they are two quantities of relevance for chemical applications,
for details see \cite{gutmanmt1975,aihara1976,gutmanmt1977}.
The following result gives an equivalent definition of matching
energy.
\begin{defi} \textnormal{\cite{gutman&Wagner2012}}
Let $G$ be a graph of order $n$, and let $\mu_1,\mu_2$, $\cdots$, $\mu_n$ be the roots of its matching polynomial. Then
\begin{equation}
ME(G)=\sum\limits_{i=1}^n|\mu_i|.
\end{equation}
\end{defi}
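As a small illustration (this example is ours, added only to make the definitions concrete), consider the path $P_3$: here $m(P_3,0)=1$ and $m(P_3,1)=2$, so its matching polynomial is $\lambda^3-2\lambda$ with roots $0,\pm\sqrt{2}$, and hence $ME(P_3)=2\sqrt{2}$. The adjacency eigenvalues of $P_3$ are also $0,\pm\sqrt{2}$, so $E(P_3)=2\sqrt{2}=ME(P_3)$, in accordance with the equality $ME(T)=E(T)$ for trees recalled below.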
The formula \eqref{matchenergyformula}
induces a \emph{quasi-order} relation over the set of all graphs on $n$
vertices: if $G_1$ and $G_2$ are two graphs of order $n$, then
\begin{equation}\label{quasi-order} G_1\preceq G_2 \Leftrightarrow m(G_1,t)\leq m(G_2,t) \mbox{ for
all } t=0, 1, \ldots, \lfloor \frac{n}{2}\rfloor.\end{equation}
If $G_1\preceq G_2$ and there
exists some $i$ such that $m(G_1,i) < m(G_2, i )$, then we write
$G_1\prec G_2$. Clearly, $$ G_1\prec G_2\Rightarrow
ME(G_1)<ME(G_2).$$
Recall that the \emph{Hosoya index} of a graph $G$ is defined as
$Z(G)=\sum\limits_{t\geq 0} m(G,t)$ \cite{Hosoya}. So we also have
that
$$ G_1\prec G_2\Rightarrow Z(G_1)<Z(G_2).$$
The following result gives two fundamental identities for the number of $t$-matchings of a graph \cite{Farrell1979,gutmanmatch1979}.
\begin{lem}\label{twomindentity}
Let $G$ be a graph, $e=uv$ an edge of $G$, and
$N(u)=\{v_1(=v),v_2,\ldots,v_j \}$ the set of all neighbors of $u$
in $G$. Then we have
\begin{equation}\label{removeedge}
m(G,t)=m(G-uv,t)+m(G-u-v,t-1),
\end{equation}
\begin{equation}\label{removevertex}
m(G,t)=m(G-u,t)+\sum_{i=1}^{j}m(G-u-v_i,t-1).
\end{equation}
\end{lem}
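To see how \eqref{removeedge} works in a small case (an illustrative computation, not needed in the sequel), take the path $P_4=v_1v_2v_3v_4$ and the middle edge $e=v_2v_3$. Then $P_4-e=2K_2$ and $P_4-v_2-v_3=2K_1$, so
$$m(P_4,1)=m(2K_2,1)+m(2K_1,0)=2+1=3 \quad \mbox{and} \quad m(P_4,2)=m(2K_2,2)+m(2K_1,1)=1+0=1,$$
which agrees with a direct count of the matchings of $P_4$.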
From Lemma \ref{twomindentity}, it is easy to get the following
result.
\begin{lem}\textnormal{\cite{gutman&Wagner2012}}\label{subgraph}
Let $G$ be a graph and $e$ one of its edges. Let $G-e$ be the
subgraph obtained from G by deleting the edge $e$. Then $ G-e\prec
G$ and $ME(G-e)<ME(G)$.
\end{lem}
By Lemma \ref{subgraph}, among all graphs on $n$ vertices, the empty graph $E_n$ without edges and the complete graph $K_n$ have, respectively, minimum and maximum matching energy \cite{gutman&Wagner2012}.
It follows from Eqs. \eqref{cformula} and \eqref{matchenergyformula} that $ME(T)=E(T)$ for any tree $T$ \cite{gutman&Wagner2012}. By using the quasi-order relation, some results have also been obtained
on extremal graphs with respect to matching energy among certain classes of connected graphs with $n$ vertices. For example, the extremal graphs among connected unicyclic
and bicyclic graphs were determined in \cite{gutman&Wagner2012} and
\cite{ji&li&shi2013}, respectively; the minimal graphs among connected
$k$-cyclic ($k\leq n-4$) graphs and bipartite graphs were
characterized in \cite{ji&ma2014}; the maximal connected graph with
given connectivity (resp. chromatic number) was determined in
\cite{li&yan2013}.
Let $\mathcal {G}_{n,k}$ be the set of connected graphs of order $n $ $(\geq 2)$ with edge connectivity $k$ ($1\leq k\leq n-1$).
Let $K_{n-1,1}^k$ be the graph, as shown in Fig. 1, obtained from $K_1\cup K_{n-1}$ by adding $k$ edges between $V(K_1)$ and $V(K_{n-1})$.
In this paper, we show that $K_{n-1,1}^k$ is the unique graph with maximum matching energy (resp. Hosoya index) in $\mathcal {G}_{n,k}$.
\setlength{\unitlength}{1mm}
\begin{picture}(40,20)
\put(30,20){\circle{20}}\put(50,20){\circle{20}}
\put(32,22){\circle*{1.0}}\put(32,19){\circle*{1.0}}
\put(32,15){\circle*{1.0}}
\put(48,22){\circle*{1.0}}\put(48,19){\circle*{1.0}}
\put(48,15){\circle*{1.0}}
\put(32,22){\line(1,0){16}}
\put(32,15){\line(1,0){16}}\put(32,19){\line(1,0){16}}
\multiput(40,15.5)(0,1.5){3}{\circle*{0.7}}
\put(27,20){\makebox(0,0){$K_{n-m}$
}}\put(53,20){\makebox(0,0){$K_m$ }} \put(40,20){\makebox(0,0){$k$
}}
\put(105,20){\circle{20}}
\put(87,20){\circle*{1.5}}
\put(103,20){\circle*{1.0}}\put(103,24){\circle*{1.0}}
\put(103,15){\circle*{1.0}}
\put(87,20){\line(1,0){16}}\put(87,20){\line(4,1){16}}\put(87,20){\line(3,-1){16}}
\multiput(100,16.5)(0,1.3){3}{\circle*{0.6}}
\put(82,20){\makebox(0,0){$K_1$
}}\put(109,20){\makebox(0,0){$K_{n-1}$}}
\put(100,20){\makebox(0,0){$k$ }}
\put(40,9){\makebox(0,0){$K^k_{n-m,m}$ $(k\leq m\leq \lfloor
\frac{n}{2}\rfloor)$}} \put(98,9){\makebox(0,0){$K^k_{n-1,1}$ }}
\put(65,0){\makebox(0,0){Fig. 1 \ Graphs $K^k_{n-m,m}$ and
$K^k_{n-1,1}$. }}
\end{picture}
\section{Main results}
First we recall some notations. By $\kappa'(G)$ and $\delta(G)$, we denote
the edge connectivity and the minimum degree of a graph $G$, respectively.
Let $S$ be a nonempty proper subset of $V$. We use $G[S]$ to denote the subgraph of $G$ induced by $S$. An \emph{edge cut} of $G$, denoted by $\partial(S)$, is a subset of $E(G)$ of the
form $[S,\bar{S}]$, where $\bar{S}=V\backslash S$. An edge cut $\partial(v)$ ($v\in V$) is
called a \emph{trivial} edge cut. A \emph{$k$-edge cut} is an edge
cut of $k$ elements.
Let $G\in \mathcal {G}_{n,k}$. Then $G$ must have a $k$-edge cut $\partial(S)$ with $1\leq |S| \leq \lfloor
\frac{n}{2}\rfloor$.
\begin{lem}\label{trivial-edge-cut}
Let $G\ncong K_{n-1,1}^k$ be a graph in $\mathcal {G}_{n,k}$ with a trivial $k$-edge cut.
Then $G\prec K_{n-1,1}^k$.
\end{lem}
\noindent {\it Proof.} Let $\partial(S)$ be a trivial $k$-edge cut of $G$ with $|S|=1$. Since $G\ncong
K_{n-1,1}^k$, $G[\bar{S}]$ is a proper subgraph of $K_{n-1}$. Hence $G$ is a proper subgraph of
$K_{n-1,1}^k$, and so the result follows from Lemma
\ref{subgraph}.
\qed
\begin{lem}\label{m&k}
Let $G \in \mathcal {G}_{n,k}$ be a graph without trivial $k$-edge cuts. Then for any $k$-edge cut $\partial(S)$ of $G$ with $2\leq |S| \leq \lfloor \frac{n}{2}\rfloor$, we have $|S|\geq k$.
\end{lem}
\noindent {\it Proof.} For $k\leq 2$, the assertion is trivial, so suppose $k\geq 3$.
Assume, to the contrary, that $G$ has a $k$-edge cut $\partial(S)$ with $2\leq |S|\leq
k-1$. By the facts that $\delta(G)\geq \kappa'(G)=k$ and $G$ has no trivial $k$-edge
cuts, we have $\delta(G)\geq k+1$, and thus $\sum_{v\in
S}d_G(v)\geq |S|(k+1)$. On the other hand, $\sum_{v\in
S}d_G(v)=2e(G[S])+k\leq |S|(|S|-1)+k$. Therefore, we have
$|S|(k+1) \leq |S|(|S|-1)+k$, that is, $(|S|-1)(k-|S|)+|S|\leq 0$, which is a contradiction. Therefore the result holds.
\qed
For $k\leq m\leq \lfloor \frac{n}{2}\rfloor$, let $K_{n-m,m}^k$ be
the graph, as shown in Fig. 1, obtained from $K_{n-m}\cup K_m$ by adding $k$ independent edges between $V(K_{n-m})$ and $V(K_m)$.
It is easy to see that $\kappa'(K_{n-m,m}^k)=k$ and $\kappa'(K_{n-1,1}^k)=k$.
We next show that for a graph $G \in \mathcal {G}_{n,k}$ without trivial $k$-edge cuts, $G\preceq K_{n-m,m}^k$ for some $m$.
Before this, we introduce a new graph operation as follows.
Let $G_1$ be a graph in $\mathcal {G}_{n,k}$ such that $G_1$ has a $k$-edge
cut $\partial(S)$ with $G_1[S]=K_m$, $G_1[\bar{S}]=K_{n-m}$, and $k\leq m\leq \lfloor \frac{n}{2}\rfloor$. Suppose that $u_1, u_2\in \bar{S}$, $v_1, v_2\in
S$, $e_1=u_1v_1$, $e_2=u_1v_2$ are two edges of $\partial(S)$, and $u_2$ is not incident with any edge in $\partial(S)$.
If $G_2$ is obtained from $G_1$ by deleting the edge $e_2$ and adding a new edge $e'_2=u_2v_2$, we say that $G_2$ is obtained from $G_1$ by \emph{Operation
I}, as shown in Fig. 2. Clearly, $G_2 \in \mathcal {G}_{n,k}$.
\setlength{\unitlength}{1mm}
\begin{picture}(40,20)
\put(30,20){\circle{20}}\put(50,20){\circle{20}}
\put(32,22){\circle*{1.0}}\put(32,19){\circle*{1.0}}\put(32,25){\circle*{1.0}}
\put(32,15){\circle*{1.0}}
\put(48,22){\circle*{1.0}}\put(48,19){\circle*{1.0}}\put(48,25){\circle*{1.0}}
\put(48,15){\circle*{1.0}}
\put(32,25){\line(1,0){16}}\put(32,25){\line(5,-1){16}}
\put(32,15){\line(1,0){16}}\put(32,19){\line(1,0){16}}
\multiput(40,15.5)(0,1.5){3}{\circle*{0.7}}
\put(40,23){\makebox(0,0){$e_2$}}\put(40,26){\makebox(0,0){$e_1$}}
\put(30,25){\makebox(0,0){$u_1$}}\put(30,22){\makebox(0,0){$u_2$}}
\put(51,25){\makebox(0,0){$v_1$}}\put(51,22){\makebox(0,0){$v_2$}}
\put(27,20){\makebox(0,0){$K_{n-m}$ }}\put(53,19){\makebox(0,0){$K_m$ }}
\put(85,20){\circle{20}}\put(105,20){\circle{20}}
\put(87,22){\circle*{1.0}}\put(87,19){\circle*{1.0}}\put(87,25){\circle*{1.0}}
\put(87,15){\circle*{1.0}}
\put(103,22){\circle*{1.0}}\put(103,19){\circle*{1.0}}\put(103,25){\circle*{1.0}}
\put(103,15){\circle*{1.0}}
\put(87,25){\line(1,0){16}}\put(87,22){\line(1,0){16}}
\put(87,15){\line(1,0){16}}\put(87,19){\line(1,0){16}}
\multiput(95,15.5)(0,1.5){3}{\circle*{0.7}}
\put(93,23){\makebox(0,0){$e'_2$}}\put(93,26){\makebox(0,0){$e_1$}}
\put(83,25){\makebox(0,0){$u_1$}}\put(83,22){\makebox(0,0){$u_2$}}
\put(105,25){\makebox(0,0){$v_1$}}\put(105,22){\makebox(0,0){$v_2$}}
\put(82,20){\makebox(0,0){$K_{n-m}$ }}\put(108,19){\makebox(0,0){$K_m$ }}
\put(65,20){\makebox(0,0){$\longrightarrow$ }}
\put(68,23){\makebox(0,0){\emph{Operation I} }}
\put(45,10){\makebox(0,0){$G_1$ }}
\put(97,10){\makebox(0,0){$G_2$ }}
\put(60,5){\makebox(0,0){Fig. 2 The graphs $G_1$ and $G_2$ of
$\mathcal {G}_{n,k}$ in Operation I. }}
\end{picture}
\begin{lem}\label{operation}
If $G_2$ is obtained from $G_1$ by Operation I, then
$G_1\prec G_2$.
\end{lem}
\noindent {\it Proof.} By formula \eqref{removeedge}, we have
\begin{equation*}
m(G_1,t)=m(G_1-e_2,t)+m(G_1-u_1-v_2,t-1),
\end{equation*}and
\begin{equation*}
m(G_2,t)=m(G_2-e'_2,t)+m(G_2-u_2-v_2,t-1).
\end{equation*}
Note that $G_1-e_2\cong G_2-e'_2$, and $G_1-u_1-v_2$ is isomorphic
to a proper subgraph of $G_2-u_2-v_2$. So, $m(G_1-u_1-v_2,t-1)\leq
m(G_2-u_2-v_2,t-1)$ for all $t$ and
$m(G_1-u_1-v_2,1)<m(G_2-u_2-v_2,1)$. The result thus follows. \qed
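The effect of Operation I on the matching numbers can also be observed by brute force on a small instance. The following Python sketch (ours, purely illustrative and not part of the argument; it only uses the standard library) counts $t$-matchings directly for $n=7$, $m=3$, $k=2$, where in $G_1$ both edges of the $2$-edge cut are incident with the same vertex $u_1=0$, and $G_2$ is obtained by Operation I with $u_2=1$, $v_2=5$.
\begin{verbatim}
from itertools import combinations

def matchings(edges, t):
    # number of t-edge matchings in the graph given by its edge list
    count = 0
    for sub in combinations(edges, t):
        verts = [v for e in sub for v in e]
        if len(verts) == len(set(verts)):   # no two edges share a vertex
            count += 1
    return count

def complete(vertices):
    return [frozenset(p) for p in combinations(vertices, 2)]

# G_1: K_4 on {0,1,2,3}, K_3 on {4,5,6}; cut edges e_1 = {0,4}, e_2 = {0,5}
base = complete(range(4)) + complete(range(4, 7))
G1 = base + [frozenset({0, 4}), frozenset({0, 5})]
# G_2: Operation I deletes e_2 = {0,5} and adds e_2' = {1,5}
G2 = base + [frozenset({0, 4}), frozenset({1, 5})]

for t in range(4):   # a graph on 7 vertices has matchings of size at most 3
    print(t, matchings(G1, t), matchings(G2, t))
\end{verbatim}
On this instance the printed values satisfy $m(G_1,t)\leq m(G_2,t)$ for every $t$, with strict inequality for some $t$, as Lemma \ref{operation} predicts.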
\begin{lem}\label{without trivial-edge-cut}
Let $G \in \mathcal {G}_{n,k}$ be a graph without trivial $k$-edge cuts.
Then $G\preceq K_{n-m,m}^k$ for some $m$ with $\max\{k, 2\} \leq m\leq \lfloor \frac{n}{2}\rfloor$.
\end{lem}
\noindent {\it Proof.} Let $\partial(S)$ be a $k$-edge cut of $G$ with $2\leq |S| \leq \lfloor \frac{n}{2}\rfloor$. Let $|S|=m$. Then $m\geq k$ by Lemma \ref{m&k}.
Let $G_1$ be the graph obtained from $G$, by adding edges if necessary, such that $G_1[S]$ and $G_1[\bar{S}]$
are complete graphs. Then $G\preceq G_1$ by Lemma \ref{subgraph}. If $G_1 \ncong K_{n-m,m}^k$, then by using
Operation I repeatedly, we can finally get $K_{n-m,m}^k$ from
$G_1$. Hence $G_1 \preceq K_{n-m,m}^k$ by Lemma \ref{operation}.
The proof is thus complete.
\qed
In the following, we show that $K_{n-m,m}^k\prec K_{n-1,1}^k$ for $m\geq 2$.
\begin{lem}\label{compare edge}
Suppose $\max\{k, 2\} \leq m\leq \lfloor \frac{n}{2}\rfloor$. Then
$e(K^k_{n-m,m})<e(K^k_{n-1,1}).$
\end{lem}
\noindent {\it Proof.} Note that
\begin{equation*}
\begin{split}
e(K^k_{n-m,m})&=\frac{m(m-1)}{2}+\frac{(n-m)(n-m-1)}{2}+k,
\end{split}
\end{equation*}
and
\begin{equation*} e(K^k_{n-1,1})=\frac{(n-1)(n-2)}{2}+k.
\end{equation*}
Hence we have
\begin{equation*}
\begin{split}
e(K^k_{n-1,1})-e(K^k_{n-m,m})
&=\frac{n^2-3n+2}{2}-\frac{n^2+2m^2-2mn-n}{2}\\
&=(m-1)(n-m-1)>0.
\end{split}
\end{equation*}
The proof is thus complete. \qed
\begin{lem}\label{n=2m&n=2m+1}
Let $m \geq 1$ be a positive integer. Then we have
\begin{equation}\label{n=2m}
m(K^1_{m,m},t)\leq m(K^1_{2m-1,1},t) \mbox{ for all } t=0, 1,
\ldots, m,
\end{equation}
and
\begin{equation}\label{n=2m+1} m(K^1_{m+1,m},t)\leq
m(K^1_{2m,1},t) \mbox{ for all } t=0, 1, \ldots, m.
\end{equation}
\end{lem}
\noindent {\it Proof.} We apply induction on $m$. For $m=1$ both sides coincide, and for $m=2$ the assertions follow from Lemma \ref{subgraph},
since $K^1_{2,2}$ and $K^1_{3,2}$ are proper subgraphs of $K^1_{3,1}$ and $K^1_{4,1}$, respectively.
So suppose that $m\geq 3$ and Ineqs. \eqref{n=2m} and \eqref{n=2m+1} hold for smaller values of $m$.
By Lemma \ref{twomindentity}, we obtain that
\begin{equation*}
\begin{split}
m(K^1_{m,m},t)&=m(K^1_{m,m-1},t)+(m-2)m(K^1_{m,m-2},t-1)+m(K_m\cup K_{m-2},t-1)\\
&=m(K^1_{m,m-1},t)+(m-1)m(K^1_{m,m-2},t-1)-m(K_{m-1}\cup K_{m-3},t-2)\\
&=m(K^1_{m,m-1},t)-m(K_{m-1}\cup K_{m-3},t-2)+(m-1)[m(K^1_{m-1,m-2},t-1)\\
&\quad+(m-1)m(K^1_{m-2,m-2},t-2)-m(K_{m-3}\cup K_{m-3},t-3)]\\
&\leq m(K^1_{m,m-1},t)+(m-1)m(K^1_{m-1,m-2},t-1)+(m-1)^{2}m(K^1_{m-2,m-2},t-2)\\
&\quad-m(K_{m-1}\cup K_{m-3},t-2),
\end{split}
\end{equation*}and
\begin{equation*}
\begin{split}
m(K^1_{2m-1,1},t)&=m(K^1_{2m-2,1},t)+(2m-3)m(K^1_{2m-3,1},t-1)+m(K_{2m-3},t-1)\\
&=m(K^1_{2m-2,1},t)+m(K_{2m-3},t-1)+(2m-3)[m(K^1_{2m-4,1},t-1)\\
&\quad+(2m-5)m(K^1_{2m-5,1},t-2)+m(K_{2m-5},t-2)]\\
&\geq m(K^1_{2m-2,1},t)+(2m-3)m(K^1_{2m-4,1},t-1)\\
&\quad+(2m-3)(2m-5)m(K^1_{2m-5,1},t-2).
\end{split}
\end{equation*}
By the induction hypothesis, we obtain that
\begin{equation*}
\begin{split}
m(K^1_{m,m-1},t)&\leq m(K^1_{2m-2,1},t),\\
m(K^1_{m-1,m-2},t-1)&\leq m(K^1_{2m-4,1},t-1),\\
m(K^1_{m-2,m-2},t-2)&\leq m(K^1_{2m-5,1},t-2).\\
\end{split}
\end{equation*}
Since $m\geq 3$, we have that $m-1\leq 2m-3$, and $(m-1)^2\leq(2m-3)(2m-5)$ when
$m\geq4$. Notice that for $m=3$, $K^1_{m-2,m-2}=K_{m-1}\cup
K_{m-3}$, and $(m-1)^2-1=(2m-3)(2m-5)$.
Hence Ineq. (\ref{n=2m}) holds.
By Lemma \ref{twomindentity}, we get that
\begin{equation*}
\begin{split}
m(K^1_{m+1,m},t)&=m(K^1_{m,m},t)+(m-1)m(K^1_{m-1,m},t-1)+m(K_{m-1}\cup K_{m},t-1)\\
&\leq m(K^1_{m,m},t)+m \cdot m(K^1_{m-1,m},t-1)\\
&=m(K^1_{m,m},t)+m \cdot [m(K^1_{m-1,m-1},t-1)\\
&\quad+(m-2)m(K^1_{m-1,m-2},t-2)+m(K_{m-1}\cup K_{m-2},t-2)]\\
&\leq m(K^1_{m,m},t)+m\cdot [m(K^1_{m-1,m-1},t-1)\\
&\quad+(m-1)m(K^1_{m-1,m-2},t-2)]\\
&=m(K^1_{m,m},t)+m \cdot m(K^1_{m-1,m-1},t-1)
+m(m-1)m(K^1_{m-1,m-2},t-2),
\end{split}
\end{equation*}and
\begin{equation*}
\begin{split}
m(K^1_{2m,1},t)&=m(K^1_{2m-1,1},t)+(2m-2)m(K^1_{2m-2,1},t-1)+m(K_{2m-2},t-1)\\
&=m(K^1_{2m-1,1},t)+m(K_{2m-2},t-1)+(2m-2)[m(K^1_{2m-3,1},t-1)\\
&\quad+(2m-4)m(K^1_{2m-4,1},t-2)+m(K_{2m-4},t-2)]\\
&\geq m(K^1_{2m-1,1},t)+(2m-2)m(K^1_{2m-3,1},t-1)\\
&\quad+(2m-2)(2m-4)m(K^1_{2m-4,1},t-2).
\end{split}
\end{equation*}
By the induction hypothesis and Ineq. (\ref{n=2m}), we have that
\begin{equation*}
\begin{split}
m(K^1_{m,m},t)&\leq m(K^1_{2m-1,1},t)\\
m(K^1_{m-1,m-1},t-1)&\leq m(K^1_{2m-3,1},t-1)\\
m(K^1_{m-1,m-2},t-2)&\leq m(K^1_{2m-4,1},t-2).
\end{split}
\end{equation*}
Notice that $m\leq 2m-2$ and $m(m-1)\leq (2m-2)(2m-4)$ for $m\geq 3$. Therefore Ineq. (\ref{n=2m+1}) holds.
The proof is thus complete. \qed
\begin{lem}\label{k=1}
Suppose $2\leq m\leq \lfloor \frac{n}{2}\rfloor$. Then
$$m(K^1_{n-m,m},t)\leq m(K^1_{n-1,1},t) ~\mbox{for all}~ t=0, 1, \ldots, \lfloor \frac{n}{2}\rfloor.$$
\end{lem}
\noindent {\it Proof.} We apply induction on $n$. As the two cases $n=2m$ and $n=2m+1$
were proved by Lemma \ref{n=2m&n=2m+1}, we proceed to the induction
step. By Lemma \ref{twomindentity} and the induction hypothesis, we
have that
\begin{equation*}\label{n-m}
\begin{split}
m(K^1_{n-m,m},t)&=m(K^1_{n-m,m-1},t)+(m-2)m(K^1_{n-m,m-2},t-1)+m(K_{n-m}\cup K_{m-2},t-1)\\
&\leq m(K^1_{n-m,m-1},t)+(m-1)m(K^1_{n-m,m-2},t-1)\\
&\leq m(K^1_{n-2,1},t)+(m-1)m(K^1_{n-3,1},t-1)\\
&=m(K_{n-2},t)+m(K_{n-3},t-1)\\
&\quad +(m-1)(m(K_{n-3},t-1)+m(K_{n-4},t-2))\\
&=m(K_{n-2},t)+m\cdot m(K_{n-3},t-1)+(m-1)m(K_{n-4},t-2),
\end{split}
\end{equation*}and
\begin{equation*}
\begin{split}
m(K^1_{n-1,1},t)&=m(K_{n-1},t)+m(K_{n-2},t-1)\\
&=m(K_{n-2},t)+(n-2)m(K_{n-3},t-1)\\
&\quad+m(K_{n-3},t-1)+(n-3)m(K_{n-4},t-2)\\
&=m(K_{n-2},t)+(n-1)m(K_{n-3},t-1)+(n-3)m(K_{n-4},t-2).
\end{split}
\end{equation*}
Thus the result follows by the fact $m\leq n-2$.\qed
\begin{lem}\label{twopartitionc}
Suppose $k\leq m\leq \lfloor \frac{n}{2}\rfloor$. Then
$$m(K^k_{n-m,m},t)\leq m(K^k_{n-1,1},t) ~\mbox{for all}~ t=0, 1, \ldots, \lfloor \frac{n}{2}\rfloor.$$
\end{lem}
\noindent {\it Proof.} We apply induction on $k$. As the case $k=1$ was proved by
Lemma \ref{k=1}, we suppose that $k\geq 2$ and the assertion holds for smaller values of $k$.
By formula \eqref{removeedge}, we have that
\begin{equation*}
m(K^k_{n-m,m},t) = m(K^{k-1}_{n-m,m},t)+ m(K^{k-1}_{n-m-1,m-1},t-1),
\end{equation*}
and
\begin{equation*}
m(K^k_{n-1,1},t) = m(K^{k-1}_{n-1,1},t)+ m(K_{n-2},t-1).
\end{equation*}
By the induction hypothesis we obtain that
$m(K^{k-1}_{n-m,m},t)\leq m(K^{k-1}_{n-1,1},t)$, and by Lemma \ref{subgraph},
$m(K^{k-1}_{n-m-1,m-1},t-1) \leq m(K_{n-2},t-1)$. Thus the result
follows. \qed
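Lemma \ref{twopartitionc} can likewise be verified by brute force for small parameters. In the Python sketch below (ours, illustrative only; the construction of $K^k_{n-1,1}$ as $K_{n-1}$ together with one additional vertex joined to $k$ of its vertices follows Fig. 1), the matching numbers of $K^k_{n-m,m}$ and $K^k_{n-1,1}$ are compared for $n=8$ and $k=1,2,3$.
\begin{verbatim}
from itertools import combinations

def matchings(edges, t):
    cnt = 0
    for sub in combinations(edges, t):
        verts = [v for e in sub for v in e]
        if len(verts) == len(set(verts)):
            cnt += 1
    return cnt

def complete(vs):
    return [frozenset(p) for p in combinations(vs, 2)]

def K_nm_m(n, m, k):
    # K_{n-m} on {0,...,n-m-1}, K_m on {n-m,...,n-1}, plus k independent edges
    E = complete(range(n - m)) + complete(range(n - m, n))
    return E + [frozenset({i, n - m + i}) for i in range(k)]

def K_n1_1(n, k):
    # K_{n-1} on {0,...,n-2} plus one extra vertex joined to k vertices (cf. Fig. 1)
    return complete(range(n - 1)) + [frozenset({i, n - 1}) for i in range(k)]

n = 8
for k in range(1, 4):
    for m in range(max(k, 2), n // 2 + 1):
        A, B = K_nm_m(n, m, k), K_n1_1(n, k)
        assert all(matchings(A, t) <= matchings(B, t) for t in range(n // 2 + 1))
print("inequality verified for n = 8 and k = 1, 2, 3")
\end{verbatim}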
Combining Lemmas \ref{compare edge} and \ref{twopartitionc}, we
directly obtain the following result.
\begin{cor}\label{compare two graphs}
Suppose $\max\{k, 2\} \leq m\leq \lfloor \frac{n}{2}\rfloor$. Then
$K^k_{n-m,m}\prec K^k_{n-1,1}.$
\end{cor}
\begin{thm}\label{mainresult}
Let $G$ be a graph in $\mathcal {G}_{n,k}$. Then $ME(G)\leq
ME(K^k_{n-1,1})$. The equality holds if and only if $G\cong
K^k_{n-1,1}$.
\end{thm}
\noindent {\it Proof.} Notice that $K^k_{n-1,1} \in \mathcal {G}_{n,k}$. Let $G\ncong
K^k_{n-1,1}$ be a graph in $\mathcal {G}_{n,k}$. It suffices to show
that $G\prec K^k_{n-1,1}$. If $G$ has a trivial $k$-edge cut, then
we have $G\prec K_{n-1,1}^k$ by Lemma \ref{trivial-edge-cut}.
Otherwise, by Lemma \ref{without trivial-edge-cut} and Corollary
\ref{compare two graphs}, we obtain that $G\prec K_{n-1,1}^k$ again.
The proof is thus complete. \qed
By the proof of Theorem \ref{mainresult} and the definition of the Hosoya index, we obtain the following result on the Hosoya index.
\begin{thm}
Let $G$ be a graph in $\mathcal {G}_{n,k}$. Then $Z(G)\leq
Z(K^k_{n-1,1})$. The equality holds if and only if $G\cong
K^k_{n-1,1}$.
\end{thm}
\end{document}
\begin{document}
\title{Some remarks on the monotonicity\ of primary matrix functions on the set of symmetric matrices}
\begin{abstract}
This note contains some observations on primary matrix functions and different notions of monotonicity with relevance towards constitutive relations in nonlinear elasticity. Focussing on primary matrix functions on the set of symmetric matrices, we discuss and compare different criteria for monotonicity. The demonstrated results are particularly applicable to computations involving the \emph{true-stress-true-strain} monotonicity condition, a constitutive inequality recently introduced in an Arch.\ Appl.\ Mech.\ article by C.S. Jog and K.D. Patil. We also clarify a statement by Jog and Patil from the same article which could be misinterpreted.
\end{abstract}
\tableofcontents
\section{Preliminaries}
This note has been inspired by our reading of Jog's and Patil's interesting work on elastic stability \cite{Jog2013} which is full of new ideas and insights, notably the inspiring introduction of the \emph{true-stress-true-strain} monotonicity condition (c.f. \cite{NeffGhibaLankeit})
\begin{align}
&\innerproduct{\sigma(X)-\sigma(Y),\, X-Y}\geq 0\quad \forall X,Y\in\Sym(3)\,, \tag{TSTS-M$^+$}\label{eq:jogInequality}\\[2mm]
&\sigma(\log V) = \tel{\det V}\cdot\tau(\log V) = e^{-\tr(\log V)}\cdot\underbrace{\partial_{\log V}\, W(\log V)}_{=:\tau(\log V)}\,,\nonumber
\end{align}
where $\sigma$ is the \emph{Cauchy-stress} (or \emph{true stress}) tensor considered as a function of the \emph{logarithmic} (or \emph{true}) \emph{strain} $\log V$,\: $V=\sqrt{FF^T}$ is the \emph{left Biot-stretch} tensor and \[\innerproduct{M,N}:=\tr(M^TN)=\sum_{i,j=1}^n M_{i,j}N_{i,j}\] denotes the canonical inner product on $\mathbb{R}nn$.
Inequality \eqref{eq:jogInequality}, which can also be stated as
\begin{equation}
\sym\,\pdd{\sigma(\log V)}{\log V} \quad \text{ is positive definite,}
\end{equation}
was originally used by Jog and Patil \cite{Jog2013} to characterize material instabilities in elastic materials.
While inequality \eqref{eq:jogInequality} is not fulfilled by the stress response induced by the \emph{isotropic Hencky energy} \cite{Anand79,Anand86,Hencky1929,henckyTranslation,Neff_Eidel_Osterbrink_2013}
\[
W_{\textrm{H}} = \mu\,\norm{\dev_n \log V}^2 \,+\, \frac\kappa2 \, [\tr(\log V)]^2
\]
with the shear modulus $\mu$ and the bulk modulus $\kappa$, the energy function
\begin{equation}
W = \frac{\mu}{k} \, e^{k\,\norm{\log V}^2} \,+\, \frac{\lambda}{2\,\hat{k}} \, e^{\hat{k}\,[\tr(\log V)]^2}\,, \qquad k>\frac38\,,\; \hat{k}>\frac18\,, \label{eq:TSTSenergy}
\end{equation}
which approximates the Hencky energy for sufficiently small strains, satisfies \eqref{eq:jogInequality} on all of $\PSym(n)$ \cite[Corollary 4.1]{NeffGhibaLankeit}; here, $\tr X = \sum_{i=1}^n X_{i,i}$ is the trace of $X\in\mathbb{R}nn$, $V=\sqrt{FF^T}$ is the left Biot-stretch tensor, $\norm{X} = \sqrt{\tr X^TX}$ denotes the Frobenius matrix norm, $\dev_n X = X-\frac{\tr X}{n} \, {1\!\!\!\:1 } $ is the deviatoric part of $X$ and $\mu,\lambda$ are the two Lam\'e constants. Furthermore, another variant recently introduced as the \emph{exponentiated Hencky energy} \cite{NeffGhibaLankeit,NeffGhibaExpHenckyPart3,NeffGhibaExpHenckyPart2}
\[
W_{\textrm{eH}} = \frac{\mu}{k} \, e^{k\,\norm{\dev_n \log V}^2} \,+\, \frac{\kappa}{2\,\hat{k}} \, e^{\hat{k}\,[\tr(\log V)]^2}
\]
with dimensionless parameters $k>\frac13$ and $\hat{k}>\frac18$ fulfils \eqref{eq:jogInequality} on the conical \enquote{elastic domain}
\[
\mathcal{E} = \left\{ V\in\PSym(n) : \norm{\dev \log V}^2 \leq \frac23\, \text{\boldmath$\displaystyle\mathbf{\sigma}$}_{\mathrm{y}}^2 \right\}
\]
for a given yield stress $\text{\boldmath$\mathbf{\sigma}$}_{\mathrm{y}}$ \cite[Remark 4.1]{NeffGhibaLankeit}. For other well-known energy functions like Neo-Hooke, Mooney-Rivlin or the Ogden energy, \eqref{eq:jogInequality} is not satisfied. Until \eqref{eq:TSTSenergy} it was not even clear whether there exists an isotropic hyperelastic formulation satisfying \eqref{eq:jogInequality} at all.
We believe that the true-stress-true-strain monotonicity condition has the potential to greatly advance the subject of constitutive requirements in nonlinear elasticity.
Therefore, we find it apt to shed some light on different notions of monotonicity and their interconnections which arise in nonlinear elasticity in general as well as in computations for checking inequality \eqref{eq:jogInequality} in particular.
Since many of the stress tensors in nonlinear elasticity are symmetric, we consider in the following matrix functions mapping a convex subset of $\Sym(n)$ to the set $\Sym(n)$ of symmetric matrices. Of particular interest is the monotonicity of the principal matrix logarithm $\log$ on the set $\PSym(n)$ of positive definite matrices.
\subsection{A simple observation on monotonicity}
Let $V$ be a finite-dimensional Hilbert space with the inner product $\innerproduct{\cdot,\cdot}$ and let $M\subset V$ be a convex open subset of $V$. A function $f:M\to V$ is called \emph{monotone} (or \emph{Hilbert space monotone}) on $M$ if \[\innerproduct{f(A)-f(B),\, A-B}\geq 0\] for all $A,B\in M$, and it is called \emph{strictly monotone} if \[\innerproduct{f(A)-f(B),\, A-B} > 0\] for all $A\neq B\in M$. It is well known that a function $f\in C^1(M,V)$ is monotone on $M$ if and only if \[\innerproduct{Df[A].H,\,H}\geq 0\] for all $H\in V$, i.e.\ if and only if the Fr\'echet derivative $Df[A]$ is positive semi-definite for all $A\in M$, and it is strictly monotone on $M$ if
\begin{equation}
\innerproduct{Df[A].H,\,H}> 0 \label{eq:strongStrictPositiveDefiniteness}
\end{equation}
for all $A\in M$ and all $H\in V\setminus\{0\}$, i.e.\ if $Df[A]$ is positive definite for all $A\in M$. Note, however, that \eqref{eq:strongStrictPositiveDefiniteness} is not a necessary condition for strict monotonicity.
The following lemma shows that for a continuously differentiable function $f$ on a convex set whose derivative $Df$ is self-adjoint and invertible everywhere, the positive definiteness of $Df$ in a single point is sufficient for $f$ to be strictly monotone everywhere.
\begin{lemma}
\label{lemma:posDefSinglePoint}
Let $M\subset V$ be a convex open subset of $V$, and let $f\in C^1(M,V)$ satisfy
\begin{alignat*}{3}
&\text{i)} && \quad\exists\:A_0\in M: \quad &&Df[A_0]\text{ is \emph{positive definite},}\\
&\text{ii)} && \quad\forall\:A\in M: \quad &&Df[A]\text{ is \emph{invertible} and \emph{self-adjoint}.}
\end{alignat*}
Then $\innerproduct{Df[A].H,\, H} > 0$ for all $H\in V$ and thus $f$ is strictly monotone on $M$.
\end{lemma}
\begin{proof}
Assume that $f$ is not monotone on $M$. Then there exists $A_1\in M$ such that $Df[A_1]$ is not positive semi-definite. Since $f$ is continuously differentiable, the function
\begin{equation*}
\varphi: M\to\mathbb{R}:\quad \varphi(A) = \lambda_{\min}(Df[A])
\end{equation*}
mapping $A$ to the smallest eigenvalue of $Df[A]$ is continuous on $M$; note that the mapping of a matrix to its smallest eigenvalue is continuous on a set of self-adjoint tensors.\\
The set $M$ is convex (and thus connected) by assumption, hence we can choose a curve $\gamma\in C^1([0,1];M)$ with $\gamma(0)=A_0$, $\gamma(1)=A_1$ and obtain
\begin{align*}
\varphi(\gamma(0)) &\:=\: \lambda_{\min}(Df[A_0]) \:>\: 0\,,\nonumber \\
\varphi(\gamma(1)) &\:=\: \lambda_{\min}(Df[A_1]) \:\leq\: 0\,.
\end{align*}
Thus there exists $a\in(0,1]$ with $\varphi(\gamma(a))=0$, according to the intermediate value theorem. But then $0$ is an eigenvalue of $Df[\gamma(a)]$ and hence $Df[\gamma(a)]$ is not invertible, contradicting ii). Hence $f$ is monotone on $M$, i.e.\ $Df[A]$ is positive semi-definite for every $A\in M$. Since $Df[A]$ is moreover invertible, $0$ cannot be an eigenvalue of $Df[A]$, so $Df[A]$ is in fact positive definite for every $A\in M$ and $f$ is strictly monotone on $M$.
\end{proof}
\begin{remark}
Note that while the proof requires $M$ to be convex in order to show monotonicity, connectedness of $M$ is sufficient to show that $Df$ is positive definite everywhere.
\end{remark}
\begin{remark}
In the one-dimensional case, Lemma \ref{lemma:posDefSinglePoint} simply states the fact that for a continuously differentiable function $f$ on $\mathbb{R}$ it follows from $f' \neq 0$ everywhere and $f'(a_0)>0$ for some $a_0\in\mathbb{R}$ that $f'>0$ everywhere on $\mathbb{R}$.
\end{remark}
\section{Monotonicity of primary matrix functions}
In this section we consider a \emph{primary matrix function} $f$ on the set $\Sym(n)$ of symmetric matrices. Such a function is defined as follows\footnote{For a more general definition of primary matrix functions for non-symmetric arguments we refer to \cite[Ch. 6.2]{horn1994topics}.}:
Let $I$ be an open interval in $\mathbb{R}$ and let $f\in C^1(I)$. We denote\footnote{Note that $S_\mathbb{R}=\Sym(n)$ and $S_{\mathbb{R}^+}=\PSym(n)$.} by $S_I$ the set of symmetric matrices with no eigenvalues outside $I$:
\[S_I := \{M\in\Sym(n) \,|\, \lambda(M)\subset I\}\,,\]
where $\lambda(M)\in\mathbb{R}n$ is the ordered vector of the $n$ (not necessarily distinct) eigenvalues of $M$.
Then the primary matrix function $f:S_I\to\Sym(n)$ is defined by
\[
f(A) \;=\; f(Q^T \diag(\lambda_1,\dotsc,\lambda_n) \,Q) \;=\; Q^T \diag(f(\lambda_1),\dotsc,f(\lambda_n))\,Q \;=\; Q^T \emptydmatr{f(\lambda_1)}{\ddots}{f(\lambda_n)}\,Q\,,
\]
where $A = Q^T \diag(\lambda_1,\dotsc,\lambda_n) \,Q = Q^T \matrs{\lambda_1&&\\&\ddots&\\&&\lambda_n} Q$, $Q\in\OO(n)$, is any orthogonal diagonalization of $A$.
Furthermore we denote by $\innerproduct{X,Y}=\tr(X^TY)=\sum_{i,j=1}^n X_{i,j}Y_{i,j}$ the canonical inner product on $\Sym(n)$.
\subsection[Analytic primary matrix functions on $\Sym(n)$ and $\PSym(n)$]{Analytic primary matrix functions on \boldmath$\Sym(n)$ and $\PSym(n)$}
For now we assume that $f\in\mathscr{H}(\mathbb{R})$, where $\mathscr{H}(\mathbb{R})$ is the set of analytic functions on $\mathbb{R}$. The more general case will be considered later on.\\
For readability reasons all lemmas, propositions and proofs will be stated for the case $f:\mathbb{R}\to\mathbb{R}$ and, correspondingly, $f:\Sym(n)\to\Sym(n)$. The restriction to the set of positive definite matrices (or even, for some open interval $I\subset\mathbb{R}$, the (convex) set $S_I$ of symmetric matrices $A$ with $\lambda(A)\subset I$) allows for nearly identical proofs.
The following lemma is stated in \cite{mathias1994} in a more general form. The proof given there is based on the expansion of $f$ into a matrix power series: observe for example that, for $f(A)=A^2$,
\[
(A+H)^2 = A^2 + AH + HA + H^2 \quad \mathbb{R}ightarrow \quad Df[A].H = AH+HA
\]
and hence
\[
\innerproduct{Df[A].H,\, \widetilde H} = \innerproduct{AH+HA,\, \widetilde H} = \innerproduct{AH,\, \widetilde H} + \innerproduct{HA,\, \widetilde H} = \innerproduct{H,\, A\widetilde H+\widetilde H A} = \innerproduct{H,\, Df[A].\widetilde H}
\]
for $A,H,\widetilde H\in\Sym(n)$, thus $Df[A]$ is self-adjoint with respect to the canonical inner product on $\mathbb{R}nn$. Similarly, the derivative of $A\mapsto A^k$ is self-adjoint for all $k\in\mathbb{N}$, from which one can show that the derivative of an analytic matrix function $f(A) = \sum_{k=1}^\infty \alpha_k \cdot A^k$ is self-adjoint as well.
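As a quick numerical illustration of this self-adjointness (ours, not part of the proof; NumPy is assumed), consider $f(A)=A^2$ with $Df[A].H=AH+HA$:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

def rand_sym(n):
    X = rng.standard_normal((n, n))
    return (X + X.T) / 2

n = 5
A, H, Ht = rand_sym(n), rand_sym(n), rand_sym(n)

DfH  = A @ H  + H  @ A      # Df[A].H  for f(A) = A^2
DfHt = A @ Ht + Ht @ A      # Df[A].Ht

lhs = np.trace(DfH.T @ Ht)  # <Df[A].H, Ht>
rhs = np.trace(H.T @ DfHt)  # <H, Df[A].Ht>
print(abs(lhs - rhs))       # numerically zero
\end{verbatim}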
\begin{lemma}
\label{lemma:primarySelfAdjoint}
Let $f\in \mathscr{H}(\mathbb{R})$. Then the derivative of $f:\Sym(n)\to\Sym(n)$ is self-adjoint with respect to the canonical inner product on $\Sym(n)$:
\begin{equation}
\innerproduct{Df[A].H,\,\widetilde H} = \innerproduct{H,\,Df[A].\widetilde H} \quad \forall A,H,\widetilde H\in\Sym(n)\,.
\end{equation}
\end{lemma}
\begin{proof}
We use an integral formula given in \cite[(6.6.2)]{horn1994topics}:
\[
Df[A].H = \intfrac \int_\Gamma f(z) (z {1\!\!\!\:1 } -A)^{-1} \,H\, (z {1\!\!\!\:1 } -A)^{-1}\,\mathrm{dz}
\]
for $A\in\Sym(n)$, where $\Gamma$ is a closed curve in $\mathbb{C}$ such that every eigenvalue of $A$ has winding number $1$ with respect to $\Gamma$.\\
For $H,\widetilde H\in\Sym(n)$ we compute
\begin{align}
\innerproduct{Df[A].H,\,\widetilde H} &= \innerproduct{\intfrac\int_\Gamma f(z)\,(z {1\!\!\!\:1 } -A)^{-1} \,H\, (z {1\!\!\!\:1 } -A)^{-1} \, \mathrm{dz},\: \widetilde H}\nonumber \\
&= \intfrac\int_\Gamma f(z)\,\cproduct{(z {1\!\!\!\:1 } -A)^{-1} \,H\, (z {1\!\!\!\:1 } -A)^{-1},\: \widetilde H}\,\mathrm{dz}\nonumber \\
&= \intfrac\int_\Gamma f(z)\,\cproduct{H,\: (z {1\!\!\!\:1 } -A)^{-*} \,\widetilde H\, (z {1\!\!\!\:1 } -A)^{-*}}\,\mathrm{dz}\nonumber \\
&= \intfrac\int_\Gamma f(z)\,\cproduct{\Big((z {1\!\!\!\:1 } -A)^{-*} \,\widetilde H\, (z {1\!\!\!\:1 } -A)^{-*}\Big)^*,\:H^*}\,\mathrm{dz}\label{eq:integralOne}\\
&= \intfrac\int_\Gamma f(z)\,\cproduct{(z {1\!\!\!\:1 } -A)^{-T} \,\widetilde H\, (z {1\!\!\!\:1 } -A)^{-T},\:H}\,\mathrm{dz}\label{eq:integralTwo}\\
&= \intfrac\int_\Gamma f(z)\,\cproduct{(z {1\!\!\!\:1 } -A)^{-1} \,\widetilde H\, (z {1\!\!\!\:1 } -A)^{-1},\:H}\,\mathrm{dz}\label{eq:integralThree}\\
&= \cproduct{\intfrac\int_\Gamma f(z)\,(z {1\!\!\!\:1 } -A)^{-1} \,\widetilde H\, (z {1\!\!\!\:1 } -A)^{-1}\,\mathrm{dz},\:H} = \innerproduct{Df[A].\widetilde H,\:H} = \innerproduct{H,\:Df[A].\widetilde H}\,,\nonumber
\end{align}
where equality of \eqref{eq:integralOne} and \eqref{eq:integralTwo} holds due to $H$ and $\widetilde H$ being real and symmetric while the symmetry of $(z {1\!\!\!\:1 } -A)$ implies \eqref{eq:integralThree}.
\end{proof}
This lemma can now be used to obtain some interesting properties of primary matrix functions and their derivatives.
\begin{proposition}
\label{prop:specmonToHilmonAna}
Let $f\in \mathscr{H}(\mathbb{R})$ with $f'(t)>0$ for all $t\in\mathbb{R}$. Then the primary matrix function $f:\Sym(n)\to\Sym(n)$ is Hilbert space monotone.
\end{proposition}
\begin{proof}
According to Lemma \ref{lemma:primarySelfAdjoint}, the derivative $Df[A]$ is self-adjoint with respect to the inner product $\innerproduct{\cdot,\cdot}$ for every $A\in\Sym(n)$. Thus we can apply Lemma \ref{lemma:posDefSinglePoint} if we show that $Df[A]$ is invertible everywhere and positive definite at a single point.\\
Let $0\neq H\in\Sym(n),\: H=Q^T\diag(h_1,\dotsc,h_n)Q$. Then the derivative of $f$ at $ {1\!\!\!\:1 } \in\Sym(n)$ is
\begin{align*}
Df[ {1\!\!\!\:1 } ].H &= \lim_{t\to 0}\:\tel{t}\:[f( {1\!\!\!\:1 } +t\,H)-f( {1\!\!\!\:1 } )]\\
&= \lim_{t\to 0}\:\tel{t}\:Q^T[f( {1\!\!\!\:1 } +\diag(th_1,\dotsc,th_n))-f(1) {1\!\!\!\:1 } ]Q\\
&= \lim_{t\to 0}\:\tel{t}\:Q^T[f(\diag(1+th_1,\dotsc,1+th_n))-\diag(f(1),\dotsc,f(1))]Q\\
&= \lim_{t\to 0}\:\tel{t}\:Q^T[\diag(f(1+th_1)-f(1),\dotsc,f(1+th_n)-f(1))]Q\\
&= Q^T\diag(h_1f'(1),\dotsc,h_nf'(1))\,Q \;=\; f'(1)\,H\,,
\end{align*}
thus
\begin{align*}
\innerproduct{Df[ {1\!\!\!\:1 } ].H,\,H} = f'(1)\,\innerproduct{H,\,H} = f'(1)\,\norm{H}^2 \:>\:0
\end{align*}
because $f'>0$ and $H\neq0$ by assumption.\\
To see that $Df[A]$ is invertible for every $A\in\Sym(n)$ we simply note that $f$ is invertible on $\mathbb{R}$ (being strictly increasing) and the differentiable primary matrix function $f^{-1}:f(\Sym(n))\to\Sym(n)$ is the inverse of $f$ on $\Sym(n)$. Then for all $A\in\Sym(n)$, the chain rule gives $D(f^{-1})[f(A)]\circ Df[A]=\operatorname{id}$, so the linear mapping $Df[A]$ must be invertible as well.
\end{proof}
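Proposition \ref{prop:specmonToHilmonAna} is easy to probe numerically. The following sketch (ours, purely illustrative; NumPy is assumed) evaluates the primary matrix function induced by the analytic function $f(t)=t+t^3$, which satisfies $f'>0$, via diagonalization and checks the monotonicity product on random pairs of symmetric matrices:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)

def rand_sym(n):
    X = rng.standard_normal((n, n))
    return (X + X.T) / 2

def primary(f, M):
    # primary matrix function of a symmetric matrix via M = Q diag(lam) Q^T
    lam, Q = np.linalg.eigh(M)
    return Q @ np.diag(f(lam)) @ Q.T

f = lambda t: t + t**3      # f'(t) = 1 + 3 t^2 > 0 on all of R

n = 4
worst = np.inf
for _ in range(1000):
    A, B = rand_sym(n), rand_sym(n)
    worst = min(worst, np.trace((primary(f, A) - primary(f, B)) @ (A - B)))
print(worst)   # remains nonnegative, consistent with Hilbert space monotonicity
\end{verbatim}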
The next lemma shows that every analytic primary matrix function can be represented as the gradient field (differentiated with respect to $U$) of an isotropic energy function satisfying the \emph{Valanis-Landel hypothesis} \cite{valanis1967} of additive separation\footnote{The Valanis-Landel hypothesis was introduced by K.C. Valanis and R.F. Landel in 1967 as an assumption on the elastic energy potential of incompressible materials \cite{valanis1967}. Their hypothesis was later found to be in good agreement with the elastic behaviour of vulcanized rubber \cite{treloar1973elasticity}; D.F. Jones and L.R.G. Treloar concluded that \enquote{the hypothesis is valid over the range covered} in their experiments, \enquote{namely $\lambda=0.189\textendash2.62_5$} \cite{jones1975}.}:
\[
\boxed{W(U)=\sum_{i=1}^n F(\lambda_i(U))\,,}
\]
where $\lambda_i(U)$ is the $i$-th eigenvalue of $U$. This might be considered the \enquote{hidden assumption} underlying the theory of primary matrix functions.
\begin{proposition}
\label{prop:potentialAna}
Let $f\in \mathscr{H}(\mathbb{R})$. Then for any $F\in \mathscr{H}(\mathbb{R})$ with $F'=f$, the function
\begin{equation}
W: \Sym(n) \to \mathbb{R}\,, \quad W(A)=\sum_{i=1}^n F(\lambda_i(A)) = \tr F(A)
\end{equation}
is a \emph{potential} of $f:\Sym(n)\to\Sym(n)$, i.e.
\begin{equation}
DW[A] = f(A)
\end{equation}
or, more precisely,
\begin{equation*}
DW[A].H = \innerproduct{f(A),\,H}
\end{equation*}
for all $A,H\in\Sym(n)$.
\end{proposition}
\begin{proof}
Let $A,H\in\Sym(n)$. Since $DW[A].H$ is the partial derivative of $W$ in direction $H$ at the point $A$ we find
\begin{equation*}
DW[A].H = \lim_{t\to 0} \:\tel{t}\: [W(A+tH)-W(A)]\,.
\end{equation*}
We choose $Q\in\OO(n)$ such that $A=Q^TDQ$, where
$D=\diag(\lambda_1,\dotsc,\lambda_n)$,\: $\lambda_i$ denoting the eigenvalues of $A$, and compute
\begin{align*}
DW[A].H &= \lim_{t\to 0} \:\tel{t}\: [W(A+tH)-W(A)] = \lim_{t\to 0} \:\tel{t}\:[\tr F(A+tH) - \tr F(A)]\\
&= \lim_{t\to 0} \:\tel{t}\:\innerproduct{F(A+tH)-F(A),\, {1\!\!\!\:1 } } = \innerproduct{\lim_{t\to 0} \:\tel{t}\:[F(A+tH)-F(A)],\, {1\!\!\!\:1 } } = \innerproduct{DF[A].H,\, {1\!\!\!\:1 } }\,.
\end{align*}
According to Lemma \ref{lemma:primarySelfAdjoint}, the total derivative $DF[A]$ is self-adjoint with respect to $\innerproduct{\cdot,\cdot}$ and thus
\begin{equation*}
\innerproduct{DF[A].H,\, {1\!\!\!\:1 } } = \innerproduct{H,\,DF[A]. {1\!\!\!\:1 } }\,.
\end{equation*}
We find
\begin{align*}
DF[A]. {1\!\!\!\:1 } &= \lim_{t\to 0} \:\tel{t}\: [F(A+t\, {1\!\!\!\:1 } )-F(A)] \:=\: \lim_{t\to 0} \:\tel{t}\: [F(Q^TDQ+t\, {1\!\!\!\:1 } )-F(Q^TDQ)]\\
&= \lim_{t\to 0} \:\tel{t}\: [F(Q^T(D+t\, {1\!\!\!\:1 } )Q)-Q^TF(D)Q]\\
&= \lim_{t\to 0} \:\tel{t}\: [Q^TF(D+t\, {1\!\!\!\:1 } )Q-Q^TF(D)Q]\\
&= \lim_{t\to 0} \:\tel{t}\: [Q^TF(\diag(\lambda_1,\dotsc,\lambda_n)+t\, {1\!\!\!\:1 } )Q-Q^TF(\diag(\lambda_1,\dotsc,\lambda_n))Q]\\
&= \lim_{t\to 0} \:\tel{t}\: Q^T[F(\diag(\lambda_1+t,\dotsc,\lambda_n+t))-F(\diag(\lambda_1,\dotsc,\lambda_n))]Q\\
&= \lim_{t\to 0} \:\tel{t}\: Q^T\Big[\diag\Big(F(\lambda_1+t)-F(\lambda_1),\dotsc,F(\lambda_n+t)-F(\lambda_n)\Big)\Big]Q\\
&= Q^T\Big[\diag\Big(\lim_{t\to 0} \:\tel{t}\: [F(\lambda_1+t)-F(\lambda_1)],\dotsc,\lim_{t\to 0} \:\tel{t}\: [F(\lambda_n+t)-F(\lambda_n)]\Big)\Big]Q\\
&= Q^T\Big[\diag\Big(F'(\lambda_1),\dotsc,F'(\lambda_n)\Big)\Big]Q = Q^Tf(D)\,Q \:=\: f(A)
\end{align*}
and therefore
\[
DW[A].H = \innerproduct{DF[A].H,\, {1\!\!\!\:1 } } = \innerproduct{H,\,DF[A]. {1\!\!\!\:1 } } = \innerproduct{H,\,f(A)} = \innerproduct{f(A),\,H}\,.\qedhere
\]
\end{proof}
\begin{remark}[Pseudo-potential]
Using the fact that $Df$ is self-adjoint everywhere, we can also obtain the potential directly by using \cite[Lemma 3.28]{NeffCISMnotes}. For $A=Q^TDQ$, $D=\diag(\lambda_1,\dotsc,\lambda_n)$, $Q\in\OO(n)$ we find
\begin{align*}
W(A) &= \int_0^1 \innerproduct{f(t\,A),\,A}\,\mathrm{dt} = \int_0^1 \innerproduct{Q^Tf(t\,D)Q,\,Q^TDQ}\,\mathrm{dt}\\
&= \int_0^1 \innerproduct{f(t\,D),\,D}\,\mathrm{dt} = \int_0^1 \tr \matr{f(t\,\lambda_1)\lambda_1&&\\&\ddots&\\&&f(t\,\lambda_n)\lambda_n}\,\mathrm{dt}\\
&= \sum_{i=1}^n \int_0^1 f(\lambda_i\,t)\,\lambda_i \,\mathrm{dt} = \sum_{i=1}^n \int_0^{\lambda_i} f(t) \,\mathrm{dt} \quad=\quad \sum_{i=1}^n F(\lambda_i) + C
\end{align*}
with $C=-n\, F(0)$.
\end{remark}
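The potential property can be checked by a finite-difference experiment. The sketch below (ours, illustrative only; NumPy is assumed) uses $F(t)=\frac{t^4}{4}$ and $f(t)=t^3$ and compares the directional derivative of $W(A)=\tr F(A)$ with $\innerproduct{f(A),\,H}$:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)

def rand_sym(n):
    X = rng.standard_normal((n, n))
    return (X + X.T) / 2

def primary(g, M):
    lam, Q = np.linalg.eigh(M)
    return Q @ np.diag(g(lam)) @ Q.T

F = lambda t: t**4 / 4      # antiderivative
f = lambda t: t**3          # f = F'

n = 4
A, H = rand_sym(n), rand_sym(n)
W = lambda M: np.trace(primary(F, M))       # W(A) = tr F(A) = sum_i F(lambda_i)

eps = 1e-6
DW_H = (W(A + eps * H) - W(A - eps * H)) / (2 * eps)   # central difference for DW[A].H
print(abs(DW_H - np.trace(primary(f, A) @ H)))          # small (finite-difference error)
\end{verbatim}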
\subsection{The general case}
In this section we no longer require the function $f$ to be analytic. While the results are almost identical to those of the previous subsection, the more general proofs require them to be stated in a different order.\\
The first proposition shows that every continuously differentiable primary matrix function can be represented as the gradient field (differentiated with respect to $U$) of an isotropic energy function satisfying the \emph{Valanis-Landel hypothesis}.
\begin{proposition}
\label{prop:primaryPotential}
Let $f\in C^1(I)$. Then for $F\in C^2(I)$ with $F'=f$, the function
\begin{equation}
W: S_I \to \mathbb{R}\,, \quad W(A)=\sum_{i=1}^n F(\lambda_i(A)) = \tr F(A)
\end{equation}
is a \emph{potential} of $f:S_I\to\Sym(n)$, i.e.
\begin{equation}
DW[A] = f(A)
\end{equation}
or, more precisely,
\begin{equation*}
DW[A].H = \innerproduct{f(A),\,H}
\end{equation*}
for all $A\in S_I,\:H\in\Sym(n)$.
\end{proposition}
\begin{proof}
This is a corollary to Theorem 1.1 in \cite{Lewis96derivativesof}, where it is shown that any \emph{spectral function} of the form $W(A) = g(\lambda(A))$ with a symmetric function $g\in C^1(I^n)$ is differentiable with
\[
DW[A] = Q^T\diag(\nabla g(\lambda))Q
\]
for all $A\in S_I$ with $A=Q^T\diag(\lambda_1,\dotsc,\lambda_n)Q$. By putting $g(\lambda) = \sum_{i=1}^n F(\lambda_i)$ we find $\nabla g(\lambda) = (F'(\lambda_1),\dotsc,F'(\lambda_n))$, and since $F'=f$ we obtain
\[
DW[A] = Q^T\diag(f(\lambda_1),\dotsc,f(\lambda_n))Q = f(A)\,.\qedhere
\]
\end{proof}
The next lemma is due to Brown et al. \cite[Theorem 2.1]{brown2000calculus}. The proof can be found there.
\begin{lemma}
\label{lemma:primaryDifferentiability}
\pushQED{\qed}
Let $f\in C^1(I)$. Then the primary matrix function $f:S_I\to\Sym(n)$ is continuously differentiable on $S_I$.
\popQED
\end{lemma}
According to Proposition \ref{prop:primaryPotential}, every primary matrix function $f$ on $S_I$ corresponding to $f\in C^1(I)$ has a potential. Thus the derivative of $f$ on $S_I$, which exists due to Lemma \ref{lemma:primaryDifferentiability}, is self-adjoint according to Schwarz's theorem.
\begin{proposition}
\label{prop:primarySelfAdjoint}
\pushQED{\qed}
Let $f\in C^1(I)$. Then the primary matrix function $f:S_I\to\Sym(n)$ is differentiable on $S_I$ and its derivative $Df$ is self-adjoint with respect to the canonical inner product on $\Sym(n)$:
\begin{equation*}
\innerproduct{Df[A].H,\,\widetilde H} = \innerproduct{H,\,Df[A].\widetilde H} \quad \forall A\in S_I,\;H,\widetilde H\in\Sym(n)\,.\qedhere
\end{equation*}
\popQED
\end{proposition}
Proposition \ref{prop:primaryPotential} can also be used to show how the monotonicity of $f$ on $I\subset\mathbb{R}$ relates to the Hilbert space monotonicity of the primary matrix function $f$ on $S_I\subset\Sym(n)$.
\begin{proposition}
\label{prop:specmonToHilmon}
Let $f\in C^1(I)$. Then the primary matrix function $f:S_I\to\Sym(n)$ is Hilbert space monotone if and only if $f$ is monotone on $I$.
\end{proposition}
\begin{proof}
Choose an antiderivative $F\in C^2(I)$ of $f$ and define
\[W: S_I \to \mathbb{R}\,, \quad W(A)=\sum_{i=1}^n F(\lambda_i(A))\,.\]
According to Proposition \ref{prop:primaryPotential}, $W$ is a potential of $f$ on $S_I$, i.e.\ $DW[A]=f(A)$ for all $A\in S_I$. But then $f=DW$ is monotone on $S_I$ if and only if $W$ is convex on $S_I$. According to an extension of the Chandler Davis Theorem \cite[Corollary 2]{davis1957}, this is the case if and only if the function $\lambda\mapsto\sum_{i=1}^n F(\lambda_i)$ is convex on $I^n$, which in turn is the case if and only if $f=F'$ is monotone on $I$.
\end{proof}
\begin{remark}
It is possible to give a proof based on the eigenvalue formula given in \cite[Theorem 2.1]{brown2000calculus}. This might be useful to distinguish monotonicity and strict monotonicity as well as positive definiteness and positive semi-definiteness of $Df$.
\end{remark}
\begin{remark}
A very similar result is also given by Norris in \cite{norris2008eulerian} and \cite[Lemma 4.1]{norris2008higherDerivatives}, where it is shown that $Df[A]$ is self-adjoint and positive definite for all $A\in \PSym(n)$ if the function $f:\mathbb{R}^+\to\mathbb{R}$ has the following properties:
\[
f\in C^\infty(\mathbb{R}^+)\,, \quad f(1)=0\,,\quad f'(1)=1\quad \text{ and } \quad f'(t)>0 \quad \text{ for all } t\in\mathbb{R}^+\,.
\]
Norris calls these functions \emph{strain measures}, based on a definition by Hill given in \cite[p. 459]{hill1970} and \cite[p. 14]{hill1978}, although Norris requires the derivative $f'$ to be strictly positive, whereas Hill admits functions which are simply monotone on $\mathbb{R}^+$ as well.
\end{remark}
\section{Additional remarks and applications}
\subsection{The exponential function and the logarithm}
Returning to the principal logarithm $\log$ on $\PSym(n)$ and its inverse, the matrix exponential $\exp$ on $\Sym(n)$, we find that Proposition \ref{prop:specmonToHilmon} immediately shows that $\log$ and $\exp$ are monotone. Furthermore, both functions are diffeomorphisms, hence their derivatives $D\log[P]$ and $D\exp[S]$ for $S\in\Sym(n),\:P\in\PSym(n)$ are invertible as well. Since the monotonicity implies that $D\log[P]$ and $D\exp[S]$ are positive semi-definite, they are therefore positive definite, thus $\log$ and $\exp$ are strictly monotone as well.\\
For these two functions we can also compute some of the aforementioned properties directly: using a representation of $D\exp$ given in \cite[Ch. 10.2]{higham2008}, we find
\begin{align*}
\innerproduct{D\exp[A].H,\,\widetilde H} &= \innerproduct{\int_0^1 \exp(sA)\,H\,\exp((1-s)A)\,\mathrm{ds},\,\widetilde H}\\
&= \int_0^1 \innerproduct{\exp(sA)\,H\,\exp((1-s)A),\,\widetilde H} \,\mathrm{ds} = \int_0^1 \innerproduct{H,\,\exp(sA)^T\,\widetilde H\,\exp((1-s)A)^T} \,\mathrm{ds}\\
&= \int_0^1 \innerproduct{H,\,\exp(sA)\,\widetilde H\,\exp((1-s)A)} \,\mathrm{ds} \:=\: \innerproduct{H,\,D\exp[A].\widetilde H}
\end{align*}
for $A,H,\widetilde H\in\Sym(n)$, showing that $D\exp[A]$ is self-adjoint, as well as
\begin{align*}
\innerproduct{D\exp[A].H,\,H} &= \innerproduct{\int_0^1 \exp(sA)\,H\,\exp((1-s)A)\,\mathrm{ds},\,H}\\
&= \int_0^1 \innerproduct{H\,\exp((1-s)A),\,\exp(sA)^T\,H} \,\mathrm{ds} = \int_0^1 \innerproduct{\underbrace{H\,\exp((1-s)A)\,H^T}_{\text{pos. semi-definite}},\:\underbrace{\exp(sA)}_{\in\PSym(n)}}\,\mathrm{ds} \:\geq\: 0\,,
\end{align*}
showing that $D\exp[A]$ is positive semi-definite.\\
For the matrix logarithm and $A\in\PSym(n),\:H,\widetilde H\in\Sym(n)$ we use, again
\footnote{A formula for the derivative $D\log[A].H$ in a direction $H$ for commuting $A$ and $H$ as well as some properties of derivatives of primary matrix functions in arbitrary directions can be found in much earlier works by H. Richter \cite{richter1949log,richter1950}; however, Richter did not give the more general formula used here.}
, a representation formula given in \cite[Ch. 11.2]{higham2008} to find
\begin{align*}
\innerproduct{D\log[A].H,\,\widetilde H} &= \innerproduct{\int_0^1 (t(A- {1\!\!\!\:1 } )+ {1\!\!\!\:1 } )^{-1}\,H\,(t(A- {1\!\!\!\:1 } )+ {1\!\!\!\:1 } )^{-1}\,\mathrm{dt},\,\widetilde H}\\
&= \int_0^1 \innerproduct{(t(A- {1\!\!\!\:1 } )+ {1\!\!\!\:1 } )^{-1}\,H\,(t(A- {1\!\!\!\:1 } )+ {1\!\!\!\:1 } )^{-1},\,\widetilde H}\,\mathrm{dt}\\
&= \int_0^1 \innerproduct{H,\,(t(A- {1\!\!\!\:1 } )+ {1\!\!\!\:1 } )^{-T}\,\widetilde H\,(t(A- {1\!\!\!\:1 } )+ {1\!\!\!\:1 } )^{-T}} \,\mathrm{dt}\\
&= \int_0^1 \innerproduct{H,\,(t(A- {1\!\!\!\:1 } )+ {1\!\!\!\:1 } )^{-1}\,\widetilde H\,(t(A- {1\!\!\!\:1 } )+ {1\!\!\!\:1 } )^{-1}} \,\mathrm{dt} \;=\; \innerproduct{H,\,D\log[A].\widetilde H}\,,
\end{align*}
showing that $D\log[A]$ is self-adjoint, as well as
\begin{align*}
\innerproduct{D\log[A].H,\,H} &= \innerproduct{\int_0^1 (t(A- {1\!\!\!\:1 } )+ {1\!\!\!\:1 } )^{-1}\,H\,(t(A- {1\!\!\!\:1 } )+ {1\!\!\!\:1 } )^{-1}\,\mathrm{dt},\,H}\\
&= \int_0^1 \innerproduct{H\,(t(A- {1\!\!\!\:1 } )+ {1\!\!\!\:1 } )^{-1},\,(t(A- {1\!\!\!\:1 } )+ {1\!\!\!\:1 } )^{-T}\,H} \,\mathrm{dt}\\
&= \int_0^1 \innerproduct{\underbrace{H\,(t(A- {1\!\!\!\:1 } )+ {1\!\!\!\:1 } )^{-1}\,H^T}_{\text{pos. semi-definite}},\:\underbrace{(t(A- {1\!\!\!\:1 } )+ {1\!\!\!\:1 } )^{-1}}_{\in\PSym(n)}}\,\mathrm{dt} \;\geq\; 0\,,
\end{align*}
showing that $D\log[A]$ is positive semi-definite.\\
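These properties of $D\exp$ and $D\log$ are straightforward to probe numerically. The following sketch (ours, not part of the argument; NumPy and SciPy's \texttt{expm} and \texttt{logm} are assumed) approximates the derivatives by central differences and checks self-adjointness and positive semi-definiteness on random symmetric directions:
\begin{verbatim}
import numpy as np
from scipy.linalg import expm, logm

rng = np.random.default_rng(3)
n = 4

def rand_sym():
    X = rng.standard_normal((n, n))
    return (X + X.T) / 2

A = rand_sym()                 # symmetric argument for exp
P = expm(rand_sym())           # positive definite argument for log
H, Ht = rand_sym(), rand_sym()
eps = 1e-6

def ddir(g, M, K):             # central-difference approximation of Dg[M].K
    return np.real((g(M + eps * K) - g(M - eps * K)) / (2 * eps))

# self-adjointness: <Dg[M].H, Ht> - <H, Dg[M].Ht> is numerically zero
print(np.trace(ddir(expm, A, H) @ Ht) - np.trace(H @ ddir(expm, A, Ht)))
print(np.trace(ddir(logm, P, H) @ Ht) - np.trace(H @ ddir(logm, P, Ht)))

# positive semi-definiteness: <Dg[M].H, H> is nonnegative
print(np.trace(ddir(expm, A, H) @ H), np.trace(ddir(logm, P, H) @ H))
\end{verbatim}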
Note, however, that the matrix exponential is not monotone on $\mathbb{R}^{n\times n}$ or $\GLp(n)$: for $\alpha\in\mathbb{R}$ we compute
\begin{align*}
&\hspace{-14mm}\innerproduct{\exp\matr{0&-\alpha\\\alpha&0} - \exp\matr{0&\alpha\\-\alpha&0},\: \matr{0&-\alpha\\\alpha&0} - \matr{0&\alpha\\-\alpha&0}}\\[2mm]
&= \innerproduct{\matr{\cos(\alpha)&-\sin(\alpha)\\\sin(\alpha)&\cos(\alpha)} - \matr{\cos(-\alpha)&-\sin(-\alpha)\\\sin(-\alpha)&\cos(-\alpha)},\: \matr{0&-2\alpha\\2\alpha&0}}\\[2mm]
&= \innerproduct{\matr{0&-2\sin(\alpha)\\2\sin(\alpha)&0},\: \matr{0&-2\alpha\\2\alpha&0}} \:=\: 8\,\alpha\,\sin(\alpha)
\end{align*}
and thus, for $\alpha = \frac{3\pi}{2}$,
\[
\innerproduct{\exp\matr{0&-\alpha\\\alpha&0} - \exp\matr{0&\alpha\\-\alpha&0},\: \matr{0&-\alpha\\\alpha&0} - \matr{0&\alpha\\-\alpha&0}} = 12\,\pi\,\sin\Big(\frac{3\pi}{2}\Big) = -12\,\pi \:<\: 0\,.
\]
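A direct numerical check of this computation (ours, illustrative; NumPy and SciPy are assumed) reproduces the value $8\,\alpha\,\sin(\alpha)=-12\pi$ for $\alpha=\frac{3\pi}{2}$:
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

alpha = 3 * np.pi / 2
A = np.array([[0.0, -alpha], [alpha, 0.0]])         # skew-symmetric generator
val = np.trace((expm(A) - expm(-A)).T @ (A - (-A)))
print(val, 8 * alpha * np.sin(alpha), -12 * np.pi)  # all three coincide
\end{verbatim}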
\subsection{Application to stress response functions in nonlinear elasticity}
We consider the \emph{Hencky constitutive model}, induced by the isotropic Hencky energy function
\[
W_{\textrm{H}} = \mu\,\norm{\dev_n \log V}^2 \,+\, \frac\kappa2 \, [\tr(\log V)]^2\,.
\]
In this constitutive model, the \emph{Kirchhoff stress} $\tau$ corresponding to the stretch $V$ is given by
\[
\tau = 2\,\mu\,\dev_n\log V \,+\, \kappa\tr(\log V) \, {1\!\!\!\:1 } \,.
\]
If $\kappa=\frac{2\mu}{n}$, this relation reduces to $\tau = 2\mu\log V$, thus the mapping $V\mapsto \tau(V)$ is strictly monotone on $\PSym(n)$ in this special case (also called the \emph{lateral contraction free} case). However, this monotonicity does not hold for arbitrary choices of $\mu,\kappa>0$. Moreover, the mapping $\log V \mapsto \tau$ of the \emph{true strain tensor} $\log V$ to the Kirchhoff stress $\tau$ is monotone (a property also called \emph{Hill's inequality} \cite{hill1970}), while the \emph{Cauchy stress response}
\[
V \mapsto \sigma = \frac{1}{\det V} \, \tau = \frac{1}{\det V} \, \left(2\,\mu\,\dev_n\log V \,+\, \kappa\,\tr(\log V) \, {1\!\!\!\:1 } \right)
\]
as well as the mapping
\[
\log V \mapsto \sigma = \frac{1}{e^{\tr (\log V)}} \, \left(2\,\mu\,\dev_n\log V \,+\, \kappa\,\tr(\log V) \, {1\!\!\!\:1 } \right)
\]
are not monotone, thus the Hencky model does not satisfy the true-stress-true-strain monotonicity condition \eqref{eq:jogInequality}.
\section{Different notions of monotonicity: a comparison}
We may distinguish three types of (strict) monotonicity:
\begin{itemize}
\item The \emph{Hilbert space monotonicity}
\begin{equation}
\innerproduct{f(B)-f(A),\: B-A} > 0 \qquad \forall A\neq B\in\Sym(n) \,,\label{eq:hilbertmon}\tag{H-mon}
\end{equation}
\item the \emph{operator monotonicity}
\begin{equation}
B-A \text{ positive definite } \quad\mathbb{R}ightarrow\quad f(B)-f(A) \text{ positive definite,} \label{eq:opmon}\tag{O-mon}
\end{equation}
\item the \emph{spectral monotonicity} (or monotonicity of $f$ on $\mathbb{R}$)
\begin{equation}
b > a \quad\mathbb{R}ightarrow\quad f(b)>f(a)\qquad \forall\: a,b\in\mathbb{R}\,.\label{eq:specmon}\tag{S-mon}
\end{equation}
\end{itemize}
Furthermore we consider the following condition on $f$:
\begin{equation}
\innerproduct{f(A+H)-f(A),\: H} > 0 \qquad \forall H\in\PSym(n)\,,\: A\in\Sym(n)\,.\label{eq:posdefCondition}\tag{P-mon}
\end{equation}
\begin{proposition}
\emph{Only} the following implications hold:
\begin{equation}
\eqref{eq:opmon} \mathbb{R}ightarrow \eqref{eq:specmon} \Leftrightarrow \eqref{eq:hilbertmon} \Leftrightarrow \eqref{eq:posdefCondition}\,.
\end{equation}
\end{proposition}
\begin{proof}~\\
$\eqref{eq:opmon} \mathbb{R}ightarrow \eqref{eq:posdefCondition}$: For given $H\in\PSym(n)$ choose $B=A+H$. Since $B-A=H$ is positive definite, \eqref{eq:opmon} implies that $f(B)-f(A)$ is positive definite as well. Thus \[\innerproduct{\underbrace{f(A+H)-f(A)}_{\in\PSym(n)},\: \underbrace{H}_{\in\PSym(n)}} > 0\,.\]
$\eqref{eq:posdefCondition} \mathbb{R}ightarrow \eqref{eq:specmon}$: Let $f$ satisfy condition \eqref{eq:posdefCondition}. Then, with $A=a\, {1\!\!\!\:1 } ,\,H=h\, {1\!\!\!\:1 } ,\,h\in\mathbb{R}^+$ we find
\begin{align*}
0 &< \innerproduct{f(a\, {1\!\!\!\:1 } +h\, {1\!\!\!\:1 } )-f(a\, {1\!\!\!\:1 } ),\: h\, {1\!\!\!\:1 } } = h\,\innerproduct{f((a+h)\, {1\!\!\!\:1 } )-f(a\, {1\!\!\!\:1 } ),\: {1\!\!\!\:1 } }\\
&= h\,\tr(f((a+h)\, {1\!\!\!\:1 } )-f(a\, {1\!\!\!\:1 } )) = h\,\tr(f(a+h)\, {1\!\!\!\:1 } -f(a)\, {1\!\!\!\:1 } ) \:=\; h\,n\,(f(a+h)-f(a))\,.
\end{align*}
For $a,b\in\mathbb{R}$ with $b>a$, choose $h=b-a$. Then $n(b-a)(f(b)-f(a))>0$ and thus $f(b)>f(a)$.\\
$\eqref{eq:specmon} \mathbb{R}ightarrow \eqref{eq:hilbertmon}$: Proposition \ref{prop:specmonToHilmon}\\
$\eqref{eq:hilbertmon} \mathbb{R}ightarrow \eqref{eq:posdefCondition}$: This implication is trivial; simply choose $B=A+H$.\\
To see that the operator monotonicity is not implied by the other conditions, consider the function $\PSym(n)\to\Sym(n),\; A\mapsto A^2$. While this function is monotone in the sense of \eqref{eq:hilbertmon} and \eqref{eq:specmon}, it is not operator monotone \cite[Example V.1.2]{bhatia1997matrix}.
\end{proof}
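For the squaring function the failure of operator monotonicity can be made concrete. The sketch below (ours, illustrative; NumPy is assumed) perturbs a classical $2\times 2$ example (cf.\ \cite{bhatia1997matrix}) so that $B-A$ is strictly positive definite while $B^2-A^2$ is not, and also evaluates the Hilbert space monotonicity product for this pair:
\begin{verbatim}
import numpy as np

A = np.array([[1.0, 1.0], [1.0, 1.0]]) + 0.01 * np.eye(2)   # positive definite
B = np.array([[2.0, 1.0], [1.0, 1.0]]) + 0.02 * np.eye(2)   # positive definite

print(np.linalg.eigvalsh(B - A))            # both eigenvalues positive
print(np.linalg.eigvalsh(B @ B - A @ A))    # one eigenvalue negative: not operator monotone
print(np.trace((B @ B - A @ A) @ (B - A)))  # positive: no conflict with (H-mon)
\end{verbatim}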
\begin{remark}
If $f$ is not a primary matrix function given through a scalar function on the spectrum (and \eqref{eq:specmon} is therefore not well defined), then the only generally true implications are
\begin{equation}
\eqref{eq:opmon} \mathbb{R}ightarrow \eqref{eq:posdefCondition} \Leftarrow \eqref{eq:hilbertmon}\,.
\end{equation}
To see that operator monotonicity does not imply Hilbert space monotonicity in this general case, consider the function
\begin{equation}
g: \PSym(n)\to\PSym(n), \quad C\mapsto \det(C)\cdot {1\!\!\!\:1 } \,. \label{eq:opMonNotHMonExample}
\end{equation}
For $C,H\in\PSym(n)$ we find
\[
Dg[C].H \:=\: \dd{t}\eval{\det(C+tH)}{t=0} \cdot {1\!\!\!\:1 } \:=\: \underbrace{\det(C)}_{>0\vphantom{\PSym(n)}}\tr(\,\underbrace{H\vphantom{()}}_{\mathclap{\in\PSym(n)}}\overbrace{C^{-1}}^{\mathclap{\in\PSym(n)}}) \cdot {1\!\!\!\:1 } \,.
\]
Since $\tr(MN)>0$ for $M,N\in\PSym(n)$ (c.f. \cite{Neff_Diss00}) we find $\det(C)\tr(HC^{-1})>0$. Thus $\det(C)\tr(HC^{-1})\, {1\!\!\!\:1 } $ is a positive definite matrix:
\[
Dg[C].H = \det(C)\tr(HC^{-1}) \, {1\!\!\!\:1 } \in \PSym(n)\,,
\]
hence $g$ is operator monotone. However $g$ is not Hilbert space monotone: in the case $n=2$, with $A=\matrs{3&0\\0&2}$ and $B=\matrs{5&0\\0&1}$ we find
\begin{align*}
\innerproduct{g(B)-g(A),\,B-A} &= (\det(B)-\det(A))\,\tr(B-A) = (5-6)\,\tr\matrs{2&0\\0&-1} \:=\: -1 \:<\: 0\,.
\end{align*}
For arbitrary dimensions $n>2$ the same follows for
\[
A=\matr{3&0&0&\dots&0\\0&2&0&\dots&\\0&0&1&\dots&\\\vdots&&&\ddots&\\0&&&&1} \qquad\text{and}\qquad B=\matr{5&0&0&\dots&0\\0&1&0&\dots&\\0&0&1&\dots&\\\vdots&&&\ddots&\\0&&&&1}\,.
\]
Note also that $Dg[C]$ is generally not self-adjoint: for $H,\widetilde H\in\Sym(n)$ we find
\[
\innerproduct{Dg[C].H,\,\widetilde H} = \innerproduct{\innerproduct{\mathbb{C}of C,\, H}\cdot {1\!\!\!\:1 } ,\: \widetilde H} = \innerproduct{\mathbb{C}of C,\, H} \tr\widetilde H \neq \innerproduct{\mathbb{C}of C,\, \widetilde H} \tr H\,.
\]
Thus \eqref{eq:opMonNotHMonExample} does not admit a potential.
\end{remark}
\section{Some observations on Jog's and Patil's calculus}
\label{section:jog}
Returning to our original motivation, namely the true-stress-true-strain inequality, we consider the equation
\[
\pdd{\,\sigma(B)}{B} \cdot \pdd{B}{\log B} = \pdd{\,\widehat \sigma(\log B)}{\log B}
\]
based on the chain rule. To see how the positive definiteness of two of these terms implies the positive definiteness of the third, we need the following lemma.
\begin{lemma}
\label{lemma:selfadjointProductPositiveDefinite}
Let $V$ be a finite-dimensional Hilbert space and let $A,B\in L(V,V)$ with
\begin{itemize}
\item[i)] $A$ and $B$ are self-adjoint and positive definite,
\item[ii)] $A B$ is self-adjoint.
\end{itemize}
Then $A B$ is positive definite.
\end{lemma}
\begin{proof}
Since $A$, $B$ and $A B$ are self-adjoint, we find \[A B = (A B)^T = B^T A^T = B A\,,\] hence $A$ and $B$ commute. Therefore $A$ and $B$ are simultaneously diagonalizable: we can choose an orthonormal basis such that the corresponding matrices $M_A$ and $M_B$ representing $A$ and $B$ are diagonal. Since $A$ and $B$ are positive definite, all diagonal entries of $M_A$ and $M_B$ are positive. The matrix $M_{AB}$ representing $AB$ in the same basis is given by $M_{AB} = M_A \cdot M_B$ and is therefore a diagonal matrix with positive diagonal entries as well, thus $AB$ is positive definite.\\
Note that in the case of $V=\mathbb{R}^n$ we can simply choose $Q\in\OO(n)$ such that $A=Q^TD_A Q$, $B=Q^TD_BQ$ with diagonal matrices $D_A, D_B$. Then $AB=Q^TD_AD_B Q$, and since the diagonal entries of $D_A$ and $D_B$ are positive, so are the diagonal entries of $D_AD_B$.
\end{proof}
We consider the derivatives
\[
\pdd{\,\sigma(B)}{B}\,,\quad \pdd{B}{\log B}\,,\quad \pdd{\,\widehat \sigma(\log B)}{\log B} \quad \in \: L(\mathbb{R}nn,\,\mathbb{R}nn)
\]
on $\Sym(n)$ and make the following assumptions on the functions $\sigma$ and $\widehat \sigma$:
\begin{alignat}{2}
&\pdd{\,\sigma(B)}{B} \quad &&\text{ is self-adjoint,}\label{eq:assumptions1}\\
&\pdd{\,\widehat \sigma(\log B)}{\log B} \quad &&\text{ is self-adjoint and positive definite in $L(\mathbb{R}nn,\,\mathbb{R}nn)$.}\label{eq:assumptions2}
\end{alignat}
Furthermore, we know from the previous sections that $\pdd{B}{\log B} = D\exp[\log B]$ and its inverse $\left(\pdd{B}{\log B}\right)^{-1} = \pdd{\log B}{B}$ $(= D\log[B])$ are self-adjoint and positive definite.\\[2mm]
Then according to Lemma \ref{lemma:selfadjointProductPositiveDefinite} the following holds:
\[
\pdd{\,\sigma(B)}{B} \cdot \pdd{B}{\log B} = \pdd{\,\widehat \sigma(\log B)}{\log B} \quad \Longrightarrow \quad \pdd{\,\sigma(B)}{B} \text{\: is positive definite.}
\]
This follows directly from $\pdd{\,\sigma(B)}{B} = \pdd{\,\widehat \sigma(\log B)}{\log B} \cdot \left(\pdd{B}{\log B}\right)^{-1}$. Note that \eqref{eq:assumptions1} and \eqref{eq:assumptions2} hold if $\sigma(B)=\widehat \sigma(\log B)$ for a primary matrix function $\widehat \sigma$ induced by a monotone function $f:\mathbb{R}^+\to\mathbb{R}$ with $f'(t)>0$ for all $t \in \mathbb{R}^+$.
To apply Lemma \ref{lemma:selfadjointProductPositiveDefinite}, all of the involved matrices $A$, $B$ and $AB$ must be self-adjoint.
While the term \enquote{positive definite} usually implies the symmetry by definition, we will now consider matrices $A$ which are \enquote{positive definite} in the sense that $\innerproduct{Ax,x} > 0$ for all $x\in\mathbb{R}n$, which is the case if and only if the symmetric part $\sym A = \tel{2}(A+A^T)$ of $A$ is positive definite.\\
We will show that the lemma does not generally hold if only one of the considered matrices is symmetric. Let
\[
A = \matr{1&0\\0&\tel8}\,, \qquad B_t = \matr{1&-t\\0&1}
\]
for $t\in[0,1]$. Then
\[
A\cdot B_t = \matr{1&-t \\ 0&\tel8}
\]
and we find:
\begin{itemize}
\item $t\mapsto B_t: [0,1]\to\mathbb{R}nn$ is continuous,
\item $A$ is invertible, symmetric and positive definite,
\item $B_t$ is invertible and \enquote{positive definite} (i.e.\ $\sym B_t = \tel{2}(B_t+B_t^T)$ is positive definite) for all $t\in[0,1]$ and
\item $A\cdot B_t$ is invertible for all $t\in[0,1]$.
\end{itemize}
However, while $A\cdot B_0$ is obviously positive definite, the matrix $A\cdot B_1 = \matr{1&-1\\0&\tel8}$ is not since
\[
\sym \matr{1&-1\\0&\tel8} = \matr{1&-\tel{2}\\-\tel{2}&\tel8} \quad \Longrightarrow \quad \det(\sym(A\cdot B_1)) = -\tel8 \:<\: 0\,,
\]
which implies that $\sym(A\cdot B_1)$ is not positive definite.\\
This shows not only that the lemma does not hold for non-symmetric matrices, but also that a \enquote{positive definite} (non-symmetric) matrix can be continuously deformed into a non-positive matrix without losing invertibility along the way.
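The deformation $t\mapsto A\cdot B_t$ can be traced numerically. The following sketch (ours, illustrative; NumPy is assumed) shows that $\det(A\cdot B_t)$ stays away from zero while the symmetric part of $A\cdot B_t$ loses positive definiteness:
\begin{verbatim}
import numpy as np

A = np.diag([1.0, 0.125])
for t in np.linspace(0.0, 1.0, 5):
    Bt = np.array([[1.0, -t], [0.0, 1.0]])
    M = A @ Bt
    print(t, np.linalg.det(M), np.linalg.eigvalsh((M + M.T) / 2))
# det(A B_t) = 1/8 for every t, but the smallest eigenvalue of sym(A B_t)
# becomes negative as t approaches 1.
\end{verbatim}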
In \cite[eq. (50)]{Jog2013}, Jog and Patil argue that a tensor valued function $A$ \enquote{loses positive definiteness} if and only if $B$ \enquote{loses positive definiteness}, which is deduced from the fact that $A = B C$ for some symmetric positive definite $C$. Since $A$ and $B$ are not symmetric in general, the authors define \enquote{losing positive definiteness} as the loss of invertibility. While for this definition the stated equivalence is correct, it should be carefully noted that since invertibility of a gradient is not a sufficient condition for monotonicity, this result cannot be applied to show the monotonicity of a function with gradient $A$. In particular, if $\pdd{\,\widehat \sigma(\log B)}{\log B}$ is not symmetric we cannot simply combine Lemma \ref{lemma:posDefSinglePoint} and Lemma \ref{lemma:selfadjointProductPositiveDefinite} to conclude that $\pdd{\,\sigma(B)}{B}$ is positive definite.
\begin{appendix}
\section{Appendix}
\subsection{On the derivative of the determinant function}
Consider the first order approximation
\[
\det(A+H) = \det A + D\det[A].H + \underbrace{\dots}_{\mathclap{\substack{\text{higher order}\\\text{terms}}}}
\]
of the determinant function at a \emph{diagonal} matrix $A=\diag(a_1,\dotsc,a_n)$. First we assume that $H\in\Sym(n)$ is an \emph{off-diagonal} matrix of the form
\begin{equation}
H = H^{\mathrm{off}} = \matrs{0&&h\\&\ddots&\\h&&0} \label{eq:offDiagonalMatrixSpecialCase}
\end{equation}
with $h\in\mathbb{R}$. We compute
\begin{align}
&\hspace{-14mm} \det\matr{\vec{a}_1 + \matrs{0\\\vdots\\h}, \: \vec{a}_2\:\:,\:\: \cdots \:\:\,, \:\: \vec{a}_n + \matrs{h\\\vdots\\0}}\nonumber \\
&= \det(\vec{a}_1\,, \: \vec{a}_2\,, \: \dotsc \,, \: \vec{a}_n + \matrs{h\\\vdots\\0}) \:+\: \det(\matrs{0\\\vdots\\h}, \: \vec{a}_2\,, \: \dotsc \,, \: \vec{a}_n + \matrs{h\\\vdots\\0})\nonumber \\
&= \det(\vec{a}_1\,, \: \vec{a}_2\,, \: \dotsc \,, \: \vec{a}_n) \:+\: \det(\vec{a}_1\,, \: \vec{a}_2\,, \: \dotsc \,, \: \matrs{h\\\vdots\\0})\nonumber \\
&\quad \;\;+\: \det(\matrs{0\\\vdots\\h}, \: \vec{a}_2\,, \: \dotsc \,, \: \vec{a}_n) \:+\: \det(\matrs{0\\\vdots\\h}, \: \vec{a}_2\,, \: \dotsc \,, \: \matrs{h\\\vdots\\0})\,. \label{eq:detExtendedSum}
\end{align}
Since $A$ is diagonal by assumption, the column vector $\vec{a}_1$ has the form $\vec{a}_1 = \matr{a_{1}&\cdots&0}^T$ and $\vec{a}_n$ has the form $\vec{a}_n = \matr{0&\cdots&a_{n}}^T$. Therefore the vectors $\vec{a}_1$ and $\matr{h&\cdots&0}^T$ as well as $\vec{a}_n$ and $\matr{0&\cdots&h}^T$ are linearly dependent. Thus \eqref{eq:detExtendedSum} reduces to
\[
\det(\vec{a}_1\,, \: \vec{a}_2\,, \: \dotsc \,, \: \vec{a}_n) \:+\: \det(\matrs{0\\\vdots\\h}, \: \vec{a}_2\,, \: \dotsc \,, \: \matrs{h\\\vdots\\0}) \:=\: \det A \:+\: \underbrace{\det(\matrs{0\\\vdots\\h}, \: \vec{a}_2\,, \: \dotsc \,, \: \matrs{h\\\vdots\\0})}_{:=R(h)}\,.
\]
The term $R(h)$ is quadratic in $h$, thus the linear approximation is simply
\[
D\det[A].H = 0
\]
for a matrix $H\in\Sym(n)$ of the form \eqref{eq:offDiagonalMatrixSpecialCase}. Through similar computations, it is easy to show that $D\det[A].H = 0$ for any off-diagonal $H\in\Sym(n)$.
To find the derivative $D\det[A]. {1\!\!\!\:1 } $ we compute
\begin{align*}
\det(A+h {1\!\!\!\:1 } ) &= \det\matrs{a_1+h & &0\\&\ddots &\\0&&a_n+h} = \prod_{i=1}^n (a_i+h) = \prod_{i=1}^n a_i + \Big(\sum_{i=1}^n\prod_{\substack{j=1\\j\neq i}}^n a_j\Big)\cdot h + h^2\cdot[\dots]\,,
\end{align*}
thus
\begin{equation}
D\det[A]. {1\!\!\!\:1 } = \dd{h} \det(A+h {1\!\!\!\:1 } ) = \sum_{i=1}^n \prod_{\substack{j=1\\j\neq i}}^n a_j\,. \label{eq:detDerivativeInUnityDirection}
\end{equation}
For $n=3$ we obtain
\[
D\det[A]. {1\!\!\!\:1 } = a_1 a_2 + a_2 a_3 + a_1 a_3\,.
\]
Furthermore, if we assume that $0$ is a simple eigenvalue of $A$ (which is the case for matrices of the form $D-\lambda_i(D)$ where $D$ is a diagonal matrix with simple eigenvalues; such matrices appeared in equation \eqref{eq:diagonalMinusSimpleEigenvalueExample}), then \eqref{eq:detDerivativeInUnityDirection} can be written as
\[
D\det[A]. {1\!\!\!\:1 } = \prod_{\substack{j=1\\j\neq k}}^n a_j \neq 0\,,
\]
where $0$ is the $k$-th eigenvalue of $A$.
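Both observations can be confirmed by a central-difference experiment. The sketch below (ours, illustrative; NumPy is assumed) evaluates $D\det[A].H$ for an off-diagonal symmetric $H$ and for $H= {1\!\!\!\:1 } $ at a diagonal matrix $A$:
\begin{verbatim}
import numpy as np

a = np.array([3.0, 1.0, -2.0])
A = np.diag(a)
eps = 1e-6

def ddet(H):                           # central difference for D det[A].H
    return (np.linalg.det(A + eps * H) - np.linalg.det(A - eps * H)) / (2 * eps)

H_off = np.array([[0.0, 0.0, 1.0],
                  [0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0]])    # symmetric with zero diagonal
print(ddet(H_off))                     # numerically zero
print(ddet(np.eye(3)),                 # matches a1*a2 + a2*a3 + a1*a3
      a[0]*a[1] + a[1]*a[2] + a[0]*a[2])
\end{verbatim}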
\subsection{On the derivative of isotropic functions}
\begin{lemma}
\label{lemma:isotropicFunctionDerivative}
Let $W:\Sym(n)\to\mathbb{R}$ be an isotropic real valued function, i.e.
\[
\boxed{W(Q^TXQ)=W(X) \quad \forall \: X\in\Sym(n)\,,\:Q\in\OO(n)\,.}
\]
Then
\[
DW[Q^TXQ] = Q^TDW[X]Q\,.
\]
\end{lemma}
\begin{proof}
We directly compute:
\begin{alignat*}{1}
W(Q^T(X+H)Q) &= W(X+H)\\
\mathbb{R}ightarrow W(Q^TXQ + Q^THQ) &= W(X) + \innerproduct{DW[X],\,H} + \dots\\
\mathbb{R}ightarrow \smash{\overbrace{W(Q^TXQ)}^{=W(X)}} + \innerproduct{DW[Q^TXQ],\,Q^THQ} + \dots &= W(X) + \innerproduct{DW[X],\,H} + \dots\\
\mathbb{R}ightarrow \innerproduct{DW[Q^TXQ],\,Q^THQ} &= \innerproduct{DW[X],\,H}\\
\mathbb{R}ightarrow \innerproduct{QDW[Q^TXQ]Q^T,\,H} &= \innerproduct{DW[X],\,H}\,.
\end{alignat*}
Since this holds for all $H\in\Sym(n)$, we obtain
\[
QDW[Q^TXQ]Q^T = DW[X]
\]
and thus
\[
DW[Q^TXQ] = Q^TDW[X]Q\,. \qedhere
\]
\end{proof}
\subsection{The eigenvalue function}
We could also try to prove Proposition \ref{prop:primaryPotential} for the more general case of non-analytic functions by directly computing the derivative of the function
\[
W: \Sym(n)\to\mathbb{R},\quad W(A) = \sum_{i=1}^n F(\lambda_i(A))\,.
\]
Unfortunately, while the derivative of $W$ at a point $A\in\Sym(n)$ in directions $H$ can be explicitly computed if $A$ and $H$ commute, it is difficult to do so for arbitrary choices of $H\in\Sym(n)$.\\
One possible approach is to assume that the function $\lambda: \Sym(n)\to\mathbb{R}^n$ mapping a matrix $M\in\Sym(n)$ to its (ordered) eigenvalues $\lambda(M)$ is differentiable in a neighbourhood of $A\in\Sym(n)$. For example, this is the case if all eigenvalues of $A$ are simple \cite{Magnus1985eigen}. The basic idea is to write $W(A) = \Psi(\lambda(A))$ with $\Psi(\lambda_1,\dotsc,\lambda_n)=\sum_{i=1}^n F(\lambda_i)$. Then
\begin{equation}
DW[A] = D\Psi[\lambda(A)] \cdot D\lambda[A]\,. \label{eq:potentialEigenvalueDerivative}
\end{equation}
It is therefore useful to compute the derivative $D\lambda[A]$ of the eigenvalue function. Since Lemma \ref{lemma:isotropicFunctionDerivative} implies
\[
\lambda(Q^TAQ) = \lambda(A) \:\Longrightarrow\: D\lambda[Q^TAQ] = Q^T\,D\lambda[A]\,Q\,,
\]
the derivative of $\lambda$ at $A$ is determined by the derivative at the diagonal matrix corresponding to $A$. We will therefore assume w.l.o.g. that $A$ is already a diagonal matrix.\\
The eigenvalues $\lambda_i$ of $A$ are characterized by
\begin{equation}
\det(A-\lambda_i {1\!\!\!\:1 } )=0\,. \label{eq:eigenvalueDefinition}
\end{equation}
Let $H\in\Sym(n)$. We compute the first order approximation of \eqref{eq:eigenvalueDefinition}:
\begin{align}
&\quad\:\:\,\det(A+H - \lambda_i(A+H)\cdot {1\!\!\!\:1 } ) = 0\nonumber \\
&\mathbb{R}ightarrow\: \det(A+H-[\lambda_i(A) {1\!\!\!\:1 } + [D\lambda_i(A).H]\cdot {1\!\!\!\:1 } + \dots]) = 0\nonumber \\
&\mathbb{R}ightarrow\: \det([A-\lambda_i(A) {1\!\!\!\:1 } ]+H-[D\lambda_i(A).H]\cdot {1\!\!\!\:1 } + \dots) = 0\nonumber \\
&\mathbb{R}ightarrow\: \underbrace{\det[A-\lambda_i(A) {1\!\!\!\:1 } ]}_{=0} + \innerproduct{\mathbb{C}of[A-\lambda_i(A) {1\!\!\!\:1 } ]\,,\: H-[D\lambda_i(A).H]\cdot {1\!\!\!\:1 } + \dots} + \dots = 0 \label{eq:diagonalMinusSimpleEigenvalueExample}
\end{align}
By ignoring higher order terms we obtain
\begin{equation}
\innerproduct{\mathbb{C}of[A-\lambda_i(A) {1\!\!\!\:1 } ]\,,\: H-[D\lambda_i(A).H]\cdot {1\!\!\!\:1 } } = 0 \label{eq:ignoredHigherOrderTerms}
\end{equation}
Recall that $A$ is diagonal by assumption. Since $A$ commutes with diagonal matrices $H$ (and thus the derivative $DW[A].H$ could be computed by more direct means), we are only interested in cases where the symmetric matrix $H$ is \emph{off-diagonal}, i.e.\ $H_{i,i}=0$ for $i=1,\dotsc,n$. But then
\[
\innerproduct{\,\underbrace{\operatorname{Cof}[A-\lambda_i(A) {1\!\!\!\:1 } ]}_{\text{diagonal}},\: \underbrace{H\vphantom{[]}}_{\mathclap{\text{off-diagonal}}}\,} = 0\,,
\]
thus \eqref{eq:ignoredHigherOrderTerms} reduces to
\[
\innerproduct{\operatorname{Cof}[A-\lambda_i(A) {1\!\!\!\:1 } ]\,,\: [D\lambda_i(A).H]\cdot {1\!\!\!\:1 } } = 0\,,
\]
which we can also write as
\[
(D\lambda_i(A).H)\cdot\tr\left(\operatorname{Cof}[A-\lambda_i(A) {1\!\!\!\:1 } ]\right) = 0\,.
\]
To conclude that $D\lambda_i(A).H=0$ it remains to show that $\tr\left(\operatorname{Cof}[A-\lambda_i(A) {1\!\!\!\:1 } ]\right) \neq 0$. Assuming that the diagonal entries of $A$ are ordered we write $A=\diag(\lambda_1, \dots, \lambda_n)$ and find
\[
A - \lambda_i(A) {1\!\!\!\:1 } = \matr{\lambda_1-\lambda_i & & 0\\ & \ddots & \\ 0 & & \lambda_n-\lambda_i}
\]
and thus
\[
\operatorname{Cof}[A - \lambda_i(A) {1\!\!\!\:1 } ] = \matr{\prod\limits_{k\neq1}(\lambda_k-\lambda_i) & & 0\\ & \ddots & \\ 0 & & \prod\limits_{k\neq n}(\lambda_k-\lambda_i)}\,.
\]
We compute the trace:
\begin{align*}
\tr\left(\operatorname{Cof}[A - \lambda_i(A) {1\!\!\!\:1 } ]\right) &= \sum_{j=1}^n \, \prod\limits_{\substack{k=1\\k\neq j}}^n (\lambda_k-\lambda_i) = \prod\limits_{\substack{k=1\\k\neq i}}^n (\lambda_k-\lambda_i)\,,
\end{align*}
where the second equality holds because every summand with $j\neq i$ contains the factor $(\lambda_i-\lambda_i)=0$. Hence this term is nonzero if and only if the eigenvalue $\lambda_i$ is simple; in particular, if all eigenvalues of $A$ are simple, we conclude that $D\lambda_i(A).H = 0$ for all $i$ and all off-diagonal $H\in\Sym(n)$.
Using these results, we can prove the following, which is a simple corollary to Proposition \ref{prop:primaryPotential}:
\begin{corollary}
Let $f\in C^1(\mathbb{R})$, $F\in C^2(\mathbb{R})$ with $F'=f$ and let $A\in\Sym(n)$ such that all eigenvalues of $A$ are simple. Then the function
\[
W: \Sym(n)\to\mathbb{R},\quad W(M) = \sum_{i=1}^n F(\lambda_i(M))
\]
is differentiable at $A$ with
\[
DW[A] = f(A) = Q^T \diag(f(\lambda_1),\dotsc,f(\lambda_n)) \,Q\,,
\]
where $A=Q^T \diag(\lambda_1,\dotsc,\lambda_n) \,Q$ is the spectral decomposition of $A$.
\end{corollary}
\begin{proof}
According to Lemma \ref{lemma:isotropicFunctionDerivative}, $DW[Q^TXQ] = Q^TDW[X]Q$, hence we find
\[
DW[A] = Q^T DW[\diag(\lambda_1,\dotsc,\lambda_n)] \,Q\,.
\]
Therefore it remains to show that
\begin{equation}
DW[\diag(\lambda_1,\dotsc,\lambda_n)].H = \innerproduct{\diag(f(\lambda_1),\dotsc,f(\lambda_n)),\: H} \label{eq:simpleCorollaryEquationToShow}
\end{equation}
for all $H\in\Sym(n)$ and pairwise different $\lambda_1,\dotsc,\lambda_n$.
We first consider the case of diagonal matrices $H = H^{\mathrm{diag}} = \diag(h_1,\dotsc,h_n)$. Writing $A^{\mathrm{diag}} = \diag(\lambda_1,\dotsc,\lambda_n)$ we find
\[
W(A^{\mathrm{diag}}+tH^{\mathrm{diag}}) = W(\diag(\lambda_1+th_1,\dotsc,\lambda_n+th_n)) = \sum_{i=1}^n F(\lambda_i+th_i)\,,
\]
thus
\begin{align}
DW[A^{\mathrm{diag}}].H^{\mathrm{diag}} &= \lim_{t\to 0} \:\tel{t} (W(A^{\mathrm{diag}}+tH^{\mathrm{diag}})-W(A^{\mathrm{diag}}))\nonumber\\
&= \lim_{t\to 0} \:\tel{t} \sum_{i=1}^n \bigl(F(\lambda_i+th_i) - F(\lambda_i)\bigr) = \sum_{i=1}^n F'(\lambda_i)\,h_i\nonumber \\
&= \innerproduct{\diag(f(\lambda_1),\dotsc,f(\lambda_n)),\: \diag(h_1,\dotsc,h_n)} = \innerproduct{f(A^{\mathrm{diag}}),\:H^{\mathrm{diag}}}\,.\nonumber
\end{align}
Now let $H=H^{\mathrm{off}}$ be a symmetric off-diagonal matrix, i.e.\ $H^{\mathrm{off}}_{i,i}=0$ for $i=1,\dotsc,n$. Using equation \eqref{eq:potentialEigenvalueDerivative}:
\[
DW[A] = D\Psi[\lambda(A)] \cdot D\lambda[A]\,,
\]
as well as the result of the previous considerations for diagonal $A$ and off-diagonal $H^{\mathrm{off}}$:
\[
D\lambda[\diag(\lambda_1,\dotsc,\lambda_n)].H^{\mathrm{off}} = 0\,,
\]
we conclude
\[
DW[\diag(\lambda_1,\dotsc,\lambda_n)].H^{\mathrm{off}} = 0\,.
\]
Finally, for arbitrary $H\in\Sym(n)$, we can write $H=H^{\mathrm{diag}}+H^{\mathrm{off}}$ with a diagonal matrix $H^{\mathrm{diag}}$ and a symmetric off-diagonal matrix $H^{\mathrm{off}}$. Then
\begin{align}
DW[A^{\mathrm{diag}}].H &= DW[A^{\mathrm{diag}}].H^{\mathrm{diag}} + DW[A^{\mathrm{diag}}].H^{\mathrm{off}} = DW[A^{\mathrm{diag}}].H^{\mathrm{diag}}\nonumber \\
&= \innerproduct{f(A^{\mathrm{diag}}),\:H^{\mathrm{diag}}} = \innerproduct{f(A^{\mathrm{diag}}),\:H}\,,
\end{align}
showing \eqref{eq:simpleCorollaryEquationToShow} and concluding the proof.
\end{proof}
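As a simple consistency check of the corollary, take $F(t)=\tfrac12 t^2$ and $f(t)=t$. Then
\[
W(M) = \tfrac12\sum_{i=1}^n \lambda_i(M)^2 = \tfrac12\innerproduct{M,\,M}\,,
\]
and the corollary yields $DW[A]=f(A)=A$ for every $A\in\Sym(n)$ with simple eigenvalues, in agreement with the elementary computation $DW[A].H=\innerproduct{A,\,H}$.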
\end{appendix}
}
\end{document}
|
\begin{document}
\title{\textbf{Matrices of 3iet preserving morphisms}}
\author{P.~Ambro\v{z}\quad Z.~Mas\'akov\'a\quad E.~Pelantov\'a \\[5mm]
{\small Doppler Institute
\& Department of Mathematics}\\
{\small FNSPE, Czech Technical University, Trojanova 13, 120 00 Praha 2, Czech Republic}\\
{\small E-mail: \texttt{[email protected]},
\texttt{[email protected]}},\\{\small \texttt{[email protected]}}}
\date{}
\maketitle
\begin{abstract}
We study matrices of morphisms preserving the family of words
coding 3-interval exchange transformations. It is well known that
matrices of morphisms preserving sturmian words (i.e.\ words
coding 2-interval exchange transformations with the maximal
possible factor complexity) form the monoid
$\{\boldsymbol{M}\in\mathbb{N}^{2\times
2}\;|\;\det\boldsymbol{M}=\pm1\} = \{
\boldsymbol{M}\in\mathbb{N}^{2\times 2}\;|\;
\boldsymbol{M}\boldsymbol{E}\boldsymbol{M}^T =
\pm\boldsymbol{E}\}$, where $\boldsymbol{E} =
(\!\begin{smallmatrix}0&1\\-1&0\end{smallmatrix})$.
We prove that in case of exchange of three intervals, the matrices
preserving words coding these transformations and having the
maximal possible subword complexity belong to the monoid
$\{\boldsymbol{M}\in\mathbb{N}^{3\times 3}\;|\;
\boldsymbol{M}\boldsymbol{E}\boldsymbol{M}^T = \pm\boldsymbol{E},\
\det\boldsymbol{M}=\pm 1\}$, where $\boldsymbol{E} =
\Big(\!\begin{smallmatrix}0&1&1\\-1&0&1\\-1&-1&0\end{smallmatrix}\Big)$.
\end{abstract}
\section{Introduction}
Sturmian words are the most studied class of infinite aperiodic
words. By their nature, they are defined purely over a binary
alphabet. There exist several equivalent definitions of sturmian
words~\cite{berstel-ijac-12}, which give rise to several different
generalizations of sturmian words over larger alphabets. For
example, the generalization of sturmian words to Arnoux-Rauzy
words of order $r$ uses the characterization of sturmian words by
means of the so-called left and right special
factors~\cite{arnoux-rauzy-bcmf-119}.
Another natural generalization can be derived from the definition
of a sturmian word as an aperiodic word coding a transformation of
exchange of two intervals. The $r$-interval exchange
transformation has been introduced by Katok and
Stepin~\cite{katok-stepin-umn-22}: An exchange $T$ of $r$
intervals is defined by a vector of $r$ lengths and by a
permutation of $r$ letters; the unit interval is then partitioned
according to the vector of lengths, and $T$ interchanges these
intervals according to the given permutation. Rauzy was the first
to observe that interval exchange transformations can be used
to generalize sturmian words.
In contrast to ergodic properties of these transformations, which
were studied by many
authors~\cite{keane-mz-141,rauzy-aa-34,veech-ajm-6,vershik-livshits-asm-9},
combinatorial properties of the associated words have so far been
explored only a little. Some results, analogous to the properties
known for sturmian words, have been derived for the simplest
case, namely for 3-interval exchange transformations. Note that
for the exchange of three intervals, the most interesting
permutation is $(321)$ and all the results cited below apply to
transformations with this permutation. Words coding 3-interval
exchange transformation can be periodic or aperiodic, depending on
the choice of parameters. In accordance with the terminology
introduced by~\cite{damanik-zamboni-rmp-15}, infinite words which
code 3-interval exchange transformations and are aperiodic, are
called 3iet words. The factor complexity $\mathcal{C}_u(n)$ of a 3iet
word $u$, i.e., the number of different factors of length $n$
occurring in $u$, is known to satisfy $\mathcal{C}_u(n)\leq 2n+1$ for
all $n\in{\mathbb N}$. Words for which $\mathcal{C}_u(n)= 2n+1$, for all
$n\in{\mathbb N}$, are called non-degenerated (or regular) 3iet words.
In the paper~\cite{ferenczi-holton-zamboni-jam-89}, minimal
sequences coding 3-interval exchange transformations are fully
characterized. The structure of palindromes of these words was
described
in~\cite{damanik-zamboni-rmp-15,balazi-masakova-pelantova-tcs},
whereas the paper~\cite{ferenczi-holton-zamboni-jam-89} deals with
their return words. Here we study morphisms which map the set of
3iet words to itself.
Morphisms preserving sturmian words were completely described by Berstel, Mignosi and
S\'e\'ebold~\cite{berstel-seebold-bbms-1,mignosi-seebold-jtnb-5,seebold-tcs-88}.
Recall that there are two ways to define such a morphism:
\begin{itemize}
\item
A morphism $\varphi$ over the binary alphabet $\{0,1\}$ is said to be \emph{locally sturmian}
if there is a sturmian word $u$ such that $\varphi(u)$ is also sturmian.
\item
A morphism $\varphi$ over the binary alphabet $\{0,1\}$ is said to be \emph{sturmian}
if $\varphi(u)$ is sturmian for all sturmian words $u$.
\end{itemize}
Berstel, Mignosi and S\'e\'ebold showed that the families of
sturmian and locally sturmian morphisms coincide and that they
form a monoid generated by three morphisms, $\psi_1$, $\psi_2$ and
$\psi_3$, given by
\begin{equation}\label{eq:sturm-generators}
\psi_1:
\begin{aligned}
0 & \mapsto 01 \\ 1 &\mapsto 1
\end{aligned}\,, \quad\quad
\psi_2:
\begin{aligned}
0 & \mapsto 10 \\ 1 &\mapsto 1
\end{aligned}\,, \quad\quad
\psi_3:
\begin{aligned}
0 & \mapsto 1 \\ 1 &\mapsto 0
\end{aligned}\,.
\end{equation}
To each morphism $\varphi$ over a $k$-letter alphabet
$\{a_1,\ldots,a_k\}$ one can assign its incidence matrix
$\boldsymbol{M}_{\!\varphi}\in{\mathbb N}^{k\times k}$ by putting
\begin{equation}\label{eq:subst-matrix}
(\boldsymbol{M}_{\!\varphi})_{ij} = \text{number of letters $a_j$ in the word $\varphi(a_i)$}\,.
\end{equation}
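For instance, the generating morphisms from~\eqref{eq:sturm-generators} have incidence matrices
\[
\boldsymbol{M}_{\!\psi_1}=\boldsymbol{M}_{\!\psi_2}=
\bigl(\begin{smallmatrix}1&1\\0&1\end{smallmatrix}\bigr)\,,\qquad
\boldsymbol{M}_{\!\psi_3}=\bigl(\begin{smallmatrix}0&1\\1&0\end{smallmatrix}\bigr)\,,
\]
each with determinant $\pm1$.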
As a simple consequence of the fact that the monoid of sturmian
morphisms is generated by $\psi_1$, $\psi_2$ and $\psi_3$
from~\eqref{eq:sturm-generators}, one has the following fact:
\emph{A matrix $\mat{M}\in{\mathbb N}^{2\times 2}$ is the incidence matrix
of a sturmian morphism if and only if\:\ $\det\mat{M}=\pm1$.} By
an easy calculation we can derive that for matrices of order
$2\times 2$
\[
\det\mat{M}=\pm1 \quad \Longleftrightarrow \quad \mat{M}\mat{E}\mat{M}^T = \pm\mat{E}\,,
\text{ where } \mat{E}=\bigl(\!\begin{smallmatrix} 0 & 1 \\ -1 & 0 \end{smallmatrix}\bigr)\,.
\]
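This equivalence follows from the identity
\[
\mat{M}\mat{E}\mat{M}^T = (\det\mat{M})\,\mat{E}\,,
\]
valid for every matrix $\mat{M}\in{\mathbb R}^{2\times 2}$, which can be checked directly by writing $\mat{M}=\bigl(\begin{smallmatrix}a&b\\c&d\end{smallmatrix}\bigr)$.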
In the theory of Lie groups, one can formulate this claim by
stating that the group $\text{SL}(2,{\mathbb Z})$ is isomorphic to the
group $\text{Sp}(2,{\mathbb Z})$, see~\cite{jacobson-liealg}.
The aim of this paper is to derive similar properties for matrices
of morphisms preserving the family of 3iet words, which we call
here 3iet preserving morphisms. We will prove the following
theorems.
\begin{thma}
Let $\varphi$ be a 3iet preserving morphism and let $\mat{M}$ be its incidence matrix. Then
\[
\mat{M}\mat{E}\mat{M}^T = \pm\mat{E}, \text{ where }
\mat{E}=\Bigl(\!\begin{smallmatrix}0&1&1\\-1&0&1\\-1&-1&0\end{smallmatrix}\Bigr)\,.
\]
\end{thma}
\begin{thmb}
Let $\varphi$ be a 3iet preserving morphism and let $\mat{M}$ be its incidence matrix.
Then one of the following holds
\begin{itemize}
\item
$\det\mat{M}=\pm 1$ and $\varphi(u)$ is non-degenerated for every non-degenerated 3iet
word $u$,
\item
$\det\mat{M}=0$ and $\varphi(u)$ is degenerated for every 3iet word $u$.
\end{itemize}
\end{thmb}
In the proof of Theorem~A we use the description of matrices of sturmian morphisms
given above, while the main tool employed in the proof of Theorem~B is
the connection between words coding 3-interval exchange transformations and
cut-and-project sets.
Note that we do not address at all the description of the 3iet
preserving morphisms themselves.
\section{Preliminaries}
In this paper we deal with finite and infinite words over a finite
alphabet $\mathcal{A}$, whose elements are called letters. The set of
all finite words over $\mathcal{A}$ is denoted by $\mathcal{A}^*$. This set,
equipped with the concatenation as a binary operation, is a free
monoid having the empty word as its identity. The length of a word
$w=w_1w_2\cdots w_n$ is denoted by $|w|=n$, the number of letters
$a_i$ in the word $w$ is denoted by $|w|_{a_i}$.
\subsection{Infinite words}
The set of two-sided infinite words over an alphabet $\mathcal{A}$,
i.e., of two-sided infinite sequences of letters of $\mathcal{A}$, is
denoted by $\mathcal{A}^{\mathbb Z}$, its elements are words $u=(u_n)_{n\in{\mathbb Z}}$.
Note that in all our considerations we will not identify infinite
words $(u_{n+k})_{n\in{\mathbb Z}}$ and $(u_{n})_{n\in{\mathbb Z}}$, and therefore
we will mark the position corresponding to the index $0$, usually
using $|$ as the delimiter, e.g.\ for $u\in\mathcal{A}^{\mathbb Z}$,
\[
u = \cdots u_{-3}u_{-2}u_{-1}|\:u_0u_1u_2\cdots\,.
\]
The words of this form are sometimes called pointed biinfinite words. Naturally, one can
define a metric on the set $\mathcal{A}^{\mathbb Z}$.
\begin{defi}
Let $u=(u_n)_{n\in{\mathbb Z}}$ and $v=(v_n)_{n\in{\mathbb Z}}$ be two biinfinite
words over $\mathcal{A}$. We define the \emph{distance}
$\dd(u,v)$ between $u$ and $v$ by setting
\begin{equation}\label{eq:metric}
\dd(u,v) \mathrel{\mathop:}= \frac{1}{1+j}\,,
\end{equation}
where $j\in{\mathbb N}$ is the minimal index such that either $u_j\neq v_j$ or $u_{-j}\neq v_{-j}$.
\end{defi}
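For instance, if $u_j=v_j$ for all $|j|\leq 2$ while $u_3\neq v_3$, then the minimal such index is $j=3$ and $\dd(u,v)=\tfrac14$.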
It can be easily verified that the above defined distance
$\dd(u,v)$ is a metric and that the set $\mathcal{A}^{\mathbb Z}$ with this
metric is a compact metric space.
We consider also one-sided infinite words $u = (u_n)_{n\in{\mathbb N}}$, either right-sided
$u = u_0u_1u_2\cdots$ or left-sided $u = \cdots u_2u_1u_0$.
The degree of diversity of an infinite word $u$ is expressed by
the complexity function, which counts the number of factors of
length $n$ in the word $u$. Formally, a word $w$ of length $n$ is said
to be a \emph{factor} of a word $u=(u_n)_{n\in{\mathbb Z}}$ if there is an index $i\in{\mathbb Z}$ such
that $w=u_i u_{i+1}\cdots u_{i+n-1}$. The set of all factors of $u$ of length $n$
is denoted by $\mathcal{L}_n(u)$. The \emph{language} $\mathcal{L}(u)$ of an infinite
word $u$ is the set of all its factors, that is,
\[
\mathcal{L}(u) = \bigcup_{n\in{\mathbb N}} \mathcal{L}_n(u)\,.
\]
The \emph{(factor) complexity} $\mathcal{C}_u$ of an infinite word $u$ is the function
$\mathcal{C}_u:{\mathbb N}\rightarrow{\mathbb N}$ defined as
\[
\mathcal{C}_u(n) \mathrel{\mathop:}= \#\mathcal{L}_n(u)\,.
\]
Clearly, $\mathcal{C}_u(n)$ is a non-decreasing function. Recall that if
there exists $n_0\in{\mathbb N}$ such that $\mathcal{C}_u(n_0)\leq n_0$, then the
word $u$ is eventually periodic (if $u=(u_n)_{n\in{\mathbb N}}$), or
periodic (if $u=(u_n)_{n\in{\mathbb Z}}$), see~\cite{morse-hedlund-ajm-62}.
Hence for an aperiodic word $u$, one has $\mathcal{C}_u(n)\geq n+1$, for
all $n\in{\mathbb N}$.
A one-sided sturmian word $(u_n)_{n\in{\mathbb N}}$ is often defined as an
aperiodic word with complexity $\mathcal{C}_u(n)=n+1$, for all $n\in{\mathbb N}$.
However, for biinfinite words, the condition $\mathcal{C}_u(n)\geq n+1$
is not enough for $u$ to be aperiodic. For example, the word
$\cdots111|000\cdots$ has the complexity $\mathcal{C}(n)=n+1$ for all
$n\in{\mathbb N}$. In order to define a biinfinite sturmian word
$(u_n)_{n\in{\mathbb Z}}$ by means of complexity, we need to add another
condition. We introduce the notion of the density of letters,
representing the frequency of occurrence of a given letter in an
infinite word.
The \emph{density of a letter} $a\in\mathcal{A}$ in a word
$u\in\mathcal{A}^{\mathbb Z}$ is defined as
\[
\rho(a) \mathrel{\mathop:}= \lim_{n\rightarrow\infty}
\frac{\#\{i\ |\ -n\leq i\leq n,\ u_i=a\}}{2n+1}\,,
\]
if the limit exists.
A biinfinite word $u=(u_n)_{n\in{\mathbb Z}}$ is called sturmian, if
$\mathcal{C}_u(n)=n+1$ for each $n\in{\mathbb N}$ and the densities of letters
are irrational.
Another equivalent definition of sturmian words uses the balance
property. We say that an infinite word $u$ over the alphabet
$\{0,1\}$ is \emph{balanced}, if for every pair of factors
$v,w\in\mathcal{L}_n(u)$ we have $\bigl||v|_0- |w|_0\bigr|\leq 1$. An
aperiodic one-sided infinite word over the alphabet $\{0,1\}$ is sturmian
if and only if it is balanced. A biinfinite word over $\{0,1\}$ is
sturmian if and only if it is balanced and has irrational densities
of letters. For other properties of one-sided and two-sided infinite
sturmian words the reader is referred to~\cite{lothaire2,fogg}.
Unlike the metric space $\mathcal{A}^{\mathbb Z}$, the set of all sturmian words
equipped with the same metric~\eqref{eq:metric} is not compact;
however, we have the following result.
\begin{lem}\label{lem:limit-of-sturm}
Let $u\in\{0,1\}^{\mathbb Z}$ be a limit of a sequence of sturmian words $u^{(m)}$.
Then $u$ is either sturmian or the densities of letters in $u$ are rational.
\end{lem}
\begin{proof}
Let $w,\widehat{w}\in\mathcal{L}(u)$ be factors of the same length in $u$.
Since $u = \lim_{m\rightarrow\infty} u^{(m)}$ there exists $m_0\in{\mathbb N}$ such
that $w,\widehat{w}$ are factors of $u^{(m_0)}$, which is sturmian. Therefore
$\bigl||w|_0 - |\widehat{w}|_0\bigr|\leq 1$ and $u$ is balanced. If, moreover, the
densities are irrational, then $u$ is sturmian. The statement follows.
\end{proof}
\subsection{Morphisms and incidence matrices}\label{sec:geometric}
A mapping $\varphi:\mathcal{A}^*\rightarrow\mathcal{A}^*$ is said to be a
\emph{morphism} over $\mathcal{A}$ if
$\varphi(w\widehat{w})=\varphi(w)\varphi(\widehat{w})$ holds for
any pair of finite words $w,\widehat{w}\in\mathcal{A}^*$. Obviously, a
morphism is uniquely determined by the images $\varphi(a)$ for all
letters $a\in\mathcal{A}$.
The action of a morphism $\varphi$ can be naturally extended to biinfinite words
by the prescription
\[
\varphi(u) = \varphi(\cdots u_{-2}u_{-1}|\:u_0u_1\cdots) \mathrel{\mathop:}=
\cdots\varphi(u_{-2})\varphi(u_{-1})|\,\varphi(u_{0})\varphi(u_{1})\cdots\,.
\]
The mapping $\varphi : u \mapsto \varphi(u)$ is continuous on $\mathcal{A}^{\mathbb Z}$;
a word $u\in\mathcal{A}^{\mathbb Z}$ is said to be a \emph{fixed point} of $\varphi$ if $\varphi(u)=u$.
Recall that the incidence matrix of a morphism $\varphi$ over the
alphabet $\mathcal{A}$ is defined by~\eqref{eq:subst-matrix}.
A morphism $\varphi$ is called primitive if there exists a positive integer $k$ such that
the matrix $\mat{M}_{\varphi}^k$ is positive.
Morphisms over $\mathcal{A}$ form a monoid, whose neutral element is the
identity morphism. Let $\varphi$ and $\psi$ be morphisms over
$\mathcal{A}$, then the matrix of their composition, that is, of the
morphism $u\mapsto(\varphi\circ\psi)(u) =
\varphi\bigl(\psi(u)\bigr)$ is obtained by
\begin{equation}\label{eq:matrix-compose}
\mat{M}_{\varphi\circ\psi} = \mat{M}_{\psi}\mat{M}_{\varphi}\,.
\end{equation}
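For instance, for the sturmian morphisms $\psi_1$ and $\psi_3$ from~\eqref{eq:sturm-generators} one has $(\psi_1\circ\psi_3)(0)=\psi_1(1)=1$ and $(\psi_1\circ\psi_3)(1)=\psi_1(0)=01$, so that
\[
\mat{M}_{\psi_1\circ\psi_3}=
\bigl(\begin{smallmatrix}0&1\\1&1\end{smallmatrix}\bigr)=
\bigl(\begin{smallmatrix}0&1\\1&0\end{smallmatrix}\bigr)
\bigl(\begin{smallmatrix}1&1\\0&1\end{smallmatrix}\bigr)=
\mat{M}_{\psi_3}\mat{M}_{\psi_1}\,,
\]
in accordance with~\eqref{eq:matrix-compose}.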
Let us now explain the importance of the incidence matrix of a
morphism $\varphi$ for the combinatorial properties of infinite
words on which the morphism $\varphi$ acts. Assume that an
infinite word $u$ over the alphabet $\mathcal{A}=\{a_1,\dots, a_k\}$ has
well defined densities of letters, given by the vector
\[
\vec{\rho}_u = \bigl(\rho(a_1),\dots,\rho(a_k)\bigr)\,.
\]
It is easy to see that the densities of letters in the infinite
word $\varphi(u)$ are also well defined and it
holds that
\begin{equation}\label{eq:density-phi-u}
\vec{\rho}_{\varphi(u)} = \frac{\vec{\rho}_u\boldsymbol{M}_{\!\varphi}}
{\vec{\rho}_u\boldsymbol{M}_{\!\varphi}
\Bigl(\begin{smallmatrix}1\\[-2mm] \vdots\\1\end{smallmatrix}\Bigr)}\,,
\end{equation}
where $\boldsymbol{M}_{\!\varphi}$ is the incidence matrix of $\varphi$.
Assume now that the infinite word $u$ is a fixed point of a
morphism $\varphi$. Then from~\eqref{eq:density-phi-u}, we obtain
that the vector of densities $\vec{\rho}_u$ is a left eigenvector
of the incidence matrix $\boldsymbol{M}_{\!\varphi}$, i.e.,
$\vec{\rho}_u\boldsymbol{M}_{\!\varphi}=\Lambda\vec{\rho}_u$. Since $\boldsymbol{M}_{\!\varphi}$ is a
non-negative integral matrix, we can use the Perron-Frobenius
Theorem stating that $\Lambda$ is the dominant eigenvalue of
$\boldsymbol{M}_{\!\varphi}$. Moreover, all eigenvalues of $\boldsymbol{M}_{\!\varphi}$ are algebraic
integers.
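For instance, the morphism $0\mapsto10$, $1\mapsto110$, whose fixed point is depicted in Figure~\ref{fig:morphism} below, has the incidence matrix $\bigl(\begin{smallmatrix}1&1\\1&2\end{smallmatrix}\bigr)$ with dominant eigenvalue $\Lambda=\tau^2$, where $\tau=\tfrac{1+\sqrt{5}}{2}$ denotes the golden ratio, and the vector of densities of its fixed point is the normalized left eigenvector
\[
\vec{\rho}_u = \tfrac{1}{1+\tau}\,(1,\tau) = (2-\tau,\,\tau-1)\,.
\]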
The right eigenvector of the incidence matrix corresponding to the
dominant eigenvalue has also a nice interpretation. It plays an
important role for the \emph{geometric representation} of a fixed
point of a morphism. Let $u$ be a fixed point of a morphism
$\varphi$ over a $k$-letter alphabet $\{a_1,\ldots,a_k\}$ and let
$\boldsymbol{M}_{\!\varphi}$ have a positive right eigenvector $\vec{x}$. The infinite
word $u$ can be geometrically represented by a self-similar set
$\Sigma$ as follows.
Let us denote by $x_1,x_2,\ldots,x_k$ the positive components of
$\vec{x}$, and let $\Lambda$ be the corresponding eigenvalue,
i.e., $\boldsymbol{M}_{\!\varphi}\vec{x}=\Lambda\vec{x}$. Since $\boldsymbol{M}_{\!\varphi}$ is non-negative
and $\vec{x}$ is positive, the eigenvalue $\Lambda$ is equal to
the spectral radius of the matrix $\boldsymbol{M}_{\!\varphi}$. Moreover, $\boldsymbol{M}_{\!\varphi}$ being
an integral matrix implies $\Lambda\geq 1$.
For a biinfinite word $u = \cdots
u_{-3}u_{-2}u_{-1}|u_0u_1u_2\cdots$ we denote
\begin{multline*}
\qquad \Sigma = \Bigl\{ \sum_{i=1}^k |w|_{a_i}x_i\ \Big|\
w \text{ is an arbitrary prefix of } u_0u_1u_2\cdots \Bigr\} \\
\cup \Bigl\{ -\sum_{i=1}^k |w|_{a_i}x_i\ \Big|\
w \text{ is an arbitrary suffix of } \cdots u_{-3}u_{-2}u_{-1} \Bigr\}\,. \qquad
\end{multline*}
The set $\Sigma$ can be equivalently defined as
\[
\Sigma = \{t_n\ |\ n\in{\mathbb Z}\}\,,\quad \text{where} \quad t_0=0 \
\hbox{ and } \ t_{n+1} - t_n = x_i\ \Leftrightarrow\ u_n=a_i\,.
\]
Since $u$ is a fixed point of a morphism, the construction of
$\Sigma$ implies that $\Lambda\Sigma \subset \Sigma$.
A set having this property is called self-similar.
\begin{figure}
\caption{Action of the morphism $0\mapsto10$, $1\mapsto110$ on the
geometric representation of its fixed point.}
\label{fig:morphism}
\end{figure}
Moreover, if $u_n=a_i$ then the number of points of the set
$\Sigma$ belonging to $(\Lambda t_n,\Lambda t_{n+1}]$ is equal to
the length of $\varphi(a_i)$. Formally, we have
\begin{equation}\label{eq:points-in-tntn1}
\# \bigl((\Lambda t_n,\Lambda t_{n+1}] \cap \Sigma\bigr) = |\varphi(a_i)|\,.
\end{equation}
In Figure~\ref{fig:morphism}, one can see the geometric
representation of the fixed point of the morphism $0\mapsto10$,
$1\mapsto110$. The matrix of this morphism, $\mat{M} =
\big(\begin{smallmatrix}1&1\\1&2\end{smallmatrix}\big)$, has the
dominant eigenvalue $\Lambda=\tau^2$, where
$\tau=\tfrac{1+\sqrt{5}}{2}$ is the golden ratio. The
corresponding right eigenvector of $\mat{M}$ is
$\big(\begin{smallmatrix}1\\\tau\end{smallmatrix}\big)$. Hence the
lengths assigned to letters $0$ and $1$ are $\ell(0)=1$ and
$\ell(1)=\tau$, respectively.
\section{Interval exchange words}
Before we define infinite words coding a 3-interval exchange
transformation, let us recall how sturmian words can be defined
using a 2-interval exchange transformation. It is well known (see
e.g.~\cite{morse-hedlund-ajm-62,lothaire2}) that every sturmian
word $u=(u_n)_{n\in{\mathbb Z}}$ over the alphabet $\{0,1\}$ satisfies
\begin{equation}\label{eq:dolni-mechanicke}
u_n = \lfloor (n+1)\alpha + x_0\rfloor - \lfloor n\alpha+x_0\rfloor
\qquad \text{for all $n\in{\mathbb Z}$,}
\end{equation}
or
\begin{equation}\label{eq:horni-mechanicke}
u_n = \lceil (n+1)\alpha + x_0\rceil - \lceil n\alpha+x_0\rceil
\qquad \text{for all $n\in{\mathbb Z}$,}
\end{equation}
where $\alpha\in(0,1)$ is an irrational number called the slope,
and $x_0\in[0,1)$ is called the intercept of $u$. In the former
case, $(u_n)_{n\in{\mathbb Z}}$ is the so-called lower mechanical word, in
the latter case the upper mechanical word, with slope $\alpha$ and
intercept $x_0$.
If $(u_n)_{n\in{\mathbb Z}}$ is of the form~\eqref{eq:dolni-mechanicke} then, obviously,
\begin{equation}\label{eq:un-dolni-mech}
u_n = \begin{cases}
0 & \text{if $\{n\alpha+x_0\}\in[0,1-\alpha)\,,$} \\
1 & \text{if $\{n\alpha+x_0\}\in[1-\alpha,1)\,,$}
\end{cases}
\end{equation}
where $\{x\}$ denotes the fractional part of $x$, i.e.,
$\{x\}=x-\lfloor x\rfloor$. We can define a transformation
$T:[0,1)\rightarrow[0,1)$ by the prescription
\begin{equation}\label{eq:2iet}
T(x) = \begin{cases}
x + \alpha & \text{if $x\in [0,1-\alpha)\mathrel{=\!\!\mathop:} I_0\,,$} \\
x + \alpha - 1 & \text{if $x\in [1-\alpha,1)\mathrel{=\!\!\mathop:} I_1\,,$}
\end{cases}
\end{equation}
which satisfies $T(x) = \{x+\alpha\}$. It follows easily that the $n$-th
iteration of $T$ is given as
\begin{equation}\label{eq:T-n}
T^n(x) = \{x+n\alpha\} \qquad \text{for all $n\in{\mathbb Z}$}.
\end{equation}
Putting~\eqref{eq:un-dolni-mech} and~\eqref{eq:T-n} together, we see that
a sturmian word $(u_n)_{n\in{\mathbb Z}}$ can be defined using the transformation
$T$ by
\[
u_n = \begin{cases}
0 & \text{if $T^n(x_0)\in I_0$,} \\
1 & \text{if $T^n(x_0)\in I_1$.}
\end{cases}
\]
Hence a sturmian word is given by iterations of the intercept $x_0$
under the mapping $T$, that is, by the orbit of $x_0$ under $T$.
The action of the mapping $T$ from~\eqref{eq:2iet} is illustrated
in Figure~\ref{fig:2iet}.
\begin{figure}
\caption{Graph of a 2-interval exchange transformation.}
\label{fig:2iet}
\end{figure}
We see that $T$ is in fact an exchange of two intervals $I_0=[0,1-\alpha)$ and
$I_1=[1-\alpha,1)$. It is therefore called a 2-interval exchange transformation.
Let us mention that if $(u_n)_{n\in{\mathbb Z}}$ is an upper mechanical word, the corresponding
2-interval exchange transformation is given by
$T:(0,1]\mapsto(0,1]$, with $I_0=(0,1-\alpha]$ and $I_1=(1-\alpha,1]$.
Note also that it is not necessary for $T$ to act on the unit interval;
we could choose an arbitrary interval divided into two parts whose
lengths have an irrational ratio.
Analogously to the case of exchange of two intervals, we can
define a 3-interval exchange transformation.
\begin{defi}
Let $\alpha,\beta,\gamma$ be three positive real numbers. Denote
$$
\begin{array}{rcl}
I_A&\mathrel{\mathop:}=&[0,\alpha)\\
I_B&\mathrel{\mathop:}=& [\alpha,\alpha+\beta)\\
I_C&\mathrel{\mathop:}=& [\alpha+\beta,\alpha+\beta+\gamma)
\end{array}
\quad\hbox{or }\quad
\begin{array}{rcl}
I_A&\mathrel{\mathop:}=& (0,\alpha]\\
I_B&\mathrel{\mathop:}=& (\alpha,\alpha+\beta]\\
I_C&\mathrel{\mathop:}=& (\alpha+\beta,\alpha+\beta+\gamma]
\end{array}
$$
respectively, and $I:=I_A\cup I_B\cup I_C$.
A mapping
$T:I\rightarrow I$, given by
\begin{equation}\label{eq:3iet}
T(x) = \begin{cases}
x + \beta+\gamma & \text{if $x\in I_A$,} \\
x -\alpha+\gamma & \text{if $x\in I_B$,} \\
x -\alpha-\beta & \text{if $x\in I_C$,}
\end{cases}
\end{equation}
is called a \emph{3-interval exchange transformation} (3iet)\footnote{Note that the
above defined mapping $T$ should be more precisely called 3-interval
exchange with the permutation $(321)$, since the initial arrangement
of intervals $I_A<I_B<I_C$ is changed to $T(C)<T(B)<T(A)$. Indeed,
one can define also 3iet with a different permutation of intervals,
e.g.\ (231). The corresponding 3iet word has the property that by
changing all the letters $C$ into $B$ one obtains a sturmian word
over the alphabet $\{A,B\}$. We will not consider such words.}
with parameters $\alpha,\beta,\gamma$.
\end{defi}
The graph of a 3-interval exchange transformation is shown in Figure~\ref{fig:3iet}.
\begin{figure}
\caption{Graph of a 3-interval exchange transformation.}
\label{fig:3iet}
\end{figure}
With a 3-interval exchange transformation $T$, one can naturally
associate a ternary biinfinite word $u_T(x_0)=(u_n)_{n\in{\mathbb Z}}$,
which codes the orbit of a point $x_0$ from the domain of $T$, as
\begin{equation}\label{eq:u_T}
u_n = \begin{cases}
A & \text{if $\ T^n(x_0)\in I_A$,} \\
B & \text{if $\ T^n(x_0)\in I_B$,} \\
C & \text{if $\ T^n(x_0)\in I_C$.}
\end{cases}
\end{equation}
Similarly as in the case of a 2-interval exchange transformation,
the infinite word coding a 3iet can be periodic or aperiodic,
according to the choice of parameters $\alpha,\beta,\gamma$. We
will focus only on aperiodic words.
\begin{defi}
An aperiodic\footnote{A biinfinite word $(u_n)_{n\in{\mathbb Z}}$ is called aperiodic
if neither $u_0u_1u_2\cdots$ nor $\cdots u_{-3}u_{-2}u_{-1}$ is eventually
periodic.} word $u_T(x_0)$ coding the orbit of the point $x_0$
under the 3iet $T$ defined above is called a \emph{3iet word} with parameters
$\alpha,\beta,\gamma$ and $x_0$.
\end{defi}
The following lemma shows a close relation between words coding
3-interval exchange and 2-interval exchange transformations.
\begin{lem}\label{lem:delta-of-3iet}
Let $u = (u_n)_{n\in{\mathbb Z}}$ be a word coding 3-interval exchange transformation and
let $\sigma:\{A,B,C\}^*\rightarrow\{0,1\}^*$ be a morphism given by
\begin{equation}\label{eq:delta}
A \mapsto 0\,, \qquad
B \mapsto 01\,, \qquad
C \mapsto 1\,.
\end{equation}
Then $\sigma(u)$ codes a 2-interval exchange transformation.
\end{lem}
\begin{proof}
Let $u$ be the coding of $x_0$ under the 3-interval exchange transformation $T$ with
intervals $[0,\alpha)$, $[\alpha,\alpha+\beta)$
and $[\alpha+\beta,\alpha+\beta+\gamma)$.
Let $S$ be the 2-interval exchange transformation of the intervals
$I_0=[0,\alpha+\beta)$ and $I_1=[\alpha+\beta,\alpha+2\beta+\gamma)$, i.e.,
\[
S(x) = \begin{cases}
x + \beta + \gamma & \text{if $x\in I_0$,} \\
x - \alpha - \beta & \text{if $x\in I_1$.}
\end{cases}
\]
One can easily see that
\begin{align*}
x & \in[0,\alpha) && \Rightarrow \qquad x\in I_0 \text{ and } T(x)=S(x)\,, \\
x & \in[\alpha,\alpha+\beta) && \Rightarrow \qquad x\in I_0,\ S(x)\in I_1
\text{ and } S^2(x) = T(x)\,, \\
x & \in[\alpha+\beta,\alpha+\beta+\gamma) && \Rightarrow \qquad x\in I_1
\text{ and } S(x) = T(x)\,.
\end{align*}
This proves that $\sigma(u)$ is the coding of $x_0$ under $S$.
\end{proof}
\section{Periodic and aperiodic words coding 3iet}
In order to clarify the relation between the parameters of a 3iet
and the complexity of the corresponding infinite words, we recast
the definition of these words in a new formalism. We show that
every 3iet word codes distances in a discrete set arising as a
projection of points of the lattice ${\mathbb Z}^2$. This construction is
known as the cut-and-project method.
Let $\varepsilon,\eta$ be real numbers, $\varepsilon\neq-\eta$.
Every point $(a,b)\in{\mathbb Z}^2$ can be written in the form
\[
(a,b) = (a+b\eta)\vec{x}_1 + (a-b\varepsilon)\vec{x}_2\,,
\]
where
\[
\vec{x}_1 = \frac{1}{\varepsilon+\eta}(\varepsilon,1)
\qquad\text{and}\qquad \vec{x}_2 =
\frac{1}{\varepsilon+\eta}(\eta,-1)\,.
\]
Let $V_1$ and $V_2$ denote the lines in ${\mathbb R}^2$ spanned by
$\vec{x}_1$ and $\vec{x}_2$, respectively. Then
$(a+b\eta)\vec{x}_1$ is the projection of the lattice point
$(a,b)$ on $V_1$, whereas $(a-b\varepsilon)\vec{x}_2$ is its projection
on $V_2$. Let $\Omega$ be a bounded interval. Then the set
\begin{equation}\label{eq:Sigma}
\Sigma_{\varepsilon,\eta}(\Omega) \mathrel{\mathop:}= \{ a+b\eta\ |\
a,b\in{\mathbb Z}, \ a-b\varepsilon\in\Omega\}
\end{equation}
is called the \emph{Cut-and-project (C\&P) set} with parameters
$\varepsilon,\eta,\Omega$. Thus C\&P sets arise by projection on
the line $V_1$ of points of ${\mathbb Z}^2$ having their second projection
in a chosen segment on $V_2$.
\begin{prop}\label{prop:cap}
Let $\alpha, \beta, \gamma$ be positive real numbers, and
let $T : [0,\alpha+\beta+\gamma) \mapsto
[0,\alpha+\beta+\gamma) $ be a 3iet defined by~\eqref{eq:3iet}.
Let $x_0 \in [0,\alpha+\beta+\gamma)$ and let $u_T(x_0) =
(u_n)_{n\in \mathbb{Z}}$ be the biinfinite word given by~\eqref{eq:u_T}.
Put
\begin{equation}\label{eq:prevod}
\varepsilon \mathrel{\mathop:}=
\frac{\beta+\gamma}{\alpha+2\beta+\gamma}\,,\quad l\mathrel{\mathop:}=
\frac{\alpha+\beta+\gamma}{\alpha+2\beta+\gamma}\,,\quad c
\mathrel{\mathop:}= \frac{x_0}{\alpha+2\beta+\gamma}\, \quad\hbox{and}\quad
\Omega = (c-l,c]\,,
\end{equation}
and choose arbitrary $\eta >0 $. Then the C\&P set
$\Sigma_{\varepsilon,\eta}(\Omega)$ is a discrete set with the following properties:
\begin{enumerate}
\item
$0 \in \Sigma_{\varepsilon,\eta}(\Omega)$;
\item
the distances between adjacent elements of $\Sigma_{\varepsilon,\eta}(\Omega)$
take values $\mu_A = \eta$, $\mu_B= 1+2\eta$, and $\mu_C = 1+\eta $;
\item
the ordering of the distances with respect to the origin is coded
by the word $u_T(x_0)$;
\item
$\Sigma_{\varepsilon,\eta}(\Omega) = \bigl\{ \lfloor
c+n\varepsilon\rfloor+n\eta\ |\ n\in{\mathbb Z},\
\{c+n\varepsilon\}\in[0,l) \bigr\}$.
\end{enumerate}
\end{prop}
\begin{proof}
The parameters $\varepsilon$, $l$, and $c$ clearly satisfy
\begin{equation}\label{eq:param-cap}
\varepsilon \in (0,1)\,,\quad \max\{\varepsilon,1-\varepsilon\} <
l \leq 1\,,\quad 0 \in (c-l,c]\,.
\end{equation}
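Indeed, positivity of $\alpha,\beta,\gamma$ gives $\varepsilon\in(0,1)$ and $l\leq 1$, while
\[
l-\varepsilon = \frac{\alpha}{\alpha+2\beta+\gamma}>0\,,\qquad
l-(1-\varepsilon) = \frac{\gamma}{\alpha+2\beta+\gamma}>0\,,
\]
and $0\leq c<l$ because $x_0\in[0,\alpha+\beta+\gamma)$, so that $0\in(c-l,c]$.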
The condition in~\eqref{eq:Sigma} determining whether a given point $a+b\eta$ belongs to the
C\&P set $\Sigma_{\varepsilon,\eta}(\Omega)$ can be rewritten
\[
a-b\varepsilon\in\Omega\quad \Leftrightarrow\quad c+b\varepsilon-l< a\leq
c+b\varepsilon\quad \Leftrightarrow\quad a=\lfloor
c+b\varepsilon\rfloor\text{ and }\{c+b\varepsilon\}\in[0,l)\,.
\]
Therefore, the C\&P set $\Sigma_{\varepsilon,\eta}(\Omega)$ can be
expressed as
\begin{equation}\label{eq:CnP-cl}
\Sigma_{\varepsilon,\eta}(\Omega) = \bigl\{ \lfloor
c+n\varepsilon\rfloor+n\eta\ |\ n\in{\mathbb Z},\
\{c+n\varepsilon\}\in[0,l) \bigr\}\,.
\end{equation}
Let us denote
$y_n\mathrel{\mathop:}=\lfloor c+n\varepsilon\rfloor+n\eta$ and
$y_n^*\mathrel{\mathop:}=\{c+n\varepsilon\}$. From the choice of the
parameters $\varepsilon$ and $\eta$ we can derive that the sequence
$(y_n)_{n\in{\mathbb Z}}$ is strictly increasing. Since
$\Sigma_{\varepsilon,\eta}(\Omega) \subset \{ y_n\:|\:n\in
\mathbb{Z}\}$, to every element $y\in
\Sigma_{\varepsilon,\eta}(\Omega)$ corresponds a point $y^*$. We
show that the distance of $y$ and its right neighbour depends on
the position of $y^*$ in the interval $[0,l)$. Moreover, if $z$ is
the right neighbour of $y$ in $\Sigma_{\varepsilon,\eta}(\Omega)$,
then $z^*=\widetilde{T}(y^*)$, where
$\widetilde{T}:[0,l)\rightarrow[0,l)$ is a 3iet given by the
prescription
\begin{equation}\label{eq:T-CaP}
\widetilde{T}(x) =
\begin{cases}
x + \varepsilon & \text{if }x\in[0,l-\varepsilon) \,, \\
x + 2\varepsilon-1 & \text{if }x\in[l-\varepsilon,1-\varepsilon) \,, \\
x + \varepsilon-1 & \text{if }x\in[1-\varepsilon,l) \,.
\end{cases}
\end{equation}
Let us determine the right neighbour of a point
$y\in\Sigma_{\varepsilon,\eta}(c-l,c]$. Let $y=y_n$, $n\in{\mathbb Z}$,
i.e., $y_n^*=\{c+n\varepsilon\}\in[0,l)$. We discuss three
separate cases, all the time using the fact that
$\max\{\varepsilon,1-\varepsilon\} < l \leq 1$.
\begin{enumerate}[i)]
\item
if $y_n^*\in[0,l-\varepsilon)$ then $y_{n+1}^*=\{c+(n+1)\varepsilon\} =
y_n^* + \varepsilon\in[0,l)$ and
$\lfloor c+n\varepsilon\rfloor = \lfloor c+(n+1)\varepsilon\rfloor$. Hence the
distance between $y_n$ and its right neighbour is $y_{n+1}-y_n=\eta$.
\item
if $y_n^*\in[l-\varepsilon,1-\varepsilon)$ then
$y_{n+1}^*=\{c+(n+1)\varepsilon\} = y_n^* + \varepsilon \in[l,1)$,
hence $y_{n+1}$ does not belong to the set $\Sigma_{\varepsilon,\eta}(c-l,c]$.
However, $y_{n+2}^*=\{c+(n+2)\varepsilon\} = y_n^* + 2\varepsilon - 1\in[0,l)$
and $\lfloor c+(n+2)\varepsilon\rfloor = 1+\lfloor c+n\varepsilon\rfloor$.
Therefore the right neighbour of $y_n$ is $y_{n+2}$ and we have
$y_{n+2}-y_n = 1+2\eta$.
\item
if $y_n^*\in[1-\varepsilon,l)$ then $y_{n+1}^*=\{c+(n+1)\varepsilon\} =
y_n^* + \varepsilon-1\in[0,l)$, $y_{n+1}$ is the right neighbour of $y_n$
and $y_{n+1}-y_n = 1 + \eta$.
\end{enumerate}
As $y_0 = 0 \in \Sigma_{\varepsilon,\eta}(c-l,c]$ and $y_0^* =
\{c\}=c$, the distances between consecutive elements of the C\&P set
$\Sigma_{\varepsilon,\eta}(c-l,c]$ are coded by the infinite word
$u_{\widetilde{T}}(c)$. It is easy to see that with our choice of $l$,
$\varepsilon$, and $c$, the lengths of the partial intervals in the definition
of the 3iet $\widetilde{T}$ and the starting point $c$ differ from the
partial intervals of the 3iet $T$ and its starting point $x_0$ only by the
scaling factor $\frac{1}{\alpha+2\beta+\gamma}$ ($\widetilde{T}$ and $T$ are homothetic 3iets).
Therefore $u_{\widetilde{T}}(c) = u_T(x_0)$.
\end{proof}
Let us mention that a 3iet $T$ with the domain
$(0,\alpha+\beta+\gamma]$ corresponds also to a C\&P set with
parameters similar to~\eqref{eq:prevod}.
It is known that a word coding an $r$-interval exchange
transformation with arbitrary permutation of intervals has
complexity ${\cal C}(n)\leq (r-1)n+1$ for all $n\in{\mathbb N}$,
see~\cite{keane-mz-141}. It is useful to distinguish the words with full
complexity and the others.
\begin{defi}
A 3iet word is called \emph{non-degenerated}, if ${\cal C}(n)= 2n+1$ for
all $n\in{\mathbb N}$. Otherwise it is called \emph{degenerated}.
\end{defi}
The following proposition allows one to classify the words coding
3iet according to the parameters to periodic, 3iet degenerate, and
3iet non-degenerate infinite words.
\begin{prop}\label{prop:dege}
Let $T$ be a 3iet transformation of the interval $I$ with
parameters $\alpha$, $\beta$, $\gamma$, and let $x_0\in I$.
\begin{itemize}
\item
The infinite word $u_T(x_0)$ defined by~\eqref{eq:u_T} is aperiodic
if and only if
$$
\alpha+\beta \text{ and } \beta+\gamma \text{ are linearly independent over ${\mathbb Q}$.}
$$
\item
If the word $u_T(x_0)$ is aperiodic then it is degenerated if and only if
$$
\alpha+\beta+\gamma \in (\alpha+\beta){\mathbb Z} + (\beta+\gamma){\mathbb Z}\,.
$$
\end{itemize}
\end{prop}
\begin{proof}
The formula~\eqref{eq:CnP-cl} for the C\&P set
$\Sigma_{\varepsilon,\eta}(c-l,c]$ implies easily that if
$\varepsilon$ is rational, then the set
$\Sigma_{\varepsilon,\eta}(\Omega)$ is periodic, i.e., the orbit
of every point under the 3iet $\widetilde{T}$ is periodic. On the
other hand, if $\varepsilon$ is irrational, the sequence
$\{c+n\varepsilon\}$ is uniformly distributed, and thus also the
orbit of every point under $\widetilde{T}$ is dense in $[0,l)$.
The relation~\eqref{eq:prevod} between the parameters
$\varepsilon$ and $\alpha, \beta, \gamma$ implies the statement
about periodicity of $u_T(x_0)$.
The complexity of an infinite word coding a C\&P set with
irrational parameters $\varepsilon,\eta$ has been described
in~\cite{gmp-jtnb-15}. It is shown that such a word has the
complexity ${\cal C}(n)=2n+1$ for all $n$ if and only if the
length $l$ of the interval $\Omega$ from~\eqref{eq:Sigma}
satisfies $l\notin{\mathbb Z}+{\mathbb Z}\varepsilon$. The
relation~\eqref{eq:prevod} implies the necessary and sufficient
condition for the degeneracy of the corresponding infinite word.
\end{proof}
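For illustration, every 3iet word with parameters $(\alpha,\beta,\gamma)=(1,\sqrt2,1+\sqrt2)$ is aperiodic and degenerated: the numbers $\alpha+\beta=1+\sqrt2$ and $\beta+\gamma=1+2\sqrt2$ are linearly independent over ${\mathbb Q}$, whereas
\[
\alpha+\beta+\gamma = 2+2\sqrt2 = 2(\alpha+\beta) \in (\alpha+\beta)\,{\mathbb Z} + (\beta+\gamma)\,{\mathbb Z}\,.
\]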
We will use the following reformulation of the above statements.
\begin{coro}\label{coro:period}
The infinite word $u_T(x_0)$, defined by~\eqref{eq:u_T}, with
parameters $\alpha,\beta,\gamma>0$ is
\begin{itemize}
\item
periodic if there exist $K,L\in{\mathbb Z}$, \ $K,L\neq 0$ such that
\begin{equation}\label{eq:podminka-periodic}
(\alpha,\beta,\gamma)\left(\begin{smallmatrix}K\\K+L\\L\end{smallmatrix}\right)= 0\,,
\end{equation}
\item
aperiodic degenerate if there exist unique $K,L\in{\mathbb Z}$ such that
\begin{equation}\label{eq:podminka-degenerate}
(\alpha,\beta,\gamma)\left(\begin{smallmatrix}1\\1\\1\end{smallmatrix}\right)=
(\alpha,\beta,\gamma)\left(\begin{smallmatrix}K\\K+L\\L\end{smallmatrix}\right)\,.
\end{equation}
\end{itemize}
\end{coro}
Note that the sequence $\{c+n\varepsilon\}$ being uniformly
distributed for $\varepsilon$ irrational implies not only the
aperiodicity of the infinite word, but also that the densities of
letters are well defined.
\begin{coro}\label{coro:hustoty}
All letters in a 3iet word $u$ with parameters
$\alpha,\beta,\gamma$ have a well defined density and the vector
of densities of $u$, denoted by $\vec{\rho}_u \mathrel{\mathop:}=
\big(\rho(A),\rho(B),\rho(C)\big)$, is proportional to the vector
$(\alpha,\beta,\gamma)$.
\end{coro}
For the transformation $T$ of exchange of $r$ intervals, it is
generally difficult to describe the conditions under which the
corresponding dynamical system is minimal, i.e., under which
condition the orbit $\{T^n(x_0)\ |\ n\in \mathbb{Z}\}$ of any
point $x_0$ is dense in the domain of $T$. Keane provides
in~\cite{keane-mz-141} two sufficient conditions for the
minimality of $T$: one of them is the linear independence of
parameters $\alpha$, $\beta$ and $\gamma$ over ${\mathbb Q}$;
second, weaker condition is that the orbits of all discontinuity
points of $T$ are disjoint. This condition is called i.d.o.c.
In~\cite{ferenczi-holton-zamboni-jam-89} it is shown that the
parameters $\alpha,\beta,\gamma$ fulfill i.d.o.c. if and only if
they satisfy neither~(\ref{eq:podminka-periodic})
nor~(\ref{eq:podminka-degenerate}). Nevertheless, even the weaker
condition i.d.o.c. is only sufficient, but not necessary for the
minimality of the dynamical system of $T$. The geometric
representation of 3iet $T$ using a cut-and-project set allows us
to provide a simple characterization of minimal dynamical systems
among 3iet.
\begin{coro}
The dynamical system given by a 3-interval exchange transformation
$T$ with parameters $\alpha, \beta,\gamma$ is minimal if and only if the numbers
$\alpha + \beta$ and $\beta + \gamma$ are linearly independent over
$\mathbb{Q}$.
\end{coro}
\begin{rem} It can be shown (see~\cite{arnoux-rauzy-bcmf-119,ferenczi-holton-zamboni-jam-89,gmp-jtnb-15})
that a 3iet word is degenerated if and only if the orbits of the
two discontinuity points of the corresponding 3iet $T$ have a
non-empty intersection, formally, $\{T^n(\alpha)\ |\ n\in
\mathbb{Z}\}\cap \{T^n(\alpha+\beta)\ |\ n\in
\mathbb{Z}\}\neq\emptyset$. The complexity of a degenerate 3iet
word is ${\cal C}(n) = n+\text{const}$ for sufficiently large $n$.
Cassaigne~\cite{cassaigne-dlt3} calls one-sided infinite words
with such complexity quasi-sturmian words. By a slight
modification of his results one can show that for any 3iet word
$u$ with complexity $\mathcal{C}_u(n) \leq n+\text{const}$ there exists
a sturmian word $(v_n)_{n\in{\mathbb Z}}$ over $\{0,1\}$ and finite words
$w_0,w_1\in\{A,B,C\}^*$ such that
\[
u = \cdots w_{v_{-2}}w_{v_{-1}}\:|\: w_{v_0} w_{v_1} w_{v_2} \cdots
\,,
\]
that is, $u$ is obtained from $v$ by applying the morphism $0\mapsto w_0$ and
$1\mapsto w_1$.
\end{rem}
\section{Morphisms preserving 3iet words}
\begin{defi}
A morphism on the alphabet $\{A,B,C\}$ is said to be \emph{3iet preserving} if
$\varphi(u)$ is a 3iet word for every 3iet word $u$.
\end{defi}
Let us recall that 3iet words are defined as the aperiodic words
coding 3-interval exchange transformations.
Similarly, sturmian words are aperiodic words coding 2-interval
exchange transformations.
In the rest of this section we give several useful examples of
3iet preserving morphisms.
\begin{ex}\label{ex:proof-3iet-pres}
We will prove that the morphism $\varphi$ over $\{A,B,C\}$ given by prescriptions
\begin{equation}\label{eq:pr-phi}
A \mapsto AC\,, \qquad
B \mapsto BC\,, \qquad
C \mapsto C\,,
\end{equation}
is 3iet preserving. Let us consider an arbitrary 3iet word $u$ with
arbitrary parameters $\alpha,\beta,\gamma$ and $x_0$. The corresponding transformation
$T$ is given by~\eqref{eq:3iet}. We show that the infinite word
$\varphi(u)$ is a 3iet word, namely the one with parameters
$\alpha'=\alpha,\beta'=\beta,\gamma'=\alpha+\beta+\gamma$
and $x_0'=x_0$.
\begin{figure}
\caption{Graph of the transformation $T'$.}
\label{fig:tp-ex}
\end{figure}
The transformation $T'$ (see Figure~\ref{fig:tp-ex}) corresponding
to the parameters $\alpha',\beta',\gamma'$ is given by
\begin{equation}\label{eq:ex-tp}
T'(x) =
\begin{cases}
x + \alpha+2\beta+\gamma & \text{if $x\in[0,\alpha)\mathrel{=\!\!\mathop:} I_A'$,} \\
x +\beta+\gamma & \text{if $x\in[\alpha,\alpha+\beta)\mathrel{=\!\!\mathop:} I_B'$,} \\
x -\alpha-\beta & \text{if $x\in[\alpha+\beta,2\alpha+2\beta+\gamma)\mathrel{=\!\!\mathop:} I_C'$.}
\end{cases}
\end{equation}
Obviously, \eqref{eq:3iet} and~\eqref{eq:ex-tp}
imply for a point $x\in I_A=I_A'$ that
\begin{align*}
T'(x) & = x + \alpha + 2\beta + \gamma \in I_C'\,, \\
(T')^2(x) & = x + \beta + \gamma = T(x)\,.
\end{align*}
Hence any point $x\in I_A$ belongs in the new 3iet to the interval $I_A'$,
its first iteration is $T'(x)\in I_C'$ and the second iteration
satisfies $(T')^2(x)=T(x)$, i.e., it sends $x$ to the same place as the
first iteration of the original transformation $T$. Therefore we substitute $A\mapsto AC$.
Similarly, for a point $x\in I_B=I_B'$ we have
\begin{align*}
T'(x) & = x + \beta + \gamma \in I_C'\,, \\
(T')^2(x) & = x - \alpha + \gamma = T(x)\,,
\end{align*}
and so $B\mapsto BC$.
Finally, for $x\in I_C \subsetneqq I_C'$ we get $T'(x) = T(x)$ and hence $C\mapsto C$.
Thus we see that the 3iet word coding $x'_0$ under $T'$ coincides with
the word $\varphi(u)$.
\end{ex}
\begin{ex}\label{ex:prehod-AC}
It is easy to see that the morphism $\xi$ over $\{A,B,C\}$ given by prescriptions
\begin{equation}\label{eq:pr-xi}
A \mapsto C\,, \qquad B \mapsto B\,, \qquad C \mapsto A\,,
\end{equation}
is a 3iet preserving morphism. To a 3iet word, which codes the orbit of $x_0$
under the transformation $T$ with intervals
$[0,\alpha)\cup[\alpha,\alpha+\beta)\cup[\alpha+\beta,\alpha+\beta+\gamma)$,
it assigns a 3iet word, which codes the orbit of $\alpha+\beta+\gamma-x_0$
under the transformation $\tilde{T}$ with intervals
$(0,\gamma]\cup(\gamma,\gamma+\beta]\cup(\gamma+\beta,\gamma+\beta+\alpha]$.
\end{ex}
\begin{ex}\label{ex:primitive-3iet-pres}
Let us consider the morphism $\varphi_0$ on $\{A,B,C\}$ given by $A\mapsto B$,
$B\mapsto BCB$ and $C\mapsto CAC$. It is a primitive morphism with
$\det\mat{M}_{\varphi_0}=1$ and $\mat{M}_{\varphi_0}^3>0$. Let $u$ be an arbitrary
3iet word with parameters $\alpha,\beta,\gamma$. Using the same technique as in
Example~\ref{ex:proof-3iet-pres} one can show that $\varphi_0$ is 3iet preserving;
the 3iet word coinciding with $\varphi_0(u)$ has parameters $\alpha'=\gamma$,
$\beta'=\beta+\alpha+\beta$, $\gamma'=\gamma+\beta+\gamma$.
\end{ex}
\section{Proof of Theorem A}
The aim of this section is to prove that the matrix $\mat{M}$
of a 3iet preserving morphism fulfills the following condition
\begin{equation}\label{eq:meme-cond}
\mat{M}\mat{E}\mat{M}^T = \pm\mat{E}, \qquad\text{where }
\mat{E} =
\Bigl(\!\begin{smallmatrix}
0 & 1 & 1 \\
-1 & 0 & 1 \\
-1 & -1 & 0
\end{smallmatrix}\Bigr)\,.
\end{equation}
The main tool used in the proof of this property of $\mat{M}$ is
the fact that the matrix of a sturmian morphism has determinant
$\pm1$ and some auxiliary statements formulated as
Lemma~\ref{lem:podprostorP} and Lemma~\ref{lem:e1e2e3}.
\begin{lem}\label{lem:podprostorP}
Let $\varphi$ be a 3iet preserving morphism and $\mat{M}$ its
incidence matrix. Let ${\cal P}$ be a subspace of ${\mathbb R}^3$ spanned
by the vectors
$\Bigl(\begin{smallmatrix}1\\1\\0\end{smallmatrix}\Bigr)$,
$\Bigl(\begin{smallmatrix}0\\1\\1\end{smallmatrix}\Bigr)$. Then
$\mat{M}{\cal P}={\cal P}$.
\end{lem}
\begin{proof}
If $u$ is a 3iet word with parameters $(\alpha,\beta,\gamma)$, then
according to~(\ref{eq:density-phi-u}) and
Corollary~\ref{coro:hustoty}, $\varphi(u)$ is a 3iet word with
parameters $(\alpha,\beta,\gamma)\mat{M}$. Since $\varphi$ is a 3iet
preserving morphism, it means that $\varphi(u)$ is aperiodic,
whenever $u$ is aperiodic. With the help of
Corollary~\ref{coro:period}, it implies that for every pair
$K,L\in{\mathbb Z}\setminus\{0\}$ and every triple of positive numbers
$(\alpha,\beta,\gamma)$, we have
\begin{equation}\label{eq:zachovava}
(\alpha,\beta,\gamma)\mat{M}\!\!\left(\!\!\begin{smallmatrix}K\\K+L\\L\end{smallmatrix}\!\!\right)
= 0 \quad\implies\quad \exists\, H,S\in{\mathbb Z}\setminus\{0\} \ \hbox{ such that }\
(\alpha,\beta,\gamma)\left(\!\!\begin{smallmatrix}H\\H+S\\S\end{smallmatrix}\!\!\right)
= 0\,.
\end{equation}
Since
$\Bigl\{\mat{M}\!\!\left(\!\!\begin{smallmatrix}K\\K+L\\L\end{smallmatrix}\!\!\right)
\,\Bigm|\, K,L\in{\mathbb Z} \Bigr\}$ is a 2-dimensional lattice in ${\mathbb R}^3$,
there exist two linearly independent pairs $K_1,L_1$, $K_2,L_2$
such that
$\mat{M}\!\!\left(\!\!\begin{smallmatrix}K_i\\K_i+L_i\\L_i\end{smallmatrix}\!\!\right)$,
$i=1,2$, have both positive and negative components, and therefore
for both $i=1,2$, there exist infinitely many triples
$(\alpha,\beta,\gamma)$ such that
$(\alpha,\beta,\gamma)\mat{M}\!\!\left(\!\!\begin{smallmatrix}
K_i\\K_i+L_i\\L_i\end{smallmatrix}\!\!\right)=0$. This, together
with~\eqref{eq:zachovava}, implies
\begin{equation}\label{eq:901}
\mat{M}\!\!\left(\!\!\begin{smallmatrix}
K_i\\K_i+L_i\\L_i\end{smallmatrix}\!\!\right) = {\it
const.}\!\!\left(\!\!\begin{smallmatrix}
H_i\\H_i+S_i\\S_i\end{smallmatrix}\!\!\right)\,,\quad \hbox{ for
some } \ H_i,S_i\in{\mathbb Z}\setminus\{0\}, \ i=1,2.
\end{equation}
Consequently, $\mat{M}{\cal P}\subseteq{\cal P}$. We now show that
$\mat{M}{\cal P}={\cal P}$. Suppose the opposite, i.e., that
$\mat{M}\!\!\left(\begin{smallmatrix}
1\\1\\0\end{smallmatrix}\right)$ and
$\mat{M}\!\!\left(\begin{smallmatrix}
0\\1\\1\end{smallmatrix}\right)$ are linearly dependent. Then
there exist $K,L\in{\mathbb Z}\setminus\{0\}$ such that
$$
\left(\begin{smallmatrix} 0\\0\\0\end{smallmatrix}\right)=
K\mat{M}\left(\begin{smallmatrix} 1\\1\\0\end{smallmatrix}\right)+
L \mat{M}\left(\begin{smallmatrix}
0\\1\\1\end{smallmatrix}\right)=\mat{M}\left(\!\!\begin{smallmatrix}
K\\K+L\\L\end{smallmatrix}\!\!\right)\,.
$$
This, however, implies that for arbitrary parameters
$(\alpha,\beta,\gamma)$, we have
$$
(\alpha,\beta,\gamma)\mat{M}\left(\!\!\begin{smallmatrix}
K\\K+L\\L\end{smallmatrix}\!\!\right) = 0\,,
$$
i.e., the word $\varphi(u)$ is periodic for arbitrary 3iet word $u$, which
is a contradiction with the assumption that $\varphi$ is a 3iet
preserving morphism.
\end{proof}
\begin{rem}\label{ex:regular}
Denote
$$
\vec{x}_1\mathrel{\mathop:}=
\Bigl(\begin{smallmatrix}1\\1\\0\end{smallmatrix}\Bigr)\,,\qquad
\vec{x}_2\mathrel{\mathop:}=
\Bigl(\begin{smallmatrix}0\\1\\1\end{smallmatrix}\Bigr)\,,\qquad
\vec{x}_3\mathrel{\mathop:}=
\Bigl(\begin{smallmatrix}0\\1\\0\end{smallmatrix}\Bigr)\,.
$$
The triplet of vectors $\vec{x}_1$, $\vec{x}_2$, $\vec{x}_3$ forms
a basis of ${\mathbb R}^3$. Denoting
$\mat{P}=\Bigl(\begin{smallmatrix}1&0&0\\1&1&1\\0&1&0\end{smallmatrix}\Bigr)\,,$
we have $\det\mat{P}=1$, and thus $\vec{x}_1$, $\vec{x}_2$,
$\vec{x}_3$ is also a basis of the integer lattice ${\mathbb Z}^3$. In the
same time, the pair $\vec{x}_1$, $\vec{x}_2$ is a basis of the
invariant subspace ${\cal P}$ of the matrix $\mat{M}$. We have
\[
\mat{P}^{-1}=\Bigl(\!\begin{smallmatrix}1&0&0\\0&0&1\\-1&1&-1\end{smallmatrix}\Bigr)\,
\quad\text{ and }\quad
\mat{P}^{-1}\mat{M}\mat{P} =
\begin{pmatrix}
m_{11}+m_{12} & m_{12}+m_{13} & m_{12}\\
m_{31}+m_{32} & m_{32}+m_{33} & m_{32}\\
0&0& -m_{12} +m_{22} - m_{32}
\end{pmatrix}\,,
\]
where the 0's in the third row correspond to the fact that
$\mat{P}^{-1}\mat{M}\mat{P}$ can be seen as the matrix $\mat{M}$
written in the basis $\vec{x}_1$, $\vec{x}_2$, $\vec{x}_3$, where
the first two vectors form a basis of the invariant subspace
${\cal P}$. Since $\mat{M}{\cal P}={\cal P}$, we have
\[
\det\begin{pmatrix}
m_{11}+m_{12} & m_{12}+m_{13}\\
m_{31}+m_{32} & m_{32}+m_{33}
\end{pmatrix} \neq 0\,.
\]
\end{rem}
\begin{lem}\label{lem:e1e2e3}
Let $\mat{M}=(m_{ij})$ be the incidence matrix of a 3iet preserving morphism $\varphi$.
Then
\begin{equation}\label{eq:mala-delta}
\det\begin{pmatrix}
m_{11}+m_{12} & m_{12}+m_{13}\\
m_{31}+m_{32} & m_{32}+m_{33}
\end{pmatrix} = \delta \in \{1,-1\}\,.
\end{equation}
\end{lem}
\begin{proof}
Let us choose a sturmian word $u\in\{A,C\}^{\mathbb Z}$ and a sequence $(u^{(m)})_{m\in{\mathbb N}}$ of 3iet
words such that $u=\lim_{m\rightarrow\infty} u^{(m)}$.
For example, let $u$ be the coding of
$x_0=0$ under the 2-interval exchange transformation $T$ with $I_0=[0,1-\alpha)$
and $I_1=[1-\alpha,1)$, where $\alpha$ is an arbitrary irrational number. Then we can choose
$u^{(m)}$ to be the 3iet word that codes $x_0=0$ under the 3-interval exchange
transformation with intervals $I_A=[0,1-\alpha - \frac{1}{m})$,
$I_B=[1-\alpha -\frac{1}{m},1-\alpha)$ and $I_C=[1-\alpha,1)$.
Let $\sigma$ be a morphism given by
\[
A \mapsto A\,,\quad B \mapsto AC\,, \quad C \mapsto C\,.
\]
Since any morphism on $\{A,B,C\}^{\mathbb Z}$ is a continuous mapping, we have
\[
(\sigma\circ\varphi)(u^{(m)}) \rightarrow (\sigma\circ\varphi)(u)\,.
\]
According to the assumption, the morphism $\varphi$ is 3iet preserving, hence
$\varphi(u^{(m)})$ are 3iet words. By
Lemma~\ref{lem:delta-of-3iet},
the words $(\sigma\circ\varphi)(u^{(m)})$, $m\in{\mathbb N}$, code 2-interval exchange transformations,
and by Lemma~\ref{lem:limit-of-sturm}, the limit of these words, that is
the word $(\sigma\circ\varphi)(u)$, is either sturmian or the densities
of its letters are rational.
The matrix of $\sigma$ is
$\Bigl(\!\begin{smallmatrix} 1 & 0 & 0 \\ 1 & 0 & 1 \\ 0 & 0 & 1\end{smallmatrix}\Bigr)$,
which implies by~(\ref{eq:matrix-compose}) that the matrix of $\sigma\circ\varphi$
is
\[
\mat{M}_{\sigma\circ\varphi} =
\begin{pmatrix}
m_{11}+m_{12} & 0 & m_{12}+m_{13} \\
m_{21}+m_{22} & 0 & m_{22}+m_{23} \\
m_{31}+m_{32} & 0 & m_{32}+m_{33}
\end{pmatrix} \,.
\]
Since $\sigma\circ\varphi$ maps a sturmian word $u$ over $\{A,C\}$ to a word
over the same alphabet, we are interested only in the matrix of this morphism over
$\{A,C\}$, that is,
\begin{equation}\label{eq:msvlnkou}
\widetilde{\mat{M}}\ = \
\begin{pmatrix}
m_{11}+m_{12} & m_{12}+m_{13} \\
m_{31}+m_{32} & m_{32}+m_{33}
\end{pmatrix} \,.
\end{equation}
Let us suppose that the densities of $A$ and $C$ in $u$ are $1-\alpha$ and $\alpha$,
respectively. Using~(\ref{eq:density-phi-u}) we find the density of $A$ in
$(\sigma\circ\varphi)(u)$ to be
\begin{equation}\label{eq:density-b}
\rho(A) = \frac{(1-\alpha, \alpha)\ \widetilde{\mat{M}}
\Bigl(\begin{smallmatrix}1\\0\end{smallmatrix}\Bigr)}
{(1-\alpha, \alpha)\ \widetilde{\mat{M}}
\Bigl(\begin{smallmatrix}1\\1\end{smallmatrix}\Bigr)}\,.
\end{equation}
If $\rho(A)$ is irrational, the word $(\sigma\circ\varphi)(u)$ is sturmian
and hence the morphism $\sigma\circ\varphi$ is sturmian. This implies
$\det\widetilde{\mat{M}}=\pm 1$.
The irrational number $\alpha$, i.e., the density of $C$ in the sturmian word $u$,
was chosen arbitrarily. Therefore $\rho(A)$, given by~(\ref{eq:density-b}),
can be rational for every irrational $\alpha$ only in the case when
\begin{equation}\label{eq:rational-rhoB}
p\widetilde{\mat{M}}\Bigl(\begin{smallmatrix}1\\0\end{smallmatrix}\Bigr) = q
\widetilde{\mat{M}}\Bigl(\begin{smallmatrix}1\\1\end{smallmatrix}\Bigr),
\quad \hbox{for some} \ \ p,q \in {\mathbb Z}\setminus\{0\}\,.
\end{equation}
This however implies that the matrix $\widetilde{\mat{M}}$ is
singular, which contradicts Remark~\ref{ex:regular}.
\end{proof}
We are now in position to finish the proof of Theorem A.
\begin{thma}
Let $\mat{M}$ be the incidence matrix of a 3iet preserving morphism.
Then
\begin{equation}\label{eq:meme}
\mat{M}\mat{E}\mat{M}^T = \pm\mat{E},\quad\text{where }\ \mat{E} =
\Bigl(\!\begin{smallmatrix}0&1&1\\-1&0&1\\-1&-1&0\end{smallmatrix}\Bigr)\,.
\end{equation}
\end{thma}
\begin{proof}
Using the notation of Remark~\ref{ex:regular} for the matrix
$\mat{P}$, we obviously see that the matrix
$\mat{P}^{-1}\mat{M}\mat{P}$ has $(0,0,-1)$ for its left
eigenvector corresponding to the eigenvalue $-m_{12} +m_{22} -
m_{32}$. It is then trivial to verify that
$(0,0,-1)\mat{P}^{-1}=(1,-1,1)$ is a left eigenvector of the
matrix $\mat{M}$ corresponding to the same eigenvalue. Since
\begin{equation}\label{eq:det_PMP}
\det{\mat{M}} = \det (\mat{P}^{-1}\mat{M}\mat{P}) = \delta (-m_{12} +m_{22} - m_{32})\,,
\end{equation}
where $\delta\in\{-1,1\}$ is given by~(\ref{eq:mala-delta}),
we derive that $(1,-1,1)$ is a left eigenvector of the matrix $\mat{M}$ corresponding to the
eigenvalue $\delta\det{\mat{M}}$. Denoting $\Delta\mathrel{\mathop:}= \det{\mat{M}}$, we can write
\begin{equation}\label{eq:vector}
(1,-1,1)\mat{M}= \delta\Delta (1,-1,1)\,.
\end{equation}
This implies that the matrix $\mat{M}$ can be written in the following form,
\begin{equation}\label{eq:matM}
\mat{M}=
\begin{pmatrix}
m_{11} & m_{12} & m_{13} \\
m_{11}+m_{31}-\delta\Delta & m_{12}+m_{32}+\delta\Delta & m_{13}+m_{33} -\delta\Delta \\
m_{31}& m_{32} & m_{33}
\end{pmatrix}\,.
\end{equation}
With this, one can verify by inspection that $\mat{M}\mat{E}\mat{M}^T = \delta\mat{E}$,
using Lemma~\ref{lem:e1e2e3} to simplify the algebraic expressions.
\end{proof}
In the course of the above proof we have also shown the following interesting statement.
\begin{coro}\label{coro:eigenvalues-M}
Let $\mat{M}$ be the matrix of a 3iet preserving morphism $\varphi$.
Then the vector $(1,-1,1)$ is a left eigenvector
of $\mat{M}$, associated with the eigenvalue $\det\mat{M}$ or $-\det\mat{M}$, i.e.,
\begin{equation}\label{eq:left-eigen}
(1,-1,1)\mat{M} = \pm\det\mat{M}(1,-1,1)\,.
\end{equation}
The other eigenvalues $\lambda_1$ and $\lambda_2$ of the matrix
$\mat{M}$ are either quadratic mutually conjugate algebraic units,
or $\lambda_1, \lambda_2 \in \{1, -1\}$.
\end{coro}
From the form~(\ref{eq:matM}) of the matrix $\mat{M}$ we derive
the following Corollary.
\begin{coro}\label{coro:sumy-radku}
Let $\mat{M}$ be the incidence matrix of a 3iet preserving morphism. Then the
sum of its first and third rows differs from the sum of its
second row by $\pm\det\mat{M}$. Formally,
\[
(1,0,1)\mat{M}\Big(\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Big) -
(0,1,0)\mat{M}\Big(\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Big) = \pm\det\mat{M}\,.
\]
\end{coro}
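For the reader's convenience, here is a brief verification, using only the form~(\ref{eq:matM}) obtained in the proof of Theorem~A (with $\Delta=\det\mat{M}$ and $\delta\in\{-1,1\}$): denoting by $r_1,r_2,r_3$ the row sums of $\mat{M}$, the form~(\ref{eq:matM}) gives $r_2=r_1+r_3-\delta\Delta$, whence
\[
(1,0,1)\mat{M}\Big(\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Big) -
(0,1,0)\mat{M}\Big(\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Big)
= r_1+r_3-r_2 = \delta\Delta = \pm\det\mat{M}\,.
\]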
\section{3iet preserving morphisms versus fixed points}
The proof of Theorem B, which is given in
Section~\ref{sec:proof-B}, is based on properties of 3iet
words which are fixed points of morphisms. In this section we
therefore inspect which 3iet preserving morphisms have a fixed
point.
A fixed point of a morphism $\varphi$ over an alphabet $\mathcal{A}$ is
the limit $\lim_{n\rightarrow\infty}\varphi^n(a_i)|\varphi^n(a_j)$
for some $a_i,a_j\in\mathcal{A}$. Similarly to the case of sturmian
words, the set of 3iet words is not compact, and therefore an
accumulation point $u$ of a sequence
$(u^{(m)})_{m\in{\mathbb N}}$ of 3iet words is not necessarily a 3iet word.
A special case in which the accumulation point does belong to the set of
3iet words is treated by the following lemma.
\begin{lem}\label{lem:apendix}
Let $\alpha,\beta,\gamma$ be positive real numbers such that
$\alpha+\beta$ and $\beta+\gamma$ are linearly independent over ${\mathbb Q}$.
Let $T_1$, $T_2$ be the 3iet transformations with parameters
$\alpha,\beta,\gamma$ and domain $[0,\alpha+\beta+\gamma)$,
$(0,\alpha+\beta+\gamma]$, respectively. Let $(u^{(n)})_{n\in{\mathbb N}}$
be a sequence of 3iet words and $(x^{(n)})_{n\in{\mathbb N}}$ a sequence of
points in $[0,\alpha+\beta+\gamma]$ such that
\begin{itemize}
\item
$u^{(n)}=u_{T_1}(x^{(n)})$ or $u^{(n)}=u_{T_2}(x^{(n)})$ for all $n\in{\mathbb N}$;
\item
$x^{(n)}$ is a monotone sequence with limit $x$.
\end{itemize}
Then $\lim_{n\to\infty}u^{(n)}$ exists and is equal to the 3iet
word $u_{T_1}(x)$ or $u_{T_2}(x)$.
\end{lem}
\begin{proof}
We use a statement from~\cite{gmp-jtnb-15}.
For a given $m$ put
$$
D_m\mathrel{\mathop:}= \{T^{i}_1(\alpha), T^{i}_1(\alpha+\beta),
T^{i}_2(\alpha), T^{i}_2(\alpha+\beta)\mid -m\leq i\leq m \}\,.
$$
Let $a<b$ be such that $(a,b)\cap D_m=\emptyset$. Then for all
$z\in(a,b)$ we have
\begin{eqnarray}
\label{eq:Dmi}
\dd\bigl(u_{T_1}(a),u_{T_1}(z)\bigr) < \frac{1}{1+m}\,, &\quad&
\dd\bigl(u_{T_1}(a),u_{T_2}(z)\bigr) < \frac{1}{1+m}\,,\\[2mm]
\dd\bigl(u_{T_2}(b),u_{T_1}(z)\bigr) < \frac{1}{1+m}\,, &\quad&
\dd\bigl(u_{T_2}(b),u_{T_2}(z)\bigr) < \frac{1}{1+m}\,.
\label{eq:Dmii}
\end{eqnarray}
Assume that the sequence $(x^{(n)})_{n\in{\mathbb N}}$ is decreasing. For
$\varepsilon>0$, we find $m\in{\mathbb N}$ such that
$\varepsilon>\frac1{m+1}$ and we put $\delta_m\mathrel{\mathop:}= \inf\{y-x
\mid y\in D_m,\ y>x \}$, so that $(x,x+\delta_m)\cap D_m=\emptyset$. Since $x^{(n)}\searrow x$, there exists
$n_0$ such that for all $n>n_0$ we have $x\leq
x^{(n)}<x+\delta_m$. Since $u^{(n)}=u_{T_1}(x^{(n)})$ or
$u^{(n)}=u_{T_2}(x^{(n)})$, we obtain, using~\eqref{eq:Dmi} for
the interval $(a,b)=(x,x+\delta_m)$, that
$\dd\bigl(u_{T_1}(x),u^{(n)}\bigr)<\varepsilon$, which implies
$\lim_{n\to\infty} u^{(n)}=u_{T_1}(x)$. Similarly we
use~\eqref{eq:Dmii} in the case $x^{(n)}\nearrow x$.
\end{proof}
\begin{rem}
The assumption of primitivity of the morphism $\varphi$ is essential in Proposition~\ref{prop:fixedpoint} below.
For example, the morphism $\varphi$ defined by~\eqref{eq:pr-phi} is 3iet preserving,
yet the only fixed points of an arbitrary power $\varphi^p$, $p\in{\mathbb N}$, $p\geq 1$, are
\[
\cdots CCC|ACCC \cdots, \qquad
\cdots CCC|BCCC \cdots, \qquad
\cdots CCC|CCC \cdots.
\]
\end{rem}
The following proposition deals with the original aim of this
section, namely with the search for 3iet preserving morphisms
having 3iet words as their fixed points.
\begin{prop}\label{prop:fixedpoint}
Let $\varphi$ be a primitive 3iet preserving morphism.
Then there exists $p\in{\mathbb N}$, $p\geq 1$, such
that $\varphi^p$ has a fixed point, and this fixed point is a 3iet word.
\end{prop}
\begin{proof}
Without loss of generality, we may assume that the incidence
matrix $\mat{M}$ of the morphism $\varphi$ is positive. Otherwise,
we show the validity of the statement for $\psi=\varphi^k$ for
some $k$, which implies the validity of the statement for
$\varphi$.
Let $(\alpha,\beta,\gamma)$ be a positive left eigenvector of
$\mat{M}$. First we show that an infinite word coding a 3iet with
such parameters is not periodic. For contradiction, assume that
$(\alpha,\beta,\gamma)$ satisfy~\eqref{coro:period}, that is,
\begin{equation}\label{eq:xxxx}
(\alpha,\beta,\gamma)\left(\!\begin{smallmatrix}K\\K+L\\L\end{smallmatrix}\!\right)
= 0\,, \quad\text{for some $K,L\in{\mathbb Z}\setminus\{0\}$.}
\end{equation}
If the Perron eigenvalue $\lambda_1$ of $\mat{M}$ is a quadratic
irrational number, one can assume without loss of generality that
the components of the vector $(\alpha,\beta,\gamma)$ belong to the
quadratic field $\mathbb{Q}(\lambda_1)$. For any $x\in
\mathbb{Q}(\lambda_1)$, denote by $x'$ the image of $x$ under the
Galois automorphism of $\mathbb{Q}(\lambda_1)$. Since the matrix
$\mat{M}$ and the vector
$\left(\!\begin{smallmatrix}K\\K+L\\L\end{smallmatrix}\!\right)$
have integer components, the vector $(\alpha',\beta',\gamma')$ is
an eigenvector to the eigenvalue $\lambda'_1 = \lambda_2$ and
satisfies
\[
(\alpha',\beta',\gamma')\left(\!\begin{smallmatrix}K\\K+L\\L\end{smallmatrix}\!\right)
= 0\,.
\]
Using Corollary~\ref{coro:eigenvalues-M}, the vector $(1, -1,1)$
is a left eigenvector of $\mat{M}$ corresponding to the eigenvalue
$\pm \det\mat{M}$. Therefore vectors $(\alpha',\beta',\gamma')$,
$(\alpha,\beta,\gamma)$ and $(1, -1,1)$ are eigenvectors of
$\mat{M}$ corresponding to different eigenvalues, which means that
they are linearly independent. All of them are orthogonal to the
vector
$\left(\!\begin{smallmatrix}K\\K+L\\L\end{smallmatrix}\!\right)$,
which implies $K=L=0$. This contradicts~\eqref{eq:xxxx}.
By Corollary~\ref{coro:eigenvalues-M}, it remains to discuss the
case when the Perron eigenvalue of $\mat{M}$ is $\lambda_1=1$.
This is impossible due to the fact that a positive integral matrix
$\mat{M}$ cannot have $1$ as its eigenvalue corresponding to a
positive eigenvector. Thus we have shown that the infinite word
coding a 3iet with parameters $\alpha,\beta,\gamma$ is not
periodic.
Denote by $T_1$ and $T_2$ the 3iet transformations with parameters
$\alpha,\beta,\gamma$ and domain $[0,\alpha+\beta+\gamma)$,
$(0,\alpha+\beta+\gamma]$, respectively.
Let $u^{(0)}$ be an arbitrary 3iet word coding the orbit of a
point by $T_1$. Put
\[
u^{(n)}\mathrel{\mathop:}= \varphi^n(u^{(0)})\,, \quad\text{for $n\geq 1$.}
\]
Since the vector of densities of $u^{(0)}$ is a left eigenvector
of the incidence matrix of the morphism $\varphi$, every word
$u^{(n)}$, $n\in{\mathbb N}$, has the same density of letters. As $\varphi$
is a 3iet preserving morphism, the word $u^{(n)}$ is a 3iet word
coding the orbit of a point under $T_1$ or $T_2$, for every
$n\in{\mathbb N}$.
The space of infinite words over the alphabet $\{A,B,C\}$ is
compact, and thus there exists a Cauchy subsequence of the
sequence $(u^{(n)})_{n\in{\mathbb N}}$. Therefore there exist
$m_0,n_0\in{\mathbb N}$, $n_0>m_0$, such that
\begin{equation}\label{eq:401}
\dd\bigl(u^{(n_0)},u^{(m_0)}\bigr)<\frac12\,.
\end{equation}
Set $p\mathrel{\mathop:}= n_0-m_0$ and $v=\cdots v_{-2}v_{-1}|v_0v_1\cdots
\mathrel{\mathop:}= u^{(m_0)}$. Since $ u^{(n_0)} =
\varphi^{n_0-m_0}(u^{(m_0)}) = \varphi^p(v)$,
inequality~\eqref{eq:401} can be rewritten as
\begin{equation}\label{eq:402}
\dd\bigl(\varphi^p(v),v\bigr)<\frac12 \,.
\end{equation}
The latter, together with the primitivity of the morphism
$\varphi$, implies
\[
\varphi^p(v_0)=v_0w_0\quad\hbox{ and }\quad
\varphi^p(v_{-1})=w_{-1}v_{-1}
\]
for some non-empty words $w_0,w_{-1}\in\{A,B,C\}^*$. Therefore the
morphism $\varphi^p$ has the fixed point
\[
\lim_{n\to\infty} \varphi^{np}(v)\,.
\]
Since $\varphi^{np}(v)$ is a 3iet word given by $T_1$ or $T_2$,
for every $n$ there exists a number
$x^{(n)}\in[0,\alpha+\beta+\gamma]$ such that
\[
\varphi^{np}(v) = u_{T_1}(x^{(n)})\ \hbox{ or }\
u_{T_2}(x^{(n)})\,.
\]
Denote by $x$ the limit of some monotone subsequence of
$(x^{(n)})_{n\in{\mathbb N}}$, i.e., $ x=\lim_{n\to\infty}x^{(k_n)}$.
According to Lemma~\ref{lem:apendix},
\[
\lim_{n\to\infty} \varphi^{np}(v) = u_{T_1}(x)\ \hbox{ or }\
u_{T_2}(x)\,,
\]
which means that $\varphi^p$ has as its fixed point a 3iet word,
namely $u_{T_1}(x)$ or $u_{T_2}(x)$, respectively.
\end{proof}
\section{Proof of Theorem~B}\label{sec:proof-B}
In the proof of Theorem B we use certain properties of discrete
sets associated with 3iet words. Every 3iet word can be
geometrically represented using a C\&P set. On the other hand,
every fixed point of a primitive morphism can be represented by a
self-similar set, which is constructed using a right eigenvector
of the matrix of the morphism. The crucial point in the proof of
Theorem B is the fact that for a 3iet word being a fixed point of
a primitive morphism these two geometric representations coincide.
We first show that the determinant of the incidence matrix of a
3iet preserving morphism is at most $1$ in modulus. For that we
use the following technical lemma.
\begin{lem}\label{lem:Pk}
Let $\varepsilon\in(0,1)$ be a quadratic irrational number with
conjugate $\varepsilon'<0$. Let $\lambda\in(0,1)$ be a quadratic
unit such that its conjugate satisfies $\lambda'>1$ and
$\lambda'{\mathbb Z}[\varepsilon']={\mathbb Z}[\varepsilon'] \mathrel{\mathop:}= {\mathbb Z} +
\varepsilon'{\mathbb Z}$. Let us denote $\Lambda\mathrel{\mathop:}=\lambda'$,
$\eta\mathrel{\mathop:}=-\varepsilon'$ and
\[
P_n(x)\mathrel{\mathop:}=
\#\bigl(x,x+(1+2\eta)\Lambda^n\bigr]\cap\Sigma_{\varepsilon,\eta}(\Omega)\,,
\]
where $\Omega$ is a bounded interval. Then there is a constant $R$ such that
\[
|P_n(x) - P_n(y)| \leq R\,,
\]
for any $x,y\in{\mathbb R}$ and $n\in{\mathbb N}$.
\end{lem}
The proof of this lemma exploits some simple properties of C\&P sets
which are, however, not related to infinite words. Therefore we
postpone it to the Appendix.
\begin{prop}\label{prop:detM-01}
The incidence matrix $\mat{M}$ of a primitive 3iet preserving
morphism $\varphi$ satisfies $|\det\mat{M}|\leq 1$.
\end{prop}
\begin{proof}
Without loss of generality we assume that $\varphi$ has a 3iet
fixed point $u$, and, moreover, that both the matrix $\mat{M}$ and
its spectrum are positive. This is possible since according to
Proposition~\ref{prop:fixedpoint} for any primitive 3iet
preserving morphism there exists $p\in{\mathbb N}$ such that $\varphi^p$
has a fixed point and $|\det\mat{M}|\leq 1\ \Leftrightarrow\
|\det\mat{M}^p|\leq 1$.
Let us denote by $\Lambda$ the dominant (Perron) eigenvalue of
$\mat{M}$. Its second eigenvalue is by
Corollary~\ref{coro:eigenvalues-M} equal to $\pm\det\mat{M}$, the
third one is denoted by $\lambda$. A positive integer matrix
cannot have $1$ as its dominant eigenvalue, hence by
Corollary~\ref{coro:eigenvalues-M}, $\Lambda>1$ is a quadratic
algebraic unit such that $\Lambda'=\lambda$.
Without loss of generality we assume that a positive right
eigenvector associated with the Perron eigenvalue $\Lambda$ is
such that the modulus of its third component is greater than the
modulus of the first one. Otherwise, we use
$\xi\circ\varphi\circ\xi$ instead of $\varphi$, where $\xi$ is
defined as in Example~\ref{ex:prehod-AC}. Matrices corresponding
to $\varphi$ and $\xi\circ\varphi\circ\xi$ have the same spectrum,
the first and the last component of eigenvectors being
interchanged.
The fixed point $u$ of $\varphi$ is the coding of a 3iet with parameters
$\alpha,\beta,\gamma$, with a starting point $x_0$. By~(\ref{eq:density-phi-u})
and Corollary~\ref{coro:hustoty} the vector $(\alpha,\beta,\gamma)$ is a left
eigenvector of $\mat{M}$ corresponding to $\Lambda$.
In the proof we use properties of a C\&P set; we construct it in
such a way that it coincides with the geometric representation of
the fixed point $u$. Let us define parameters $\varepsilon,l,c$
and the interval $\Omega$ by~(\ref{eq:prevod}). Note that
$(l-\varepsilon,1-l,l-1+\varepsilon)$ is also an eigenvector to
$\Lambda$, because it is just a scalar multiple of
$(\alpha,\beta,\gamma)$, and, moreover, since $\Lambda$ is a
quadratic irrational number, the parameters $\varepsilon, l$
belong to the same quadratic algebraic field ${\mathbb Q}(\Lambda)$. By
$x'$ we denote the image of $x\in{\mathbb Q}(\Lambda)$ under the Galois
automorphism on ${\mathbb Q}(\Lambda)$.
Let us denote $\vec{F} =
\Big(\!\begin{smallmatrix}-\varepsilon\\1-2\varepsilon\\1-\varepsilon\end{smallmatrix}\!\Big)$.
The vector $\vec{F}$ is orthogonal to two left eigenvectors $(1,-1,1)$ and
$(l-\varepsilon,1-l,l-1+\varepsilon)$ associated with eigenvalues
$\pm\det\mat{M}$ and $\Lambda$, respectively. The matrix $\mat{M}$ has three
different eigenvalues, therefore $\vec{F}$ is a right eigenvector to the third
eigenvalue $\lambda$.
Since the matrix $\mat{M}$ is integral, the vector $\vec{F}'
\mathrel{\mathop:}=
\bigg(\!\begin{smallmatrix}-\varepsilon'\\1-2\varepsilon'\\1-\varepsilon'\end{smallmatrix}\!\bigg)$
is a right eigenvector corresponding to the dominant eigenvalue
$\lambda'=\Lambda$, that is,
\begin{equation}\label{eq:MF-ML}
\mat{M}
\bigg(\!\begin{smallmatrix}-\varepsilon'\\1-2\varepsilon'\\1-\varepsilon'\end{smallmatrix}\!\bigg)
= \lambda'
\bigg(\!\begin{smallmatrix}-\varepsilon'\\1-2\varepsilon'\\1-\varepsilon'\end{smallmatrix}\!\bigg)
= \Lambda
\bigg(\!\begin{smallmatrix}-\varepsilon'\\1-2\varepsilon'\\1-\varepsilon'\end{smallmatrix}\!\bigg)\,.
\end{equation}
Therefore the components of $\vec{F}'$ are either all positive or
all negative. By assumption, the modulus of the third component of
a right dominant eigenvector is greater than the modulus of the
first one, which implies that all components of $\vec{F}'$ are
positive, i.e., $-\varepsilon'>0$.
We define a C\&P set with parameters $\varepsilon,\eta,\Omega$,
where $\varepsilon$ and $\Omega$ are as above and we put
$\eta\mathrel{\mathop:}=-\varepsilon'$. By Proposition~\ref{prop:cap}, the
distances between adjacent elements of
$\Sigma_{\varepsilon,\eta}(\Omega)$ take values $\mu_A=\eta$,
$\mu_B=1+2\eta$, and $\mu_C = 1 + \eta$ and their ordering with
respect to the origin is coded by the word $u$. Let
$(t_n)_{n\in{\mathbb Z}}$ denote a strictly increasing sequence such that
$\Sigma_{\varepsilon,\eta}(\Omega) = \{t_n\:|\:n\in{\mathbb Z}\}$.
According to Section~\ref{sec:geometric}, this C\&P set is also
the geometric representation of the fixed point $u$ of $\varphi$
and
\[
\#\big((\Lambda t_n,\Lambda t_{n+1}]\cap \Sigma_{\varepsilon,\eta}(\Omega)\big) =
\begin{cases}
|\varphi(A)| & \text{if $t_{n+1}-t_n = \mu_A = \eta\,,$} \\
|\varphi(B)| & \text{if $t_{n+1}-t_n = \mu_B = 1+2\eta\,,$} \\
|\varphi(C)| & \text{if $t_{n+1}-t_n = \mu_C = 1+\eta\,.$}
\end{cases}
\]
As the fixed point of a morphism is also the fixed point of an arbitrary power
of this morphism, the geometric representations of $\varphi$ and $\varphi^n$ are the same
for any $n\in{\mathbb N}$. Since $AC$ is a factor of any 3iet word, there exist $k,m\in{\mathbb N}$ such that
\begin{align}
|\varphi^n(AC)| &= \#\big((\Lambda^n t_k,\Lambda^n t_{k+2}]
\cap \Sigma_{\varepsilon,\eta}(\Omega)\big)\,, \label{eq:vaphiAC}\\
|\varphi^n(B)| &= \#\big((\Lambda^n t_m,\Lambda^n t_{m+1}]
\cap \Sigma_{\varepsilon,\eta}(\Omega)\big)\,. \label{eq:varphiB}
\end{align}
By definition of the matrix of a morphism and by
Corollary~\ref{coro:sumy-radku}, we have
$|\varphi^n(AC)|-|\varphi^n(B)|=\pm(\det\mat{M})^n$. Observe that
the intervals $(\Lambda^n t_k,\Lambda^n t_{k+2}]$ and $(\Lambda^n
t_m,\Lambda^n t_{m+1}]$ have the same length, namely
$\Lambda^n(1+2\eta)$, since $t_{k+2}-t_k=\mu_A+\mu_C=1+2\eta=\mu_B=t_{m+1}-t_m$, and that the equality
$\lambda'{\mathbb Z}[\varepsilon']={\mathbb Z}[\varepsilon']$ holds due
to~(\ref{eq:MF-ML}). We can therefore use Lemma~\ref{lem:Pk},
which states that the difference between the right hand sides
of~(\ref{eq:vaphiAC}) and~(\ref{eq:varphiB}) is bounded by a
constant $R$ independent of $n$. Putting both facts together one
obtains
\[
|\det\mat{M}^n| \leq R\quad\text{ for any $n\in{\mathbb N}$.}
\]
The statement follows from the fact that $\det\mat{M}$ is an integer.
\end{proof}
\begin{coro}\label{coro:primitive-staci}
The incidence matrix of a 3iet preserving morphism satisfies $|\det\mat{M}|\leq 1$.
\end{coro}
\begin{proof}
Consider the primitive morphism $\varphi_0$, defined in
Example~\ref{ex:primitive-3iet-pres}, and let us denote by $\mat{M}_0$ a
power of its incidence matrix all of whose entries are positive. Let $\varphi$ be a
non-primitive 3iet preserving morphism and let $\mat{M}$ be its
matrix. The matrix $\mat{M}\mat{M}_0$ is positive, and thus it is
the matrix of a primitive 3iet preserving morphism. By
Proposition~\ref{prop:detM-01} we have
\[
1 \geq |\det(\mat{M}\mat{M}_0)| =
|\det\mat{M}|\underbrace{|\det\mat{M}_0|}_{=1} = |\det\mat{M}|\,.
\qedhere
\]
\end{proof}
\begin{thmb}
Let $\varphi$ be a 3iet preserving morphism and let $\mat{M}$ be its incidence matrix.
Then one of the following holds
\begin{itemize}
\item
$\det\mat{M}=0$ and $\varphi(u)$ is degenerated for every 3iet word $u$,
\item
$\det\mat{M}=\pm 1$ and $\varphi(u)$ is non-degenerated for every non-degenerated 3iet
word $u$.
\end{itemize}
\end{thmb}
\begin{proof}
We use notation from Lemma~\ref{lem:podprostorP} and
Remark~\ref{ex:regular}. In particular, recall
\[
\vec{x}_1\mathrel{\mathop:}=
\Bigl(\begin{smallmatrix}1\\1\\0\end{smallmatrix}\Bigr)\,,\qquad
\vec{x}_2\mathrel{\mathop:}=
\Bigl(\begin{smallmatrix}0\\1\\1\end{smallmatrix}\Bigr)\,,\qquad
\vec{x}_3\mathrel{\mathop:}=
\Bigl(\begin{smallmatrix}0\\1\\0\end{smallmatrix}\Bigr)\,.
\]
Lemma~\ref{lem:podprostorP} states that $\mat{M}{\cal P}={\cal
P}$, where ${\cal P}$ is a subspace of ${\mathbb R}^3$ spanned by vectors
$\vec{x}_1$, $\vec{x}_2$. Let us denote by $\mathcal{S}$ the
lattice $\mathcal{S} = {\mathbb Z}\vec{x}_1 + {\mathbb Z}\vec{x}_2$. The action of
the matrix $\mat{M}$ on the 2-dimensional subspace $\mathcal{P}$
has the matrix $\widetilde{\mat{M}}$ from~\eqref{eq:msvlnkou}, which has,
by Lemma~\ref{lem:e1e2e3}, determinant $\delta\in\{1,-1\}$.
Therefore the vectors $\mat{M}\vec{x}_1$ and $\mat{M}\vec{x}_2$
form a basis of $\mathcal{S}$ as well and thus
\begin{equation}\label{eq:SMS}
\mat{M}\mathcal{S} = \mathcal{S}\,.
\end{equation}
By an easy computation, $\mat{M}\vec{x}_3 = m_{12}\vec{x}_1 +
m_{32}\vec{x}_2 +
(-m_{12}-m_{32}+m_{22})\vec{x}_3$,
hence by~\eqref{eq:det_PMP} we have $\mat{M}\vec{x}_3\in\delta\Delta\vec{x}_3 +
\mathcal{S}$, where $\Delta=\det\mat{M}$ as before.
Moreover,
\[
\Bigl(\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Bigr) =
-\vec{x}_3 + \vec{x}_1 + \vec{x}_2 \quad\implies\quad
\mat{M}\Bigl(\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Bigr) \in
-\delta\Delta\vec{x}_3 + \mathcal{S}\,,
\]
and if we replace $\vec{x}_3$ on the right-hand side using
$-\vec{x}_3 = \Bigl(\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Bigr)-\vec{x}_1 -\vec{x}_2$
we obtain
\begin{equation}\label{eq:M1inS}
\mat{M}\Bigl(\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Bigr) \in
\delta\Delta\Bigl(\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Bigr) + \mathcal{S}\,.
\end{equation}
\underline{Case 1:} Let $\Delta = \det\mat{M} = 0$. Then
by~\eqref{eq:M1inS} and~(\ref{eq:SMS}) there exist $K_1,L_1\in{\mathbb Z}$
such that
\[
\mat{M}\Bigl(\!\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Bigr) =
\mat{M}\Bigl(\!\!\begin{smallmatrix}K_1\\K_1+L_1\\L_1\end{smallmatrix}\!\!\Bigr)
\quad\hbox{ which implies }\quad
(\alpha,\beta,\gamma)\mat{M}\Bigl(\!\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Bigr)
= (\alpha,\beta,\gamma)
\mat{M}\Bigl(\!\!\begin{smallmatrix}K_1\\K_1+L_1\\L_1\end{smallmatrix}\!\!\Bigr)\,,
\]
for arbitrary parameters $(\alpha,\beta,\gamma)$. It means that
$(\alpha,\beta,\gamma)\mat{M}$ are parameters of a degenerated 3iet word.
\underline{Case 2:} Let $\Delta = \det\mat{M} = \pm1$. Again,
by~\eqref{eq:M1inS} there exist $K_2,L_2\in{\mathbb Z}$ such that
\begin{equation}\label{eq:det1KL}
\mat{M}\Bigl(\!\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Bigr) =
\pm\Bigl(\!\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Bigr)
+ \Bigl(\!\!\begin{smallmatrix}K_2\\K_2+L_2\\L_2\end{smallmatrix}\!\!\Bigr)\,.
\end{equation}
We show that parameters $(\alpha,\beta,\gamma)\mat{M}$ correspond to a degenerated
3iet word only if the original parameters $(\alpha,\beta,\gamma)$ correspond to
a degenerated 3iet word.
Let $(\alpha,\beta,\gamma)$ be such that $(\alpha,\beta,\gamma)\mat{M}$ are
parameters of a degenerated 3iet word, i.e., there exist $K_3,L_3,H,S\in{\mathbb Z}$ such that
\begin{equation}\label{eq:thm2-pstrany1}
(\alpha,\beta,\gamma)\mat{M}\Bigl(\!\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Bigr)
= (\alpha,\beta,\gamma)
\mat{M}\Bigl(\!\!\begin{smallmatrix}K_3\\K_3+L_3\\L_3\end{smallmatrix}\!\!\Bigr) =
(\alpha,\beta,\gamma)
\Bigl(\!\!\begin{smallmatrix}H\\H+S\\S\end{smallmatrix}\!\!\Bigr)\,,
\end{equation}
where the last equality comes from~\eqref{eq:SMS}.
Multiplying the equation~\eqref{eq:det1KL} by $(\alpha,\beta,\gamma)$ from the left
one obtains
\begin{equation}\label{eq:thm2-pstrany2}
(\alpha,\beta,\gamma)\mat{M}\Bigl(\!\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Bigr) =
\pm(\alpha,\beta,\gamma)\Bigl(\!\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Bigr)
+ (\alpha,\beta,\gamma)
\Bigl(\!\!\begin{smallmatrix}K_2\\K_2+L_2\\L_2\end{smallmatrix}\!\!\Bigr)\,.
\end{equation}
Finally, comparing right-hand sides of~\eqref{eq:thm2-pstrany1}
and~\eqref{eq:thm2-pstrany2} we have
\[
(\alpha,\beta,\gamma)\Bigl(\!\begin{smallmatrix}1\\1\\1\end{smallmatrix}\Bigr) =
\pm(\alpha,\beta,\gamma)
\biggl(\!\!\begin{smallmatrix}K_2-H\\K_2+L_2-H-S\\L_2-S\end{smallmatrix}\!\!\biggr)\,,
\]
which means that
$(\alpha,\beta,\gamma)$ are parameters of a degenerated 3iet word.
\end{proof}
\section{Comments and open problems}
\begin{enumerate}[1)]
\item
We have derived that matrices of 3iet preserving morphisms belong to the monoid
$\text{E}(3,{\mathbb N}) := \{\boldsymbol{M}\in\mathbb{N}^{3\times 3}\;|\;
\boldsymbol{M}\boldsymbol{E}\boldsymbol{M}^T = \pm\boldsymbol{E}\:
\text{ and }\det\boldsymbol{M}=\pm 1\}$ where $\boldsymbol{E} =
\Big(\!\begin{smallmatrix}0&1&1\\-1&0&1\\-1&-1&0\end{smallmatrix}\Big)$.
Unfortunately --- in contrast to the sturmian case --- the converse is not
true. In fact, the monoid $\text{E}(3,{\mathbb N})$ contains matrices which are not
incidence matrices of any 3iet preserving morphism. As an example one can consider the matrix
$\mat{M} =\Big(\begin{smallmatrix}0&2&1\\2&3&5\\3&0&5\end{smallmatrix}\Big)$; a direct computation shows that $\det\mat{M}=1$ and $\mat{M}\mat{E}\mat{M}^T=\mat{E}$, so that indeed $\mat{M}\in\text{E}(3,{\mathbb N})$.
\item
The mapping $\varphi\mapsto\mat{M}_{\varphi}$, where $\varphi$ is a
3iet preserving morphism and $\mat{M}_{\varphi}$ is its incidence matrix, is not
one-to-one. One can show that for
$\big(\begin{smallmatrix}a&b\\c&d\end{smallmatrix}\big)
\in{\mathbb N}^{2\times2}$ with $ad-bc=\pm1$ there exist $a+b+c+d-1$ different
sturmian morphisms with this incidence matrix. The analogous question for matrices of 3iet preserving
morphisms remains open.
\item
Unlike the free monoid
$\text{SL}(2,{\mathbb N})=\{\mat{M}\in{\mathbb N}^{2\times2}\:|\:\det\mat{M}=1\}$, which is generated
by two matrices $\big(\begin{smallmatrix}1&1\\0&1\end{smallmatrix}\big)$
and $\big(\begin{smallmatrix}1&0\\1&1\end{smallmatrix}\big)$, the monoid
$\text{SL}(3,{\mathbb N})$ is not free, and, moreover, it is not finitely
generated~\cite[Appendix~A]{fogg}. It would be interesting to derive similar
results for the monoid $\text{E}(3,{\mathbb N})$.
\item
Even though the aim of this paper is not to investigate explicit prescriptions
of 3iet preserving morphisms, we can still provide some
information about them, based on our results.
It follows from the proof of Lemma~\ref{lem:e1e2e3} that for every 3iet
preserving morphism $\varphi:\{A,B,C\}^*\rightarrow\{A,B,C\}^*$ the morphism
given by $A\mapsto\sigma_{A,B}\circ\varphi(A)$ and
$B\mapsto\sigma_{A,B}\circ\varphi(B)$ is sturmian, where
$\sigma_{A,B}$: $A\mapsto A$, $B\mapsto AB$, $C\mapsto B$; analogously
for morphisms $\sigma_{A,C}$ and $\sigma_{B,C}$.
\item
In this paper we were not at all interested in the characterization of 3iet words,
which are fixed points of primitive morphisms, that is, of 3iet words $u$ such
that there exists a primitive morphism $\varphi$ for which $\varphi(u)=u$.
This question is completely solved for sturmian
words~\cite{yasutomi-dm-2,balazi-masakova-pelantova-i-5}.
Adamczewski~\cite{adamczewsi-jtnb-14} studied a weaker property of 3iet words,
the so-called primitive substitutivity. An infinite word $u$ over an alphabet
$\mathcal{A}$ is said to be primitively substitutive if there exists a word $v$ over an
alphabet $\mathcal{B}$, which is a fixed point of a primitive morphism, and
a morphism $\psi:\mathcal{B}^*\rightarrow\mathcal{A}^*$ such that $\psi(v)=u$.
Adamczewski, using results of Boshernitzan and Carroll~\cite{boshernitzan-carroll-jam-72},
proved that a non-degenerated 3iet word is primitively substitutive
if and only if normalized parameters $\varepsilon,l,c$ (see~\eqref{eq:prevod})
of the corresponding transformation belong to the
same quadratic field. A similar study can be found in~\cite{harriss-lamb-arxiv06}.
\end{enumerate}
\section{Proof of Lemma~\ref{lem:Pk}}
In this Appendix we prove Lemma~\ref{lem:Pk}, which is rather
technical. The proof uses the following four claims.
\begin{app-claim}
Let $\varepsilon,\eta$ be irrational numbers, $\varepsilon\neq-\eta$,
and let $\Omega_1$, $\Omega_2$
be arbitrary bounded intervals. Then
$\#\big(\Omega_1 \cap \Sigma_{\varepsilon,\eta}(\Omega_2)\big) =
\#\big(\Omega_2 \cap \Sigma_{\eta,\varepsilon}(\Omega_1)\big)$.
\end{app-claim}
\begin{proof}
\begin{align*}
\#\big(\Omega_1 \cap \Sigma_{\varepsilon,\eta}(\Omega_2)\big) &=
\#\{a+b\eta\:|\:a,b\in{\mathbb Z},\ a+b\eta\in\Omega_1,\ a-b\varepsilon\in\Omega_2\} = \\
&= \#\{a+c\varepsilon\:|\:a,c\in{\mathbb Z},\ a+c\varepsilon\in\Omega_2,\ a-c\eta\in\Omega_1\} = \\
&= \#\big(\Omega_2 \cap \Sigma_{\eta,\varepsilon}(\Omega_1)\big)\,.
\qedhere
\end{align*}
\end{proof}
\begin{app-claim}
Let $\varepsilon$ be a quadratic irrational number with conjugate
$\varepsilon'$. Let $\lambda$ be a quadratic unit whose conjugate
$\lambda'$ satisfies $\lambda'{\mathbb Z}[\varepsilon'] =
{\mathbb Z}[\varepsilon']$. Then
$\lambda'\Sigma_{\varepsilon,-\varepsilon'}(\Omega) =
\Sigma_{\varepsilon,-\varepsilon'}(\lambda\Omega)$.
\end{app-claim}
\begin{proof}
Let us consider $x = a-b\varepsilon\in{\mathbb Z}[\varepsilon]$. If we
denote $\eta = -\varepsilon'$, the number
$a+b\eta=a-b\varepsilon'\in{\mathbb Z}[\varepsilon']$ is the image of $x$
under the Galois automorphism and therefore we denote it by
$x'=a-b\varepsilon'$.
Note that the condition $\lambda'{\mathbb Z}[\varepsilon'] = {\mathbb Z}[\varepsilon']$
is equivalent to the condition $\lambda{\mathbb Z}[\varepsilon] = {\mathbb Z}[\varepsilon]$,
and that these two equalities imply that the mappings $x'\mapsto\lambda'x'$ and
$x\mapsto\lambda x$ are bijections on ${\mathbb Z}[\varepsilon']$ and ${\mathbb Z}[\varepsilon]$,
respectively.
By definition of a C\&P set we have
\[
\Sigma_{\varepsilon,-\varepsilon'}(\Omega) =
\{x'\in{\mathbb Z}[\varepsilon']\:|\:x\in\Omega\}\,.
\]
We derive
\begin{align*}
\lambda'\Sigma_{\varepsilon,-\varepsilon'}(\Omega) &=
\lambda'\{x'\in{\mathbb Z}[\varepsilon']\:|\:x\in\Omega\} =
\{\lambda'x'\in{\mathbb Z}[\varepsilon']\:|\:\lambda x\in\lambda\Omega\} = \\
&= \{y'\in{\mathbb Z}[\varepsilon']\:|\:y\in\lambda\Omega\} =
\Sigma_{\varepsilon,-\varepsilon'}(\lambda\Omega)\,.
\qedhere
\end{align*}
\end{proof}
The following claim is stated without proof, since it is just a special
case of Proposition~6.2 in~\cite{gmp-jtnb-15}.
\begin{app-claim}
Let $\hat{\varepsilon},\hat{\eta}$ be irrational numbers,
$\hat{\varepsilon}\neq-\hat{\eta}$, and let $\hat{\Omega}$ be an
arbitrary bounded interval. Then
\[
\Sigma_{\hat{\varepsilon},\hat{\eta}}((1+2\hat{\varepsilon})\hat{\Omega})
=
(1-2\hat{\eta})\Sigma_{\frac{\hat{\varepsilon}}{1-2\hat{\varepsilon}},
\frac{\hat{\eta}}{1+2\hat{\eta}}}(\hat{\Omega})\,.
\]
\end{app-claim}
\begin{app-claim}
Let $\tilde{\varepsilon},\tilde{\eta}$ be irrational numbers such that
$\tilde{\varepsilon}\neq-\tilde{\eta}$. Let $z\in{\mathbb R}$ and let $J$ be a bounded
interval. We denote
$Q(J,z)\mathrel{\mathop:}=\#(J\cap\Sigma_{\tilde{\varepsilon},\tilde{\eta}}(z-1,z])$.
Then there is a constant $R$ such that $|Q(J,z)-Q(J,t)|\leq R$ for every
$z,t\in{\mathbb R}$ and for every interval $J$.
\end{app-claim}
\begin{proof}
The condition $a-b\tilde{\varepsilon}\in(z-1,z]$, where
$a,b\in{\mathbb Z}$, can be equivalently rewritten as $a=\lfloor
z+b\tilde{\varepsilon}\rfloor=z+b\tilde{\varepsilon}-\{z+b\tilde{\varepsilon}\}$.
Hence
\[
\Sigma_{\tilde{\varepsilon},\tilde{\eta}}(z-1,z] =
\{b(\tilde{\varepsilon}+\tilde{\eta})+z-\{z+b\tilde{\varepsilon}\}\:|\:b\in{\mathbb Z}\}\,.
\]
We consider the interval $J$ with boundary points $c,c+l$, where
$c,l\in{\mathbb R}$ and $l>0$. If the point
$b(\tilde{\varepsilon}+\tilde{\eta})+z-\{z+b\tilde{\varepsilon}\}$
belongs to the set
$J\cap\Sigma_{\tilde{\varepsilon},\tilde{\eta}}(z-1,z]$, then
$c-z\leq b(\tilde{\varepsilon}+\tilde{\eta})\leq c+l-z+1$. On the
other hand, if $c-z+1<b(\tilde{\varepsilon}+\tilde{\eta})<c+l-z$
then the point
$b(\tilde{\varepsilon}+\tilde{\eta})+z-\{z+b\tilde{\varepsilon}\}$
belongs to
$J\cap\Sigma_{\tilde{\varepsilon},\tilde{\eta}}(z-1,z]$. It means
that the number of points in the set
$J\cap\Sigma_{\tilde{\varepsilon},\tilde{\eta}}(z-1,z]$ is at
least
$\big\lfloor\frac{l-1}{\tilde{\varepsilon}+\tilde{\eta}}\big\rfloor$
and at most
$\big\lceil\frac{l+1}{\tilde{\varepsilon}+\tilde{\eta}}\big\rceil$,
and hence
\[
\bigg\lfloor\frac{l-1}{\tilde{\varepsilon}+\tilde{\eta}}\bigg\rfloor \leq
Q(J,z) \leq \bigg\lceil\frac{l+1}{\tilde{\varepsilon}+\tilde{\eta}}\bigg\rceil\,.
\]
Note that the bounds on $Q(J,z)$ do not depend on $z$, and thus
the same estimate holds for $Q(J,t)$. Therefore
\[
|Q(J,z)-Q(J,t)| \leq
\bigg\lceil\frac{l+1}{\tilde{\varepsilon}+\tilde{\eta}}\bigg\rceil
-
\bigg\lfloor\frac{l-1}{\tilde{\varepsilon}+\tilde{\eta}}\bigg\rfloor\leq
2\bigg(1+\frac{1}{\tilde{\varepsilon}+\tilde{\eta}}\bigg)\mathrel{=\!\!\mathop:}
R\,.
\]
\end{proof}
Now we are in the position to conclude the proof of
Lemma~\ref{lem:Pk}.
\begin{proof}[Proof of Lemma~\ref{lem:Pk}]
Let us recall the definition of $P_n(x) =
\#\Big(\big(x,x+(1+2\eta)\Lambda^n\big]\,\cap\,\Sigma_{\varepsilon,\eta}(\Omega)\Big)$.
By Claim 1, we have
\[
P_n(x) = \#\Big(\Omega \,\cap\,
\Sigma_{\eta,\varepsilon}(x,x+(1+2\eta)\Lambda^n]\Big) =
\#\Big(\Lambda^n\Omega \,\cap\,
\Lambda^n\Sigma_{\eta,\varepsilon}(x,x+(1+2\eta)\Lambda^n]\Big)\,.
\]
As $\eta=-\varepsilon'$ and $\lambda'=\Lambda$, we have, by Claim
2,
\[
P_n(x) = \#\Big(\Lambda^n\Omega \,\cap\,
\Sigma_{\eta,\varepsilon}\big(\lambda^nx,\lambda^nx+(1+2\eta)\big]\Big)\,,
\]
where we used $\lambda\lambda'=\lambda\Lambda = 1$. Claim 3
further implies
\[
P_n(x) = \#\bigg(\Lambda^n\Omega \,\cap\,
(1-2\varepsilon)\Sigma_{\frac{\eta}{1-2\eta},\frac{\varepsilon}{1+2\varepsilon}}
\Big(\frac{\lambda^nx}{1+2\eta},\frac{\lambda^nx}{1+2\eta}+1\Big]\bigg)\,.
\]
Thus $P_n(x)=Q\big(J,\frac{\lambda^nx}{1+2\eta}+1\big)$ as defined
in Claim 4, where $J=\frac{1}{1-2\varepsilon}\Lambda^n\Omega$,
$\tilde{\varepsilon}= \frac{\eta}{1-2\eta}$ and
$\tilde{\eta}=\frac{\varepsilon}{1+2\varepsilon}$. The statement
of the lemma follows by applying Claim 4.
\end{proof}
\end{document}
\begin{document}
\begin{frontmatter}
\title{New Methods for Small Area Estimation with Linkage Uncertainty}
\author[rvt]{Dario Briscolini}
\ead{[email protected]}
\author[focal]{Loredana Di Consiglio}
\ead{[email protected]}
\author[rvt] {Brunero Liseo\corref{mycorrespondingauthor}}
\ead{[email protected]}
\author[rvt]{Andrea Tancredi}
\ead{[email protected]}
\author[focal]{Tiziana Tuoto}
\ead{[email protected]}
\address[rvt]{Sapienza Universit\`a di Roma, Via del Castro Laurenziano 9, Roma 00161, Italy}
\address[fox]{Eurostat, Luxembourg}
\address[focal]{Istat, Via Cesare Balbo, 16, 00184 Roma, Italy}
\cortext[mycorrespondingauthor]{Corresponding author}
\begin{abstract}
In Official Statistics, interest in data integration has been steadily growing, due to the need of extracting information from different sources. However, the effects of these procedures on the validity of the resulting statistical analyses have been disregarded for a long time.
In recent years, it has been largely recognized that linkage is not an error-free procedure and that linkage errors, such as false links and/or missed
links, can invalidate the reliability of estimates in standard statistical models.
In this paper we consider the general problem of making inference using data that have been probabilistically linked and we explore the effect of potential linkage errors on the production of small area estimates.
We describe the existing methods and propose and compare new approaches both from a classical and from a Bayesian perspective.
We perform a simulation study to assess pros and cons of each proposed method;
our simulation scheme aims at reproducing a realistic context both for small area estimation and record linkage procedures.
\end{abstract}
\begin{keyword} Markov Chain Monte Carlo
\sep Measurement error \sep Nested error model \sep Record Linkage \sep Uncertainty.
\end{keyword}
\end{frontmatter}
\section{Data integration and impact of linkage errors}
\label{diconsiglio_sec:1}
In Official Statistics, interest in data integration has been steadily growing, due to the need of extracting information from different sources. However, the effects of these procedures on the validity of the resulting statistical analyses have been disregarded for a long time.
In recent years, it has been largely recognized that linkage is not an error-free procedure and that linkage errors, such as false links and/or missed links, can invalidate the reliability of estimates in standard statistical models.
The effect of linkage errors on the calibration of linear regression models with variables observed in different sources was first illustrated by Neter et al. \cite{neter}. Major contributions to the development of this study can be found in Scheuren and Winkler (\cite{SW93},\cite{SW97}) and Lahiri and Larsen \cite{lahiri2005}.
Chambers \cite{chambers09} also considers the construction of a Best Linear Unbiased Estimator and its empirical version. He also proposes a maximum likelihood estimator, providing examples with application in linear regression models, with a partial generalization to the logistic case. A possible extension to sample-to-register linkage is also suggested.
On the Bayesian side, Tancredi and Liseo \cite{tl15} and Tancredi et al. \cite{tsl17} have proposed an integrated model with a feed-back effect in which inferential procedures for the regression are able to borrow strength from the linkage process and vice versa.
This article focuses on the effects of linkage errors on the production of small area estimates. In particular we consider the case of unit-level small area methods. They apply when some auxiliary variables $X$, whose totals are known for each small area, are available for each sampled unit. Small area predictions are usually constructed using linear (or possibly generalized) mixed models expressing the survey variable $Y$ in terms of $X$.
Samart and Chambers \cite{samartchambers14} consider the effect of linkage errors on mixed effect models, extending the settings in Chambers \cite{chambers09} and suggesting estimators of the variance components which are adjusted for linkage errors.
In official statistics, these mixed models are largely exploited for small area estimation in order to increase the detail of information at local level. Administrative data can also be used to increase information collected in sample surveys, in order to expand auxiliary information and improve the model fitting for small area estimation.
Linkage of external sources with basic statistical registers as well as with sample surveys can be carried out on different linkage scenarios. Di Consiglio and Tuoto \cite{diconsiglio14} performed a sensitivity analysis for different alternative linkage error scenarios in the linear and logistic regression settings.
In this paper, we present a comparative analysis of several different estimators of the parameters of a unit-level small area model both from a classical and a Bayesian perspective. We compare the results on
a pseudo population, where the values of the survey variable $Y$ and those of covariates $X$ are obtained from the survey on Household Income and Wealth, Bank of Italy, and the person identifiers come from the fictitious population census data \cite{ESSnet2011} created for the ESSnet DI, a European project on data integration run from 2009 to 2011. The data set contains 26,625 observations and consists of 25 variables.
In a classical framework, under the assumption that false matches occur only within the same small area, the linkage error affects the small area predictors via a bias on the estimation of fixed components and random effects. In addition, sample means of the covariates would also be erroneously evaluated.
Following Chambers \cite{chambers09}, we assume that sampling does not change the outcome of the linkage process and we derive an adjusted EBLUP estimator.
We also propose a Bayesian strategy where we jointly model the record linkage and the small area model using response variable and covariates available in different data sets.
We believe that the latter approach is able - in a very natural way - to
\begin{itemize}
\item
improve the performance of the linkage step through the use of the extra information contained in the $Y$'s (the response variable values) and the covariates $X$'s. This happens because pairs of records which do not adequately fit the small area model, say ${\cal M}$, will be automatically down-weighted in the matching process;
\item
allow one to account for matching uncertainty in the estimation procedure related to model ${\cal M}$ involving $Y$'s and $X$'s;
\item
improve the accuracy of the estimators of the parameters of model ${\cal M}$ in terms of bias.
\end{itemize}
Although we present several different strategies for estimating the parameters of the small area model, we stress the fact that a fair comparison among the different methods is not possible, since they consider different sets of assumptions. In the simulation study section we will discuss these issues in detail.
The linkage methods used in this paper refer to those implemented in \textit{RELAIS} \cite{Relais} on the frequentist side; for the Bayesian approaches we have used the methods described in \cite{tl15} and \cite{tsl17}, where categorical variables are used for the linkage procedure, while either
continuous or categorical variables can be considered in the inferential post-linkage step, as may be the case in small area models.
The rest of the paper is organized as follows: Section \ref{due} describes the statistical problem of linking data both from a classical and from a Bayesian perspective.
Section \ref{tre} illustrates the different strategies of estimation in small area models. Section \ref{quattro} compares the different methods using
a simulation setting and a realistic pseudo-population, as described above, which mimic typical data sets to be used in record linkage problems and in small area estimation as well.
We also perform a sensitivity analysis with some simulated data sets in order to assess the impact of the various assumptions in the different approaches.
\section{Linkage model and linkage errors}
\label{due}
From a statistical perspective, the operation of merging two (or more)
data sets can be important for two different and complementary reasons:
\begin{itemize}
\item[(i)] to obtain a larger reference data set or frame, suitable to perform more accurate statistical analyses;
\item[(ii)] to make inference on suitable statistical models via the additional information which could not be extracted from either one of the two single data sets.
\end{itemize}
If the merging step can be accomplished without errors (maybe because an error-free identification key is available and it can
be used to match units in different data sets), there are no specific consequences on the statistical procedures undertaken in both the situations. In practice, however, identification keys are rarely available and linkage between records is usually performed under uncertainty.
This issue has given rise to a very active line of research in the statistical and the Information Technology communities, named ``record linkage'', where
the possibility to make wrong matching decisions must be accounted for, especially when the result of the linking operation, namely the merged data set, must be used for further statistical analyses.
In order to briefly recall what record linkage is,
let us suppose we have two data sets, say $F_1$ and $F_2$, whose records respectively relate to statistical units (e.g. individuals, firms, etc.) of partially overlapping samples (or populations), say $S_1$ and $S_2$.
Records in each data set consist of several fields, or variables, either quantitative or categorical, which may be observed together with a potential amount of measurement error.
The goal of a record linkage procedure is to detect all the pairs of units $(j,j^\prime)$, with $j\in S_1$ and $j^\prime \in S_2$, such that $j$ and $j^\prime$ actually refer to the same unit.
If the main goal of the record linkage process is the former outlined above (case (i)), a new data set is created by merging together three different subsets of units: those which are present in both data sets, those belonging to $S_1$ only and those belonging to $S_2$ only.
Appropriate statistical data analyses may be then performed on the enlarged data set. Since the linkage step is done with uncertainty, the efficiency of the statistical analysis may be jeopardized by $i)$ the presence of duplicate units and $ii)$ a loss of power, mainly due to erroneous matching in the merging process.
On the other hand, the latter situation (case (ii)), which is more important for the scope of this paper, is even more challenging.
Let us denote the observed variables in $F_1$ by $(Y, W_1,W_2,\ldots,W_h)$, whereas the observed variables in $F_2$ are $(X_1, X_2, \dots, X_p, W_1,W_2,\ldots,W_h)$.
Also suppose that one is interested in performing a small area analysis in order to produce estimates of the variable $Y$ at area level, using the variables $X$ as covariates, restricted to those pairs of records which are declared matches after a record linkage analysis based on the variables $(W_1, \dots, W_h)$.
The intrinsic difficulties of such a problem are well documented for the linear regression case in Neter et al. \cite{neter} and deeply discussed in Scheuren and Winkler (\cite{SW93}, \cite{SW97}), Lahiri and Larsen \cite{lahiri2005} and Chambers \cite{chambers09}.
In the regression example, it might be easily seen that the presence of false matches (that is, matching record pairs which do not actually refer to the same statistical unit) reduces the observed level of association between $Y$ and $X$ and, as a consequence, introduces a bias towards zero when estimating the slope of the regression line.
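As a rough illustration of the size of this effect (under a simplified exchangeable-error scenario which is not part of the formal development below), suppose that each record is correctly linked with probability $\lambda$ and is otherwise linked at random to another record in the same block; then the naive least squares estimator of the slope has expectation close to $\lambda$ times the true slope, so that, for instance, a $10\%$ rate of false matches attenuates the estimated slope by roughly $10\%$.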
Similar biases may appear in every statistical procedure and, in most of the cases, the bias takes a specific direction. As another example, when linkage procedures are used for estimating the size $N$ of a population through a capture-recapture approach, the presence of false matches may severely reduce the final estimate of $N$.
\subsection{Record Linkage: Fellegi and Sunter's approach}
\label{2.1}
The most widespread and successful theory for record linkage was proposed by Fellegi and Sunter \cite{FS1969}. We start from two lists (e.g. a register and a sample), say $F_1$ and $F_2$, of size $N_1$ and $N_2$, and we let $\Omega= F_1 \times F_2$ be the set of all possible pairs of units belonging to different data sets.
The goal of a linkage process can be viewed as a classification problem where the pairs in
$\Omega= \{ (i,j):\ i \in F_1,\ j \in F_2\}$ have to be classified into two disjoint subsets $M$ and $U$, such that $M= \{(i,j) \in \Omega: i \equiv j \}$ is the link set and $U=\Omega \setminus M$ is the non-link set.
At the end of the linkage procedure, two possible kinds of error may occur: i) a false match or \textit{false positive}, that is, a pair is declared as a link but the two records actually refer to different units; ii) a missed match or \textit{false negative}, that is, a pair is declared as a non-link but the two records actually refer to the same unit.
In a more formal way, data sets $F_1$ and $F_2$ may be represented as two matrices, say $W_1$ and $W_2$.
Here
$$W_i=(w_{i1}, w_{i2}, \ldots w_{i N_i}) \qquad i = 1, 2,$$
where each single $w_{ij}$
is a vector $w_{ij}=(w_{ij1},\ldots,w_{ijh})$, that is $w_{ij}$ contains the
observed values of a categorical random vector $w=(w_1,\ldots,w_h)$ whose support is
$$
\mathcal{W}=\{w_{s_1 \ldots s_h}= (s_1,\ldots,s_h) \mid
s_1=1,\ldots,k_1;\ \ldots;\ s_h=1,\ldots, k_h\}.$$
Under this notation, we have
$$
M=\{ (j,j'): \textnormal{ record } j\in W_1
\textnormal{ and } j'\in W_2 \textnormal{ refer to the same unit}\},$$
and, of course, $U=\Omega \setminus M$ is the complementary set.
Notice that, in any application, no matter what the overlap between the two files of records is, the cardinality of $U$ is always much larger than the cardinality of $M$.
The statistical model for a record linkage analysis is built upon the so called comparison vectors
$q_{j j'}= (q_{jj'1}, \cdots, q_{jj'h})$, where, in the simplest setting,
$$
q_{jj'l}= \begin{cases} 1 & \textnormal{if } w_{1jl}=w_{2j'l}, \\
0 & \textnormal{if } w_{1jl}\neq w_{2j'l}, \end{cases} \qquad \qquad l=1,\ldots,h.
$$
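For instance, with $h=3$ key variables --- say surname, sex and year of birth, chosen here only for illustration --- a pair of records agreeing on the first two fields and disagreeing on the third gives the comparison vector $q_{jj'}=(1,1,0)$.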
The comparison vectors $q_{j j'}$ are usually assumed to be independent and identically distributed random vectors with a distribution given by the following mixture density
\begin{equation}
\label{mixbern}
p(q_{j j'}|m,u,\zeta)=\zeta \prod_{l=1}^h m_l^{q_{jj' \,l}} (1-m_l)^{1-q_{jj'\,l}}+(1-\zeta)
\prod_{l=1}^h u_l^{q_{jj'\,l}} (1-u_l)^{1-q_{jj'\,l}}.
\end{equation}
In the above formula, $\zeta$ represents the marginal probability that a random pair of records belong to the same unit. In other words, $\zeta$ may be interpreted as the percentage of overlapping of the two data sets.
The quantities $m_l$ and $u_l$, $l=1, \dots,h$, are the parameters of the two multinomial distributions associated with the two sets of comparisons $M$ and $U$, that is,
$$
m_l=P(q_{jj' \,l }=1\mid (j,j')\in M)
\quad \quad
u_l=P(q_{jj' \,l }=1\mid (j,j')\in U).$$
The independence assumption of the comparison vectors $q_{jj'}$'s is, strictly speaking, untenable from a probabilistic perspective. Consider the following example: after comparing record $A_1$ with records $B_1$ and $B_2$, and then record $A_2$ with $B_1$ only, the result of the comparison between $A_2$ and $B_2$ is often already known.
Also, in the standard setting,
the key variables are assumed independent of each other.
Several extensions of this basic set-up have been proposed, mainly by introducing potential interactions among key variables, see for example Winkler \cite{winkler:95} and Larsen and Rubin \cite{laru:01}.
To test whether a given pair should be allocated to $M$ or $U$,
one may consider either the likelihood ratio
$$
\psi = \frac{P(q_{jj'}| (j,j')\in M)}{P(q_{jj'} | (j,j') \in U)}=
\frac{\prod_{l=1}^h m_l^{q_{jj'l}} (1-m_l)^{1-q_{jj'l}}}
{\prod_{l=1}^h u_l^{q_{jj'l}} (1-u_l)^{1-q_{jj'l}}},
$$
or - in a Bayesian setting - the posterior probability that a single pair is a match
$p( (j,j') \in M |q_{jj'})$.
In general, a pair of records with a likelihood ratio $\psi$ - or a posterior probability - above a fixed threshold is declared a match. In practice, the choice of the threshold can be problematic, as illustrated, for example, in Belin and Rubin \cite{br:95}. In this context, optimization techniques may be helpful to rule out the multiple-match issue, that is, the possibility that a single unit in data set $F_1$ is linked with more than one unit in data set $F_2$.
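As a purely numerical illustration (the values below are invented and serve only to fix ideas), suppose $h=3$ with $m=(0.9,\,0.95,\,0.8)$ and $u=(0.2,\,0.1,\,0.3)$. A pair agreeing on all three key variables has
$$
\psi=\frac{0.9}{0.2}\cdot\frac{0.95}{0.1}\cdot\frac{0.8}{0.3}\approx 114,
$$
whereas a pair agreeing only on the first variable has $\psi=\frac{0.9}{0.2}\cdot\frac{0.05}{0.9}\cdot\frac{0.2}{0.7}\approx 0.07$; any threshold between these two values would declare the former pair a link and the latter a non-link.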
Sadinle \cite{Sadinle17} argues that such decision rules can lead to inconsistencies and proposes alternative Bayes estimates based on loss functions.
\subsection{A Bayesian perspective on record linkage}
Tancredi and Liseo \cite{tancredi} have proposed a different approach based on the direct modeling of the observed data matrices $W_1$ and $W_2$ of the key variables, rather than using the mutual comparisons.
This way, one is able to take into account both the potential measurement error and the matching constraints.
Let $\tilde{w}_{ijl}$ be the true unobserved value of field $l$
for record $j$ in data set $W_i$ and let $\tilde{W}_i$ be the corresponding unobserved data matrix.
We assume that
\begin{align*}
p(W_1,W_2 \vert \tilde{W}_1,\tilde{W}_2,\nu)& =
\prod_{ijl} p(w_{ijl}| \tilde{w}_{ijl}, \nu_l) \\
&=\prod_{ijl} \left[ \nu_l I(w_{ijl}=\tilde{w}_{ijl})
+ (1-\nu_l) \xi( w_{ijl})\right] .
\end{align*}
The above expression is a mixture of two components: the former is degenerate at the true value while the latter can be any distribution whose support is the set of all possible values of the variable $W_l$;
in the absence of specific information, the use of a uniform distribution for the second component of the mixture is a reasonable assumption. This way, $\xi(w_{ijl})=1/k_l$.
Also notice that, in this context, $\nu_l$
represents the probability that the variable $W_l$ is observed without noise.
This model, known as ``hit and miss'', was introduced in the record linkage literature by Copas and Hilton \cite{copas:hilton}.
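To fix ideas, under the uniform choice $\xi(w_{ijl})=1/k_l$ the marginal probability that field $l$ is recorded in agreement with its true value is
$$
P(w_{ijl}=\tilde{w}_{ijl}) = \nu_l + (1-\nu_l)\frac{1}{k_l};
$$
for example, for a field with $k_l=10$ categories and $\nu_l=0.95$ (purely illustrative values) this probability equals $0.955$.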
In order to build a model for the true values $\tilde{w}_{ijl}$'s one needs to introduce a matching matrix $C$.
In particular, let $C$ be a $N_1\times N_2$ matrix whose unknown entries are either $0$ or $1$, where $C_{jj'}=1$ represents a match, $C_{j j'}=0$ denotes a non-match.
We assume that each data set does not contain replications of the same unit, so that $\sum_{j'}C _{jj'}\leq 1 $, and $\sum_{j }C _{jj'}\leq 1.$
We also assume that the joint distribution of $\tilde{W}_1$ and $\tilde{W}_2$ both depends on the entries of the matching matrix $C$ and on the probability vector
$\theta=(\theta_{s_1\dots s_h}, s_1=1\ldots,k_1;\ldots; s_h=1\ldots, k_h)$ which describes the distribution of the true values one can observe on each sample. More precisely, we assume that
\begin{equation}
\label{tildeV}
p(\tilde{W}_1, \tilde{W}_2 |C,\theta) =\prod_{j: C_{j j'}=0 \,, \forall j'}
p(\tilde{w}_{1j}|\theta) \prod_{j': C_{j j'}=0 \,, \forall j}
p(\tilde{w}_{2j'}|\theta) \prod_{j j': C_{j j'}=1} p(\tilde{w}_{1j},
\tilde{w}_{2j'}|\theta),
\end{equation}
where
$$p(\tilde{w}_{ij}|\theta)=\prod_{s_1\ldots s_h} \theta_{s_1,\dots, s_h}^{I(\tilde{w}_{ij}=(s_1,\ldots,s_h))}, $$
and
$$
p(\tilde{w}_{1j}, \tilde{w}_{2j'}|\theta)=\left\{
\begin{array}{l l}
0 & \textnormal{if } \tilde{w}_{1j} \neq \tilde{w}_{2j'}\\
\prod_{s_1\ldots s_h} \theta_{s_1,\dots, s_h}^{I(\tilde{w}_{ij}=(s_1,\ldots,s_h))} & \textnormal{if } \tilde{w}_{1j} = \tilde{w}_{2j'}
\end{array}\right.
$$
The above record linkage model is a simplified version of the one
proposed in Tancredi and Liseo \cite{tancredi}, where an additional layer - introducing a super-population model -
was added at the top of the hierarchy.
This simplest version, already used in Hall et al.\cite{hallWP} and Tancredi and Liseo \cite{tl15},
can be easily obtained by integrating out the additional layer of hierarchy,
under specific prior assumptions.
Following Hall et al. \cite{hallWP}, we also assume that the key variables are independent.
In symbols, setting $\theta_{l,s_l}=p(\tilde{w}_{ijl }=s_l|\theta_l)$, with $\theta_l=(\theta_{l 1}, \ldots, \theta_{l,k_l})$, we assume that
$$\theta_{s_1,\dots, s_h}= \prod_{l=1}^h \theta_{l,s_l}.$$
To complete the model we need to specify a prior distribution for the matching matrix $C$ and prior distributions for the parameters $\nu_l$ and $\theta_l$, $l=1,\ldots, h$. For these
latter quantities the standard assumptions of independent Beta distributions for the probabilities $\nu_l$ and independent Dirichlet distributions for the vectors $\theta_l$ can be adopted. Regarding $C$, the prior can be elicited in two stages. First, we elicit a prior distribution $p(t)$, $t = 0, 1, 2, \dots N_1 \wedge N_2$ on $T$: ``number of matched pairs in the two data sets''. At this stage, the researcher can easily collect information, looking at previous experiences or at the statistical characteristics of the data sets (e.g. if the
two data sets refer respectively to a census and a sample, we can expect a
large number of matched pairs). At the second stage we define a conditional prior
distribution for the configuration matrix $C$ {\em given} the number of matches.
We take the natural noninformative choice of a uniform conditional prior on the set
$
C^{(t)}= \big\{ C : \sum_{j j'} C_{j,j^\prime}= t \big \}.
$
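It may be worth recalling that, for a fixed value $t$, the set $C^{(t)}$ contains $\binom{N_1}{t}\binom{N_2}{t}\,t!$ configurations (choose the $t$ matched records in each file and a bijection between them), so that the uniform conditional prior assigns probability $\big[\binom{N_1}{t}\binom{N_2}{t}\,t!\big]^{-1}$ to each matrix in $C^{(t)}$.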
The model just outlined cannot be analyzed in a closed form and simulation from
the posterior distribution is necessary.
In particular, we have implemented a Metropolis within Gibbs algorithm where
the updating of parameters $\nu_l$ and $\theta_l$ can be easily performed by simulating from their respective full conditional distributions, for $l=1, \dots, h$. On the other hand, the updating of the matching matrix $C$ and the true values $\tilde{W}_1$ and $\tilde{W}_2$ is jointly obtained.
In particular, we adopt a Metropolis-Hastings step by proposing a new matching matrix $C$, which is obtained
by adding or deleting one match or by switching two already existing matches. Conditionally
on the acceptance of the proposed value for $C$, a Gibbs step is used for the updating of the elements of $\tilde{W}_1$ and $\tilde{W}_2$.
This Metropolis step can be easily adapted to specific situations which we will discuss in the application section. For example, it might be the case that
the data set $F_1$ is a subset of $F_2$ so that we already know that the number
of matches is exactly $N_1$. In this case the prior over $C$ will be restricted to those matrices with exactly $N_1$ matches and the Metropolis step will only propose a permutation of the matches or a simultaneous addition and deletion of matches.
Details of the algorithm can be found in Tancredi and Liseo \cite{tl15}.
Finally, in order to produce a point estimate of the matching configuration $C$, one can use the following - rather natural - strategy:
$$ \widehat{C}_{ij}=\left\{
\begin{array}{c c}
1 & \textnormal{if } p(C_{ij}=1|W_1,W_2) \geq \frac12 \\
0 & \textnormal{otherwise} \\
\end{array}
\right. .
$$
The above estimator is not the only possibility. Sadinle \cite{Sadinle17} proposed different ``point estimators'' of the $C$ matrix based on a more general class of loss functions.
\section{Small area estimation based on unit linear mixed model}
\label{tre}
When the sample sizes within some domains are moderate or small, standard estimators are often not reliable enough to produce estimates at a finer level of (geographical) detail; for a general review on this topic, see Rao and Molina \cite{rao}.
The empirical best linear unbiased predictor (EBLUP) based on a unit level model was first proposed by Battese et al. \cite{battese},
to improve the reliability of estimators by exploiting the relationship between the
target variable and the auxiliary variables.
\subsection{The unit linear mixed model }
\label{diconsiglio_subsec:3.1}
\noindent
Suppose that the population units can be grouped in $D$ areas or domains,
let $Y$ be the target variable and $X$ be auxiliary variables observed on the same units.
Assume a linear mixed relationship between the target variable and the covariates
\begin{equation}
\label{unit}
y_{id}=X_{id}^T\beta+u_d+e_{id}, \enspace i=1,\dots, N_d, \enspace d=1,\dots, D,
\end{equation}
where $\beta$ is a $p$-dimensional vector of fixed regression coefficients, $u_d$, $d=1,\ldots, D$, are i.i.d. random variables related to the specific area or domain contributions, with $\mathbb{E}(u_d)=0$ and $\var{u_d}=\sigma^2_{u}$, and $e_{id}$ are i.i.d. errors with $\mathbb{E}(e_{id})=0$ and $\var{e_{id}}=\sigma^2_e$.
In matrix notation
$$ Y=X\beta+Zu+e, $$
where $Z$ is the area design matrix, $Z=\textnormal{Blockdiag}(Z_d=1_{N_d}; d=1, \cdots, D)$.
The total variance is then
$\var{Y}=V=\sigma^2_u ZZ^T+\sigma^2_e I$ or $V=\diag{V_1, \dots, V_D}$, with
$V_d=\sigma_e^2I_{N_d}+\sigma_u^2Z_dZ_d^T$.
When $\sigma^2_u$ and $\sigma^2_e$ are known, the BLU predictor of a small area mean $\bar Y_d$ is given by
\begin{equation}
\label{eblup}
\hat {\bar Y}_d^{BLUP} = \frac1{N_d} \left( \sum_{i \in s_d} y_{id} + \sum_{i \notin s_d} \hat y_{id}^{BLUP} \right)
\end{equation}
where $\hat y_{id}^{BLUP} = X_{id}^T \hat \beta + \hat u_d$ with
$$
\hat \beta=(X^TV^{-1}X)^{-1}X^TV^{-1}y,
$$
$\hat u = \sigma^2_u Z^T V^{-1}(y-X\hat \beta)$, and $s_d$ is the subset of units in domain $d$ which were actually sampled.
An EBLUP is obtained by plugging the estimates $\hat{\sigma}^2_u$ and $\hat{\sigma}^2_e$ into the previous expressions. Strategies for estimating these variance components are described in \S \ref{sec_varcomponents}.
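
For concreteness, the following sketch computes the GLS estimator $\hat\beta$, the predicted random effects $\hat u$ and the predictor \eqref{eblup} under the assumption of known variance components; the data layout (a vector of area labels, the population sizes $N_d$ and the population means of the covariates) is our own illustrative choice, not code from the cited papers.
\begin{verbatim}
import numpy as np

def blup_area_means(y, X, area, Xbar_pop, N_pop, sigma2_u, sigma2_e):
    """BLUP of the small area means for y_id = X_id' beta + u_d + e_id.
    `area` holds integer labels 0..D-1, `Xbar_pop[d]` is the population
    mean of the covariates in area d, and `N_pop[d]` its population size."""
    n, D = len(y), len(N_pop)
    Z = np.zeros((n, D))
    Z[np.arange(n), area] = 1.0                      # area design matrix
    V = sigma2_u * Z @ Z.T + sigma2_e * np.eye(n)    # total variance
    Vinv = np.linalg.inv(V)
    beta = np.linalg.solve(X.T @ Vinv @ X, X.T @ Vinv @ y)
    u = sigma2_u * Z.T @ Vinv @ (y - X @ beta)       # predicted random effects
    means = np.empty(D)
    for d in range(D):
        s = area == d
        # predicted total of the non-sampled units, via the population total
        out_of_sample = N_pop[d] * (Xbar_pop[d] @ beta + u[d]) \
                        - (X[s] @ beta + u[d]).sum()
        means[d] = (y[s].sum() + out_of_sample) / N_pop[d]
    return beta, u, means
\end{verbatim}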
The mean squared error (MSE) of the standard EBLUP estimator is given by
\begin{equation}
\label{MSE}
MSE (\bar{Y}_d^{EBLUP}) \approx g_{1d}(\sigma^2_e,\sigma^2_u)+g_{2d}(\sigma^2_e,\sigma^2_u)+g_{3d}(\sigma^2_e,\sigma^2_u),
\end{equation}
see Prasad and Rao \cite{prasadrao}.
The $g$ terms are, respectively,
$$
g_{1d}(\sigma^2_e,\sigma^2_u)=(1-\phi_d)\sigma^2_u,
$$
$$
g_{2d}(\sigma^2_e,\sigma^2_u)=(\bar{X}_d-\phi_d\bar{x}_d)^T(X^T V^{-1} X)^{-1}(\bar{X}_d-\phi_d\bar{x}_d),
$$
where $\phi_d= \sigma^2_u/(\sigma^2_u+\sigma^2_e/n_d)$ and
$$
g_{3d}(\sigma^2_e,\sigma^2_u)= n_d^{-2}\,(\sigma^2_u +\sigma^2_e/n_d)^{-3}
\big( \sigma_e^4\,Var(\hat\sigma^2_u)+ \sigma_u^4\,Var(\hat\sigma^2_e)
-2\sigma_e^2 \sigma_u^2\,Cov(\hat\sigma^2_u, \hat\sigma^2_e)\big);
$$
see Rao and Molina \cite{rao}, Chapter 7, for details about the component $g_3$ when the variance components are estimated with ML.
The proposal of Prasad and Rao \cite{prasadrao} for the estimation of the MSE is
\begin{equation}
\label{mse}
mse (\bar{Y}_d^{EBLUP}) \approx g_{1d}(\hat\sigma^2_e,\hat\sigma^2_u)+g_{2d}(\hat\sigma^2_e,\hat\sigma^2_u)+2g_{3d}(\hat\sigma^2_e,\hat\sigma^2_u).
\end{equation}
It is possible to obtain an estimate of the MSE using alternative techniques, such as bootstrap and jackknife.
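
As a sketch of how the leading terms of \eqref{mse} can be evaluated (the $g_3$ term additionally requires the asymptotic variances and covariance of the variance component estimators and is omitted from this illustration):
\begin{verbatim}
import numpy as np

def g1_g2(Xbar_d, xbar_d, n_d, X, Vinv, sigma2_u, sigma2_e):
    """Leading terms of the Prasad-Rao MSE approximation for area d:
    g1 = (1 - phi_d) * sigma2_u and the g2 quadratic form."""
    phi_d = sigma2_u / (sigma2_u + sigma2_e / n_d)
    g1 = (1.0 - phi_d) * sigma2_u
    a = Xbar_d - phi_d * xbar_d
    g2 = a @ np.linalg.solve(X.T @ Vinv @ X, a)
    return g1, g2
\end{verbatim}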
\subsection{The unit linear mixed model under RL: the classical approach.}
\label{author_subsec:2.2}
Here we consider the case where the covariates $X$ and the target variable $Y$ are not observed on the same data set: for example, they may have been obtained by linking a sample with a register list; in this situation, the plain use of the previously described techniques may produce strongly biased estimates.
Following Chambers \cite{chambers09} and Samart and Chambers \cite{samartchambers14},
let $y_{id}^*$ be the value of the response variable observed on unit $i$, matched with the value $X_{id}$.
\noindent
Let $Z_2$ be a blocking variable that partitions both registers so that linkage errors may only occur within the groups of records defined by the distinct values of this variable.
In order to simplify the notation, we assume that blocks coincide with the actual domains. This implies, here, that $Z_2$ simply represents the domain indicator.
We assume that $Z_2$ is measured without error on both the $Y$-register and the $X$-register.
An exchangeable linkage errors model can be defined by assuming that the probability of correct linkage is the same for all records in a domain.
We make the following standard assumptions (Chambers \cite{chambers09}):
\begin{enumerate}
\item the linkage is complete, i.e. the $X$-register and the $Y$-register refer to the same population and have no duplicates, so that the smaller $Y$-register is contained in the larger $X$-register;
\item the linkage is one-to-one between the $Y$ and $X$ registers;
\item the linkage errors model is exchangeable within domains.
\end{enumerate}
Then, for each area $d$, the observed response vector may be considered a permutation of the true one, say
$Y^*_d=A_dY_d$, where $A_d$ is a random permutation matrix such that
$\mathbb{E}(A_d \vert X)=G_d$.
Set
$$P(a^d_{ii}=1|X)=P(\textnormal{correct linkage})= \lambda_d \quad \textnormal{and} \quad
P(a^d_{ij}=1|X)=P(\textnormal{incorrect linkage})=\gamma_d, \quad i \neq j;$$
then the expected value can be written as:
\begin{equation}
\label{lam}
G_d = (\lambda_d -\gamma_d) I_{n_{d}} + \gamma_d 1_{n_{d}} 1^T_{n_{d}}.
\end{equation}
As in Chambers \cite{chambers09}, $1^T_{n_{d}}A_d =1^T_{n_{d}}$ and $A_d1_{n_{d}} =1_{n_{d}}$; thus $1^T_{n_{d}}G_d =1^T_{n_{d}}$ and $G_d1_{n_{d}} =1_{n_{d}}$. That is, \eqref{lam} implies
$$\lambda_d + (n_{d} - 1) \gamma_d= 1, $$
$$ \gamma_d=\frac{1-\lambda_d }{n_d-1}, $$
so the first order properties of the linkage mechanism are completely specified by the parameters $\lambda_d$.
The values of the $\lambda_d$'s can be estimated, as suggested in \cite{kim}, using the
correctly linked/incorrectly linked status of some randomly sub-sampled linked records in each domain.
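
The matrix \eqref{lam} is immediate to construct once $\lambda_d$ and $n_d$ are given; the following minimal sketch (our own illustrative code) builds it and can be used to verify that its rows sum to one:
\begin{verbatim}
import numpy as np

def exchangeable_G(lambda_d, n_d):
    """Expected permutation matrix E(A_d | X) under the exchangeable
    linkage errors model: lambda_d on the diagonal, gamma_d elsewhere."""
    gamma_d = (1.0 - lambda_d) / (n_d - 1)
    return (lambda_d - gamma_d) * np.eye(n_d) + gamma_d * np.ones((n_d, n_d))
\end{verbatim}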
Samart and Chambers \cite{samartchambers14} proposed a ratio-type corrected estimator for $\beta$
\begin{equation}
\tilde\beta_R=(X^T V^{-1}G X)^{-1} X^T V^{-1} y^{*},
\end{equation}
where $G= \diag{G_1, \dots, G_D}.$
Then, by exploiting the relationship between $y^*$ and $X$, a BLU estimator can be derived as
\begin{equation}
\tilde\beta_{BLUE}=(X^T G^T\Sigma^{-1} G X)^{-1} X^T G^T \Sigma^{-1} y^{*},
\end{equation}
which takes into account the derived variance of the observed $y^*$
\begin{equation}
\textrm{Var}\left (Y^\ast \right )=\Sigma= \sigma^2_u K +\sigma^2_eI + \tilde{V},
\end{equation}
where
\begin{equation}
\tilde{V} = \diag{\tilde{V}_1, \tilde{V}_2, \dots, \tilde{V}_D},
\end{equation}
$\tilde{V}_d = \var{A_d X_d \beta}$, which is approximated by
$$
\tilde{V}_d \approx \diag{(1-\lambda_d)(\lambda_d (f_{id}-\bar f_d)+\bar f_d^{(2)}-\bar f_d^2); i=1, \dots, n_d},
$$
where, for each domain $d$, $f_{id} = X_{id}^T\hat \beta$ for the sampled units in domain $d$, and $\bar f_d$ and $\bar f_d^{(2)}$ denote the domain means of the $f_{id}$'s
and of their squares, respectively. Finally, $K$ is a function of the domain sizes and of the vector of $\lambda_d$'s (Samart and Chambers \cite{samartchambers14}).
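
A schematic implementation of the two corrected estimators, again treating $G$, $V$ and $\Sigma$ as known (the function names are ours):
\begin{verbatim}
import numpy as np

def beta_ratio(X, V, G, y_star):
    """Ratio-type corrected estimator of Samart and Chambers (2014)."""
    Vinv = np.linalg.inv(V)
    return np.linalg.solve(X.T @ Vinv @ G @ X, X.T @ Vinv @ y_star)

def beta_blue(X, Sigma, G, y_star):
    """BLU estimator exploiting the relationship between y* and X."""
    Sinv = np.linalg.inv(Sigma)
    A = X.T @ G.T @ Sinv
    return np.linalg.solve(A @ G @ X, A @ y_star)
\end{verbatim}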
\subsection{Estimation of variance components (ML)}
\label{sec_varcomponents}
\noindent
As $\sigma_u$ and $\sigma_e$ are unknown,
they have to be estimated; usual strategies include the method of moments, maximum likelihood (ML) or restricted ML (Harville \cite{Harville1977}).
Here we confine ourselves to ML, and we assume a multivariate normal model.
In general, there are no closed form expressions for the variance component estimators.
Samart and Chambers \cite{samartchambers14} use the method of scoring as an algorithm to obtain the estimators.
In the standard case, i.e. when the variables are recorded on the same sample, one has $ y\sim N(X\beta;V)$; in the record linkage case, recall that
$ y^*\sim N(G X\beta;\Sigma)$. The scoring algorithm can be applied to the derivatives of the corresponding likelihood. An estimate of $\beta$ is then obtained by plugging in the variance component estimates, and an iterative process is usually needed.
\subsection{Small area estimation under linkage errors}
\noindent
For the purpose of small area estimation, the usual scenario
to be considered is the linkage of a sample with a larger register. Here we assume that the register is complete, i.e. neither duplicates nor coverage issues occur.
This setting is considered in Chambers \cite{chambers09}.
Following this framework, we also assume that the record linkage process is independent of the sampling process.
Chambers \cite{chambers09} assumes that a hypothetical linkage can be performed before the sampling process.
Under these conditions, the variance component matrices $G$, $V$ and $\Sigma$ only depend on the domain variables and linkage errors, so the use of sampling weights is not really needed.
Besides these assumptions, as specified in section \ref{author_subsec:2.2}, we assume an exchangeable linkage errors model.
This implies that
$
\hat {\bar Y}^{*} = \hat {\bar Y}
$,
so one can exploit the distribution of $Y^*$ in order to obtain the EBLU predictor
\begin{equation}
\label{eblup*}
\hat {\bar Y}_d^{*BLUP} = \frac1{N_d} \left( \sum_{i \in s_d} y^*_{id} + \sum_{i \notin s_d} \hat y_{id}^{BLUP} \right)
\end{equation}
where
$\hat y_{id}^{BLUP}= G X \tilde\beta_{BLUE}+ \hat u_d$, with
$$\hat u = (\hat{u}_1, \dots, \hat{u}_D) =\sigma^2_u Z^T \Sigma^{-1}(y^\ast - G X\tilde \beta_{BLUE}).$$
For computational ease, the sum of the predicted values of the non-sampled units can be obtained as the difference between the predicted population total and the sum of the predicted values for the sampled units. The EBLU predictors are obtained by replacing the estimators of the regression coefficients and variance components in (\ref{eblup*}).
A key aspect for the evaluation of the small area estimator in real applications is the estimation of its MSE. For the proposed small area estimator, derived from the distribution of $y^*$, even under the assumption of known record linkage errors and hence known $G$ (i.e. without introducing additional elements of variability with respect to the standard case), the structure of $\var{y^*}=\Sigma$ is far more complex than in the standard linear mixed model setting described above, since it also depends on $\beta$ through $\tilde V$. Consequently, the MSE of $\hat {\bar Y}^{*EBLUP}$ requires additional components. Moreover, in practice the linkage errors are unknown, and their estimation introduces an additional source of uncertainty. Further research on an estimator of the MSE of $\hat {\bar Y}^{*EBLUP}$ is needed.
\subsection{The unit linear mixed model under RL: the Bayesian approach.}
\label{usae:ba}
From a Bayesian perspective, there are no theoretical complications in adapting the integrated model proposed by Tancredi and Liseo \cite{tl15} to a small area framework.
In the following, we will make distributional assumptions that match those described in \S \ref{author_subsec:2.2}, in order to make valid comparisons.
We then assume the usual standard unit-level model (\ref{unit}), and we also suppose that both the random effects and the stochastic terms of the models are independent Gaussian random variables; in particular
$$u_d \vert \sigma_u^2 \stackrel{\textnormal{iid}}{\sim} N(0,\sigma_u^2), \mbox{ and } e_{id}\vert \sigma_e^2 \stackrel{\textnormal{iid}}{\sim}N(0, \sigma_e^2), \quad i=1, \dots, n_d;\, d=1, \dots, D.
$$
We also assume that the mean vector of the auxiliary variables for the generic area $d$, namely $\bar{\bm X}_{d}=\frac{1}{N_d}\sum_{j=1}^{N_{d}}{\bm{x}_{d,j}},$ is known.
Alternatively, if the domain population sizes $N_d$ are large enough, one can state that the small area means are approximately equal to
$${\mu}_d=\bar{\bm X}_{d}^\prime \bm{\beta}+ u_d.$$
Assume that, as in the previous section, we start from two data sets $F_1$ and $F_2$, of sizes $N_1$ and $N_2$ respectively.
Let $N_{1d}$ be the number of units belonging to domain $d$ and observed in $F_1, d=1, \dots, D.$
We observe, on data set $F_1$, the quantities
$(Y_{1d},W_{1,j,1},W_{1,j,2},\dots,W_{1,j,h}), d=1,2,\dots, D$; $j=1,2,\dots, N_{1d}$, and $\sum_{d=1}^{D}N_{1d}=N_1$.
Similarly, on data set $F_2$ we observe
$(W_{2,j,1},W_{2,j,2},\dots,W_{2,j,h},X_{2,j,1},X_{2,j,2},\dots,X_{2,j,h}),\\ j=1,2,\dots, N_{2d}$, where $N_{2d}$ is the number of units belonging to domain $d$ in list $F_2$ and $\sum_{d=1}^{D} N_{2d}=N_2.$
Regarding the matching matrix $C$, its parameter space is restricted by the additional and reasonable constraint that false links may only occur within the same domain.
Then we assume that $C$ is block diagonal. In other words, linkage uncertainty concerns only single domains, and we assume that two units belonging to different areas cannot be matched.
This restriction allows us to separately deal with each single domain: this results in considering $D$ different $C_d$ matrices, $d=1, \dots, D$.
It must be stressed, however, that these assumptions are weaker than those required in \S~\ref{author_subsec:2.2}. In this case there are no exchangeability restrictions, and the posterior estimate of $C$ relies heavily on the observed key variables.
The Bayesian model is then completed with the elicitation of a prior distribution.
We assume standard priors on the parameters of the small area model. In particular we assume that $\beta, \sigma_e$ and $\sigma_u$ are mutually independent. Then we take an improper uniform prior for the location parameter vector $\bm{\beta}$, and an Inverse Gamma density for the variance component $\sigma_e^2$, that is
$$\sigma_e^2 \sim IG(a_e,b_e),$$
with small values for the hyperparameters.
The choice of the prior of $\sigma_u$ is a more critical issue. Rao and Molina (\cite{rao}) suggest the use of another Inverse Gamma density in order to keep the model conditionally conjugate and, consequently, amenable to a straightforward Gibbs sampler.
However, Gelman (\cite{gel06}) noticed that, when this prior is used in its weakly informative version, that is setting $a_u=b_u=\varepsilon$ with $\varepsilon$ very small, the final posterior may be very sensitive to the value of $\varepsilon$, especially when the ``true'' value of $\sigma_u$ is very small and the number of domains is not large. This happens because, as $\varepsilon \to 0$, the resulting joint posterior would be improper.
Gelman's (\cite{gel06}) alternative suggestion is then the use
of an improper uniform prior over the standard deviation $\sigma_u$. This implies an improper prior for $\sigma^2_u$, which is proportional to $\sigma_u^{-1}$ and which produces a proper posterior, provided that the number of domains is larger than 3.
The goal of a Bayesian analysis is the production of a sample from the joint posterior distribution of the above parameters and those related to the record linkage part, that is
$$\pi(C,{\beta},\bm u, \sigma_u^2, \sigma_e^2 \vert W_1, W_2, Y_1,X_2),$$
where $Y_1$ is the vector of responses of the survey variable recorded in $F_1$ and $X_2$ is the set of covariates in $F_2$ and $\bm u=(u_1,u_2,\dots, u_D)'$.
To sample from this distribution we adopt a straightforward Gibbs sampling with a Metropolis step which is necessary to propose values from the full conditional distribution of the matching matrix $C$, as described in \S~2.2.
However, in this specific framework, the algorithm must be tailored in such a way that the proposed values are consistent with the information that a data set is a subset of the other; this implies that one knows in advance that the total number of links must be exactly $N_1$: consequently, the range of possible proposals for moving the chain around the parameter space of $C$ is restricted to $0$/$1$ matrices $C$ of size $N_1 \times N_2$ such that there are exactly $N_1$ entries equal to $1$, each row of the matrix has a single $1$ and no more than one entry can be equal to 1 in each column of the proposed $C$. This implies that only ``switching moves'' between columns of $C$ are allowed.
For a given value of $C$, the other full conditional distributions belong to well-known families, independently of which prior is used on $\sigma_u$, either a uniform prior on $\sigma_u$ or an Inverse Gamma on $\sigma^2_u$. The implementation of a Gibbs algorithm (conditional on $C$) can be found in Rao and Molina \cite{rao}, Chapter 10.
In our record linkage framework, two alternative estimation strategies can be envisaged.
\begin{enumerate}[a.]
\item Feedback strategy: the algorithm produces a sample from the joint posterior distribution of the parameters of the record linkage and of the small area model together. This allows a feedback effect: not only does the small area model depend on the selected matches, but the selection of potential links also depends on the information carried by the small area model.
\item Non-feedback strategy: the record linkage part of the model obviously affects the small area part; however, the reverse does not hold: in practice, we perform a Gibbs sampling for $(\beta, \sigma_u, \sigma_e, \bm{u})$ for each single $C$ generated by the algorithm and retain the last value of the chain.
\end{enumerate}
\section{Results on simulated data}
\label{quattro}
\noindent
In this section we describe a paradigmatic application, where we have used the fictitious population census data \cite{ESSnet2011} created for the European Statistical System Data Integration project (ESSnet DI) and the micro-data from the Survey on Household Income and Wealth of the Bank of Italy (SHIW), freely available in anonymous form. Specifically, the ESSnet population, which comprises over 26,000 records with name, surname, gender and date of birth, has been augmented by adding two new variables representing the annual income and the area domain. The values of these two variables have been drawn from the SHIW data set; in particular, the domain comprises 18 areas resulting from the aggregation of the Italian administrative regions. Table \ref{ESSPOP} shows some records from this population register.
To perform a realistic record linkage and small area estimation exercise, the augmented ESSnet data set has been further modified by perturbing the potential linking variables (names, gender and date of birth) via the introduction of missing values and typos.
Moreover, from the perturbed data set we have removed the income variable, and we have added the corresponding value of the consumption variable resulting from the SHIW data set.
A list of records from this perturbed population is shown in Table \ref{ESSPOPMOD}, as an example.
\begin{table}
\begin{footnotesize}
\caption{A sample list of records from the population register}
\label{ESSPOP}
\begin{tabular}{l l l c r r r c r}
\hline
Identifier & Name & Surname & Gender & \multicolumn{3}{c}{ Date of birth}& Domain & Income\\
& & & & Day & Month & Year & & \\
\hline
DE03US003001 & NATHAN & RUSSELL & M & 11 & 11 & 1934 & Area1 & 6500\\
DE03US013003 & CHARLOTTE & JONES & F &26 & 4 & 1974 & Area1 & 22000\\
EX985AF008003 & OWEN & LLOYD M & M & 9 & 4 & 1976 & Area2 & 20000\\
EX985AF015002 & EVELYN & THOMPSON & F & 12 & 12 & 1990 & Area2 & 17703\\
HR167XE022003 & MACEY & SHAW & F & 6 & 2 & 1982 & Area3 & 28264\\
HR167XE027001 & OLLIE & JONES & M & 21 & 4 & 1951 & Area3 & 25766\\
LS992DB012005 & OLIVIA & ANDERSON & F & 28 & 10 & 1995 & Area4 & 20800\\
M141DQ001002 & MILLIE & JAMES & F & 24 & 11 & 1972 & Area4 & 4990\\
\hline
\end{tabular}
\end{footnotesize}
\end{table}
\begin{table}
\begin{footnotesize}
\caption{A sample list of records from the perturbed population.}
\label{ESSPOPMOD}
\begin{tabular}{l l l c r r r c r}
\hline
Identifier & Name & Surname & Gender & \multicolumn{3}{c}{ Date of birth}& Domain & Consumption\\
& & & & Day & Month & Year & & \\
\hline
DE03US003001 & NATHAN & RUSSELL & M & 11 & 11 & - & Area1 &5583\\
DE03US013003 & CHARIOTTE & JONES & F &26 & 4 & 1974 & Area1 & 19266\\
EX985AF008003 & OWEN & LLOYD & M & 9 & 4 & 1976 & Area2 & 11636\\
EX985AF015002 & EVELYN & THOMPSON & F & 12 & 12 & 1990 & Area2 & 16323\\
\hline
\end{tabular}
\end{footnotesize}
\end{table}
\noindent
In practice, in order to compare the various methodologies, 100 replicated samples of size 1000 have been independently and randomly selected without replacement from the perturbed population.
Each sample has been linked to the register population by using, as key variables, \textit{Day and Year of Birth} (with 31 and 101 categories, respectively) and \textit{Gender}; the \textit{Domain} played the role of the blocking variable. The aim of the linkage process is the calibration of a small area model using the consumption as target variable and the income as covariate. Table \ref{NUMAREA} shows the population sizes for each domain and the corresponding average sample sizes. Notice that some areas comprise a very small number of records at the sample level.
\begin{table}
\begin{footnotesize}
\caption{Population and sample size in the domains}
\begin{center}
\label{NUMAREA}
\begin{tabular}{l r r}
\hline
Domain & Population Size & Average sample size \\
\hline
Area1 & 2880 & 107 \\
Area2 & 2302 & 88\\
Area3 & 2443 & 92\\
Area4 & 2404 & 92 \\
Area5 & 314 & 11\\
Area6 & 255 & 10\\
Area7 & 113 & 4 \\
Area8 & 296 & 12\\
Area9 & 488 & 18\\
Area10 & 490 & 18 \\
Area11 & 106 & 4\\
Area12 & 421 & 16\\
Area13 & 231 & 9 \\
Area14 & 2840 & 107\\
Area15 & 2915 & 110\\
Area16 & 2325 & 87 \\
Area17 & 2354 & 87\\
Area18 & 3448 & 130\\
\hline
\end{tabular}
\end{center}
\end{footnotesize}
\end{table}
\noindent
The classical version of the probabilistic record linkage model (\cite{FS1969}, \cite{Jaro1989}) has been implemented by means of the batch version of the software \textit{RELAIS} \cite{Relais}. The linkage procedure resulted, on average across the 100 replications, in 957 declared matches; the probability of a false link was close to 0.14 and the probability of a missing link was about 0.04.
In each simulation we have considered as links those pairs of records whose posterior probability of being a match was larger than $0.5$. The posterior probability has been computed as
$$
\frac{\zeta \psi}{1 - \zeta + \zeta \psi},
$$
where $\psi$ is the likelihood ratio defined in \S~\ref{2.1} and $\zeta$ is the estimated probability that a random pair of records belongs to the same unit, introduced in \S~\ref{2.1}, formula (\ref{mixbern}).
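
For instance, with the purely hypothetical values $\zeta = 0.02$ and $\psi = 50$, the rule above declares the pair a link only marginally:
\begin{verbatim}
def posterior_match_probability(zeta, psi):
    """Posterior probability that a pair is a match, given the estimated
    mixture weight zeta and the likelihood ratio psi of the pair."""
    return zeta * psi / (1.0 - zeta + zeta * psi)

print(posterior_match_probability(0.02, 50))   # about 0.505, just above 0.5
\end{verbatim}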
The Bayesian version of the record linkage procedure has been implemented following the lines described in \S~\ref{usae:ba}; see also \cite{lt-11}, \cite{tancredi} and \cite{tl15}. Also in this case we have considered as matches those pairs with a posterior probability, computed via the MCMC algorithm, higher than $0.5$.
The main goal of this section is to compare the relative statistical performance of the different estimators of the regression coefficients
of the mixed linear model describing the small area set-up. We have considered the following estimators:
\begin{enumerate}
\item[A.] the EBLUP with $X$ and $Y$ observed on the same data set, i.e. no linkage step is considered in this setting. It should be considered as the gold standard for any comparison;
\item[B.] the EBLUP restricted to the subset of linked records. This implies a reduction of the sample size due to missed links; however, we do not introduce linkage errors, and no false links are considered;
\item[C.] a na\"{\i}ve EBLUP, restricted to the subset of linked records, and considering $X$ and $Y$ observed on two different data sets. No adjustment for linkage errors is considered.
\item[D.] the adjusted EBLUP estimator, as in formula (10).
\item[A$^\ast$.] the Bayesian version of strategy $A$: in practice a hierarchical Bayesian small area model with vaguely informative priors on the hyperparameters, as illustrated in \cite{rao}, Chapter 10.
\item[C$^\ast$.] the Bayesian version of strategy $C$: again a hierarchical Bayesian small area model built upon a point estimate of the matching matrix $C$.
\item[E.] the posterior mean of the regression coefficients $\bm{\beta}$ using a Bayesian approach for the linkage step based only on the key variables $W_1, \dots, W_h$ (no feedback effect).
\item[F.] the posterior mean of the regression coefficients $\bm{\beta}$ using a Bayesian approach with both the key variables and the regression variables $X$ and $Y$. In this case there is a feedback effect which makes the posterior distribution of the matching matrix $C$ depend also on $X$ and $Y$.
\end{enumerate}
\begin{table}
\begin{center}
\caption{Comparison of different estimators of the regression coefficients $(\beta_0, \beta_1)$: the first row reports the ``true'' estimates based on the entire population. Each other row reports the mean and standard deviation of the various estimators over 100 repeated samples of size 1000}
\label{one1}
\label{author_tab:2}
\begin{tabular}{p{3cm}p{2cm}p{2cm}p{2cm}p{2cm}}
\hline\noalign{\smallskip}
Estimates & Intercept & Sd Intercept & Slope & Sd Slope \\
\noalign{\smallskip}\hline\noalign{\smallskip}
Population & 3.576 & --- & 0.538 & --- \\
Estimates A & 3.057 & 1.412 & 0.565 & 0.070 \\
Estimates B & 3.030 & 1.552 & 0.567 & 0.077 \\
Estimates C & 5.224 & 1.367 & 0.450 & 0.073 \\
Estimates D & 3.008 & 1.633 & 0.567 & 0.086 \\
Estimates $A^{\ast}$ & 3.045 & 1.399 & 0.566 & 0.070\\
Estimates $C^{\ast}$ & 3.749 & 1.592 & 0.533 & 0.081\\
Estimates E & 4.099 & 1.285 & 0.513 & 0.066 \\
Estimates F & 2.722 & 1.290 & 0.647 & 0.079\\
\noalign{\smallskip}\hline\noalign{\smallskip}
\end{tabular}
\end{center}
\end{table}
\noindent
All the Bayesian estimators were computed using independent priors on $(\beta, \sigma_u, \sigma_e)$,
with an improper flat prior on $\beta$, an Inverse Gamma with hyperparameters $(0.01, 0.01)$ on $\sigma^2_e$ and Gelman's prior for $\sigma_u^2$.
In Table \ref{one1} the results for the proposed estimators are reported. The values of estimates $A$ and $A^\ast$ are only affected by sample selection; the small differences between them can be explained in terms of sampling variability and of the minimal effect of the Inverse Gamma prior on $\sigma^2_e$.
Estimates $B$ are affected by missing matches: this only results in a sample size reduction, since the bias due to missing matches is negligible, at least in this simulated situation. On the other hand, the na\"ive estimates $C$ show the worst performance; this is mainly due to the introduction of false matches. As expected, this effect is well accounted for by the $D$ method. The $C^\ast$ estimates are much better than their natural competitors $C$: this can be explained in terms of a better performance of the Bayesian record linkage in the point estimation of the matching matrix $C$.
The proposed method $D$ produces a slight improvement when the magnitude of the linkage errors is relatively low (the average across areas and replications is less than 15\%). One can expect a more substantial improvement with higher linkage error levels.
The proposed adjustment is still subject to very restrictive assumptions, such as the identification of small areas with blocking variables in the linkage process, the exchangeability of linkage errors and, finally, the assumption of known linkage errors. When the vector $\bm{\lambda}$ (and $\bm{\gamma}$, if the exchangeability assumption is not postulated) needs to be estimated, the trade-off between bias and variance of the adjustment should be assessed.
In our simulation study, the $\lambda_d$'s were simply estimated as the relative frequency of correct links.
Among the three main assumptions described above, only the first one plays a role in the Bayesian approaches $E$ and $F$. We delay a general discussion of pros and cons of different methods to \S~\ref{five}.
In terms of comparison between the two Bayesian strategies, one can see that, at least in our simulation set-up, the general performance of the non-feedback strategy is definitely superior to that based on a feedback effect.
We do not have a definitive explanation for this fact. Our conjecture is that the result may depend on the fact that the assumption of a linear relation between consumption and income, implicit in the unit-level small area model, is not adequate for this data set.
In order to support our conjecture, we notice that, when the assumed model is ``correct'', the information contained in the variables involved in the small area model may contribute to flag the correct links. On the other hand, when the model is not correct, this advantage may turn itself into a bias, as in our example.
The Bayesian approaches based on MCMC simulations also allow us to obtain an immediate estimate of the standard deviation of the estimators. Let us denote
by $\hat{\sigma}(H, \beta)$ and $\hat{\sigma}(H, \alpha)$ the standard deviations of the posterior distributions of the slope $\beta$ and of the intercept $\alpha$ under method $H$. In our study we have obtained
\begin{align*}
& \hat{\sigma}(E, \alpha)= 0.784; \quad \hat{\sigma}(E, \beta)=0.036 \\
& \hat{\sigma}(F, \alpha)= 0.596; \quad \hat{\sigma}(F, \beta)=0.025.
\end{align*}
Table \ref{author_tab:3} reports the absolute relative biases (ARB), the standard deviations and the MSE of all the competing estimators. ARB is defined as
$$
\textnormal{ARB}= \frac 1D \sum_{d=1}^D \frac{\vert \hat{Y}_d - Y_d \vert} {Y_d },
$$
where $\hat{Y}_d$ is the predicted value of the consumption mean in area $d$, averaged over the 100 simulations
and $Y_d$ is the true mean value.
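
A sketch of the computation of the ARB, and of one possible empirical MSE, from the simulation output, assuming the estimates are stored in an array with one row per replication and one column per area:
\begin{verbatim}
import numpy as np

def arb_and_mse(Y_hat, Y_true):
    """Y_hat has shape (n_replications, D); Y_true has shape (D,)."""
    Y_bar = Y_hat.mean(axis=0)                       # average over simulations
    arb = np.mean(np.abs(Y_bar - Y_true) / Y_true)   # average over areas
    mse = np.mean((Y_hat - Y_true) ** 2)             # empirical MSE
    return arb, mse
\end{verbatim}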
\begin{table}
\begin{center}
\caption{Comparisons among estimators: ARB is the \textit{absolute relative bias}; SD is the observed standard deviation among different simulations; MSE is the mean square error.}
\label{author_tab:3}
\label{two}
\begin{tabular}{|p{2.3cm}|p{2.3cm}p{2.3cm}p{2.3cm}|}
\hline\noalign{\smallskip}
Estimates & ARB & SD & MSE \\
\noalign{\smallskip}\hline\noalign{\smallskip}
Estimates A & 0.033 & 0.463 & 0.517 \\
Estimates B & 0.033 & 0.540 & 0.595 \\
Estimates C & 0.043 & 0.508 & 0.705 \\
Estimates D & 0.035 & 0.523 & 0.605 \\
Estimates $A^{\ast}$ & 0.0286 & 0.498 & 0.488 \\
Estimates $C^{\ast}$ & 0.032 & 0.505 & 0.534 \\
Estimates E & 0.033 & 0.498 & 0.535 \\
Estimates F & 0.0289& 0.494 & 0.516 \\
Sample Mean & 0.0196& 1.908 & 4.297 \\
\noalign{\smallskip}\hline\noalign{\smallskip}
\end{tabular}
\end{center}
\end{table}
\noindent
On the other hand, one should also note from Table \ref{author_tab:3}
that the estimation method $F$ performs better in terms of absolute relative efficiency: this may be due to a more accurate estimation of the random effects.
As a final comment on the simulation study, we notice that all methods behave sufficiently well; this happens because the key variables (apart from \textit{Gender}) are really informative, with a large number of categories.
We have also included the sample mean estimator among the competitors.
From Table \ref{author_tab:3} one can notice how the sample mean outperforms all the proposed estimators in terms of bias; however, at the same time, it produces very large standard errors.
The resulting mean square error of the sample mean is then very high; this confirms that, at the price of a possible increase in bias, composite and synthetic estimators may produce great benefits. Sample means show a large MSE mainly because in the data set there are areas characterized by small sample sizes, as shown in Table \ref{NUMAREA}.
A non-standard case is represented by Area 17, which has a not so small sample size but still shows a large value of the MSE of the sample mean.
This is likely due to the very high degree of variability of the consumption at the population level in that domain.
Another important point to stress is that, in our opinion, the increase in bias is mainly caused by an incomplete model specification and not by the linkage procedure. In fact, in terms of bias, the sample mean also outperforms the benchmark estimators $A$ and $A^{\ast}$. We argue that the model, being a very simple relationship between income and consumption, is not able to capture the variability of $Y$.
\section{Discussion}
\label{five}
The main objective of this paper was to compare different statistical methods to calibrate a unit level small area model in the presence of linked data.
Since the previous literature on this topic is relatively scarce, we have considered all the existing methods and compared them with some natural Bayesian versions of the same model.
Given that a thorough comparison of the methods would imply an intensive simulation study, we confine ourselves in this paper to a practical comparison in a relatively typical situation such as the one described in the previous section.
From a more general perspective, we stress the fact that the frequentist strategy $D$ can be rigorously implemented and a correction can be produced only when the exchangeability assumption holds.
In practical situations, it is hard to meet an exchangeable structure of linkage errors;
however, as in our simulation study, the ``naive'' application of estimator $D$ shows a good performance in a typical, probably not completely exchangeable, situation.
A drawback of the $D$ strategy may be found in the use of known values for $\bm{\lambda}$. A sensitivity study (not reported here) shows that the final results are robust with respect to small variations of those values, although a more accurate sensitivity analysis should be considered.
On the other hand, the Bayesian approaches $E$ and $F$ rely on minimal assumptions: the most important, which is common to all methods discussed here, is that linkage errors may occur only within the same domain. Although this limitation can be avoided in theory, it is obvious that any linkage method must be based on some blocking mechanism in order to avoid computational intractability.
We should also say that we have confined ourselves to a comparative study in a situation where the key variables came from ``simple'' data sets, ready to be processed through standard record linkage procedures; we have not considered more complex situations because the main gist of the paper was the comparison between methods which can work reasonably well in standard applications of record linkage.
Another difference between the frequentist and Bayesian approaches lies in the estimation of the vector $\bm{\lambda}$. While it represents one of the parameters to be routinely estimated in the Bayesian algorithm, its value is externally introduced when using method $D$: in that case, $\bm{\lambda}$ can be either estimated through a training data set or elicited from previous knowledge.
As far as a comparison within Bayesian methods is concerned, we believe that a feedback strategy should be preferred when a specific statistical model must be used and the model has been found adequate to fit the data.
In other situations, when the linkage process aims at producing a new data set which will be routinely used for many different purposes, then a non feedback strategy seems more appropriate.
Finally, our approaches are essentially model-based, and their performance should always be considered in these terms.
When the model is not adequate, simple design based estimators may have a better performance, at least for moderate to large sample sizes.
\bibliographystyle{spbasic}
\begin{thebibliography}{99}
\bibitem{battese} Battese, G.E., Harter, R.M., Fuller, W.A. (1988). An Error-Components Model for Prediction of Crop Areas Using Survey and Satellite Data, \textit{Journal of the American Statistical Association}, 83, 28--36.
\bibitem{br:95} Belin, T., Rubin, D.B. (1995). A method for calibrating false-match rates in record linkage, \textit{Journal of the American Statistical Association}, 90, 694--707.
\bibitem{chambers09} Chambers, R. (2009). Regression analysis of probability-linked data, \textit{Official Statistics Research Series}, Vol. 4, Statistics New Zealand.
\bibitem{copas:hilton} Copas, J., Hilton, F. (1990). Record linkage: statistical models for matching computer records. \textit{Journal of the Royal Statistical Society}, A, 153, 287--320.
\bibitem{diconsiglio14} Di Consiglio, L., Tuoto, T. (2014). When adjusting for bias due to linkage errors: a sensitivity analysis. In \textit{European Conference on Quality in Official Statistics} (Q2014), Vienna, 3--5 June 2014.
\bibitem{ESSnet2011} ESSnet DI -- McLeod, Heasman and Forbes (2011). Simulated data for the on-the-job training, \texttt{http://www.cros-portal.eu/content/job-training}.
\bibitem{FS1969} Fellegi, I.P., Sunter, A.B. (1969). A Theory for Record Linkage. \textit{Journal of the American Statistical Association}, 64, 1183--1210.
\bibitem{gel06} Gelman, A. (2006). Prior distributions for variance parameters in hierarchical models (comment on article by Browne and Draper). \textit{Bayesian Analysis}, 1, n.~3, 515--534.
\bibitem{hallWP} Hall, R., Steorts, R.C., Fienberg, S.E. (2013). Bayesian parametric and nonparametric inference for multiple record linkage. \textit{Modern Nonparametric Methods in Machine Learning Workshop}, NIPS.
\bibitem{Harville1977} Harville, D.A. (1977). Maximum Likelihood Approaches to Variance Component Estimation and to Related Problems. \textit{Journal of the American Statistical Association}, 72, 320--338.
\bibitem{kim} Kim, G., Chambers, R. (2012). Regression analysis under incomplete linkage. \textit{Computational Statistics and Data Analysis}, 56, 2756--2770.
\bibitem{Jaro1989} Jaro, M. (1989). Advances in record linkage methodology as applied to matching the 1985 test census of Tampa, Florida. \textit{Journal of the American Statistical Association}, 84, 414--420.
\bibitem{lahiri2005} Lahiri, P., Larsen, M.D. (2005). Regression Analysis With Linked Data. \textit{Journal of the American Statistical Association}, 100, 222--230.
\bibitem{laru:01} Larsen, M.D., Rubin, D.B. (2001). Iterative automated record linkage using mixture models. \textit{Journal of the American Statistical Association}, 96, 32--41.
\bibitem{lt-11} Liseo, B., Tancredi, A. (2011). Bayesian estimation of population size via linkage of multivariate normal data sets. \textit{Journal of Official Statistics}, 27, 491--505.
\bibitem{neter} Neter, J., Maynes, E.S., Ramanathan, R. (1965). The effect of mismatching on the measurement of response errors. \textit{Journal of the American Statistical Association}, 60, 1005--1027.
\bibitem{prasadrao} Prasad, N.G.N., Rao, J.N.K. (1990). The Estimation of the Mean Squared Error of Small-Area Estimators, \textit{Journal of the American Statistical Association}, 85, 163--171.
\bibitem{rao} Rao, J.N.K., Molina, I. (2015). \textit{Small Area Estimation}, 2nd edition. Wiley, New York.
\bibitem{Relais} RELAIS 3.0 User's Guide (2015), available at \\ \texttt{http://www.istat.it/it/strumenti/metodi-e-strumenti-it/} \\
\texttt{strumenti-di-elaborazione/relais}.
\bibitem{Sadinle17} Sadinle, M. (2017). Bayesian Estimation of Bipartite Matchings for Record Linkage. \textit{Journal of the American Statistical Association} (in press).
\bibitem{samartchambers14} Samart, K., Chambers, R. (2014). Linear regression with nested errors using probability-linked data, \textit{Australian and New Zealand Journal of Statistics}, 56(1), 27--46.
\bibitem{SW93} Scheuren, F., Winkler, W.E. (1993). Regression analysis of data files that are computer matched -- Part I. \textit{Survey Methodology}, 19, 39--58.
\bibitem{SW97} Scheuren, F., Winkler, W.E. (1997). Regression analysis of data files that are computer matched -- Part II, \textit{Survey Methodology}, 23, 157--165.
\bibitem{tancredi} Tancredi, A., Liseo, B. (2011). A hierarchical Bayesian approach to record linkage and population size problems. \textit{Annals of Applied Statistics}, 5, 1553--1585.
\bibitem{tl15} Tancredi, A., Liseo, B. (2015). Regression analysis with linked data: problems and possible solutions. \textit{Statistica}, 75, 1, 19--35.
\bibitem{tsl17} Tancredi, A., Steorts, R.C., Liseo, B. (2017). A Bayesian approach for deduplication, record linkage and inference with linked data. \textit{Working paper}, MEMOTEF, Sapienza Universit\`a di Roma.
\bibitem{winkler:95} Winkler, W. (1995). Matching and record linkage. In B.G. Cox, D.A. Binder, B.N. Chinnappa, A. Christianson, M.J. Colledge and P.S. Kott (eds.), \textit{Business Survey Methods}, Wiley, New York, pp. 355--384.
\end{thebibliography}
\end{document}
\begin{document}
\title{Rough sets determined by tolerances}
\author[J.~J{\"a}rvinen]{Jouni J{\"a}rvinen}
\address{J.~J{\"a}rvinen, Sirkankuja 1, 20810~Turku, Finland}
\email{[email protected]}
\urladdr{\url{http://sites.google.com/site/jounikalervojarvinen/}}
\author[S.~Radeleczki]{S{\'a}ndor Radeleczki}
\thanks{Acknowledgements: The research of the second author was carried out as
part of the TAMOP-4.2.1.B-10/2/KONV-2010-0001 project supported by the
European Union, co-financed by the European Social Fund.}
\address{S.~Radeleczki, Institute of Mathematics\\
University of Miskolc\\3515~Miskolc-Egyetemv{\'a}ros\\Hungary}
\email{[email protected]}
\urladdr{\url{http://www.uni-miskolc.hu/~matradi/}}
\begin{abstract}
We show that for any tolerance $R$ on $U$, the ordered sets of lower and upper rough approximations
determined by $R$ form ortholattices. These ortholattices are completely distributive, thus forming
atomistic Boolean lattices, if and only if $R$ is induced by an irredundant covering of $U$, and in
such a case, the atoms of these Boolean lattices are described.
We prove that the ordered set $\mathit{RS}$ of rough sets determined by a tolerance $R$ on $U$
is a complete lattice if and only if it is a complete subdirect product of the complete lattices
of lower and upper rough approximations. We show that $R$ is a tolerance
induced by an irredundant covering of $U$ if and only if $\mathit{RS}$ is an algebraic completely
distributive lattice, and in such a situation a quasi-Nelson algebra can be defined on $\mathit{RS}$.
We present necessary and sufficient conditions which guarantee that for a tolerance $R$ on $U$,
the ordered set $\mathit{RS}_X$ is a lattice for all $X \subseteq U$, where $R_X$ denotes the
restriction of $R$ to the set $X$ and $\mathit{RS}_X$ is the corresponding set of rough sets.
We introduce the disjoint representation and the formal concept representation of rough
sets, and show that they are Dedekind--MacNeille completions of $\mathit{RS}$.
\end{abstract}
\keywords{Rough set, tolerance relation, knowledge representation, representation of lattices,
ortholattice, formal concept lattice}
\maketitle
\section{Introduction}
Rough sets were introduced in \cite{Pawl82} by Z.~Pawlak. The
key idea is that our knowledge about the properties of the objects of a
given universe of discourse $U$ may be inadequate or incomplete in the sense that the
objects of the universe $U$ can be observed only within the accuracy of indiscernibility relations.
According to Pawlak's original definition, an indiscernibility relation $E$ on $U$
is an equivalence relation interpreted so that two elements of $U$ are $E$-related if they cannot
be distinguished by their properties known by us. Thus, indiscernibility relations allow us to partition
a set of objects into classes of indistinguishable objects.
For any subset $X \subseteq U$, the \emph{lower approximation} $X^\blacktriangledown$
of $X$ consists of elements such that their $E$-class is
included in $X$, and the \emph{upper approximation} $X^\blacktriangle$ of $X$
is the set of the elements whose $E$-class intersects with $X$. This means that
$X^\blacktriangledown$ can be viewed as the set of elements certainly belonging
to $X$, because all elements $E$-related to them are also in $X$.
Similarly, $X^\blacktriangle$ may be interpreted as the set of elements that
possibly are in $X$, because in $X$ there is at least one
element indiscernible to them. The \emph{rough set} of $X$
is the pair $(X^\blacktriangledown,X^\blacktriangle)$ and the set of all
rough sets is
\begin{equation*}
\mathit{RS} = \{ (X^\blacktriangledown, X^\blacktriangle) \mid X \subseteq U \}.
\end{equation*}
The set $\mathit{RS}$ may be canonically ordered by the
coordinatewise order:
\begin{equation*}
(X^\blacktriangledown,X^\blacktriangle) \leq (Y^\blacktriangledown,Y^\blacktriangle) \iff X^\blacktriangledown \subseteq Y^\blacktriangledown \mbox{ \
and \ } X^\blacktriangle \subseteq Y^\blacktriangle.
\end{equation*}
In \cite{PomPom88} it was proved that $\mathit{RS}$ is a lattice
which forms also a Stone algebra. Later this result was improved in
\cite{Com93} by showing that $\mathit{RS}$ is in fact a regular double Stone algebra.
Therefore, $\mathit{RS}$ determines also a three-valued {\L}ukasiewicz algebra
and a semi-simple Nelson algebra, because it is well known that these three types of algebras
can be transformed to each other \cite{Pagliani97}.
Numerous generalizations of rough sets can be found in the literature in which
equivalences are replaced by relations of different types. For instance, it is
known that in the case of quasiorders (reflexive and transitive binary relations),
a Nelson algebra such that the underlying rough set lattice is an
algebraic lattice can be defined on $\mathit{RS}$ \cite{JarRad,JRV09}.
If rough sets are determined by relations that are symmetric and transitive,
then the structure of $\mathit{RS}$ is analogous to the case of equivalences \cite{Jarv04}.
For a more general approach in the case of partial equivalences, see \cite{Mani08}.
There exist also studies in which approximation operators are defined in terms
of an arbitrary binary relation -- this idea was first proposed in \cite{YaoLin1996}.
In \cite{Dzik2013}, expansions of bounded distributive lattices equipped with a Galois connection
are represented in terms of rough approximation operators defined by arbitrary binary relations. One may
also observe that in the current literature new approximation operators based on different
viewpoints are constantly being proposed (see e.g. \cite{AbuDonia2012,Ma2012} for some recent studies).
In this paper, we assume that indiscernibility relations are tolerances
(reflexive and symmetric binary relations). The term \emph{tolerance relation} was introduced
in the context of visual perception theory by E.~C.~Zeeman \cite{Zeeman62},
motivated by the fact that indistinguishability of ``points'' in the visual
world is limited by the discreteness of retinal receptors.
One can argue that tolerances are better suited than equivalences for representing indistinguishability,
because transitivity is the least obvious property of indiscernibility.
Namely, we may have a finite sequence of objects $x_1, x_2, \ldots, x_n$ such that each
two consecutive objects $x_i$ and $x_{i+1}$ are indiscernible, but there is a notable difference
between $x_1$ and $x_n$. It is known \cite{Jarv99,Jarv01} that in the case of tolerances,
$\mathit{RS}$ is not necessarily a lattice if the cardinality of $U$ is greater than four.
Our main goals in this work are to find conditions under which $\mathit{RS}$ forms a lattice,
and, in case $\mathit{RS}$ is a lattice, to study its properties.
As mentioned, originally rough set approximations were defined in terms of equivalences,
being bijectively related to partitions. In this paper, we consider tolerances, which
are closely connected to coverings. Several ways to define approximations in terms of coverings can be found in the literature
(see the recent surveys in \cite{Restrepo2013,Yao2012}), and in this work we also connect our approximation operators
to some covering-based approximation operators.
The paper is organized as follows: In Section~\ref{Sec:Preliminaries}, we
present the definition of rough approximation operators and present their essential
properties. In addition, we give preliminaries of Galois connections, ortholattices, and formal
concepts. Section~\ref{Sec:ToleranceApproximations}
is devoted to the rough set operators defined by tolerance relations.
We start from the well-known fact that for any tolerance on $U$, the pair $({^\blacktriangle},{^\blacktriangledown})$
is a Galois connection on the power set lattice of $U$, and we characterize
rough set approximation pairs as a certain kind of Galois connection
$(F,G)$ on a power set. We show that
$\wp(U)^\blacktriangledown = \{ X^\blacktriangledown \mid X \subseteq U \}$
and $\wp(U)^\blacktriangle = \{ X^\blacktriangle \mid X \subseteq U \}$ form
ortholattices and prove that these ortholattices are completely distributive if and only if
$R$ is induced by an irredundant covering of $U$. Note that distributive
ortholattices are Boolean lattices, and a Boolean lattice is atomistic
if and only if it is completely distributive. This means that
$\wp(U)^\blacktriangledown$ and $\wp(U)^\blacktriangle$ are atomistic Boolean lattices exactly when $R$
is induced by an irredundant covering of $U$, and we describe the atoms of these lattices.
In Section~\ref{Sec:OrderedSets}, we study the ordered set of rough sets
$\mathit{RS}$ and show that it can be up to isomorphism identified with a set of pairs
$ \{ (\mathcal{I}(X),\mathcal{C}(X)) \mid X \subseteq U \}$, where
$\mathcal{I}$ and $\mathcal{C}$ are interior and closure operators
on the set $U$ satisfying certain conditions. We prove that $\mathit{RS}$ is a
complete lattice if and only if it is a complete subdirect
product of $\wp(U)^\blacktriangledown$ and $\wp(U)^\blacktriangle$. We also show that $\mathit{RS}$ is an
algebraic completely distributive lattice if and only if $R$ is induced by an irredundant
covering of $U$, and in such a case, on $\mathit{RS}$ a quasi-Nelson algebra can be defined.
The section ends with necessary and sufficient conditions which guarantee that
for a tolerance $R$ on $U$, the ordered set $\mathit{RS}_X$ is a lattice for all
$X \subseteq U$, where $R_X$ denotes the restriction of $R$ to the set
$X$ and $\mathit{RS}_X$ is the set of all rough sets determined by $R_X$. Finally,
Section~\ref{Sec:DisjointRepresentations} is devoted to the disjoint representation
and the formal concept representation of rough sets. In particular, we prove that
these representations are Dedekind--MacNeille completions of $\mathit{RS}$.
\section{Preliminaries: Rough approximation operators, Galois connections, and formal concepts}
\label{Sec:Preliminaries}
First we recall from \cite{Jarv07} some notation and basic properties of rough approximation operators
defined by arbitrary binary relations. Let $R$ be a binary relation on the set $U$.
For any $X \subseteq U$,
we denote $$R(X) = \{ y \in U \mid x \, R \,y \text{ for some $x \in X$} \}.$$
For the singleton sets, $R(\{x\})$ is written simply as $R(x)$, that is,
$R(x) = \{ y \in U \mid x \, R \, y\}$. It is clear that
$R(X) = \bigcup_{x\in X} R(x)$ for all $X \subseteq U$.
The \emph{lower approximation} of a set $X \subseteq U$ is
\[
X^\blacktriangledown = \{ x \mid R(x) \subseteq X\}
\]
and
$X$'s \emph{upper approximation} is
\[
X^\blacktriangle = \{ x \mid R(x) \cap X \neq \emptyset \}.
\]
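
For a finite universe these operators can be computed directly from the definitions. The following small sketch (with the relation stored as a set of pairs) is only an illustration and is not part of the formal development; the example relation is a tolerance, anticipating Section~\ref{Sec:ToleranceApproximations}.
\begin{verbatim}
def neighbourhood(R, U, x):
    """R(x) = {y in U | x R y} for a relation R given as a set of pairs."""
    return {y for y in U if (x, y) in R}

def lower(R, U, X):
    """Lower approximation: elements whose R-neighbourhood is included in X."""
    return {x for x in U if neighbourhood(R, U, x) <= X}

def upper(R, U, X):
    """Upper approximation: elements whose R-neighbourhood intersects X."""
    return {x for x in U if neighbourhood(R, U, x) & X}

U = {1, 2, 3, 4}
R = {(x, x) for x in U} | {(1, 2), (2, 1), (2, 3), (3, 2)}
X = {1, 2}
print(lower(R, U, X), upper(R, U, X))   # {1} and {1, 2, 3}
\end{verbatim}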
Let $\wp(U)$ denote the \emph{power set} of $U$. It is a complete Boolean lattice
with respect to the set-inclusion order. The
map $^\blacktriangle$ is a complete join-homomorphism on $\wp(U)$, that is, it
preserves all unions:
\[
\big ( \bigcup_{X \in \mathcal{H}} X \big )^\blacktriangle = \bigcup_{X \in \mathcal{H}} X^\blacktriangle.
\]
Analogously, $^\blacktriangledown$ is a complete meet-homomorphism on $\wp(U)$ preserving all intersections:
\[
\big ( \bigcap_{X \in \mathcal{H}} X \big )^\blacktriangledown = \bigcap_{X \in \mathcal{H}} X^\blacktriangledown.
\]
Hence, the approximation operators are order-preserving, that is, $X \subseteq Y$ implies
$X^\blacktriangledown \subseteq Y^\blacktriangledown$ and $X^\blacktriangle \subseteq Y^\blacktriangle$.
In addition, approximation operators are dual, meaning that for all $X \subseteq U$, $X^{c\blacktriangle} = X^{\blacktriangledown c}$
and $X^{c\blacktriangledown} = X^{\blacktriangle c}$, where $X^c$ denotes the set-theoretical \emph{complement} $U \setminus X$ of $X$.
By the above, the set
\[
\wp(U)^\blacktriangledown = \{ X^\blacktriangledown \mid X \subseteq U\}
\]
is a \emph{closure system}, that is, it is closed under arbitrary intersections. Similarly,
\[
\wp(U)^\blacktriangle = \{ X^\blacktriangle \mid X \subseteq U\} \
\]
forms an \emph{interior system}, that is, it is closed under arbitrary unions.
With respect to the set-inclusion relation, the complete lattices $ \wp(U)^\blacktriangledown$ and $\wp(U)^\blacktriangle$ are dually
order-isomorphic via the map $X^\blacktriangledown \mapsto X^{\blacktriangledown c} = X^{c \blacktriangle}$.
For two ordered sets $P$ and $Q$, a pair
$(f,g)$ of maps $f \colon P \to Q$ and $g \colon Q \to P$
is called a \emph{Galois connection\/} between $P$ and $Q$
if for all $p \in P$ and $q \in Q$,
\[
f(p) \leq q \iff p \leq g(q).
\]
In the next lemma are listed some of the known properties of Galois connections.
\begin{lemma}\label{Lem:GaloisProperty}
Let $(f,g)$ be a Galois connection between two ordered sets $P$ and $Q$.
\begin{enumerate}[\rm (a)]
\item The composition $f \circ g \circ f$ equals $f$ and the composition
$g \circ f \circ g$ equals $g$.
\item The composition $g \circ f$ is a lattice-theoretical closure operator on $P$ and the set
of $g \circ f$-closed elements is $g(Q)$, that is, $(g \circ f)(P) = g(Q)$.
\item The composition $f \circ g$ is a lattice-theoretical interior operator on $Q$ and
the set of $f \circ g$-closed elements is $f(P)$, that is,
$(f \circ g)(Q) = f(P)$.
\item The image sets $f(P)$ and $g(Q)$ are order-isomorphic.
\item The map $f$ is a complete join-homomorphism and $g$ is a complete meet-homomorphism.
\item The maps $f$ and $g$ uniquely determine each other by the equations
\[ f(p) = \bigwedge \{ q \in Q \mid p \leq g(q)\} \mbox{ \ and \ }
g(q) = \bigvee \{ p \in P \mid f(p) \leq q\} .\]
\end{enumerate}
\end{lemma}
In addition, if the maps $f \colon P \to Q$ and $g \colon Q \to P$ between two complete
lattices $P$ and $Q$ form a Galois connection $(f,g)$, then $f(P)$ is a complete lattice such that for all
$S \subseteq f(P)$,
\[ \textstyle
\bigvee S = \bigvee_Q S
\ \mbox{ and } \
\bigwedge S = f \big ( g \big (\bigwedge_Q S \big ) \big) =
f \big ( \bigwedge_P g \big ( S \big ) \big),
\]
and $g(Q)$ is a complete lattice such that for all $S \subseteq g(Q)$,
\[ \textstyle
\bigvee S = g \big ( f \big (\bigvee_P S \big ) \big) =
g \big ( \bigvee_Q f \big ( S \big ) \big)
\ \mbox{ and } \
\bigwedge S = \bigwedge_P S.
\]
An \emph{orthocomplementation} on a bounded lattice is a function that maps each element
$x$ to an \emph{orthocomplement} $x^\bot$ in such a way that the following axioms hold:
\begin{enumerate}[({O}1)]
\item $x \leq y$ implies $y^\bot \leq x^\bot$;
\item $x^{\bot \bot} = x$;
\item $x \vee x^\bot = 1$ and $x \wedge x^\bot = 0$.
\end{enumerate}
An \emph{ortholattice} is a bounded lattice equipped with an orthocomplementation.
Ortholattices are self-dual by the map $^\bot$. Note that if an ortholattice
is distributive, then it is a Boolean lattice such that
the complement of the element $x$ is $x^\bot$.
Let $(f,g)$ be a Galois connection on a Boolean lattice $(B,\vee,\wedge,{^c},0,1)$.
We may define the maps $^\bot \colon f(B) \to f(B)$ and $^\top \colon g(B) \to g(B)$
by setting
\[
f(x)^\bot = f(f(x)^c) \text{ \ and \ } g(x)^{\top} = g(g(x)^c).
\]
The maps $^\bot$ and $^\top$ satisfy (O1), and if $f$ and $g$ are dual, that is,
$f(x)^c = g(x^c)$ for all $x \in B$, then $^\bot$ and $^\top$ satisfy (O2).
Additionally, if $f$ is extensive, that is, $x \leq f(x)$ for all $x \in B$, then $^\bot$
and $^\top$ satisfy (O3). These observations are summarized in the following
well-known lemma.
\begin{lemma}\label{Lem:Ortho}
If $(f,g)$ is a Galois connection on a Boolean lattice $B$ such
that $f$ and $g$ are dual, and $f$ is extensive, then $f(B)$ and $g(B)$
are ortholattices.
\end{lemma}
We end this section by presenting some terminology concerning formal concepts from
\cite{ganter1999formal}. A \emph{formal context} $\mathbb{K} = (G,M,I)$ consists of two
sets $G$ and $M$ and a relation $I$ from $G$ to $M$. The elements of $G$ are
called the \emph{objects} and the elements of $M$ are called \emph{attributes} of
the context. We write $g \, I \, m$\, or\, $(g,m) \in I$ to mean that the object $g$ has the
attribute $m$. For $A \subseteq G$ and $B \subseteq M$, we define
\[ A' = \{ m \in M \mid g \, I \, m \text{ for all } g \in A\}
\text{ \ and \ } B' = \{ g \in G \mid g \, I \, m \text{ for all } m \in B\}.
\]
A \emph{formal concept} of the context $(G,M,I)$ is a pair $(A,B)$ with
$A \subseteq G$, $B \subseteq M$, $A' = B$, and $B' = A$.
We call $A$ the \emph{extent} and $B$ the \emph{intent} of the
concept $(A,B)$. It is easy to see that $(A,B) \in \wp(G) \times \wp(M)$
is a concept if and only if $(A,B) = (A'',A') = (B',B'')$.
The set of all concepts of the context $\mathbb{K} = (G,M,I)$ is denoted
by $\mathfrak{B}(\mathbb{K})$.
The set $\mathfrak{B}(\mathbb{K})$ is ordered by
\begin{equation} \label{Eq:ConceptOrder}
(A_1,B_1) \leq (A_2,B_2) \iff A_1 \subseteq A_2 \iff B_1 \supseteq B_2.
\end{equation}
With respect to this order, $\mathfrak{B}(\mathbb{K})$ forms a complete lattice,
called the \emph{concept lattice} of the context $\mathbb{K}$, in which
\[
\bigvee_{j \in J} (A_j,B_j) = \Big ( \big ( \bigcup_{j \in J} A_j \big )'', \bigcap_{j \in J}B_j \Big )
\text{ \ and \ }
\bigwedge_{j \in J} (A_j,B_j) = \Big ( \bigcap_{j \in J}A_j , \big ( \bigcup_{j \in J} B_j \big )'' \Big ).
\]
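To make the derivation operators concrete, the following Python sketch (our own toy context; the objects, attributes, and incidence relation are made up) computes all formal concepts as the pairs $(A'',A')$ and checks the defining equations.
\begin{verbatim}
from itertools import chain, combinations

# Our made-up context (G, M, I), for illustration only.
G, M = {1, 2, 3}, {'p', 'q'}
I = {(1, 'p'), (2, 'p'), (2, 'q'), (3, 'q')}

def intent(A):   # A' = attributes shared by all objects in A
    return frozenset(m for m in M if all((g, m) in I for g in A))

def extent(B):   # B' = objects possessing all attributes in B
    return frozenset(g for g in G if all((g, m) in I for m in B))

def subsets(S):
    return chain.from_iterable(combinations(S, r) for r in range(len(S) + 1))

concepts = {(extent(intent(A)), intent(A)) for A in map(set, subsets(G))}
assert all(intent(A) == B and extent(B) == A for (A, B) in concepts)
print(sorted(concepts, key=lambda c: len(c[0])))   # ordered by the size of the extent
\end{verbatim}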
\section{Approximation operations defined by tolerances}
\label{Sec:ToleranceApproximations}
In this section, we recall from \cite{Jarv99,Jarv07} the characteristic properties of
rough set approximation operators defined by \emph{tolerance relations}, that is, reflexive
and symmetric binary relations; we also present some new results. It is known that the relation
$R$ is reflexive if and only if $X^\blacktriangledown \subseteq X \subseteq X^\blacktriangle$ for all $X \subseteq U$.
Similarly, $R$ is symmetric if and only if the pair $({^\blacktriangle}, {^\blacktriangledown})$ is a Galois
connection on $\wp(U)$. In the rest of this section, we assume that $R$ is a tolerance on
$U$. Note that for all $X \subseteq U$, we have $X^\blacktriangle = R(X) = \bigcup_{x \in X} R(x)$.
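For concreteness, the following Python sketch (ours; it uses the tolerance of Example~\ref{Ex:Counter}) computes the two approximation maps directly from the neighbourhoods $R(x)$.
\begin{verbatim}
# Our illustration: X -> X^upper and X -> X^lower for a finite tolerance R,
# given as a dictionary of neighbourhoods x -> R(x).
R = {'a': {'a', 'b'}, 'b': {'a', 'b', 'c'}, 'c': {'b', 'c', 'd'},
     'd': {'c', 'd', 'e'}, 'e': {'d', 'e'}}          # the tolerance of Ex:Counter
U = set(R)

def upper(X):   # X^upper = { x | R(x) meets X } = union of R(x) over x in X
    return {x for x in U if R[x] & set(X)}

def lower(X):   # X^lower = { x | R(x) is contained in X }
    return {x for x in U if R[x] <= set(X)}

assert upper({'a'}) == {'a', 'b'} and lower({'a', 'b'}) == {'a'}
assert all(lower(X) <= X <= upper(X) for X in ({'a'}, {'b', 'c'}, U))   # reflexivity
\end{verbatim}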
\begin{proposition}\label{Prop:GaloisCharacterization}
Let $(F,G)$ be a Galois connection on the complete lattice $\wp(U)$. Then,
there exists a tolerance $R$ on $U$ such that $F$ equals ${^\blacktriangle}$ and
$G$ equals ${^\blacktriangledown}$ if and only if the following conditions hold for
all $x,y \in U$:
\begin{enumerate}[\rm (i)]
\item $x \in F(\{x\})$;
\item $x \in F(\{y\})$ implies $y \in F(\{x\})$.
\end{enumerate}
\end{proposition}
\begin{proof}
($\Rightarrow$)\, Suppose that $F$ equals ${^\blacktriangle}$ and $G$ equals ${^\blacktriangledown}$ for some tolerance $R$.
Then, condition (i) means that $x \in R(x)$ for all $x \in U$, and (ii) means
that $x \in R(y)$ implies $y \in R(x)$ for any $x,y \in U$. These conditions are obviously
satisfied, because $R$ is a tolerance.
($\Leftarrow$)\, Let us define a binary relation $R$ by setting $x \, R \, y$ if and only
if $x \in F(\{y\})$. Because $F$ satisfies (i) and (ii), and $R(x) = F(\{x\})$ for all $x \in U$,
the relation $R$ is a tolerance. In addition,
\[ X^\blacktriangle = \bigcup_{x \in X} R(x) = \bigcup_{x \in X} F(\{x\}) = F \big ( \bigcup_{x \in X} \{x\} \big )
= F(X),
\]
because $F$ is a complete join-homomorphism. Since the two maps forming a Galois connection
determine each other uniquely by Lemma~\ref{Lem:GaloisProperty}(f), $G$ must equal $^\blacktriangledown$.
\end{proof}
Because $({^\blacktriangle}, {^\blacktriangledown})$ is a Galois connection on $\wp(U)$, the approximation operators
have all the properties listed in Lemma~\ref{Lem:GaloisProperty}. In particular, the map
$X \mapsto X^{\blacktriangle\blacktriangledown}$
is the closure operator corresponding to the closure system $\wp(U)^\blacktriangledown$,
which forms a complete lattice with respect to the order $\subseteq$ such that
\begin{equation}\label{Eq:DownLattice}
\bigvee_{X \in \mathcal{H}} X^\blacktriangledown = \big ( \bigcup_{X \in \mathcal{H}} X^\blacktriangledown \big )^{\blacktriangle\blacktriangledown}
\quad \mbox{ and } \quad
\bigwedge_{X \in \mathcal{H}} X^\blacktriangledown = \bigcap_{X \in \mathcal{H}} X^\blacktriangledown
\end{equation}
for all $\mathcal{H} \subseteq \wp(U)$. In addition,
$\wp(U)^\blacktriangledown = \{ X^{\blacktriangle\blacktriangledown} \mid X \subseteq U \}$.
Analogously, the map $X \mapsto X^{\blacktriangledown\blacktriangle}$
is the interior operator that corresponds to the interior system $\wp(U)^\blacktriangle$,
which is a complete lattice such that
\begin{equation}\label{Eq:UpLattice}
\bigvee_{X \in \mathcal{H}} X^\blacktriangle = \bigcup_{X \in \mathcal{H}} X^\blacktriangle
\quad \mbox{ and } \quad
\bigwedge_{X \in \mathcal{H}} X^\blacktriangle
= \big (\bigcap_{X \in \mathcal{H}} X^\blacktriangle \big )^{\blacktriangledown \blacktriangle}
\end{equation}
for all $\mathcal{H} \subseteq \wp(U)$. The interior system $\wp(U)^\blacktriangle$ can also be
written in the form
$\{ X^{\blacktriangledown\blacktriangle} \mid X \subseteq U \}$.
By Lemma~\ref{Lem:Ortho}, $\wp(U)^\blacktriangle$ and $\wp(U)^\blacktriangledown$ are ortholattices.
In $\wp(U)^\blacktriangle$, the orthocomplementation is $^\bot \colon X^\blacktriangle \mapsto X^{\blacktriangle c \blacktriangle}$,
and the map $^\top \colon X^\blacktriangledown \mapsto X^{\blacktriangledown c \blacktriangledown}$ is the
orthocomplementation operation of $\wp(U)^\blacktriangledown$. \label{Def:Ortho}
Hence, $\wp(U)^\blacktriangle$ and $\wp(U)^\blacktriangledown$ are self-dual, and
\[ (\wp(U)^\blacktriangle,\subseteq) \cong (\wp(U)^\blacktriangle,\supseteq) \cong(\wp(U)^\blacktriangledown,\subseteq) \cong(\wp(U)^\blacktriangledown,\supseteq). \]
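As a sanity check of the two orthocomplementations, the following sketch (ours) verifies axioms (O2) and (O3) for the map $X^\blacktriangle \mapsto X^{\blacktriangle c \blacktriangle}$ on $\wp(U)^\blacktriangle$, using the joins and meets of \eqref{Eq:UpLattice} and the tolerance of Example~\ref{Ex:Counter}.
\begin{verbatim}
from itertools import chain, combinations

# Our illustration: the orthocomplement of A = X^upper is (A^c)^upper.
R = {'a': {'a', 'b'}, 'b': {'a', 'b', 'c'}, 'c': {'b', 'c', 'd'},
     'd': {'c', 'd', 'e'}, 'e': {'d', 'e'}}
U = set(R)
upper = lambda X: frozenset(x for x in U if R[x] & set(X))
lower = lambda X: frozenset(x for x in U if R[x] <= set(X))
subsets = lambda S: chain.from_iterable(combinations(S, r) for r in range(len(S) + 1))

uppers = {upper(set(X)) for X in subsets(U)}         # the interior system P(U)^upper
ortho = lambda A: upper(U - A)
for A in uppers:
    assert ortho(ortho(A)) == A                      # (O2)
    assert set(A) | set(ortho(A)) == U               # (O3): the join is the union
    assert upper(lower(A & ortho(A))) == frozenset() # (O3): the meet is (A cap B)^(lower upper)
\end{verbatim}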
Next we study the relationship between the lattices of approximations
and concept lattices. For a tolerance $R$ on a set $U$, we consider the context
$\mathbb{K} = (U,U,R^c)$, whose concept lattice is
$\mathfrak{B}(\mathbb{K}) = \{ (X'',X') \mid X \subseteq U\}$.
For $X \subseteq U$,
\begin{align*}
X' = \{ x \in U \mid y \, R^c \, x \text{ for all } y \in X\}
= \{ x \in U \mid (x,y) \notin R \text{ for all } y \in X\}
= X^{\blacktriangle c}.
\end{align*}
Thus, $X^\blacktriangle = X^{\prime \, c}$ and $X^\blacktriangledown = X^{c \blacktriangle c} = X^{c \, \prime}$.
In addition, $X'' = X^{\blacktriangle\blacktriangledown}$ and hence
\[
\mathfrak{B}(\mathbb{K}) = \{ (X^{\blacktriangle\blacktriangledown},X^{c \blacktriangledown}) \mid X \subseteq U\}.
\]
If $(A,B) \in \mathfrak{B}(\mathbb{K})$, then $A,B \in \wp(U)^\blacktriangledown$ such that $A = B'$ and
$B = A'$. For any $A \in \wp(U)^\blacktriangledown$, $A' = A^{\blacktriangle c} = A^{c \blacktriangledown} = A^\top$,
where $^\top$ is the orthocomplement defined in $\wp(U)^\blacktriangledown$.
Thus, $(A,B) = (A,A^\top) = (B^\top,B)$. On the other hand, if $A \in \wp(U)^\blacktriangledown$,
then $A = A^{\blacktriangle \blacktriangledown}$ and $(A,A^\top)$ belongs to $\mathfrak{B}(\mathbb{K})$. Hence,
\[
\mathfrak{B}(\mathbb{K}) = \{ (A,A^\top) \mid A \in \wp(U)^\blacktriangledown \}.
\]
Notice that the literature contains studies in which notions of formal concept
analysis are applied to rough set theory.
In particular, Y.~Y.~Yao considers in \cite{yao2004concept} so-called
``complement contexts'', which lead naturally to the study of contexts of the
form $(U,U,R^c)$.
Let $\wp(U)^{\blacktriangledown \mathrm{op}}$ denote the dual of the lattice $\wp(U)^{\blacktriangledown}$, that is,
$(\wp(U)^\blacktriangledown,\supseteq)$.
\begin{proposition} \label{Prop:ConceptIsom}
Let $R$ be a tolerance on a set $U$ and\/ $\mathbb{K} = (U,U,R^c)$.
\begin{enumerate}[\rm (a)]
\item The complete lattices $\wp(U)^\blacktriangle$, $\wp(U)^\blacktriangledown$, and $\mathfrak{B}(\mathbb{K})$ are isomorphic.
\item The concept lattice $\mathfrak{B}(\mathbb{K})$ is a complete sublattice of
$\wp(U)^\blacktriangledown \times \wp(U)^{\blacktriangledown \mathrm{op}}$.
\end{enumerate}
\end{proposition}
\begin{proof}
(a) It is obvious that the map $A \mapsto (A,A^\top)$ is an isomorphism between
$\wp(U)^\blacktriangledown$ and $\mathfrak{B}(\mathbb{K})$, and we have already
noted that $\wp(U)^\blacktriangledown$ and $\wp(U)^\blacktriangle$ are isomorphic.
(b) Clearly, $\mathfrak{B}(\mathbb{K}) \subseteq \wp(U)^\blacktriangledown \times \wp(U)^\blacktriangledown$.
For all $\{A_j\}_{j \in J} \subseteq \wp(U)^\blacktriangledown$, the join in $\wp(U)^{\blacktriangledown}$
is $\bigvee_{j \in J} A_j = ( \bigcup_{j \in J} A_j )^{\blacktriangle\blacktriangledown} =
( \bigcup_{j \in J} A_j )''$, and the meet in $\wp(U)^\blacktriangledown$
is $\bigwedge_{j \in J} A_j = \bigcap_{j \in J} A_j$. Thus, the join in $\wp(U)^{\blacktriangledown \mathrm{op}}$
is $\bigvee_{j \in J} A_j = \bigcap_{j \in J} A_j$. Therefore, for any
$\{(A_j,B_j)\}_{j \in J} \subseteq \mathfrak{B}(\mathbb{K})$, its join $\bigvee_{j \in J} (A_j,B_j)$
coincides in $\mathfrak{B}(\mathbb{K})$ and $\wp(U)^\blacktriangledown \times \wp(U)^{\blacktriangledown \mathrm{op}}$.
An analogous observation can be done with respect to meets. Thus, $\mathfrak{B}(\mathbb{K})$
is a complete sublattice of $\wp(U)^\blacktriangledown \times \wp(U)^{\blacktriangledown \mathrm{op}}$.
\end{proof}
Note that Proposition~\ref{Prop:ConceptIsom} implies that for a tolerance $R$ and for the
context $\mathbb{K} = (U,U,R^c)$, the concept lattice $\mathfrak{B}(\mathbb{K})$ is an
ortholattice and the orthocomplement is obtained by swapping the sets (cf. \cite[p.~54]{ganter1999formal}),
in other words, for $(A,A^\top) \in \mathfrak{B}(\mathbb{K})$, its orthocomplement is $(A^\top, A)$.
Now we may present a characterization of complete ortholattices in terms of rough sets
operators defined by tolerances.
\begin{proposition} \label{Prop:OrthoCharacterization}
A complete lattice $L$ forms an ortholattice if and only if there exists a set $U$
and a tolerance $R$ on $U$ such that $L \cong \wp(U)^\blacktriangledown \cong \wp(U)^\blacktriangle$.
\end{proposition}
\begin{proof}
As noted, for a tolerance $R$ on $U$, the complete lattices $\wp(U)^\blacktriangledown \cong \wp(U)^\blacktriangle$
are ortholattices. Conversely, it is known that if $L$ is a complete ortholattice, then
there exists a context $\mathbb{K} = (U,U,I)$, where $I$ is an irreflexive and symmetric binary
relation on $U$, such that $L \cong \mathfrak{B}(\mathbb{K})$; see
\cite{ganter1999formal}. The maps $A \mapsto A'$ and $B \mapsto B'$ defined in this
context form an order-reversing Galois connection between $(\wp(U),\subseteq)$ and $(\wp(U),\supseteq)$.
Because $X \mapsto X^c$ is an order-isomorphism between $(\wp(U),\supseteq)$ and $(\wp(U),\subseteq)$,
the composite maps
\[
f \colon A \mapsto A^{\prime c} \qquad \text{and} \qquad g \colon B \mapsto B^{c \prime}
\]
form an order-preserving Galois-connection $(f,g)$ on $(\wp(U),\subseteq)$. If we set $R = I^c$,
then $R$ is obviously a tolerance and $\mathbb{K} = (U,U,R^c)$. By our above observations
$f(A) = A^\blacktriangle$ and $g(A) = A^\blacktriangledown$ for all $A \subseteq U$, and $L \cong \mathfrak{B}(\mathbb{K}) \cong \wp(U)^\blacktriangledown \cong \wp(U)^\blacktriangle$.
\end{proof}
The lattice $\wp(U)^\blacktriangledown$ is not necessarily even modular; for instance, in Example~\ref{Ex:Counter}
(p.~\pageref{Ex:Counter}), we define a tolerance $R$ on the set $U = \{a,b,c,d,e\}$ such that
$\wp(U)^\blacktriangledown = \{ \emptyset, \{a\}, \{c\}, \{e\}, \{a,b\}, \{a,e\}, \{d,e\}, \{a,b,c\}, \{c,d,e\}, U \}$.
Now, the set
$\{ \emptyset, \{a\}, \{c\}, \{a,b\}, \{a,b,c\} \}$ forms a sublattice of $\wp(U)^\blacktriangledown$
isomorphic to $\mathbf{N_5}$.
A complete lattice $L$ is \emph{completely distributive} if for any doubly indexed
subset $\{x_{i,\,j}\}_{i \in I, \, j \in J}$ of $L$, we have
\[
\bigwedge_{i \in I} \Big ( \bigvee_{j \in J} x_{i,\,j} \Big ) =
\bigvee_{ f \colon I \to J} \Big ( \bigwedge_{i \in I} x_{i, \, f(i) } \Big ), \]
that is, any meet of joins may be converted into the join of all
possible elements obtained by taking the meet over $i \in I$ of
elements $x_{i,\,k}$\/, where $k$ depends on $i$.
In \cite{ganter1999formal}, Theorem 40 presents the following condition equivalent to
the assertion that the concept lattice $\mathfrak{B}(\mathbb{K})$ is completely distributive:
\begin{itemize}
\item[($\dag$)] For every non-incident object-attribute pair $(g,m)\notin I$, there
exists an object $h\in G$ and an attribute $n\in M$ with $(g,n)\notin
I,(h,m)\notin I$, and $h\in k^{\prime\prime}$ for all $k\in G\setminus \{n\}^{\prime}$.
\end{itemize}
We know by Proposition~\ref{Prop:ConceptIsom} that for any tolerance $R$ on $U$,
$\wp(U)^\blacktriangle$ and $\wp(U)^\blacktriangledown$ are isomorphic
to the concept lattice of the context $\mathbb{K} = (U,U,R^c)$. If $\mathbb{K}$
is identified with $(G,M,I)$, then for all $x,y \in U$, $(x,y) \notin I$ means that
$x \, R \, y$ and $y \, R \, x$.
Since $x'' = \{x\}^{\blacktriangle\blacktriangledown} = R(x)^\blacktriangledown$, $y \in x''$ means that $R(y) \subseteq R(x)$.
In addition, $U \setminus \{x\}' = \{x\}^{\prime \, c} = \{x\}^\blacktriangle = R(x)$.
Hence ($\dag$) is equivalent to the following condition:
\begin{itemize}
\item[($\ddag$)] For any $a \, R \, b$, there exist $c,d\in U$ with
$a \, R \, c$ and $b \, R \, d$ such that for all $k\in R(c)$, we have $R(d)\subseteq R(k)$.
\end{itemize}
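Condition ($\ddag$) can be tested by brute force on small examples; the following sketch (ours) reports that the tolerance of Example~\ref{Ex:Counter} violates it, whereas the simplex tolerance of Example~\ref{Ex:Schreider} with $n = 3$ satisfies it.
\begin{verbatim}
from itertools import combinations

# Our illustration: a brute-force test of condition (ddag) for a finite
# tolerance given as a neighbourhood map x -> R(x).
def satisfies_ddag(R):
    for a in R:
        for b in R[a]:                                  # every pair a R b
            if not any(all(R[d] <= R[k] for k in R[c])
                       for c in R[a] for d in R[b]):
                return False
    return True

R1 = {'a': {'a', 'b'}, 'b': {'a', 'b', 'c'}, 'c': {'b', 'c', 'd'},
      'd': {'c', 'd', 'e'}, 'e': {'d', 'e'}}            # Example Ex:Counter

A = {1, 2, 3}                                           # the simplex of Example Ex:Schreider
V = [frozenset(s) for r in (1, 2, 3) for s in combinations(A, r)]
R2 = {B: {C for C in V if B & C} for B in V}

assert not satisfies_ddag(R1) and satisfies_ddag(R2)
\end{verbatim}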
In what follows, we present some conditions equivalent to ($\ddag$).
Let $R$ be a tolerance on $U$. A set $X\subseteq U$ is a \emph{preblock} of
$R$ if $a \, R \, b$ holds for all $a,b\in X$, that is, $X^{2} \subseteq R$,
where $X^{2}$ means the Cartesian product $X \times X$. A \emph{block} of $R$ is a maximal
preblock. It is well known that $B = \bigcap_{x\in B} R(x)$ for every block $B$, and that any preblock
is contained in some block of $R$ (see e.g. \cite{Shreider}). Hence, for any
$x,y \in U$, $x \, R \, y$ if and only if there exists a block $B$
such that $x,y \in B$.
Denoting the set of blocks of $R$ by
$\mathcal{B}(R)$, we obtain $R = \bigcup \{ B^{2} \mid B \in \mathcal{B}(R) \}$.
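On small universes the blocks can be computed by brute force, as in the following sketch (ours); it also checks the identity $B = \bigcap_{x \in B} R(x)$.
\begin{verbatim}
from itertools import combinations

# Our illustration: the blocks (maximal preblocks) of a finite tolerance.
def blocks(R):
    U = sorted(R)
    pre = [set(c) for r in range(1, len(U) + 1) for c in combinations(U, r)
           if all(y in R[x] for x in c for y in c)]          # preblocks
    return [B for B in pre if not any(B < C for C in pre)]   # the maximal ones

R = {'a': {'a', 'b'}, 'b': {'a', 'b', 'c'}, 'c': {'b', 'c', 'd'},
     'd': {'c', 'd', 'e'}, 'e': {'d', 'e'}}                  # Example Ex:Counter
blks = blocks(R)
assert sorted(map(sorted, blks)) == [['a', 'b'], ['b', 'c'], ['c', 'd'], ['d', 'e']]
assert all(set.intersection(*(R[x] for x in B)) == B for B in blks)   # B = cap R(x)
\end{verbatim}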
A collection $\mathcal{H} \subseteq \wp(U)$ of nonempty subsets of $U$ is called a
\emph{covering} of $U$ if $\bigcup \mathcal{H} = U$. A covering $\mathcal{H}$
is \emph{irredundant} if $\mathcal{H} \setminus \{X\}$ is not a covering of $U$ for any
$X\in \mathcal{H}$. Clearly, the blocks of any tolerance on $U$ form a covering, which is
not in general irredundant. Conversely, for
any covering $\mathcal{H}$ of $U$, the relation
$R_{\mathcal{H}}= \bigcup \{ X^{2} \mid X \in \mathcal{H} \}$
is a tolerance on $U$, called the \emph{tolerance induced by $\mathcal{H}$}.
Note that $\mathcal{H} \subseteq \mathcal{B}(R_\mathcal{H})$ for any irredundant covering $\mathcal{H}$ and that
this inclusion can be proper (see Example~\ref{Ex:Schreider}).
In \cite{Pomykala88}, J.~A.~Pomyka{\l}a presented the following definition of covering-based rough approximations.
Let $\mathcal{H} \subseteq \wp(U)$ be a covering of $U$ and denote $\mathcal{H}_x = \bigcup \{ B \in \mathcal{H} \mid x \in B\}$
for any $x \in U$. The approximations of any $X \subseteq U$ are defined by
\[ \underline{\mathcal{H}}(X) = \{ x \in U \mid \mathcal{H}_x \subseteq X \} \text{ \ and \ }
\overline{\mathcal{H}}(X) = \bigcup \{ B \in \mathcal{H} \mid B \cap X \ne \emptyset \}.
\]
If $R$ is a tolerance on $U$ and $\mathcal{H}$ is the family of blocks of $R$, that is,
$\mathcal{H} = \mathcal{B}(R)$, then for all $X \subseteq U$, $\underline{\mathcal{H}}(X) = X^\blacktriangledown$
and $\overline{\mathcal{H}}(X) = X^\blacktriangle$. Note also that $R(x) = \mathcal{H}_x$ for any $x \in U$ (see \cite{Jarv99} for details).
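A direct implementation (ours; the covering below is the set of blocks of the tolerance of Example~\ref{Ex:Counter}) illustrates these covering-based approximations and their coincidence with $^\blacktriangledown$ and $^\blacktriangle$.
\begin{verbatim}
# Our illustration: Pomykala's covering-based approximations.
def H_lower(H, X, U):
    Hx = {x: set().union(*(B for B in H if x in B)) for x in U}
    return {x for x in U if Hx[x] <= set(X)}

def H_upper(H, X):
    return set().union(*(B for B in H if B & set(X)))

U = {'a', 'b', 'c', 'd', 'e'}
H = [{'a', 'b'}, {'b', 'c'}, {'c', 'd'}, {'d', 'e'}]   # the blocks of Example Ex:Counter
assert H_lower(H, {'a', 'b', 'c'}, U) == {'a', 'b'}    # equals {a,b,c}^lower
assert H_upper(H, {'a'}) == {'a', 'b'}                 # equals {a}^upper
\end{verbatim}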
\begin{theorem} \label{Thm:DC}
Let $R$ be a tolerance on $U$. The following assertions are equivalent:
\begin{enumerate}[\rm (a)]
\item $R$ satisfies {\rm ($\ddag$)}.
\item For any $a \, R \, b$, there exists $d\in U$ with $R(d)\subseteq R(a)\cap R(b)$ such that for
all $x \, R \, d$, we have $R(d) \subseteq R(x)$.
\item For any $a \, R \, b$, there exists a block $B \in \mathcal{B}(R)$ and an element
$d\in B$ such that $a,b \in R(d) = B$.
\item $R$ is a tolerance induced by an irredundant covering of $U$.
\end{enumerate}
\end{theorem}
\begin{proof}
(a)$\Rightarrow$(b): Let $R$ be a tolerance satisfying ($\ddag$). Then,
$a \, R \, b$ implies that there are $c\in R(a)$ and $d\in R(b)$ such that
$R(d)\subseteq R(c)$ and $R(d)\subseteq R(a)$.
Additionally, $b \in R(d)$ implies $b \in R(c)$, which gives $R(d) \subseteq R(b)$.
Thus, $R(d) \subseteq R(a)\cap R(b)$.
Finally, if $x \, R \, d$, then $x \in R(d) \subseteq R(c)$, and hence $R(d) \subseteq R(x)$
by ($\ddag$).
(b)$\Rightarrow$(c): Suppose $R$ satisfies (b). If $a \, R \, b$, then there
is an element $d\in U$ with $R(d)\subseteq R(a)\cap R(b)$. Thus, also $a \, R \, d$ and $b \, R \, d$
hold, and we have $\{a,b,d\}^{2}\subseteq R$. Hence, there is a block $B\in$
$\mathcal{B}(R)$ with $a,b,d\in B$. Note that since $R$ satisfies (b),
we have $x \, R \, d$ and $R(d) \subseteq R(x)$ for all $x\in B$. Thus, we get
\[
B\subseteq R(d) \subseteq \bigcap_{x\in B} R(x) = B,
\]
and hence $a,b \in R(d) = B$.
(c)$\Rightarrow$(d): Suppose that (c) holds and let us define a
family $\mathcal{K}$ of blocks by
$\mathcal{K} = \{B \in \mathcal{B}(R) \mid B= R(d) \text{ for some $d\in U$} \}$.
Now, for all $x\in U$, $x \, R \, x$ implies that there is a block
$B \in\mathcal{K}$ containing $x$, and so $\mathcal{K}$ is a
covering of $U$. In view of (c), for any $a \, R \, b$, there is a block
$B\in\mathcal{K}$ with $a,b\in B$. Hence,
$R\subseteq \bigcup_{B\in\mathcal{K}} B^{2}\subseteq R$, giving
$R = \bigcup_{B\in\mathcal{K}} B^{2}$. Thus, $R$ is induced by the covering
$\mathcal{K}$. Finally, we show
that $\mathcal{K}$ is irredundant by proving that
$\bigcup (\mathcal{K} \setminus\{B\}) \neq U$ for any $B \in \mathcal{K}$.
Indeed, if $B \in \mathcal{K}$, then there exists $d \in U$ such that $R(d) = B$.
Suppose that $d \in X$ for some block $X \in \mathcal{K} \setminus\{B\}$.
Then, $d \, R \, x$ for all $x\in X$, whence we get $X \subseteq R(d) = B$.
Since $X$ and $B$ are blocks, we obtain $X = B$, a contradiction.
Thus, $d \notin \bigcup (\mathcal{K}\setminus\{B\})$.
(d)$\Rightarrow$(b): Assume that $R = \bigcup \{ X^{2} \mid X \in \mathcal{H} \}$,
where $\mathcal{H}$ is an irredundant covering of $U$ and suppose that $a \,R \, b$.
Then, there exists $X \in \mathcal{H}$ such that $a,b \in X$, and clearly, $x \,R \, y$ for all $x,y\in X$.
Hence, $X \subseteq R(x)$ for all $x \in X$. Since
$\bigcup ( \mathcal{H} \setminus \{X\} ) \neq U$, there is $d \in X$ such
that $d \notin Y$ for all $Y \in \mathcal{H} \setminus \{X\}$.
Observe that $d \, R \, y$ for some $y \in U \setminus X$ would imply that
$\{d,y\}$ is contained in some block $Y \in \mathcal{H}$ different from $X$.
Since this is impossible, we get $R(d) \subseteq X \subseteq R(x)$
for all $x \in X$. In particular, we obtain $R(d)\subseteq R(a)\cap R(b)$,
and also $R(d)\subseteq R(x)$ for each $x\in U$ with $x \, R \, d$,
because $x \, R \, d$ implies $x \in R(d) \subseteq X$.
(b)$\Rightarrow$(a): If (b) holds, then ($\ddag$) is satisfied with $c = d$.
\end{proof}
Our next proposition now follows directly from the observations above.
\begin{proposition} \label{Prop:ComplDistributive}
Let $R$ be a tolerance on a set $U$. The complete lattices $\wp(U)^\blacktriangledown$ and
$\wp(U)^\blacktriangle$ are completely distributive if and only if
$R$ is induced by an irredundant covering of $U$.
\end{proposition}
\begin{remark} \label{Rem:Block}
If $a \, R \, b$ holds for all $a,b \in R(x)$, then $R(x)$ is a block. Namely,
in this case $R(x)= \bigcap_{a \in R(x)} R(a)$, which means that
$R(x)$ is a block of $R$.
\end{remark}
Next we characterize irredundant coverings in terms of tolerances they induce.
\begin{proposition} \label{Prop:IrredundantCovering}
Let $R$ be a tolerance induced by a covering $\mathcal{H} \subseteq \wp(U)$. Then, the
following assertions are equivalent:
\begin{enumerate}[\rm (a)]
\item $\mathcal{H}$ is an irredundant covering;
\item For each $B \in \mathcal{H}$, there exists $d \in U$ such that $R(d)= B$.
\end{enumerate}
\end{proposition}
\begin{proof}
(a)$\Rightarrow$(b): Let $\mathcal{H}$ be an irredundant covering and $B \in \mathcal{H}$. This means
that there is $d \in B$ such that $d \notin \bigcup ( \mathcal{H} \setminus \{B\} )$. Since $R$ is
induced by $\mathcal{H}$, we have $d \, R \, b$ for all $b \in B$. Thus, $B \subseteq R(d)$.
On the other hand, if $d \, R \, x$, then there is a block $B' \in \mathcal{H}$ such that $d,x \in B'$.
Since $d \notin \bigcup ( \mathcal{H} \setminus \{B\} )$, we obtain $B' = B$ and $x \in B$.
Thus, also $R(d) \subseteq B$ and $R(d) = B$.
(b)$\Rightarrow$(a): It is enough to show that $d \notin \bigcup ( \mathcal{H} \setminus \{B\} )$,
where $B$ belongs to $\mathcal{H}$ and $d$ is such that $R(d) = B$. Assume for contradiction
that there exists $B ' \in \mathcal{H}$ such that $B' \neq B$ and $d \in B'$. By (b),
there is $d'$ such that $R(d') = B'$. Since $d,d' \in B'$, we have $d \, R \, d'$ and $d, d' \in B$.
Hence, if $a \in B$, then $a \, R \, d'$ and $a \in R(d') = B'$, that is,
$B \subseteq B'$. Similarly, $a \in B'$ implies $a \, R \, d$ and $a \in R(d) = B$, giving
$B' \subseteq B$. Therefore, $B = B'$, a contradiction.
\end{proof}
\begin{example} \label{Ex:InfSyst1}
In this example, we consider how tolerances induced by an irredundant covering
arise in information systems. By Proposition~\ref{Prop:IrredundantCovering}, the essential condition
is that for each member $B$ of a covering $\mathcal{H}$, there exists $d \in U$ such that $R(d)= B$.
\noindent
(a) Information systems introduced by Pawlak \cite{pawlak1981information} are triples
$\mathcal{S} = (U,A,\{V_a\}_{a \in A})$, where $U$ is a nonempty set of \emph{objects},
$A$ is a nonempty set of \emph{attributes}, and $\{V_a\}_{a \in A}$ is an
indexed set of \emph{value sets of attributes}. Each attribute is a function $a \colon U \to V_a$.
In real-world situations, some attribute values for an object may be missing. In
\cite{Kryszkiewicz1998} these \emph{null values} are dealt with by marking them
with $*$. Information systems of this kind are called \emph{incomplete information systems}.
For each $B \subseteq A$, the following tolerance is defined:
\[
\mathit{sim}_B = \{ (x,y) \in U \times U \mid (\forall a \in B) \, a(x) = a(y) \text{ or }
a(x) = * \text{ or } a(y) = * \}.
\]
Additionally, let us denote by
\[
\mathit{compl}_B(U) = \{ x \in U \mid a(x) \neq * \text{ for all } a \in B \}
\]
the set of \emph{$B$-complete elements}.
Let us assume that each element of $U$ is $\mathit{sim}_B$-related to at least one $B$-complete element.
Then, the family
$\mathcal{H}_B = \{ \mathit{sim}_B(x) \mid x \in \mathit{compl}_B(U) \}$ is a covering, and $\mathcal{H}_B$
is clearly irredundant, because each $B$-complete element $x$ can belong only to
$\mathit{sim}_B(x)$. It is also obvious that $\mathit{sim}_B$ is induced by
$\mathcal{H}_B$.
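As a small illustration (the objects, attributes, and values in the table below are made up by us), the tolerance $\mathit{sim}_B$ and the covering $\mathcal{H}_B$ can be computed as follows.
\begin{verbatim}
# Our made-up incomplete information system; '*' marks a missing value.
table = {1: {'colour': 'red',  'size': 'small'},
         2: {'colour': '*',    'size': 'small'},
         3: {'colour': 'blue', 'size': '*'},
         4: {'colour': 'blue', 'size': 'large'}}

def sim(B):
    def ok(u, v, a):
        return table[u][a] == table[v][a] or '*' in (table[u][a], table[v][a])
    return {(u, v) for u in table for v in table if all(ok(u, v, a) for a in B)}

B = {'colour', 'size'}
complete = {u for u in table if all(table[u][a] != '*' for a in B)}   # B-complete objects
H_B = {frozenset(v for (u, v) in sim(B) if u == c) for c in complete}
assert sorted(map(sorted, H_B)) == [[1, 2], [3, 4]]   # an irredundant covering of U
\end{verbatim}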
\noindent
(b) Another way to present incomplete information is to use nondeterministic information systems (see e.g. \cite{Jarv99,DemOrl02}).
A \emph{nondeterministic information system} $\mathcal{S} = (U,A,\{V_a\}_{a \in A})$ is such that each attribute is a function
$a \colon U \to \wp(V_a) \setminus \{\emptyset\}$.
Note that Pawlak's ``original'' information systems considered in (a) can be viewed as nondeterministic
information systems such that $|a(x)| = 1$ for all $x \in U$ and $a \in A$.
We may interpret a nondeterministic information system $\mathcal{S}$ as a so-called \emph{approximate information system} in
such a way that for an object $x \in U$ and an attribute $a \in A$,
the unique value of the attribute $a$ for the object $x$ is assumed to be in $a(x)$; complete ignorance is
denoted by $a(x) = V_a$.
Let $B \subseteq A$. Resembling case (a), we say that an object $x$ is \emph{$B$-complete}
if $a(x)$ is a singleton for all $a \in B$. In other words, for a $B$-complete object, all its $B$-values are known precisely,
without ambiguity. These elements can be considered as learning examples.
Let us again denote by $\mathit{compl}_B(U)$ the set of $B$-complete elements. Note that for all $B$-complete elements $x$ and $a \in A$,
we can simply write $a(x) = v$, that is, $a$ behaves like a single-valued attribute for them.
For any $B \subseteq A$, we can now define the following relation $R_B$:
\[ (x,y) \in R_B \iff \text{there exists } c \in \mathit{compl}_B(U) \text{ such that } a(c) \in a(x) \cap a(y) \text{ for all $a \in B$}. \]
This means that two objects $x$ and $y$ are $R_B$-related if and only if there is a completely known element $c$, which
is ``potentially'' $B$-indiscernible with $x$ and $y$.
It is easy to see that for all $B \subseteq A$, the relation $R_B$ satisfies conditions (b) and (c) of Theorem~\ref{Thm:DC}.
The learning examples $c \in \mathit{compl}_B(U)$ are such that $\{ R_B(c) \mid c \in \mathit{compl}_B(U)\}$ forms an irredundant covering and the relation
induced by this covering is $R_B$.
Note that both in (a) and (b), if all elements of $U$ are $B$-complete, then the relations $\mathit{sim}_B$ and
$R_B$ are equivalences, and the corresponding irredundant covering is the partition induced by these equivalences.
\end{example}
Let $L$ be a lattice with a least element $0$.
The lattice $L$ is \emph{atomistic} if every element of $L$ is the join of
the atoms below it. It is well known (see e.g.\@ \cite{Grat98}) that a
complete Boolean lattice is atomistic if and only if it is completely distributive.
\begin{proposition} \label{Prop:Boolean}
Let $R$ be a tolerance induced by an irredundant covering of $U$. The complete lattices
$\wp(U)^\blacktriangledown$ and $\wp(U)^\blacktriangle$ are atomistic Boolean lattices such that
$\{ R(x)^\blacktriangledown \mid R(x) \text{ is a block}\, \}$ and $\{ R(x) \mid R(x) \text{ is a block}\, \}$
are their sets of atoms, respectively.
\end{proposition}
\begin{proof}
By Proposition~\ref{Prop:ComplDistributive}, $\wp(U)^\blacktriangledown$ and
$\wp(U)^\blacktriangle$ are completely distributive.
Because they are ortholattices also, they are Boolean lattices, and
since they are completely distributive, they must be atomistic.
Atoms of $\wp(U)^\blacktriangle$ need to be of the form $R(x)$, because the map $^\blacktriangle$ is
order-preserving and $R(x) = \{x\}^\blacktriangle$. Since $^\blacktriangledown$ is an isomorphism from
$\wp(U)^\blacktriangle$ to $\wp(U)^\blacktriangledown$, the atoms of $\wp(U)^\blacktriangledown$ are of the form
$R(x)^\blacktriangledown$.
Suppose that $R(x)$ is a block and $R(y) \subseteq R(x)$. Because $y \in R(x)$
and $R(x)$ is a block, we must have $R(y) = R(x)$, and
hence $R(x)$ is an atom of $\wp(U)^\blacktriangle$. Analogously, $R(x)^\blacktriangledown$
is an atom of $\wp(U)^\blacktriangledown$, whenever $R(x)$ is a block.
On the other hand, suppose $R(x)$ is an atom of $\wp(U)^\blacktriangle$; then
$R(x)^\blacktriangledown$ is an atom of $\wp(U)^\blacktriangledown$. Suppose that $a,b \in R(x)$.
Since $a \, R \, x$, by Theorem~\ref{Thm:DC}, there exists $d \in U$ with
$R(d)\subseteq R(a) \cap R(x)$, and so $d \in R(a)^\blacktriangledown \cap R(x)^\blacktriangledown$.
Hence, $\emptyset \subset R(a)^\blacktriangledown \cap R(x)^\blacktriangledown \subseteq R(x)^\blacktriangledown$, and
because $R(x)^\blacktriangledown$ is an atom,
we have $R(a)^\blacktriangledown \cap R(x)^\blacktriangledown = R(x)^\blacktriangledown$, that is,
$R(x)^\blacktriangledown \subseteq R(a)^\blacktriangledown$. Now $x \in R(x)^\blacktriangledown \subseteq R(a)^\blacktriangledown$
implies $b \in R(x) \subseteq R(a)$. Thus, $a \, R \, b$, and so,
by Remark~\ref{Rem:Block}, $R(x)$ is a block.
\end{proof}
\begin{example}\label{Ex:Schreider}
Let $A = \{1,2,\ldots,n\}$ be a finite set. We define a tolerance $R$ on
$U = \wp(A) \setminus \{\emptyset\}$ by setting for any nonempty subsets $B,C\subseteq A$:
\[
(B, C) \in R \iff B \cap C \neq \emptyset.
\]
The structure $(U,R)$ is called an \emph{$(n-1)$-dimensional simplex} (see \cite{Shreider}).
Let $i\in A$ and define the set $K_{i} = \{B \in U \mid i\in B\}$.
Clearly, $R(\{i\})=K_{i}$, and it is easy to see that $K_{i}$ is also a
tolerance block. Now let $\mathcal{H} = \{K_{1},K_{2},...,K_{n}\}$. Then,
$(B, C) \in R$ means that $B$ and $C$ have a common element $j$ and
$(B,C)\in {K_j}^2$. Hence,
\[
R = {K_1}^2 \cup {K_2}^2 \cup \cdots \cup {K_n}^2.
\]
Clearly, $\wp(A)\setminus \{\emptyset\} = K_{1} \cup K_{2} \cup \cdots \cup K_{n}$, that is,
$\mathcal{H}$ is a covering of $U$. This covering is irredundant, because
if we omit $K_j$, then the set $\{j\}$ cannot be covered.
For instance, if $n = 3$, then $K_1 = \{ \{1\}, \{1,2\}, \{1,3\}, A \}$,
$K_2 = \{ \{2\}, \{1,2\}, \{2,3\}, A \}$, and $K_3 = \{ \{3\}, \{1,3\}, \{2,3\}, A \}$.
Also the set $\{ \{1,2\}, \{1,3\}, \{2,3\}, A \}$ is a block of $R$, showing
that $\mathcal{H} \subset \mathcal{B}(R_\mathcal{H})$.
Because $\wp(U)$ is finite, by Proposition~\ref{Prop:Boolean} this means that $\wp(U)^\blacktriangledown$ and $\wp(U)^\blacktriangle$
are finite Boolean lattices.
\end{example}
For a tolerance $R$, an \emph{$R$-path} is a sequence $a_0, a_1, \ldots,a_n$ of distinct elements of $U$
such that $a_i \, R \, a_{i+1}$ for all $0 \leq i \leq n-1$. The \emph{length} of a path
is the number of elements in the sequence minus one. Note that each point of $U$ forms a path of length zero.
We denote by $\overline{R}$ the transitive closure of $R$, that is,
\[ \overline{R} = R \cup R^2 \cup R^3 \cup \cdots \cup R^n \cup \cdots \ . \]
Then, $\overline{R}$ is the smallest equivalence containing $R$.
Note that for any $X \subseteq U$, the upper $\overline{R}$-approximation ${\overline{R}}(X)$
of $X$ consists of the elements that are connected to at
least one element in $X$ by an $R$-path.
We denote ${\overline{R}}(X)$ simply by $\overline{X}$.
For any $a \in \overline X$, we define the \emph{distance of $a$ from $X$}, denoted
$\delta(a,X)$, as the minimal length of an $R$-path connecting $a$ at least to one element
in $X$. Note that if $a \in X$, then $\delta(a,X) = 0$. For any $n \geq 0$, let us define the set
\[ X^n = \{ a \in U \mid \delta(a,X) = n \} .\]
The above means that $\overline{X} = \bigcup_{n \geq 0} X^n$. In addition, we denote:
\begin{align*}
X_{\rm even} &= X^0 \cup X^2 \cup X^4 \cup \cdots \cup X^{2k} \cup \cdots \ \\
X_{\rm odd} &= X^1 \cup X^3 \cup X^5 \cup \cdots \cup X^{2k+1} \cup \cdots \
\end{align*}
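The distances $\delta(a,X)$ and hence the sets $X_{\rm even}$ and $X_{\rm odd}$ are easily obtained by breadth-first search, as in the following sketch (ours; the tolerance is that of Example~\ref{Ex:Counter}).
\begin{verbatim}
from collections import deque

# Our illustration: delta(a, X), X_even and X_odd via breadth-first search.
def strata(R, X):
    dist, frontier = {x: 0 for x in X}, deque(X)
    while frontier:
        x = frontier.popleft()
        for y in R[x]:
            if y not in dist:              # shortest R-path length from X
                dist[y] = dist[x] + 1
                frontier.append(y)
    even = {a for a, n in dist.items() if n % 2 == 0}
    odd = {a for a, n in dist.items() if n % 2 == 1}
    return dist, even, odd

R = {'a': {'a', 'b'}, 'b': {'a', 'b', 'c'}, 'c': {'b', 'c', 'd'},
     'd': {'c', 'd', 'e'}, 'e': {'d', 'e'}}
dist, even, odd = strata(R, {'a'})
assert even == {'a', 'c', 'e'} and odd == {'b', 'd'}   # here bar{X} = U
\end{verbatim}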
The next lemma presents some properties of $X_{\rm even}$ and $X_{\rm odd}$
that will be needed in the proofs of the next section.
\begin{lemma}\label{Lem:OddEven}
If $\rho$ is a tolerance on $U$, then the
following assertions hold for all $X \subseteq U$:
\begin{enumerate}[\rm (a)]
\item $\overline{X}$ is a disjoint union of $X_{\rm even}$ and $X_{\rm odd}$.
\item $X\subseteq X_{\rm even}$ and $\rho(X) \setminus X \subseteq X_{\rm odd}$.
\item $X_{\rm odd} \subseteq \rho(X_{\rm even}) = \overline{X}$ and $\overline{X} \setminus X \subseteq \rho( X_{\rm odd})$.
\item $X \subseteq \rho(\overline{X}\setminus X)$ implies
$\rho(X_{\rm odd}) = \rho(X_{\rm even}) = \overline{X}$.
\end{enumerate}
\end{lemma}
\begin{proof} Claim (a) is obvious.
(b) Clearly $X = X^0 \subseteq X_{\rm even}$, and $\rho(X) = X \cup X^1$ implies
$\rho(X) \setminus X = X^1 \subseteq X_{\rm odd}$.
(c) Suppose that $a \in X_{\rm odd}$. Then, there is a $\rho$-path $(a_0,\ldots,a_{2k},a_{2k+1})$ of length
$2k + 1$ such that $a_0 \in X$ and $a_{2k + 1} = a$. So, $(a_0,\ldots,a_{2k})$ is a $\rho$-path of length
$2k$ and thus $a_{2k} \in X_{\rm even}$. Now $(a, a_{2k}) \in \rho$ gives $a \in \rho(X_{\rm even})$.
In addition, $\overline{X} = X_{\rm odd} \cup X_{\rm even} \subseteq \rho(X_{\rm even}) \subseteq \overline{X}$.
For the other part, let $a \in \overline{X} \setminus X$. Then, we have $a \in X^n$ for some $n \geq 1$.
If $n$ is an odd number, then $a \in X_{\rm odd} \subseteq \rho(X_{\rm odd})$. If $n$ is even,
then $n-1$ is odd and $a \in \rho(X^{n-1}) \subseteq \rho(X_{\rm odd})$.
(d) Assume $X \subseteq \rho(\overline{X} \setminus X)$. Then, for each $x \in X$, there is
$y \in \overline{X} \setminus X$ such that $x \, \rho \, y$. Since $y \in X^1 \subseteq X_{\rm odd}$,
we get $x \in \rho(X_{\rm odd})$ and $X \subseteq \rho( X_{\rm odd})$. Thus,
$\overline{X} = X \cup (\overline{X} \setminus X) \subseteq \rho(X_{\rm odd}) \subseteq \overline{X}$.
\end{proof}
\section{Lattice structures of rough sets determined by tolerances}
\label{Sec:OrderedSets}
Let $R$ be a tolerance on $U$.
We begin by considering the set $\wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$ ordered coordinatewise
by $\subseteq$. It is clear that $\wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$
is a complete lattice such that
\begin{align}
\bigwedge_{i \in I} (A_i,B_i) =
\Big ( \bigcap_{i \in I} A_i, \big (\bigcap_{i \in I} B_i \big )^{\blacktriangledown \blacktriangle} \Big )
\intertext{and}
\bigvee_{i \in I} (A_i,B_i) =
\Big ( \big (\bigcup_{i \in I} A_i \big )^{\blacktriangle \blacktriangledown}, \bigcup_{i \in I} B_i \Big )
\end{align}
for all $(A_i,B_i)_{i \in I} \subseteq \wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$. Let us
define a map ${\sim}$ on $\wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$ by setting for any $(A,B) \in
\wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$:
\begin{equation}\label{Eq:Negation}
{\sim}(A,B) = (B^c, A^c).
\end{equation}
The map ${\sim}$ can be viewed as a so-called \emph{De~Morgan operation},
because it satisfies for all $(A,B) \in \wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$,
\[
{\sim}{\sim}(A,B) = (A,B)
\]
and
\[(A_1, B_1) \leq (A_2,B_2) \text{ if and only if }
{\sim} (A_1, B_1) \geq {\sim} (A_2, B_2) .
\]
In this work, lattices with a De~Morgan operation are called \emph{polarity lattices}.
Note that so-called De~Morgan lattices/algebras are usually distributive, but polarity
lattices are generally not \cite{Birk95}.
As in the case of equivalences, the set of all rough sets is denoted by
$\mathit{RS} = \{ (X^\blacktriangledown, X^\blacktriangle) \mid X \subseteq U \}$. Obviously,
$\mathit{RS} \subseteq \wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$ and $\mathit{RS}$ is
bounded with $(\emptyset,\emptyset)$ as the least element and $(U,U)$
as the greatest element. It is known \cite{Jarv07} that $\mathit{RS}$ is self-dual by the map
${\sim}$, and for all $X \subseteq U$,
\[ {\sim}(X^\blacktriangledown, X^\blacktriangle) = (X^{\blacktriangle c}, X^{\blacktriangledown c}) = (X^{c \blacktriangledown}, X^{c \blacktriangle}).\]
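The following sketch (ours) generates $\mathit{RS}$ for the tolerance of Example~\ref{Ex:Counter} and confirms that ${\sim}$ is an involution mapping $\mathit{RS}$ onto itself.
\begin{verbatim}
from itertools import chain, combinations

# Our illustration: the rough sets of a finite tolerance and the map ~.
R = {'a': {'a', 'b'}, 'b': {'a', 'b', 'c'}, 'c': {'b', 'c', 'd'},
     'd': {'c', 'd', 'e'}, 'e': {'d', 'e'}}
U = set(R)
up = lambda X: frozenset(x for x in U if R[x] & set(X))
low = lambda X: frozenset(x for x in U if R[x] <= set(X))
subsets = lambda S: chain.from_iterable(combinations(S, r) for r in range(len(S) + 1))

RS = {(low(set(X)), up(set(X))) for X in subsets(U)}
neg = lambda A, B: (frozenset(U - B), frozenset(U - A))
assert all(neg(*p) in RS for p in RS)         # RS is self-dual under ~
assert all(neg(*neg(*p)) == p for p in RS)    # ~ is an involution
print(len(RS))                                # the number of rough sets here
\end{verbatim}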
Recall that the map $X \mapsto X^{\blacktriangle \blacktriangledown}$ is a closure operator and $X \mapsto X^{\blacktriangledown \blacktriangle}$
is an interior operator (see Section~\ref{Sec:ToleranceApproximations}).
Our next lemma shows how $\mathit{RS}$ can be also represented up to isomorphism
as interior-closure pairs.
\begin{lemma} \label{Lem:Isomorphic}
If $R$ is a tolerance, then $\mathit{RS} \cong \{ (X^{\blacktriangledown\blacktriangle}, X^{\blacktriangle\blacktriangledown}) \mid X \subseteq U \}$.
\end{lemma}
\begin{proof}
We show that the map $\varphi \colon (X^\blacktriangledown,X^\blacktriangle) \mapsto (X^{\blacktriangledown\blacktriangle}, X^{\blacktriangle\blacktriangledown})$ is an order-isomorphism.
If $(X^\blacktriangledown,X^\blacktriangle) \leq (Y^\blacktriangledown,Y^\blacktriangle)$, then $X^\blacktriangledown \subseteq Y^\blacktriangledown$ implies
$X^{\blacktriangledown\blacktriangle} \subseteq Y^{\blacktriangledown\blacktriangle}$. Similarly, $X^\blacktriangle \subseteq Y^\blacktriangle$ gives
$X^{\blacktriangle\blacktriangledown} \subseteq Y^{\blacktriangle\blacktriangledown}$.
Thus, $(X^{\blacktriangledown\blacktriangle}, X^{\blacktriangle\blacktriangledown}) \leq (Y^{\blacktriangledown\blacktriangle}, Y^{\blacktriangle\blacktriangledown})$.
On the other hand, if $(X^{\blacktriangledown\blacktriangle}, X^{\blacktriangle\blacktriangledown}) \leq (Y^{\blacktriangledown\blacktriangle}, Y^{\blacktriangle\blacktriangledown})$,
then $X^{\blacktriangledown\blacktriangle} \subseteq Y^{\blacktriangledown\blacktriangle}$ implies $X^\blacktriangledown = X^{\blacktriangledown\blacktriangle\blacktriangledown} \subseteq
Y^{\blacktriangledown\blacktriangle\blacktriangledown} = Y^\blacktriangledown$, and from $X^{\blacktriangle\blacktriangledown} \subseteq Y^{\blacktriangle\blacktriangledown}$ we get
$X^\blacktriangle = X^{\blacktriangle\blacktriangledown\blacktriangle} \subseteq Y^{\blacktriangle\blacktriangledown\blacktriangle} = Y^\blacktriangle$.
So, $(X^\blacktriangledown,X^\blacktriangle) \leq (Y^\blacktriangledown,Y^\blacktriangle)$.
Thus, $\varphi$ is an order-embedding. The map $\varphi$ is surjective,
because any pair $(X^{\blacktriangledown\blacktriangle}, X^{\blacktriangle\blacktriangledown})$ is the image of
$(X^\blacktriangledown,X^\blacktriangle) \in \mathit{RS}$.
\end{proof}
However, not every pair of interior and closure operators determines, up to isomorphism,
the same structure as the rough sets defined by a tolerance. We present the following characterization
of rough sets in terms of interior and closure operators.
\begin{proposition}\label{Prop:ClosureRepr}
Let $\mathcal{I}$ and $\mathcal{C}$ be lattice-theoretical
interior and closure operators on the set $U$. Then, there exists a tolerance on U such that
$\mathit{RS} \cong \{ ( \mathcal{I}(X), \mathcal{C}(X) ) \mid X \subseteq U\}$ if
and only if there exists a Galois connection $(F,G)$ on $\wp(U)$
such that $\mathcal{C} = G \circ F$, \ $\mathcal{I} = F \circ G$
and the following conditions hold for all $x,y \in U$:
\begin{enumerate}[\rm (i)]
\item $x \in F(\{x\})$;
\item $x \in F(\{y\})$ implies $y \in F(\{x\})$.
\end{enumerate}
\end{proposition}
\begin{proof}
($\Rightarrow$)\, Let $R$ be a tolerance on $U$.
We denote the closure operator $X \mapsto X^{\blacktriangle\blacktriangledown}$ by $\mathcal{C}$
and the interior operator $X \mapsto X^{\blacktriangledown\blacktriangle}$ by $\mathcal{I}$. Then,
$\mathit{RS}$ is order-isomorphic to $\{ ( \mathcal{I}(X), \mathcal{C}(X) ) \mid X \subseteq U\}$
by Lemma~\ref{Lem:Isomorphic}. Because $R(x) = \{x\}^\blacktriangle$ for all $x \in U$,
conditions (i) and (ii) hold.
($\Leftarrow$)\, Suppose that $(F,G)$ is a Galois connection satisfying $\mathcal{C} = G \circ F$,
$\mathcal{I} = F \circ G$, and that conditions (i) and (ii) hold for $F$.
Let us define a relation $R$ by setting $R(x) = F(\{x\})$.
As in the proof of Proposition~\ref{Prop:GaloisCharacterization}, we can see that
$R$ is a tolerance on $U$ such that $X^\blacktriangle = F(X)$ and $X^\blacktriangledown = G(X)$ for all $X \subseteq U$.
It is now clear that for all $X \subseteq U$, $\mathcal{I}(X) = X^{\blacktriangledown\blacktriangle}$ and
$\mathcal{C}(X) = X^{\blacktriangle\blacktriangledown}$, and
$\mathit{RS} \cong \{ ( \mathcal{I}(X), \mathcal{C}(X) ) \mid X \subseteq U\}$
follows from Lemma~\ref{Lem:Isomorphic}.
\end{proof}
If $|U| \leq 4$, then $\mathit{RS}$ is a lattice, but when $|U| \geq 5$,
$\mathit{RS}$ does not necessarily form a lattice, as can be seen
in the following example; see \cite{Jarv99,Jarv01}.
\begin{example} \label{Ex:Counter}
Let $R$ be a tolerance on $U = \{a,b,c,d,e\}$ such that
$R(a) = \{a,b\}$, $R(b) = \{a,b,c\}$, $R(c) = \{b,c,d\}$, $R(d) = \{c,d,e\}$, and
$R(e) = \{d,e\}$. The ordered set $\mathit{RS}$ is depicted in Figure~\ref{Fig:Fig1}.
\begin{figure}
\caption{The ordered set $\mathit{RS}$ determined by the tolerance of Example~\ref{Ex:Counter}.}
\label{Fig:Fig1}
\end{figure}
For instance, the elements $(\{a\}, \{a,b,c\})$ and $(\emptyset, \{a,b,c,d\})$ do not have a least upper bound.
Similarly, $(\{a,b\}, \{a,b,c,d\})$ and $(\{a\}, U)$ do not have a greatest lower bound.
\end{example}
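For readers who wish to verify this, the following brute-force sketch (ours) recomputes $\mathit{RS}$ and confirms that the first pair has upper bounds but no least one.
\begin{verbatim}
from itertools import chain, combinations

# Our illustration: ({a},{a,b,c}) and (0,{a,b,c,d}) have no join in RS.
R = {'a': {'a', 'b'}, 'b': {'a', 'b', 'c'}, 'c': {'b', 'c', 'd'},
     'd': {'c', 'd', 'e'}, 'e': {'d', 'e'}}
U = set(R)
up = lambda X: frozenset(x for x in U if R[x] & set(X))
low = lambda X: frozenset(x for x in U if R[x] <= set(X))
subsets = lambda S: chain.from_iterable(combinations(S, r) for r in range(len(S) + 1))
RS = {(low(set(X)), up(set(X))) for X in subsets(U)}

leq = lambda p, q: p[0] <= q[0] and p[1] <= q[1]
p, q = (frozenset('a'), frozenset('abc')), (frozenset(), frozenset('abcd'))
ubs = [r for r in RS if leq(p, r) and leq(q, r)]
assert p in RS and q in RS and ubs
assert not any(all(leq(m, r) for r in ubs) for m in ubs)   # no least upper bound
\end{verbatim}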
Next we consider completions of $\mathit{RS}$. Let us define the set
\begin{equation}\label{EQ:DefineS}
\mathcal{S} = \{x \in U \mid R(x)=\{x\}\},
\end{equation}
that is, $\mathcal{S}$ consists of the elements $x$ for which $R(x)$ is the singleton $\{x\}$.
Then, $\{x\}^{\blacktriangledown} =\{x\}$ for all $x\in \mathcal{S}$ and
$\mathcal{S}^\blacktriangledown = \mathcal{S} = \mathcal{S}^\blacktriangle$.
Clearly, for any $X \subseteq U$ and $x \in \mathcal{S}$,
\[
x\in X^\blacktriangle \iff R(x) \cap X \neq \emptyset \iff R(x)\subseteq X \iff x\in X^\blacktriangledown.
\]
Hence, for all $x \in \mathcal{S}$, we have either $x\in X^{\blacktriangledown}$ or $x \in X^{\blacktriangle c}$.
This means that $\mathcal{S} \subseteq X^{\blacktriangledown} \cup X^{\blacktriangle c}$ holds for any $X\subseteq U$.
We define the set of pairs
\[
\mathcal{I}(\mathit{RS}) = \{ (A,B) \in \wp(U)^{\blacktriangledown} \times \wp(U)^{\blacktriangle}
\mid A^{\blacktriangle}\subseteq B^\blacktriangledown \text{ and } \mathcal{S} \subseteq A\cup B^{c}\}.
\]
Because $(X^{\blacktriangledown})^{\blacktriangle} \subseteq X \subseteq (X^{\blacktriangle})^{\blacktriangledown}$ and
$\mathcal{S} \subseteq X^\blacktriangledown \cup X^{\blacktriangle c}$
for any $(X^\blacktriangledown,X^\blacktriangle) \in \mathit{RS}$, we have $\mathit{RS}\subseteq\mathcal{I}(\mathit{RS})$.
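For the tolerance of Example~\ref{Ex:Counter} this inclusion is proper; the following sketch (ours) computes $\mathcal{I}(\mathit{RS})$ by brute force and exhibits $(\{a\},\{a,b,c,d\})$ as an element of $\mathcal{I}(\mathit{RS}) \setminus \mathit{RS}$.
\begin{verbatim}
from itertools import combinations

# Our illustration: the increasing representation I(RS) for Example Ex:Counter.
R = {'a': {'a', 'b'}, 'b': {'a', 'b', 'c'}, 'c': {'b', 'c', 'd'},
     'd': {'c', 'd', 'e'}, 'e': {'d', 'e'}}
U = set(R)
up = lambda X: frozenset(x for x in U if R[x] & set(X))
low = lambda X: frozenset(x for x in U if R[x] <= set(X))
subsets = lambda W: [set(c) for r in range(len(W) + 1) for c in combinations(W, r)]

lowers = {low(X) for X in subsets(U)}                  # P(U)^lower
uppers = {up(X) for X in subsets(U)}                   # P(U)^upper
S = {x for x in U if R[x] == {x}}                      # empty for this tolerance
RS = {(low(X), up(X)) for X in subsets(U)}
IRS = {(A, B) for A in lowers for B in uppers
       if up(A) <= low(B) and S <= set(A) | (U - B)}

assert RS < IRS                                        # the inclusion is proper here
assert (frozenset('a'), frozenset('abcd')) in IRS - RS
\end{verbatim}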
The \emph{Dedekind--MacNeille completion} of an ordered set $P$ can
be defined as the smallest complete lattice with $P$ order-embedded in it
(see \cite{DaPr02}, for example). In \cite{UmaThesis}, D.~Umadevi
proved that for a reflexive relation $R$ on $U$, the Dedekind--MacNeille completion
of $\mathit{RS}$ is
\[
\mathcal{DM}(\textit{RS}) = \{ (A,B) \in \wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle
\mid A^{\blacktriangle \vartriangle} \subseteq B \text{ and } A \cap \mathcal{S} = B \cap \mathcal{S}\},
\]
where $X^\vartriangle$ denotes the upper approximation of $X$ defined
in terms of the inverse $R^{-1}$ of the relation $R$, that is,
$X^{\vartriangle} = \{x\in U \mid R^{-1}(x) \cap X \neq \emptyset\}$.
If $R$ is a tolerance, we have $A^{\vartriangle} = A^{\blacktriangle}$ and
$A^{\blacktriangle\vartriangle} \subseteq B \iff A^{\blacktriangle\blacktriangle} \subseteq B
\iff A^{\blacktriangle} \subseteq B^{\blacktriangledown}$ for any $A,B \subseteq U$.
Additionally,
\begin{equation} \label{Eq:Congruence}
\mathcal{S} \subseteq A \cup B^{c} \iff \mathcal{S} \cap (B\setminus A) = \emptyset
\iff \mathcal{S} \cap B = \mathcal{S} \cap A.
\end{equation}
Hence, for tolerances, we have
$\mathcal{I}(\textit{RS})= \mathcal{DM}(\textit{RS})$, and consequently,
$\mathit{RS} = \mathcal{I}(\mathit{RS})$ holds whenever $\mathit{RS}$ is a
complete lattice. In \cite{JPR12}, we proved that for any quasiorder $R$ on $U$,
$\mathit{RS}$ is a complete, completely distributive lattice and
\[
\mathit{RS} = \{(A,B) \in \wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle \mid
A \subseteq B \text{ and } \mathcal{S} \subseteq A \cup B^{c}\}.
\]
Therefore, $\mathcal{I}(\mathit{RS})$ can be called the
\emph{increasing representation of rough sets}.
A \textit{complete subdirect product} $\mathcal{L}$ of an indexed family of
complete lattices $\{L_i\}_{i \in I}$ is a complete sublattice of the direct product
$\prod_{i \in I} L_i$ such that the canonical projections
$\pi_i$ are all surjective, that is, $\pi_i(\mathcal{L}) = L_i$.
Note that the projections $\pi_i$ are complete lattice homomorphisms,
that is, they preserve all meets and joins.
\begin{proposition} \label{Prop:Completion}
Let $R$ be a tolerance on $U$.
\begin{enumerate}[\rm (a)]
\item $\mathcal{I}(\mathit{RS})$ is a complete polarity sublattice of the polarity
lattice $\wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$.
\item $\mathcal{I}(\mathit{RS})$ is a complete subdirect product of $\wp(U)^\blacktriangledown$ and $\wp(U)^\blacktriangle$.
\end{enumerate}
\end{proposition}
\begin{proof}
(a) We first note that the map $\sim$ defined in \eqref{Eq:Negation} is a De~Morgan operation on
$\mathcal{I}(\mathit{RS})$.
If $(A,B) \in \mathcal{I}(\mathit{RS})$, then $A^\blacktriangle \subseteq B^\blacktriangledown$ implies
$B^{c \blacktriangle} = B^{\blacktriangledown c} \subseteq A^{\blacktriangle c} = A^{c \blacktriangledown}$. Additionally,
$\mathcal{S} \subseteq A \cup B^{c} = B^{c} \cup (A^c)^c$. So,
${\sim}(A,B) = (B^c,A^c) \in\mathcal{I}(\mathit{RS})$.
Let $\{(A_i,B_i)\}_{i \in I}\subseteq \mathcal{I}(\mathit{RS})$.
Its meet defined in $\wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$ is
\[ \bigwedge_{i \in I} (A_i,B_i)=
\Big( \bigcap_{i \in I} A_i, \big ( \bigcap_{i \in I} B_{i} \big )^{\blacktriangledown \blacktriangle} \Big) . \]
We show that this meet is in $\mathcal{I}(\mathit{RS})$.
For all $i \in I$, we have ${A_i}^\blacktriangle \subseteq {B_i}^\blacktriangledown$.
Thus, for all $i \in I$, we have $A_i \subseteq {A_i}^{\blacktriangle \blacktriangledown} \subseteq {B_i}^{\blacktriangledown\blacktriangledown}$,
and $\bigcap_{i \in I} A_i \subseteq \bigcap_{i \in I} B_{i}^{\blacktriangledown \blacktriangledown} =
(\bigcap_{i \in I} B_{i})^{\blacktriangledown \blacktriangledown}$. This implies
$(\bigcap_{i \in I} A_i)^\blacktriangle \subseteq (\bigcap_{i \in I} B_{i})^{\blacktriangledown\blacktriangledown \blacktriangle}
\subseteq (\bigcap_{i \in I} B_{i})^{\blacktriangledown}
= \big((\bigcap_{i \in I} B_{i})^{\blacktriangledown\blacktriangle}\big)^\blacktriangledown$.
For the second part,
assume that $\mathcal{S} \nsubseteq \bigcap_{i \in I} A_i \cup
(\bigcap_{i \in I} B_{i})^{\blacktriangledown \blacktriangle c}$. This means that there
exists $x\in \mathcal{S}$ such that $x \notin \bigcap_{i \in I} A_i$
and $x \notin (\bigcap_{i \in I} B_i)^{\blacktriangledown\blacktriangle c}$. Therefore,
there is $k \in I$ with $x \notin A_k$ and
$x \in ( \bigcap_{i \in I} B_i ) ^{\blacktriangledown\blacktriangle} \subseteq
\bigcap_{i \in I} B_i \subseteq B_k$. We get
$x \notin A_{k} \cup {B_k}^{c}$, contradicting our assumption
$\mathcal{S} \subseteq A_i \cup {B_i}^c$ for all $i \in I$.
Thus, $\mathcal{S} \subseteq \bigcap_{i \in I} A_i \cup
(\bigcap_{i \in I} B_{i})^{\blacktriangledown \blacktriangle c}$ must hold.
Additionally, the join
\[ \bigvee_{i \in I} (A_i,B_i) =
\Big ( \big (\bigcup_{i \in I} A_i \big )^{\blacktriangle \blacktriangledown}, \bigcup_{i \in I} B_i \Big )
\]
defined in $\wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$ equals ${\sim} \bigwedge_{i\in I} {\sim}(A_i,B_i)$, which,
by the above, belongs to $\mathcal{I}(\mathit{RS})$. Thus, $\mathcal{I}(\mathit{RS})$ is a
complete sublattice of $\wp(U)^{\blacktriangledown}\times\wp(U)^\blacktriangle$.
(b) The maps $\pi_{1} \colon (X^\blacktriangledown, Y^\blacktriangle) \mapsto X^\blacktriangledown$ and
$\pi_{2} \colon (X^\blacktriangledown, Y^\blacktriangle) \mapsto Y^\blacktriangle$ are the canonical projections
of the product $\wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$. Obviously,
their restrictions to $\mathcal{I}(\mathit{RS})$ are surjective, because
$\mathit{RS} \subseteq \mathcal{I}(\mathit{RS})$. Combined
with (a), this proves the claim.
\end{proof}
\begin{corollary} \label{Cor:Subdirect}
$\mathit{RS}$ is a complete lattice if and only if it is a complete subdirect
product of the complete lattices $\wp(U)^\blacktriangledown$ and $\wp(U)^\blacktriangle$.
\end{corollary}
\begin{remark}
The fact that for a tolerance $R$ on $U$, $\mathcal{I}(\mathit{RS})$ is the
Dedekind--MacNeille completion of $\mathit{RS}$ can be proved independently of
\cite{UmaThesis} by showing that $\mathit{RS}$ is both join-dense and
meet-dense in $\mathcal{I}(\mathit{RS})$. It is known that a complete lattice $L$ is
the Dedekind--MacNeille completion of an ordered subset $P$ of $L$, whenever $P$ is
both join-dense and meet-dense in $L$, that is, every element of $L$ can be
represented as a join and a meet of some elements of $P$
(see e.g. \cite[Theorem~7.41]{DaPr02}). In fact, one can show that for any pair
$(A,B)\in\mathcal{I}(\mathit{RS})$, we have
\begin{equation} \label{Eq:repre}
(A,B)= \bigvee \big ( \{ (R(x)^{\blacktriangledown},R(x)^{\blacktriangle}) \mid x\in A\}
\cup \{ (\emptyset,R(x)) \mid x \in B^{\blacktriangledown}\setminus A\} \big ).
\end{equation}
Trivially, the pairs $(R(x)^{\blacktriangledown},R(x)^{\blacktriangle})$ are rough sets for every $x \in A$.
If $x \in B^\blacktriangledown \setminus A$, then $x \notin \mathcal{S}$ by \eqref{Eq:Congruence}, and
hence $\{x\}^\blacktriangledown = \emptyset$ and $(\{x\}^\blacktriangledown,\{x\}^\blacktriangle) = (\emptyset,R(x))$
is a rough set.
Thus, \eqref{Eq:repre} implies that $\mathit{RS}$ is join-dense in $\mathcal{I}(\mathit{RS})$.
Because $\mathcal{I}(\mathit{RS})$ and $\mathit{RS}$ are self-dual by the map $\sim$,
$\mathit{RS}$ is also meet-dense in $\mathcal{I}(\mathit{RS})$.
\end{remark}
By Corollary~\ref{Cor:Subdirect}, to show that $\textit{RS}$ is a complete lattice
it is enough to prove that $\textit{RS}$ is a complete sublattice of $\wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$.
Additionally, since $\textit{RS}$ is a self-dual subset of the complete polarity lattice $\wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$,
it suffices to find for any $\mathcal{H} \subseteq \wp(U)$ a set $Z \subseteq U$ such that
\begin{equation}\label{Eq:ExistZed}
Z^\blacktriangledown = \bigcap_{X \in \mathcal{H}} X^\blacktriangledown \text{ \quad and \quad }
Z^\blacktriangle = \big (\bigcap_{X \in \mathcal{H}} X^\blacktriangle \big )^{\blacktriangledown\blacktriangle}.
\end{equation}
Observe that
\[ \big (\bigcap_{X \in \mathcal{H}} X \big )^{\blacktriangledown\blacktriangle} =
\big (\bigcap_{X \in \mathcal{H}} X^\blacktriangledown \big )^\blacktriangle = Z^{\blacktriangledown\blacktriangle} \subseteq Z
\subseteq Z^\blacktriangle = \big (\bigcap_{X \in \mathcal{H}} X^\blacktriangle \big )^{\blacktriangledown\blacktriangle}.
\]
So, we have a lower bound and an upper bound for this $Z$. In particular, in terms of
Lemma~\ref{Lem:ExistsS} below, the interpretation is that
\[ T = \big (\bigcap_{X \in \mathcal{H}} X \big )^{\blacktriangledown\blacktriangle}
\quad \mbox{and} \quad
Y = \big (\bigcap_{X \in \mathcal{H}} X^\blacktriangle \big )^{\blacktriangledown\blacktriangle}.
\]
\begin{lemma} \label{Lem:ExistsS}
Let $Y,T\subseteq U$ be such that $Y \in \wp(U)^\blacktriangle$ and $T\subseteq Y^\blacktriangledown$.
If\/ $|R(x)| \geq2$ for all $x\in Y\setminus T^\blacktriangle$, then there exists a set
$S\subseteq Y^\blacktriangledown\setminus T$ such that $Y=S^\blacktriangle\cup T^\blacktriangle$ and
$R(y)\nsubseteq S\cup T$ for all $y\in S$.
\end{lemma}
\begin{proof}
Since $T\subseteq Y^{\blacktriangledown} \subseteq Y$, the set $Y \setminus T$ is a disjoint union of
$Y \setminus Y^{\blacktriangledown}$ and $Y^{\blacktriangledown} \setminus T$, and hence
\begin{equation} \label{Eq:DifferenceY}
(Y \setminus T) \setminus (Y\setminus Y^{\blacktriangledown}) = Y^{\blacktriangledown} \setminus T.
\end{equation}
Clearly, $T^{\blacktriangle}\subseteq Y^{\blacktriangledown\blacktriangle}=Y$, because $Y \in \wp(U)^{\blacktriangle}$.
If $T^{\blacktriangle}=Y$, then our assertion is satisfied trivially with
$S = \emptyset$. Thus, we may suppose $T^{\blacktriangle}\subset Y$, which yields
$Y \setminus T \neq \emptyset$, because $T \subseteq T^{\blacktriangle}$. Let
$\rho$ denote the restriction of $R$ to the set $Y \setminus T$. Then, $\rho$ is
a tolerance, and its transitive closure $\overline{\rho}$ is an equivalence on
$Y\setminus T$. Now, consider the sets
\begin{align*}
A & =\{ y \in Y\setminus T \mid \overline{\rho}(y) \cap (Y\setminus Y^{\blacktriangledown})\neq\emptyset\}; \\
B &= \{y\in Y\setminus T\mid\overline{\rho}(y)\cap(Y\setminus
Y^{\blacktriangledown})=\emptyset \text{ \ and \ } \overline{\rho}(y) \nsubseteq T^{\blacktriangle}\}.
\end{align*}
Then, $(Y\setminus Y^{\blacktriangledown}) \cap B \subseteq A \cap B = \emptyset$, and by \eqref{Eq:DifferenceY} we obtain
\[
B \subseteq (Y\setminus T) \setminus (Y\setminus Y^{\blacktriangledown}) = Y^{\blacktriangledown}\setminus T.
\]
We apply Lemma~\ref{Lem:OddEven} with the tolerance $\rho$ and the sets
$U = Y\setminus T$, $X = Y\setminus Y^{\blacktriangledown}$, and
$\overline{X} = \overline{\rho}(Y\setminus Y^{\blacktriangledown}) = A$.
We obtain two disjoint sets $(Y\setminus Y^{\blacktriangledown})_{\text{odd}}$
and $(Y\setminus Y^{\blacktriangledown})_{\text{even}}$ such that
$(Y \setminus Y^{\blacktriangledown})_{\text{odd}} \cup (Y\setminus Y^{\blacktriangledown})_{\text{even}} = A$, and
\begin{align}
&Y\setminus Y^{\blacktriangledown} \subseteq (Y\setminus Y^{\blacktriangledown})_{\text{even}} \label{Eq:4A}; \\
&(Y\setminus Y^{\blacktriangledown})_{\text{odd}}\subseteq \rho((Y\setminus Y^{\blacktriangledown})_{\text{even}}); \label{Eq:4B} \\
&A \setminus (Y\setminus Y^{\blacktriangledown}) \subseteq \rho((Y\setminus Y^{\blacktriangledown})_{\text{odd}}). \label{Eq:4C}
\end{align}
Next, let $\Pi=\{H_{k}\mid k\in K\}$ be the partition induced by the equivalence
$\overline{\rho}$ on $B$. Note that $\overline{\rho}(B)=B$,
because $x \in B$ and $\overline{\rho}(x) = \overline{\rho}(y)$ imply $y \in B$.
For each $k\in K$, we may select an element $c_{k}\in H_{k} \subseteq B$
such that $c_{k} \notin T^{\blacktriangle}$. This is because $H_{k}=\overline{\rho}(b)$ for some $b \in B$
and $B$ was defined so that $\overline{\rho}(b) \nsubseteq T^{\blacktriangle}$ for all $b\in B$.
Denote the set of all these elements by $C$, that is,
\[
C=\{c_{k}\mid k\in K\}.
\]
Then, $C \subseteq B \setminus T^{\blacktriangle} \subseteq Y^{\blacktriangledown} \setminus T^{\blacktriangle}$.
Observe that $C \subseteq \rho(B\setminus C)$. Indeed,
$C \subseteq Y \setminus T^{\blacktriangle}$ yields $|R(x)| \geq 2$ for all $x\in C$ by our
assumption. Thus, for each $x \in C$ there is an element $y \neq x$ with $x \, R \, y$.
Then, $y \in C^{\blacktriangle} \subseteq Y^{\blacktriangledown\blacktriangle} \subseteq Y$, and $x \notin T^{\blacktriangle}$ yields
$y\notin T$. Hence, $x,y \in Y \setminus T$ and $x \, \rho \, y$ holds also.
This implies $y \in \overline{\rho}(x) \subseteq B$. Since $x \in C$ is the unique element
picked from the set $\overline{\rho}(x) \in \Pi$, $y \neq x$ implies
$y \in B\setminus C$. This proves $C \subseteq\rho(B\setminus C)$.
By applying Lemma~\ref{Lem:OddEven} again with the tolerance $\rho$ and the sets
$X = C \subseteq Y \setminus T$ and $\overline{X} = B$, we obtain two disjoint sets
$C_{\text{odd}}$ and $C_{\text{even}}$ such that $C_{\text{odd}} \cup C_{\text{even}} = B$.
Because $C \subseteq \rho(B\setminus C)$, we have
\begin{equation}\label{Eq:4D}
\rho(C_{\text{odd}})=\rho(C_{\text{even}})=B.
\end{equation}
Finally, consider the set
\[
S=(Y\setminus Y^{\blacktriangledown})_{\text{odd}}\cup C_\text{odd}.
\]
We prove that $S$ has the required properties, that is, (i) $S\subseteq Y^{\blacktriangledown}\setminus T$,
(ii) $S^{\blacktriangle}\cup T^{\blacktriangle}=Y$, and (iii) $R(y) \nsubseteq S\cup T$ for all $y\in S$.
(i) Obviously, $C_\text{odd} \subseteq B \subseteq Y^{\blacktriangledown} \setminus T$ and
$(Y\setminus Y^{\blacktriangledown})_\text{odd} \subseteq A \subseteq Y\setminus T$.
By \eqref{Eq:4A},
\[ (Y\setminus Y^{\blacktriangledown})_\text{odd} \cap (Y\setminus Y^{\blacktriangledown})
\subseteq (Y\setminus Y^{\blacktriangledown})_\text{odd} \cap (Y\setminus Y^{\blacktriangledown})_\text{even}=\emptyset.
\]
Then, \eqref{Eq:DifferenceY} yields
$(Y \setminus Y^{\blacktriangledown})_\text{odd} \subseteq Y^{\blacktriangledown} \setminus T$, and
$S = (Y\setminus Y^{\blacktriangledown})_{\text{odd}}\cup C_\text{odd} \subseteq Y^{\blacktriangledown}\setminus T$.
(ii) Since $S,T\subseteq Y^{\blacktriangledown}$, we have
$S^{\blacktriangle} \cup T^{\blacktriangle} = (S\cup T)^{\blacktriangle}\subseteq Y^{\blacktriangledown\blacktriangle}\subseteq Y$.
For the other direction, assume $x \in Y\setminus T^{\blacktriangle} \subseteq Y \setminus T$. Then,
either $\overline{\rho}(x) \cap (Y\setminus Y^{\blacktriangledown}) =\emptyset$ or
$\overline{\rho}(x) \cap (Y\setminus Y^{\blacktriangledown}) \neq \emptyset$ holds.
Because $x \notin T^\blacktriangle$, we have $\overline{\rho}(x) \nsubseteq T^{\blacktriangle}$.
Therefore, if $\overline{\rho}(x) \cap (Y\setminus Y^{\blacktriangledown}) =\emptyset$, then
$x \in B$. Since $B = \rho(C_{\text{odd}})$ by \eqref{Eq:4D}
and $\rho(C_{\text{odd}})\subseteq \rho(S) \subseteq S^{\blacktriangle}$,
we get $x \in S^\blacktriangle$. If $\overline{\rho}(x) \cap (Y\setminus Y^{\blacktriangledown}) \neq \emptyset$, then
$x \in A$, and either $x \in Y \setminus Y^{\blacktriangledown}$ or $x \notin Y\setminus Y^{\blacktriangledown}$.
If $x\in Y\setminus Y^{\blacktriangledown}$, then $x\in Y = Y^{\blacktriangledown\blacktriangle}$ yields that
$x \, R \, y$ for some $y \in Y^{\blacktriangledown}$.
Since $x \notin T^{\blacktriangle}$, we have $y \notin T$ and $y \in Y \setminus T$.
By $x,y\in Y \setminus T$, we obtain $x\, \rho \, y$. The facts
$x \in Y \setminus Y^{\blacktriangledown}$ and $y \notin Y \setminus Y^{\blacktriangledown}$ imply
$ y \in(Y\setminus Y^{\blacktriangledown})^{1} \subseteq (Y\setminus Y^{\blacktriangledown})_{\text{odd}}$,
which gives $x \in \rho((Y\setminus Y^{\blacktriangledown })_{\text{odd}}) \subseteq
((Y\setminus Y^{\blacktriangledown})_{\text{odd}})^{\blacktriangle}\subseteq S^{\blacktriangle}$.
If $x\notin Y\setminus Y^{\blacktriangledown}$, then
\[ x \in A \setminus (Y \setminus Y^{\blacktriangledown}) \subseteq \rho((Y\setminus Y^{\blacktriangledown})_\text{odd})
\subseteq((Y\setminus Y^{\blacktriangledown})_\text{odd})^{\blacktriangle} \subseteq S^{\blacktriangle}
\]
by \eqref{Eq:4C}.
As we obtained $x \in S^{\blacktriangle}$ in all possible cases,
$Y \subseteq S^{\blacktriangle}\cup T^{\blacktriangle}$ holds.
(iii) Let $x\in S = (Y\setminus Y^{\blacktriangledown})_\text{odd} \cup C_\text{odd}$.
Since $(Y\setminus Y^{\blacktriangledown})_\text{odd} \cap C_\text{odd} \subseteq A \cap B = \emptyset$,
either $x \in(Y \setminus Y^{\blacktriangledown})_\text{odd}$ or $x\in C_\text{odd}$.
In the first case, $x \in \rho((Y\setminus Y^{\blacktriangledown})_\text{even})$ by \eqref{Eq:4B}.
Hence, there is an element $y \in (Y\setminus Y^{\blacktriangledown})_\text{even}$ with
$(x,y) \in \rho \subseteq R$, that is, $y\in R(x)$.
The inclusion $(Y\setminus Y^{\blacktriangledown})_\text{even} \subseteq A \subseteq Y\setminus T$ gives
$y \notin T$. Because
\begin{align*}
S \cap (Y\setminus Y^{\blacktriangledown})_\text{even} &= ((Y \setminus Y^{\blacktriangledown})_\text{odd}
\cap (Y\setminus Y^{\blacktriangledown})_\text{even}) \cup (C_\text{odd} \cap (Y\setminus Y^{\blacktriangledown})_\text{even})\\
& \subseteq \emptyset \cup(B\cap A) = \emptyset,
\end{align*}
we get $y\notin S$. Thus, $R(x)\nsubseteq S\cup T$.
If $x\in C_{\text{odd}}$, then $x \in B = \rho(C_{\text{even}})$ by \eqref{Eq:4D}.
Hence, there is $y\in C_{\text{even}}$ with $(x,y) \in \rho \subseteq R$, that is, $y\in R(x)$.
Since $C_\text{even} \subseteq B \subseteq Y \setminus T$, we get $y \notin T$. \ Clearly,
$y \notin C_\text{odd}$ and
$C_\text{even} \cap (Y\setminus Y^{\blacktriangledown})_\text{odd} \subseteq B \cap A=\emptyset$
implies $y \notin(Y\setminus Y^{\blacktriangledown})_\text{odd}$. Hence,
$y \notin C_{\text{odd}} \cup(Y\setminus Y^{\blacktriangledown})_\text{odd}=S$,
and so $R(x) \nsubseteq S \cup T$.
\end{proof}
An element $x$ of a complete lattice $L$ is said to be \emph{compact} if for every subset
$S$ of $L$, $x \leq \bigvee S$ implies $x \leq \bigvee F$ for some finite subset $F$ of $S$.
A complete lattice is \emph{algebraic} if each of its elements can be expressed
as a join of compact elements.
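For instance (a standard illustration, not needed in what follows), in the powerset lattice
$\wp(U)$ the compact elements are exactly the finite subsets of $U$, and every $X \subseteq U$
is the join of its finite subsets; hence $\wp(U)$ is algebraic.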
\begin{theorem} \label{Thm:Dist2Lattice}
Let $R$ be a tolerance on $U$. Then $\mathit{RS}$ is an algebraic completely distributive
lattice if and only if $R$ is induced by an irredundant covering of $U$.
\end{theorem}
\begin{proof}
Suppose that $\mathit{RS}$ is an algebraic completely distributive lattice.
Then, $\mathit{RS}$ is a complete lattice and, by Corollary~\ref{Cor:Subdirect},
it is a complete subdirect product of the lattices $\wp(U)^\blacktriangledown$ and $\wp(U)^\blacktriangle$.
Since $\wp(U)^\blacktriangledown$ is the image of $\mathit{RS}$ under the complete lattice-homomorphism $\pi_{1}$,
the lattice $\wp(U)^\blacktriangledown$ is also completely distributive.
Hence, according to Proposition~\ref{Prop:ComplDistributive}, $R$ is induced
by an irredundant covering of $U$.
Conversely, let $R$ be a tolerance induced by an irredundant covering of $U$.
Then $R$ also satisfies condition (b) of Theorem~\ref{Thm:DC}. First, we show that
$\textit{RS}$ is a complete lattice. Let $\mathcal{H} \subseteq \wp(U)$.
By \eqref{Eq:ExistZed}, it is enough to show that there exists a set
$Z \subseteq U$ such that
\begin{equation}\label{Eq:MEET}
(Z^\blacktriangledown, Z^\blacktriangle) =
\Big ( \bigcap_{X \in \mathcal{H}} X^\blacktriangledown, \big (\bigcap_{X \in \mathcal{H}} X^\blacktriangle \big )^{\blacktriangledown\blacktriangle} \Big ).
\end{equation}
Let us first set
\begin{equation}\label{Eq:DefT+Y}
T = \big (\bigcap_{X \in \mathcal{H}} X \big )^{\blacktriangledown\blacktriangle}
\quad \mbox{and} \quad
Y = \big (\bigcap_{X \in \mathcal{H}} X^\blacktriangle \big )^{\blacktriangledown\blacktriangle}.
\end{equation}
Using the properties of $^\blacktriangle$ and $^\blacktriangledown$, it is clear that $T = T^{\blacktriangledown\blacktriangle}$, $Y = Y^{\blacktriangledown\blacktriangle}$, and
\[ T\subseteq \bigcap_{X \in \mathcal{H}} X \subseteq \bigcap_{X\in\mathcal{H}} X^{\blacktriangle\blacktriangledown} =
\Big ( \bigcap_{X \in \mathcal{H}} X^{\blacktriangle} \Big )^{\blacktriangledown} = \Big (\bigcap_{X\in\mathcal{H}} X^{\blacktriangle} \Big ) ^{\blacktriangledown\blacktriangle\blacktriangledown}
= Y^{\blacktriangledown}.
\]
In addition, $T^\blacktriangledown = \big (\bigcap_{X \in \mathcal{H}} X \big )^{\blacktriangledown}$ and \
$T^\blacktriangle \subseteq Y^{\blacktriangledown\blacktriangle} = Y$.
Suppose that $x \in Y \setminus T^\blacktriangle$ and $|R(x)| = 1$. Since
$R(x) = \{x\}$, $x \in Y$ implies that $x \in X$ for all $X \in \mathcal{H}$,
from which we get $x \in T^\blacktriangle$, a contradiction. Thus, we must have
$|R(x)| \geq 2$ for all $x \in Y \setminus T^\blacktriangle$.
Now, we may apply Lemma~\ref{Lem:ExistsS} with the sets $T$ and $Y$, and this yields
that there exists a set
\begin{equation}\label{Eq:DefS}
S \subseteq Y^\blacktriangledown\setminus T \text{ with } S^\blacktriangle\cup T^\blacktriangle = Y
\text{ and } R(y) \nsubseteq S\cup T \text{ for all } y\in S.
\end{equation}
Let us define the set
\[
V = \{v\in T\mid R(v) \nsubseteq T \mbox{ and } R(v) \subseteq S\cup T\}.
\]
First, we prove that if $V = \emptyset$, the set $Z = S \cup T$
satisfies \eqref{Eq:MEET}. Since $^\blacktriangle$ distributes over unions,
we have $Z^\blacktriangle = S^\blacktriangle \cup T^\blacktriangle = Y$.
Trivially, $T^\blacktriangledown \subseteq Z^\blacktriangledown$. On the other hand, if $z \in Z^\blacktriangledown$,
then $R(z) \subseteq S \cup T$.
We have $R(z) \subseteq T$, because $R(z) \nsubseteq T$
implies $z \in V$, but this is impossible because $V = \emptyset$. Thus,
$Z^\blacktriangledown = T^\blacktriangledown$ and \eqref{Eq:MEET} holds.
Now we prove that condition (b) of Theorem~\ref{Thm:DC} implies $V=\emptyset$,
which by previous observation yields that $\mathit{RS}$ is a complete lattice.
Suppose $V \neq \emptyset$. Then, there exists
$v \in T$ such that $R(v) \nsubseteq T$ and
$R(v) \subseteq S \cup T$. This also means that there is an element
$s \in R(v)$ with $s\in S\setminus T$. As $v \, R \, s$, by Theorem~\ref{Thm:DC}
there exists $c_{v} \in R(c_v) \subseteq R(v)\cap R(s)$ such that
$R(c_{v})\subseteq R(x)$ for all $x \, R \, c_{v}$.
Then $c_v \, R \,s$, and in particular, we have
\begin{equation} \label{EQ:INCL}
c_{v} \in R(c_v) \subseteq R(v)\subseteq S\cup T.
\end{equation}
Observe that $c_{v} \notin T$, because $c_v \in T = T^{\blacktriangledown\blacktriangle} $ means
that there is $a \in T^{\blacktriangledown}$ with $a \, R \, c_{v}$.
Since $R(c_{v})\subseteq R(x)$ for all $x \, R \, c_{v}$,
we get $s \in R(c_{v}) \subseteq R(a)\subseteq T^{\blacktriangledown \blacktriangle} \subseteq T$, but
this is not possible because $s \in S\setminus T$.
Therefore, $c_{v} \in S$, and we have $R(c_{v}) \nsubseteq S\cup T$ by
Lemma~\ref{Lem:ExistsS}. But this contradicts \eqref{EQ:INCL},
and we deduce $V = \emptyset$, which, as we have already noted,
implies that $\mathit{RS}$ is a complete lattice.
Finally, since $\mathit{RS}$ is a complete lattice, it is isomorphic to a complete subdirect
product of $\wp(U)^\blacktriangledown$ and $\wp(U)^\blacktriangle$ by Corollary~\ref{Cor:Subdirect}.
Since $R$ is a tolerance induced by an irredundant covering, in view of Proposition~\ref{Prop:Boolean},
$\wp(U)^\blacktriangledown$ and $\wp(U)^\blacktriangle$ are complete atomistic Boolean lattices. Thus, they are
completely distributive and algebraic, too. Hence, $\mathit{RS}$, as a complete subdirect product
of two completely distributive algebraic lattices is also completely distributive and algebraic.
\end{proof}
\begin{remark} \label{Rem:Bonikowski}
In \cite{Bonikowski1998}, Z.~Bonikowski and his co-authors considered rough set systems
of approximation pairs based on coverings. They presented a necessary and sufficient
condition under which their system forms a complete lattice. This condition was given in terms of
representative elements and representative coverings. To be more formal, let us recall
some notions from their work. Let $\mathcal{C} \subseteq \wp(U)$ be a covering. For any $x \in U$,
the family $\mathit{md}(x) = \min \{ K \in \mathcal{C} \mid x \in K \}$ is called the
\emph{minimal description of $x$}. In addition, for any $K \in \mathcal{C}$,
the element $x \in K$ is called a \emph{representative element} of $K$ if
for all $S \in \mathcal{C}$, $x \in S$ implies $K \subseteq S$. The covering
$\mathcal{C}$ is called \emph{representative} if every set in $\mathcal{C}$ has
a representative element.
For any set $X \subseteq U$, the family $\mathcal{C}_*(X) = \{ K \in \mathcal{C} \mid K \subseteq X\}$
is called the \emph{sets bottom-approximating $X$}. The family
$\mathit{Bn}(X) = \bigcup \{ \mathit{md}(x) \mid x \in X \setminus \bigcup \mathcal{C}_*(X) \}$
is referred to as the \emph{sets approximating the boundary of $X$}, and the family
$\mathcal{C}^*(X) = \mathcal{C}_*(X) \cup \mathit{Bn}(X)$ consists of the
\emph{sets top-approximating $X$}. It is proved in \cite{Bonikowski1998} that
the coordinatewise ordered set $\{ (\mathcal{C}_*(X),\mathcal{C}^*(X)) \mid X \subseteq U\}$ is a complete lattice
if and only if
\begin{enumerate}[\rm (i)]
\item the covering $\mathcal{C}$ is representative;
\item every $K \in \mathcal{C}$ consisting of at least two elements has at least two representative elements.
\end{enumerate}
Suppose that $R$ is a tolerance induced by an irredundant
covering $\mathcal{C}$ of $U$. Then, by our Theorem~\ref{Thm:Dist2Lattice}, the ordered structure
$\mathit{RS} = \{ (X^\blacktriangledown,X^\blacktriangle) \mid X \subseteq U \}$ is always an algebraic completely
distributive lattice. On the other hand, the irredundant covering $\mathcal{C}$ is representative,
because by Proposition~\ref{Prop:IrredundantCovering}, for each $K \in \mathcal{C}$, there exists $d \in U$
such that $R(d)= K$. It is easy to observe that this $d$ is a representative element of $K$.
Thus, irredundant coverings always satisfy (i). But for an irredundant covering forming a lattice
in the sense of \cite{Bonikowski1998}, also condition (ii) must be satisfied. Clearly, this means
that for every $K \in \mathcal{C}$ consisting of at least two elements, there must be two distinct elements
$d$ and $d'$ such that $R(d) = R(d') = K$. By this, it is now obvious that the systems $\mathit{RS}$ and
$\{ (\mathcal{C}_*(X),\mathcal{C}^*(X)) \mid X \subseteq U\}$ cannot be isomorphic in general.
Note also that in the case of an irredundant covering $\mathcal{C}$, the \emph{maximal description}
$\mathit{MD}(x) = \max \{ K \in \mathcal{C} \mid x \in K \}$ coincides with the minimal
description $\mathit{md}(x)$ of any $x \in U$ (cf. \cite{Restrepo2013,Yao2012}).
\end{remark}
An \emph{Alexandrov topology} is a topology $\mathcal{T}$ that
contains all arbitrary intersections of its members. Alexandrov
topologies are also called \emph{complete rings of sets}.
It is known that a lattice $L$ is isomorphic to an Alexandrov topology
if and only if $L$ is completely distributive and algebraic (see e.g. \cite{DaPr02}). By
Theorem~\ref{Thm:Dist2Lattice}, $\mathit{RS}$ is isomorphic to some Alexandrov topology
whenever $R$ is induced by an irredundant covering of $U$.
A \emph{Heyting algebra} $L$ is a bounded distributive lattice such that for all
$a,b \in L$, there is a greatest element $x$ of $L$ such that $a \wedge x \leq b$.
This element is the \emph{relative pseudocomplement} of $a$ with respect to $b$,
and is denoted $a \Rightarrow b$. It is well known that any completely distributive
lattice $L$ is a Heyting algebra such that the relative pseudocomplement is defined as
\begin{equation} \label{Eq:Heyting}
x \Rightarrow y = \bigvee \big \{ z \in L \mid z \wedge x \leq y \big \}.
\end{equation}
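For example (added only as an illustration), in the powerset lattice $\wp(U)$ formula \eqref{Eq:Heyting} gives
\[ X \Rightarrow Y = X^{c} \cup Y, \]
because $Z \cap X \subseteq Y$ holds if and only if $Z \subseteq X^{c} \cup Y$.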
Therefore, if $R$ is a tolerance induced by an irredundant covering of $U$, then
$\mathit{RS}$ is a Heyting algebra.
A \textit{Kleene algebra} is a structure $\mathbb{A} = (A, \vee, \wedge, {\sim}, 0, 1)$ such that
$A$ is a bounded distributive lattice and for all $a,b \in A$:
\begin{enumerate}[({K}1)]
\item ${\sim}\,{\sim}a = a$,
\item $a \leq b \text{ if and only if } {\sim}b \leq {\sim}a$,
\item $a \wedge {\sim}a \leq b \vee {\sim}b$.
\end{enumerate}
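A simple example (included here only for illustration) is the three-element chain $0 < u < 1$ with
${\sim}0 = 1$, ${\sim}1 = 0$, and ${\sim}u = u$: conditions (K1)--(K3) are easily verified,
yet the chain is not a Boolean algebra, since $u \vee {\sim}u = u \neq 1$.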
According to R. Cignoli \cite{Cign86}, a \emph{quasi-Nelson algebra} is a Kleene algebra
$\mathbb{A}$ such that for each pair $a$ and $b$ of its elements, the relative pseudocomplement
$a \Rightarrow ({\sim} a \vee b)$ exists. In quasi-Nelson algebras,
$a \Rightarrow ({\sim} a \vee b)$ is denoted simply by
$a \to b$ and this is called the \emph{weak relative pseudocomplement} of $a$
with respect to $b$. Obviously, each Kleene algebra such that its
underlying lattice forms a Heyting algebra is a quasi-Nelson algebra.
A \emph{Nelson algebra} is a quasi-Nelson algebra
$(A, \vee, \wedge, \to, {\sim}, 0, 1)$ satisfying the equation
\begin{equation*}\label{Eq:Nelson}
(a\wedge b)\rightarrow c = a \rightarrow (b\rightarrow c).
\end{equation*}
In the case of quasiorders, it is shown by J.~J{\"a}rvinen, S.~Radeleczki, and L.~Veres
\cite{JRV09} that $\mathit{RS}$ forms a complete sublattice of $\wp(U) \times\wp(U)$
ordered by the coordinatewise set-inclusion relation.
In addition, we have proved in \cite{JarRad} that $\mathit{RS}$ determines a Nelson
algebra.
Let the operation $\sim$ on $\mathit{RS}$ be defined as in \eqref{Eq:Negation}.
\begin{proposition} \label{Prop:QuasiNelson}
Let $R$ be a tolerance induced by an irredundant covering of $U$. Then, the algebra
\[ \mathbb{RS} = (\mathit{RS},\cup,\cap,{\sim},(\emptyset,\emptyset), (U,U))\]
is a quasi-Nelson algebra.
\end{proposition}
\begin{proof}
If $R$ is a tolerance induced by an irredundant covering of $U$, then by Theorem~\ref{Thm:Dist2Lattice},
$\mathit{RS}$ is a complete distributive lattice bounded by $(\emptyset,\emptyset)$ and $(U,U)$.
As we have already noted, conditions (K1) and (K2) are satisfied.
Let $\mathcal{A}(X) = (X^\blacktriangledown,X^\blacktriangle)$ and $\mathcal{A}(Y) = (Y^\blacktriangledown,Y^\blacktriangle)$ be in $\textit{RS}$. Then,
\begin{align*}
\mathcal{A}(X) \wedge {\sim} \mathcal{A}(X) &= (X^\blacktriangledown \cap X^{c \blacktriangledown} , (X^\blacktriangle \cap X^{c \blacktriangle})^{\blacktriangledown \blacktriangle} )
= (\emptyset, (X^\blacktriangle \setminus X^\blacktriangledown)^{\blacktriangledown \blacktriangle}), \mbox{ and} \\
\mathcal{A}(Y) \vee {\sim} \mathcal{A}(Y) &= ( (Y^\blacktriangledown \cup Y^{c \blacktriangledown})^{\blacktriangle \blacktriangledown}, Y^\blacktriangle \cup Y^{c \blacktriangle} )
= ((Y^{\blacktriangledown\blacktriangle} \cup Y^{c \blacktriangledown \blacktriangle})^\blacktriangledown,U).
\end{align*}
Hence, $\mathcal{A}(X) \wedge {\sim} \mathcal{A}(X) \leq \mathcal{A}(Y) \vee {\sim} \mathcal{A}(Y)$, and condition (K3) holds also.
Since $\mathit{RS}$ is a Heyting algebra when $R$ is a tolerance induced by an irredundant covering of $U$,
the Kleene algebra $\mathbb{RS}$ is a quasi-Nelson algebra.
\end{proof}
Note that (K3) has nothing to do with distributivity, so if $\mathit{RS}$ is a lattice,
${\sim}$ satisfies conditions (K1)--(K3), and even $R$ is induced by an irredundant covering of $U$,
the algebra $\mathbb{RS}$ does not necessarily form a Nelson algebra. For instance, if $R$ is a
tolerance on $U = \{a,b,c\}$ such that $R(a) = \{a,b\}$, $R(b) = U$, and $R(c) = \{b,c\}$, the
quasi-Nelson algebra $\mathbb{RS}$ is not a Nelson algebra.
For a tolerance $R$ on $U$ and any $X \subseteq U$, we denote the restriction of
$R$ to $X$ by $R_X$ and by $\mathit{RS}_X$ the
set of all rough sets determined by the relation $R_X$ on $X$. It is clear that for
an equivalence $R$, the relation $R_X$ is an equivalence and $\mathit{RS}_X$
is a lattice for all $X \subseteq U$. Similar observation holds for quasiorders.
Let us introduce the following condition related to $R$-paths:
\begin{itemize}
\item[(C)] For any $R$-path $(a_0,\ldots,a_4)$ of length $4$, there
exist $0\leq i,j \leq 4$ such that $|i - j| \geq 2$ and $a_i \, R \, a_j$.
\end{itemize}
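Let us note, as an immediate illustration, that every transitive tolerance, that is, every equivalence,
satisfies (C): in any $R$-path $(a_0,\ldots,a_4)$, transitivity yields $a_0 \, R \, a_2$, so the required
pair is found with $i = 0$ and $j = 2$.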
\begin{lemma} \label{Lem:Sufficient}
Let $R$ be a tolerance on $U$. If $\mathit{RS}_X$ is a lattice
for all $X \subseteq U$ with $|X| = 5$, then $R$ satisfies condition\/ {\rm (C)}.
\end{lemma}
\begin{proof}
Suppose $R$ does not satisfy (C). Then, there exists an $R$-path
$(a_0, \ldots,a_4)$ such that $a_i \, R \, a_j$ if and only if
$|i - j| \leq 1$. Let us choose $X = \{a_0, \ldots,a_4\}$. Then $|X| = 5$ and
the situation is exactly as in Example~\ref{Ex:Counter}, that is,
$\mathit{RS}_X$ is not a lattice.
\end{proof}
Now, we present our second main result.
\begin{theorem} \label{Thm:Main}
If $R$ is a tolerance satisfying\/ {\rm (C)}, then $\mathit{RS}$ is a complete lattice.
\end{theorem}
\begin{proof}
Let $\mathcal{H} \subseteq \wp(U)$. As in the proof of Theorem~\ref{Thm:Dist2Lattice},
we need to find a set $Z \subseteq U$ such that $Z^\blacktriangledown = \bigcap_{X \in \mathcal{H}} X^\blacktriangledown$
and $Z^\blacktriangle = (\bigcap_{X \in \mathcal{H}} X^\blacktriangle )^{\blacktriangledown\blacktriangle}$.
Let us form now the sets $T$, $Y$, $S$, and $V$ exactly as in the proof of Theorem~\ref{Thm:Dist2Lattice},
meaning that \eqref{Eq:DefT+Y} and \eqref{Eq:DefS} hold. Recall that
\[
V=\{v\in T\mid R(v)\nsubseteq T \text{ and } R(v) \subseteq S\cup T\}.
\]
According to the proof of Theorem~\ref{Thm:Dist2Lattice}, $V = \emptyset$ implies that
$\textit{RS}$ is a complete lattice, hence we may assume $V \neq \emptyset$.
Now, for each $v \in V$, we can choose an element $q_{v} \in R(v)$ such that $q_{v} \in T^\blacktriangle\setminus T$.
Denote by $Q$ the set of these selected elements, that is, $Q = \{q_{v} \mid v\in V\}$.
Then for all $v\in V$, $q_{v} \in T^\blacktriangle$, $q_v \notin T$, and $q_v \in R(v) \subseteq S \cup T$
give $q_{v}\in(S\setminus T)\cap T^\blacktriangle$, and so
\begin{equation}\label{Eq:Q}
Q \subseteq (S \setminus T) \cap T^\blacktriangle.
\end{equation}
Next, we define a set $P \subseteq Y$ by setting
\begin{equation}\label{Eq:defP}
P = \{p \in Y \setminus ((S\setminus Q)^{\blacktriangle} \cup T^{\blacktriangle}) \mid R(p)\subseteq Q^{\blacktriangle}\}.
\end{equation}
Then $P\subseteq Q^{\blacktriangle\blacktriangledown} \subseteq Q^{\blacktriangle}$, because for each $p \in P$,
$R(p)\subseteq Q^{\blacktriangle}$. In addition,
\[
P\cap( (S \setminus Q)^\blacktriangle \cup T^\blacktriangle) = P \cap ((S\setminus Q) \cup T)^\blacktriangle = \emptyset,
\]
that is,
\begin{equation}\label{Eq:ForAllP}
(\forall p \in P) \, R(p) \cap \big ( (S\setminus Q) \cup T \big ) = \emptyset.
\end{equation}
We have $Q\subseteq T^\blacktriangle$ by \eqref{Eq:Q}, which gives
$P\cap Q \subseteq P \cap \big ( (S\setminus Q)^\blacktriangle \cup T^\blacktriangle \big ) = \emptyset$.
Let us now define the set
\[
Z = (S\setminus Q) \cup T\cup P.
\]
We will prove that
\[
Z^\blacktriangledown = \big ( \bigcap_{X \in \mathcal{H}} X \big )^\blacktriangledown
= T^\blacktriangledown
\text{ \quad and \quad }
Z^\blacktriangle = \big (\bigcap_{X \in \mathcal{H}} X^\blacktriangle \big )^{\blacktriangledown\blacktriangle} = Y.
\]
Trivially, $T^\blacktriangledown \subseteq Z^\blacktriangledown$. To prove $Z^\blacktriangledown \subseteq T^\blacktriangledown$, let $z\in Z^\blacktriangledown$.
Then,
\begin{equation}\label{Eq:Z}
z \in R(z) \subseteq Z = (S \setminus Q) \cup T \cup P,
\end{equation}
and we have $z \in S \setminus Q$, or $z \in T$, or $z \in P$. We first show that $z \in T$.
If $z\in P$, then $R(z)\cap \big ( (S\setminus Q)\cup T \big ) = \emptyset$ by \eqref{Eq:ForAllP},
and \eqref{Eq:Z} gives $R(z)\subseteq P$. From this we obtain
$R(z) \cap Q \subseteq P\cap Q = \emptyset$. On the other hand,
$z\in P\subseteq Q^\blacktriangle$ yields $R(z)\cap Q \neq \emptyset$, a contradiction.
Similarly, $z \in S\setminus Q$ gives $R(z) \subseteq (S\setminus Q)^\blacktriangle$. Then,
$P \cap ((S \setminus Q)^\blacktriangle \cup T^\blacktriangle) = \emptyset$ implies
$P \cap R(z)=\emptyset$ and we must have
$R(z) \subseteq (S\setminus Q) \cup T \subseteq S \cup T$.
Since $z \in S$, this contradicts $R(z) \nsubseteq S\cup T$ following from
\eqref{Eq:DefS}. Hence, the only possibility left is $z \in T$.
Next, we prove $z \in T^\blacktriangledown$. Suppose, by the way of contradiction, that
$R(z) \nsubseteq T$. Since $R(z)\subseteq T^\blacktriangle$ and, by \eqref{Eq:defP},
$P \cap T^\blacktriangle = \emptyset$, we obtain $R(z) \cap P \subseteq T^\blacktriangle \cap P = \emptyset$,
which implies $R(z)\subseteq(S\setminus Q)\cup T\subseteq S\cup T$.
This means that $z\in V$. So, there exists an element $q_{z}\in Q$
with $q_{z}\in R(z)$. By the definition of $Q$, $q_{z}\notin T$. So,
we have $q_{z}\notin(S\setminus Q)\cup T$, which contradicts
$R(z)\subseteq (S\setminus Q)\cup T$. Therefore, we have now proved
$R(z) \subseteq T$, that is, $z\in T^\blacktriangledown$.
To complete our proof, we need to show that $Z^\blacktriangle=Y$. Recall that
$Y = S^\blacktriangle\cup T^\blacktriangle$ by \eqref{Eq:DefS}.
By the definition of $Z$, we have $Z^\blacktriangle=(S\setminus Q)^\blacktriangle\cup T^\blacktriangle\cup
P^\blacktriangle$. In view of \eqref{Eq:defP}, $R(p) \subseteq Q^\blacktriangle$ for all $p \in P$, which
implies $P^\blacktriangle = \bigcup_{p \in P} R(p) \subseteq Q^\blacktriangle \subseteq S^\blacktriangle$,
because $Q \subseteq (S \setminus T) \cap T^\blacktriangle \subseteq S$ holds by \eqref{Eq:Q}.
Hence, we have $Z^\blacktriangle\subseteq(S\setminus Q)^\blacktriangle\cup T^\blacktriangle\cup
S^\blacktriangle=T^\blacktriangle\cup S^\blacktriangle$. We show $Z^\blacktriangle=S^\blacktriangle\cup T^\blacktriangle$ by
proving $(S^\blacktriangle\cup T^\blacktriangle) \setminus Z^\blacktriangle = \emptyset$.
Assume now that $R$ satisfies (C) and suppose for contradiction that there exists an
element $y \in(S^\blacktriangle\cup T^\blacktriangle)\setminus Z^\blacktriangle = Y \setminus Z^\blacktriangle$.
Since $Q \subseteq S$, we have $S = Q \cup (S\setminus Q)$ and
$y\in Q^\blacktriangle\cup(S\setminus Q)^\blacktriangle\cup T^\blacktriangle$.
Because $y \notin Z^\blacktriangle = (S\setminus Q)^\blacktriangle \cup T^\blacktriangle\cup P^\blacktriangle$
yields $y \notin (S\setminus Q)^\blacktriangle$ and $y \notin T^\blacktriangle$,
we must have $y\in Q^\blacktriangle \setminus T^\blacktriangle$.
As $Q\subseteq T^\blacktriangle$, we get $y \in Q^\blacktriangle\setminus Q$.
Therefore, there are $v\in V\subseteq T$ and $q_{v}\in Q$
such that $q_{v}\in R(v)\subseteq S\cup T$, $R(v)\nsubseteq T$,
and $y\in R(q_{v})$. Note that since $y \notin Q$, we have $y\neq q_{v}$.
Because $q_{v}\notin T$, we obtain $v\neq q_{v}$ also. So, there exist
$v,q_v,y$ such that $v \neq q_v$, $q_v \neq y$, $v \, R \, q_v$,
and $q_v \, R \, y$.
Because $v\in T=T^{\blacktriangledown\blacktriangle}$, there is $a \in T^\blacktriangledown$ such
that $a \, R \, v$. The fact that $R(v) \nsubseteq T$ gives $v \notin T^\blacktriangledown$ and
hence we must have $a\neq v$.
Observe also that $R(y)\subseteq Q^\blacktriangle$ is not possible. This is because
$y \notin Z^\blacktriangle = (S \setminus Q)^\blacktriangle \cup T^\blacktriangle \cup P^\blacktriangle$,
that is, $y \notin (S \setminus Q)^\blacktriangle$, $y \notin T^\blacktriangle$,
and $y \notin P^\blacktriangle$, combined with $y \in S^\blacktriangle \cup T^\blacktriangle = Y$,
yield $y \in Y \setminus ((S\setminus Q)^\blacktriangle \cup T^\blacktriangle)$.
Hence, by \eqref{Eq:defP}, $R(y)\subseteq Q^\blacktriangle$ would imply
$y\in P\subseteq P^{\blacktriangle}$, a contradiction. Therefore,
$R(y) \nsubseteq Q^\blacktriangle = \bigcup_{q\in Q} R(q)$, and so there is an element $u\in R(y)$ such that
\begin{equation} \label{Eq:ForAllQ}
(\forall q \in Q) \, u \notin R(q).
\end{equation}
Then $y \, R \,u$, and clearly $u\neq y$, because $y \, R \, q_v$ holds.
We need to prove there are no $R$-related elements in the $R$-path
$(a,v,q_{v},y,u)$ except two consecutive ones. If this is true, then all the elements
of the path are distinct, because $a \neq v$, $v \neq q_{v}$, $q_{v}\neq y$,
$y\neq u$ and $R$ is reflexive. Since this is a contradiction to our assumption that $R$ satisfies (C),
there is no $y\in(S^\blacktriangle \cup T^\blacktriangle) \setminus Z^\blacktriangle$, and we may conclude that $Z^\blacktriangle = S^\blacktriangle \cup T^\blacktriangle=Y$,
which finishes the proof.
Indeed, $a \, R \, q_{v}$ implies $q_{v} \in R(a)\subseteq T^{\blacktriangledown \blacktriangle} = T$, contradicting
$q_{v}\notin T$. Similarly, $a \, R \, y$ and $v \, R \, y$
are not possible, because $a,v \in T$ and $y \notin T^\blacktriangle$. By \eqref{Eq:ForAllQ},
$q_{v} \, R \, u$ cannot hold. Furthermore, $a \, R \,u$ implies
$u \in R(a) \subseteq T$ and $y \in R(u) \subseteq T^\blacktriangle$, a contradiction.
Finally, since $R(v) \subseteq S\cup T$, $v \, R \, u$ implies
$u \in R(v)\subseteq S\cup T$. Moreover, we get $u \in(S\setminus Q)\cup T$,
because \eqref{Eq:ForAllQ} implies $u \notin Q$. So, this yields
$y \in R(u) \subseteq(S \setminus Q)^{\blacktriangle}\cup T^{\blacktriangle} \subseteq Z^{\blacktriangle}$, a
contradiction again. Hence, neither $v \, R \, u$ is possible.
\end{proof}
\begin{lemma}\label{Lem:ConditionC}
Any tolerance $R$ on $U$ satisfies {\rm (C)} if and only if for any $X\subseteq U$,
${R_X}^{3}$ is an equivalence on $X$.
\end{lemma}
\begin{proof}
The relation ${R_X}^{3}$ is a tolerance on any $X \subseteq U$ and ${R_X}^{3} \subseteq {R_X}^{4}$.
If $(x,y) \in {R_X}^{4}$, then there is
an $R$-path $(a_{0},...,a_{4})$ of length $4$ with $a_{0} = x$ and $a_{4} = y$.
Condition (C) implies that there are $0 \leq i,j \leq 4$ such that $|i-j| \geq 2$
and $a_i \, R \, a_j$. Hence, $(x,y) \in {R_X}^{3}$ and ${R_X}^{4} = {R_X}^{3}$.
Additionally, we can see by induction that (C) implies ${R_X}^{n} = {R_X}^{3}$ for all $n \geq 3$. Then,
${R_X}^{3}\circ {R_X}^{3} = {R_X}^{6} = {R_X}^{3}$ and hence the tolerance
${R_X}^{3}$ is transitive, that is, ${R_X}^{3}$ is an equivalence. Conversely, let $(a_{0},...,a_{4})$ be an
$R$-path of length $4$, $X = \{a_{0},...,a_{4}\}$, and suppose that ${R_X}^{3}$
is an equivalence. Then $(a_{0},a_{4}) \in {R_X}^{4} \subseteq {R_X}^{6}=
{R_X}^{3} \circ {R_X}^{3} \subseteq {R_X}^{3}$ implies
$(a_{0},a_{4}) \in {R_X}^{3}$. Observe that this is possible only if condition (C) holds.
\end{proof}
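As an illustration of Lemma~\ref{Lem:ConditionC} (added here for the reader's convenience),
consider an $R$-path $(a_0,\ldots,a_4)$ with $a_i \, R \, a_j$ only for $|i-j| \leq 1$, which is the
situation of Example~\ref{Ex:Counter}, and let $X = \{a_0,\ldots,a_4\}$. Then $(a_0,a_3)$ and
$(a_3,a_4)$ belong to ${R_X}^{3}$, but $(a_0,a_4)$ does not, so ${R_X}^{3}$ is not transitive;
this agrees with the lemma, because such a path violates (C).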
\begin{corollary} \label{Cor:Condition}
Let $R$ be a tolerance on $U$. Then, the following are equivalent:
\begin{enumerate}[\rm (a)]
\item $\mathit{RS}_{X}$ is a complete lattice for all $X\subseteq U$.
\item $\mathit{RS}_{X}$ is a lattice for all $X\subseteq U$ with $|X| = 5$.
\item For any $X \subseteq U$, ${R_X}^{3}$ is an equivalence on $X$.
\end{enumerate}
\end{corollary}
\begin{proof}
The implication (a)$\Rightarrow$(b) is trivial. If (b) holds, then $R$ satisfies condition (C) according to
Lemma~\ref{Lem:Sufficient}. Hence, by Lemma~\ref{Lem:ConditionC},
every ${R_X}^{3}$ is an equivalence, and we have (b)$\Rightarrow$(c). Again,
by Lemma~\ref{Lem:ConditionC}, (c) implies that $R_{X}$ satisfies (C) for all $X\subseteq U$.
Hence, by applying Theorem~\ref{Thm:Main} for each $X\subseteq U$ and $R_X$, we obtain (a), and
so (c)$\Rightarrow$(a).
\end{proof}
\begin{example}
Let $\mathcal{S} = (U,A,\{V_a\}_{a \in A})$ be an information system in which each attribute is two-valued,
that is, $V_a = \{0,1\}$ for all $a \in A$. For any $B \subseteq A$, the \emph{weak $B$-indiscernibility} is
defined so that for all $x,y \in U$,
\[
(x,y) \in \mathit{wind}_B \iff (\exists a \in B)\, a(x) = a(y).
\]
Let $B \subseteq A$ and assume that there is a $\mathit{wind}_B$-path $(x_1,x_2,x_3,x_4,x_5)$ in $U$.
This means that for each $1 \leq i \leq 4$, there is an attribute $a \in B$ such that $a(x_i) = a(x_{i+1})$.
Assume that condition (C) does not hold. Then, in particular, $(x_1,x_3) \notin \mathit{wind}_B$, $(x_3,x_5) \notin \mathit{wind}_B$,
and $(x_1,x_5) \notin \mathit{wind}_B$.
This means that for all $a \in B$, $a(x_1) \neq a(x_3)$ and $a(x_3) \neq a(x_5)$. But since the attribute sets are two-valued,
this must imply that $a(x_1) = a(x_5)$ for all $a \in B$. Thus, $(x_1,x_5) \in \mathit{wind}_B$, a contradiction.
The following information system shows that (C) does not necessarily hold in cases when attribute sets have at least three values.
\begin{table}[h]
\centering
\begin{center}
\begin{tabular}{c|cc}
$U$ & $a$ & $b$ \\ \hline
$1$ & $0$ & $0$\\
$2$ & $0$ & $1$\\
$3$ & $1$ & $1$\\
$4$ & $1$ & $2$\\
$5$ & $2$ & $2$
\end{tabular}
\end{center}
\end{table}
Indeed, $(1,2,3,4,5)$ is a $\mathit{wind}_{\{a,b\}}$-path, since $a(1)=a(2)$, $b(2)=b(3)$, $a(3)=a(4)$,
and $b(4)=b(5)$, while no two elements at distance at least $2$ agree on either attribute; hence (C) fails.
\end{example}
\section{Disjoint representation of rough sets} \label{Sec:DisjointRepresentations}
Disjoint representations of rough sets were introduced by P.~Pagliani in \cite{Pagliani97}.
Each rough set $(X^\blacktriangledown,X^\blacktriangle)$ may as well be represented as a
pair $(X^\blacktriangledown,X^{\blacktriangle c})$, called the \emph{disjoint rough set} of $X$.
Clearly, $(X^\blacktriangledown,X^{\blacktriangle c}) \in \wp(U)^\blacktriangledown \times \wp(U)^\blacktriangledown$
and now $X^{\blacktriangle c}$ can be interpreted as the set of elements that certainly are outside $X$,
while $X^\blacktriangledown$ consists of elements certainly belonging to $X$. Let us denote
\[ \mathit{dRS} = \{ (X^\blacktriangledown, X^{\blacktriangle c}) \mid X \subseteq U \}, \]
and define an order-isomorphism $\phi$ between $\wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$ and
$\wp(U)^\blacktriangledown \times \wp(U)^{\blacktriangledown \mathrm{op}}$ by $(A,B) \mapsto (A,B^c)$. Obviously, $\phi$
is also an order-isomorphism between $\mathit{RS}$ and $\mathit{dRS}$, when
$\mathit{dRS}$ is ordered by the order of $\wp(U)^\blacktriangledown \times \wp(U)^{\blacktriangledown \mathrm{op}}$.
We define a De~Morgan operation $\mathfrak{c}$ on $\wp(U)^\blacktriangledown \times \wp(U)^{\blacktriangledown \mathrm{op}}$ by
\begin{equation}\label{Eq:Swap}
\mathfrak{c} \colon (A,B) \mapsto (B,A).
\end{equation}
Clearly, for all $(A,B) \in \wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$,
\[ \phi({\sim}(A,B)) = \phi(B^c,A^c) = (B^c,A) = \mathfrak{c}(A,B^c) = \mathfrak{c}(\phi(A,B)),\]
where ${\sim}$ is the De~Morgan operation on $\wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$ defined in \eqref{Eq:Negation}.
Additionally, if $(X^\blacktriangledown,X^{\blacktriangle c}) \in \mathit{dRS}$, then $\mathfrak{c}(X^\blacktriangledown,X^{\blacktriangle c}) =
(X^{c \blacktriangledown},X^{c \blacktriangle c}) \in \mathit{dRS}$.
In \cite{Pagliani97} Pagliani showed that in the case of equivalences, disjoint rough sets are closely
connected to the construction of Nelson algebras by Sendlewski \cite{Sendlewski90}.
Pagliani's results are generalized for quasiorders in \cite{JPR12}, where it is proved that for any
quasiorder $R$ on $U$,
\[
\mathit{dRS} = \{ (A,B) \in \wp(U)^\blacktriangledown \times \wp(U)^\blacktriangledown \mid A \cap B = \emptyset \text{ and }
\mathcal{S} \subseteq A \cup B \},
\]
where $\mathcal{S}$ is the set of singleton $R(x)$-sets defined as in \eqref{EQ:DefineS}.
By applying this equality it is possible to show that on $\mathit{dRS}$, and thus on $\mathit{RS}$,
a Nelson algebra can be defined by applying Sendlewski's construction. However, in the case of tolerances
the situation is quite different, because $\mathit{RS}$ and $\mathit{dRS}$ do not always form lattices, and
even they do, the lattices are not necessarily distributive. However, in case
the tolerance $R$ induced by an irredundant covering of $U$, these lattices are distributive, and a
quasi-Nelson algebra can be defined on $\mathit{RS}$ and $\mathit{dRS}$, as shown in
Proposition~\ref{Prop:QuasiNelson}. Anyway, these quasi-Nelson algebras are not necessarily
Nelson algebras.
In Section~\ref{Sec:OrderedSets}, we defined the increasing representation of
rough sets, that is,
\[
\mathcal{I}(\mathit{RS}) = \{ (A,B) \in \wp(U)^{\blacktriangledown} \times \wp(U)^{\blacktriangle}
\mid A^{\blacktriangle}\subseteq B^\blacktriangledown \text{ and } \mathcal{S} \subseteq A\cup B^{c}\},
\]
and showed that this is the Dedekind--MacNeille completion of $\mathit{RS}$.
If we map the set $\mathcal{I}(\mathit{RS})$ by the isomorphism $\phi$, we obtain the set
\[
\mathcal{D}(\mathit{RS}) = \{ (A,B) \in \wp(U)^{\blacktriangledown} \times \wp(U)^{\blacktriangledown}
\mid A^{\blacktriangle} \cap B^\blacktriangle = \emptyset \text{ and } \mathcal{S} \subseteq A\cup B\}.
\]
The set $\mathcal{D}(\mathit{RS})$ is called the \emph{disjoint representation of rough sets}. Obviously,
the map $\mathfrak{c}$ defined in \eqref{Eq:Swap} is a De~Morgan operation
on $\mathcal{D}(\mathit{RS})$, and if $\mathit{RS}$ is a complete lattice,
then $\mathit{RS}$ and $\mathcal{D}(\mathit{RS})$ can be identified by the
map $\phi$.
We end this work by studying the connection between $\mathcal{D}(\mathit{RS})$
and the concept lattice $\mathfrak{B}(\mathbb{K})$ defined by the context
$\mathbb{K} = (U,U,R^c)$. In \cite{Kwuida04,Wille2000}, it is considered for a
concept $(A,B)$ of an arbitrary context its \emph{weak negation} by
\[
(A, B)^\bigtriangleup = (A^{c \prime \prime}, A^{c \prime})
\]
and its \textit{weak opposition} by
\[
(A,B)^\bigtriangledown = (B^{c \prime}, B^{c \prime\prime}).
\]
Especially, we are here considering the weak opposition operation $^\bigtriangledown$,
which satisfies for all concepts $(A,B)$ and $(C,D)$:
\[
(A,B) \leq (C,D)^{\bigtriangledown} \iff (C,D) \leq (A,B)^{\bigtriangledown}.
\]
We already noted in Section~\ref{Sec:ToleranceApproximations} that the concept lattice
of the context $\mathbb{K} = (U,U,R^c)$ is
$\mathfrak{B}(\mathbb{K}) = \{ (A,A^\top) \mid A \in \wp(U)^\blacktriangledown \}$,
where $^\top$ is the orthocomplement operation of $\wp(U)^\blacktriangledown$.
Recall that $A^\top = A^{\prime} = A^{\blacktriangle c} = A^{c \blacktriangledown}$.
For $(A,B) \in \mathfrak{B}(\mathbb{K})$, the weak negation and the weak opposition are then
defined by
\[
(A, B)^\bigtriangleup = (A^{\blacktriangledown \top}, A^{\blacktriangledown})
\text{ \ and \ }
(A, B)^\bigtriangledown = (B^{\blacktriangledown}, B^{\blacktriangledown \top}).
\]
We now consider the complete lattice
${\mathfrak{B}}(\mathbb{K}) \times {\mathfrak{B}}(\mathbb{K})^\mathrm{op}$,
where ${\mathfrak{B}}(\mathbb{K})^\mathrm{op}$ is the dual of the concept lattice
${\mathfrak{B}}(\mathbb{K})$, that is, ${\mathfrak{B}}(\mathbb{K})^\mathrm{op}$ is ordered by
\[
(A_1,B_1) \leq (A_2,B_2) \iff A_1 \supseteq A_2 \iff B_1 \subseteq B_2.
\]
Let $\mathrm{ext}(\alpha)$ denote the extent $A$ of a concept $\alpha =(A,B)$. We
define the set
\[
\mathcal{FC}(\mathit{RS}) = \{ (\alpha,\beta) \in \mathfrak{B}(\mathbb{K}) \times \mathfrak{B}(\mathbb{K})
\mid \beta \leq \alpha^{\bigtriangledown} \text{ in $\mathfrak{B}(\mathbb{K})$ and } \mathcal{S} \subseteq
\mathrm{ext}(\alpha) \cup \mathrm{ext}(\beta) \},
\]
and we call it the \emph{formal concept representation of rough sets}.
We order the set $\mathcal{FC}(\mathit{RS})$ by the order of
${\mathfrak{B}}(\mathbb{K}) \times {\mathfrak{B}}(\mathbb{K})^\mathrm{op}$.
\begin{proposition}
Let $R$ be a tolerance on $U$ and \ $\mathbb{K} = (U,U,R^c)$.
\begin{enumerate}[\rm (a)]
\item $\mathcal{FC}(\mathit{RS})$ is a complete sublattice of
$\mathfrak{B}(\mathbb{K}) \times \mathfrak{B}(\mathbb{K})^\mathrm{op}$.
\item The complete lattices $\mathcal{FC}(\mathit{RS})$ and
$\mathcal{D}(\mathit{RS})$ are isomorphic.
\end{enumerate}
\end{proposition}
\begin{proof} First, let us define a map $\varphi \colon \wp(U)^\blacktriangledown \times \wp(U)^\blacktriangledown
\to \mathfrak{B}(\mathbb{K}) \times \mathfrak{B}(\mathbb{K})$
by setting
\[(A,B) \mapsto ((A,A^\top),(B,B^\top)).
\]
Trivially, the map $\varphi$ is well defined. Next we show that $\varphi$
is an order-isomorphism between the complete lattices $\wp(U)^\blacktriangledown \times \wp(U)^{\blacktriangledown\mathrm{op}}$
and $\mathfrak{B}(\mathbb{K}) \times \mathfrak{B}(\mathbb{K})^\mathrm{op}$.
If $(A,B),(C,D)\in \wp(U)^\blacktriangledown \times \wp(U)^\blacktriangledown$, then
\begin{align*}
& (A,B) \leq (C,D) \text{ in $\wp(U)^\blacktriangledown \times \wp(U)^{\blacktriangledown\mathrm{op}}$} &\iff \\
& A \subseteq C \text{ and } B \supseteq D & \iff \\
& (A,A^\top) \leq (C,C^\top) \text{ and } (B,B^\top) \geq (D,D^\top) \text{ in $\mathfrak{B}(\mathbb{K})$} &\iff \\
& \varphi(A,B)\leq \varphi(C,D) \text{ in $\mathfrak{B}(\mathbb{K}) \times \mathfrak{B}(\mathbb{K})^\mathrm{op}$}
\end{align*}
Thus, $\varphi$ is an order-embedding. If $((A,A^\top), (B,B^\top)) \in
\mathfrak{B}(\mathbb{K}) \times \mathfrak{B}(\mathbb{K})$, then
$A,B \in \wp(U)^\blacktriangledown$ and $\varphi(A,B) = ((A,A^\top), (B,B^\top))$. Therefore, the
map $\varphi$ is also onto, and it is an order-isomorphism.
Next, we prove that $\mathcal{FC}(\mathit{RS})$ is the image
of $\mathcal{D}(\mathit{RS})$ under $\varphi$. Note first that for all
$A,B \in \wp(U)^\blacktriangledown$,
\begin{align*}
A^{\blacktriangle} \cap B^\blacktriangle = \emptyset \iff A^{\blacktriangle \blacktriangle} \cap B = \emptyset
\iff B \subseteq A^{\blacktriangle \blacktriangle c} = A^{c \blacktriangledown \blacktriangledown} = A^{\top \blacktriangledown}.
\end{align*}
Since $(A,A^\top)^\bigtriangledown = (A^{\top \blacktriangledown}, A^{\top \blacktriangledown \top})$,
we have that
\[ A^{\blacktriangle} \cap B^\blacktriangle = \emptyset \iff (B,B^\top) \leq (A,A^\top)^\bigtriangledown \text{ in $\mathfrak{B}(\mathbb{K})$}.\]
Additionally, $\mathcal{S} \subseteq A \cup B = \mathrm{ext}(A,A^\top) \cup \mathrm{ext}(B,B^\top)$.
These facts imply
\[
(A,B) \in\mathcal{D}(\mathit{RS}) \iff \varphi(A,B) \in\mathcal{FC}(\mathit{RS}).
\]
Since $\varphi$ is a bijection, we get
\[
\mathcal{FC}(\mathit{RS}) = (\varphi \circ \varphi^{-1})(\mathcal{FC}(\mathit{RS})) = \varphi(\mathcal{D}(\mathit{RS})).
\]
Hence, $\varphi$ determines an order-isomorphism between the complete lattices $\mathcal{D}(\mathit{RS})$
and $\mathcal{FC}(\mathit{RS})$, which proves (b).
Because $\mathcal{I}(\mathit{RS})$ is a complete sublattice of $\wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$
by Proposition~\ref{Prop:Completion}(b), its image
$\mathcal{D}(\mathit{RS})$ under the isomorphism $\phi \colon (A,B) \mapsto (A,B^c)$
is a complete sublattice of $\wp(U)^\blacktriangledown \times \wp(U)^{\blacktriangledown \mathrm{op}}$.
This implies that the image $\mathcal{FC}(\mathit{RS})$ of $\mathcal{D}(\mathit{RS})$ under $\varphi$
is a complete sublattice of
${\mathfrak{B}}(\mathbb{K})\times {\mathfrak{B}}(\mathbb{K})^\mathrm{op}$, and claim (a)
is proved.
\end{proof}
Since for all $(\alpha,\beta) \in \mathcal{FC}(\mathit{RS})$,
$\beta \leq \alpha^{\bigtriangledown} \iff \alpha \leq \beta^{\bigtriangledown}$,
it is easy to see that $\mathfrak{c}^* \colon (\alpha,\beta) \mapsto (\beta,\alpha)$
is a De~Morgan operation on $\mathcal{FC}(\mathit{RS})$. Up to isomorphism,
this operation is the same as in $\mathcal{I}(\mathit{RS})$ and
$\mathcal{D}(\mathit{RS})$. Namely, if $(A,B) \in \mathcal{D}(\mathit{RS})$, then
\[
\varphi( \mathfrak{c}(A,B)) = \varphi(B,A) = ((B,B^\top),(A,A^\top)) =
\mathfrak{c}^*((A,A^\top),(B,B^\top)) = \mathfrak{c}^*(\varphi(A,B) ). \]
\noindent
We conclude this section by giving the following summary of rough representations:
\begin{itemize}
\item For any tolerance $R$ on $U$, the representations $\mathcal{I}(\mathit{RS})$,
$\mathcal{D}(\mathit{RS})$, and $\mathcal{FC}(\mathit{RS})$ are Dedekind--MacNeille
completions of $\mathit{RS}$ equipped with De~Morgan operations satisfying (K3)
that are identical up to isomorphism.
\item The ordered sets $\mathit{RS}$ and $\mathit{dRS}$ are isomorphic, and they are
complete lattices if and only if $\mathit{RS}$ is a complete sublattice of
$\wp(U)^\blacktriangledown \times \wp(U)^\blacktriangle$, or, equivalently, $\mathit{dRS}$ is a complete sublattice of
$\wp(U)^\blacktriangledown \times \wp(U)^{\blacktriangledown \textrm{op}}$.
\item If $\mathit{RS}$ and $\mathit{dRS}$ are complete lattices, then
they are identical to $\mathcal{I}(\mathit{RS})$ and $\mathcal{D}(\mathit{RS})$, respectively.
This implies also $\mathit{RS} \cong \mathcal{FC}(\mathit{RS})$.
\item If $R$ is induced by an irredundant covering of $U$, then $\mathit{RS}$, $\mathit{dRS}$, $\mathcal{I}(\mathit{RS})$,
$\mathcal{D}(\mathit{RS})$, and $\mathcal{FC}(\mathit{RS})$ determine isomorphic quasi-Nelson algebras.
\end{itemize}
\section{Some concluding remarks}
In this work, we have considered rough set systems determined by so-called element-based approximation pairs induced by a tolerance relation.
For any tolerance, we were able to give the Dedekind--MacNeille completion of $\mathit{RS}$ in terms of formal concept analysis, and also in terms of
the increasing and the disjoint representations. Under certain conditions, the rough set system $\mathit{RS}$ forms a complete
lattice. In particular, if the tolerance is induced by an irredundant covering of the universe, its rough set lattice is algebraic and completely
distributive, and a Kleene algebra (in fact, even a quasi-Nelson algebra) may be defined on it.
We learned that weak similarity satisfies condition (C) when the attributes are two-valued, but this is generally no longer true even
for three-valued attributes. Additionally, we observed that tolerances induced by irredundant
coverings arise in incomplete and approximate information systems in the presence of learning examples.
We would also like to emphasize that if the universe $U$ is finite and the rough set lattice $\mathit{RS}$ is distributive, then $\mathit{RS}$
is necessarily determined by a tolerance induced by an irredundant covering. This is because a finite distributive lattice is
also completely distributive. Since studies in rough set theory quite often assume that the universe is finite, the case of
$\mathit{RS}$ forming a distributive lattice is thus completely characterized.
In the future, we will study under which conditions rough set systems determined by tolerances define Nelson algebras or 3-valued {\L}ukasiewicz algebras,
because it is known that in the case of quasiorders the rough set systems form Nelson algebras, and in the particular case of equivalences,
these systems define 3-valued {\L}ukasiewicz algebras. We propose a deeper analysis of the tolerance relations induced by irredundant coverings,
and their relations to information systems.
It might also be fruitful to study lattice-theoretical properties of completions considered in Section~\ref{Sec:DisjointRepresentations},
that is, how the properties of the ortholattices $\wp(U)^\blacktriangledown \cong \wp(U)^\blacktriangle$ affect the completions. These constructions may
have some similarities to the one of Sendlewski \cite{Sendlewski90} or they could be based on generalizations of
Heyting algebras \cite{Chajda03}, for instance.
Finally, we note that it still remains an open question under which conditions on the tolerance $R$ the rough set system forms a lattice.
\begin{bibdiv}
\begin{biblist}
\bib{AbuDonia2012}{article}{
author={Abu-Donia, H.M.},
author={Salama, A.S.},
title={Generalization of Pawlak's rough approximation spaces by using
$\delta\beta$-open sets},
date={2012},
journal={International Journal of Approximate Reasoning},
volume={53},
pages={1094\ndash 1105},
}
\bib{Birk95}{book}{
author={Birkhoff, Garrett},
title={Lattice theory},
edition={3},
series={Colloquium Publications},
publisher={American Mathematical Society (AMS)},
address={Providence, Rhode Island},
date={1995},
volume={XXV},
}
\bib{Bonikowski1998}{article}{
author={Bonikowski, Zbigniew},
author={Bryniarski, Edward},
author={Wybraniec-Skardowska, Urszula},
title={Extensions and intentions in the rough set theory},
date={1998},
journal={Information Sciences},
volume={107},
pages={149\ndash 167},
}
\bib{Chajda03}{article}{
author={Chajda, I.},
author={Radeleczki, S.},
title={On varieties defined by pseudocomplemented nondistributive lattices},
date={2003},
journal={Publicationes Mathematicae Debrecen},
volume={63},
pages={737\ndash 750},
}
\bib{Cign86}{article}{
author={Cignoli, Roberto},
title={The class of {K}leene algebras satisfying an interpolation property and {N}elson algebras},
date={1986},
journal={Algebra Universalis},
volume={23},
pages={262\ndash 292},
}
\bib{Com93}{article}{
author={Comer, Stephen~D.},
title={On connections between information systems, rough sets, and algebraic logic},
book={
date={1993},
title={Algebraic methods in logic and computer science},
series={Banach Center Publications}
},
pages={117\ndash 124},
}
\bib{DaPr02}{book}{
author={Davey, Brian~A.},
author={Priestley, Hilary~A.},
title={Introduction to lattices and order},
edition={2},
publisher={Cambridge University Press},
date={2002},
}
\bib{DemOrl02}{book}{
author={Demri, St{\'e}phane~P.},
author={Or{\l}owska, Ewa~S.},
title={Incomplete information: Structure, inference, complexity},
publisher={Springer},
address={Berlin/Heidelberg},
date={2002},
}
\bib{Dzik2013}{article}{
author={Dzik, Wojciech},
author={J{\"a}rvinen, Jouni},
author={Kondo, Michiro},
title={Representing expansions of bounded distributive lattices with {G}alois connections in terms of rough sets},
date={2013},
journal={International Journal of Approximate Reasoning},
doi={http://dx.doi.org/10.1016/j.ijar.2013.07.005},
}
\bib{ganter1999formal}{book}{
author={Ganter, Bernhard},
author={Wille, Rudolf},
title={Formal concept analysis: Mathematical foundations},
publisher={Springer},
address={Berlin/Heidelberg},
date={1999},
}
\bib{Grat98}{book}{
author={Gr{\"a}tzer, George},
title={General lattice theory},
edition={2},
publisher={Birkh{\"a}user},
address={Basel},
date={1998},
}
\bib{Jarv99}{thesis}{
author={J{\"a}rvinen, Jouni},
title={Knowledge representation and rough sets},
type={Ph.D. Thesis},
date={1999},
institution={University of Turku, Department of mathematics. TUCS Dissertations 14},
}
\bib{Jarv01}{article}{
author={J{\"a}rvinen, Jouni},
title={Approximations and rough sets based on tolerances},
date={2001},
journal={Lecture Notes in Computer Science},
volume={2005},
pages={182\ndash 189},
}
\bib{Jarv04}{article}{
author={J{\"a}rvinen, Jouni},
title={The ordered set of rough sets},
date={2004},
journal={Lecture Notes in Computer Science},
volume={3066},
pages={49\ndash 58},
}
\bib{Jarv07}{article}{
author={J{\"a}rvinen, Jouni},
title={Lattice theory for rough sets},
date={2007},
journal={Transactions on Rough Sets},
volume={VI},
pages={400\ndash 498},
}
\bib{JPR12}{article}{
author={J{\"a}rvinen, Jouni},
author={Pagliani, Piero},
author={Radeleczki, S\'{a}ndor},
title={Information completeness in {N}elson algebras of rough sets induced by quasiorders},
date={2013},
journal={Studia Logica},
volume={101},
pages={1073\ndash 1092},
}
\bib{JarRad}{article}{
author={J\"{a}rvinen, Jouni},
author={Radeleczki, S{\'a}ndor},
title={Representation of {N}elson algebras by rough sets determined by quasiorders},
date={2011},
journal={Algebra Universalis},
volume={66},
pages={163\ndash 179},
}
\bib{JRV09}{article}{
author={J{\"a}rvinen, Jouni},
author={Radeleczki, S{\'a}ndor},
author={Veres, Laura},
title={Rough sets determined by quasiorders},
date={2009},
journal={Order},
volume={26},
pages={337\ndash 355},
}
\bib{Kryszkiewicz1998}{article}{
author={Kryszkiewicz, Marzena},
title={Rough set approach to incomplete information systems},
date={1998},
journal={Information Sciences},
volume={112},
pages={39\ndash 49},
}
\bib{Kwuida04}{thesis}{
author={Kwuida, L\'{e}onard},
title={Dicomplemented lattices. {A} contextual generalization of {B}oolean algebras},
type={Ph.D. Thesis},
institution={Technical University of Dresden},
date={2004},
}
\bib{Ma2012}{article}{
author={Ma, Liwen},
title={On some types of neighborhood-related covering rough sets},
date={2012},
journal={International Journal of Approximate Reasoning},
volume={53},
pages={901\ndash 911},
}
\bib{Mani08}{article}{
author={Mani, A.},
title={Esoteric rough set theory: Algebraic semantics of a generalized {VPRS} and {VPFRS}},
date={2008},
journal={Transactions on Rough Sets},
volume={VIII},
pages={175\ndash 223},
}
\bib{Pagliani97}{article}{
author={Pagliani, Piero},
title={Rough set systems and logico-algebraic structures},
book={
editor={Or{\l}owska, E.},
title={Incomplete information: Rough set analysis},
publisher={Physica-Verlag},
place={Heidelberg},
date={1997}
},
pages={109\ndash 190},
}
\bib{pawlak1981information}{article}{
author={Pawlak, Zdzis{\l}aw},
title={Information systems theoretical foundations},
date={1981},
journal={Information systems},
volume={6},
pages={205\ndash 218},
}
\bib{Pawl82}{article}{
author={Pawlak, Zdzis{\l}aw},
title={Rough sets},
date={1982},
journal={International Journal of Computer and Information Sciences},
volume={11},
pages={341\ndash 356},
}
\bib{Pomykala88}{article}{
author={Pomyka{\l}a, J.~A.},
title={On definability in the nondeterministic information system},
date={1988},
journal={Bulletin of the Polish Academy of Science. Mathematics},
volume={36},
pages={193\ndash 210},
}
\bib{PomPom88}{article}{
author={Pomyka{\l}a, Jacek},
author={Pomyka{\l}a, Janusz~A.},
title={The {S}tone algebra of rough sets},
date={1988},
journal={Bulletin of Polish Academy of Sciences. Mathematics},
volume={36},
pages={495\ndash 512},
}
\bib{Restrepo2013}{article}{
author={Restrepo, Mauricio},
author={Cornelis, Chris},
author={G{\'o}mez, Jonatan},
title={Duality, conjugacy and adjointness of approximation operators in covering-based rough sets},
date={2013},
journal={International Journal of Approximate Reasoning},
doi={http://dx.doi.org/10.1016/j.ijar.2013.08.002},
}
\bib{Sendlewski90}{article}{
author={Sendlewski, Andrzej},
title={Nelson algebras through {H}eyting ones~{I}},
date={1990},
journal={Studia Logica},
volume={49},
pages={105\ndash 126},
}
\bib{Shreider}{book}{
author={Shreider, Yu.~A.},
title={Ravenstvo, skhodstvo, poryadok (Equality, Similarity, Order)},
publisher={Nauka},
address={Moscow},
date={1971},
}
\bib{UmaThesis}{thesis}{
author={Umadevi, D.},
title={A study on the ordered structure of rough sets},
type={Ph.D. Thesis},
institution={Madurai Kamaraj University, Tamil Nadu, India},
date={2012},
}
\bib{Wille2000}{article}{
author={Wille, Rudolf},
title={Boolean concept logic},
date={2000},
journal={Lecture Notes in Computer Science},
volume={1867},
pages={317\ndash 331},
}
\bib{YaoLin1996}{article}{
author={Yao, Y.~Y.},
author={Lin, T.~Y.},
title={Generalization of rough sets using modal logics},
date={1996},
journal={Intelligent Automation and Soft Computing},
volume={2},
pages={103\ndash 120},
}
\bib{Yao2012}{article}{
author={Yao, Yiyu},
author={Yao, Bingxue},
title={Covering based rough set approximations},
date={2012},
journal={Information Sciences},
volume={200},
pages={91\ndash 107},
}
\bib{yao2004concept}{article}{
author={Yao, Y.~Y.},
title={Concept lattices in rough set theory},
date={2004},
booktitle={Proceedings of the 23rd international meeting of the {North American Fuzzy Information Processing Society (NAFIPS 2004)}},
pages={796\ndash 801},
}
\bib{Zeeman62}{article}{
author={Zeeman, E.~C.},
title={The topology of the brain and visual perception},
book={
editor={M.~K.~Fort, Jr.},
title={Topology of 3-Manifolds},
publisher={Prentice-Hall},
address={Englewood Cliffs, NJ},
date={1962}
},
}
\end{biblist}
\end{bibdiv}
\end{document}
\begin{document}
\begin{abstract}
We study the spectrum of a large bi-diagonal Toeplitz matrix subject to
a Gaussian random perturbation with a small coupling constant.
We obtain a precise asymptotic description of the average
density of eigenvalues in the interior of the convex hull of the range
of the symbol.
\vskip.5cm
\par\noindent \textsc{R{\'e}sum{\'e}.}
Nous \'etudions le spectre d'une grande matrice de Toeplitz
soumise \`a une perturbation gaussienne avec petite constante
de couplage. Nous obtenons une description asymptotique pr\'ecise
de la densit\'e moyenne des valeurs propres \`a l'int\'erieur de l'enveloppe
convexe de l'image du symbole.
\end{abstract}
\maketitle
\setcounter{tocdepth}{1}
\section{Introduction and main result}\label{int}
\setcounter{equation}{0}
It is well known that the spectrum of non-normal operators can be extremely
unstable even under tiny perturbations, see e.g. \cite{TrEm05,Da07}. It is
therefore a natural question to study the spectra of such operators subject
to small random perturbations. Recently, there has been a mounting interest
in the spectral properties of elliptic non-normal (pseudo-)differential operators
with small random perturbations, see for example \cite{BM,Ha06b,HaSj08,SjAX1002,Vo14,ZwChrist10}.
An interesting, perhaps surprising, result is that by adding a small random
perturbation, we can obtain a probabilistic Weyl law for the eigenvalues
for a large class of such operators.
\par
Another important example is the case of non-normal Toeplitz matrices,
since they can arise for example in models of non-Hermitian quantum
mechanics, see e.g. \cite{GoKh00,HaNe96}. The authors' interest in this
case, however, is motivated by the aspect of spectral instability.
\\
\par
The goal of this work is to study the spectrum of random perturbations of the
following bidiagonal $N\times N$ Toeplitz matrix:
\begin{equation}\label{int.1}
P=\begin{pmatrix} 0 &a &0 &.. &.. &0\\
b &0 &a &.. &..&0\\
0 &b &0 &.. &..&0\\
.. &.. &.. &..&..&..\\
0 & ..&.. &..&0 &a\\
0 &0 &.. &.. &b &0 \end{pmatrix}.
\end{equation}
Here $a,\, b\in {\bf C}\setminus \{ 0 \}$ and $N\gg 1$. Identifying ${\bf C}^N$
with $\ell^2([1,N])$, $[1,N]=\{ 1,2,..,N\}$ and also with $\ell^2_{[1,N]}({\bf
Z})$ (the space of all $u\in \ell^2({\bf Z})$ with support in
$[1,N]$), we have:
\begin{equation}\label{int.3}
P=1_{[1,N]}(a\tau _{-1}+b\tau _1)1_{[1,N]}=1_{[1,N]}(a\mathrm{e}^{iD_x}+b\mathrm{e}^{-iD_x})1_{[1,N]},
\end{equation}
where $\tau _ku(j)=u(j-k)$ denotes translation by $k$, and
\begin{equation*}
(a\mathrm{e}^{iD_x}+b\mathrm{e}^{-iD_x})u(n)= \frac{1}{2\pi}
\int_{\mathbf{R}/2\pi\mathbf{Z}}\mathrm{e}^{in\xi}p(\xi)\widehat{u}(\xi)d\xi,
\quad u \in \ell^2(\mathbf{Z}),
\end{equation*}
where $\widehat{u}$ denotes the Fourier transformation of $u$ and
$p(\xi)$ is the symbol of $P$, given by
\begin{equation}\label{int.5}
p(\xi )=a\mathrm{e}^{i\xi }+b\mathrm{e}^{-i\xi }.
\end{equation}
Assume, to fix the ideas, that $|b|\le |a|$. Then
$p({\bf R})$ is equal to the ellipse, $E_1$, centred at 0 with major
semi-axis of length $(|a|+|b|)$ pointing in the direction $e^{i(\alpha
+\beta )/2}$, where $\alpha = \mathrm{arg}(a)$, $\beta=\mathrm{arg}(b)$,
and minor semi-axis of length $|a|-|b|$. The focal points
of $E_1$ are
\begin{equation}\label{rasy.2.5}
\pm 2\sqrt{ab}=\pm e^{i\frac{\alpha +\beta }{2}} 2\sqrt{|a||b|}.
\end{equation}
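To see this concretely (a short computation included only for the reader's convenience),
write $a = |a|\mathrm{e}^{i\alpha }$, $b = |b|\mathrm{e}^{i\beta }$ and put $\varphi = \xi + (\alpha -\beta )/2$; then
\begin{equation*}
p(\xi ) = \mathrm{e}^{i(\alpha +\beta )/2}\big( (|a|+|b|)\cos \varphi + i(|a|-|b|)\sin \varphi \big),
\end{equation*}
which is precisely a parametrization of the ellipse $E_1$ described above.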
In a previous work \cite{SjVo15b} the authors have shown that
the numerical range of $P$ is contained in the
convex hull of the ellipse $E_1$ described above and the
eigenvalues of $P$ are given by
\begin{equation}\label{spnp.12}
z=z(\nu )=2\sqrt{ab}\cos
\left( \frac{\pi \nu }{N+1} \right),
\quad \nu =1,\dots, N.
\end{equation}
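For completeness we note (this elementary verification is not taken from \cite{SjVo15b}) that
\eqref{spnp.12} can be checked directly: fixing a branch of the square roots, the vector $u_\nu$ with entries
\begin{equation*}
u_\nu (j) = \Big( \frac{b}{a} \Big)^{j/2}\sin \Big( \frac{\pi \nu j}{N+1} \Big), \qquad j=1,\dots ,N,
\end{equation*}
extended by $u_\nu (0) = u_\nu (N+1) = 0$, satisfies
$a u_\nu (j+1) + b u_\nu (j-1) = 2\sqrt{ab}\,\cos \big( \tfrac{\pi \nu }{N+1} \big) u_\nu (j)$
for $1\leq j \leq N$, so $u_\nu$ is an eigenvector of $P$ for the eigenvalue $z(\nu )$.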
This result is also illustrated in Figure \ref{fig1}. In this work, we consider
the following random perturbation of $P$
\begin{equation}\label{int.5a}
P_{\delta} := P + \delta Q_{\omega},
\quad
Q_{\omega}=(q_{j,k}(\omega))_{1\leq j,k\leq N},
\end{equation}
where $0\leq\delta\ll 1 $, possibly depending on $N$,
and $q_{j,k}(\omega)$ are independent and
identically distributed complex Gaussian random variables,
following the complex Gaussian law $\mathcal{N}_{\mathbf{C}}(0,1)$.
\begin{figure}[h]
\centering
\includegraphics[scale=0.6]{ellipse.pdf}
\caption{The black dots along the focal segment show the
spectrum (obtained using MATLAB) of the unperturbed operator $P$ with dimension $N=501$,
$a=0.5$, $b=i$ and $\delta=10^{-12}$. The blue circles show the spectrum
of the perturbed operator \eqref{int.5a}, and the red ellipse is the image of
the symbol $p$.}
\label{fig1}
\end{figure}
In \cite{SjVo15b}, the authors proved that when the coupling constant $\delta$ is
bounded from above and from below by sufficiently negative powers of $N$,
then most eigenvalues of $P_{\delta}$, \eqref{int.5a},
are close to the ellipse $p(\mathbf{R})$ and follow a Weyl law, with probability
close to one, as the dimension $N$ gets large (cf. Figure \ref{fig1}).
\par
The methods used in \cite{SjVo15b} are essentially based on probabilistic
subharmonic estimates of $\ln |\det(P_{\delta}-z)|$ and complex analysis,
using in particular a counting theorem of \cite{Sj09b} (see also \cite{Ha06,HaSj08}).
However, this approach is not fine enough to give a detailed description of the
exceptional eigenvalues seen inside the ellipse in Figure \ref{fig1} and
we only obtain a logarithmic upper bound on the number of eigenvalues in this
region. To gain more information about these eigenvalues, we study the random measure
\begin{equation}
\Xi :=\sum_{z\in\sigma(P_{\delta})}\delta_z,
\end{equation}
where the eigenvalues are counted with multiplicity. In particular we are
interested in studying the first intensity measure of $\Xi$, which is the
positive measure $\nu$ defined by
\begin{equation}
\mathds{E} \left[ \Xi(\varphi)\right] = \int \varphi(z)\nu(dz),
\end{equation}
where $\varphi$ is a test function of class $\mathcal{C}_0$. The measure
$\nu$ contains information about the average density of eigenvalues,
and we will show in Theorem \ref{thm1} below that it admits a continuous
density with respect to the Lebesgue measure on $\mathbf{C}$, up to a small error
in the large $N$ limit.
\par
This approach
is more classical in the theory of random polynomials (cf. \cite{SZ03,BSZ00})
and random Gaussian analytic functions (cf. \cite{HoKrPeVi09,So00}). We
follow in particular the approach developed in \cite{Vo14}, which was
therein used to describe the average density of eigenvalues of a class of semiclassical differential
operators subject to small random perturbations.
\\
\par
The main result of this paper describes the average density of eigenvalues
in the interior of confocal ellipses. Let $p_{a,b}=p$ be as in \eqref{int.5}.
For any $r>0$ we define $\Sigma_r$ to be the convex hull of
$p_{ra,r^{-1}b}(\mathbf{R})$. We will see in Section \ref{sizz} that
$p_{ra,r^{-1}b}(\mathbf{R})$, for $ (|b|/|a|)^{1/2} \leq r < +\infty$, are confocal
ellipses and that they are in the interior of $\Sigma_{r_0}$, for every $r_0>r$.
Moreover, $p_{ra,r^{-1}b}(\mathbf{R})$ with $r= (|b|/|a|)^{1/2}$ is the
focal segment.
\par
We prove the following result.
\begin{theo}\label{thm1}
Let $P_{\delta}$ be as in \eqref{int.5a} and let $p_{a,b}=p$ be as in \eqref{int.5}.
Let $C\gg 1$ be arbitrary, but fixed (and not necessarily the same in the sequel).
Let $r_1=|b/a|^{1/2} +1/C$, let $\mathrm{e}^{-N/C} \leq \delta \ll 1$, $N\gg 1$
and let $r_0>0$ belong to the parameter range
\begin{equation}\label{eq1.1}
\begin{split}
& \frac{1}{C} \leq r_0 \leq 1- \frac{1}{N}, \\
& \frac{Nr_0^{N-1}}{\delta}(1-r_0)^2 +\delta N^3 \ll 1,
\end{split}
\end{equation}
so that $\delta N^3 \ll 1$. For $r>0$, let $\Sigma_r$ be the convex hull of
$p_{ra,r^{-1}b}(\mathbf{R})$. Then, for all
$\varphi\in\mathcal{C}_0(\mathring{\Sigma}_{(r_0-1/N)}\backslash{\Sigma}_{r_1} )$,
\begin{equation}\label{eq1}
\mathds{E} \left[ \sum_{z\in\sigma(P_{\delta})}\varphi(z)
\right] =
\int \varphi(z)\xi(z) L(dz)
+ \langle \mu_N, \varphi \rangle,
\end{equation}
for some $C\gg 1$. Here, the density $\xi$ is a continuous function
satisfying,
\begin{equation}\label{eq2}
\begin{split}
&\xi(z) = \frac{2}{\pi}\partial_{z}\partial_{\bar{z}}\ln K(z)
\left(1 +\mathcal{O}\!\left(\frac{N |\zeta_-|^{N-1}}{\delta }(1-|\zeta_-|)^2
+ \delta N^3
\right)\right), \\
&K(z)= \sum_{k=0}^{\infty}
\left|\frac{\zeta_-^{k+1} - \zeta_+^{k+1}}{a(\zeta_- - \zeta_+)}\right|^2,
\end{split}
\end{equation}
where $\zeta_{\pm}(z)$ are the two solutions of the equation $p_{a,b}(\zeta)=z$
for $z\in\Sigma_1 \backslash [-2\sqrt{ab},2\sqrt{ab}]$, chosen such that
$|\zeta_-| \geq |\zeta_+|$. The function $\partial_{z}\partial_{\bar{z}}\ln K(z)$
is smooth and strictly positive.
\par
Furthermore, $\mu_N$ is a Radon measure of total mass $\leq N\mathrm{e}^{-N^2}$,
i.e. $ |\langle \mu_N, \varphi \rangle | \leq N\mathrm{e}^{-N^2}\|\varphi\|_{\infty}$.
\end{theo}
Let us give some remarks on this result. We will show in Section \ref{sizz} that
for $p(\zeta_{\pm})=z\in\mathring{\Sigma}_1\backslash [-2\sqrt{ab},2\sqrt{ab}]$
we have that $|\zeta_+| < |b/a|^{1/2} < |\zeta_-| <1$. In fact we have that
$|\zeta_-| \leq r_0$ when $z\in\Sigma_{r_0}\backslash [-2\sqrt{ab},2\sqrt{ab}]$.
\\
\par
Secondly, for $r_0$ satisfying the first condition in
\eqref{eq1.1}, the function $[0,r_0]\ni r \mapsto r^{N-1}(1-r)^2$ is increasing. Hence,
the error term in \eqref{eq2} is small, since it is dominated by the term in the second
line of \eqref{eq1.1}. More precisely, it satisfies for $|\zeta_-|\leq r_0$
\begin{equation*}
\frac{N |\zeta_-|^{N-1}}{\delta }(1-|\zeta_-|)^2+ \delta N^3
\leq
\frac{Nr_0^{N-1}}{\delta}(1-r_0)^2 +\delta N^3.
\end{equation*}
Theorem \ref{thm1} shows that in the interior of the ellipse $p(\mathbf{R})$ (see
Figure \ref{fig1}) there is a non-vanishing continuous density of eigenvalues
whose leading term is independent of the dimension $N$ and depends only
on the symbol $p$.
\par
Furthermore, we note that the leading term of the density $\xi$ is related to
the Edelman-Kostlan formula (see for example \cite{HoKrPeVi09})
for the average density of the zeros of a Gaussian analytic function $g(z)$,
in the sense of \cite{HoKrPeVi09}, with covariance kernel $K(z)$, i.e.
\begin{equation*}
\mathds{E}[g(z)\overline{g(z)}] = K(z).
\end{equation*}
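\par
To make this leading term concrete, the following sketch (ours; the values of $a$, $b$,
the evaluation point, the series truncation and the difference step are assumptions,
chosen with $|b|<|a|$ so that the preceding discussion applies) evaluates $K$ from
\eqref{eq2} numerically and approximates $\frac{2}{\pi}\partial_z\partial_{\bar z}\ln K$
by a finite difference Laplacian, using $\partial_z\partial_{\bar z}=\frac{1}{4}\Delta$.
\begin{verbatim}
# Rough numerical evaluation (ours, assumed parameters) of the leading density:
# xi(z) ~ (2/pi) d_z d_zbar log K(z) = (1/(2 pi)) * Laplacian(log K)(z).
import numpy as np

a, b = 1.0, 0.25j

def K(z, terms=4000):
    zp, zm = sorted(np.roots([a, -z, b]), key=abs)   # a*zeta^2 - z*zeta + b = 0
    k = np.arange(terms)
    return np.sum(np.abs((zm**(k + 1) - zp**(k + 1)) / (a * (zm - zp)))**2)

def density(z, h=1e-3):
    lap = (np.log(K(z + h)) + np.log(K(z - h)) + np.log(K(z + 1j * h))
           + np.log(K(z - 1j * h)) - 4 * np.log(K(z))) / h**2
    return lap / (2 * np.pi)

print(density(0.3 + 0.5j))   # strictly positive away from the focal segment
\end{verbatim}
Near the boundary ellipse, where $|\zeta_-|\to 1$, the series converges slowly and the
truncation has to be increased accordingly.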
The above theorem, together with the result of \cite{SjVo15b}, is a generalisation
of the work done in the case where the unperturbed operator $P$ is given by
a large Jordan block, i.e. the case where $a=1$, $b=0$. This has already
been the subject of intense study:
M.~Hager and E.B.~Davies \cite{DaHa09} showed that with a sufficiently small
coupling constant
most eigenvalues of $P_{\delta}$ can be found near a circle, with probability close to $1$, as
the dimension of the matrix $N$ gets large. This result has been refined by one of the
authors in \cite{Sj15}, showing that, with probability close to $1$, most eigenvalues
follow an angular Weyl law. Furthermore, M.~Hager and E.B.~Davies \cite{DaHa09} give a probabilistic upper bound of order $\log N$ for the number of eigenvalues in the interior of a circle.
\par
A recent result by A.~Guionnet, P.~Matchett Wood and
O.~Zeitouni \cite{GuMaZe14} implies that when the coupling constant is
bounded from above and from below by (different) sufficiently negative powers of $N$, then
the normalized counting measure of eigenvalues of the randomly perturbed Jordan block converges weakly in probability to the uniform measure on $S^1$ as the dimension of the
matrix gets large.
\par
In \cite{SjVo15}, the authors show that in the case where $P$ is given by
a Jordan block matrix, the leading term of the average density of eigenvalues
is given by the density of the hyperbolic volume on the unit disk.
\par
A similar result has been obtained by C.~Bordenave and
M.~Capitaine in \cite{BoCa16}, where they allow for a more general class
of random matrices, however, with slower decay of the coupling
constant, as $N\gg 1$. In particular they show that the point process
$\Xi$ converges weakly inside some disc, in the limit $N\to\infty$, to
the point process given by
the zeros of a certain Gaussian analytic function (in the sense
of \cite{HoKrPeVi09}) on the Poincar\'e disc.
\\[2ex]
\textbf{Acknowledgements.} M.~Vogel was supported by the project
GeRaSic ANR-13-BS01-0007-01.
\section{Image of the symbol $p$}\label{sizz}
\setcounter{equation}{0}
It will be important to understand
the solutions of the characteristic equation $p(\xi)=z$. The discussion
that follows has been taken from \cite{SjVo15b} and is presented here
for the reader's convenience.
\\
\par
We recall that we have assumed for simplicity that
$|a|\ge |b|$. The case $|a|=|b|$ will be obtained as a limiting case
of the one when $|a|>|b|$, that we consider now. We write the symbol
$p$ \eqref{int.5} in the form
$$
f_{a,b}(\zeta )=a\zeta +b/\zeta, \quad \zeta=\mathrm{e}^{i\xi},
$$
and observe that when $r>0$
$$
f_{a,b}(\partial D(0,r))=f_{ar,b/r}(\partial D(0,1))
$$
which gives a family of confocal ellipses $E_r$. The length of the
major semi-axis of $E_r$ is equal to $|a|r+|b|/r=:g(r)$. $E_{r_1}$ is contained in the bounded domain which has $E_{r_2}$
as its boundary, precisely when $g(r_1)\le g(r_2)$. The function $g$
has a unique minimum at $r=r_\mathrm{min}=(|b|/|a|)^{1/2}$. $g$ is
strictly decreasing on $]0,r_\mathrm{min}]$ and strictly increasing on
$[r_\mathrm{min},+\infty [ $. It tends to $+\infty $ when $r\to 0$ and
when $r\to +\infty $. We have
$g_\mathrm{min}=g(r_\mathrm{min})=2(|a||b|)^{1/2}$ so
$E_{r_\mathrm{min}}$ is just the segment between the two focal points,
common to all the $E_r$. For $r\ne r_\mathrm{min}$, the map $\partial
D(0,r)\to E_r$ is a diffeomorphism. Let $r_1$ be the unique value in
$]0,1[$ for which $g(r_1)=|a|+|b|=g(1)$. We get the following result:
\begin{prop}\label{sizz1} Let $|b|<|a|$.
\begin{itemize}
\item
When $z$ is strictly inside the ellipse $E_1$ described above,
then both solutions of $f_{a,b}(\zeta )=z$ belong to $D(0,1)$.
\item When $z$ is on the ellipse, one solution is on $S^1$ and the
other belongs to $D(0,1)$.
\item When $z$ is in the exterior region to the ellipse, one solution
fulfils $|\zeta |>1$ and the other satisfies $|\zeta |<1$.
\end{itemize}
\end{prop}
\par In the case $|a|=|b|$, $E_1$ is just the segment between the two
focal points. In this case $r_\mathrm{min}=1$ and we get:
\begin{prop}\label{sizz2}
Assume that $|a|=|b|$. \begin{itemize}
\item If $z\in E_1$ then both solutions of
$f_{a,b}(\zeta )=z$ belong to $S^1$.
\item If $z$ is outside $E_1$, one solution is in $D(0,1)$ and the
other is in the complement of $\overline{D(0,1)}$.
\end{itemize}
\end{prop}
\begin{remark}\label{remh1}
Assuming that $0<|b|\leq |a|$, we observe that for
$z\in\mathbf{C}$ the two solutions, say $\zeta_{\pm}$ of $f_{a,b}(\zeta)=z$
are solutions of the equation
\begin{equation}
\zeta^2 -\frac{z}{a}\zeta + \frac{b}{a}=0,
\end{equation}
and they satisfy the relations
\begin{equation}\label{algrel}
\zeta_+\zeta_- = \frac{b}{a},
\quad
\zeta_+ + \zeta_- = \frac{z}{a}.
\end{equation}
Furthermore, we can fix a branch of the square root such that
$\zeta_+(z)$ and $\zeta_-(z)$ are holomorphic functions of $z$
in $\mathbf{C}\backslash [-2\sqrt{ab},2\sqrt{ab}]$.
\end{remark}
Throughout this text, we will work with the convention that
\begin{equation}\label{signcon}
|\zeta_+| \leq |\zeta_-|
\end{equation}
which in particular yields by the above discussion that when
$z$ is inside $E_r$, for $r\in [r_{\mathrm{min}}, +\infty[$, then
\begin{equation}\label{signcon2}
0< |\zeta_+| \leq \sqrt{|b/a|} \leq |\zeta_-| \leq r.
\end{equation}
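\par
As a quick numerical sanity check of Proposition \ref{sizz1} and of the convention
\eqref{signcon}--\eqref{signcon2}, one can solve the characteristic equation directly.
In the following lines (ours; the values of $a$, $b$ and the sample points are assumptions
for illustration) the first two points lie inside $E_1$ and the last two outside.
\begin{verbatim}
# Check (illustrative, assumed parameters): for z inside E_1 both roots of
# a*zeta + b/zeta = z lie in D(0,1), and |zeta_+| <= sqrt(|b/a|) <= |zeta_-|.
import numpy as np

a, b = 1.0, 0.25j
for z in [0.0, 0.3 + 0.5j, 1.2 + 0.1j, 3.0]:
    zp, zm = sorted(np.roots([a, -z, b]), key=abs)   # a*zeta^2 - z*zeta + b = 0
    print(z, abs(zp), np.sqrt(abs(b / a)), abs(zm),
          abs(zp * zm - b / a) < 1e-12)              # Vieta: zeta_+ * zeta_- = b/a
\end{verbatim}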
\section{Preparations for the density of eigenvalues in the interior}
In this section we are interested in the density of eigenvalues in the
interior of the ellipse $p_{a,b}(\mathbf{R})$, where $p_{a,b}=p$ denotes the
principal symbol of the unperturbed operator $P$, cf. \eqref{int.3}, \eqref{int.5}.
We study the first moment of linear statistics of the point process
given by the eigenvalues of $P_{\delta}$, see \eqref{int.5a}, i.e.
\begin{equation}\label{eq10.60}
I_{\varphi} = \mathds{E} \left[ \sum_{z\in\sigma(P_{\delta})}\varphi(z)
\right], \quad
\varphi \in \mathcal{C}_0(\Omega),
\end{equation}
where $\Omega$ is some open subset in the interior of
$\mathrm{conv}(p_{a,b}(\mathbf{R}))\backslash[-2\sqrt{ab},2\sqrt{ab}]$,
where $\mathrm{conv}(\cdot)$ denotes the convex
hull of a set.
\par
W.~Bordeaux-Montrieux \cite{BM} noted that the Markov inequality implies
that if $C_1>0$ is large enough, then
for the Hilbert-Schmidt norm of $Q_{\omega}$ (as in \eqref{int.5a}),
\begin{equation}\label{grpp.0b}
\mathds{P}\left[
\Vert Q_{\omega}\Vert_\mathrm{HS}\le C_1N
\right] \ge 1-e^{-N^2}.
\end{equation}
Since the number of eigenvalues of $P_{\delta}$ in the support
of $\varphi$ is bounded from above by $N$, it follows from \eqref{grpp.0b} that
\begin{equation}\label{eq10.61}
\begin{split}
&I_{\varphi} = \mathds{E} \left[ \mathds{1}_{B_{\mathbf{C}^{N^2}}(0,C_1N)}(Q) \sum_{z\in\sigma(P_{\delta})}\varphi(z)
\right] + \langle \mu_N, \varphi \rangle, \\
& |\langle \mu_N, \varphi \rangle | \leq N\mathrm{e}^{-N^2}\|\varphi\|_{\infty}.
\end{split}
\end{equation}
Here, we identify the random matrix $Q_{\omega}$ (cf.\ \eqref{int.5a}) with
a random vector $Q\in\mathbf{C}^{N^2}$. Furthermore, $\mu_N$ is a Radon measure
of total mass $\leq N\mathrm{e}^{-N^2}$.
\par
After the reduction to \eqref{eq10.61}, it is sufficient to work with the assumption
that the random vector $Q$ is restricted to a ball of radius $C_1N$, i.e.
\begin{equation}
\|Q\|_2\leq C_1 N.
\end{equation}
Note that this assumption is equivalent to the assumption that the Hilbert-Schmidt
norm of the random matrix $Q_{\omega}$ is bounded, more precisely that
\begin{equation}\label{eqa1}
\|Q\|_{HS}\leq C_1 N.
\end{equation}
Next, we define for $r>0$
\begin{equation}\label{eq10.1}
\Sigma_{r}:= \mathrm{conv}(p_{ar,br^{-1}}(\mathbf{R})).
\end{equation}
We let
\begin{equation}\label{eq10.1.5}
\Omega\Subset\mathring{\Sigma}_1\backslash
[-2\sqrt{ab},2\sqrt{ab}],
\end{equation}
be open, relatively compact and connected. It
may depend on $N$ (to be specified later on) but will
avoid a fixed neighbourhood of the focal segment. Moreover,
let $W=B(0,C_1N)$ for $C_1>0$ large enough such that
\eqref{grpp.0b} holds. By Remark \ref{remh1} we see that by excluding
the focal segment in \eqref{eq10.1.5} we have that $\zeta_{\pm}(z)$,
the solutions to the characteristic equation, given by the symbol \eqref{int.5},
\begin{equation*}
a\zeta + b\zeta^{-1} = z,
\end{equation*}
are holomorphic functions of $z$.
\\
\par
In the following we write for $\mu\in\mathbf{N}$
\begin{equation}\label{eq10.2.1}
F_{\mu+1}(t) = 1 + t + \dots + t^{\mu}
, \quad 0\leq t \leq 1.
\end{equation}
As in \cite{SjVo15b}, we work under the hypothesis that
\begin{equation}\label{grpp.1}
\delta N F_N(|\zeta_-|) \ll 1.
\end{equation}
Notice that this is fulfilled for all $z$ inside $E_1=p(\mathbf{R})$, if we make
the even stronger assumption
\begin{equation}\label{grpp.2}
\delta N^2 \ll 1.
\end{equation}
(Recall that $N\gg 1$).
We have shown in \cite{SjVo15b} that assuming \eqref{grpp.1}, \eqref{eqa1}
we can identify the eigenvalues of
$P_{\delta}$ in $\Omega$ with the zeros of $g(z,Q)$, a holomorphic function
on $\Omega\times W$. Note that since there are at most $N$ eigenvalues,
we have for every $Q\in W$ that $g(\cdot,Q)\not\equiv 0$. Furthermore,
see \cite[Formula (8.18)]{SjVo15b}, $g$ is given by
\begin{equation}\label{eq10.2}
g(z,Q) = g_0(z) - \delta (Q|\overline{Z})+ T(z,Q;\delta,N),
\end{equation}
where $Z$ is given by
\begin{equation}\label{grpp.14}
\begin{split}
Z &= \left(
\frac{\zeta_+^{N+1-j} - \zeta_-^{N+1-j}}{a(\zeta_+ - \zeta_-)}
\frac{\zeta_+^{k} - \zeta_-^{k}}{a(\zeta_+ - \zeta_-)}
\right)_{1\leq j,k\leq N} \\
&=\left( a^{-2}
F_{N+1-j}(\zeta_+/\zeta_-)F_{k}(\zeta_+/\zeta_-) \zeta_-^{N-j+k-1}
\right)_{1\leq j,k\leq N},
\end{split}
\end{equation}
and
\begin{equation}\label{eq10.3}
g_0(z) = \frac{\zeta_-^{N+1} - \zeta_+^{N+1}}{a(\zeta_- - \zeta_+)}
= \frac{\zeta_-^N}{a} F_{N+1}(\zeta_+/\zeta_-).
\end{equation}
Moreover,
\begin{equation}\label{eq10.3.5}
|T(z,Q)|=|T(z,Q;\delta,N)|=\mathcal{O}(1)(\delta N F_N(|\zeta_-|^2))^2.
\end{equation}
We will frequently write $|\cdot |$ for the Hilbert-Schmidt norm and,
until further notice, we write $F_{\mu}=F_{\mu}(\zeta_+/\zeta_-)$.
By \eqref{grpp.14}, we get that
\begin{equation}\label{eq10.3.4}
|Z| =|a|^{-2} \left(\sum_{j,k=1}^N|\zeta_-|^{2(N-j+k-1)}|F_{N+1-j}|^2|F_{k}|^2\right)^{\frac{1}{2}}
= |a|^{-2}\sum_{\mu =0 }^{N-1}|\zeta_-|^{2\mu}|F_{\mu+1}|^2.
\end{equation}
For $z\in\Omega$ we have $|\zeta_+|/|\zeta_-|\leq 1-1/C < 1$ and hence
$|F_k(\zeta_+/\zeta_-)|\asymp 1$. If we also assume $z\in\Sigma_{r_0}$,
$0<r_0\leq 1-1/N$, then
\begin{equation}\label{eq10.3.6}
|Z| \asymp F_N(|\zeta_-|^2) \asymp \frac{1}{1-|\zeta_-|^2} \asymp \frac{1}{1-|\zeta_-|},
\end{equation}
where we used as well that $\sqrt{|b/a|} \leq |\zeta_-| \leq 1- 1/N$
(see \eqref{signcon2},\eqref{eq10.8}, \eqref{eq10.9}), and that
\begin{equation}
F_N(|\zeta_-|^2) = \frac{1}{1-|\zeta_-|^2}(1 - |\zeta_-|^{2(N+1)})
\asymp \frac{1}{1-|\zeta_-|^2}.
\end{equation}
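\par
The identity \eqref{eq10.3.4} and the order of magnitude \eqref{eq10.3.6} are easy to test
numerically. The following sketch (ours; $a$, $b$, $N$ and the point $z$ are assumed example
values) builds the matrix $Z$ of \eqref{grpp.14} and compares its Hilbert--Schmidt norm with
the series in \eqref{eq10.3.4} and with $1/(1-|\zeta_-|)$.
\begin{verbatim}
# Numerical check (ours, assumed parameters) of (grpp.14) and (eq10.3.4).
import numpy as np

a, b, N, z = 1.0, 0.25j, 60, 0.3 + 0.5j
zp, zm = sorted(np.roots([a, -z, b]), key=abs)

Z = np.array([[(zp**(N + 1 - j) - zm**(N + 1 - j)) * (zp**k - zm**k)
               / (a * (zp - zm))**2
               for k in range(1, N + 1)] for j in range(1, N + 1)])

F = lambda mu, t: sum(t**i for i in range(mu))       # F_mu(t) = 1 + t + ... + t^{mu-1}
series = sum(abs(zm)**(2 * mu) * abs(F(mu + 1, zp / zm))**2
             for mu in range(N)) / abs(a)**2

# the first two numbers agree; the third gives the order of magnitude (eq10.3.6)
print(np.linalg.norm(Z), series, 1.0 / (1.0 - abs(zm)))
\end{verbatim}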
Recall that $\Omega$ in \eqref{eq10.1.5} avoids a fixed neighborhood of
the focal segment of the ellipse $E_1=p(\mathbf{R})$. More precisely, in
view of the discussion in Section \ref{sizz}, we assume that
\begin{equation}\label{eq10.4}
\begin{cases}
\Omega\Subset\mathring{\Sigma}_{1}\backslash \Sigma_{r_1}, \\
r_1= \sqrt{|b/a|} + 1/C, ~ C\gg 1.
\end{cases}
\end{equation}
Using \eqref{eq10.4}, it follows that the middle term in \eqref{eq10.2}
is bounded in modulus by
\begin{equation}\label{eq10.5}
\delta |Q| |Z| \leq \mathcal{O}(1)(C_1\delta N F_N(|\zeta_-|^2))
\end{equation}
where we assumed that $|Q|\leq C_1 N$ (cf.\ \eqref{eqa1}). Moreover, we
assume that the
first term in \eqref{eq10.2} is smaller than the bound on the middle term, i.e.
\begin{equation}\label{eq10.6}
|g_0(z)| \ll C_1\delta N F_N(|\zeta_-|^2).
\end{equation}
Using that $|F_k(\zeta_+/\zeta_-)|\asymp 1$, we see that \eqref{eq10.6} is
implied by the assumption
\begin{equation}\label{eq10.7}
|\zeta_-|^N \ll C_1\delta N F_N(|\zeta_-|^2).
\end{equation}
More precisely, we will assume that $z$ satisfying \eqref{eq10.4} is
such that $\zeta_-(z)\in D(0,r_0)$ with
\begin{equation}\label{eq10.8}
r_0^N \ll C_1\delta N F_N(r_0^2), \quad r_0\leq 1 - \frac{1}{N}.
\end{equation}
Observe that the function $r^N/F_N(r^2)$ is strictly increasing on
the interval $[0,1-N^{-1}]$. Thus, the inequality \eqref{eq10.7}
is preserved if we replace $r_0$ by $|\zeta_-|$, for $|\zeta_-|\leq r_0$.
\par
Combining the assumptions \eqref{eq10.4} and \eqref{eq10.7}, we get
\begin{equation}\label{eq10.9}
\begin{cases}
z\in\Omega\Subset\Sigma_{r_0,r_1}:=\mathring{\Sigma}_{r_0}\backslash \Sigma_{r_1}, \\
r_0>0 \text{ satisfies \eqref{eq10.8}}, \\
r_1= \sqrt{|b/a|} + 1/C, ~ C\gg 1.
\end{cases}
\end{equation}
By \eqref{grpp.1}, we see that the bound on $T$ is much smaller than the
upper bound on the middle term in \eqref{eq10.2}, i.e.
\begin{equation}\label{eq10.10}
(\delta NF_{N+1}(|\zeta_-|^2))^2 \ll \delta NF_{N}(|\zeta_-|^2).
\end{equation}
Here we used as well that $F_{N+1}(|\zeta_-|^2) \asymp F_{N}(|\zeta_-|^2)$.
From \eqref{eq10.2}, \eqref{eq10.3.5} and the Cauchy inequalities, we
get
\begin{equation}\label{eq10.11}
d_Qg(z,Q) = -\delta Z\cdot dQ + \mathcal{O}(\delta^2F_{N+1}^2(|\zeta_-|^2) N)
\end{equation}
where the norm of the first term is
$\asymp \delta |Z| \asymp \delta F_N(|\zeta_-|^2) \gg
\delta^2F_{N+1}^2(|\zeta_-|^2) N$. Here, we used \eqref{grpp.1},
\eqref{eq10.3.6}. Technically, we need to apply the Cauchy inequalities
in a ball of radius $\eta C_1N$ for some $\eta >1$, but we have
room for that if we choose $C_1$ in \eqref{grpp.0b} slightly larger
to begin with.
\par
Recall that for every $Q\in W$, $g(\cdot,Q)\not\equiv 0$. It has then been shown
in \cite{Vo14,SjVo15}, that if
\begin{equation*}
g(z,Q) = 0 \Rightarrow
d_Qg(z,Q) \neq 0
\end{equation*}
then
\begin{equation}\label{eq10.11.1}
\Gamma := \left\{
(z,Q)\in\Omega\times W; g(z,Q)=0
\right\}
\end{equation}
is a smooth complex hypersurface in $\Omega\times W$ and
\begin{equation}\label{eq10.11.2}
K_{\varphi} =
\mathds{E} \left[ \mathds{1}_{B(0,C_1N)}(Q) \sum_{z\in\sigma(P_{\delta})}\varphi(z)
\right]
=
\int_{\Gamma}\varphi(z)\mathrm{e}^{-Q^*Q}\,\frac{j^*(d\overline{Q}\wedge dQ)}{(2i)^{N^2}},
\end{equation}
where $j^*$ denotes the pull-back by the regular embedding
$j:\Gamma \to \Omega\times W$ and
$$
d\overline{Q}\wedge dQ = d\overline{Q}_1\wedge dQ_1 \wedge \dots
\wedge d\overline{Q}_{N^2}\wedge dQ_{N^2},
$$
which is a complex $(N^2,N^2)$-form on $\Omega\times W$. Thus,
$(2i)^{-N^2}j^*(d\overline{Q}\wedge dQ)$ is a non-negative differential form on $\Gamma$
of maximal degree.
\\
\par
Next, we identify $Z(z)$ in \eqref{grpp.14} with a vector in
$\mathbf{C}^{N^2}$ and write
\begin{equation}\label{eq10.12}
Q = Q(\alpha) =\alpha_1\overline{Z}(z) + \alpha', \quad
\alpha_1\in\mathbf{C}, ~\alpha'\in \overline{Z}(z)^{\perp}
\end{equation}
and we identify $\overline{Z}(z)^{\perp}$ unitarily with $\mathbf{C}^{N^2-1}$ by
means of an orthonormal basis $e_2(z),\dots,e_{N^2}(z)$, so that
$\alpha'=\sum_2^{N^2} \alpha_je_j(z)$. Then, we have
\begin{equation}\label{eq10.12.1}
Q=Q(\alpha,z) = \alpha_1\overline{Z}(z) + \sum_2^{N^2} \alpha_je_j(z)
\end{equation}
and we identify $g(z,Q)$ with $\tilde{g}(z,\alpha)=g(z,Q(\alpha,z))$ which is
holomorphic in $\alpha$ for every
fixed $z$ and, by \eqref{eq10.2}, \eqref{eq10.3.5}, we have that
\begin{equation}\label{eq10.13}
\begin{split}
&\tilde{g}(z,\alpha) = g_0(z) -\delta |Z|^2\alpha_1 +
T\!\left(z,\alpha_1\overline{Z}(z)+ \sum_2^{N^2}\alpha_je_j(z)\right) \\
&\partial_{\alpha_1}\tilde{g}(z,\alpha) = -\delta |Z|^2 + \mathcal{O}(\delta^2F_{N+1}^3 N).
\end{split}
\end{equation}
In particular, by \eqref{grpp.1}, \eqref{eq10.3.6}, we see that
\begin{equation}\label{eq10.14}
|\partial_{\alpha_1}\tilde{g}(z,\alpha)| \asymp \delta F_{N+1}^2(|\zeta_-|^2).
\end{equation}
From \eqref{eq10.13},\eqref{eq10.3.5} and the Cauchy-inequalities, we obtain
\begin{equation}\label{eq10.15}
|\partial_{\alpha_j}\tilde{g}(z,\alpha)| =\mathcal{O}(\delta^2F_{N+1}^2 N),
\quad j=2,\dots,N^2.
\end{equation}
The Cauchy-inequalities applied to \eqref{eq10.3} together with \eqref{eq10.3.5},
\eqref{eq10.2} yield
\begin{equation}\label{eq10.16}
\partial_{z}g(z,Q) = \partial_z g_0(z) - \delta (Q| \overline{\partial_z Z})
+ \frac{\mathcal{O}(1)(\delta N F_{N+1}(|\zeta_-|^2))^2}{\mathrm{dist}(z,\partial\overline{\Sigma}_{r_0,r_1})}
\end{equation}
with
\begin{equation}\label{eq10.17}
\partial_z g_0(z)= (\partial_z \ln \zeta_- ) \frac{\zeta_-^N}{a}\left[
NF_{N+1}(\zeta_+/\zeta_-) -2(\zeta_+/\zeta_-)F_{N+1}'(\zeta_+/\zeta_-)\right].
\end{equation}
Here, we used as well \eqref{algrel}: since $\zeta_+\zeta_-=b/a$ does not depend on $z$,
we have $\partial_z\ln\zeta_+=-\partial_z\ln\zeta_-$, and hence
$\partial_z (\zeta_+/\zeta_-) = -2(\zeta_+/\zeta_- )\partial_z \ln \zeta_-$.
\begin{remark}
Note that in \eqref{eq10.16}
\begin{equation}\label{eq10.17.5}
\mathrm{dist}(z,\partial\overline{\Sigma}_{r_0,r_1}) \geq
\frac{\min(r_0-|\zeta_-|, |\zeta_-|-r_1)}{C}
\geq \frac{r_0-|\zeta_-|}{C},
\end{equation}
for some (not necessarily equal) $C\gg 1$.
\end{remark}
For $Q$ in \eqref{eq10.12.1}, we have the following result:
\begin{lemma}\label{lem10.1} Let $Q(\alpha)\in B(0,C_1N)$ and $z\in\Omega$
as in \eqref{eq10.9}. Then,
\begin{equation}\label{eq10.18}
\begin{split}
\partial_{z}\tilde{g}(z,\alpha) = \partial_z g_0(z) - \delta \alpha_1 \partial_z |Z|^2
&+ \frac{\mathcal{O}(1)(\delta N F_{N}(|\zeta_-|^2))^2}{\mathrm{dist}(z,\partial\overline{\Sigma}_{r_0,r_1})} \\
&+\mathcal{O}(\delta^2F_{N}(|\zeta_-|^2)^2N)\left| \sum_2^{N^2}\alpha_i \partial_z e_i(z) \right|,
\end{split}
\end{equation}
\begin{equation}\label{eq10.19}
\begin{split}
\partial_{\overline{z}}\tilde{g}(z,\alpha) = - \delta \partial_{\overline{z}} |Z|^2\alpha_1
+\mathcal{O}(\delta^2F_{N}(|\zeta_-|^2)^2N)\left| \alpha_1\overline{\partial_z Z} +
\sum_2^{N^2}\alpha_i \partial_{\overline{z}} e_i(z) \right|.
\end{split}
\end{equation}
\end{lemma}
\begin{proof}
Using \eqref{eq10.13}, one computes
\begin{equation}
\begin{split}
&\partial_z \widetilde{g} \\
&= \partial_z g_0 - \delta\alpha_1\partial_zZ\cdot \overline{Z}
+ \partial_z(T(z,Q(\alpha,z))) \\
&=\partial_z g_0 -\delta\alpha_1 \partial_z Z\cdot\overline{Z} +(\partial_zT)(z,Q(\alpha,z))
+ (d_QT)(z,Q(\alpha,z))\cdot \partial_zQ(\alpha,z) \\
&=\partial_z g_0 -\delta\alpha_1 \partial_z Z\cdot\overline{Z} +(\partial_zT)(z,Q(\alpha,z))
+ (d_QT)(z,Q(\alpha,z))\cdot \sum_2^{N^2}\alpha_j\partial_ze_j(z),
\end{split}
\end{equation}
where, to obtain the last equality, we used \eqref{eq10.12} and the fact that $\overline{Z}(z)$
is antiholomorphic in $z$. The Cauchy-inequalities together with \eqref{eq10.3.5}
yield that
\begin{equation}
(\partial_zT)(z,Q(\alpha,z)) = \mathcal{O}(1)
\frac{(\delta NF_N)^2}{\mathrm{dist}(z,\partial\overline{\Sigma}_{r_0,r_1})},
\end{equation}
as well as
\begin{equation}
(d_QT)(z,Q(\alpha,z))\cdot \sum_2^{N^2}\alpha_j\partial_ze_j(z)
= \mathcal{O}(\delta^2N^2F_N)\left|\sum_2^{N^2}\alpha_j\partial_ze_j(z) \right|,
\end{equation}
and we conclude \eqref{eq10.18}. Similarly, we obtain \eqref{eq10.19}.
\end{proof}
Continuing, recall that we work under assumptions \eqref{grpp.1} and
\eqref{eq10.9} (recall as well that the last one implies \eqref{eq10.6}
and \eqref{eq10.7}). We use \eqref{eq10.6}, \eqref{eq10.7} and apply Rouch\'e's Theorem
to \eqref{eq10.13}, and we see that for $C_1>0$ large enough and for $|\alpha'|<C_1N$,
the equation
\begin{equation}\label{eq10.21}
\tilde{g}(z,\alpha_1,\alpha') = 0
\end{equation}
has exactly one solution
\begin{equation}\label{eq10.22}
\alpha_1=f(z,\alpha') \in D\left(0,\frac{C_1 N}{F_N(|\zeta_-|^2)}\right).
\end{equation}
Note that this yields the entire hypersurface \eqref{eq10.11.1} for
$\Omega$ satisfying \eqref{eq10.9}, since $\tilde{g}\neq 0$ for $\alpha_1$ outside the above
disc, which follows from \eqref{eq10.13},\eqref{eq10.3.5} and
\eqref{eq10.6}.
Moreover, $f$ satisfies
\begin{equation}\label{eq10.23}
f(z,\alpha')= \frac{g_0(z)}{\delta |Z|^2} + \mathcal{O}(1)\delta N^2
= \mathcal{O}\left( \frac{g_0(z)}{\delta F_N(|\zeta_-|^2)^2} +\delta N^2\right).
\end{equation}
Differentiating \eqref{eq10.21} with respect to $z$ and $\overline{z}$,
we obtain
\begin{equation}\label{eq10.24}
\partial_z\tilde{g} + \partial_{\alpha_1}\tilde{g}\cdot\partial_z f = 0,
\quad
\partial_{\overline{z}}\tilde{g} + \partial_{\alpha_1}\tilde{g}\cdot\partial_{\overline{z}} f = 0.
\end{equation}
This implies that
\begin{equation}\label{eq10.25}
\partial_z f = -(\partial_{\alpha_1}\tilde{g})^{-1}\partial_z\tilde{g} , \quad
\partial_{\overline{z}} f = -(\partial_{\alpha_1}\tilde{g})^{-1}\partial_{\overline{z}}\tilde{g}.
\end{equation}
Recall from \eqref{eq10.13} that $\tilde{g}$ is holomorphic in $\alpha_1,\dots,\alpha_{N^2}$
and so we see that $f$ is holomorphic in $\alpha_2,\dots,\alpha_{N^2}$.
Applying $\partial_{\alpha_j}$, $j=2,\dots,N^2$, to \eqref{eq10.21}, we obtain
\begin{equation}\label{eq10.26}
\partial_{\alpha_j}f = -(\partial_{\alpha_1}\tilde{g})^{-1}\partial_{\alpha_j}\tilde{g},
\quad j=2,\dots,N^2.
\end{equation}
Using \eqref{eq10.13} in the form
\begin{equation}\label{eq10.27}
\partial_{\alpha_1}\tilde{g} = - \delta |Z|^2(1+\mathcal{O}(\delta F_{N+1}(|\zeta_-|^2)N)),
\end{equation}
and by Lemma \ref{lem10.1}, \eqref{eq10.25}, we obtain
\begin{equation}\label{leq10.28}
\begin{split}
\partial_z f =&
\frac{(1+\mathcal{O}(\delta F_{N+1}(|\zeta_-|^2)N))}{\delta |Z|^2}
\bigg[ \partial_z g_0(z) - \delta (\partial_z|Z|^2)f \\
&+\frac{\mathcal{O}(1)(\delta N F_{N+1}(|\zeta_-|^2))^2}{\mathrm{dist}(z,\partial\overline{\Sigma}_{r_0,r_1})}
+\mathcal{O}(\delta^2F_{N+1}^2(|\zeta_-|^2)N)\left| \sum_2^{N^2}\alpha_i \partial_z e_i(z) \right|
\bigg],
\end{split}
\end{equation}
and
\begin{equation}\label{leq10.29}
\begin{split}
\partial_{\overline{z}} f = &
\frac{(1+\mathcal{O}(\delta F_{N+1}(|\zeta_-|^2)N))}{\delta |Z|^2}
\bigg[ - \delta (\partial_{\overline{z}}|Z|^2)f \\
&+\mathcal{O}(\delta^2F_{N+1}^2(|\zeta_-|^2)N)\left| f\overline{\partial_z Z}+
\sum_2^{N^2}\alpha_i \partial_{\overline{z}} e_i(z) \right|
\bigg].
\end{split}
\end{equation}
Furthermore, using \eqref{eq10.15} and \eqref{eq10.26},
we get
\begin{equation}\label{leq10.30}
\partial_{\alpha_j}f= \mathcal{O}(1)\frac{\delta^2N F_{N+1}^2(|\zeta_-|^2)}{\delta F_N^2(|\zeta_-|^2)}
= \mathcal{O}(\delta N), \quad
j=2,\dots,N^2.
\end{equation}
\section{Choosing appropriate coordinates}
In the following we adopt the strategy developed in \cite[Section 5]{SjVo15}: The next
step is to find an appropriate orthonormal basis $e_1(z),\dots,e_{N^2}(z) \in\mathbf{C}^{N^2}$
with
\begin{equation}\label{eq10.31}
e_1(z)= \frac{\overline{Z}(z)}{|Z(z)|},
\end{equation}
such that we obtain a good control over the terms
$| \sum_2^{N^2}\alpha_i\partial_ze_i(z)|$,
$|\sum_2^{N^2}\alpha_i\partial_{\overline{z}}e_i(z)|$ and such that
the differential form $dQ_1\wedge \dots\wedge dQ_{N^2}|_{\alpha_1=f(z,\alpha')}$
can be expressed easily up to small errors.
\begin{prop}\label{prop10.1}
Let $z_0\in\Sigma_{r_0-N^{-1},r_1}$.
There exists an orthonormal basis $e_1(z),\dots,e_{N^2}(z)$ in $\mathbf{C}^{N^2}$ which
depends smoothly on $z$ in a small neighbourhood of $z_0$ in
$\mathbf{C}\backslash [-2\sqrt{ab},2\sqrt{ab}]$ such that
\begin{equation*}
\begin{split}
&1) \quad e_1(z)= \frac{\overline{Z}(z)}{|Z(z)|}, \\
&2) \quad \mathbf{C} e_1(z_0)\oplus \mathbf{C} e_2(z_0) =
\mathbf{C} \overline{Z}(z_0)\oplus \mathbf{C} \overline{\partial_zZ}(z_0), \\
& 3) \quad e_j(z) -e_j(z_0) = \mathcal{O}((z_0-z)^2), ~ j=3,\dots,N^2,
\text{ uniformly w.r.t. }(z,z_0).
\end{split}
\end{equation*}
\end{prop}
\begin{proof} The proof is identical, mutatis mutandis, to the proof of
Proposition 5.1 in \cite{SjVo15}.
\end{proof}
As remarked after the proof of Proposition 5.1 in \cite{SjVo15},
we can make the following choice:
\begin{equation}\label{eq10.32}
e_2(z)= |f_2(z)|^{-1}f_2(z), \quad
f_2(z) = \overline{\partial_z Z(z)} -
\sum_{j\neq 2} (\overline{\partial_z Z(z)}|e_j(z))e_j(z),
\end{equation}
so that for $z=z_0$,
\begin{equation}\label{eq10.33}
f_2(z_0) = \overline{\partial_zZ(z_0)} - \frac{(Z(z_0)|\partial_z Z(z_0))}{|Z(z_0)|^2}
\overline{Z(z_0)}.
\end{equation}
\begin{prop}\label{prop10.2}
For all $z\in\Sigma_1\backslash [-2\sqrt{ab},2\sqrt{ab}]$, we have
\begin{equation}\label{eq10.34}
|\partial_z Z(z)|^2 - \frac{|(Z(z)|\partial_z Z(z))|^2}{|Z(z)|^2}
= 2K_N(z)^2\partial_{z}\partial_{\bar{z}}\ln K_N(z),
\end{equation}
where
\begin{equation}\label{eq10.35}
K_N(z)= \sum_{\mu=0}^{N-1}
\left|\frac{\zeta_-^{\mu+1} - \zeta_+^{\mu+1}}{a(\zeta_- - \zeta_+)}\right|^2
=\frac{1}{|a|^2}\sum_{\mu=0}^{N-1}|\zeta_-|^{2\mu}\, |F_{\mu+1}(\zeta_+/\zeta_-)|^2.
\end{equation}
\end{prop}
Before giving the proof of this proposition, let us note that by \eqref{eq10.3.4} $K_N=|Z|$.
\begin{proof}
Until further notice, we write $F_n = F_n(\zeta_+/\zeta_-)$.
First, use \eqref{grpp.14}, in the form
\begin{equation*}
a^2 Z_{j,k} = \zeta_-^{N-j+k-1}F_{N-j+1}F_k
= \zeta_-^{\mu+\nu}F_{\mu+1}F_{\nu+1},
\end{equation*}
with $\mu = N-j$, $\nu =k-1$ and $\mu,\nu\in\{0,\dots,N-1\}$,
to compute that
\begin{equation*}
\begin{split}
\frac{a^{2}}{\partial_z\ln\zeta_-}\partial_zZ_{j,k}
= \zeta_-^{\mu+\nu} F_{\mu+1} F_{\nu+1}
\cdot\left[ (\mu+\nu) - L_{\mu+1} - L_{\nu+1}
\right],
\end{split}
\end{equation*}
where $L_{n}:=\frac{2\zeta_+}{\zeta_-}\partial_t\ln F_{n}(t)|_{t=\zeta_+/\zeta_-}$. Hence,
one obtains from the above expression and from \eqref{grpp.14} that
\begin{equation}\label{eq10.36}
\frac{|a|^4|(\partial_z Z|Z)|}{|\partial_z\ln\zeta_-|}=
\left|\sum_{\mu,\nu =0}^{N-1} |\zeta_-|^{2(\mu+\nu)}|F_{\mu+1}F_{\nu+1}|^2
[(\mu+\nu)-L_{\mu+1} -L_{\nu+1} ]\right|.
\end{equation}
Using \eqref{eq10.3.4} and a change of index, we obtain that
\eqref{eq10.36} is equal to
\begin{equation*}
\begin{split}
&2\left|\sum_{\nu =0}^{N-1}|\zeta_-|^{2\nu}|F_{\nu+1}|^2\sum_{\mu =0}^{N-1} |\zeta_-|^{2\mu}
|F_{\mu+1}|^2
[\mu-L_{\mu+1}]\right| \\
& =2|a|^2 |Z|\left|\sum_{\mu=0}^{N-1} |\zeta_-|^{2\mu}|F_{\mu+1}|^2
[\mu-L_{\mu+1} ]\right|,
\end{split}
\end{equation*}
so
\begin{equation}\label{eq10.36.1}
\frac{|a|^4|(\partial_z Z|Z)|}{|\partial_z\ln\zeta_-| |Z|}
=2|a|^2\left|\sum_{\mu=0}^{N-1} |\zeta_-|^{2\mu}|F_{\mu+1}|^2
[\mu-L_{\mu+1} ]\right|.
\end{equation}
Similarly,
\begin{equation}\label{eq10.37}
\frac{|a|^4 |\partial_z Z|^2}{|\partial_z\ln\zeta_-|^2}=
\sum_{\mu,\nu =0}^{N-1} |\zeta_-|^{2(\mu+\nu)}|F_{\mu+1}F_{\nu+1}|^2
|(\mu+\nu)-L_{\mu+1} -L_{\nu+1} |^2.
\end{equation}
Combining \eqref{eq10.36.1}, \eqref{eq10.37}, we obtain
\begin{equation}\label{eq10.37.1}
\begin{split}
&\frac{|a|^4}{|\partial_z\ln\zeta_-|^2}
\left(|\partial_z Z|^2-\frac{|(\partial_z Z|Z)|^2}{|Z|^2}\right) \\
& = \sum_{\mu,\nu =0}^{N-1} |\zeta_-|^{2(\mu+\nu)}|F_{\mu+1}F_{\nu+1}|^2
\big[
|(\mu+\nu)-L_{\mu+1} -L_{\nu+1} |^2 \\
&\phantom{..........................................................}
- 4(\mu-L_{\mu+1})(\nu-\overline{L_{\nu+1}})\big].
\end{split}
\end{equation}
By permuting $\mu,\nu$ we get the same sum and after taking the
average of the two expressions we may replace
$ - 4(\mu-L_{\mu+1})(\nu-\overline{L_{\nu+1}})$ by its real part. Then,
\begin{equation}\label{eq10.37.2}
\begin{split}
&|(\mu+\nu)-L_{\mu+1} -L_{\nu+1} |^2 - 4\mathrm{Re}(\mu-L_{\mu+1})(\nu-\overline{L_{\nu+1}})\\
&=|(\mu-\nu)+(L_{\nu+1}-L_{\mu+1})|^2 \\
&= \left| (\mu +1) \frac{1 + t^{\mu+1}}{1 - t^{\mu+1}} - (\nu +1) \frac{1 + t^{\nu+1}}{1 - t^{\nu+1}}
\right|^2_{t=\zeta_+/\zeta_-},
\end{split}
\end{equation}
where we also used that by the definition of $L_{\mu}$ above and
\eqref{eq10.2.1}
\begin{equation*}
\begin{split}
L_{\nu+1}-L_{\mu+1} &=
2\frac{\zeta_+}{\zeta_-}[\partial_t \ln (1 -t^{\nu+1}) - \partial_t\ln(1-t^{\mu+1} )]_{t=\zeta_+/\zeta_-}\\
&= \frac{2(\mu+1)t^{\mu+1}}{1-t^{\mu+1}}- \frac{2(\nu+1)t^{\nu+1}}{1-t^{\nu+1}}
\bigg|_{t=\zeta_+/\zeta_-}.
\end{split}
\end{equation*}
Combining this with \eqref{eq10.37.1}, we obtain
\begin{equation}\label{eq10.38}
\begin{split}
&\frac{|a|^4}{|\partial_z\ln\zeta_-|^2}
\left(|\partial_z Z|^2-\frac{|(\partial_z Z|Z)|^2}{|Z|^2}\right) \\
& = \sum_{\mu,\nu =0}^{N-1} |\zeta_-|^{2(\mu+\nu)}|F_{\mu+1}F_{\nu+1}|^2
\left| (\mu +1) \frac{\zeta_-^{\mu+1} + \zeta_+^{\mu+1}}{\zeta_-^{\mu+1} - \zeta_+^{\mu+1}}
- (\nu +1) \frac{\zeta_-^{\nu+1} + \zeta_+^{\nu+1}}{\zeta_-^{\nu+1} - \zeta_+^{\nu+1}}
\right|^2.
\end{split}
\end{equation}
\begin{remark}
Observe that the summands in \eqref{eq10.38} are equal to zero whenever $\mu=\nu$
and that the summand corresponding to the index pair $(\mu,\nu)$ is equal to the
one corresponding to $(\nu,\mu)$. Hence, by calculating explicitly the terms for
$(\mu,\nu)=(1,0),(0,1)$, we obtain that \eqref{eq10.38} is bounded from below by
\begin{equation}\label{eq10.38.1}
2|\zeta_-|^2 |F_2 F_1|^2
\left| 2 \frac{\zeta_-^{2} + \zeta_+^{2}}{\zeta_-^{2} - \zeta_+^{2}}
- \frac{\zeta_- + \zeta_+}{\zeta_- - \zeta_+}
\right|^2.
\end{equation}
By \eqref{eq10.2.1}, we have that $F_1(\zeta_+/\zeta_-) = 1$ and
$F_2(\zeta_+/\zeta_-) = 1 + \zeta_+/\zeta_-$. Therefore, \eqref{eq10.38.1}
is equal to
\begin{equation}\label{eq10.38.3}
\begin{split}
2|\zeta_- +\zeta_+|^2
\left| 2 \frac{\zeta_-^{2} + \zeta_+^{2}}{\zeta_-^{2} - \zeta_+^{2}}
- \frac{\zeta_- + \zeta_+}{\zeta_- - \zeta_+}
\right|^2
&=\frac{2 \left|2\zeta_-^2 +2\zeta_+^2 -\zeta_-^2-\zeta_+^2-2\zeta_-\zeta_+\right|^2}
{|\zeta_- - \zeta_+|^2}\\
&=2|\zeta_- -\zeta_+|^2.
\end{split}
\end{equation}
Hence,
\begin{equation}\label{eq10.39}
\left(|\partial_z Z|^2-\frac{|(\partial_z Z|Z)|^2}{|Z|^2}\right) \geq
\frac{2|\partial_z\ln\zeta_-|^2|\zeta_- -\zeta_+|^2}{|a|^{4}} =
\frac{2|\partial_z(\zeta_++\zeta_-)|^2 }{|a|^{4}}=
\frac{2}{|a|^6},
\end{equation}
where we used \eqref{algrel}, in particular that $\zeta_++\zeta_- = z/a$ and
that
\begin{equation}\label{eq10.38.2}
\partial_z\ln\zeta_- = -\partial_z\ln\zeta_+.
\end{equation}
Thus, we conclude that for all $z\in\Sigma_1\backslash [-2\sqrt{ab},2\sqrt{ab}]$
the vectors $Z(z)$ and $\partial_zZ(z)$ are linearly independent.
\end{remark}
Continuing, observe that the summands on the right hand side of \eqref{eq10.38}
are equal to
\begin{equation}\label{eq10.40}
\left| (\mu +1)
\frac{(\zeta_-^{\mu+1} + \zeta_+^{\mu+1})(\zeta_-^{\nu+1} - \zeta_+^{\nu+1})}
{(\zeta_- - \zeta_+)^2}
- (\nu +1) \frac{(\zeta_-^{\nu+1} + \zeta_+^{\nu+1})(\zeta_-^{\mu+1} - \zeta_+^{\mu+1})}
{(\zeta_- - \zeta_+)^2}
\right|^2.
\end{equation}
By \eqref{eq10.38.2},
\begin{equation}\label{eq10.41}
(\mu+1)(\zeta_-^{\mu+1} + \zeta_+^{\mu+1}) \partial_z\ln\zeta_-
= \partial_z(\zeta_-^{\mu+1} - \zeta_+^{\mu+1}).
\end{equation}
Thus, \eqref{eq10.40} is equal to
\begin{equation}\label{eq10.42}
\frac{|\partial_z\ln\zeta_-|^{-2}}{|\zeta_- - \zeta_+|^4}
\left| (\zeta_-^{\nu+1} - \zeta_+^{\nu+1})\partial_z(\zeta_-^{\mu+1} - \zeta_+^{\mu+1})
- (\zeta_-^{\mu+1} - \zeta_+^{\mu+1})\partial_z(\zeta_-^{\nu+1} - \zeta_+^{\nu+1})
\right|^2.
\end{equation}
Writing $f_{\mu}(z)=\zeta_-^{\mu+1}(z)-\zeta_+^{\mu+1}(z)$, it follows from \eqref{eq10.38}
and \eqref{eq10.42} that
\begin{equation}\label{eq10.43}
\left(|\partial_z Z|^2-\frac{|(\partial_z Z|Z)|^2}{|Z|^2}\right)
= \frac{1}{|a|^4|\zeta_--\zeta_+|^4}\sum_{\mu,\nu =0}^{N-1}
\left| f_{\nu}(z)\partial_zf_{\mu}(z)-f_{\mu}(z)\partial_z f_{\nu}(z) \right|^2.
\end{equation}
Since $f_{\mu}$ is holomorphic in $z$, we have
$(\partial_zf_{\mu})(\overline{\partial_zf_{\mu}})=
\partial_z\partial_{\bar{z}}|f_{\mu}|^2$, and we obtain
\begin{equation}\label{eq10.43.2}
\begin{split}
| f_{\nu}(z)\partial_zf_{\mu}(z)-f_{\mu}(z)\partial_z f_{\nu}(z) |^2
=
|f_{\nu}(z)|^2\partial_z\partial_{\bar{z}}|f_{\mu}(z)|^2 +
|f_{\mu}(z)|^2\partial_z\partial_{\bar{z}}|f_{\nu}(z)|^2 \\
- (\partial_z |f_{\nu}(z)|^2)(\partial_{\bar{z}}|f_{\mu}(z)|^2)
- (\partial_z |f_{\mu}(z)|^2)(\partial_{\bar{z}}|f_{\nu}(z)|^2).
\end{split}
\end{equation}
Using an exchange of summation index, we obtain
from \eqref{eq10.43} and \eqref{eq10.43.2}
\begin{equation}\label{eq10.43.1}
\begin{split}
&
\left(|\partial_z Z|^2-\frac{|(\partial_z Z|Z)|^2}{|Z|^2}\right) \\
& = \frac{2}{|a|^4|\zeta_--\zeta_+|^4}\sum_{\mu,\nu =0}^{N-1}
\big[|f_{\nu}(z)|^2\partial_{z}\partial_{\bar{z}}|f_{\mu}(z)|^2 - (\partial_{z}|f_{\mu}(z)|^2)(\partial_{\bar{z}}|f_{\nu}(z)|^2)\big ] \\
& = \frac{2}{|a|^4|\zeta_--\zeta_+|^4}
\big[L_N(z) \partial_{z}\partial_{\bar{z}}L_N(z) - (\partial_{z}L_N(z))(\partial_{\bar{z}}L_N(z))\big ],
\end{split}
\end{equation}
where $L_N(z):= \sum_{\nu =0}^{N-1} |f_{\nu}(z)|^2$, so that by \eqref{eq10.35}
$$
K_N=\frac{L_N}{|a|^2|\zeta_- -\zeta_+|^2}.
$$
Since we assumed that $z\notin [-2\sqrt{ab},2\sqrt{ab}]$,
$\zeta_{\pm}(z)$ are holomorphic functions in $z$ and $\zeta_-\neq\zeta_+$. It follows
that $\ln|\zeta_--\zeta_+|^2$ is harmonic, hence
$\partial_z\partial_{\bar{z}} \ln L_N =\partial_z\partial_{\bar{z}} \ln K_N $, and
\eqref{eq10.43} is equal to
\begin{equation}\label{eq10.44}
2K_N^2 \partial_z\partial_{\bar{z}} \ln K_N =
2\big[K_N(z)\partial_{z}\partial_{\bar{z}}K_N(z) - \partial_z K_N(z) \partial_{\bar{z}}K_N(z)\big].
\end{equation}
\end{proof}
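\par
Proposition \ref{prop10.2} can also be checked numerically by finite differences. The sketch
below (ours; all parameter values are assumptions for the example) compares both sides of
\eqref{eq10.34} at one point, using $\partial_z=\frac{1}{2}(\partial_x-i\partial_y)$ and
$\partial_z\partial_{\bar z}=\frac{1}{4}\Delta$.
\begin{verbatim}
# Finite-difference check (ours, assumed parameters) of (eq10.34), with K_N = |Z|.
import numpy as np

a, b, N, z0, h = 1.0, 0.25j, 40, 0.3 + 0.5j, 1e-4

def Zmat(z):
    zp, zm = sorted(np.roots([a, -z, b]), key=abs)
    return np.array([[(zp**(N + 1 - j) - zm**(N + 1 - j)) * (zp**k - zm**k)
                      / (a * (zp - zm))**2
                      for k in range(1, N + 1)] for j in range(1, N + 1)])

KN = lambda z: np.linalg.norm(Zmat(z))               # Hilbert-Schmidt norm

# d_z Z = ((d/dx) - i (d/dy)) Z / 2, by centred differences
dZ = ((Zmat(z0 + h) - Zmat(z0 - h)) / (2 * h)
      - 1j * (Zmat(z0 + 1j * h) - Zmat(z0 - 1j * h)) / (2 * h)) / 2

lhs = np.linalg.norm(dZ)**2 - abs(np.vdot(Zmat(z0), dZ))**2 / KN(z0)**2

logK = lambda z: np.log(KN(z))
lap = (logK(z0 + h) + logK(z0 - h) + logK(z0 + 1j * h) + logK(z0 - 1j * h)
       - 4 * logK(z0)) / h**2                        # Laplacian of log K_N
rhs = 2 * KN(z0)**2 * lap / 4

print(lhs, rhs)                                      # agree up to discretisation error
\end{verbatim}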
Next we are interested in obtaining bounds on \eqref{eq10.34}.
\begin{prop}\label{prop10.3}
Assuming \eqref{eq10.9}, we have that
\begin{equation}\label{eq10.45}
\left(|\partial_z Z|^2-\frac{|(\partial_z Z|Z)|^2}{|Z|^2}\right)
\asymp
\left(F_N(|\zeta_-|^2)\right)^4.
\end{equation}
\end{prop}
\begin{proof}
For simplicity we assume that $a=1$. Recall from \eqref{eq10.9} that we
have \eqref{eq10.8}, so $0<\sqrt{|b/a|}\leq|\zeta_-|\leq 1-1/N$, where
we also used \eqref{signcon2} for the first two inequalities.
\par
We write $F_{\nu+1}=F_{\nu+1}(t)$. Set $t=\zeta_+/\zeta_-$, which
satisfies $|b/a| \leq |t|\leq 1 - 1/C$, see the remark after \eqref{eq10.4},
which also implies that $|F_{\nu+1}(t)|\asymp 1$.
\par
By \eqref{eq10.38},
\begin{equation}\label{eq10.46}
\begin{split}
&\left(|\partial_z Z|^2-\frac{|(\partial_z Z|Z)|^2}{|Z|^2}\right) \\
& = |\partial_z\ln\zeta_-|^2\sum_{\mu,\nu =0}^{N-1} |\zeta_-|^{2(\mu+\nu)}|F_{\mu+1}F_{\nu+1}|^2
\left| (\mu +1) \frac{1 + t^{\mu+1}}{1- t^{\mu+1}}
- (\nu +1) \frac{1 + t^{\nu+1}}{1 - t^{\nu+1}}
\right|^2\\
&\asymp \sum_{\mu,\nu =0}^{N-1} |\zeta_-|^{2(\mu+\nu)}
\left| (\mu +1) \frac{1 + t^{\mu+1}}{1- t^{\mu+1}}
- (\nu +1) \frac{1 + t^{\nu+1}}{1 - t^{\nu+1}}
\right|^2
=
\begin{cases}
\leq S_{2(N-1)} \\
\geq S_{N-1},
\end{cases}
\end{split}
\end{equation}
where
\begin{equation}\label{eq10.47}
\begin{split}
&S_M = \sum_0^M |\zeta_-|^{2k}A_k, \\
&A_k = \sum_{\nu+\mu=k}
\left| (\mu +1) \frac{1 + t^{\mu+1}}{1- t^{\mu+1}}
- (\nu +1) \frac{1 + t^{\nu+1}}{1 - t^{\nu+1}}
\right|^2.
\end{split}
\end{equation}
Here
$$\left|\frac{1 + t^{\mu+1}}{1- t^{\mu+1}} \right| \asymp 1, \quad
\left|\frac{1 + t^{\nu+1}}{1- t^{\nu+1}} \right| \asymp 1,$$
so $A_k=\mathcal{O}(k^3)$. The terms in $A_k$ with $\mu \gg \nu $ and
$\mu \ll \nu$ are $\asymp k^2$ and there are $\asymp k$ terms
of that kind, so $A_k\geq \frac{1}{C}k^3$, for some $C\gg 1$.
Thus, $A_k\asymp k^3$, for $k\gg 1$.
For $k=1$,
\begin{equation}\label{eq10.47.1}
A_1 = 2
\left| 2\frac{1 + t^{2}}{1- t^{2}}
- \frac{1 + t}{1 - t}
\right|^2 = 2\left| \frac{1-t}{1+t}\right|^2 \asymp 1,
\end{equation}
since $|t|\leq 1-1/C$.
Hence, using that all $A_k \geq 0$, and that $|\zeta_-|\leq 1 - 1/N$
(see above), we obtain
\begin{equation}\label{eq10.48}
S_M \asymp \sum_0^M k^3|\zeta_-|^{2k} \asymp F_M(|\zeta_-|^2)^4 .
\end{equation}
Here, to obtain the second estimate, we used Proposition 4.2 of \cite{SjVo15}.
To conclude the statement of the proposition observe that $S_{2(N-1)}$
and $S_{N-1}$ are of the same order of magnitude, that is $F_N(|\zeta_-|^2)^4$.
\end{proof}
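\par
The comparison \eqref{eq10.48} can be illustrated as follows (ours; the pairs $(M,r)$ are
arbitrary test values): the ratio of $\sum_{k\le M}k^3r^{2k}$ to $F_M(r^2)^4$ stays within
a bounded range, both when $M(1-r^2)$ is large and when it is small.
\begin{verbatim}
# Illustration (ours, assumed test values) of (eq10.48).
import numpy as np

for M, r in [(100, 0.5), (50, 0.9), (200, 0.99), (2000, 0.999)]:
    k = np.arange(M + 1, dtype=float)
    S = np.sum(k**3 * r**(2 * k))
    F = np.sum(r**(2 * k))          # F_{M+1}(r^2), same order of magnitude as F_M(r^2)
    print(M, r, S / F**4)
\end{verbatim}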
Continuing, recall that $F_N(\zeta_+/\zeta_-)\asymp 1$ for $z$ satisfying \eqref{eq10.9}
and that it depends holomorphically on
$z \in \mathring{\Sigma}_1\backslash [-2\sqrt{ab},2\sqrt{ab}]$. For simplicity, we
sharpen assumption \eqref{eq10.9} and assume
\begin{equation}\label{eq10.48.5}
\begin{cases}
z\in \Sigma_{(r_0-1/N),r_1} \\
r_0>0 \text{ satisfies \eqref{eq10.8}}, \\
r_1= \sqrt{|b/a|} + 1/C, ~ C\gg 1.
\end{cases}
\end{equation}
Next, note that by the Cauchy inequalities, for $z$ satisfying \eqref{eq10.48.5}, we have
\begin{equation}\label{eq10.49}
|\partial_z F_N(\zeta_+/\zeta_-)|
\leq \mathcal{O}(1).
\end{equation}
Furthermore, $\partial_z| F_N(\zeta_+/\zeta_-)|^2 = \mathcal{O}(1)$,
$\partial_{z}\partial_{\bar{z}} |F_N(\zeta_+/\zeta_-)|^2 = \mathcal{O}(1)$.
Using this and \cite[Proposition 4.2]{SjVo15}, we obtain for $K_N$
as in \eqref{eq10.35} that
\begin{equation}\label{eq10.50}
\begin{split}
&\partial_z K_N = \partial_z K_{\infty}
+\mathcal{O}\!\left(
\frac{N |\zeta_-|^{2N}|\partial_z\ln\zeta_-|}{1-|\zeta_-|^2}
\right) \\
&\partial_{\bar{z}} K_N = \partial_{\bar{z}} K_{\infty}
+\mathcal{O}\!\left(
\frac{N |\zeta_-|^{2N}|\partial_z\ln\zeta_-|}{1-|\zeta_-|^2}
\right) \\
&\partial_{z}\partial_{\bar{z}} K_N =\partial_{z}\partial_{\bar{z}} K_{\infty}
+\mathcal{O}\!\left(
\frac{N^2 |\zeta_-|^{2N}|\partial_z\ln\zeta_-|^2}{1-|\zeta_-|^2}
\right),
\end{split}
\end{equation}
where
\begin{equation}\label{eq10.51}
\begin{split}
&K_{\infty} \asymp \frac{1 }{1-|\zeta_-|^2} \\
& \partial_z K_{\infty}, \partial_{\bar{z}} K_{\infty} \asymp
\frac{N }{1-|\zeta_-|^2} \\
&\partial_{z}\partial_{\bar{z}} K_{\infty} \asymp
\frac{N^2}{1-|\zeta_-|^2}.
\end{split}
\end{equation}
Thus, by Proposition \ref{prop10.2},
\begin{equation}\label{eq10.52}
\begin{split}
|\partial_z Z(z)|^2 - &\frac{|(Z(z)|\partial_z Z(z))|^2}{|Z(z)|^2} \\
&= 2K_{\infty}(z)^2\partial_{z}\partial_{\bar{z}}\ln K_{\infty}(z)
+\mathcal{O}\!\left( \frac{N^2 |\zeta_-|^{2N}|\partial_z\ln\zeta_-|^2}{(1-|\zeta_-|^2)^2}
\right).
\end{split}
\end{equation}
Combining Proposition \ref{prop10.3} with \eqref{eq10.52} and
\eqref{eq10.51} with \eqref{eq10.3.6}, we see that
\begin{equation*}
\partial_{z}\partial_{\bar{z}}\ln K_{\infty}(z)\left( 1
+\mathcal{O}\!\left( N^2 |\zeta_-|^{2N}|\partial_z\ln\zeta_-|^2
\right)\right)
\asymp (F_N(|\zeta_-|^2))^2.
\end{equation*}
Since $|\zeta_-|\leq 1 - 2/N$, see \eqref{signcon2} and \eqref{eq10.48.5},
it then follows that
\begin{equation}\label{eq10.52.1}
\partial_{z}\partial_{\bar{z}}\ln K_{\infty}(z) \asymp (F_N(|\zeta_-|^2))^2.
\end{equation}
Continuing, let $e_1(z),\dots,e_{N^2}(z)$ be as in Proposition \ref{prop10.1}.
It has been observed in \cite[Section 5]{SjVo15} that if we assume that
\begin{equation}\label{eq10.52.5}
|\nabla_z e_1(z)| = \mathcal{O}(m),
\end{equation}
for some weight $m\geq 1$, then
\begin{equation}\label{eq10.53}
\left|
\sum_{3}^{N^2} \alpha_j \nabla_z e_j
\right| \leq \mathcal{O}(m)\|\alpha\|_{\mathbf{C}^{N^2-2}}.
\end{equation}
In the following we shall perform the same steps as in
\cite{SjVo15}. We present this here for the reader's convenience,
so the reader already familiar with \cite{SjVo15} may skip ahead to
formula \eqref{eq10.59}.
\par
Next we will show that we can take the weight $m=F_N(|\zeta_-|^2)$
in \eqref{eq10.52.5}.
Using \eqref{eq10.3.6}, \eqref{eq10.31}, we have
\begin{equation}\label{eq10.54}
\begin{split}
\nabla_z e_1(z) &= \frac{\nabla_z \overline{Z}(z)}{|Z(z)|} -
\frac{\nabla_z |Z(z)|}{|Z(z)|^2}\overline{Z}(z) \\
&= \frac{\nabla_z \overline{Z}(z)}{|Z(z)|} -
\frac{(\nabla_z Z(z)|Z(z))+(Z(z)|\overline{\nabla}_z Z(z))}{2|Z(z)|^3}\overline{Z}(z).
\end{split}
\end{equation}
Using \eqref{eq10.3.6} and the Cauchy inequalities, we obtain
the estimate
\begin{equation}\label{eq10.54.1}
|\partial_z Z(z)| \leq \frac{F_N(|\zeta_-|^2)}{\mathrm{dist}(z,\partial\Sigma_{1,r_1})}
\leq \mathcal{O}(1)(F_N(|\zeta_-|^2))^2,
\end{equation}
where in the second inequality we used that,
$\mathrm{dist}(z,\partial\Sigma_{1,r_1}) \geq (1-|\zeta_-|)/C$, for
some $C\gg 1$.
\par
Since $Z$ is holomorphic, we conclude the same
estimates for $|\nabla_z Z|$ and $|\nabla_{z}\overline{Z}|$,
and, by using the Cauchy-inequalities,
\begin{equation}\label{eq10.54.2}
|\partial^2_z Z| \leq \mathcal{O}(F_N^3).
\end{equation}
Using this and the fact that $K_N=|Z|$ (cf. the remark after
Proposition \ref{prop10.2}) in \eqref{eq10.54}, we get
\begin{equation}\label{eq10.55}
|\nabla_z e_1| = \mathcal{O}(F_N).
\end{equation}
We can therefore take $m=F_N$ in the above. Let $f_2$ be the vector as
in \eqref{eq10.32}, so that $e_2= |f_2|^{-1}f_2$. As in the proof of
Proposition 5.1 in \cite{SjVo15}, we let $V_0$ be the isometry from $\mathbf{C}^{N^2-2}$
to $\mathbf{C}^{N^2}$ defined by $V_0\nu_j^0 = e_j(z_0)$, $j=3,\dots,N^2$, where
$\nu_3^0,\dots,\nu_{N^2}^0$ is the standard basis of $\mathbf{C}^{N^2-2}$.
Moreover, for
$z$ in a complex neighbourhood of $z_0$, we let $V(z)=(1-e_1(z)e_1^*(z))V_0$.
Setting $U(z)= V(z)(V^*(z)V(z))^{-1/2}$, we get that $e_j=U(z)\nu_j^0$, $j=3,\dots,N^2$.
\par
It has been shown in \cite{SjVo15} that \eqref{eq10.52.5} implies that
$\| \nabla_zU(z)\| = \mathcal{O}(m)$. Thus, by \eqref{eq10.55}, we obtain
$\| \nabla_zU(z)\| = \mathcal{O}(F_N)$. Consider
\begin{equation}\label{eq10.56}
\begin{split}
\nabla_zf_2(z) = &\nabla_z\overline{\partial_z Z(z)} -
\sum_{j\neq 2}\big[(\nabla_z\overline{\partial_z Z(z)}|e_j(z))e_j(z) \\
&+(\overline{\partial_z Z(z)}|\nabla_ze_j(z))e_j(z)
+
(\overline{\partial_z Z(z)}|e_j(z))\nabla_z e_j(z)\big].
\end{split}
\end{equation}
By \eqref{eq10.54.2}, we have that
$|\nabla_z\overline{\partial_z Z(z)} | = \mathcal{O}(F_N^3)$. Moreover,
the term for $j=1$ in the sum is of order $\mathcal{O}(F_N^3)$. It remains to
estimate,
\begin{equation*}
\begin{split}
& \mathrm{I} = \sum_{3}^{N^2} (\nabla_z\overline{\partial_z Z(z)}|e_j(z))e_j(z) \\
& \mathrm{II} = \sum_{3}^{N^2}(\overline{\partial_z Z(z)}|\nabla_ze_j(z))e_j(z) \\
& \mathrm{III} = \sum_{3}^{N^2} (\overline{\partial_z Z(z)}|e_j(z))\nabla_z e_j(z).
\end{split}
\end{equation*}
Here, $|\mathrm{I}| \leq |\nabla_z\overline{\partial_z Z}(z)| = \mathcal{O}(F_N^3)$
and, using \eqref{eq10.53}, $|\mathrm{III}| \leq \mathcal{O}(F_N) |\overline{\partial_z Z}(z)|
=\mathcal{O}(F_N^3) $. Moreover,
\begin{equation*}
\mathrm{II} = \sum _3^{N^2} (\overline{\partial_z Z}(z)|\nabla_zU(z)\nu_j^0)e_j(z)
= \sum _3^{N^2} ((\nabla_zU(z))^*\overline{\partial_z Z}(z)|\nu_j^0)e_j(z)
\end{equation*}
which yields that $|\mathrm{II}| = |(\nabla_zU(z))^*\overline{\partial_z Z}(z)| =
\mathcal{O}(F_N^3)$. Hence,
\begin{equation}\label{eq10.57}
|\nabla_zf_2(z)| = \mathcal{O}(F_N^3).
\end{equation}
By \eqref{eq10.33}, \eqref{eq10.45}, we have that for $z=z_0$
\begin{equation*}
|f_2(z_0)|^2 =
|\partial_z Z(z_0)|^2 - \frac{|(Z(z_0)|\partial_z Z(z_0))|^2}{|Z(z_0)|^2}
\asymp F_N(|\zeta_-|^2)^4.
\end{equation*}
Thus, for $z$ in a neighbourhood of $z_0$
\begin{equation}\label{eq10.58}
|f_2(z)|^2\asymp F_N(|\zeta_-|^2)^4.
\end{equation}
In view of \eqref{eq10.57} we then obtain that
$\nabla_z|f_2(z)| =\mathcal{O}(F_N^3)$. Since
$e_2 = |f_2|^{-1} f_2$,
\begin{equation*}
|\nabla e_2(z)|= \mathcal{O}( F_N(|\zeta_-|^2)).
\end{equation*}
So,
\begin{equation}
\left| \sum_2^{N^2}\alpha_j\partial_z e_j\right|
\leq \mathcal{O}(F_N(|\zeta_-|^2))\|\alpha\|_{\mathbf{C}^{N^2-1}}
\leq \mathcal{O}(NF_N(|\zeta_-|^2)),
\end{equation}
where in the last inequality we used that $\|Q_{\omega}\|
= \|\alpha\| \leq C_1 N$. Combining this with \eqref{leq10.28},
\eqref{eq10.3.6}, \eqref{eq10.23}, \eqref{eq10.3.5} and
\eqref{eq10.17.5}, we obtain
\begin{equation}\label{eq10.59}
\partial_zf = \mathcal{O}(1)\left[
\frac{N|\zeta_-|^{N-1}}{\delta F_N^2} + \frac{|\zeta_-|^N}{\delta F_N}
+\delta N^2F_N + \frac{\delta N^2}{r_0-|\zeta_-|}
\right].
\end{equation}
Here, the first term dominates the second and the fourth term
dominates the third, thus
\begin{equation}\label{eq10.59.1}
\partial_z f = \mathcal{O}(1)\left[
\frac{N|\zeta_-|^{N-1}}{\delta F_N^2}+ \delta N^3
\right].
\end{equation}
Similarly, using \eqref{leq10.29},
\begin{equation}\label{eq10.59.2}
\begin{split}
\partial_{\bar{z}}f = &\mathcal{O}(1)\left[
\frac{|\zeta_-|^{N}}{\delta F_N} + \delta N^2 F_N + N|\zeta_-|^N + \delta^2 N^3 F_{N+1}^2
+\delta N^2F_N
\right] \\
&=\mathcal{O}(1)\left[ \frac{|\zeta_-|^{N}}{\delta F_N} +\delta N^2 F_N \right].
\end{split}
\end{equation}
Repeating line by line (with the obvious changes) the proof
of Proposition 5.3 in \cite{SjVo15}, we obtain the following, essentially
identical, result:
\begin{prop}\label{prop10.4}
We express $Q$ in the canonical basis in $\mathbf{C}^{N^2}$ or in any other fixed orthonormal
basis. Let $e_1(z),\dots,e_{N^2}(z)$ be an orthonormal basis in $\mathbf{C}^{N^2}$ depending
smoothly on $z$, with $e_1(z)=|Z(z)|^{-1}\overline{Z}(z)$, and
$\mathbf{C} e_1(z)\oplus \mathbf{C} e_2(z) = \mathbf{C} \overline{Z}(z) \oplus \mathbf{C} \overline{\partial_z Z}(z)$. Write
$Q = \alpha_1\overline{Z}(z) + \sum_2^{N^2}\alpha_j e_j(z)$, and recall that the hypersurface
\begin{equation*}
\{(z,Q) \in \Sigma_{r_0-1/N}\backslash\Sigma_{r_1} \times B(0,C_1N); g(z,Q)=0\},
\end{equation*}
is given by \eqref{eq10.22} with $f$ as in \eqref{eq10.23} (see also \eqref{eq10.11.1},
\eqref{eq10.48.5}).
Then, the restriction of $dQ\wedge d\overline{Q} $ to this hypersurface is given by
\begin{equation}\label{eq10.59.5}
\begin{split}
&dQ\wedge d\overline{Q} = J(f) dz \wedge d\overline{z}\wedge d\alpha' \wedge d\overline{\alpha}' \\
& J(f) = - \frac{|\alpha_2|^2}{|Z|^2} |(e_2|\overline{\partial_z Z})|^2 \\
&\phantom{J(f) = -}+\mathcal{O}(1)|\alpha_2||F_N|\left(\frac{N |\zeta_-|^{N-1}}{F_N \delta}
+\delta N^3 F_N + |\alpha_2| F_N^2 \delta N\right) \\
&\phantom{J(f) = -}+\mathcal{O}(1)\left(\frac{N |\zeta_-|^{N-1}}{F_N \delta} +\delta N^3 F_N
+ |\alpha_2| F_N^2\delta N\right)^2,
\end{split}
\end{equation}
where $F_N=F_N(|\zeta_-|^2)$, $\alpha' =(\alpha_2,\dots,\alpha_{N^2})$ and
$d\alpha' \wedge d\overline{\alpha}' = d\alpha_2 \wedge d\overline{\alpha}_2\wedge \dots \wedge d\alpha_{N^2} \wedge d\overline{\alpha}_{N^2}$.
\end{prop}
Note that the Jacobian $J(f)$ in \eqref{eq10.59.5} is invariant under any
$z$-dependent unitary change of variables $\alpha_2,\dots,\alpha_{N^2}
\mapsto \alpha'_2,\dots,\alpha'_{N^2} $. Therefore, to calculate $J(f)$, and
thus $\xi$, at any given point $(z_0,\alpha_0)$ we may choose the most
appropriate orthonormal basis $e_2(z),\dots,e_{N^2}(z)$ in $\overline{Z}(z)^{\perp}$
depending smoothly on $z$.
\section{The average density}
Recall \eqref{eq10.11.2}. Using \eqref{eq10.12}, \eqref{eq10.13}, it follows by a general
formula, obtained in Section 3 of \cite{SjVo15}, that
\begin{equation}\label{eq10.62}
K_{\varphi}
=
\int \varphi(z)\xi(z) L(dz),
\end{equation}
with
\begin{equation}\label{eq10.63}
\xi(z) = \pi^{-N^2}\int_{{\tiny |f(z)|^2|Z(z)|^2 + |\alpha'|^2 \leq (C_1N)^2}} \mathrm{e} ^{-|f(z)|^2|Z(z)|^2-|\alpha'|^2}
J(f(z,\alpha'))L(d\alpha'),
\end{equation}
where $f$ is as in \eqref{eq10.23} and $J$ is as in Proposition \ref{prop10.4}. Recall that
we work under the hypotheses \eqref{grpp.1} and \eqref{eq10.48.5}. The latter in particular
implies \eqref{eq10.6}, \eqref{eq10.7}. Applying these to \eqref{eq10.23} we obtain
\begin{equation}\label{eq10.64}
|f| \leq \mathcal{O}(1)\left( \frac{g_0(z)}{\delta N F_N} + \delta N F_N \right)\frac{N}{F_N}
\ll \frac{N}{F_N}.
\end{equation}
Now we strengthen assumptions \eqref{grpp.1}, \eqref{eq10.7} to
\begin{equation}\label{eq10.65}
\left( \frac{|\zeta_-|^N}{\delta N F_N} + \delta N F_N \right)
\ll \frac{1}{N}.
\end{equation}
Then,
\begin{equation*}
\mathrm{e} ^{-|f(z)|^2|Z(z)|^2} = 1 +
\mathcal{O}(1)\left( \frac{|\zeta_-|^N}{\delta N F_N} + \delta N F_N \right)^2N^2.
\end{equation*}
Thus, using \eqref{eq10.59.5}
\begin{equation}\label{eq10.66}
\begin{split}
&\xi(z) = \left(1 + \mathcal{O}(1)\left( \frac{|\zeta_-|^N}{\delta N F_N}
+ \delta N F_N \right)^2N^2 \right) \cdot \\
&\frac{|(e_2|\overline{\partial_z Z})|^2}{|Z|^2}\int_{|(f|Z|,\alpha')|\leq C_1N}
|\alpha_2|^2
\mathrm{e}^{-|\alpha'|^2}\pi^{-N^2}L(d\alpha')\\
& + \mathcal{O}(1)\int_{|(f|Z|,\alpha')|\leq C_1N}
|\alpha_2||F_N|\left(\frac{N |\zeta_-|^{N-1}}{F_N \delta}
+\delta N^3 F_N + |\alpha_2| F_N^2 \delta N\right)
\mathrm{e}^{-|\alpha'|^2}\frac{L(d\alpha')}{\pi^{N^2}}\\
& + \mathcal{O}(1)\int_{|(f|Z|,\alpha')|\leq C_1N}
\left(\frac{N |\zeta_-|^{N-1}}{F_N \delta} +\delta N^3 F_N
+ |\alpha_2| F_N^2\delta N\right)^2
\mathrm{e}^{-|\alpha'|^2}\frac{L(d\alpha')}{\pi^{N^2}}.
\end{split}
\end{equation}
By \eqref{eq10.64}, $|f| |Z| \ll N$. Therefore, the first integral is equal to
\begin{equation*}
\frac{1}{\pi^2}\int |w|^2 \mathrm{e}^{-|w|^2}L(dw)
+ \mathcal{O}\!\left(\mathrm{e}^{-\frac{N^2}{\mathcal{O}(1)}}\right)
= \frac{1}{\pi}\left(1 + \mathcal{O}\!\left(\mathrm{e}^{-\frac{N^2}{\mathcal{O}(1)}}\right) \right).
\end{equation*}
The sum of the other two integrals is equal to
\begin{equation*}
\mathcal{O}(1)\left[
\left(\frac{N|\zeta_-|^{N-1}}{F_N\delta}+\delta N^3F_N\right)^2 +
F_N\left(\frac{N|\zeta_-|^{N-1}}{F_N\delta}+\delta N^3F_N\right)
\right].
\end{equation*}
We have seen that
\begin{equation}\label{eq10.66.5}
\frac{|(e_2|\overline{\partial_z Z})|^2}{|Z|^2} = \mathcal{O}(F_N^2).
\end{equation}
Therefore, we obtain
\begin{equation}\label{eq10.67}
\begin{split}
\xi(z) = &\frac{1}{\pi}\frac{|(e_2|\overline{\partial_z Z})|^2}{|Z|^2}\\
&+
\mathcal{O}(1)\left[
\left(\frac{N|\zeta_-|^{N-1}}{F_N\delta}+\delta N^3F_N\right)^2 +
F_N\left(\frac{N|\zeta_-|^{N-1}}{F_N\delta}+\delta N^3F_N\right)
\right].
\end{split}
\end{equation}
Next, let us study the leading term in \eqref{eq10.67}. Since
$\overline{\partial_z Z}$ belongs to the span of $e_1 = \overline{Z}/|Z|$
and $e_2$ for $z=z_0$, we obtain by Pythagoras' theorem that the leading term
is equal to
\begin{equation}\label{eq10.68}
\frac{1}{\pi |Z|^2}\left(
|\overline{\partial_z Z}|^2 -
\frac{|(\partial_z Z| Z)|^2}{|Z|^2}
\right), \text{ for } z=z_0.
\mathrm{e}nd{equation}
By the remark after Proposition \ref{prop10.4}, this is then true
for all $z$.
\\
\par
Recall from the remark after Proposition \ref{prop10.2} that $K_N=|Z|$.
Similarly to \eqref{eq10.50}, using \eqref{eq10.51} we get that
$K_N= K_{\infty}(1 + \mathcal{O}(|\zeta_-|^{2N}))$,
where $K_{\infty} \asymp (1-|\zeta_-|^2)^{-1}$. Using this and \eqref{eq10.52},
we see that \eqref{eq10.67} becomes
\begin{equation}\label{eq10.69}
\begin{split}
\xi(z) = &\frac{2}{\pi}\partial_{z}\partial_{\bar{z}}\ln K_{\infty}(z)
+\mathcal{O}\!\left(N^2 |\zeta_-|^{2N}|\partial_z\ln\zeta_-|^2
\right)\\
&+
\mathcal{O}(1)\left[
\left(\frac{N|\zeta_-|^{N-1}}{F_N\delta}+\delta N^3F_N\right)^2 +
F_N\left(\frac{N|\zeta_-|^{N-1}}{F_N\delta}+\delta N^3F_N\right)
\right],
\end{split}
\end{equation}
where by \eqref{eq10.52.1}
\begin{equation}\label{eq10.69.5}
\frac{2}{\pi}\partial_{z}\partial_{\bar{z}}\ln K_{\infty}(z)
\asymp F_N^2(|\zeta_-|^2).
\end{equation}
Thus, the error term in \eqref{eq10.69} can be written as
\begin{equation}\label{eq10.70}
\begin{split}
\mathcal{O}(F_N^2)\left(\frac{N^2 |\zeta_-|^{2N}|\partial_z\ln\zeta_-|^2}{F_N^2}
+\frac{N^2 |\zeta_-|^{2N-2}}{\delta^2 F_N^4} + \delta^2 N^6 + \frac{N |\zeta_-|^{N-1}}{\delta F_N^2} + \delta N^3
\right).
\end{split}
\end{equation}
By \eqref{eq10.65}, we have that $(\delta F_N)^{-1} \gg N^2$. Thus, by \eqref{grpp.1}
(which is implied by \eqref{eq10.65}), the second term in \eqref{eq10.70} is
\begin{equation*}
\gg \frac{N^6 |\zeta_-|^{2N-2}}{F_N^2}
\end{equation*}
which dominates the first term. Strengthening assumption \eqref{eq10.65} to
\begin{equation}\label{eq10.71}
\left( \frac{|\zeta_-|^{N-1} N}{\delta F_N^2} +\delta N^3\right) \ll 1,
\end{equation}
the remainder becomes
\begin{equation}\label{eq10.72}
\mathcal{O}(F_N^2)\left(\frac{N |\zeta_-|^{N-1}}{\delta F_N^2}
+ \delta N^3
\right).
\end{equation}
By \eqref{eq10.3.6}, assumption \eqref{eq10.71} is
equivalent to
\begin{equation}\label{eq10.72.1}
\left( \frac{|\zeta_-|^{N-1} N}{\delta}(1-|\zeta_-|)^2 +\delta N^3\right) \ll 1.
\end{equation}
Note that for $1/C\leq r_0 \leq 1-1/N$, for some $C\gg 1$, the function
$[0,r_0]\ni r\mapsto r^{N-1}(1-r)^2$ is increasing. Thus, unifying our
previous assumptions, we assume that
$z\in\Sigma_{r_0-1/N}\backslash \Sigma_{r_1}$, with $r_0$ satisfying
$1/C\leq r_0 \leq 1-1/N$ and \eqref{eq10.72.1} with $|\zeta_-|$ replaced
by $r_0$, and $r_1$ as in \eqref{eq10.48.5} (note that this assumption
implies \eqref{eq10.48.5}, \eqref{grpp.1} and \eqref{eq10.72.1}).
\par
Then,
by \eqref{eq10.69}, \eqref{eq10.69.5}, \eqref{eq10.3.6} we conclude that
\begin{equation}\label{eq10.73}
\xi(z) = \frac{2}{\pi}\partial_{z}\partial_{\bar{z}}\ln K_{\infty}(z)
\left(1 +\mathcal{O}\!\left(\frac{N |\zeta_-|^{N-1}}{\delta}(1-|\zeta_-|)^2
+ \delta N^3
\right)\right).
\end{equation}
We have proved Theorem \ref{thm1}, the main result of this paper.
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
\providecommand{\MRhref}[2]{
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2}
}
\providecommand{\href}[2]{#2}
\begin{thebibliography}{10}
\bibitem{BSZ00}
P.~Bleher, B.~Shiffman, and S.~Zelditch, \emph{Universality and scaling of
correlations between zeros on complex manifolds}, Inventiones Mathematicae
\textbf{142} (2000), 351--395, 10.1007/s002220000092.
\bibitem{BM}
W.~Bordeaux-Montrieux, \emph{Loi de Weyl presque s{\^u}re et r{\'e}solvent
pour des op{\'e}rateurs diff{\'e}rentiels non-autoadjoints, Th{\`e}se},
pastel.archives-ouvertes.fr/docs/00/50/12/81/PDF/manuscrit.pdf (2008).
\bibitem{BoCa16}
C.~Bordenave and M.~Capitaine, \emph{Outlier eigenvalues for deformed i.i.d.\
random matrices}, Commun. Pure Appl. Math., doi:10.1002/cpa.21629,
arxiv.org/abs/1403.6001 (2016).
\bibitem{ZwChrist10}
T.J. Christiansen and M.~Zworski, \emph{Probabilistic Weyl Laws for Quantized
Tori}, Communications in Mathematical Physics \textbf{299} (2010).
\bibitem{Da07}
E.~B. Davies, \emph{Non-Self-Adjoint Operators and Pseudospectra}, {Proc.
Symp. Pure Math.}, vol.~76, Amer. Math. Soc., 2007.
\bibitem{DaHa09}
E.B. Davies and M.~Hager, \emph{Perturbations of Jordan matrices}, J. Approx.
Theory \textbf{156} (2009), no.~1, 82--94.
\bibitem{TrEm05}
M.~Embree and L.~N. Trefethen, \emph{Spectra and Pseudospectra: The Behavior
of Nonnormal Matrices and Operators}, Princeton University Press, 2005.
\bibitem{GoKh00}
I.Y. Goldsheid and B.A. Khoruzhenko, \emph{Eigenvalue curves of asymmetric
tridiagonal random matrices}, Elec. J. of Probability \textbf{5} (2000),
no.~16, 1--28.
\bibitem{GuMaZe14}
A.~Guionnet, P.~Matchett Wood, and O.~Zeitouni, \emph{Convergence of the
spectral measure of non-normal matrices}, Proc.~AMS \textbf{142} (2014),
no.~2, 667--679.
\bibitem{Ha06b}
M.~Hager, \emph{Instabilit{\'e} Spectrale Semiclassique d{\rq}Op{\'e}rateurs
Non-Autoadjoints II}, Annales Henri Poincare \textbf{7} (2006), 1035--1064.
\bibitem{Ha06}
\bysame, \emph{Instabilit{\'e} spectrale semiclassique pour des op{\'e}rateurs
non-autoadjoints I: un mod{\`e}le}, Annales de la facult{\'e} des sciences
de Toulouse S{\'e}r.~6 \textbf{15} (2006), no.~2, 243--280.
\bibitem{HaSj08}
M.~Hager and J.~Sj{\"o}strand, \emph{Eigenvalue asymptotics for randomly
perturbed non-selfadjoint operators}, Mathematische Annalen \textbf{342}
(2008), 177--243.
\bibitem{HaNe96}
N.~Hatano and D.R. Nelson, \emph{Localization transitions in non-hermitian
quantum mechanics}, Physical Review Letters \textbf{77} (1996), 570--573.
\bibitem{HoKrPeVi09}
J.B. Hough, M.~Krishnapur, Y.~Peres, and B.~Vir{\'a}g, \emph{Zeros of Gaussian
Analytic Functions and Determinantal Point Processes}, American Mathematical
Society, 2009.
\bibitem{SZ03}
B.~Shiffman and S.~Zelditch, \emph{Equilibrium distribution of zeros of random
polynomials}, Int. Math. Res. Not. (2003), 25--49.
\bibitem{Sj15}
J.~Sj{\"o}strand, \emph{Non-self-adjoint differential operators, spectral
asymptotics and random perturbations}, Monograph in preparation,
http://sjostrand.perso.math.cnrs.fr/.
\bibitem{SjAX1002}
\bysame, \emph{Spectral properties of non-self-adjoint operators}, Actes des
Journ{\'e}es d'{\'e}.d.p. d'{\'E}vian, arxiv.org/abs/1002.4844 (2009).
\bibitem{SjVo15}
J.~Sj{\"o}strand and M.~Vogel, \emph{Interior eigenvalue density of Jordan
matrices with random perturbations}, (2015), accepted for publication as
part of a book in honour of Mikael Passare in the series Trends in
Mathematics, Springer/Birkh{\"a}user, arxiv.org/abs/1412.2230.
\bibitem{SjVo15b}
\bysame, \emph{Large bi-diagonal matrices and random perturbations}, preprint
arxiv.org/abs/1512.06076 (2015).
\bibitem{Sj09b}
Johannes Sj\"ostrand, \emph{Counting zeros of holomorphic functions of
exponential growth}, Journal of Pseudo-Differential Operators and Applications
\textbf{1} (2010), no.~1, 75--100.
\bibitem{So00}
M.~Sodin, \emph{Zeros of Gaussian analytic functions}, Mathematical Research
Letters \textbf{7} (2000), 371--381.
\bibitem{Vo14}
M.~Vogel, \emph{The precise shape of the eigenvalue intensity for a class of
non-selfadjoint operators under random perturbations}, (2014),
arxiv.org/abs/1401.8134.
\end{thebibliography}
\end{document}
\begin{document}
\title{A Note on Goldbach Partitions of Large Even Integers}
\begin{abstract}
Let $\Sigma_{2n}$ be the set of all partitions of the even
integers from the interval $(4,2n], n>2,$ into two odd prime
parts. We show that $\mid\Sigma_{2n}\mid\sim 2n^2/\log^2{n}$ as
$n\to\infty$. We also assume that a partition is selected
uniformly at random from the set $\Sigma_{2n}$. Let $2X_n\in
(4,2n]$ be the size of this partition. We prove a limit theorem
which establishes that $X_n/n$ converges weakly to the maximum of
two random variables which are independent copies of a uniformly
distributed random variable in the interval $(0,1)$. Our method of
proof is based on a classical Tauberian theorem due to Hardy,
Littlewood and Karamata. We also show that the same asymptotic
approach can be applied to partitions of integers into an
arbitrary and fixed number of odd prime parts.
\end{abstract}
\section{Introduction and Statement of the Main Result}
For a given sequence of positive integers
$\Lambda=\{\lambda_1,\lambda_2,...\}$, by a $\Lambda$-partition of
the positive integer $n$, we mean a way of writing it as a sum of
positive integers from $\Lambda$ without regard to order; the
summands are called parts. Let $\mathcal {P}=\{p_1,p_2,...\}$ be
the sequence of all odd primes arranged in increasing order. A prime
partition is a $\Lambda$-partition with $\Lambda=\mathcal{P}$. Let
$Q(n)$ be the number of prime partitions of $n$. Hardy and
Ramanujan [6,7] were apparently the first to study the
asymptotic behavior of the number of integer
($\Lambda=\{1,2,...\}$) and prime partitions for large $n$. For
prime partitions they proved the following asymptotic formula:
$$
\log{Q(n)}\sim 2\pi\sqrt{\frac{n}{3\log{n}}}, \quad n\to\infty.
$$
The study of the asymptotic behavior of $Q(n)$ itself is quite
complicated. It turns out that the corresponding asymptotic
formula contains transcendental sums over the primes which can be
expressed in terms of zeros of the Riemann zeta function (for more
details, see e.g. [9; p. 240]). Recently Vaughan [16] proposed and
studied a modification of the problem, where $n$ is replaced by a
continuous real variable. His asymptotic results avoid
transcendental sums over primes.
Consider now the number $Q_m(n)$ of prime partitions of $n$ into
$m$ parts ($1\le m\le n$). The bivariate generating function of
the numbers $Q_m(n)$ is of Euler's type, namely,
\begin{equation}\label{euler}
G(x,z) =1+\sum_{n=1}^\infty z^n\sum_{m=1}^n Q_m(n)x^m
=\prod_{p_k\in\mathcal{P}}(1-xz^{p_k})^{-1}
\end{equation}
(the proof may be found in [1; Section 2.1]). In this note we
focus on the asymptotic behavior of the coefficients $Q_2(n)$ of
$x^2$ and $z^n$ in the power series expansion of $G(x,z)$ in
powers of $x$ and $z$. For $n>4$, $Q_2(n)$ counts the number of
ways of representing $n$ as a sum of two odd primes. Obviously,
$Q_2(n)=0$ if $n$ is odd. In 1742 Goldbach conjectured that
$Q_2(n)\ge 1$ for every even integer $n>4$. This problem still remains
unsolved (for more details, see e.g. [8; Section 2.8 and p.
594]). Another famous conjecture related to prime partitions was
stated by Hardy and Littlewood [5], who predicted the asymptotic
form of $Q_2(n)$ for large even $n$. They conjectured that
\begin{eqnarray}\label{hardlit}
& & Q_2(n)\sim 2C_2\left(\prod_{p_k\in\mathcal{P}, p_k\mid n}
\frac{p_k-1}{p_k-2}\right)\int_2^n\frac{du}{\log^2{u}} \nonumber
\\
& & \sim 2C_2 \left(\prod_{p_k\in\mathcal{P}, p_k\mid n}
\frac{p_k-1}{p_k-2}\right)\frac{n}{\log^2{n}}, \quad n\to\infty,
\end{eqnarray}
where $C_2$ is the twin prime constant
$$
C_2:=\prod_{p_k\in\mathcal{P}} \left(1-\frac{1}{(p_k-1)^2}\right)
=0.6601618158...
$$
(for the role of $C_2$ in the distribution of the prime numbers,
see again [8; Section 22.20]). This conjecture also remains
open.
In the present note we do not deal with the asymptotic equivalence
(\ref{hardlit}) but consider the sum function
\begin{equation}\label{summatory}
S(2n)=\sum_{2<k\le n} Q_2(2k), \quad n>2,
\end{equation}
counting all partitions of the even integers from the interval
$(4,2n]$ into two odd prime parts. Partitions of this kind are sometimes
called Goldbach partitions. Let $\Sigma_{2n}$ denote the set
of these partitions. Our main result is the following asymptotic
equivalence.
\begin{theorem}
We have
$$
\mid\Sigma_{2n}\mid=S(2n)\sim\frac{2n^2}{\log^2{n}}, \quad
n\to\infty.
$$
\end{theorem}
Consider now a random experiment. Suppose that we select a
partition uniformly at random from the set $\Sigma_{2n}$, i.e. we
assign the probability $1/S(2n)$ to each Goldbach partition. We
denote by $\mathbb{P}$ the uniform probability measure on
$\Sigma_{2n}$. Let $2X_n\in (4,2n]$ be the number that is
partitioned by this random selection. $2X_n$ is also called the
size of this partition. Using Theorem 1, we determine the limiting
distribution of the random variable $X_n$.
\begin{theorem} If $0<u<1$, then
$$
\lim_{n\to\infty}\mathbb{P}\left(\frac{X_n}{n}\le u\right)=u^2.
$$
\end{theorem}
{\it Remark 1.} Using the Prime Number Theorem [8; Section 1.8],
it is easy to show that the number of ordered pairs of primes not
exceeding $2n$ is also $\sim 2n^2/\log^2{n}$; cf. with the result
of Theorem 1. Hence, we conclude that almost all even integers
that are $\le 2n$ have only one partition into two prime parts.
{\it Remark 2.} In probabilistic terms Theorem 2 shows that the
typical size of a random Goldbach partition is a fraction of $2n$.
Moreover, Theorem 2 implies that $X_n/n$ converges weakly, as
$n\to\infty$, to a random variable whose cumulative distribution
function is
$$
F(u) =\left\{\begin{array}{ll} 0 & \qquad \mbox {if} \qquad u\le
0, \\
u^2 & \qquad \mbox {if}\qquad 0<u<1, \\
1 & \qquad \mbox {if} \qquad u\ge 1.
\end{array}\right.
$$
It can be
easily seen that $F(u)$ is the distribution function of
$\max{\{U_1,U_2\}}$, where $U_1$ and $U_2$ are two independent
copies of a uniformly distributed random variable in the interval
$(0,1)$.
{\it Remark 3.} One reason to study the sum function
(\ref{summatory}) is motivated by a result due to Brigham [2]. He
has studied the asymptotic behavior of a similar sum function
related to integer partitions weighted by the sequence of the von
Mangoldt functions (the definition of a von Mangoldt function and
its role in the proof of the Prime Number Theorem may be found in
[8; Section 17.7]). The asymptotic behavior of a single term in
Brigham's sum function was subsequently studied by Richmond [13]
and Yang [17]. Their results are essentially based on Brigham's
observations.
{\it Remark 4.} Another interesting problem on prime partitions is
related to the asymptotic behavior of the coefficients $Q_m(n)$,
the number of prime partitions of $n$ with $m$ parts (see
(\ref{euler})). Haselgrove and Temperley [9; p. 240] found an
asymptotic form for $Q_m(n)$, whenever $m=m(n)\to\infty$ as
$n\to\infty$ in a proper way. In probabilistic terms their result
can be stated as follows. Consider a random variable, whose
probability distribution function is defined by the ratio
\begin{equation}\label{pdf}
\frac{Q_m(n)}{Q(n)}, \quad m=1,...,n.
\end{equation}
Haselgrove and Temperley [9] showed that this random variable
converges weakly to a non-degenerate random variable as
$n\to\infty$. They also determined the moment generating function
of this limiting variable. The asymptotic form of the mean and the
variance of probability distribution (\ref{pdf}) were found
recently by Ralaivaosaona [12].
Our paper is organized as follows. Section 2 contains some
preliminaries. The proofs of Theorems 1 and 2 are given in Section
3. Our method of proof is essentially based on a classical
Tauberian theorem due to Hardy, Littlewood and Karamata (see [4]).
Finally, in Section 4 we present an extension of our main result.
In particular, we show that the same approach yields similar
results for prime partitions of $n$ into $m>2$ parts whenever $m$
is a fixed integer.
\section{Preliminary Results}
We start with a generating function identity for the sequence
$\{Q_2(2k)\}_{k>2}$ of the counts of Goldbach partitions.
\begin{lemma} For any real variable $z$ with $\mid z\mid<1$, let
\begin{equation}\label{ef}
f(z)=\sum_{p_k\in\mathcal{P}} z^{p_k}.
\end{equation}
Then, we have
\begin{equation}\label{ident}
2\sum_{k>2} Q_2(2k)z^{2k} =f^2(z)+f(z^2).
\end{equation}
\end{lemma}
{\it Proof.} Differentiating the left-hand side of (\ref{euler})
twice with respect to $x$ and then setting $x=0$ and $m=2$, we get
\begin{eqnarray}
& & \frac{\partial^2 G(x,z)}{\partial x^2}\mid_{x=0,m=2}
=\sum_{n=1}^\infty z^n\sum_{m=2}^n
m(m-1)Q_m(n)x^{m-2}\mid_{x=0,m=2}
\nonumber \\
& & =2\sum_{n=1}^\infty Q_2(n)z^n =2\sum_{k>2} Q_2(2k)z^{2k}.
\nonumber
\end{eqnarray}
The last equality follows from the obvious identities
$Q_2(1)=Q_2(2)=Q_2(4)=0$ and $Q_2(2k+1)=0$ for $k=1,2,...$. The
right-hand side of (\ref{euler}) can be also written as
$\exp{(-\sum_{p_k\in\mathcal{P}}\log{(1-xz^{p_k})})}$.
Differentiating it twice, in the same way we find that
\begin{eqnarray}
& & \frac{\partial^2 G(x,z)}{\partial x^2}\mid_{x=0}
=\left(\exp{\left(-\sum_{p_k\in\mathcal{P}}\log{(1-xz^{p_k})}\right)}
\right) \left(\sum_{p_k\in\mathcal{P}} \frac{z^{p_k}}{1-xz^{p_k}}
\right)^2\mid_{x=0} \nonumber \\
& &
+\left(\exp{\left(-\sum_{p_k\in\mathcal{P}}\log{(1-xz^{p_k})}\right)}
\right) \left(\sum_{p_k\in\mathcal{P}}
\frac{z^{2p_k}}{(1-xz^{p_k})^2}\right)\mid_{x=0} \nonumber \\
& & =f^2(z)+f(z^2), \nonumber
\end{eqnarray}
which completes the proof.$\rule{2mm}{2mm}$
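The coefficient identity behind (\ref{ident}) can also be checked numerically for small even
integers. The following short script is an illustration only and is not part of the proof; it
uses the computer algebra package {\it SymPy} merely to enumerate primes, and verifies that the
coefficient of $z^{2k}$ in $f^2(z)+f(z^2)$ (the number of ordered representations of $2k$ as a
sum of two odd primes, plus the contribution of $f(z^2)$ when $k$ is an odd prime) equals
$2Q_2(2k)$:
\begin{verbatim}
from sympy import primerange, isprime

def Q2(n):
    # number of partitions of n into two odd primes (unordered)
    return sum(1 for p in primerange(3, n // 2 + 1) if isprime(n - p))

def ordered(n):
    # coefficient of z^n in f(z)^2: ordered pairs of odd primes summing to n
    return sum(1 for p in primerange(3, n - 2) if isprime(n - p))

for k in range(3, 51):
    n = 2 * k
    assert 2 * Q2(n) == ordered(n) + (1 if isprime(k) else 0)
print("2*Q_2(2k) = [z^{2k}](f^2(z) + f(z^2)) checked for 3 <= k <= 50")
\end{verbatim}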
Further, we will use a Tauberian theorem by
Hardy-Littlewood-Karamata whose proof may be found in [4; Chapter
7]. We use it in the form given by Odlyzko [11; Section 8.2].
{\bf Hardy-Littlewood-Karamata Theorem.} {\it (See [11; Theorem
8.7, p. 1225].) Suppose that $a_k\ge 0$ for all $k$, and that
$$
g(x)=\sum_{k=0}^\infty a_k x^k
$$
converges for $0\le x<r$. If there is a $\rho>0$ and a function
$L(t)$ that varies slowly at infinity such that
\begin{equation}\label{funcsim}
g(x)\sim (r-x)^{-\rho}L\left(\frac{1}{r-x}\right), \quad x\to r^-,
\end{equation}
then
\begin{equation}\label{sumsim}
\sum_{k=0}^n a_k r^k\sim\left(\frac{n}{r}\right)^\rho
\frac{L(n)}{\Gamma(\rho+1)}, \quad n\to\infty.
\end{equation}}
{\it Remark.} A function $L(t)$ varies slowly at infinity if, for
every $u>0$, $L(ut)\sim L(t)$ as $t\to\infty$.
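For instance, the function $L(t)=1/\log^2{t}$, which is the one appearing in the proof of
Theorem 1 below, varies slowly at infinity: for every fixed $u>0$ we have
$\log{(ut)}=\log{t}+\log{u}\sim\log{t}$ as $t\to\infty$, and therefore $L(ut)\sim L(t)$.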
\section{Proof of the Main Result}
{\it Proof of Theorem 1.} We need to show that power series
(\ref{ident}) satisfies the conditions of
Hardy-Littlewood-Karamata theorem. The next lemma establishes an
asymptotic equivalence of $f(z)$ as $z\to 1^-$.
\begin{lemma} Let $f(z)$ be the power series defined by
(\ref{ef}). Then, as $z\to 1^-$,
$$
f(z)\sim -\frac{1}{\left(\log{\frac{1}{z}}\right)
\left(\log{\log{\frac{1}{z}}}\right)}.
$$
\end{lemma}
{\it Proof.} As usual, by $\pi(y)$ we denote the number of primes
which do not exceed the positive real number $y$. In (\ref{ef}) we
set $z=e^{-t}, t>0,$ and apply an argument similar to that given
by Stong [15] (see also [3]). We have
\begin{eqnarray}\label{intsum}
& & f(e^{-t}) =\int_0^\infty e^{-yt}d\pi(y) =\int_0^\infty
te^{-yt}\pi(y)dy =\int_0^\infty \pi(s/t)e^{-s}ds \nonumber \\
& & =I_1(t)+I_2(t),
\end{eqnarray}
where
$$
I_1(t)=\int_0^{t^{1/2}}\pi(s/t)e^{-s}ds, \quad I_2(t)
=\int_{t^{1/2}}^\infty \pi(s/t)e^{-s}ds.
$$
For $I_1(t)$ we use the bound $\pi(s/t)\le s/t$. Hence, for sufficiently
small $t>0$, we obtain
\begin{eqnarray}\label{ione}
& & 0\le I_1(t) \le\frac{1}{t}\int_0^{t^{1/2}}se^{-s}ds \nonumber \\
& & =\frac{1}{t}\left(-se^{-s}\mid_0^{t^{1/2}}+\int_0^{t^{1/2}}
e^{-s}ds\right) =\frac{1}{t}O(t^{1/2})=O(t^{-1/2}).
\end{eqnarray}
The estimate for $I_2(t)$ follows from the Prime Number Theorem
with an error term given in a suitable form. So, it is known that,
for $y>1$,
$$
\pi(y) =\frac{y}{\log{y}}+O\left(\frac{y}{\log^2{y}}\right)
$$
(see e.g. [10; Theorem 23, p. 65]). Furthermore, for $s\ge
t^{1/2}$, we have $\log{s}\ge -\frac{1}{2}\log{\frac{1}{t}}$.
Hence, as in [15], we get
\begin{eqnarray}\label{pist}
& & \pi(s/t) =\frac{s}{t}\frac{1}{\log{\frac{1}{t}}+\log{s}}
+O\left(\frac{s}{t\left(\log{\frac{1}{t}}+\log{s}\right)^2}\right)
\nonumber \\
& & =\frac{s}{t\log{\frac{1}{t}}}
\left(1+O\left(\frac{\mid\log{s}\mid}{\log{\frac{1}{t}}}\right)\right)
+O\left(\frac{s}{t\log^2{\frac{1}{t}}}\right) \nonumber \\
& & =\frac{s}{t\log{\frac{1}{t}}}
+O\left(\frac{s(1+\mid\log{s}\mid)}{t\log^2{\frac{1}{t}}}\right).
\end{eqnarray}
We also recall that in (\ref{ione}) we have used the obvious
estimate
\begin{equation}\label{estim}
\int_0^{t^{1/2}} se^{-s}ds =O(t^{1/2}).
\end{equation}
Combining (\ref{pist}) and (\ref{estim}), we obtain
\begin{eqnarray}\label{itwo}
& & I_2(t) =\frac{1}{t\log{\frac{1}{t}}} \int_{t^{1/2}}^\infty
se^{-s}ds +O\left(\frac{1}{t\log^2{\frac{1}{t}}}
\int_{t^{1/2}}^\infty s(1+\mid\log{s}\mid)e^{-s}ds\right)
\nonumber \\
& & =\frac{1}{t\log{\frac{1}{t}}}\left(\int_0^\infty se^{-s}ds
+O(t^{1/2})\right) +O\left(\frac{1}{t\log^2{\frac{1}{t}}}\right)
\nonumber \\
& & =\frac{1}{t\log{\frac{1}{t}}}
+O\left(\frac{1}{t^{1/2}\log{\frac{1}{t}}}\right)
+O\left(\frac{1}{t\log^2{\frac{1}{t}}}\right) \nonumber \\
& & \sim\frac{1}{t\log{\frac{1}{t}}}, \quad t\to 0^+.
\end{eqnarray}
Hence, by (\ref{intsum}), (\ref{ione}) and (\ref{itwo}),
$$
f(e^{-t})\sim\frac{1}{t\log{\frac{1}{t}}}, \quad t\to 0^+.
$$
The proof is now completed after the substitution
$t=\log{\frac{1}{z}}$.$\rule{2mm}{2mm}$
Since
$$
\log{\frac{1}{z}}=-\log{z} =-\log{(1-(1-z))} \sim 1-z, \quad z\to
1^-,
$$
the asymptotic equivalence in Lemma 2 becomes
$$
f(z)\sim\frac{1}{(1-z)\log{\frac{1}{1-z}}}, \quad z\to 1^-.
$$
Therefore,
$$
f^2(z)+f(z^2)\sim\frac{1}{(1-z)^2\log^2{\frac{1}{1-z}}}, \quad
z\to 1^-,
$$
which implies that the series $\sum_{k>2} Q_2(2k)z^{2k}$
satisfies condition (\ref{funcsim}) of Hardy-Littlewood-Karamata
Tauberian theorem with $r=1, \rho=2$ and
$L(t)=\frac{1}{\log^2{t}}$ (see also (\ref{ident})). The
asymptotic equivalence of Theorem 1 follows immediately from
(\ref{sumsim}).$\rule{2mm}{2mm}$
{\it Proof of Theorem 2.} Recall that $2X_n\in (4,2n]$ equals the
size of a Goldbach partition that is chosen uniformly at random
from the set $\Sigma_{2n}$ of all such partitions. Since
$S(2n)=\mid\Sigma_{2n}\mid$ and, for any $N\in (2,n]$,
$S(2N)=\mid\Sigma_{[2N]}\mid$ ($[a]$ denotes the integer part of
the real number $a$), from (\ref{summatory}) it follows that
\begin{equation}\label{probab}
\mathbb{P}(2X_n\le 2N) =\frac{S(2N)}{S(2n)}.
\end{equation}
Setting $N\sim un, 0<u<1,$ and applying Theorem 1 to both the
numerator and the denominator of (\ref{probab}), we see that the
limit of (\ref{probab}), as $n\to\infty$, is $u^2$. This completes
the proof.$\rule{2mm}{2mm}$
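The limiting behavior described in Theorem 2 can be illustrated numerically. The following
short script is illustrative only (the choice $n=1500$ and the use of {\it SymPy} for primality
testing are ours); it compares the distribution function
$\mathbb{P}(X_n/n\le u)=S(2[un])/S(2n)$ with the limit $u^2$. Because of the slowly decaying
logarithmic factors, the agreement at this modest range is only rough.
\begin{verbatim}
from sympy import primerange, isprime

def Q2(m):
    # number of partitions of m into two odd primes
    return sum(1 for p in primerange(3, m // 2 + 1) if isprime(m - p))

n = 1500
S = [0] * (n + 1)                      # S[k] = S(2k) = sum_{2<j<=k} Q2(2j)
for k in range(3, n + 1):
    S[k] = S[k - 1] + Q2(2 * k)

for u in (0.25, 0.5, 0.75, 0.9):
    N = int(u * n)
    print("u = %.2f:  S(2N)/S(2n) = %.3f,  u^2 = %.3f" % (u, S[N] / S[n], u * u))
\end{verbatim}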
\section{Prime Partitions with More Than Two Parts}
Let $m>2$ be an integer and let $\Sigma_{m,n}$ denote the set of
prime partitions of the integers from the interval $(4,n]$ into
$m$ parts. The goal of this section is to extend the results of
Theorems 1 and 2 to prime partitions from the class
$\Sigma_{m,n}$. We state them below as two separate theorems.
\begin{theorem} For any fixed integer $m>2$, we have
$$
\mid\Sigma_{m,n}\mid\sim\frac{1}{m!}
\left(\frac{n}{\log{n}}\right)^m, \quad n\to\infty.
$$
\end{theorem}
Furthermore, let $X_{m,n}$ denote the size of a prime partition
selected uniformly at random from the class $\Sigma_{m,n}$. (The
uniform probability measure on $\Sigma_{m,n}$ is again denoted by
$\mathbb{P}$.)
\begin{theorem} If $0<u<1$ and $m$ is as in Theorem 3, then
$$
\lim_{n\to\infty}\mathbb{P}\left(\frac{X_{m,n}}{n}\le u\right)
=u^m.
$$
\end{theorem}
Theorem 4 shows a weak convergence similar to that established in
Theorem 2. Namely, for any fixed integer $m$, $X_{m,n}/n$ converges,
as $n\to\infty$, to $\max{\{U_1,...,U_m\}}$, where $U_1,...,U_m$
are independent copies of a random variable that is uniformly
distributed in the interval $(0,1)$.
Below we only sketch the proof of Theorem 3. The proof of Theorem
4 is almost identical with that of Theorem 2.
{\it Sketch of the proof of Theorem 3.} Our main tool is again the
generating function identity (\ref{euler}). We notice first that
the coefficients $Q_m(n)$ vanish whenever $m$ and $n$ have
different parities. By the definition of
$Q_m(n)$, we also have
\begin{equation}\label{summatorym}
\mid\Sigma_{m,n}\mid =\sum_{k\le n} Q_m(k).
\end{equation}
We compute the $m$th derivative of the infinite product in (\ref{euler})
using the Fa\`a di Bruno formula for derivatives of compound
functions (see e.g. [14; Section 2.8]). We introduce the following
auxiliary notations:
\begin{eqnarray}\label{ab}
& & b(x)=b(x,z):=-\sum_{p_k\in\mathcal{P}}
\log{(1-xz^{p_k})}, \nonumber \\
& & b_j=b_j(x,z):=\frac{\partial^j b(x,z)}{\partial x^j},
j=1,...,m.
\end{eqnarray}
Using formulae (43) and (46) of [14; Section 2.8], we obtain
\begin{equation}\label{mthab}
\frac{d^m}{dx^m}e^{b(x)} =e^{b(x)} b_1^m+R_m,
\end{equation}
where
\begin{equation}\label{bruno}
R_m=R_m(x,z) =e^{b(x,z)}\widetilde{\sum} \frac{m!}{k_1!...k_m!}
\left(\frac{b_1}{1!}\right)^{k_1}...
\left(\frac{b_m}{m!}\right)^{k_m}
\end{equation}
and $\widetilde{\sum}$ denotes the sum over all integers $k_j\ge
0, j=1,...,m$, such that $\sum_{j=1}^m jk_j=m$ and $k_1<m$.
Setting $x=0$ in (\ref{ab}), we find that $b(0,z)=0$ and $b_j(0,z)=f(z^j),
j=1,...,m,$ where the function $f(z)$ is defined by (\ref{ef}).
Moreover, in the right-hand side of (\ref{bruno}) we have
$k_1+...+k_m\le m-1$. In fact, since $k_1<m$, by the definition of
$\widetilde{\sum}$ at least one $k_j$ with $j\ge 2$ is positive. Hence,
if we had $k_1+k_2+...+k_m=m$, then $m=\sum_{j=1}^m k_j<\sum_{j=1}^m jk_j=m$,
a contradiction.
Since $f(z^j)=O(f(z))$ as $z\to 1^-$ and since
$k_1+...+k_m\le m-1$, we conclude that $R_m(0,z)=O(f^{m-1}(z))$. Therefore
(\ref{mthab}) becomes
$$
\frac{d^m}{dx^m} e^{b(x)}=f^m(z)+O(f^{m-1}(z)),
$$
or, equivalently,
\begin{equation}\label{mth}
\frac{\partial^m G(x,z)}{\partial x^m}\mid_{x=0} =f^m(z)+O(f^{m-1}(z))
\end{equation}
as $z\to 1^-$. On the other hand,
\begin{equation}\label{mthtwo}
\frac{\partial^m G(x,z)}{\partial x^m}\mid_{x=0} =m!\sum_{k\ge m}
Q_m(k)z^k.
\end{equation}
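As a concrete illustration of (\ref{mth})--(\ref{mthtwo}) (not needed for the proof), one can
verify the case $m=3$ on a truncated version of Euler's product with the help of {\it SymPy}.
The truncation to primes $\le 20$ below is exact for all coefficients of $z^n$ with $n\le 20$,
since a partition of such an $n$ into three odd prime parts only involves primes $\le 14$:
\begin{verbatim}
from sympy import primerange, symbols, diff, factorial, expand

x, z = symbols('x z')
N = 20
primes = list(primerange(3, N + 1))

G = 1
for p in primes:                      # truncated Euler product
    G *= 1 / (1 - x * z**p)

poly = expand(diff(G, x, 3).subs(x, 0) / factorial(3))   # should equal sum Q_3(n) z^n

def Q3(n):
    ps = [p for p in primes if p <= n]
    return sum(1 for i in range(len(ps)) for j in range(i, len(ps))
               for k in range(j, len(ps)) if ps[i] + ps[j] + ps[k] == n)

for n in range(1, N + 1):
    assert poly.coeff(z, n) == Q3(n)
print("case m = 3 of the identity verified for all n <= 20")
\end{verbatim}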
Applying Lemma 2, as in the proof of Theorem 1, we obtain the
asymptotic equivalence
\begin{equation}\label{mthasymp}
f^m(z) \sim\frac{1}{(1-z)^m\log^m{\frac{1}{1-z}}}, \quad z\to 1^-.
\end{equation}
The observations in (\ref{mth})-(\ref{mthasymp}) imply that
$$
\frac{\partial^m G(x,z)}{\partial x^m}\mid_{x=0}
\sim\frac{1}{(1-z)^m\log^m{\frac{1}{1-z}}}, \quad z\to 1^-.
$$
So, condition (\ref{funcsim}) of Hardy-Littlewood-Karamata theorem
is satisfied with $r=1, \rho=m$ and $L(t)=\frac{1}{\log^m{t}}$.
The required result follows at once from (\ref{summatorym}) and
(\ref{sumsim}).$\rule{2mm}{2mm}$
\end{document}
\begin{document}
\begin{abstract}
In this article we consider the Cauchy problem with large initial data for an equation of the form \[(\partial_t+\partial_x^3)u=F(u,u_x,u_{xx})\]where \(F\) is a polynomial with no constant or linear terms. Local well-posedness was established in weighted Sobolev spaces by Kenig-Ponce-Vega. In this paper we prove local well-posedness in a translation invariant subspace of \(H^s\) by adapting the result of Marzuola-Metcalfe-Tataru on quasilinear Schr\"odinger equations.
\end{abstract}
\maketitle
\thispagestyle{empty}
\section{Introduction}
In this article we consider local well-posedness for an equation of the form
\begin{equation}\label{eq:nonlinear}
\left\{\begin{array}{l}(\partial_t+\partial_x^3)u=F(u,u_x,u_{xx}),\qquad u\colon\mathbb{R}\times\mathbb{R}\rightarrow\mathbb{R}\textrm{ or }\mathbb{C}
\\u(0)=u_0\end{array}\right.
\end{equation}
where we assume \(F\) is a constant coefficient polynomial of degree \(m\geq 2\) with no linear or constant terms.
It is natural to consider well-posedness in \(H^s(\mathbb{R})\). However, due to the infinite speed of propagation, even a linear equation of the form
\[
(\partial_t+\partial_x^3+a(x)\partial_x^2)u=0
\]
where \(a\) is smooth with bounded derivatives requires a Mizohata-type necessary condition for \(L^2\) well-posedness \cite{A,Mz,Tam}:
\begin{equation}\label{eq:mizohata}
\sup_{x_1\leq x_2}\ensuremath{\textrm{Re}}\int_{x_1}^{x_2} a(x)\,dx<\infty
\end{equation}
So at the very least, if \(F\) contains a term of the form \(uu_{xx}\), then we expect any solution \(u\) to \eqref{eq:nonlinear} to require some additional integrability. Indeed, an ill-posedness result in \(H^s\) was proved by Pilod \cite{P}.
One way to address this difficulty is to consider weighted spaces. Kenig-Ponce-Vega proved local well-posedness for small data in \cite{KPV1} and arbitrary data in \cite{KPV2} using the weighted space \(H^s\cap L^2(|x|^k\,dx)\) for sufficiently large \(k\in\mathbb{Z}^+\) and \(s>0\). Their result was extended to systems by Kenig-Staffilani \cite{KS}. Replacing weighted \(L^2\) spaces with weighted Besov spaces, Pilod \cite{P} proved local well-posedness for small data at low regularities in the space \(H^s(\mathbb{R})\cap\mathcal{B}^{s-2,2}_2(\mathbb{R},x^2dx)\) where \(s>\tfrac{9}{4}\) for certain quadratic nonlinearities. Quasilinear versions of this problem for which \eqref{eq:nonlinear} is a special case have also been studied by several authors (see \cite{A,C} and references therein).
As the equation \eqref{eq:nonlinear} is translation invariant, it is natural to look for a solution in a translation invariant space. By replacing weighted spaces with a spatial summability condition, Marzuola-Metcalfe-Tataru \cite{MMT} proved a small data result for quasilinear Schr\"odinger equations for initial data in a translation invariant space \(l^1H^s\subset H^s\). Their result relies on a local energy decay estimate using spaces similar to those suggested by Kenig-Ponce-Vega \cite{KPV3}.
In this paper we adapt the result of Marzuola-Metcalfe-Tataru to the problem \eqref{eq:nonlinear} where waves at frequency \(2^j\) travel at speed \(2^{2j}\). By a slight abuse of notation we also call the adapted initial data space \(\ensuremath{l^1} H^s\). As the need for additional integrability is solely due to the bilinear interactions, as in \cite{KPV1,KPV2,KPV3,KS,MMT}, we expect to be able to remove the spatial summability condition for the case that \(F\) contains no quadratic terms.
We take a standard Littlewood-Paley decomposition
\[
1=\sum\limits_{j=0}^\infty S_j
\]
constructed by taking smooth \(\varphi_0\colon\mathbb{R}\rightarrow[0,1]\) such that
\[
\varphi_0(\xi)=\left\{\begin{array}{ll}1&\qquad\textrm{for }\xi\in[-1,1]
\\0&\qquad\textrm{for }|\xi|\geq2\end{array}\right.
\]
Then define
\[
\varphi_j(\xi)=\varphi_0(2^{-j}\xi)-\varphi_0(2^{-j+1}\xi)
\]
and
\[
f_j=S_jf=\mathcal{F}^{-1}(\varphi_j\hat f)
\]
where \(\mathcal{F}u=\hat u\) is the spatial Fourier transform.
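As a purely numerical illustration of this decomposition (it is not used anywhere in the proofs), the following script builds the multipliers \(\varphi_j\) on a periodic grid and checks that the pieces \(S_jf\) sum back to \(f\). The cosine transition used for \(\varphi_0\) here is only a \(C^1\) stand-in for the smooth cutoff above, and the grid parameters are arbitrary; the reconstruction identity only uses the endpoint values of \(\varphi_0\).
\begin{verbatim}
import numpy as np

M, L = 2048, 100.0
x = (np.arange(M) - M // 2) * (L / M)
xi = 2 * np.pi * np.fft.fftfreq(M, d=L / M)

def phi0(s):
    a = np.abs(s)
    return np.where(a <= 1, 1.0,
           np.where(a >= 2, 0.0, 0.5 * (1 + np.cos(np.pi * (a - 1)))))

f = np.exp(-x**2) * np.cos(7 * x)      # a sample rapidly decaying function
fhat = np.fft.fft(f)

J = 8                                  # 2^J exceeds the largest grid frequency
mults = [phi0(xi)] + [phi0(xi / 2**j) - phi0(xi / 2**(j - 1))
                      for j in range(1, J + 1)]
pieces = [np.fft.ifft(m * fhat).real for m in mults]   # the pieces S_j f

print("reconstruction error:", np.max(np.abs(sum(pieces) - f)))
\end{verbatim}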
For each \(j\geq0\) we take a partition \(\mathcal{Q}_{2j}\) of \(\mathbb{R}\) into intervals of length \(2^{2j}\) and an associated smooth partition of unity
\[
1=\sum\limits_{Q\in\mathcal{Q}_{2j}}\chi_Q
\]
where we assume \(\chi_Q\sim 1\) on \(Q\) and \(\mathrm{supp}\,\chi_Q\subset B\left(Q,\tfrac{1}{2}\right)\), and then define
\[
\|u\|_{l^1_{2j}L^2}=\sum\limits_{Q\in\mathcal{Q}_{2j}}\|\chi_Q u\|_{L^2}
\]
We define the initial data space \(\ensuremath{l^1} H^s\) with norm
\[
\|u\|_{\ensuremath{l^1} H^s}^2=\sum\limits_{j\geq0}2^{2sj}\|S_ju\|^2_{l^1_{2j}L^2}
\]
We note that for \(s>1\) we have \(\ensuremath{l^1} H^s\subset L^1\). The main result we prove is the following.
\begin{theorem}\label{mainthrm}
For \(s>\tfrac{9}{2}\), there exists \(C>0\) such that the equation \eqref{eq:nonlinear} is locally well-posed in \(\ensuremath{l^1} H^s\) on the time interval \([0,T]\) where \(T=e^{-C\|u_0\|_{\ensuremath{l^1} H^s}}\).
\end{theorem}
We take the definition of ``well-posedness'' to be the existence and uniqueness of a solution \(u\in C([0,T],\ensuremath{l^1} H^s)\) and Lipschitz continuity of the solution map\[\ensuremath{l^1} H^s\ni u_0\mapsto u\in C([0,T],\ensuremath{l^1} H^s)\]We note that as the problem considered in \cite{MMT} is quasilinear, continuous dependence on the initial data is all that can be expected. Although we use a similar method in proving local well-posedness for \eqref{eq:nonlinear}, the semilinear structure allows us to obtain Lipschitz dependence.
The outline of the proof of Theorem \ref{mainthrm} is as follows. Using a similar argument to Bejenaru-Tataru \cite{BT}, we split the initial data into a low frequency component and a high frequency component. As the low frequency component of the data is essentially stationary on a small time interval, we freeze it at \(t=0\) and rewrite \eqref{eq:nonlinear} as an equation for the evolution of the high frequency component of the form
\begin{equation}\label{intro:hf}(\partial_t+\partial_x^3+a(x)\partial_x^2)v=\tilde F(x,v,v_x,v_{xx})\end{equation}
As the spaces we use are adapted to the unit time interval, we rescale the initial data so that the high frequency component is sufficiently small to solve \eqref{intro:hf} using a perturbative argument on the unit time interval. The Mizohata-type condition \eqref{eq:mizohata} suggests the term \(a(x)\partial_x^2v\) will not be perturbative, so we include this in the principal part. In order to establish estimates for the linear equation
\begin{equation}\label{intro:modairy}(\partial_t+\partial_x^3+a(x)\partial_x^2)v=f\end{equation}
we conjugate the operator by \(e^{-\frac{1}{3}\int_0^xa(y)\,dy}\) and then find approximate solutions to \eqref{intro:modairy} by solving a suitable Airy equation. To complete the proof of Theorem \ref{mainthrm} we use a contraction mapping argument to solve for the time evolution of the high frequency component of the data.
The structure of the remainder of the paper is as follows: in Section 2 we define the function spaces used and prove a number of bilinear estimates. In Section 3 we discuss the rescaling and properties of the rescaled initial data. In Section 4 we prove an estimate for the solution to the linear Airy-type equation \eqref{intro:modairy}, and in Section 5 we complete the proof of Theorem \ref{mainthrm}.
\begin{remark}
While our result covers the case of the KdV, mKdV and gKdV, it is far from the best known results for these equations and we refer the reader to \cite{LinPon} for a summary of results and references.
However, even in the case of quadratic nonlinearities involving \(u_{xx}\) with which we are primarily concerned, the argument used to prove Theorem \ref{mainthrm} allows us to relax the assumption \(s>\tfrac{9}{2}\) for particular nonlinearities and initial data.
For data with sufficiently small \(\ensuremath{l^1} H^s\) norm we can prove local well-posedness without having to rescale the initial data. In this case we can use a contraction mapping argument with the linear estimate of Proposition \ref{propn:MMT} and the bilinear and algebra estimates of Proposition \ref{propn:bilests}. The only restrictions on regularity in this case are from the bilinear and algebra estimates and hence we have well-posedness on the unit time interval provided \(s>\sigma_0\) where \(\sigma_0\) is defined as follows.
\renewcommand{\arraystretch}{1.5}\begin{center}
\begin{tabular}{ c | c c }
\(\mathbf{\sigma_0}\)&\multicolumn{2}{c}{\(\mathbf{F}\)\textbf{ contains terms of the form}}\\\hline
\(\tfrac{1}{2}\)&\(u^2\)&\\\hline
\(1\)&\(u^{\alpha_0}\)&\(\alpha_0\geq3\)\\\hline
\(\tfrac{3}{2}\)&\(u^{\alpha_0}u_x\)&\\\hline
\(2\)&\(u^{\alpha_0}u_x^{\alpha_1}\)&\(\alpha_0\geq1\)\\\hline
\multirow{2}{*}{\(\tfrac{5}{2}\)}&\(u^{\alpha_0}u_x^{\alpha_1}u_{xx}\)&\(\alpha_0\geq1\)\\
&\(u_x^{\alpha_1}\)&\\\hline
\(3\)&\(u^{\alpha_0}u_x^{\alpha_1}u_{xx}^{\alpha_2}\)&\(\alpha_0\geq1\)\\\hline
\(\tfrac{7}{2}\)&\(u_x^{\alpha_1}u_{xx}^{\alpha_2}\)&\(\alpha_1\geq1\)\\\hline
\(\tfrac{9}{2}\)&\(u_{xx}^{\alpha_2}\)&\\
\end{tabular}\end{center}
In the large data case, in order to ensure the high frequency component of the rescaled initial data is small, the scaling of the \(l^1H^s\) spaces (see Proposition \ref{freqbds}) also requires that \(s>\lambda+2\) where
\begin{equation}\label{defn:lambda}
\lambda=\max\left\{\frac{\beta_1+2\beta_2-3}{|\beta|-1}:2\leq|\beta|,\,\beta\leq\alpha,\,c_\alpha\neq0\right\}
\end{equation}
and
\[
F(u,u_x,u_{xx})=\sum\limits_{2\leq|\alpha|\leq m} c_\alpha u^{\alpha_0}u_x^{\alpha_1}u_{xx}^{\alpha_2}
\]
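For example, for the KdV nonlinearity \(F=uu_x\) the only admissible multi-index in \eqref{defn:lambda} is \(\beta=\alpha=(1,1,0)\) and we get \(\lambda=(1-3)/(2-1)=-2\); for \(F=uu_{xx}\) we get \(\lambda=(2-3)/(2-1)=-1\), and for \(F=u_{xx}^2\) we get \(\lambda=(4-3)/(2-1)=1\).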
In order to get estimates for the nonlinearity (see Proposition \ref{RBDS}) we need to take \(s>\sigma_0^*\) where
\[
\sigma_0^*=\max\left\{\sigma_0(\beta):2\leq|\beta|,\,\beta\leq\alpha,\,c_\alpha\neq0\right\}
\]
and \(\sigma_0(\beta)\) is determined by the table above. So for large data, Theorem \ref{mainthrm} is true for \(s>s_0\) where
\begin{equation}\label{defn:s0}
s_0=\max\{\sigma_0^*,\lambda+2\}
\end{equation}
\end{remark}
\section{Spaces}\label{sect:ests}
\subsection{Definitions}
For a Sobolev-type space \(U\) we define the \(l^1_{2j}U\) norm by
\[
\|u\|_{l^1_{2j}U}=\sum\limits_{Q\in\mathcal{Q}_{2j}}\|\chi_Qu\|_{U}
\]
and the \(l^\infty_{2j}U\) norm
\[
\|u\|_{l^\infty_{2j}U}=\sup\limits_{Q\in\mathcal{Q}_{2j}}\|\chi_Qu\|_{U}
\]
We define the local energy space \(X\) (see Remark 3.7 of \cite{KPV3}) with norm
\[
\|u\|_X=\sup\limits_{l\geq0}\sup\limits_{Q\in\mathcal{Q}_l}2^{-l/2}\|u\|_{L^2_{t,x}([0,1]\times Q)}
\]
and have the following local smoothing effect for the Airy linear propagator.
\begin{lemma}
If \(f\in L^2(\mathbb{R})\) then
\[
\|e^{-t\partial_x^3}S_jf\|_X\lesssim 2^{-j}\|S_jf\|_{L^2}
\]
\end{lemma}
We look for solutions to the linear equation in the space \(\ensuremath{l^1} X^s\subset C([0,1],\ensuremath{l^1} H^s)\) where
\[
\|u\|_{\ensuremath{l^1} X^s}^2=\sum\limits_{j\geq0}2^{2js}\|S_ju\|^2_{l_{2j}^1X_j}
\]
and
\[
\|u\|_{X_j}=2^j\|u\|_X+\|u\|_{L^\infty_tL^2_x}
\]
We define the atomic space \(Y\) with atoms \(a\) such that there exist \(j\geq 0\) and \(Q\in\mathcal{Q}_j\) with \(\mathrm{supp}\,a\subset[0,1]\times Q\) and \(\|a\|_{L^2_{t,x}([0,1]\times Q)}\lesssim 2^{-j/2}\) with norm given by
\[
\|f\|_Y=\inf\left\{\sum|c_k|:f=\sum c_ka_k,\;a_k\textrm{ atoms}\right\}
\]
We have the duality relation \(Y^*=X\) with respect to the standard \(L^2\) duality (see \cite{MMT} Proposition 2.1). For the inhomogeneous term in the linear equation we use the space \(\ensuremath{l^1} Y^s\) where
\[
Y_j=2^jY+L_t^1L_x^2
\]
with norm
\[
\|f\|_{Y_j}=\inf\limits_{f=f_1+f_2}\left\{2^{-j}\|f_1\|_Y+\|f_2\|_{L_t^1L_x^2}\right\}
\]
and as above
\[
\|f\|_{\ensuremath{l^1} Y^s}^2=\sum\limits_{j\geq0}2^{2js}\|S_jf\|^2_{l^1_{2j}Y_j}
\]
We use the Zygmund space \(C^\gamma_*\) with norm
\[
\|u\|_{C^\gamma_*}=\sup_{j\geq0}\,(2^{\gamma j}\|S_ju\|_{L^\infty})\qquad\gamma>0
\]
We have an algebra estimate
\begin{equation}\label{est:Calg}
\|uv\|_{C^\gamma_*}\lesssim\|u\|_{C^\gamma_*}\|v\|_{C^\gamma_*}
\end{equation}
The H\"older space \(C^\gamma\subset C^\gamma_*\) with the estimate
\begin{equation}\label{est:holderzygmund}
\|u\|_{C^\gamma_*}\lesssim \|u\|_{C^\gamma}
\end{equation}
and when \(\gamma\not\in\mathbb{Z}_+\) we have \(C^\gamma=C^\gamma_*\) (see \cite{T} for details).
\subsection{Estimates}
For several estimates we will replace the partition of unity \(\{\chi_Q\}_{Q\in\mathcal{Q}_{2j}}\) by frequency localized versions \(\tilde\chi_Q\), for example taking \linebreak\(\tilde\chi_Q=S_0\chi_Q\), such that each \(\tilde\chi_Q\sim1\) on \(Q\), is rapidly decreasing off \(Q\) and for a Sobolev-type space \(U\)
\[
\sum\limits_{Q\in\mathcal{Q}_{2j}}\|\chi_Qu\|_{U}\sim\sum\limits_{Q\in\mathcal{Q}_{2j}}\|\tilde\chi_Qu\|_{U}
\]
and as a consequence of being frequency localized
\[
\sum\limits_{Q\in\mathcal{Q}_{2j}}\|S_j(\tilde\chi_Qu)\|_U\sim\sum\limits_{Q\in\mathcal{Q}_{2j}}\|\tilde\chi_QS_ju\|_U
\]
Replacing the partition of unity by frequency localized versions we have the following Bernstein-type inequality for \(1\leq p\leq q\leq\infty\)
\begin{equation}\label{est:bernstein}
\|S_ju\|_{l^1_{2j}L^q_x}\lesssim2^{j\left(\tfrac{1}{p}-\tfrac{1}{q}\right)}\|S_ju\|_{l^1_{2j}L^p_x}
\end{equation}
We also note that for any Sobolev-type space \(U\) we can change interval size
\[
\|u\|_{l^1_{2j}U}\lesssim\left\{\begin{array}{ll}2^{2k-2j}\|u\|_{l^1_{2k}U}&\qquad\textrm{for }j\leq k
\\\|u\|_{l^1_{2k}U}&\qquad\textrm{for }j>k\end{array}\right.
\]
To see this, if \(j\leq k\) then for \(Q\in\mathcal{Q}_{2j}\) there exists some \(\tilde Q\in\mathcal{Q}_{2k}\) such that \(Q\subset\tilde Q\) and
\[
\|\chi_Qu\|_{U}\lesssim\|\chi_{\tilde Q}u\|_{U}
\]
and each \(\tilde Q\in\mathcal{Q}_{2k}\) is counted \(2^{2k-2j}\) times in this way. If \(j>k\) then for each \(Q\in\mathcal{Q}_{2j}\),
\[
\|\chi_Qu\|_U\lesssim\sum\limits_{\substack{\tilde Q\in\mathcal{Q}_{2k}\\\tilde Q\subset Q}}\|\chi_{\tilde Q}u\|_U
\]
In the case \(U=L^2\) we can improve this to
\begin{equation}\label{est:changer}
\|u\|_{l^1_{2j}L^2}\lesssim2^{k-j}\|u\|_{l^1_{2k}L^2}\qquad\textrm{for }j\leq k
\end{equation}
by writing
\[
\sum\limits_{Q\in\mathcal{Q}_{2j}}\|\chi_Qu\|_{l^1_{2k}L^2}=\sum\limits_{\tilde Q\in\mathcal{Q}_{2k}}\sum\limits_{\substack{Q\in\mathcal{Q}_{2j}\\Q\subset\tilde Q}}\|\chi_Qu\|_{L^2}
\]
and applying the Cauchy-Schwarz inequality to the sum in \(Q\subset\tilde Q\).
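Concretely, each \(\tilde Q\in\mathcal{Q}_{2k}\) contains \(2^{2k-2j}\) intervals \(Q\in\mathcal{Q}_{2j}\), so by the Cauchy-Schwarz inequality
\[
\sum\limits_{\substack{Q\in\mathcal{Q}_{2j}\\Q\subset\tilde Q}}\|\chi_Qu\|_{L^2}\leq2^{k-j}\Big(\sum\limits_{\substack{Q\in\mathcal{Q}_{2j}\\Q\subset\tilde Q}}\|\chi_Qu\|^2_{L^2}\Big)^{1/2}\lesssim2^{k-j}\|\chi_{\tilde Q}u\|_{L^2}
\]
where the last step uses the finite overlap of the cutoffs \(\chi_Q\); summing over \(\tilde Q\in\mathcal{Q}_{2k}\) gives \eqref{est:changer}.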
We have the following collection of bilinear estimates.
\begin{proposition}\label{propn:bilests} Suppose \(u,v\colon\mathbb{R}\times\mathbb{R}\rightarrow\mathbb{C}\) and \(a,b\colon\mathbb{R}\rightarrow\mathbb{C}\) then
a) Algebra estimates.
\begin{equation}\label{est:Xalg}\|uv\|_{\ensuremath{l^1} X^s}\lesssim\|u\|_{\ensuremath{l^1} X^s}\|v\|_{\ensuremath{l^1} X^s}\qquad s>1\end{equation}
\begin{equation}\label{est:Halg}\|ab\|_{\ensuremath{l^1} H^s}\lesssim\|a\|_{\ensuremath{l^1} H^s}\|b\|_{\ensuremath{l^1} H^s}\qquad s>\tfrac{1}{2}\end{equation}
b) Bilinear estimates I. For \(s>\tfrac{1}{2}\),\begin{equation}\label{est:XXY}\|uv\|_{\ensuremath{l^1} Y^s}\lesssim\|u\|_{\ensuremath{l^1} X^\alpha}\|v\|_{\ensuremath{l^1} X^\beta}\qquad\alpha,\beta\geq s-2\textrm{ and }\alpha+\beta>s+\tfrac{1}{2}\end{equation}\begin{equation}\label{est:HXY}\|au\|_{\ensuremath{l^1} Y^s}\lesssim \|a\|_{\ensuremath{l^1} H^\alpha}\|u\|_{\ensuremath{l^1} X^\beta}\qquad\alpha,\beta\geq s-1\textrm{ and }\alpha+\beta>s+\tfrac{1}{2}\end{equation}
c) Bilinear estimates II. \begin{equation}\label{est:CXX}\|au\|_{\ensuremath{l^1} X^s}\lesssim \|a\|_{C^\gamma_*}\|u\|_{\ensuremath{l^1} X^s}\qquad s>\tfrac{1}{2}\textrm{ and }\gamma>s+1\end{equation}\begin{equation}\label{est:CXY}\|au\|_{\ensuremath{l^1} Y^s}\lesssim \|a\|_{C^\gamma_*}\|u\|_{\ensuremath{l^1} Y^s}\qquad s>\tfrac{3}{2}\textrm{ and }\gamma>s\end{equation}\begin{equation}\label{est:CHH}\|ab\|_{\ensuremath{l^1} H^s}\lesssim \|a\|_{C^\gamma_*}\|b\|_{\ensuremath{l^1} H^s}\qquad s>1\textrm{ and }\gamma>s\end{equation}
\end{proposition}
\begin{proof}Following \cite{MMT} Proposition 3.1 we consider terms of the form \(S_k(S_iuS_jv)\) and the usual Littlewood-Paley trichotomy.
a) For \eqref{est:Xalg} we consider,
\textbf{High-low interactions.} \(|i-k|<4\) and \(j<i-4\). Using the Bernstein inequality \eqref{est:bernstein} we have,
\begin{align*}\|S_k(S_iuS_jv)\|_{l^1_{2k}X_k}&\lesssim\|S_iuS_jv\|_{l^1_{2i}X_i}\\&\lesssim\|S_iu\|_{l^1_{2i}X_i}\|S_jv\|_{L_{t,x}^\infty}\\&\lesssim2^{\tfrac{1}{2}j}\|S_iu\|_{l^1_{2i}X_i}\|S_jv\|_{L^\infty_tL^2_x}\end{align*}
The symmetric low-high interaction is similar.
\textbf{High-high interactions.} \(|i-j|\leq4\) and \(i,j\geq k-4\).
For \(i>k\) we use the Bernstein inequality \eqref{est:bernstein} at frequency \(\sim 2^k\), Cauchy-Schwarz and then change interval size to get
\begin{align*}\|S_k(S_iuS_jv)\|_{l^1_{2k}X_k}&\lesssim 2^{\tfrac{1}{2}k}\|S_iu\|_{l^1_{2k}X_k}\|S_jv\|_{L^\infty_tL^2_x}\\&\lesssim2^{2i-\tfrac{3}{2}k}\|S_iu\|_{l^1_{2i}X_i}\|S_jv\|_{l^1_{2j}X_j}\end{align*}
The result \eqref{est:Xalg} follows from summation.
The argument for \eqref{est:Halg} is identical to \eqref{est:Xalg}, except that the improved interval change result \eqref{est:changer} used in the high-high interactions only requires \(s>\tfrac{1}{2}\).
b) We note that for \(l\leq k\)\begin{equation}\label{est:key2Y}\|f\|_{l^1_{2k}Y}\lesssim 2^l\|f\|_{l^1_{2l}L^2_{t,x}}\end{equation}For \eqref{est:XXY} we consider,
\textbf{High-low interactions.} \(|i-k|<4\) and \(j<i-4\).
Using \eqref{est:key2Y} with \(l=j\) followed by Bernstein's inequality \eqref{est:bernstein},
\begin{align*}\|S_iuS_jv\|_{l_{2k}^1Y_k}&\lesssim 2^{-k}\|S_iuS_jv\|_{l^1_{2k}Y}\\
&\lesssim2^{j-k}\|S_iuS_jv\|_{l^1_{2j}L^2_{t,x}}\\
&\lesssim2^{j-k}\|S_iu\|_{l^\infty_{2j}L^2_{t,x}}\|S_jv\|_{l^1_{2j}L^\infty_{t,x}}\\
&\lesssim2^{\tfrac{5}{2}j-2k}\|S_iu\|_{X_i}\|S_jv\|_{l^1_{2j}L_t^\infty L_x^2}
\end{align*}
The symmetric low-high interaction is similar.
\textbf{High-high interactions.} \(|i-j|\leq4\) and \(i,j\geq k-4\).
For \(j>k\) we use \eqref{est:key2Y} with \(l=k\), change interval size and then use Bernstein's inequality \eqref{est:bernstein}
\begin{align*}\|S_k(S_iuS_jv)\|_{l^1_{2k}Y_k}&\lesssim2^{j-k}\|S_k(S_iuS_jv)\|_{l^1_{2j}L^2_{t,x}}\\
&\lesssim2^{j-\tfrac{1}{2}k}\|S_k(S_iuS_jv)\|_{l^1_{2j}L^2_tL^1_x}\\
&\lesssim2^{j-\tfrac{1}{2}k}\|S_iu\|_{L^\infty_tL^2_x}\|S_jv\|_{l_{2j}^1L^2_{t,x}}\\
&\lesssim2^{j-\tfrac{1}{2}k}\|S_iu\|_{X_i}\|S_jv\|_{l^1_{2j}X_j}
\end{align*}
The result follows from summation.
For \eqref{est:HXY} we have,
\textbf{High-low interactions.} \(|i-k|<4\) and \(j<i-4\).
As for \eqref{est:XXY},
\begin{align*}\|S_iaS_ju\|_{l_{2k}^1Y_k}&\lesssim2^{j-k}\|S_iaS_ju\|_{l^1_{2j}L^2_{t,x}}\\
&\lesssim2^{j-k}\|S_ia\|_{L^2_x}\|S_ju\|_{l^1_{2j}L^\infty_{t,x}}\\
&\lesssim2^{\tfrac{3}{2}j-k}\|S_ia\|_{l^1_{2i}L_x^2}\|S_ju\|_{l^1_{2j}L_t^\infty L_x^2}
\end{align*}
\textbf{Low-high interactions.} \(|j-k|<4\) and \(i<j-4\).
For this case we use the \(L^1_tL^2_x\) norm and switch interval size to get
\begin{align*}\|S_iaS_ju\|_{l_{2k}^1Y_k}&\lesssim\|S_iaS_ju\|_{l^1_{2i}L^1_tL^2_x}\\
&\lesssim \|S_ia\|_{l^1_{2i}L_x^\infty}\|S_ju\|_{l^\infty_{2i}L_{t,x}^2}\\
&\lesssim 2^{\tfrac{3}{2}i-j}\|S_ia\|_{l^1_{2i}L_x^2}\|S_ju\|_{X_j}
\end{align*}
\textbf{High-high interactions.} \(|i-j|\leq4\) and \(i,j\geq k-4\).
Identically to \eqref{est:XXY},
\[\|S_k(S_iaS_ju)\|_{l^1_{2k}Y_k}\lesssim2^{j-\tfrac{1}{2}k}\|S_ia\|_{l^1_{2i}L^2_x}\|S_ju\|_{l^1_{2j}X_j}\]
c) For \eqref{est:CXX} we have,
\textbf{High-low interactions.} \(|i-k|<4\) and \(j<i-4\).
Switching interval size we get
\[\|S_k(S_iaS_ju)\|_{l^1_{2k}X_k}\lesssim2^{i-j}\|S_ia\|_{L^\infty}\|S_ju\|_{l^1_{2j}X_j}\]The condition \(\gamma>s+1\) guarantees that the sum in \(k\sim i\) converges.
\textbf{Low-high interactions.} \(|j-k|<4\) and \(i<j-4\).
This case is straightforward as
\[\|S_k(S_iaS_ju)\|_{l^1_{2k}X_k}\lesssim\|S_ia\|_{L^\infty}\|S_ju\|_{l^1_{2j}X_j}\]
\textbf{High-high interactions.} \(|i-j|\leq 4\) and \(i,j\geq k-4\).
Switching interval size, for \(j>k\) we have
\[\|S_k(S_iaS_ju)\|_{l^1_{2k}X_k}\lesssim2^{2j-2k}\|S_ia\|_{L^\infty}\|S_ju\|_{l^1_{2j}X_j}\]
For the estimate \eqref{est:CXY} the low-high interaction is identical to \eqref{est:CXX}.
\textbf{High-low interactions.} \(|i-k|<4\) and \(j<i-4\).
\[\|S_k(S_iaS_ju)\|_{l^1_{2k}Y_k}\lesssim\|S_ia\|_{L^\infty}\|S_ju\|_{l^1_{2j}Y_j}\]this requires \(\gamma>s\).
\textbf{High-high interactions.} \(|i-j|\leq 4\) and \(i,j\geq k-4\).
\[\|S_k(S_iaS_ju)\|_{l^1_{2k}Y_k}\lesssim2^{3j-3k}\|S_ia\|_{L^\infty}\|S_ju\|_{l^1_{2j}Y_j}\]
The estimate \eqref{est:CHH} is identical to \eqref{est:CXX} for the low-high and high-high interactions and \eqref{est:CXY} for the high-low interactions.
\end{proof}
\section{Scaling}\label{sect:scales}
Given a solution to \eqref{eq:nonlinear}, we rescale using the scaling corresponding to the `worst' monomial nonlinearity in \(F\),
\[u^{(k)}(t,x)=2^{\lambda k}u(2^{-3k}t,2^{-k}x)\]
where \(\lambda\) is as defined in \eqref{defn:lambda}. We have the corresponding rescaled initial data
\[u_0^{(k)}(x)=2^{\lambda k}u_0(2^{-k}x)\]
The rescaling has the effect of sending high frequencies to low frequencies in the sense that
\[S_j(u_0^{(k)})=(S_{j+k}u_0)^{(k)}\]
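This identity is immediate on the Fourier side: since \(\widehat{u_0^{(k)}}(\xi)=2^{(\lambda+1)k}\hat u_0(2^k\xi)\) and \(\varphi_j(\xi)=\varphi_0(2^{-j}\xi)-\varphi_0(2^{-j+1}\xi)=\varphi_{j+k}(2^k\xi)\) for \(j\geq1\), multiplying \(\widehat{u_0^{(k)}}\) by \(\varphi_j\) is the same as multiplying \(\hat u_0\) by \(\varphi_{j+k}\) before rescaling.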
To make use of this we define the low and high frequency components of the rescaled initial data
\[u_0^{(k)l}=S_0u_0^{(k)}=(S_{\leq k}u_0)^{(k)}\qquad\qquad u_0^{(k)h}=u_0^{(k)}-u_0^{(k)l}\]
As the low frequency component of the rescaled initial data is essentially stationary on the unit time interval we freeze it at \(t=0\) and define
\[v=u^{(k)}-u_0^{(k)l}\qquad\qquad v_0=u_0^{(k)h}\]
We can then rewrite \eqref{eq:nonlinear} as an equation for \(v\) with coefficients depending on \(u_0^{(k)l}\) and the scaling factor \(k\)
\begin{equation}\label{eq:rescalednonlinear}\left\{\begin{array}{l}(\partial_t+\partial_x^3+a(x)\partial_x^2)v=G(x,v,v_x,v_{xx})+L(x,v,v_x)+R(x)
\\v(0)=v_0\end{array}\right.\end{equation}
where
\begin{equation}\label{eq:a}a(x)=\sum_{r=0}^2c_r2^{(r-\lambda-1)k}\partial_x^ru_0^{(k)l}\end{equation}
\(G\) is a polynomial in \(v,v_x,v_{xx}\) of degree \(m\) with no constant or linear terms
\begin{equation}\label{eq:g}G(x,v,v_x,v_{xx})=\sum\limits_{2\leq|\beta|\leq|\alpha|\leq m}G_{\alpha,\beta}(x;k)v^{\beta_0}v_x^{\beta_1}v_{xx}^{\beta_2}\end{equation}
\[G_{\alpha,\beta}(x)=c_{\alpha}2^{(\lambda-3-\lambda|\alpha|+\alpha_1+2\alpha_2)k}(u_0^{(k)l})^{\alpha_0-\beta_0}(\partial_xu_0^{(k)l})^{\alpha_1-\beta_1}(\partial_x^2u_0^{(k)l})^{\alpha_2-\beta_2}\]
\(L\) is linear in \(v,v_x\)
\begin{equation}\label{eq:L}L(x,v,v_x)=\sum\limits_{\substack{2\leq|\alpha|\leq m\\|\beta|=1}}L_{\alpha,\beta}(x)v^{\beta_0}v_x^{\beta_1}\end{equation}
\[L_{\alpha,\beta}(x)=c_{\alpha}2^{(\lambda-3-\lambda|\alpha|+\alpha_1+2\alpha_2)k}(u_0^{(k)l})^{\alpha_0-\beta_0}(\partial_xu_0^{(k)l})^{\alpha_1-\beta_1}(\partial_x^2u_0^{(k)l})^{\alpha_2}\]
and \(R\) is an inhomogeneous term
\begin{equation}\label{eq:R}R(x)=\sum\limits_{2\leq|\alpha|\leq m}R_\alpha(x)-\partial_x^3u_0^{(k)l}\end{equation}
\[R_\alpha(x)=c_{\alpha}2^{(\lambda-3-\lambda|\alpha|+\alpha_1+2\alpha_2)k}(u_0^{(k)l})^{\alpha_0}(\partial_xu_0^{(k)l})^{\alpha_1}(\partial_x^2u_0^{(k)l})^{\alpha_2}\]
In order to solve \eqref{eq:rescalednonlinear} we need estimates on the size of the coefficients and initial data. We have the following proposition giving us estimates on the low and high frequency components of the rescaled initial data.
\begin{proposition}\label{freqbds}~
a) For \(r\geq0\), if \(s>r+1\)
\begin{equation}\label{est:lowfreq}\|\partial_x^ru_0^{(k)l}\|_{\ensuremath{l^1} H^\sigma}\lesssim2^{(\lambda+1-r)k}\|u_0\|_{\ensuremath{l^1} H^s}\qquad\sigma\geq0\end{equation}
and if \(s\in(\lambda+2,r+1]\)
\begin{equation}\label{est:lowfreq2}\|\partial_x^ru_0^{(k)l}\|_{\ensuremath{l^1} H^\sigma}\lesssim2^{-\tfrac{1}{2}(s-\lambda-2)k}\|u_0\|_{\ensuremath{l^1} H^s}\qquad\sigma\geq0\end{equation}
b) For \(r\geq0\) and \(s>r+\tfrac{1}{2}\)
\begin{equation}\label{est:lowfreqcstar}\|\partial_x^ru_0^{(k)l}\|_{C^\gamma_*}\lesssim2^{(\lambda-r)k}\|u_0\|_{\ensuremath{l^1} H^s}\qquad\gamma>0\end{equation}
c) \begin{equation}\label{est:highfreq}\|u_0^{(k)h}\|_{\ensuremath{l^1} H^s}\lesssim2^{-(s-\lambda-2) k}\|u_0\|_{\ensuremath{l^1} H^s}\end{equation}\end{proposition}
\begin{proof}~
a) For \eqref{est:lowfreq} we consider,
\begin{align*}\|\partial_x^ru_0^{(k)l}\|_{\ensuremath{l^1} H^\sigma}&\sim\sum\limits_{Q\in\mathcal{Q}_0}\|\chi_Q\partial_x^rS_0(u_0^{(k)})\|_{L^2}\\
&\lesssim\sum\limits_{Q\in\mathcal{Q}_0}\|\chi_Q\partial_x^r(S_{\leq k}u_0)^{(k)}\|_{L^2}\\
&\lesssim\sum\limits_{j=0}^k\sum\limits_{Q\in\mathcal{Q}_0}2^{rj+(\lambda-r)k}\|\chi_QS_{j}u_0(2^{-k}x)\|_{L^2}\\
&\lesssim\sum\limits_{j=0}^k\sum\limits_{Q\in\mathcal{Q}_{-k}}2^{rj+(\lambda+\tfrac{1}{2}-r)k}\|\chi_{Q}S_{j}u_0\|_{L^2}\\
&\lesssim\sum\limits_{j=0}^k\sum\limits_{Q\in\mathcal{Q}_{2j}}2^{(r+1)j+(\lambda+1-r)k}\|\chi_{Q}S_{j}u_0\|_{L^2}\\
&\lesssim2^{(\lambda+1-r)k}\|u_0\|_{\ensuremath{l^1} H^s}\end{align*}
for \(s>r+1\).
If \(s\in(\lambda+2,r+1)\) we replace the final line by
\begin{align*}\sum\limits_{j=0}^k2^{(r+1)j+(\lambda+1-r)k}\|S_{j}u_0\|_{l^1_{2j}L^2}
&\lesssim2^{-(s-\lambda-2)k}\sum\limits_{j=0}^k2^{(r+1-s)(j-k)}2^{sj}\|S_{j}u_0\|_{l^1_{2j}L^2}\\
&\lesssim2^{-(s-\lambda-2)k}\|u_0\|_{\ensuremath{l^1} H^s}\end{align*}
If \(s=r+1\) then
\begin{align*}\sum\limits_{j=0}^k2^{sj+(\lambda+1-r)k}\|S_{j}u_0\|_{l^1_{2j}L^2}
&\lesssim2^{\tfrac{1}{2}(\lambda+1-r)k}\sum\limits_{j=0}^k2^{-\tfrac{1}{2}(r-1-\lambda)j}2^{sj}\|S_{j}u_0\|_{l^1_{2j}L^2}\\
&\lesssim2^{-\tfrac{1}{2}(s-\lambda-2)k}\|u_0\|_{\ensuremath{l^1} H^s}\end{align*}
b) For \eqref{est:lowfreqcstar} we have
\begin{align*}\|\partial_x^ru_0^{(k)l}\|_{C^\gamma_*}&\sim\|\partial_x^rS_0(u_0^{(k)})\|_{L^\infty}\\
&\lesssim\sum\limits_{j=0}^k2^{(\lambda-r)k}\|\partial_x^rS_ju_0\|_{L^\infty}\\&\lesssim\sum\limits_{j=0}^k2^{(\lambda-r)k+(\tfrac{1}{2}+r)j}\|S_ju_0\|_{L^2}\\&\lesssim2^{(\lambda-r)k}\|u_0\|_{\ensuremath{l^1} H^s}\end{align*}
for \(s>r+\tfrac{1}{2}\).
c) For \eqref{est:highfreq} we have that
\begin{align*}\|S_j(u_0^{(k)})\|_{l^1_{2j}L^2}&\lesssim\sum\limits_{Q\in\mathcal{Q}_{2j}}2^{\lambda k}\|\chi_QS_{j+k}u_0(2^{-k}x)\|_{L^2}\\
&\lesssim\sum\limits_{Q\in\mathcal{Q}_{2j-k}}2^{(\lambda+\tfrac{1}{2})k}\|\chi_QS_{j+k}u_0\|_{L^2}\\
&\lesssim\sum\limits_{Q\in\mathcal{Q}_{2(j+k)}}2^{(\lambda+2)k}\|\chi_QS_{j+k}u_0\|_{L^2}\end{align*}
and hence we have
\begin{align*}\|u_0^{(k)h}\|^2_{\ensuremath{l^1} H^s}&\sim\sum\limits_{j=1}^\infty2^{2js}\|S_ju_0^{(k)}\|^2_{l^1_{2j}L^2}\\
&\lesssim\sum\limits_{j=1}^\infty2^{2js}2^{(2\lambda+4)k}\|S_{j+k}u_0\|^2_{l^1_{2(j+k)}L^2}\\
&\lesssim2^{-2(s-\lambda-2)k}\sum\limits_{j=1}^\infty2^{2(j+k)s}\|S_{j+k}u_0\|^2_{l^1_{2(j+k)}L^2}\end{align*}\end{proof}
Due to the Mizohata-type condition \eqref{eq:mizohata} we do not expect to be able to treat the term \(a\partial_x^2v\) in \eqref{eq:rescalednonlinear} as a perturbation of the linear Airy operator and hence include it in the principal part of the equation. From Proposition \ref{freqbds} (a) and (b) we then have the following estimates for the coefficient \(a\).
\begin{proposition}\label{aBDS}Let \(s>\lambda+2\) then,
\begin{equation}\label{est:aest}\|a(x)\|_{\ensuremath{l^1} H^\sigma}\lesssim\|u_0\|_{\ensuremath{l^1} H^s}\qquad\sigma\geq0\end{equation}
\begin{equation}\label{est:Caest}\|a(x)\|_{C^\gamma_*}\lesssim2^{-k}\|u_0\|_{\ensuremath{l^1} H^s}\qquad\gamma>0\end{equation}
For \(r=1,2\) there exists some \(\delta=\delta(s,F)>0\) such that, \begin{equation}\label{est:daest}\|\partial_x^ra(x;k)\|_{\ensuremath{l^1} H^\sigma}\lesssim2^{-\delta k}\|u_0\|_{\ensuremath{l^1} H^s}\qquad\sigma\geq0\end{equation}
\end{proposition}
\section{Linear Estimates}\label{sect:lin}
We consider the linear equation
\begin{equation}\label{eq:modairy}\left\{\begin{array}{l}(\partial_t+\partial_x^3+a\partial_x^2)v=f
\\v(0)=v_0\end{array}\right.\end{equation}
where \(a\colon\mathbb{R}\rightarrow\mathbb{C}\). We aim to prove the following result.
\begin{proposition}\label{result:est}Let \(s>s_0\) where \(s_0\) is as defined in \eqref{defn:s0} and suppose \(a\) satisfies \eqref{est:aest}--\,\eqref{est:daest}. Then for \(k>0\) sufficiently large the equation \eqref{eq:modairy} is locally well-posed in \(\ensuremath{l^1} H^s\) on the unit time interval \([0,1]\) and the solution satisfies the estimate\begin{equation}\label{est:modairysoln}\|v\|_{\ensuremath{l^1} X^s}\lesssim C(\|u_0\|_{\ensuremath{l^1} H^s})(\|v_0\|_{\ensuremath{l^1} H^s}+\|f\|_{\ensuremath{l^1} Y^s})\end{equation}\end{proposition}
To find a solution we conjugate the linear operator by \(e^{-\tfrac{1}{3}\int_0^xa(y)\,dy}\). A calculation gives
\[e^{\tfrac{1}{3}\int_0^xa\,dy}(\partial_t+\partial_x^3+a\partial_x^2)e^{-\tfrac{1}{3}\int_0^xa\,dy}w=(\partial_t+\partial_x^3)w-(a_x+\tfrac{1}{3}a^2)w_x+(\tfrac{2}{27}a^3-\tfrac{1}{3}a_{xx})w\]
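This identity can also be verified symbolically. The short \textit{SymPy} script below is illustrative only; the symbol \(A\) stands for \(\tfrac{1}{3}\int_0^xa(y)\,dy\), which is encoded through the relation \(A'=a/3\).
\begin{verbatim}
import sympy as sp

x, t = sp.symbols('x t')
a = sp.Function('a')(x)
w = sp.Function('w')(x, t)
A = sp.Function('A')(x)          # A(x) = (1/3) * int_0^x a(y) dy, so A' = a/3

v = sp.exp(-A) * w
lhs = sp.exp(A) * (sp.diff(v, t) + sp.diff(v, x, 3) + a * sp.diff(v, x, 2))
# encode A' = a/3, substituting the higher derivatives first
lhs = lhs.subs(sp.Derivative(A, (x, 3)), sp.diff(a, x, 2) / 3)
lhs = lhs.subs(sp.Derivative(A, (x, 2)), sp.diff(a, x) / 3)
lhs = lhs.subs(sp.Derivative(A, x), a / 3)

rhs = (sp.diff(w, t) + sp.diff(w, x, 3)
       - (sp.diff(a, x) + a**2 / 3) * sp.diff(w, x)
       + (sp.Rational(2, 27) * a**3 - sp.diff(a, x, 2) / 3) * w)

assert sp.simplify(lhs - rhs) == 0
\end{verbatim}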
So if \(w\) solves \begin{equation}\label{eq:veqn}\left\{\begin{array}{rcl}(\partial_t+\partial_x^3)w=&\!\!\!\!g\!\!\!\!&=e^{\tfrac{1}{3}\int_0^xa\,dy}f
\\w(0)=&\!\!\!\!w_0\!\!\!\!&=e^{\tfrac{1}{3}\int_0^xa\,dy}v_0\end{array}\right.\end{equation}we expect an approximate solution to \eqref{eq:modairy} to be given by\[v=e^{-\tfrac{1}{3}\int_0^xa\,dy}w\]The sense in which this is an approximate solution is summarized in the following result.
\begin{lemma}\label{modairy}Let \(s>s_0\) and \(a\) satisfy \eqref{est:aest}--\,\eqref{est:daest}.\\Suppose \(v=e^{-\tfrac{1}{3}\int_0^xa\,dy}w\) where \(w\) solves \eqref{eq:veqn} then \begin{equation}\label{est:approxu}\|v\|_{\ensuremath{l^1} X^s}\leq C(\|u_0\|_{\ensuremath{l^1} H^s})(\|v_0\|_{\ensuremath{l^1} H^s}+\|f\|_{\ensuremath{l^1} Y^s})\end{equation} and there exists some \(\delta=\delta(s,F)>0\) such that the error satisfies the estimate\begin{equation}\label{est:rhserror}\|f-(\partial_t+\partial_x^3+a\partial_x^2)v\|_{\ensuremath{l^1} Y^s}\leq 2^{-\delta k}C(\|u_0\|_{\ensuremath{l^1} H^s})(\|v_0\|_{\ensuremath{l^1} H^s}+\|f\|_{\ensuremath{l^1} Y^s})\end{equation}\end{lemma}
In order to prove this we start with the following result based on the argument of \cite{MMT} Proposition 4.1.
\begin{proposition}\label{propn:MMT}If \(w\) solves \eqref{eq:veqn} then for any \(s\geq0\)\begin{equation}\label{est:airyest}\|w\|_{\ensuremath{l^1} X^s}\lesssim\|w_0\|_{\ensuremath{l^1} H^s}+\|g\|_{\ensuremath{l^1} Y^s}\end{equation}\end{proposition}
\begin{proof}Localizing at frequency \(2^j\)
\[\left\{\begin{array}{l}(\partial_t+\partial_x^3)w_j=g_j\\w_j(0)=w_{0j}\end{array}\right.\]
Similarly to \cite{MMT} Proposition 4.2, we have the energy estimate
\begin{equation}\label{est:locnrg}\|w_j\|_{L^\infty L^2}^2\lesssim \|w_{0j}\|^2_{L^2}+\|w_j\|_{X_j}\|g_j\|_{Y_j}\end{equation}
If we can prove a local energy decay estimate of the form
\begin{equation}\label{est:locnrgdecay}2^{2j}\|w_j\|^2_X\lesssim\|w_{j}\|_{L^\infty L^2}^2+\|w_j\|_{X_j}\|g_j\|_{Y_j}\end{equation}
then combining these we have the estimate
\begin{equation}\label{est:nosum}\|w_j\|_{X_j}^2\lesssim\|w_{0j}\|_{L^2}^2+\|g_j\|_{Y_j}^2\end{equation}
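In more detail: if, as the form of \eqref{est:locnrg} and \eqref{est:locnrgdecay} suggests, \(\|w_j\|_{X_j}\lesssim\|w_j\|_{L^\infty L^2}+2^{j}\|w_j\|_X\), then substituting \eqref{est:locnrg} into the right-hand side of \eqref{est:locnrgdecay} and adding the two bounds gives
\[\|w_j\|_{X_j}^2\lesssim\|w_{0j}\|_{L^2}^2+\|w_j\|_{X_j}\|g_j\|_{Y_j}\lesssim\|w_{0j}\|_{L^2}^2+\varepsilon\|w_j\|_{X_j}^2+\varepsilon^{-1}\|g_j\|_{Y_j}^2\]
and taking \(\varepsilon\) small enough to absorb the middle term into the left-hand side yields \eqref{est:nosum}.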
In order to prove \eqref{est:locnrgdecay} it is enough to show that for any \(l\leq 2j\) and any \(Q\in\mathcal{Q}_{l}\)
\begin{equation}\label{est:locnrgdecay2}2^{2j-l}\|w_j\|^2_{L^2_{t,x}([0,1]\times Q)}\lesssim\|w_{j}\|_{L^\infty L^2}^2+\|w_j\|_{X_j}\|g_j\|_{Y_j}
\end{equation}
This is due to the fact that whenever \(l>2j\) we can cut each \(Q\in\mathcal{Q}_l\) into \(2^{l-2j}\) intervals in \(\mathcal{Q}_{2j}\) and hence for \(l>2j\)
\begin{align*}\sup\limits_{Q\in\mathcal{Q}_l}2^{2j-l}\|w_j\|^2_{L^2_{t,x}([0,1]\times Q)}&\lesssim\sup\limits_{Q\in\mathcal{Q}_l}\sup\limits_{\substack{\tilde Q\in\mathcal{Q}_{2j}\\
\tilde Q\subset Q}}\|w_j\|^2_{L^2_{t,x}([0,1]\times \tilde Q)}\\
&\lesssim\sup\limits_{0\leq i\leq 2j}\sup\limits_{\tilde Q\in\mathcal{Q}_i}2^{2j-i}\|w_j\|^2_{L^2_{t,x}([0,1]\times \tilde Q)}\end{align*}
To prove \eqref{est:locnrgdecay2}, for each \(l\leq2j\) and \(Q\in\mathcal{Q}_l\) we aim to construct a self-adjoint Fourier multiplier \(\mathcal{M}\) so that
\begin{align*}&(\mathrm{M}1)\quad\|\mathcal{M}w_j\|_{L^2_x}\lesssim\|w_j\|_{L^2_x}\\
&(\mathrm{M}2)\quad\|\mathcal{M}w_j\|_X\lesssim\|w_j\|_X\\
&(\mathrm{M}3)\quad\langle[\partial_x^3,\mathcal{M}]w_j,w_j\rangle_{L^2_{t,x}}\gtrsim 2^{2j-l}\|w_j\|^2_{L^2_{t,x}([0,1]\times Q)}-O(\|w_j\|^2_{L^2_{t,x}})\end{align*}
The estimate \eqref{est:locnrgdecay2} then follows from
\[\frac{d}{dt}\langle w_j,\mathcal{M}w_j\rangle=2\,\ensuremath{\textrm{Re}}\langle(\partial_t+\partial_x^3)w_j,\mathcal{M}w_j\rangle+\langle[\partial_x^3,\mathcal{M}]w_j,w_j\rangle\]
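To spell this out (a sketch, assuming the \(X_j\)--\(Y_j\) pairing bound \(|\langle g_j,u\rangle_{L^2_{t,x}}|\lesssim\|g_j\|_{Y_j}\|u\|_{X_j}\) that is implicit in \eqref{est:locnrg}): integrating the identity over \([0,1]\) and using \((\partial_t+\partial_x^3)w_j=g_j\),
\[\int_0^1\langle[\partial_x^3,\mathcal{M}]w_j,w_j\rangle\,dt=\langle w_j,\mathcal{M}w_j\rangle\Big|_{t=0}^{t=1}-2\int_0^1\textrm{Re}\langle g_j,\mathcal{M}w_j\rangle\,dt\lesssim\|w_j\|^2_{L^\infty L^2}+\|w_j\|_{X_j}\|g_j\|_{Y_j}\]
by \((\mathrm{M}1)\) and \((\mathrm{M}2)\), while \((\mathrm{M}3)\) bounds the left-hand side from below by \(2^{2j-l}\|w_j\|^2_{L^2_{t,x}([0,1]\times Q)}-O(\|w_j\|^2_{L^2_{t,x}})\). Since \(\|w_j\|^2_{L^2_{t,x}}\lesssim\|w_j\|^2_{L^\infty L^2}\) on the unit time interval, this gives \eqref{est:locnrgdecay2}.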
By translation invariance we assume \(Q=[-2^{l-1},2^{l-1}]\) and define\[\mathcal{M}w=m_lw\]
where \(m_l(x)=m(2^{-l}x)\) and \(m'(x)=-\psi^2(x)\) for some real-valued \(\psi\in\mathcal{S}\) localized at frequency \(\lesssim 1\). We choose \(\psi\) such that \(m\) is bounded and decreasing and \(\psi\sim 1\) on \(|x|\leq \tfrac{1}{2}\). Clearly \((\mathrm{M}1)\) and \((\mathrm{M}2)\) follow from this choice of \(\mathcal{M}\).
Integrating by parts we have
\begin{align*}\langle[\partial_x^3,m_l]w_j,w_j\rangle&=\langle(\partial_x^3m_l)w_j,w_j\rangle-3\langle \partial_xm_l\partial_xw_j,\partial_xw_j\rangle\\
&=\langle(\partial_x^3m_l)w_j,w_j\rangle+3\cdot2^{-l}\langle\psi^2(2^{-l}x)\partial_xw_j,\partial_xw_j\rangle\end{align*}
Using the properties of \(\psi\) and the frequency localization of \(w_j\) we get \eqref{est:locnrgdecay2}.
To prove \eqref{est:airyest} we start by taking \(Q\) at scale \(M2^{2j}\) for some \(M\) and consider
\[(\partial_t+\partial_x^3)(\chi_Qw_j)=g_j\chi_Q+[\partial_x^3,\chi_Q]w_j\]
We can replace the \(\chi_Q\) with frequency localized versions as before and assume that they are smooth on scale \(M2^{2j}\), so that \(|\partial_x^n\chi_Q|\lesssim_n(M2^{2j})^{-n}\).
The estimate \eqref{est:locnrg} gives us that
\begin{equation}\label{ugh}\sum\limits_Q\|\chi_Qw_j\|_{X_j}\lesssim\sum\limits_Q\left\{\|\chi_Qw_{0j}\|_{L^2}+\|\chi_Qg_j\|_{Y_j}\right\}+\sum\limits_Q\|[\partial_x^3,\chi_Q]w_j\|_{Y_j}\end{equation} Using the above bounds on the derivatives of \(\chi_Q\) we get
\[\sum\limits_Q\|[\partial_x^3,\chi_Q]w_j\|_{L_t^1L_x^2}\lesssim M^{-1}\sum\limits_Q\|\chi_Qw_j\|_{L^\infty_tL^2_x}\]
so taking \(M\) sufficiently large (and \(j\)-independent) we can absorb the last term in \eqref{ugh} into the left-hand side.
To make the transition from \(M2^{2j}\) scale to \(2^{2j}\) scale we use an identical argument to changing interval size to show that for any \(h=h(x)\)\[\sum\limits_{\tilde Q\in M\mathcal{Q}_{2j}}\|\chi_{\tilde Q} h\|_{L^2_x}\sim\sum\limits_{Q\in\mathcal{Q}_{2j}}\|\chi_Q h\|_{L^2_x}\]
\end{proof}
From the bilinear estimate \eqref{est:CXX}, for \(\gamma>s+1\) we have\[\|v\|_{\ensuremath{l^1} X^s}\lesssim\|e^{-\tfrac{1}{3}\int_0^xa\,dy}\|_{C^\gamma_*}\|w\|_{\ensuremath{l^1} X^s}\]So from \eqref{est:airyest}, \eqref{est:CXY} and \eqref{est:CHH} we have \[\|v\|_{\ensuremath{l^1} X^s}\lesssim\|e^{-\tfrac{1}{3}\int_0^xa\,dy}\|_{C^\gamma_*}\|e^{\tfrac{1}{3}\int_0^xa\,dy}\|_{C^\gamma_*}(\|v_0\|_{\ensuremath{l^1} H^s}+\|f\|_{\ensuremath{l^1} Y^s})\]
Taking \(\gamma\in\mathbb{Z}_+\) and using \eqref{est:holderzygmund} we have
\[\|e^{\pm\tfrac{1}{3}\int_0^xa\,dy}\|_{C^\gamma_*}\lesssim\|e^{\pm\tfrac{1}{3}\int_0^xa\,dy}\|_{C^\gamma}\lesssim e^{\tfrac{1}{3}\|a\|_{L^1}}\langle\|a\|_{C^{\gamma-1}}\rangle^\gamma\]
Choosing \(\sigma>\gamma-1/2\), from Sobolev embedding and \eqref{est:aest} we have\[\|a\|_{C^{\gamma-1}}\lesssim\|a\|_{H^{\sigma}}\lesssim\|u_0\|_{\ensuremath{l^1} H^s}\]and, since \(s>1\), whenever \(a\not\equiv0\) we also have
\[\|a\|_{L^1}\lesssim\|a\|_{\ensuremath{l^1} H^s}\lesssim\|u_0\|_{\ensuremath{l^1} H^s}\]
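(The exponential bound above can be seen from the chain rule: every derivative of \(e^{\pm\tfrac{1}{3}\int_0^xa\,dy}\) of order at most \(\gamma\) is \(e^{\pm\tfrac{1}{3}\int_0^xa\,dy}\) times a polynomial of degree at most \(\gamma\) in \(\tfrac{1}{3}a,\dots,\tfrac{1}{3}\partial_x^{\gamma-1}a\).) Combining the last three displays,
\[\|e^{\pm\tfrac{1}{3}\int_0^xa\,dy}\|_{C^\gamma_*}\leq C(\|u_0\|_{\ensuremath{l^1} H^s})\]
with \(C\) of exponential type, and \eqref{est:approxu} follows.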
To prove \eqref{est:rhserror} we write\[\|f-(\partial_t+\partial_x^3+a\partial_x^2)v\|_{\ensuremath{l^1} Y^s}=\|(a_x+\tfrac{1}{3}a^2)v_x+(\tfrac{1}{3}a_{xx}+\tfrac{1}{3}aa_x+\tfrac{1}{27}a^3)v\|_{\ensuremath{l^1} Y^s}\]
We can then apply the bilinear estimate \eqref{est:HXY} to get
\begin{align*}
\|(a_x&+\tfrac{1}{3}a^2)v_x+(\tfrac{1}{3}a_{xx}+\tfrac{1}{3}aa_x+\tfrac{1}{27}a^3)v\|_{\ensuremath{l^1} Y^s}\\
&\lesssim(\|a_x\|_{\ensuremath{l^1} H^s}+\|a^2\|_{\ensuremath{l^1} H^s}+\|a_{xx}\|_{\ensuremath{l^1} H^s}+\|aa_x\|_{\ensuremath{l^1} H^s}+\|a^3\|_{\ensuremath{l^1} H^s})\|v\|_{\ensuremath{l^1} X^s}
\end{align*}
Using the algebra estimate \eqref{est:Halg} we have\[\|aa_x\|_{\ensuremath{l^1} H^s}\lesssim\|a_x\|_{\ensuremath{l^1} H^s}\|a\|_{\ensuremath{l^1} H^s}\] Using the bilinear estimate \eqref{est:CHH}, for \(\gamma>s\) we have
\[
\|a^2\|_{\ensuremath{l^1} H^s}\lesssim\|a\|_{C^\gamma_*}\|a\|_{\ensuremath{l^1} H^s}
\]
The estimate \eqref{est:rhserror} then follows from \eqref{est:aest}--\eqref{est:daest}. This completes the proof of Lemma \ref{modairy}.
We can now construct a solution to \eqref{eq:modairy} by iteration. Let \linebreak\(v^{(n)}\) be the approximate solution to \[\left\{\begin{array}{l}(\partial_t+\partial_x^3+a\partial_x^2)v^{(n)}=f^{(n)}
\\v^{(n)}(0)=v^{(n)}_0\end{array}\right.\] constructed in Lemma \ref{modairy}, where \(f^{(0)}=f\), \(v_0^{(0)}=v_0\) and for \(n\geq0\)\[\begin{array}{rl}f^{(n+1)}&\!\!\!\!=f^{(n)}-(\partial_t+\partial_x^3+a\partial_x^2)v^{(n)}\\v_0^{(n+1)}&\!\!\!\!=0\end{array}\]
Then for \(k\) sufficiently large \[v=\sum\limits_{n=0}^\infty v^{(n)}\] converges in \(\ensuremath{l^1} X^s\) to a solution of \eqref{eq:modairy} satisfying the estimate \eqref{est:modairysoln}.
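Concretely (a sketch using only \eqref{est:approxu} and \eqref{est:rhserror}, with \(C=C(\|u_0\|_{\ensuremath{l^1} H^s})\geq1\) denoting the larger of the two constants there): since the initial data \(v_0^{(n)}\) vanish for \(n\geq1\), induction gives
\[\|f^{(n)}\|_{\ensuremath{l^1} Y^s}\leq(2^{-\delta k}C)^{n}(\|v_0\|_{\ensuremath{l^1} H^s}+\|f\|_{\ensuremath{l^1} Y^s}),\qquad\|v^{(n)}\|_{\ensuremath{l^1} X^s}\leq C(2^{-\delta k}C)^{n}(\|v_0\|_{\ensuremath{l^1} H^s}+\|f\|_{\ensuremath{l^1} Y^s})\]
so once \(2^{-\delta k}C<\tfrac{1}{2}\) the series converges geometrically in \(\ensuremath{l^1} X^s\) and satisfies \eqref{est:modairysoln}, while the telescoping identity \(f-(\partial_t+\partial_x^3+a\partial_x^2)\sum_{n=0}^Nv^{(n)}=f^{(N+1)}\rightarrow0\) in \(\ensuremath{l^1} Y^s\) shows that the limit solves \eqref{eq:modairy}.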
To prove uniqueness we consider the solution to
\begin{equation}\label{eq:modairyaaaa}\left\{\begin{array}{l}(\partial_t+\partial_x^3+a\partial_x^2)v=0
\\v(0)=0\end{array}\right.\end{equation}
Taking \(w=e^{\tfrac{1}{3}\int_0^xa(y)\,dy}v\), we have
\[(\partial_t+\partial_x^3)w=(a_x+\tfrac{1}{3}a^2)w_x-(\tfrac{2}{27}a^3-\tfrac{1}{3}a_{xx})w\]
and as in the proof of Lemma \ref{modairy}
\[\|w\|_{l^1X^s}\lesssim2^{-\delta k}C(\|u_0\|_{\ensuremath{l^1} H^s})\|w\|_{l^1X^s}\]
so for sufficiently large \(k\) we must have \(w=0\).
\section{Proof of Theorem \ref{mainthrm}}\label{sect:final}
We have the following estimates for the terms on the right-hand side of \eqref{eq:rescalednonlinear}.
\begin{proposition}\label{RBDS}For \(s>s_0\) where \(s_0\) is as defined in \eqref{defn:s0},
\begin{equation}\label{est:gest}\|G(x,v,v_x,v_{xx})\|_{\ensuremath{l^1} Y^s}\lesssim C(\|u_0\|_{\ensuremath{l^1} H^s})\|v\|^2_{\ensuremath{l^1} X^s}\langle\|v\|_{\ensuremath{l^1} X^s}\rangle^{m-2}\end{equation}
There exists \(\delta_1=\delta_1(s,F)\in(0,1]\) such that,
\begin{equation}\label{est:lest}\|L(x,v,v_x)\|_{\ensuremath{l^1} Y^s}\lesssim2^{-\delta_1 k}C(\|u_0\|_{\ensuremath{l^1} H^s})\|v\|_{\ensuremath{l^1} X^s}\end{equation}
\begin{equation}\label{est:rest}\|R(x)\|_{\ensuremath{l^1} Y^s}\lesssim2^{-\delta_1 k}C(\|u_0\|_{\ensuremath{l^1} H^s})\|u_0\|_{\ensuremath{l^1} H^s}\end{equation}\end{proposition}
\begin{proof} Using the bilinear estimates \eqref{est:XXY} and \eqref{est:CHH} and the algebra estimate \eqref{est:Xalg}, for \(\gamma>s+1\) we have\[\|G(v)\|_{\ensuremath{l^1} Y^s}\lesssim\sum\limits_{2\leq|\beta|\leq|\alpha|\leq m}\|G_{\alpha,\beta}\|_{C^\gamma_*}\|v\|_{\ensuremath{l^1} X^s}^{|\beta|}\]
Using the algebra estimate \eqref{est:Calg} and the low frequency estimate \eqref{est:lowfreqcstar} we have\[\|G_{\alpha,\beta}\|_{C^\gamma_*}\lesssim c_\alpha2^{(\lambda(1-|\beta|)-3+\beta_1+2\beta_2)k}\|u_0\|_{\ensuremath{l^1} H^s}^{|\alpha|-|\beta|}\lesssim\|u_0\|_{\ensuremath{l^1} H^s}^{|\alpha|-|\beta|}\]
For \(L\) we use the bilinear estimate \eqref{est:HXY} to get
\[
\|L(v)\|_{\ensuremath{l^1} Y^s}\lesssim\sum\limits_{\substack{2\leq|\alpha|\leq m\\|\beta|=1}}\|L_{\alpha,\beta}\|_{\ensuremath{l^1} H^s}\|v\|_{\ensuremath{l^1} X^s}
\]
We then use \eqref{est:CHH} to estimate exactly one of the low frequency terms in \(L_{\alpha,\beta}\) in \(\ensuremath{l^1} H^s\) using \eqref{est:lowfreq} and the rest in \(C^\gamma_*\) for \(\gamma>s\) using \eqref{est:Calg} and \eqref{est:lowfreqcstar}. Whenever we can apply \eqref{est:lowfreq} to the term in \(\ensuremath{l^1} H^s\) we get
\[
\|L_{\alpha,\beta}\|_{\ensuremath{l^1} H^s}\lesssim2^{(\beta_1-2)k}\|u_0\|_{\ensuremath{l^1} H^s}^{|\alpha|-1}\lesssim2^{-k}\|u_0\|_{\ensuremath{l^1} H^s}^{|\alpha|-1}
\]
If we have to apply \eqref{est:lowfreq2} to the term in \(\ensuremath{l^1} H^s\) we get
\[
\|L_{\alpha,\beta}\|_{\ensuremath{l^1} H^s}\lesssim2^{-\tfrac{1}{2}(s-\lambda-2)k}\|u_0\|_{\ensuremath{l^1} H^s}^{|\alpha|-1}
\]
We note that as \(R\) does not depend on \(t\), \[\|R\|_{\ensuremath{l^1} Y^s}\leq\|R\|_{\ensuremath{l^1} H^s}\]
As in the estimate of \(L_{\alpha,\beta}\) we can estimate exactly one low frequency term in \(R_\alpha\) in \(\ensuremath{l^1} H^s\) and the rest in \(C^\gamma_*\) for \(\gamma>s\) so
\[\|R_\alpha\|_{\ensuremath{l^1} H^s}\lesssim2^{-\min\{\tfrac{1}{2}(s-\lambda-2),2-\lambda\}k}\|u_0\|_{\ensuremath{l^1} H^s}^{|\alpha|}\]
We use \eqref{est:lowfreq}, \eqref{est:lowfreq2} to get
\[\|\partial_x^3u_0^{(k)l}\|_{\ensuremath{l^1} H^s}\lesssim2^{-\min\{\tfrac{1}{2}(s-\lambda-2),2-\lambda\}k}\|u_0\|_{\ensuremath{l^1} H^s}\]\end{proof}
We state the following result which we will use to estimate the differences of the polynomial terms \(a,G,L,R\).
\begin{lemma}\label{polygen}Let \(p(x_1,\dots,x_r)=x_1^{\alpha_1}\dots x_r^{\alpha_r}\). Then\[p(u_1,\dots,u_r)-p(v_1,\dots,v_r)=\sum\limits_{j=1}^r(u_j-v_j)q_j\]where \[q_j=\sum\limits_{k=0}^{\alpha_j-1}u_1^{\alpha_1}\dots u_{j-1}^{\alpha_{j-1}}u_j^kv_j^{\alpha_j-1-k}v_{j+1}^{\alpha_{j+1}}\dots v_r^{\alpha_r}\]Further, if \(\mathcal{Y}\) is a Banach space and \(\mathcal{X}_1,\dots,\mathcal{X}_r\) are Banach algebras and we have the multilinear estimate\[\|u_1\dots u_r\|_{\mathcal{Y}}\lesssim\|u_1\|_{\mathcal{X}_1}\dots\|u_r\|_{\mathcal{X}_r}\]then\begin{equation}\label{est:polygen}\|p(u_1,\dots,u_r)-p(v_1,\dots,v_r)\|_{\mathcal{Y}}\lesssim\sum\limits_{j=1}^rC_j\|u_j-v_j\|_{\mathcal{X}_j}\end{equation} where \[C_j=\sum\limits_{k=0}^{\alpha_j-1}\|u_1\|_{\mathcal{X}_1}^{\alpha_1}\dots\|u_{j-1}\|_{\mathcal{X}_{j-1}}^{\alpha_{j-1}}\|u_j\|_{\mathcal{X}_j}^k\|v_j\|_{\mathcal{X}_j}^{\alpha_j-1-k}\|v_{j+1}\|_{\mathcal{X}_{j+1}}^{\alpha_{j+1}}\dots\|v_r\|_{\mathcal{X}_r}^{\alpha_r}\]\end{lemma}
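For instance, in a single variable (\(r=1\), \(p(x)=x^{\alpha}\)) the identity is the familiar factorization
\[u^{\alpha}-v^{\alpha}=(u-v)\sum\limits_{k=0}^{\alpha-1}u^kv^{\alpha-1-k}\]
and the general case follows by changing the \(u_j\) into \(v_j\) one variable at a time and telescoping.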
\subsection{Existence}
We prove the existence of a solution by a fixed point argument. By Proposition \ref{result:est}, for \(k>0\) sufficiently large we can find a solution \(w=\mathcal{T}(v)\in\ensuremath{l^1} X^s\) to
\begin{equation}\label{eq:iterationscheme}\left\{\begin{array}{l}(\partial_t+\partial_x^3+a\partial_x^2)w=G(v)+L(v)+R
\\w(0)=v_0\end{array}\right.\end{equation}where \(a,G,L,R\) are as in \eqref{eq:rescalednonlinear}. Let \(\delta_1>0\) be as in Proposition \ref{RBDS} and let \(\sigma=\tfrac{1}{2}\delta_1\). Then define \[K=\{v\in\ensuremath{l^1} X^s:\|v\|_{\ensuremath{l^1} X^s}\leq 2^{-\sigma k}\|u_0\|_{\ensuremath{l^1} H^s}\}\]
\begin{proposition}For \(k>0\) sufficiently large the map \(\mathcal{T}\colon K\rightarrow K\) is a contraction.
\end{proposition}
\begin{proof}
Suppose \(v\in K\). Then from \eqref{est:highfreq}, \eqref{est:modairysoln} and Proposition \ref{RBDS},\begin{align*}\|\mathcal{T}(v)\|_{\ensuremath{l^1} X^s}&\lesssim C(\|u_0\|_{\ensuremath{l^1} H^s})(\|v_0\|_{\ensuremath{l^1} H^s}+\|G(v)\|_{\ensuremath{l^1} Y^s}+\|L(v)\|_{\ensuremath{l^1} Y^s}+\|R\|_{\ensuremath{l^1} Y^s})
\\&\lesssim C(\|u_0\|_{\ensuremath{l^1} H^s})(\|v_0\|_{\ensuremath{l^1} H^s}+\|v\|^2_{\ensuremath{l^1} X^s}\langle\|v\|_{\ensuremath{l^1} X^s}\rangle^{m-2}+2^{-\delta_1k}\|v\|_{\ensuremath{l^1} X^s}\\&\quad+2^{-\delta_1 k}\|u_0\|_{\ensuremath{l^1} H^s})
\\&\lesssim C(\|u_0\|_{\ensuremath{l^1} H^s})(2^{-(s-\lambda-2)k}+2^{-\delta_1k})\|u_0\|_{\ensuremath{l^1} H^s}\end{align*}
From the proof of Proposition \ref{RBDS} we have \(\sigma<\delta_1<\min\{1,s-\lambda-2\}\) so for sufficiently large \(k\), \[\|\mathcal{T}(v)\|_{\ensuremath{l^1} X^s}\leq2^{-\sigma k}\|u_0\|_{\ensuremath{l^1} H^s}\]
For \(v\ensuremath{^{\{j\}}}\in K\) the difference \(w=\mathcal{T}(v\ensuremath{^{\{1\}}})-\mathcal{T}(v\ensuremath{^{\{2\}}})\) satisfies
\[\left\{\begin{array}{l}(\partial_t+\partial_x^3+a\partial_x^2)w=G(v\ensuremath{^{\{1\}}})-G(v\ensuremath{^{\{2\}}})+L(v\ensuremath{^{\{1\}}}-v\ensuremath{^{\{2\}}})
\\w(0)=0\end{array}\right.\]
From \eqref{est:modairysoln} we have\[\|w\|_{\ensuremath{l^1} X^s}\lesssim C(\|u_0\|_{\ensuremath{l^1} H^s})(\|G(v\ensuremath{^{\{1\}}})-G(v\ensuremath{^{\{2\}}})\|_{\ensuremath{l^1} Y^s}+\|L(v\ensuremath{^{\{1\}}}-v\ensuremath{^{\{2\}}})\|_{\ensuremath{l^1} Y^s})\]
Using Lemma \ref{polygen} with the estimates in Proposition \ref{RBDS} we have
\begin{align*}\|G(v\ensuremath{^{\{1\}}})-G(v\ensuremath{^{\{2\}}})\|_{\ensuremath{l^1} Y^s}\lesssim 2^{-\sigma k}C(\|u_0\|_{\ensuremath{l^1} H^s})\|v\ensuremath{^{\{1\}}}-v\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} X^s}\end{align*}
From \eqref{est:lest}\[\|L(v\ensuremath{^{\{1\}}}-v\ensuremath{^{\{2\}}})\|_{\ensuremath{l^1} Y^s}\lesssim2^{-\sigma k}C(\|u_0\|_{\ensuremath{l^1} H^s})\|v\ensuremath{^{\{1\}}}-v\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} X^s}\]so\[\|\mathcal{T}(v\ensuremath{^{\{1\}}})-\mathcal{T}(v\ensuremath{^{\{2\}}})\|_{\ensuremath{l^1} X^s}\lesssim2^{-\sigma k}C(\|u_0\|_{\ensuremath{l^1} H^s})\|v\ensuremath{^{\{1\}}}-v\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} X^s}\]so \(\mathcal{T}\) is a contraction for sufficiently large \(k\).\end{proof}
From the contraction mapping theorem we have a fixed point of \(\mathcal{T}\) in \(K\) which is a solution of \eqref{eq:rescalednonlinear} satisfying\begin{equation}\label{est:unibdmain}\|v\|_{\ensuremath{l^1} X^s}\leq 2^{-\sigma k}\|u_0\|_{\ensuremath{l^1} H^s}\end{equation}After adding the low frequency component of the rescaled initial data and rescaling we get a solution \(u\) to \eqref{eq:nonlinear} in \(C([0,2^{-3k}],\ensuremath{l^1} H^s)\).
\subsection{Lipschitz dependence on initial data and uniqueness}
Suppose we have two solutions, \(u\ensuremath{^{\{1\}}},u\ensuremath{^{\{2\}}}\) to \eqref{eq:nonlinear} with initial data \(u_0\ensuremath{^{\{1\}}},u_0\ensuremath{^{\{2\}}}\) respectively. Rescaling both with the same value of \(k\) and subtracting the low frequency components from each we get \(v\ensuremath{^{\{1\}}},v\ensuremath{^{\{2\}}}\) satisfying\[\left\{\begin{array}{l}(\partial_t+\partial_x^3+a\ensuremath{^{\{j\}}}\partial_x^2)v\ensuremath{^{\{j\}}}=G\ensuremath{^{\{j\}}}(v\ensuremath{^{\{j\}}})+L\ensuremath{^{\{j\}}}(v\ensuremath{^{\{j\}}})+R\ensuremath{^{\{j\}}}
\\v\ensuremath{^{\{j\}}}(0)=v_0\ensuremath{^{\{j\}}}\end{array}\right.\qquad j=1,2\]We then have\begin{align}\label{eq:diffdata}(\partial_t+\partial_x^3+a\ensuremath{^{\{1\}}}\partial_x^2)(v\ensuremath{^{\{1\}}}-v\ensuremath{^{\{2\}}})=&\;G\ensuremath{^{\{1\}}}(v\ensuremath{^{\{1\}}})-G\ensuremath{^{\{2\}}}(v\ensuremath{^{\{2\}}})\\&+L\ensuremath{^{\{1\}}}(v\ensuremath{^{\{1\}}})-L\ensuremath{^{\{2\}}}(v\ensuremath{^{\{2\}}})\notag\\&+R\ensuremath{^{\{1\}}}-R\ensuremath{^{\{2\}}}+(a\ensuremath{^{\{2\}}}-a\ensuremath{^{\{1\}}})\partial_x^2v\ensuremath{^{\{2\}}}\notag\end{align}
Writing \[G\ensuremath{^{\{j\}}}=G((u_0\ensuremath{^{\{j\}}})^{(k)l},\partial_x(u_0\ensuremath{^{\{j\}}})^{(k)l},\partial_x^2(u_0\ensuremath{^{\{j\}}})^{(k)l},v\ensuremath{^{\{j\}}},v\ensuremath{^{\{j\}}}_x,v\ensuremath{^{\{j\}}}_{xx})\qquad j=1,2\] for a polynomial \(G\) with coefficients depending only on \(\lambda\) we can apply Lemma \ref{polygen} with the estimates in Proposition \ref{RBDS}. From \eqref{est:unibdmain} we have \[\|v\ensuremath{^{\{j\}}}\|_{\ensuremath{l^1} X^s}\lesssim 2^{-\sigma k}\|u_0\ensuremath{^{\{j\}}}\|_{\ensuremath{l^1} H^s}\] so as \(G\) is at least quadratic in \(v\ensuremath{^{\{j\}}},v\ensuremath{^{\{j\}}}_x,v\ensuremath{^{\{j\}}}_{xx}\) we have
\begin{align*}\|G&\ensuremath{^{\{1\}}}(v\ensuremath{^{\{1\}}})-G\ensuremath{^{\{2\}}}(v\ensuremath{^{\{2\}}})\|_{\ensuremath{l^1} Y^s}\\&\lesssim C(\|u_0\ensuremath{^{\{1\}}}\|_{\ensuremath{l^1} H^s},\|u_0\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} H^s})\left(\|u_0\ensuremath{^{\{1\}}}-u_0\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} H^s}+2^{-\sigma k}\|v\ensuremath{^{\{1\}}}-v\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} X^s}\right)\end{align*}
Similarly,\begin{align*}\|L&\ensuremath{^{\{1\}}}(v\ensuremath{^{\{1\}}})-L\ensuremath{^{\{2\}}}(v\ensuremath{^{\{2\}}})\|_{\ensuremath{l^1} Y^s}\\&\lesssim 2^{-\delta_1k}C(\|u_0\ensuremath{^{\{1\}}}\|_{\ensuremath{l^1} H^s},\|u_0\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} H^s})\left(\|u_0\ensuremath{^{\{1\}}}-u_0\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} H^s}+\|v\ensuremath{^{\{1\}}}-v\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} X^s}\right)\end{align*}
\[\|R\ensuremath{^{\{1\}}}-R\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} Y^s}\lesssim2^{-\delta_1 k}C(\|u_0\ensuremath{^{\{1\}}}\|_{\ensuremath{l^1} H^s},\|u_0\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} H^s})\|u_0\ensuremath{^{\{1\}}}-u_0\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} H^s}\]
For the final term we use \eqref{est:XXY}, \eqref{est:aest} and the fact that for any \(f=f(x)\), \(\|f\|_{\ensuremath{l^1} X^s}\lesssim \|f\|_{\ensuremath{l^1} H^{s+1}}\) to get\begin{align*}\|(a\ensuremath{^{\{1\}}}-a\ensuremath{^{\{2\}}})\partial_x^2v\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} Y^s}&\lesssim \|a\ensuremath{^{\{1\}}}-a\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} H^{s+1}}\|v\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} X^s}\\&\lesssim\|u_0\ensuremath{^{\{1\}}}-u_0\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} H^s}\|u_0\|_{\ensuremath{l^1} H^s}\end{align*}
Applying \eqref{est:modairysoln} to \eqref{eq:diffdata} we have\begin{align*}\|v&\ensuremath{^{\{1\}}}-v\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} X^s}\\&\lesssim C(\|u_0\ensuremath{^{\{1\}}}\|_{\ensuremath{l^1} H^s},\|u_0\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} H^s})\left(\|u_0\ensuremath{^{\{1\}}}-u_0\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} H^s}+2^{-\sigma k}\|v\ensuremath{^{\{1\}}}-v\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} X^s}\right)\end{align*}so for \(k\) sufficiently large\[\|v\ensuremath{^{\{1\}}}-v\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} X^s}\lesssim C(\|u_0\ensuremath{^{\{1\}}}\|_{\ensuremath{l^1} H^s},\|u_0\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} H^s})\|u_0\ensuremath{^{\{1\}}}-u_0\ensuremath{^{\{2\}}}\|_{\ensuremath{l^1} H^s}\]
We complete the proof of Theorem \ref{mainthrm} by noting that the constant terms depending on \(\|u_0\|_{\ensuremath{l^1} H^s}\) are of the form \[C(\|u_0\|_{\ensuremath{l^1} H^s})=(1+e^{C_0\|u_0\|_{\ensuremath{l^1} H^s}})p(\|u_0\|_{\ensuremath{l^1} H^s})\] for \(C_0>0\) and polynomially bounded \(p\). So for a sufficiently large constant \(C>0\) we have well-posedness when \(k\geq C\|u_0\|_{\ensuremath{l^1} H^s}\).
\end{document}
\begin{document}
\title{Quantifying Causality in Quantum and General Models}
\begin{CJK*}{UTF8}{gbsn}
\author{Ding Jia (贾丁)}
\email{[email protected]}
\affiliation{Department of Applied Mathematics, University of Waterloo, Waterloo, Ontario, N2L 3G1, Canada}
\affiliation{Perimeter Institute for Theoretical Physics, Waterloo, Ontario, N2L 2Y5, Canada}
\begin{abstract}
In studies of entanglement, finding out if a state is entangled and quantifying the amount of entanglement contained in a state are related but different questions. Similarly in studies of causality, finding out the causal structures compatible with a model and quantifying the causal strengths are related but different questions. Recently much research has been directed towards the first question, but considerably less attention has been paid to the second one. In this paper we propose axioms for all reasonable quantitative measures of causality to obey. The axioms apply to a broad family of operational probabilistic theories with and without definite causal structure. For quantum models, we study causality measures based on one-shot quantum capacities in detail. These measures are used to define the notions of quantum signalling and quantum causality measures in order to quantify quantum causality.
\end{abstract}
\maketitle
\end{CJK*}
\section{Introduction}
In operational probabilistic theories causality is usually characterized by the signalling criterion, which says that if one agent can change the measurement outcome probabilities of another agent by choosing different operations, then the first agent can causally influence the second.
The strength of the signalling criterion is that it offers a natural way to characterize causality that applies not only to explicit theories such as quantum and classical theories but also to all operational probabilistic theories in general. Nonetheless the signalling criterion is limited in that it only offers yes or no answers to questions about causal structure but does not \textit{quantify} causal strength.
As we seek deeper understandings of causality, the need to quantify causality naturally arises. For instance, in studies of quantum causality related to quantum gravity, causal fluctuations are expected to be a generic phenomenon induced by spontaneous fluctuations of quantum gravitational degrees of freedom \cite{jia2017quantum}. In quantum spacetime, because of the universally present quantum fluctuations of causal structure, generically any two parties will have a finite probability of being causally connected if one uses the signalling criterion. This may sound peculiar at first, but it is actually analogous to familiar features of quantum theory such as quantum tunneling, which indicates a finite probability for seemingly peculiar events to happen. That such causal fluctuations do not violate locality is explained in \cite{jia2018analogue}, and if we accept what the theory suggests then quantum spacetime has a trivial causal structure at the level of a yes or no characterization of causality. Clearly the problem is that the signalling criterion does not distinguish ``strong'' and ``weak'' causal connections, and if one raises the causal strength threshold for qualifying as a causal connection even slightly, most of the previous causal connections will be disqualified and the causal structure will become non-trivial. To apply this idea concretely one needs to study quantitative measures of causal strength.
An analogy can be drawn with entanglement theory. Although earlier studies focused on yes/no criteria for whether some parties share entanglement, many important questions were addressed only through the study of quantitative measures of entanglement.
In this paper we define and study causality measures for operational probabilistic theories with definite causal structure and with indefinite causal structure. The definition consists of three axioms for all reasonable causality measures to obey. For quantum theory we study in some detail two particular causality measures based on one-shot communication capacities. We show that for a family of important models describing indefinite causal structure, the one-shot entanglement transmission capacities are exactly solvable. We prove that the one-shot entanglement transmission capacities can actually be used to reconstruct the causally relevant part of the models themselves.
For quantum theory there are correlations (e.g., the classical identity channel) that allow the transmission of only classical information but not quantum information. In some contexts such as the study of quantum spacetime there is the need to exclude these correlations from having positive causal strength. For this purpose we introduce the notion of ``quantum signalling'' and use it to define ``quantum causality measures'' that fit this purpose.
The present work focuses on quantifying causality between two parties. We leave the task of generalizing to multiple parties for future work.
\section{Causality measures}\label{sec:cm}
In this section we list the axioms for causality measures and give some examples of causality measures. There are different frameworks for operational probabilistic theories with definite causal structure and with indefinite causal structure (e.g., \cite{hardy2005probability, leifer2006quantum, leifer2013towards, gutoski2007toward, chiribella2009theoretical, aharonov2009multiple, abramsky2009categorical, coecke2010quantum, hardy2011reformulating, hardy2012operator, chiribella2013quantum, oreshkov2012quantum, araujo2015witnessing, oreshkov2016causal, oreshkov2015operational, oreshkov2016operational, oeckl2016local, ried2015quantum, maclean2017quantum, fitzsimons2015quantum, costa2016quantum, allen2017quantum}). The following definition of causality measures applies to a wide range of frameworks. The only preliminary concepts needed are correlations (such as a channel) that mediate the causal influence, and local operations that change the correlations in order to exert the causal influence.
Notably, nothing restricts the definition to quantum theory and everything in this section applies to any operational probabilistic theory with these preliminary concepts. It is only starting with the next section that we focus on quantum theory which allows us to talk about maximally entangled states and the fidelity of states to study some particular causality measures.
Another point worth emphasizing is that the frameworks do not have to be based on directed acyclic graphs (DAGs) for the causality measures to be applicable. This is important because although many causal frameworks are based on DAGs, there are reasonable frameworks that are naturally associated with hypergraphs rather than graphs, such as Hardy's causaloid framework of indefinite causal structure \cite{hardy2005probability}. At the level of our current study of bipartite causality measures this general applicability is not significant, but it may prove to be advantageous in future works that generalize the study of causality measures to multiple parties.
\subsection{Axioms}\label{subsec:a}
A \textbf{causality measure} $\mu^{A\rightarrow B}(G)$ on parties $A$ and $B$ sharing the correlation $G$ is a real-valued function obeying the following axioms:
\begin{enumerate}
\item $\mu^{A\rightarrow B}(G)$ is non-increasing under local operations within $A$ and $B$.
\item $\mu^{A\rightarrow B}(G) \ge 0$.
\item $\mu^{A\rightarrow B}(G) > 0$ only if $A$ can signal to $B$ using $G$.
\end{enumerate}
Here ``$A$ can signal to $B$ using $G$'' means that by exploiting the correlation $G$, $A$ can change the measurement outcome probabilities of $B$ by choosing different operations. A \textbf{normalized causality measure} further obeys $\sup_G \mu^{A\rightarrow B}(G)=1$ so that $0\le \mu^{A\rightarrow B}(G)\le 1$ for all $G$. The causality measure $\mu^{A\leftarrow B}(G)$ in the opposite direction is defined similarly except that it obeys Axiom 3 with $A$ and $B$ swapped.
Axiom 1 is the main axiom for causality measures. It captures the intuition that the local operations cannot generate causal correlations. An arbitrary $G$ can be mapped to any correlation $G'$ that can be prepared by local operations alone (such as product states). The parties simply discard $G$ and prepare $G'$. Axiom 1 implies that $\alpha=\mu^{A\rightarrow B}(G')$ is the minimum value $\mu^{A\rightarrow B}$ can reach for all $G$, because starting from any $G$ the parties can apply local operations to prepare $G'$. Axiom 1 also implies that any two different $G'$ must share the same value of $\alpha$ for $\mu^{A\rightarrow B}$, because each can be prepared from the other. Axiom 2 sets this minimum value $\alpha$ to zero.
Axioms 1 and 2 resemble the axioms for entanglement measures \cite{horodecki2009quantum}, which was originally defined for states and recently generalized to general quantum correlations including those with indefinite causal structure \cite{jia2017generalizing}. The defining axioms of entanglement measures are that the measures do not increase under the LOCC (local operations and classical communications) operations, and that the measures are non-negative. More precisely, the first axiom for entanglement measures says that they should not increase under LOCC operations allowed by the LOCC setting that one is considering (monotonicity). Here an LOCC setting dictates what LOCC operations are allowed. For example, in some LOCC settings only one-way classical communication is allowed, and in some others no classical communication is allowed. The only difference between the entanglement measure axioms and Axioms 1 and 2 above is that entanglement measures must also be monotonic in the presence of classical communications if the LOCC setting allows them. In LOCC settings where all local operations are allowed (which is the case for most LOCC settings of interest), the monotonicity axiom of entanglement is stronger than Axiom 1 for causality measures. Therefore in a framework\footnote{Although as stated in \cite{jia2017generalizing} entanglement measures are defined specifically for quantum theory, they can easily be generalized to apply to a broad family of probabilistic theories which supports the notion of LOCC operations.} where they are defined the entanglement measures obey Axioms 1 and 2. However, a correlation that contains entanglement certainly does not necessarily contain causal correlation. Therefore Axiom 3 is needed to make sure that causality measures indeed measure causality. Incidentally, in a model where entanglement and causality measures are defined, if the LOCC setting only allows local operations, then causality measures obey the entanglement measure axioms. One could view causality measures as special cases of entanglement measures which obey Axiom 3 in the LOCC setting without classical communication.
We believe the remarks above justify the three axioms as necessary to define causality measures. There remains the question of whether more axioms are needed. One obvious option is to strengthen Axiom 3 by also requiring that $\mu^{A\rightarrow B}(G)>0$ if $A$ can signal to $B$ using $G$. We do not make this requirement because it would exclude some useful information transmission capacities as causality measures. For example, there are channels that can signal but have zero quantum channel capacity.
Another option is to require $\sum_i p_i \mab{G_i} \ge \mab{\sum_i p_iG_i}$ for probability vectors $p_i$. We do not make this convexity requirement because again it would exclude quantum channel capacity as a causality measure \cite{smith2008quantum}. This choice echoes the choice in entanglement theory not to require entanglement measures to be convex (some useful measures such as distillable entanglement are not known to be convex) \cite{horodecki2009quantum}.
There are potentially other conditions one may want to impose on causality measures, just like there are conditions one may want to impose on entanglement measures in addition to the basic monotonicity and non-negativity axioms. For entanglement theory, the common view is that the two axioms above are the only ones necessary in defining entanglement measures, and other conditions may be imposed depending on particular contexts \cite{horodecki2009quantum}. It seems the case is the same for causality measures, and we regard Axioms 1 to 3 as sufficient to define causality measures at the basic level. Other conditions may be imposed to suit particular interests. For example, in Section \ref{sec:qcm} we study the additional condition based on ``quantum signalling'' to define quantum causality measures.
In addition to general causality measures, we also defined normalized causality measures $\mu^{A\rightarrow B}$ for which $\sup_G \mu^{A\rightarrow B}(G)=1$. Normalized measures are useful when one compares correlations for systems with different dimensions. For example, the qubit identity channel and the qutrit identity channel are both channels with no noise and with the maximum causal strength on their respective systems. Yet the quantum channel capacity as a standard causality measure assigns a larger value to the qutrit channel. This assignment is reasonable from the perspective that the qutrit channel is capable of transmitting more information per use. Nevertheless, in other contexts where one quantifies causal strength according to how much noise there is in the correlation, a normalized measure that assigns the value one to both channels would be preferable.
\subsection{Examples}
\begin{itemize}
\item The zero measure.
\begin{align}
\mu_{\text{zero}}^{A\rightarrow B}(G)=0 \quad \text{for all }G.
\end{align}
This function trivially obeys all the three axioms and also the axioms for entanglement measures. It is of no practical value but shows that some function is both a causality measure and an entanglement measure.
\item The signalling measure.
\begin{align}
\mu_{\text{sg}}^{A\rightarrow B}(G)=
\begin{cases}
1, \quad \text{A can signal to B}
\\
0, \quad \text{A cannot signal to B}.
\end{cases}
\end{align}
This function clearly obeys Axioms 1 to 3 and is a causality measure. It is also a normalized causality measure.
Unlike, for example, the quantum channel capacity, it meets the condition that $\mu^{A\rightarrow B}(G)>0$ if $A$ can signal to $B$ using $G$. Yet it is not convex. Let $G_1$ and $G_2$ be channels that can and cannot signal, respectively. Then
\begin{align}
\frac{1}{2} \mu_{\text{sg}}^{A\rightarrow B}(G_1)+\frac{1}{2} \mu_{\text{sg}}^{A\rightarrow B}(G_2)=\frac{1}{2} < 1= \mu_{\text{sg}}^{A\rightarrow B}(\frac{1}{2}G_1+\frac{1}{2}G_2).
\end{align} The biggest drawback is that the signalling measure does not really \textit{quantify} causal strength.
\item For the special case that $G$ is a quantum or classical channel, the various channel capacities are causality measures, as one can easily check that they obey all the three axioms.
The channel capacities quantify how many qubits the channel can transmit per use and are not normalized measures in general. One can easily normalize them by dividing by the maximum capacity a channel on the same input and output systems can reach. Precisely, given the channel capacity $C(N)$ on channels $N$ as a causality measure, we normalize it by
\begin{align}\label{eq:ncc}
C_{\text{norm}}(N):=\frac{C(N)}{\sup_{N'\in \mathfrak{C}(N)}C(N')},
\end{align}
where $\mathfrak{C}(N)$ is the set of channels on the same input and output systems of $N$. This suits the need mentioned at the end of the last subsection of finding a measure that assigns the same value one to all noiseless channels.
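As a concrete illustration (using the well-known formula for the quantum capacity of the erasure channel), consider the erasure channel $\mathcal{E}_p$ on a $d$-dimensional input, which delivers the input intact with probability $1-p$ and an orthogonal erasure flag with probability $p$. Its quantum capacity is $\max\{0,1-2p\}\log d$, while the supremum in (\ref{eq:ncc}) over channels with the same input and output systems is $\log d$, so the normalized measure is simply $\max\{0,1-2p\}$, independently of the dimension.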
\item In the next section we study information transmission capacities that can be used as causality measures for general correlations not restricted to channels. They apply even to correlations without definite causal structure. The primary examples are the one-shot entanglement transmission capacities $Q_{\text{ent}}^\rightarrow(G^{AB};\epsilon)$ and the one-shot subspace transmission capacities $Q_{\text{sub}}^\rightarrow(G^{AB};\epsilon)$.
\item Given any causality measure $\mu^{A\rightarrow B}(G)$, a standard way to define a normalized measure is
\begin{align}\label{eq:ncm}
\mu^{A\rightarrow B}_{\text{norm}}(G):=\frac{\mu^{A\rightarrow B}(G)}{\sup_{G'\in\mathfrak{C}(G)}\mu^{A\rightarrow B}(G')},
\end{align}
where the supremum is taken over a set of correlations $\mathfrak{C}(G)$ that depends on $G$. The normalization (\ref{eq:ncc}) for channel capacities is a special case of this general procedure.
\end{itemize}
\section{One-shot quantum capacities}\label{sec:osqc}
The channel capacities provide fairly natural quantitative measures of the causal strength. Yet the traditional definitions of capacities only apply to channels, which are correlations with definite causal structure. The various definitions of capacities can be generalized to apply to correlations with possibly indefinite causal structure \cite{jiasakharwade}. These generalized capacities can then be used to quantify causality for e.g., quantum gravity, where indefinite causal structure is important.
In this section we focus on two canonical causality measures based on one-shot quantum communication capacities. In communication theory, the asymptotic capacities are usually taught as the canonical capacities \cite{wilde2017quantum}. Yet there is more than one reason to regard the one-shot capacities as the truly canonical ones. The conceptual reason is that practically all correlations used for communication come with noise, so the copies of correlations cannot be strictly identical. In addition, the copies may be correlated with each other. Moreover, there is no supply of infinitely many copies of the correlation. The asymptotic capacities do not account for these practical limitations but the one-shot capacities do. The technical reason for preferring one-shot capacities over asymptotic ones is that the asymptotic capacities can be viewed as special cases of the one-shot capacities when the correlation used for communication is a tensor product of $n$ identical correlations, in the limits $n\rightarrow \infty$ and $\epsilon\rightarrow 0$, where $\epsilon$ is the error tolerance. An added reason from indefinite causal structure is that in its presence the asymptotic capacities cannot be defined in the most straightforward way \cite{jia2017process}. At present it is still an open question what the best way to define asymptotic capacity is in the presence of indefinite causal structure, but the one-shot capacities do not suffer from the same issue.
The one-shot communication tasks of entanglement transmission and subspace transmission and their capacities were originally defined for channels in \cite{buscemi2010quantum}. We generalize the previous definitions to incorporate communication resources with indefinite causal structure. In the next section we solve for the values of the capacities for some simple but important models of indefinite causal structure.
As mentioned, the notion of causality measures applies to frameworks more general than quantum ones and relies only on the concepts of correlations that mediate the causal influence and of local operations that can change the correlations in order to exert causal influence. The following tasks require in addition that the states to be transmitted live on complex Hilbert spaces, and that the allowed local operations contain preparations of maximally entangled states for the entanglement transmission task, and preparations of arbitrary pure states on subspaces for the subspace transmission tasks.
\subsection{Entanglement transmission capacity}\label{subsec:etc}
The goal of the entanglement transmission task is to transmit locally prepared entanglement through the correlation into shared entanglement. Suppose $A$ and $B$ share a correlation $G^{AB}$ that allows $A$ to send quantum states on Hilbert spaces of at most dimension $\tilde{m}$. In the $A$ to $B$ one-shot entanglement transmission task for the correlation $G^{AB}$, for each given dimension $m\le \tilde{m}$, $A$ first prepares a maximally entangled state $\Upphi^{MM'}\in L(\mathcal{H}^M\otimes \mathcal{H}^{M'})$ ($L(\mathcal{H}^x)$ denotes bounded linear operators on Hilbert space $\mathcal{H}^x$) with $\mathcal{H}^M\subset \mathcal{H}^A$, where $\mathcal{H}^A$ is the largest system $A$ can prepare states on, $\dim \mathcal{H}^M=m$, and $M'$ is a copy of the system $M$. Then $A$ keeps the $M$ part of the state intact to herself and send the $M'$ part of the state to $B$ using $G^{AB}$ such that they share a state $\Psi^{MM'}(E,D)$ with part $M$ held by $A$ and part $M'$ held by $B$. In this transmission $A$ applies some encoding local operation $E$ (which must keep the $M$ part of the original state $\Upphi^{MM'}$ intact) and $B$ applies some decoding local operation $D$. The goal is for $\Psi^{MM'}(E,D)$ to be as close to $\Upphi^{MM'}$ as possible.
Here we make a distinction between \textit{active} entanglement transmission and \textit{passive} entanglement transmission. The task of passive entanglement transmission is just as stated above, for which $A$ must keep the system $M$ intact after the original maximally entangled state $\Upphi^{MM'}$ is prepared. The task of active entanglement transmission on the other hand allows $A$ to apply some local operation $E'$ on the $M$ part of $\Psi^{MM'}(E,D)$ to obtain $\Psi^{MM'}(E,E',D)$ before it is compared with the target state $\Upphi^{MM'}$.\footnote{We require that $A$ must finally share entanglement with $B$ by keeping the $M$ part of the originally prepared state in order not to confuse the task of entanglement transmission with entanglement generation. Otherwise the parties can use an entangled state $G^{AB}$ that does not allow signalling to set up shared entanglement by simply discarding the originally prepared $\Upphi^{MM'}$ and keeping $G^{AB}$.} In the literature the distinction between the active and passive entanglement transmission tasks is often not stated explicitly, with some articles adopting the former (e.g., \cite{buscemi2010quantum}) and some others (e.g., \cite{tomamichel2016quantum}) adopting the latter as the ``entanglement transmission task''. It is not clear to us whether the two tasks are equivalent (having the same capacity), so we prefer to state them as different tasks.
For any positive integer $m\le \tilde{m}$, define the $A$ to $B$ \textbf{entanglement transmission fidelity} for $G^{AB}$ as:
\begin{align}\label{eq:gef1}
F_{\text{ent}}(G^{AB};m):=&\max_{\substack{\mathcal{H}^M\subset\mathcal{H}^A\\\dim \mathcal{H}^M=m}}\max_{E,D} \bra{\Upphi^{MM'}}\Psi^{MM'}(E,D)\ket{\Upphi^{MM'}}\quad \text{for the passive task},
\\
F_{\text{ent}}(G^{AB};m):=&\max_{\substack{\mathcal{H}^M\subset\mathcal{H}^A\\\dim \mathcal{H}^M=m}}\max_{E,E',D} \bra{\Upphi^{MM'}}\Psi^{MM'}(E,E',D)\ket{\Upphi^{MM'}}\quad \text{for the active task},\label{eq:gef2}
\end{align}
where $\ket{\Upphi^{A\bar{A}}}\in \mathcal{H}^A\otimes \mathcal{H}^{\bar{A}}$ is the pure state corresponding to $\Upphi^{A\bar{A}}$. In the inner maximization the parties optimize over all the encodings and decodings. In the outer maximization $A$ optimizes over all the subspaces.
Let $0\le \epsilon\le 1$ be a real number. $R=\log m$ is an \textbf{$\epsilon$-achievable rate} if
\begin{align}
F_{\text{ent}}(G^{AB};m)\ge 1-\epsilon.
\end{align}
The $A$ to $B$ \textbf{one-shot entanglement transmission capacities} of $G^{AB}$ are defined as
\begin{align}
Q_{\text{ent}}^\rightarrow(G^{AB};\epsilon):=\max\{R: R \text{ is $\epsilon$-achievable}\}.
\end{align}
The tasks of $B$ to $A$ transmission with capacity $Q_{\text{ent}}^{\leftarrow}(G^{AB};\epsilon)$ can be defined analogously.
\subsection{Subspace transmission capacity}
The definitions for subspace transmission are analogous. The goal is to transmit any state in some subspace with high fidelity. Suppose $A$ and $B$ share a correlation $G^{AB}$ that allows $A$ to send quantum states on Hilbert spaces of at most dimension $\tilde{m}$. In the $A$ to $B$ transmission task, for each $m\le \tilde{m}$, $A$ picks a subspace $\mathcal{H}^M\subset \mathcal{H}^A$ where $\mathcal{H}^A$ is the largest system $A$ can prepare states on and $\dim \mathcal{H}^M=m$. Arbitrary pure states $\ket{\psi}\in \mathcal{H}^M$ are sent through $G^{AB}$ from $A$ to $B$ such that in the end $B$ gets a state with density operator $\Psi(E,D)\in L(\mathcal{H}^M)$. In the transmission $A$ applies some encoding local operation $E$ and $B$ applies some decoding local operation $D$. The goal is for $\Psi(E,D)$ to be as close to $\ketbra{\psi}$ as possible.
For any positive integer $m\le \tilde{m}$, define the \textbf{minimum output fidelity} for $G^{AB}$ as:
\begin{align}
F_{\text{min}}(G^{AB};m):=\max_{\substack{\mathcal{H}^M\subset \mathcal{H}^A\\ \dim \mathcal{H}^M=m}}\max_{E,D}\min_{\ket{\psi}\in \mathcal{H}^M}\bra{\psi}\Psi(E,D)\ket{\psi}.
\end{align}
In the inner maximization the parties optimize over all the encodings and decodings, and in the outer maximization $A$ optimizes over all the subspaces; the minimum is over all pure states in the chosen subspace. Let $0\le \epsilon\le 1$ be a real number. $R=\log m$ is an \textbf{$\epsilon$-achievable rate} if
\begin{align}
F_{\min}(G^{AB};m)\ge 1-\epsilon.
\end{align}
The \textbf{one-shot subspace transmission capacities} of $G^{AB}$ are defined as
\begin{align}
Q_{\text{sub}}^\rightarrow(G^{AB};\epsilon):=\max\{R: R \text{ is $\epsilon$-achievable}\}.
\end{align}
The task for $B$ to $A$ transmission with capacity $Q_{\text{sub}}^{\leftarrow}(G^{AB};\epsilon)$ can be defined analogously.
\section{Capacities for simple models with indefinite causal structure}
In this section we solve for the values of the one-shot capacities for some simple but important models of indefinite causal structure. The models are defined in the process matrix framework \cite{oreshkov2012quantum}, which we briefly review below, referring the readers to the original article for details.
\subsection{Process matrices}\label{subsec:pm}
The process matrices were introduced by Oreshkov, Costa and Brukner \cite{oreshkov2012quantum} to incorporate indefinite causal structure into quantum theory. The main idea is to assume that ordinary quantum theory with definite causal structure holds locally, while globally the causal structure can be indefinite.
The local parties where ordinary quantum theory with definite causal structure holds are denoted by $A, B, \cdots$. Local parties are where local operations are applied. The correlations that mediate causal influence are the process matrices, usually denoted by $W$. At the fundamental level, the correlations allow one to derive probabilities of observational outcomes within local parties -- they are maps from the local observational outcomes to the real numbers. The process matrices are representations of such maps as operators in Hilbert spaces.
Let the outcomes $i\in\mathcal{I}$ of a local observation be represented by the Choi operators $\hat{M_i}$ of the elements of a quantum instrument $\{M_i\}_{i\in \mathcal{I}}$. When the correlation is described by the process matrix $W$, the probability of observing the joint outcome $(i,j,\cdots,k)$ of $i$ within $A$, $j$ within $B$, ..., $k$ within $C$ is
\begin{align}\label{eq:br}
p(i,j,\cdots,k)=\Tr[(\hat{M}_i\otimes \hat{N}_j\otimes\cdots\otimes \hat{L}_k)^T W],
\end{align}
where $T$ denotes transpose.
Recall that a quantum instrument element $M_i$ within $A$ as a CP map $M_i:L(\mathcal{H}^{a_1})\rightarrow L(\mathcal{H}^{a_2})$ is associated with an input Hilbert space $\mathcal{H}^{a_1}$ and an output Hilbert space $\mathcal{H}^{a_2}$. If $\mathcal{H}:=\mathcal{H}^{a_1}\otimes\mathcal{H}^{a_2}\otimes \mathcal{H}^{b_1}\otimes\mathcal{H}^{b_2} \otimes \cdots \otimes \mathcal{H}^{c_1}\otimes\mathcal{H}^{c_2}$, then $W\in L(\mathcal{H})$. Let $\abs{x}$ stand for the dimension of the Hilbert space $\mathcal{H}^x$. Then the requirements that probabilities are non-negative and normalized imply that $W$ is positive semi-definite, has trace equal to $\abs{a_2}\abs{b_2}\cdots \abs{c_2}$, and lives in a linear subspace of $L(\mathcal{H})$, the detail of which we will not need for this paper. Conversely, any operator in $L(\mathcal{H})$ obeying these three conditions is a process matrix associated with local parties $A,B,\cdots,C$. In order to make explicit the parties associated with a process matrix we sometimes write $W$ as $W^{AB\cdots C}$.
Not only are channels and states (in the form of their Choi operators) process matrices, but ordinary quantum theory with definite causal structure is also a subtheory within the process matrix framework. The process matrix framework is therefore a generalization of ordinary quantum theory (with finite dimensional systems).
\subsection{Simple models with indefinite causal structure}\label{sec:tm}
\begin{figure}
\caption{The three causal relations. System $c$ not accessible to $A$ and $B$ is not drawn but is supposed to lie somewhere within the environment $e$.}
\label{fig:3cr}
\end{figure}
To illustrate how a process matrix can encode indefinite causal structure, we consider a family of simple models. In the next subsection we will solve for the one-shot quantum capacities of these models. In a classical theory, two events $A$ and $B$ can have three possible causal relations: $A$ being in the causal past of $B$, $A$ being in the causal future of $B$, and $A$ being causally disconnected with $B$. When there is uncertainty about the causal relation there is indefinite causal structure. This can hold at the classical level, e.g., when the uncertainty is due to the lack of knowledge. A theory of gravitation that incorporates such uncertainty would need to be probabilistic \cite{hardy2016operational}. At the quantum level causal relations can in addition be indefinite in a quantum coherent way. We present a family of simple models that reflects the quantum coherent indefiniteness of the three causal relations between two parties. The model is used to represent spontaneous causal fluctuations of quantum spacetime. Such effects are assumed to be significant for all pairs of parties with a small separation at the order of the Planck length.
The \textit{harmonic clean models} \cite{jia2018analogue} (illustrated in FIG. \ref{fig:3cr}) put the three possible causal relations between parties $A$ and $B$ in ``superposition''. Let the complex numbers $\alpha_1,\alpha_2,\alpha_3$ be the probability amplitudes for the causal relations such that $\abs{\alpha_1}^2+\abs{\alpha_2}^2+\abs{\alpha_3}^2=1$. We introduce the shorthand notation $\alpha$ for $\alpha_1,\alpha_2,\alpha_3$ ($\alpha$ is like a complex 3-vector). Inspired by \cite{feix2017quantum} we define
\begin{align}
\ket{w(\alpha)}^{GABE}:=&\alpha_1\ket{1}^g\ket{\Psi(\alpha)}^{a_1 e_2e_3}\ket{I}^{a_2 b_1}\ket{I}^{b_2 e_1}+\alpha_2\ket{2}^g\ket{\Psi(\alpha)}^{e_1 b_1e_3}\ket{I}^{b_2 a_1}\ket{I}^{a_2 e_2}\nonumber
\\
&+\alpha_3\ket{3}^g\ket{\Psi(\alpha)}^{a_1b_1e_3}\ket{I}^{a_2 e_1}\ket{I}^{b_2 e_2}.\label{eq:3crps}
\end{align}
We assume that the local subsystems of $A$ and $B$ all have equal dimensions, i.e., $\abs{a_1}=\abs{a_2}=\abs{b_1}=\abs{b_2}$. The vector $\ket{I}^{xy}$ (a pure maximally entangled state) is the Choi state representation of the identity channel from $x$ to $y$. The tripartite state $\ket{\Psi(\alpha)}$ (at the bottom of the pictures in FIG. \ref{fig:3cr}) is the ``initial state'' $A$ and $B$ can receive information from. In a more general model, both the channels between $A$ and $B$ and the state $\Psi$ may depend on $\alpha$. In the model we analyze we make the assumption that the channels do not. This simplification allows us to find exact answers for the causality measures later on, but a more general study may remove the assumption. Each of the three pictures of FIG. \ref{fig:3cr} depicts a definite causal structure between $A$ and $B$, and they correspond to the three terms of (\ref{eq:3crps}). The reason the systems are coupled through $\ket{\Psi}$ or $\ket{I}$ in the particular way in (\ref{eq:3crps}) can be inferred from the pictures in FIG. \ref{fig:3cr}.
The vector $\ket{w}$ takes the form of a superposition of three parts with amplitudes $\alpha_i$. It induces a four-party process matrix
\begin{align}
W^{GABE}(\alpha):=\ketbra{w(\alpha)}{w(\alpha)}^{GABE},
\end{align}
where $A$ consists of systems $a_1$ and $a_2$, $B$ consists of systems $b_1$ and $b_2$, $E$ consists of systems $e_1$, $e_2$ and $e_3$, and $G$ consists of the system $g$. For the local parties $A$ and $B$ the systems belonging to $E$ and $G$ are not accessible, so the process matrix reduces to
\begin{align}\label{eq:wcm}
W^{AB}(\alpha)=&\Tr_{GE}W^{GABE}(\alpha)=\sum_{i=1}^3 p_i W_i^{AB}(\alpha),
\\
\rho^{xye_3}=&\ketbra{\Psi(\alpha)}{\Psi(\alpha)}^{xye_3},
\\
\rho^{a_1}=&\rho^x, ~ \rho^{b_1}=\rho^y, ~ \rho^{a_1b_1}=\rho^{xy},\label{eq:rsc}
\\
W_1^{AB}(\alpha):=&\rho^{x}(\alpha)\otimes \Phi^{a_2b_1}\otimes \pi^{b_2}=\rho^{a_1}(\alpha)\otimes \Phi^{a_2b_1}\otimes \pi^{b_2},\label{eq:w1}
\\
W_2^{AB}(\alpha):=&\rho^{y}(\alpha)\otimes \Phi^{a_1b_2}\otimes \pi^{a_2}=\rho^{b_1}(\alpha)\otimes \Phi^{a_1b_2}\otimes \pi^{a_2},\label{eq:w2}
\\
W_3^{AB}(\alpha):=&\rho^{xy}(\alpha)\otimes \pi^{a_2}\otimes \pi^{b_2}=\rho^{a_1b_1}(\alpha)\otimes \pi^{a_2}\otimes \pi^{b_2}.\label{eq:w3}
\end{align}
Here $p_i=\abs{\alpha_i}^2$; the cross terms between the three branches of (\ref{eq:3crps}) vanish when the control system $g$ is traced out. The $\alpha$ dependence of $W_i$ comes from the $\alpha$ dependence of $\Psi$. The meaning of (\ref{eq:rsc}) is that $\rho^{a_1}$ is the reduced state of $\rho^{xye_3}$ on the first subsystem, $\rho^{b_1}$ is the reduced state of $\rho^{xye_3}$ on the second subsystem, and $\rho^{a_1b_1}$ is the reduced state of $\rho^{xye_3}$ on the first and second subsystems.
\subsection{Capacities}
The one-shot entanglement transmission capacities can be solved exactly for the harmonic clean models.
\begin{theorem}\label{th:osec}
For $W^{AB}$ in the family of harmonic clean models (\ref{eq:3crps}),
\begin{align}\label{eq:ec}
Q_{\text{ent}}^\rightarrow(W^{AB};\epsilon)=
\begin{cases}
\log\Big(\max\Big\{m\in \mathbb{N}: m\le \sqrt{\frac{1}{1-\frac{\epsilon}{1-p_1}}}\text{ and }m\le \abs{a_2}\Big\}\Big), & p_1<1-\epsilon,
\\
\log\abs{a_2}, & p_1\ge 1-\epsilon.
\end{cases}
\\\label{eq:ecl}
Q_{\text{ent}}^\leftarrow(W^{AB};\epsilon)=
\begin{cases}
\log\Big(\max\Big\{m\in \mathbb{N}: m\le \sqrt{\frac{1}{1-\frac{\epsilon}{1-p_2}}}\text{ and }m\le \abs{b_2}\Big\}\Big), & p_2<1-\epsilon,
\\
\log\abs{b_2}, & p_2\ge 1-\epsilon.
\end{cases}
\end{align}
\end{theorem}
\begin{proof}
We first find exact values of $F_{\text{ent}}(W^{AB};m)$ for the harmonic clean model for any $m\le \abs{a_2}$. Let $\Psi^{MM'}$ be the state $A$ and $B$ finally share and $\Upphi^{MM'}$ be the target maximally entangled state. $W^{AB}$ takes the form of a probabilistic mixture of $\sum_{i=1}^3 p_i W_i$ in (\ref{eq:wcm}), so $\Psi^{MM'}=:\sum_{i=1}^3 p_i\Psi^{MM'}_i$ is a probabilistic mixture of the three states $\Psi^{MM'}_i$ established through $W_i$. The function $F(\Psi^{MM'},\Upphi^{MM'})=\bra{\Upphi^{MM'}}\Psi^{MM'}\ket{\Upphi^{MM'}}$ is linear in the density operator $\Psi^{MM'}$, so $F(\Psi^{MM'},\Upphi^{MM'})=\sum_i p_i F(\Psi^{MM'}_i,\Upphi^{MM'})=:\sum_i p_i F_i$.
$W_1$ represents a noiseless channel, so it can achieve $F_1=1$ using a suitable protocol. In contrast, $F_2,F_3\le 1/m^2$. To see this, note from (\ref{eq:w2}) and (\ref{eq:w3}) that both $W_2$ and $W_3$ trace out $a_2$. Therefore the $M'$ part of the initial state $\Upphi^{MM'}$ is always eventually traced out. The final state takes the form $\Psi^{MM'}_i=\pi^{M}\otimes \rho_i^{M'}$ for $i=2,3$, where $\pi^M$ is the maximally mixed state and $\rho_i^{M'}$ are arbitrary states. Then for $i=2,3$
\begin{align}
F_i=&\bra{\Upphi^{MM'}}\Psi^{MM'}_i\ket{\Upphi^{MM'}}
\\
=&\bra{\Upphi^{MM'}}\pi^{M}\otimes \rho_i^{M'}\ket{\Upphi^{MM'}}
\\
=&\frac{1}{m^2}\sum_k \bra{k}^{M'}\rho_i^{M'}\ket{k}^{M'}
\\
=&\frac{1}{m^2}\Tr_{M'}\rho_i^{M'}=\frac{1}{m^2}.
\end{align}
The above equations for $i=2,3$ hold for any $\rho_i^{M'}$ and hence for any protocol. Therefore a protocol is optimal for the entanglement transmission tasks if and only if it achieves $F_1=1$. An example is to input the $M'$ part of $\Upphi^{MM'}$ into $a_2$, trace out $a_1$ and $b_2$, and take $b_1$ directly as $M'$. Since $F_2=1/m^2$ and $F_3=1/m^2$, the optimal fidelity in (\ref{eq:gef1}) or (\ref{eq:gef2}) for all $m\le \abs{a_2}$ takes the value
\begin{align}\label{eq:tmef}
F_{\text{ent}}(W^{AB};m)=\sum_i p_i F_i=p_1+(p_2+p_3)/m^2=p_1+(1-p_1)/m^2.
\end{align}
The entanglement fidelity (\ref{eq:tmef}), which holds for all $0<m\le \abs{a_2}$, is a monotonically increasing function of $p_1$. For fixed $\epsilon$ we can directly obtain the entanglement transmission capacity $Q_{\text{ent}}(W^{AB};\epsilon)$. A rate $0<m\le \abs{a_2}$ is achievable if and only if $F_{\text{ent}}(W^{AB};m)\ge 1-\epsilon$. Using (\ref{eq:tmef}) this is equivalent to $1-F_{\text{ent}}(W^{AB};m)=(1-p_1)(1-1/m^2)\le \epsilon$. To proceed we need to compare $1-p_1$ and $\epsilon$. If $1-p_1>\epsilon$, a simple calculation reveals that $m\le \abs{a_2}$ is achievable if and only if
\begin{align}\label{eq:mub}
m\le \sqrt{\frac{1}{1-\frac{\epsilon}{1-p_1}}}.
\end{align}
If $1-p_1\le \epsilon$, all $0<m\le \abs{a_2}$ are achievable. The maximum achievable value, $\mathsf{m}$, gives the capacity $Q_{\text{ent}}^\rightarrow(W^{AB};\epsilon)=\log \mathsf{m}$. We have
\begin{align}
Q_{\text{ent}}^\rightarrow(W^{AB};\epsilon)=
\begin{cases}
\log\Big(\max\Big\{m\in \mathbb{N}: m\le \sqrt{\frac{1}{1-\frac{\epsilon}{1-p_1}}}\text{ and }m\le \abs{a_2}\Big\}\Big), & p_1<1-\epsilon,
\\
\log\abs{a_2}, & p_1\ge 1-\epsilon.
\end{cases}
\end{align}
A symmetric proof establishes (\ref{eq:ecl}).
\end{proof}
Clearly $Q_{\text{ent}}^\rightarrow(W^{AB};\epsilon)$ is a non-decreasing function of $p_1$, which makes intuitive sense because as the amplitude of $A$ in the causal past of $B$ increases we expect a better capacity. For $A$ to $B$ communication, once $p_1$ is fixed the values of $p_2$ and $p_3$ are irrelevant. Note that the capacity also depends on $\abs{a_2}$, which imposes a bound on the maximum achievable value $\mathsf{m}$.
What relations between $\epsilon$ and $p_1$ give the extreme cases of maximal capacity and zero capacity? For fixed $\abs{a_2}$, it follows directly from (\ref{eq:ec}) that $Q_{\text{ent}}^\rightarrow(W^{AB};\epsilon)=\log\abs{a_2}$ if and only if \begin{align}
\frac{\epsilon}{1-p_1}\ge 1-\frac{1}{\abs{a_2}^2}.
\end{align}
On the other hand, $Q_{\text{ent}}^\rightarrow(W^{AB};\epsilon)=0$ if and only if
\begin{align}\label{eq:ccz}
p_1<1-\frac{4}{3}\epsilon.
\end{align}
In this case the RHS of (\ref{eq:mub}) is less than $2$, so $\mathsf{m}=1$ and $Q_{\text{ent}}^\rightarrow(W^{AB};\epsilon)=\log\mathsf{m}=0$.
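As a purely illustrative aside, the closed form (\ref{eq:ec}) is straightforward to evaluate numerically. The following Python sketch (with function and variable names of our own choosing, and assuming base-$2$ logarithms) reproduces the two regimes discussed above:
\begin{verbatim}
import math

def q_ent_forward(p1, eps, dim_a2):
    """One-shot A->B entanglement transmission capacity of a harmonic
    clean model, following the closed form (eq:ec) above."""
    if p1 >= 1 - eps:                    # error budget already satisfied
        return math.log2(dim_a2)
    # largest integer m with (1 - p1)(1 - 1/m^2) <= eps, capped by |a_2|
    bound = math.sqrt(1.0 / (1.0 - eps / (1.0 - p1)))
    m = min(int(math.floor(bound)), dim_a2)
    return math.log2(max(m, 1))

print(q_ent_forward(0.2, 0.1, 4))   # 0.0 : p1 < 1 - 4*eps/3, zero capacity
print(q_ent_forward(0.95, 0.1, 4))  # 2.0 : p1 >= 1 - eps, maximal capacity
\end{verbatim}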
Another important observation is that for any harmonic clean model $W$ with $p_1>0$, there is always an $\epsilon$ for which it communicates quantum information better than any non-signalling resource. For any non-signalling resource $V$, the same calculation as for $F_{\text{ent}}$ of $W_2$ or $W_3$ applies, so $F_{\text{ent}}(V;m)=1/m^2$. For any non-trivial $m$ ($m\ge 2$) there is an $\epsilon>0$ such that
\begin{align}
p_1+(1-p_1)/m^2>1-\epsilon>1/m^2.
\end{align}
In particular, we can pick such an $\epsilon_0$ for $m=2$. The second inequality above implies that for this $\epsilon_0$, $Q_{\text{ent}}(V;\epsilon_0)=0$, and the first inequality above implies through (\ref{eq:tmef}) that $Q_{\text{ent}}(W;\epsilon_0)\ge \log 2$.
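For instance (a worked illustration with arbitrarily chosen numbers), take $p_1=1/2$ and $m=2$: then $p_1+(1-p_1)/m^2=5/8$, so any $\epsilon_0\in(3/8,3/4)$ satisfies both inequalities, giving $Q_{\text{ent}}(V;\epsilon_0)=0$ for every non-signalling $V$ while $Q_{\text{ent}}(W;\epsilon_0)\ge\log 2$.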
All the previous conclusions have a symmetric version for $B$ to $A$ communication.
We move on to study subspace transmission. Let $\Psi$ be the state $B$ finally obtains. Similar to entanglement transmission, we observe that $F(\psi,\Psi)=\bra{\psi}\Psi\ket{\psi}$ is linear in $\Psi$. $\Psi$ in turn is a mixture of $\Psi_i$ established through $W_i$, so $F(\psi,\Psi)=\sum_i p_i F(\psi,\Psi_i)=:\sum_i p_i F_i$.
$W_1$ represents a noiseless channel, so it can achieve $F_1=1$ for any input state $\psi$ by sending it into $a_2$, tracing out $a_1$ and $b_2$, and taking the state that emerges at $b_1$ directly as $\Psi$. In contrast, once $E$ and $D$ are fixed $\Psi_i$ are constant states for $i=2$ and $i=3$, respectively. For these two cases, $\min_{\ket{\psi}} F_i=\min_{\ket{\psi}} \bra{\psi}\Psi_i\ket{\psi} =\lambda_{\text{min}}$, where $\lambda_{\text{min}}$ is the minimum eigenvalue of $\Psi_i$. A protocol that maximizes these values for $i=2,3$ outputs a maximally mixed state so that $\lambda_{\text{min}}=1/m$. Such a protocol always exists because $B$ can always trace out the state he gets and prepare a maximally mixed state, but the protocol may not be unique because $B$ may receive a maximally mixed state directly and hence does not need to re-prepare it.
If $\rho^{b_1}(\alpha_3)\neq \pi^{b_1}$, we cannot obtain a formula for the capacities without knowing $\rho^{b_1}(\alpha_3)$. Consider $F_{\text{min}}(W^{AB};m)$ for $m=\abs{a_2}$. A protocol that maximizes $\min_{\ket{\psi}} F_1$ must ask $B$ to take whatever he receives at $b_1$ as the final state up to unitaries, because this is the only way that the information from the original state at $A$ reaches him without loss. Yet the same protocol applied to $W_i$ for $i=2,3$ would not maximize $\min_{\ket{\psi}} F_i$, since these would take $\rho^{b_1}(\alpha_3)\neq \pi^{b_1}$ as the final state up to unitaries, and this state must have $\lambda_{\text{min}}<1/m$, which is sub-optimal. The actual optimal protocol for the subspace transmission task will then have to make a compromise between optimizing $i=1$ and $i=2,3$, and the precise form of the protocol will depend on what $\rho^{b_1}(\alpha_3)$ is.
If on the other hand $\rho^{b_1}(\alpha_3)= \pi^{b_1}$, then the same protocol that maximizes $\min_{\ket{\psi}} F_i$ for $i=1$ also maximizes it for $i=2,3$. In fact, this is the optimizing protocol for all $m$ not just $m=\abs{a_2}$. Therefore we have for all $m$, $F_{\text{min}}(W^{AB};m)=p_1+(p_2+p_3)/m=p_1+(1-p_1)/m$. A calculation similar to the one in the proof of Theorem \ref{th:osec} yields the following result.
\begin{theorem}\label{th:ossc}
For $W^{AB}$ in the family of harmonic clean models (\ref{eq:3crps}) with $\rho^{b_1}(\alpha_3)$ equalling the maximally mixed state $\pi^{b_1}$,
\begin{align}\label{eq:sc}
Q_{\text{sub}}^\rightarrow(W^{AB};\epsilon)=
\begin{cases}
\log\Big(\max\Big\{m\in \mathbb{N}: m\le \frac{1}{1-\frac{\epsilon}{1-p_1}}\text{ and }m\le \abs{a_2}\Big\}\Big), & p_1<1-\epsilon,
\\
\log\abs{a_2}, & p_1\ge 1-\epsilon.
\end{cases}
\\\label{eq:scl}
Q_{\text{sub}}^\leftarrow(W^{AB};\epsilon)=
\begin{cases}
\log\Big(\max\Big\{m\in \mathbb{N}: m\le \frac{1}{1-\frac{\epsilon}{1-p_2}}\text{ and }m\le \abs{b_2}\Big\}\Big), & p_2<1-\epsilon,
\\
\log\abs{b_2}, & p_2\ge 1-\epsilon.
\end{cases}
\end{align}
\end{theorem}
\subsection{The reconstruction theorem}
Incidentally, within the family of harmonic clean models the one-shot entanglement transmission capacities determine the causally relevant part of the model in the following sense.
\begin{theorem}[Reconstruction from One-Shot Quantum Capacities]\label{th:etr}
Let $W^{AB}$ be any unknown harmonic clean model. Then the capacities $Q_{\text{ent}}^\rightarrow(W^{AB};\epsilon)$ and $Q_{\text{ent}}^\leftarrow(W^{AB};\epsilon)$ indexed by $\epsilon$ determine $\abs{\alpha}:=(\abs{\alpha_1},\abs{\alpha_2},\abs{\alpha_3})$. If $\abs{\alpha_3}\neq 1$, they also determine the subsystem dimensions.
\end{theorem}
\begin{proof}
By definition, any particular harmonic clean model has $\abs{a_1}=\abs{a_2}=\abs{b_1}=\abs{b_2}$. We first show that if $\abs{\alpha_3}\neq 1$, the quantum capacities determine this dimension. It suffices to show that any pair of $W$ and $W'$ with different subsystem dimensions have a different capacity for some $\epsilon$. Without loss of generality assume that the subsystem dimension for $W$ is larger. By assumption $W$ has $\abs{\alpha_3}\neq 1$. Because $\abs{\alpha_3}\neq 1$ at least one of $p_1$ and $p_2$ is positive. Without loss of generality assume $p_1>0$. By (\ref{eq:ec}) for $\epsilon\ge 1-p_1$, $Q_{\text{ent}}^\rightarrow(W;\epsilon)=\log\abs{a_2}>\log\abs{a_2'}\ge Q_{\text{ent}}^\rightarrow(W';\epsilon)$. Hence the quantum capacities distinguish subsystem dimensions.
We next show that the capacities determine $\abs{\alpha}$. The capacities can tell if $\abs{\alpha_3}=1$, because by Theorem \ref{th:osec} only in this case all the capacities in both directions are zero. In this case $\abs{\alpha_1}=\abs{\alpha_2}=0$, so the capacities determine $\abs{\alpha}$. Next we assume that $\abs{\alpha_3}\neq 1$. We want to show that any pair of $W$ and $W'$ with the same subsystem dimension but different $\abs{\alpha}$ have different capacities for some $\epsilon$. If $W$ and $W'$ are different, then for $j=1$, $j=2$ or both, $\abs{\alpha_j}\neq \abs{\alpha'_j}$. Without loss of generality assume that $\abs{\alpha_1}< \abs{\alpha'_1}$ and hence $p_1< p'_1$. There is an $\epsilon>0$ such that
\begin{align}
\frac{3}{4}(1-p_1)> \epsilon >\frac{3}{4}(1-p'_1).
\end{align}
By (\ref{eq:ccz}), $Q_{\text{ent}}^\rightarrow(W^{AB};\epsilon)=0$, and $Q_{\text{ent}}^\rightarrow(W'^{AB};\epsilon)> 0$. This establishes the theorem.
\end{proof}
This reconstruction theorem is potentially important in that it suggests a way to quantitatively characterize correlations with indefinite causal structure in general operational probabilistic theories beyond quantum models. The process matrices are defined over complex Hilbert spaces. Although correlations with indefinite causal structure have been defined in more general settings (e.g., Hardy's causaloid \cite{hardy2005probability} and Oreshkov and Giarmatzi's general processes \cite{oreshkov2016causal}), their quantitative features have not been studied in detail. The definition of the one-shot entanglement transmission capacities (and that of the one-shot subspace transmission capacities) may be extended to general models. They offer quantitative characterizations of the more general correlations, and the reconstruction theorem suggests they may even characterize all the essential aspects of the correlation as far as causality is concerned. A preliminary question that deserves further study is to what extent the one-shot entanglement transmission capacities can be used to reconstruct general quantum process matrices outside the family of harmonic clean ones.
\section{Quantum causality measures}\label{sec:qcm}
In operational probabilistic theories, signalling is commonly used as \textit{the} criterion for causality. Yet for quantum models (complex Hilbert space quantum operational probabilistic theories, for which ordinary quantum theory and quantum theories with indefinite causal structure are special cases) there are motivations to introduce another criterion for causality. Indeed, quantum and classical information are different types of information, and we know from communication theory that one may be able to transmit classical information without being able to transmit quantum information. The signalling criterion is defined with respect to influencing classical measurement outcomes, so it may be regarded as a causality criterion based on classical information. Is there a causality criterion based on quantum information?
We propose a quantum causality criterion based on the one-shot quantum transmission tasks defined in Section \ref{sec:osqc}. Roughly speaking the criterion says that if a correlation performs any of the one-shot quantum transmission tasks better than all non-signalling correlations for some error tolerance $\epsilon$, then the correlation can be used to ``quantum signal''. The traditional signalling criterion is weak in the sense that any influence on the measurement outcome probabilities qualifies a correlation as signalling. Similarly, the quantum signalling criterion is weak in the sense that a better-than-non-signalling-resource performance for any one-shot quantum transmission task qualifies a correlation as quantum signalling.
One use of the quantum causality criterion is to distinguish natural models of quantum spacetime which support indefinite causal structure from unnatural ones. The models of quantum spacetime that only support indefinite causal structure according to the signalling criterion are unnatural. When the medium of causal influence is some material such as a telephone line, it is conceivable that the material only allows the transmission of classical but not quantum information. However, for quantum spacetime itself as the medium, it would be very unnatural for two causally connected parties to share correlations that can only send classical but not quantum information. A natural model of quantum spacetime should have indefinite causal structure according to both the signalling and the quantum signalling criteria.
Another use of the quantum signalling criterion is to update the axioms of causality measures to define quantum causality measures that quantify quantum causal strengths (Subsection \ref{subsec:aqcm}). Quantum causality measures have applications, for instance, in quantifying the causal strength of quantum spacetime correlations.
\subsection{Quantum signalling}
Suppose $G$ is the quantum correlation the two parties $A$ and $B$ share. We say that $A$ can \textbf{quantum signal} to $B$ if there exists an error tolerance $\epsilon$ for which they can perform any one-shot quantum transmission task better than the non-signalling resources in the traditional sense. In other words, we say that $A$ can quantum signal to $B$ if there exists $\epsilon>0$ for which $Q^{A\rightarrow B}(G;\epsilon)>\sup_{H\in \mathcal{N}}Q^{A\rightarrow B}(H;\epsilon)$, where $\mathcal{N}$ is the set of non-signalling resources defined on the same systems, and $Q$ is any of the one-shot quantum transmission capacities including the active and passive entanglement transmission capacities and the subspace transmission capacity. We call this the ``quantum signalling criterion''. To distinguish quantum signalling from the traditional notion of signalling, we call the latter ``classical signalling'', because it is defined based on classical observational outcomes.
Quantum signalling is stronger than classical signalling, because by definition in order to quantum signal the parties must beat all classically non-signalling resources, which implies that they share a resource that can classically signal.
Classical causal correlations do not allow quantum signalling. Classical correlations break entanglement and coherence. For the entanglement transmission task they can only set up shared separable states (otherwise entanglement may be created by LOCC) but not entangled states. Yet separable states will not have more entanglement fidelity than product states. Let $\sum_i p_i \rho^M_i\otimes \sigma^{M'}_i$ be an arbitrary separable state. Then
\begin{align}
F=&\bra{\Upphi^{MM'}}\sum_i p_i \rho^M_i\otimes \sigma^{M'}_i\ket{\Upphi^{MM'}}
\\
=&\frac{1}{\abs{M}}\Tr_M\Big[\sum_i p_i \rho^M_i \sigma^{M}_i\Big]
\\
\le&\max_i \frac{1}{\abs{M}}\Tr_M\Big[\rho^M_i \sigma^{M}_i\Big]
\\
=&\max_i \bra{\Upphi^{MM'}} \rho^M_i\otimes \sigma^{M'}_i\ket{\Upphi^{MM'}}.
\end{align}
Therefore the separable state does not have greater entanglement fidelity than the product state $\rho^M_i\otimes \sigma^{M'}_i$ for some $i$. Because any product state can be established without signalling, classical correlations do not perform better than the classically non-signalling resources for entanglement transmission. For the subspace transmission task, note that even the most effective classical causal correlation, the classical identity channel, cannot achieve a greater minimum output fidelity than classically non-signalling correlations. Suppose the classical identity channel projects onto the $\{\ket{i}\}_{i=1}^d$ basis. The most effective encodings and decodings are unitaries. Without loss of generality assume they are the quantum identity channels. The worst case scenario for the minimum output fidelity is the input state $\ket{\psi}=\sum_{i=1}^d \frac{1}{\sqrt{d}} \ket{i}$. Then $\Psi=\frac{1}{d}\id$ and $F=\bra{\psi}\Psi\ket{\psi}=1/d$. This minimum output fidelity can be matched if $A$ and $B$ share a classically non-signalling correlation and for the transmission $B$ traces out whatever he receives and outputs the maximally mixed state. Therefore classical correlations do not perform better than the classically non-signalling resources for subspace transmission either, and hence they cannot quantum signal.
We note that entanglement \textit{generation} capacities do not count as \textit{transmission} capacities, because as mentioned in Subsection \ref{subsec:etc} a correlation (e.g., an entangled state) that does not allow the transmission of quantum information may have a positive entanglement generation capacity. This contrasts with the situation for the asymptotic capacities of quantum channels, for which the three capacities of entanglement transmission, subspace transmission, and entanglement generation agree. One reason for the difference is that, restricted to channels, nothing can generate entanglement without being able to transmit entanglement. On the other hand, correlations with indefinite causal structure such as process matrices contain entangled states as special cases, and these can generate entanglement without being able to transmit entanglement. The inclusion of such correlations breaks the ``degeneracy'' of the three capacities.
Another reason for the difference is that quantum signalling is defined using one-shot capacities rather than asymptotic ones. For quantum channels there is an inequality that relates the entanglement transmission and subspace transmission capacities \cite{buscemi2010quantum}:
\begin{align}\label{eq:oscieq}
Q_{\text{ent}}(N;\epsilon)-1\le Q_{\text{sub}}(N;2\epsilon)\le Q_{\text{ent}}(N;4\epsilon),
\end{align}
which shows that the two capacities are closely related. However, it does not set up an equivalence of the two capacities. Neither can it be used to pick one out of the two capacities to define quantum signalling to yield a weaker quantum signalling criterion than with the other one. The incomparability of the one-shot quantum capacities leaves us with the need to check each type of capacity to qualify quantum signalling.
\subsection{Axioms for quantum causality measures}\label{subsec:aqcm}
A \textbf{quantum causality measure} $\mu^{A\rightarrow B}(G)$ on local parties $A$ and $B$ sharing correlation $G$ is a real-valued function obeying the following axioms:
\begin{enumerate}
\item $\mu^{A\rightarrow B}(G)$ is non-increasing under local operations within $A$ and $B$.
\item $\mu^{A\rightarrow B}(G) \ge 0$.
\item $\mu^{A\rightarrow B}(G) > 0$ only if $A$ can quantum signal to $B$ using $G$.
\end{enumerate}
A \textbf{normalized quantum causality measure} further obeys $\sup_G \mu^{A\rightarrow B}(G)=1$ so that $0\le \mu^{A\rightarrow B}(G)\le 1$ for all $G$. The quantum causality measure $\mu^{A\leftarrow B}(G)$ in the opposite direction is defined similarly except that it obeys Axiom 3 with $A$ and $B$ swapped.
In comparison to causality measure axioms, the only difference is that in axiom 3 ``quantum signal'' is used in place of ``signal''.
\subsection{Examples of quantum causality measures}
\begin{itemize}
\item The zero measure.
\begin{align}
\mu_{\text{zero}}^{A\rightarrow B}(G)=0 \quad \text{for all }G.
\end{align}
This function trivially obeys all three axioms. It is a causality measure, a quantum causality measure, and an entanglement measure.
\item The quantum signalling measure.
\begin{align}
\mu_{\text{qsg}}^{A\rightarrow B}(G)=
\begin{cases}
1, \quad \text{A can quantum signal to B}
\\
0, \quad \text{A cannot quantum signal to B}.
\end{cases}
\end{align}
This function clearly obeys axioms 1 to 3 and is a quantum causality measure. It is also a normalized measure.
\item For quantum channels the quantum channel capacities are quantum causality measures, as one can easily check. Their normalizations as in (\ref{eq:ncc}) are normalized quantum causality measures that assign the value one to noiseless channels.
\item For arbitrary correlations that may or may not contain indefinite causal structure, the one-shot entanglement transmission and subspace transmission capacities defined and studied in previous sections are quantum causality measures. Axioms 1 to 3 hold for these capacities directly by their definitions.
Definitions similar to (\ref{eq:ncm}) yield normalized capacities that assign the value one to the maximally causal correlations such as the identity channel:
\begin{align}
Q_{\text{norm}}(G;\epsilon):=\frac{Q(G;\epsilon)}{\sup_{G'\in \mathfrak{C}(G)}Q(G';\epsilon)},
\end{align}
where $\mathfrak{C}(G)$ is the set of correlations on the same systems of $G$, and $Q$ stands for any of the one-shot quantum capacities.
\end{itemize}
\section{Discussion}\label{sec:dis}
The present work is partially inspired by the previous work of Fitzsimons, Jones and Vedral (FJV), who introduced ``causality monotones'' for ``pseudo-density matrices'' as a measure of causality \cite{fitzsimons2015quantum}. Pseudo-density matrices as they stand in \cite{fitzsimons2015quantum} are generalizations of density matrices and describe qubit systems that reside at different times. Given a pseudo-density matrix $R$, a causality monotone $f(R)$ is required to obey
\begin{enumerate}
\item $f(R) \ge 0$, with $f(R) = 0$ if $R$ is completely positive, and $f(R) = 1$ for any $R$ obtained from two consecutive measurements on a single qubit closed system.
\item $f(R)$ is invariant under unitary operations.
\item $f(R)$ is non-increasing under local operations.
\item $\sum_i p_i f(R_i) \ge f(\sum_i p_iR_i)$.
\end{enumerate}
There are some obvious similarities and important differences between the FJV axioms for causality monotones and the axioms for (quantum) causality measures. The biggest difference is that the (quantum) causality measure axioms apply to general models, whereas the causality monotone axioms apply specifically to pseudo-density matrices. Pseudo-density matrices have some limitations which make other frameworks preferable for studying quantum theory with generalized states. In particular, in more general models such as the process matrices systems can have arbitrary finite dimensions, measurements are not restricted to projective ones, and measurement update rules are more flexible.
In terms of the content of the axioms, the $f(R)\ge 0$ part of the first axiom of FJV is the same as Axiom 2 for (quantum) causality measures. The rest of FJV Axiom 1 depends on the specific construction of pseudo-density matrices and does not have general applicability. For general correlations Axiom 2 of FJV would have an analogue saying that $\mu^{A\rightarrow B}(G)$ is invariant under local unitary operations within $A$ and within $B$. Yet this would actually follow from causality measure Axiom 1 because local unitary operations are reversible. Suppose for some local unitary $U$, $\mu^{A\rightarrow B}(U(G))<\mu^{A\rightarrow B}(G)$. Then by causality measure Axiom 1, $\mu^{A\rightarrow B}(G)=\mu^{A\rightarrow B}(U^{-1}\circ U(G))\le\mu^{A\rightarrow B}(U(G))<\mu^{A\rightarrow B}(G)$, which is a contradiction. By causality measure Axiom 1, $U$ cannot increase $\mu^{A\rightarrow B}(G)$ either. Hence any local unitary operation must keep $\mu^{A\rightarrow B}(G)$ constant. Axiom 3 of FJV is the same as Axiom 1 for (quantum) causality measures. Axiom 4 of FJV expresses the convexity of causality measures and, as already discussed in Subsection \ref{subsec:a}, is too stringent because it would rule out communication capacities as causality measures.
Another significant difference is that each particular (quantum) causality measure assigns two functions $\mu^{A\rightarrow B}$ and $\mu^{B\rightarrow A}$ to a pair of parties $A$ and $B$, while each particular causality monotone assigns only one function. With definite causal structure there can only be causal influence in at most one direction, so one of the two measures $\mu^{A\rightarrow B}$ and $\mu^{B\rightarrow A}$ is constantly zero and is redundant. In this case it is reasonable to have one function, as causality monotones do. However, when there is indefinite causal structure both $\mu^{A\rightarrow B}$ and $\mu^{B\rightarrow A}$ are relevant.
As defined, the FJV causality monotones are not restricted to two parties, although the explicit examples studied in \cite{fitzsimons2015quantum} are bipartite pseudo-density matrices. The causality measures as defined in this work only apply to two parties. In the multipartite case it remains to be investigated what the FJV causality monotones measure, and how the causality measures generalize.
Janzing, Balduzzi, Grosse-Wentrup and Sch{\"o}lkopf (JBGWS) proposed a set of five postulates for measures of causal strength \cite{janzing2013quantifying}. The authors mention that although they regard them as natural postulates, they ``do not claim that every reasonable measure of causal strength should satisfy these postulates''. In contrast, the axioms we propose in this paper are intended to hold for all measures of causality. A somewhat restrictive postulate of JBGWS is the ``mutual information'' postulate, which says that ``if the true causal DAG reads $X \rightarrow Y$, then $\mathfrak{C}_{X\rightarrow Y} =I(X;Y)$''. Here $X$ and $Y$ are nodes of a DAG, $X \rightarrow Y$ means that $X$ influences $Y$ directly -- $X$ changes the probability distribution of $Y$ even if all other variables are held constant, $\mathfrak{C}_{X\rightarrow Y}$ is the causal strength measure for $X$ causally influencing $Y$, and $I(X;Y)$ is the classical mutual information. The mutual information postulate sets classical mutual information as the default causal strength measure when the condition of the postulate is met. In our view this postulate cannot be imposed on general causality measures because it excludes other useful causality measures such as the quantum capacities. Another restriction of the JBGWS postulates is that as stated they apply to causal models based on DAGs. As mentioned in Section \ref{sec:cm}, the causality measure axioms of this paper do not require the models to be based on DAGs, and hence apply to more general models such as the causaloid models \cite{hardy2005probability}, which are naturally associated with hypergraphs rather than graphs.
An advantage of the JBGWS postulates is that they take into consideration the possible variation of variables outside of the nodes associated with the causal arrows. In the present paper we assumed that the correlations $G$ are bipartite, so that there are no degrees of freedom left in parties other than $A$ and $B$. Each $G$ is understood as a conditional correlation $G(v_C,v_D,\cdots)$ that arises when all other parties $C,D,\cdots$ have already conducted their operations and observed certain outcomes, with these parameters denoted as $v_C,v_D,\cdots$ for $G$ to depend on. A more general study of causality measures that allows these parameters to vary would be a study of multipartite causality measures, which also generalizes both the party $A$ exerting the causal influence and the party $B$ receiving it to multiple parties. This general study is left for future work.
\section{Conclusion}\label{sec:c}
In this paper we proposed three axioms for all reasonable quantitative measures of causal strength to obey. The axioms apply to any theory which contains the concepts of correlations that mediate the causal influence, and of local operations that change the correlations in order to exert the causal influence. In particular, the axioms apply to theories with indefinite causal structure. The canonical examples of causality measures are the various notions of information transmission capacity. For quantum models, we defined and studied the one-shot entanglement transmission and subspace transmission capacities as causality measures in detail. These causality measures in turn allow us to define the notions of quantum signalling and quantum causality measures for quantum models, such that correlations that cannot transmit quantum information have zero quantum causality measures.
Some natural questions arise from the present work that deserve further investigation. We studied causality measures for two parties. An obvious question is how to generalize the study to multiple parties. Another question concerns the use of one-shot quantum capacities to define quantum signalling. The motivation is to find the weakest criterion of quantum signalling that makes sense, and one-shot capacities which tolerate some error yield a weaker criterion than capacities that do not tolerate any error (such as asymptotic capacities). Although it is reasonable to use quantum capacities to qualify quantum signalling and the one-shot quantum capacities give the weakest criterion among all standard quantum capacities, one needs to keep an open mind on finding still weaker criteria. In terms of the relation between causality measures and entanglement measures, we pointed out in Section \ref{sec:cm} that in frameworks where causality and entanglement measures can both be defined, causality measures are special cases of entanglement measures that obey Axiom 3 in the LOCC setting without classical communication. Because of this connection it is possible to harness knowledge of entanglement measures and apply it to causality measures. Finally, we proved an interesting reconstruction theorem for the family of harmonic clean quantum models of indefinite causal structure that allows one to reconstruct the causally relevant parameters of the models from the one-shot entanglement transmission capacities. It is worth investigating further to what extent the one-shot capacities characterize general models of indefinite causal structure.
\end{document}
\begin{document}
\baselineskip=1.8pc
\begin{center}
{\bf
A High Order Multi-Dimensional Characteristic Tracing Strategy for
the Vlasov-Poisson System
}
\end{center}
\centerline{
Jing-Mei
Qiu\footnote{Department of Mathematics, University of Houston,
Houston, 77004, USA. E-mail: [email protected]. Research supported by
Air Force Office of Scientific Research YIP grant FA9550-12-0318, NSF grants DMS-1217008 and DMS-1522777.},
Giovanni Russo \footnote{Department of Mathematics and Informatics, University of Catania, Catania, 95125, Italy. Email: [email protected].
Research supported by ITN-ETN Marie Curie program 642768.
}
}
\centerline{\bf Abstract}
In this paper, we consider a finite difference grid-based semi-Lagrangian approach for solving the Vlasov-Poisson (VP) system.
Many existing methods are based on dimensional splitting, which decouples the problem into solving linear advection problems, see {\em Cheng and Knorr, Journal of Computational Physics, 22(1976)}.
However, such splitting is subject to the splitting error. If we consider multi-dimensional problems without splitting, the difficulty arises in tracing characteristics with high order accuracy. Specifically, the evolution of characteristics is subject to the electric field, which is determined globally from the distribution of particle densities via the Poisson's equation. In this paper, we propose a novel strategy for tracing characteristics to high order in time via a two-stage multi-derivative prediction-correction approach and by using moment equations of the VP system. With the feet of characteristics accurately located, we propose to use weighted essentially non-oscillatory (WENO) interpolation to recover function values between grid points and therefore to update solutions at the next time level. The proposed algorithm does not have the time step restriction of the Eulerian approach and enjoys high order spatial and temporal accuracy. However, such a finite difference algorithm does not enjoy mass conservation; we discuss one possible way of resolving this issue and its potential challenge to numerical stability.
The performance of the proposed schemes is numerically demonstrated via classical test problems such as Landau damping and two-stream instabilities.
\noindent {\bf Keywords:}
Semi-Lagrangian; Vlasov-Poisson system; Characteristics; High order; WENO
\section{Introduction}
\label{sec1}
\setcounter{equation}{0}
\setcounter{figure}{0}
\setcounter{table}{0}
This paper focuses on a high order truly multi-dimensional semi-Lagrangian (SL) approach for the Vlasov-Poisson
(VP) simulations. Arising from collisionless plasma applications, the VP system,
\begin{equation}
\frac{\partial f}{\partial t} + {\bf v} \cdot \nabla_{\bf x} f +
\mathbf{E}({\bf x},t) \cdot \nabla_{\bf v} f = 0, \label{eq: vlasov}
\end{equation}
and
\begin{equation}
\mathbf{E}(\mathbf{x},t)=-\nabla_{\bf x}\phi(\mathbf{x},t),\quad
-\Delta_{\bf
x}\phi(\mathbf{x},t)=\rho(\mathbf{x},t)-1,\label{eq: poisson}
\end{equation}
describes the temporal evolution of the particle distribution function in six dimensional phase space. $f( {\bf x},{\bf v},t)$ is
the probability distribution function which describes the probability of
finding a particle with velocity $\bf{v}$ at position $\bf{x}$ at
time $t$, $\bf{E}$ is the electric field, and $\phi$ is the
self-consistent electrostatic potential. The probability
distribution function couples to the long range fields via the
charge density, $\rho({\bf x},t) = \int_{\mathbb{R}^3} f({\bf x},{\bf v},t)d{\bf v}$,
where we take the limit of uniformly distributed infinitely massive
ions in the background. In this paper, we consider the VP system with
1-D in ${\bf x}$ and 1-D in ${\bf v}$.
Many different approaches have been proposed for the VP simulations.
There are the Lagrangian particle-in-cell (PIC) methods, which have been very popular
in practical high dimensional simulations due to its relatively low computational cost
\cite{friedman1991multi, jacobs2009implicit, heikkinen2008full}. However, the Lagrangian particle approach is known to suffer
the statistical noise which is of order $1/\sqrt{N}$, where $N$ is the number of particles in a simulation.
There are also very high order Eulerian finite difference \cite{zhou2001numerical}, finite volume \cite{banks2010new}, and finite element discontinuous Galerkin methods
\cite{heath2010discontinuous, cheng2011positivity}. Eulerian methods can be designed to be highly accurate in both space and time, thus being able to resolve complicated solution structures in a more efficient manner by using a relatively coarse numerical mesh. However, they are subject to CFL time step restrictions.
There is the dimensional split SL approach originally proposed in
\cite{cheng}, and further developed in the finite volume \cite{FilbetSB, sonnendruecker, begue1999two, besse2003semi, crouseilles2010conservative},
finite difference \cite{carrillo2007nim, Qiu_Christlieb, Qiu_Shu2}, finite element discontinuous Galerkin \cite{Qiu_Shu_DG,rossmanith2011positivity} and hybrid finite difference-finite element \cite{Guo_Qiu} frameworks. The semi-Lagrangian framework allows for extra large numerical time steps compared with the Eulerian approach, leading to some savings in computational cost. The dimensional splitting allows for a very simple implementation procedure for tracing characteristics; however it incurs a second order operator splitting error in time. For convergence estimates for semi-Lagrangian methods for VP simulations, we refer to \cite{charles2013enhanced}. If the splitting is not performed properly, numerical instabilities are observed \cite{huot2003instability}.
In \cite{Guo_Qiu2}, an integral deferred correction method is proposed for the dimensional split SL approach to reduce the splitting error.
In this paper, we propose a high order truly multi-dimensional SL finite difference approach for solving the VP system. Here `truly multi-dimensional' means that no operator splitting is involved. The difficulty lies in tracing characteristics with high order temporal accuracy within a time step. In particular, the evolution of characteristics is governed by the electric field, which is induced by the unknown particle distribution function $f$ in the Vlasov equation \eqref{eq: vlasov}. A high order two-stage multi-derivative predictor-corrector algorithm is proposed to build up a high order characteristic-tracing algorithm from lower order ones, with the help of moment equations of the VP system. A high order WENO interpolation is proposed to recover information among grid points. The proposed algorithm is of high order accuracy in both space and time. However, it is not mass conservative. We discuss this issue as well as the computational cost of the proposed algorithm.
The paper is organized as follows. Section~\ref{sec2} describes the high order SL finite difference approach without operator splitting. A high order way of tracing characteristics is proposed and analyzed. Issues related to computational cost and mass conservation are discussed.
Section~\ref{sec4} presents numerical simulation results. Finally, the conclusion is given in Section~\ref{sec5}.
\section{Truly multi-dimensional SL algorithm.}
\label{sec2}
\setcounter{equation}{0}
\setcounter{figure}{0}
\setcounter{table}{0}
\subsection{Algorithm framework}
\label{sec2.1}
Our goal is to design a high order SL finite difference scheme for the VP system without operator splitting.
Consider the VP system \eqref{eq: vlasov} with 1-D in $x$ and 1-D in $v$. The 2-D ${x-v}$ plane is discretized into uniformly spaced rectangular meshes,
\[
x_{\frac12} < x_{1+\frac12} < \cdots < x_{i+\frac12}<\cdots < x_{n_x+ \frac12},
\]
\[
v_{\frac12} < v_{1+\frac12} < \cdots < v_{j+\frac12}<\cdots < v_{n_v+ \frac12}.
\]
The center of each rectangular cell $[x_{i-\frac12}, x_{i+\frac12}] \times [v_{j-\frac12}, v_{j+\frac12}]$ is denoted as $(x_i, v_j)$.
We consider evolving the numerical solution $f^n_{i, j}$, $i = 1, \cdots n_x, j =1, \cdots, n_v$, where $f^n_{i, j}$
denotes the numerical solution at $(x_i, v_j)$ at the time level $t^n$.
The proposed SL algorithm in updating the solution $f^{n+1}_{ij}$ consists of the following steps.
\begin{enumerate}
\item Characteristics are traced backward in time to $t^n$. Let the foot of the characteristic at the time level $t^n$ emanating from $(x_i, v_j)$ at $t^{n+1}$ be denoted as $(x^{\star}_i, v^{\star}_j)$. It is approximated by numerically solving the following final value problem
\begin{equation}
\label{eq: char}
\left \{
\begin{array}{l}
\frac{d{x}(t)}{dt} = {v(t)}, \\[2mm]
\frac{d{v(t)}}{dt} = {E(x(t), t)},\\[2mm]
x(t^{n+1}) = x_i, \\[2mm]
v(t^{n+1}) = v_j.
\end{array}
\right.
\end{equation}
Here, we remark that solving \eqref{eq: char} with high order temporal accuracy is non-trivial. In particular,
the electric field ${\bf E}$ depends on the unknown function $f$ via the Poisson's equation \eqref{eq: poisson} in a global rather than local fashion. Moreover, since \eqref{eq: char} is a final value problem, the electric field $E$ is initially known only at the time level $t^n$.
In Section~\ref{sec2.2}, we discuss the proposed high order (up to third order) way of tracing characteristics in time.
\item The solution is updated as
\begin{equation}
\label{eq: update}
f^{n+1}_{i, j} = f(x^{n, (l)}_i, v^{n, (l)}_j, t^n) \approx f(x^{\star}_i, v^{\star}_j, t^n).
\end{equation}
We propose to recover $f(x^{n, (l)}_i, v^{n, (l)}_j, t^n)$ by a high order (up to sixth order) WENO interpolation from $f^n_{i, j}$, $i = 1, \cdots n_x, j =1, \cdots, n_v$. The procedures are discussed in
Section~\ref{sec2.3}.
\end{enumerate}
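To fix ideas, the two steps above can be summarized in the following schematic (a minimal Python sketch of one time step, not the implementation used for the numerical results in Section~\ref{sec4}; the helper routines \texttt{trace\_feet}, \texttt{weno\_interp2d} and \texttt{solve\_poisson\_fft} stand for the procedures of Sections~\ref{sec2.2} and \ref{sec2.3}, and the array names are ours).
\begin{verbatim}
import numpy as np

def sl_vp_step(f, x, v, dt, trace_feet, weno_interp2d, solve_poisson_fft, L):
    """One semi-Lagrangian step for the 1D-1D Vlasov-Poisson system.
    f[i, j] ~ f(x_i, v_j, t^n) on a uniform grid of the x-v plane."""
    dv = v[1] - v[0]
    # charge density by the mid-point rule and electric field at t^n
    rho = f.sum(axis=1) * dv
    E = solve_poisson_fft(rho, L)
    # step 1: trace characteristics backward from every grid point (x_i, v_j)
    x_star, v_star = trace_feet(f, x, v, E, dt)
    # step 2: f^{n+1}(x_i, v_j) = f(x*, v*, t^n), recovered by 2-D WENO
    return weno_interp2d(f, x, v, x_star, v_star)
\end{verbatim}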
\subsection{Tracing characteristics with high order temporal accuracy}
\label{sec2.2}
It is numerically challenging to design a one-step method to locate the foot of characteristics with high order accuracy in time.
The electric field $E$ is not explicitly known; it is induced by the unknown function $f$ via the Poisson's equation \eqref{eq: poisson}.
Since it is difficult to evaluate the electric field $E$ (r.h.s. of equation \eqref{eq: char}) at intermediate time stages between $[t^n, t^{n+1}]$, Runge-Kutta methods cannot be used directly.
Below we describe our proposed predictor-corrector procedure for locating the foot of characteristics. We first describe a first order scheme for tracing characteristics; the second order scheme is built upon the first order prediction; and the proposed third order scheme is built upon the second order prediction.
In our notations, the superscript $^n$ denotes the time level, the subscript $i$ and $j$ denote the location $x_i$ and $v_j$ in $x$ and $v$ directions respectively, the superscript $^{(l)}$ denotes the formal order of approximation. For example, in equation \eqref{eq: x_v_1} below,
$x^{n, (1)}_{i}$ (or $v^{n, (1)}_{j}$) approximates $x_i^\star$ (or $v_j^\star$) with first order,
and $E^n_i = E(x_i, t^n)$. $\frac{d}{dt} = \frac{\partial}{\partial t} + \frac{dx}{dt}\frac{\partial}{\partial x}$ denotes the material derivative along characteristics.
The order of approximation we mentioned in this subsection is for temporal accuracy.
We propose to use a spectrally accurate fast Fourier transform (FFT) in solving the Poisson's equation \eqref{eq: poisson}, whose r.h.s. function $\rho(x, t) = \int f(x, v, t)dv$ is evaluated numerically by a mid-point rule. The mid-point rule is of spectral accuracy given that the function being integrated is either periodic or compactly supported \cite{boyd2001caf}.
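For completeness, a minimal sketch of this spectral field solve is given below (our own function and variable names; it assumes a periodic domain of length $L$ with the neutrality condition built into the zero mode).
\begin{verbatim}
import numpy as np

def solve_poisson_fft(rho, L):
    """Spectral solve of -phi_xx = rho - 1 with E = -phi_x on a periodic
    domain of length L, given rho at the n_x cell centers."""
    n_x = rho.size
    k = 2.0 * np.pi * np.fft.fftfreq(n_x, d=L / n_x)   # angular wave numbers
    rhs_hat = np.fft.fft(rho - 1.0)
    E_hat = np.zeros_like(rhs_hat)
    E_hat[1:] = -1j * rhs_hat[1:] / k[1:]               # E_hat = -i rhs_hat / k
    # k = 0 mode: E is taken with zero spatial mean
    return np.real(np.fft.ifft(E_hat))
\end{verbatim}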
\noindent
\underline{\em \bf First order scheme.} We let
\begin{equation}
\label{eq: x_v_1}
x^{n, (1)}_i = x_i - v_j \Delta t; \quad v^{n, (1)}_j = v_j - E^n_i \Delta t,
\end{equation}
which are first order approximations to $x_i^\star$ and $v_j^\star$, see Proposition~\ref{prop: order1} below.
Let
\begin{equation}
\label{eq: f_1}
f^{n+1, (1)}_{i, j} = f(x^{n, (1)}_i, v^{n, (1)}_j, t^n),
\end{equation}
which is a first order in time approximation to $f^{n+1}_{i, j}$.
Note that the spatial approximation in equation \eqref{eq: f_1} (and in other similar equations in this subsection) is performed via high order WENO interpolation discussed in Section~\ref{sec2.3}.
Based on $\{f^{n+1, (1)}_{i, j}\}$, we compute
\[
\rho^{n+1, (1)}_i, \quad E^{n+1, (1)}_i
\]
by using a mid-point rule and FFT based on the Poisson's equation \eqref{eq: poisson}.
Note that $\rho^{n+1, (1)}_i$ and $E^{n+1, (1)}_i$ also approximate $\rho^{n+1}_i$ and $E^{n+1}_i$ with first order temporal accuracy.
\begin{prop}
\label{prop: order1}
$x^{n, (1)}_i$ and $v^{n, (1)}_j$ constructed in equation \eqref{eq: x_v_1} are first order approximations to $x_i^\star$ and $v_j^\star$ in time.
\end{prop}
\noindent
{\em Proof.}
By Taylor expansion,
\begin{eqnarray}
x_i^\star &=& x_i - \frac{d x_i}{dt}(x_i, v_j, {t^{n+1}}) \Delta t + \mathcal{O}(\Delta t^2) \nonumber\\
&=& x_i - v_j \Delta t + \mathcal{O}(\Delta t^2) \nonumber\\
&\stackrel{\eqref{eq: x_v_1}}{=}& x^{n, (1)}_i + \mathcal{O}(\Delta t^2), \nonumber
\end{eqnarray}
\begin{eqnarray}
v_j^\star &=& v_j - \frac{d v_j}{dt}\Big|_{t^{n+1}} \Delta t + \mathcal{O}(\Delta t^2) \nonumber\\
&=& v_j - E^{n+1}_i \Delta t + \mathcal{O}(\Delta t^2) \nonumber\\
&=& v_j - (E^{n}_i + \mathcal{O}(\Delta t)) \Delta t + \mathcal{O}(\Delta t^2) \nonumber\\
&\stackrel{\eqref{eq: x_v_1}}{=} & v^{n, (1)}_j + \mathcal{O}(\Delta t^2). \nonumber
\end{eqnarray}
Hence $x^{n, (1)}_i$ and $v^{n, (1)}_j$ are second order approximations to $x_i^\star$ and $v_j^\star$ locally in time for a time step; the approximation is of first order in time globally. We remark that the proposed first order scheme is similar to, but different from, the standard forward Euler or backward Euler integrator. It is specially tailored to the system \eqref{eq: char}.
$\mbox{ }\rule[0pt]{1.5ex}{1.5ex}$
\noindent
\underline{\em \bf Second order scheme.} We let
\begin{equation}
\label{eq: x_v_2}
x^{n, (2)}_i = x_i - \frac12 (v_j + v_j^{n, (1)}) \Delta t,
\quad
v^{n, (2)}_j = v_j - \frac12 (E(x_i^{n, (1)}, t^n) + E^{n+1, (1)}_i)\Delta t,
\end{equation}
which are second order approximations to $x_i^\star$ and $v_j^\star$, see Proposition~\ref{prop: order2} below.
Note that $E(x_i^{n, (1)}, t^n)$ in equation \eqref{eq: x_v_2} can be approximated by WENO interpolation from $\{E^n_i\}_{i=1}^{n_x}$.
Let
$
f^{n+1, (2)}_{i, j} = f(x^{n, (2)}_i, v^{n, (2)}_j, t^n),
$
approximating $f^{n+1}_{i, j}$ with second order in time.
Based on $\{f^{n+1, (2)}_{i, j}\}$, we compute
$
\rho^{n+1, (2)}_i, \quad E^{n+1, (2)}_i
$
approximating $\rho^{n+1}_i$ and $E^{n+1}_i$ with second order temporal accuracy.
\begin{prop}
\label{prop: order2}
$x^{n, (2)}_i$ and $v^{n, (2)}_j$ constructed in equation \eqref{eq: x_v_2} are second order approximations to $x_i^\star$ and $v_j^\star$ in time.
\end{prop}
\noindent
{\em Proof.}
It can be checked by Taylor expansion
\begin{eqnarray}
x_i^\star &=& x_i - \left(\frac{d x}{dt}(x_i, v_j, {t^{n+1}}) + \frac{d x}{dt}(x_i^\star, v_j^\star, t^{n})\right) \frac{\Delta t}{2} + \mathcal{O}(\Delta t^3) \nonumber\\
&=& x_i - \left(v_j^\star + v_j \right) \frac{\Delta t}{2} + \mathcal{O}(\Delta t^3) \nonumber\\
&\stackrel{Prop. \ref{prop: order1}}{=}& x_i - \left(v_j^{n, (1)} + \mathcal{O}(\Delta t^2) + v_j \right) \frac{\Delta t}{2} + \mathcal{O}(\Delta t^3) \nonumber\\
&=& x_i - \left(v_j^{n, (1)} + v_j \right) \frac{\Delta t}{2} + \mathcal{O}(\Delta t^3) \nonumber\\
&\stackrel{\eqref{eq: x_v_2}}{=}& x^{n, (2)}_i + \mathcal{O}(\Delta t^3). \nonumber
\end{eqnarray}
Similarly,
\begin{eqnarray}
v_j^\star &=& v_j - \left(E^{n+1}_i + E(x_i^\star, t^n)\right) \frac{\Delta t}{2} + \mathcal{O}(\Delta t^3) \nonumber\\
&\stackrel{Prop. \ref{prop: order1}}{=}& v_j - \left(E^{n+1, (1)}_i + E(x^{n, (1)}_i, t^n) + \mathcal{O}(\Delta t^2)\right)\frac{\Delta t}2 + \mathcal{O}(\Delta t^3) \nonumber\\
&\stackrel{\eqref{eq: x_v_2}}{=} & v^{n, (2)}_j + \mathcal{O}(\Delta t^3). \nonumber
\end{eqnarray}
Hence $x^{n, (2)}_i$ and $v^{n, (2)}_j$ are third order approximations to $x_i^\star$ and $v_j^\star$ locally in time for a time step; the approximation is of second order in time globally. Again the proposed second order scheme tailored to the system \eqref{eq: char} is similar to, but slightly different from, the second order Runge-Kutta integrator based on the trapezoid rule.
$\mbox{ }\rule[0pt]{1.5ex}{1.5ex}$
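The first and second order predictions above translate into a short computational sketch (illustrative Python only; \texttt{weno\_interp2d}, \texttt{weno\_interp1d} and \texttt{solve\_poisson\_fft} are placeholders for the interpolation and field-solve routines, and the array names are ours).
\begin{verbatim}
import numpy as np

def trace_feet_order2(f, x, v, E_n, dt, weno_interp2d, weno_interp1d,
                      solve_poisson_fft, L):
    """Second order predictor-corrector tracing of the characteristic feet,
    following (eq: x_v_1) and (eq: x_v_2)."""
    dv = v[1] - v[0]
    X, V = np.meshgrid(x, v, indexing="ij")
    # first order prediction
    x1 = X - V * dt                                  # x^{n,(1)}_i
    v1 = V - E_n[:, None] * dt                       # v^{n,(1)}_j
    f1 = weno_interp2d(f, x, v, x1, v1)              # f^{n+1,(1)}
    E1 = solve_poisson_fft(f1.sum(axis=1) * dv, L)   # E^{n+1,(1)}
    # second order correction
    E_at_x1 = weno_interp1d(E_n, x, x1)              # E(x^{n,(1)}_i, t^n)
    x2 = X - 0.5 * (V + v1) * dt
    v2 = V - 0.5 * (E_at_x1 + E1[:, None]) * dt
    return x2, v2
\end{verbatim}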
\noindent
\underline{\em \bf Third order scheme.} We let
\begin{equation}
\label{eq: x_3}
x^{n, (3)}_i = x_i - v_j \Delta t + \frac{\Delta t^2}2 (\frac23 E^{n+1, (2)}_i + \frac13 E(x_i^{n, (2)}, t^n)),
\end{equation}
\begin{eqnarray}
\label{eq: v_3}
v^{n, (3)}_j &=& v_j - E^{n+1, (2)}_i \Delta t + \frac{\Delta t^2}2 \left(
\frac23 (\frac{d}{dt}E(x_i, t^{n+1}))^{(2)} + \frac13 \frac{d}{dt}E(x_i^{n, (2)}, t^n)
\right),
\end{eqnarray}
which are third order approximations to $x_i^\star$ and $v_j^\star$, see Proposition~\ref{prop: order3} below.
Note that $\frac{d}{dt}E$ terms on the r.h.s. of equation \eqref{eq: v_3} will be obtained by using the macro-equations described below.
Let
$
f^{n+1, (3)}_{i, j} = f(x^{n, (3)}_i, v^{n, (3)}_j, t^n),
$
approximating $f^{n+1}_{i, j}$ with third order in time.
Based on $\{f^{n+1, (3)}_{i, j}\}$, we compute
$
\rho^{n+1, (3)}_i, \quad E^{n+1, (3)}_i
$
approximating $\rho^{n+1}_i$ and $E^{n+1}_i$ with third order temporal accuracy.
\begin{rem}We note that the mechanism to build this third order scheme is different from Runge-Kutta methods, where intermediate stage solutions are constructed. It has some similarity in spirit to the Taylor-series (Lax-Wendroff type) method, where higher order time derivatives are recursively transformed into spatial derivatives. The difference with the Lax-Wendroff type time integration is that the Lax-Wendroff method only uses spatial derivatives at one time level, while the proposed method uses the spatial derivatives (or their high order approximations) at both $t^n$ and $t^{n+1}$ via a predictor-corrector procedure. In a sense, the proposed method is a two-stage multi-derivative method.
\end{rem}
With $ \frac{\partial E}{\partial x} = \rho-1$ from the Poisson's equation \eqref{eq: poisson}, to compute the Lagrangian time derivative along characteristics $\frac{d}{dt}E = (\frac{\partial}{\partial t} + v \frac{\partial}{\partial x})E$, we only need to numerically approximate $\frac{\partial E}{\partial t}$. Notice that if we integrate the Vlasov equation \eqref{eq: vlasov} over $v$, we have
\begin{equation}
\label{eq: moment0}
\rho_t + J_x = 0,
\end{equation}
where $\rho(x, t)$ is the charge density and $J(x, t) = \int f v dv$ is the current density. With the Poisson's equation \eqref{eq: poisson}, and from eq.~\eqref{eq: moment0}, we have
$
\frac{\partial}{\partial x} (E_t + J) =0,
$
that is $E_t + J$ is independent of the spatial variable $x$. Thus
\[
E_t + J = \frac1L \int (E_t + J(x, t)) dx = \frac1L \int J(x, t) dx,
\]
the last equality above is due to the periodic boundary condition of the problem.
It can be shown, by multiplying the Vlasov equation \eqref{eq: vlasov} by $v$ and performing integration in both $x$- and $v$- directions, that
\[
\frac{\partial}{\partial t} \int J(x, t) dx = 0,
\]
therefore
\begin{equation}
\frac{\partial}{\partial t} E(x, t) + J = \frac1L \int J(x, t=0) dx \doteq \bar{J^0}, \nonumber
\end{equation}
where $\bar{\cdot}$ denotes the spatial average.
Hence,
\begin{equation}
\label{eq: dt_E}
\frac{d}{dt} E = (\frac{\partial}{\partial t} + v \frac{\partial}{\partial x}) E = \bar{J^0} - J(x, t) + v (\rho-1).
\end{equation}
Specifically, in equation \eqref{eq: v_3}
\begin{eqnarray}
(\frac{d}{dt}E(x_i, t^{n+1}))^{(2)} &=& \bar{J^0} - J^{n+1, (2)}_i + v_j (\rho^{n+1, (2)}_i-1), \nonumber\\
\frac{d}{dt}E(x_i^{n, (2)}, t^n) &=& \bar {J^0} - J (x_i^{n, (2)}, t^n) + v_j^{n, (2)} (\rho(x_i^{n, (2)}, t^n)-1). \nonumber
\end{eqnarray}
Note that $J^{n+1, (2)}_i$ and $J^{n}_i$ can be evaluated by mid-point rule from $\{f^{n+1, (2)}_{i, j} \}$ and $\{f^{n}_{i, j} \}$ respectively with spectral accuracy in space;
while $J (x_i^{n, (2)}, t^n)$ can be numerically approximated by WENO interpolation from $J^n_i$.
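In sketch form, the evaluation of these Lagrangian derivatives requires only grid arrays of $\rho$ and $J$ (an illustrative Python fragment with names of our own choosing):
\begin{verbatim}
import numpy as np

def material_dE_dt(rho, J, v_grid, Jbar0):
    """Lagrangian derivative dE/dt = Jbar0 - J(x,t) + v*(rho - 1),
    cf. (eq: dt_E).  rho, J are arrays over the x grid; v_grid is the
    v grid; Jbar0 is the conserved spatial average of J at t = 0.
    Entry (i, j) of the result is dE/dt at (x_i, v_j)."""
    return Jbar0 - J[:, None] + v_grid[None, :] * (rho[:, None] - 1.0)

# J_i follows from the mid-point rule, J_i = sum_j f_{ij} v_j * dv; for
# (eq: v_3) this is called once with the predicted rho^{n+1,(2)}, J^{n+1,(2)}
# and once with rho^n, J^n WENO-interpolated to the points x_i^{n,(2)}.
\end{verbatim}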
\begin{prop}
\label{prop: order3}
$x^{n, (3)}_i$ and $v^{n, (3)}_j$ constructed in equation \eqref{eq: x_3}-\eqref{eq: v_3} are third order approximations to $x_i^\star$ and $v_j^\star$ in time.
\end{prop}
\noindent
{\em Proof.}
It can be checked by Taylor expansion
\begin{eqnarray}
x_i^\star &=& x_i - \frac{d x}{dt}(x_i, v_j, {t^{n+1}}) {\Delta t} + \left(\frac23 \frac{d^2 x_i}{dt^2}(x_i, v_j, {t^{n+1}}) + \frac13 \frac{d^2 x_i}{dt^2}(x_i^\star, v_j^\star, {t^{n}})\right) \frac{\Delta t^2}{2}
+\mathcal{O}(\Delta t^4) \nonumber\\
&=& x_i - v_j {\Delta t} +
\left(\frac23 E^{n+1}_i + \frac13 E(x^\star_i, t^n)\right) \frac{\Delta t^2}{2} +
\mathcal{O}(\Delta t^4) \nonumber\\
&\stackrel{Prop. \ref{prop: order2}}{=}& x_i - v_j {\Delta t} +
\left(\frac23 E^{n+1, (2)}_i + \frac13 E(x_i^{n, (2)}, t^n) + \mathcal{O}(\Delta t^3)\right) \frac{\Delta t^2}{2}
+\mathcal{O}(\Delta t^4) \nonumber\\
&\stackrel{\eqref{eq: x_3}}{=}& x^{n, (3)}_i + \mathcal{O}(\Delta t^4). \nonumber
\end{eqnarray}
Similarly,
\begin{eqnarray}
v_j^\star &=& v_j - E^{n+1}_i {\Delta t} + \left(\frac23 \frac{d E}{dt}(x_i, {t^{n+1}}) + \frac13 \frac{dE}{dt}(x_i^\star, {t^{n}})\right) \frac{\Delta t^2}{2}
+\mathcal{O}(\Delta t^4) \nonumber\\
&\stackrel{Prop. \ref{prop: order2}}{=}& v_j - (E^{n+1, (2)}_i + \mathcal{O}(\Delta t^3)) {\Delta t} \nonumber\\
&& +
\left(\frac23 (\frac{d E}{dt}(x_i, {t^{n+1}}))^{(2)} + \frac13 \frac{dE}{dt}(x_i^{n, (2)}, {t^{n}})+ \mathcal{O}(\Delta t^3) \right) \frac{\Delta t^2}{2}
+ \mathcal{O}(\Delta t^4) \nonumber\\
&\stackrel{\eqref{eq: v_3}}{=}& v^{n, (3)}_j + \mathcal{O}(\Delta t^4). \nonumber
\end{eqnarray}
Hence $x^{n, (3)}_i$ and $v^{n, (3)}_j$ are fourth order approximations to $x_i^\star$ and $v_j^\star$ locally in time for a time step; the approximation is of third order in time globally.
$\mbox{ }\rule[0pt]{1.5ex}{1.5ex}$
\noindent
\underline{\em \bf Higher order extensions.} The procedures proposed above for locating the foot of characteristics can be extended to schemes with higher order temporal accuracy by using higher order versions of the Taylor expansion, e.g. as in equations~\eqref{eq: x_3} and \eqref{eq: v_3}. As higher order material derivatives, e.g. $\frac{d^2}{dt^2}E$, are involved, a set of macro-equations from the Vlasov equation are needed. Specifically, we propose to multiply the Vlasov equation \eqref{eq: vlasov} by $v^k$, integrate over $v$ and obtain
\[
\frac{\partial }{\partial t} M_k + \frac{\partial }{\partial x} M_{k+1} - k E M_{k-1} = 0,
\]
where $M_k(x, t) = \int f(x, v, t) v^k dv$. In particular, $M_0 = \rho(x, t)$ is the charge density and $M_1 = J(x, t)$ is the current density. When $k=0$, we recover equation \eqref{eq: moment0}; when $k=1$, we have
\begin{equation}
\label{eq: moment1}
\frac{\partial }{\partial t} J + \frac{\partial }{\partial x} M_2 - E \rho = 0.
\end{equation}
With these, we have
\begin{eqnarray}
\frac{d^2E}{dt^2} &\stackrel{\eqref{eq: dt_E}}{=}& (\frac{\partial }{\partial t} + v \frac{\partial }{\partial x}) (\bar{J^0} - J(x, t) + v (\rho-1)) \nonumber\\
\label{eq: dEdt2}
&\stackrel{\eqref{eq: moment1}}{=}& v^2 \pad{\rho}{x} + \pad{M_2}{x} -2v\pad{J}{x} - E,
\end{eqnarray}
where spatial derivative terms can be evaluated by high order WENO interpolations or reconstructions.
\subsection{High order WENO interpolations.}
\label{sec2.3}
In this subsection, we discuss the procedures in spatial interpolation to recover information among grid points, e.g. to update the numerical solution by equation \eqref{eq: update}, and in spatial reconstruction to recover function derivatives at grid points, e.g. in computing the spatial derivatives in equation~\eqref{eq: dEdt2}. There have been a variety of interpolation choices, such as the piecewise parabolic method (PPM) \cite{colella1984piecewise}, spline interpolation \cite{crouseilles2007hermite}, cubic interpolation propagation (CIP) \cite{yabe2001cip}, and ENO/WENO interpolation \cite{carrillo2007nim, Qiu_Shu2}. In our work we adapt the WENO interpolations.
\noindent
\underline{\em \bf WENO interpolations.} High order accuracy is achieved by using several points in the neighborhood: the number of points used in the interpolation
determines the order of interpolation. WENO \cite{Shu_book, carrillo2007nim, Qiu_Shu2}, short for `weighted essentially non-oscillatory', is a well-developed adaptive procedure to
overcome the Gibbs phenomenon when the solution is under-resolved or contains discontinuities. Specifically, when the solution is smooth the WENO interpolation recovers the linear interpolation for very high order accuracy; when the solution is under-resolved, the WENO interpolation automatically assigns more weight to smoother stencils. The smoothness of a stencil is measured by divided differences of the numerical solution. Below we provide formulas for the sixth order WENO interpolation, which is what we use in our simulations.
The sixth order WENO interpolation at a position $x\in [x_{i-1}, x_{i}]$ (or $\xi \doteq \frac{x-x_i}{\Delta x} \in [-1, 0]$) is obtained by
\[
Q(\xi) = \omega_1 P_1(\xi) + \omega_2 P_2(\xi) + \omega_3 P_3(\xi),
\]
where
\[
P_1(\xi) = (f_{i-3}, f_{i-2}, f_{i-1}, f_i) \,
\left (
\begin{array}{llll}
0&-1/3&-1/2&-1/6\\
0 & 3/2 & 2&1/2 \\
0&-3&-5/2&-1/2\\
1&11/6&1&1/6\\
\end{array}
\right )
\,
\left (
\begin{array}{l}
1\\
\xi\\
\xi^2\\
\xi^3
\end{array}
\right ),
\]
\[
P_2(\xi) = (f_{i-2}, f_{i-1}, f_i, f_{i+1}) \,
\left (
\begin{array}{llll}
0&1/6&0&-1/6\\
0 & -1 & 1/2&1/2 \\
1&1/2&-1&-1/2\\
0&1/3&1/2&1/6
\end{array}
\right )
\,
\left (
\begin{array}{l}
1\\
\xi\\
\xi^2\\
\xi^3
\end{array}
\right ),
\]
\[
P_3(\xi) = (f_{i-1}, f_i, f_{i+1}, f_{i+2}) \,
\left (
\begin{array}{llll}
0&-1/3&1/2&-1/6\\
1 & -1/2 & -1&1/2 \\
0&1&1/2&-1/2\\
0&-1/6&0&1/6
\end{array}
\right )
\,
\left (
\begin{array}{l}
1\\
\xi\\
\xi^2\\
\xi^3
\end{array}
\right ).
\]
Linear weights
\[
\gamma_1(\xi) = \frac{1}{20}(\xi-1)(\xi-2) , \quad \gamma_2(\xi) = -\frac{1}{10}(\xi+3)(\xi-2),
\quad \gamma_3(\xi) = \frac{1}{20}(\xi+3)(\xi+2) .
\]
Nonlinear weights are chosen to be
$$
\omega_m = \frac {\tilde{\omega}_m}
{\sum_{l=1}^3 \tilde{\omega}_l},\qquad \mbox{with} \quad
\tilde{\omega}_l = \frac {\gamma_l}{(\varepsilon + \beta_l)^2} , \quad l = 1, 2, 3,
$$
where $\epsilon=10^{-6}$,
and the smoothness indicators
\begin{eqnarray}
\beta_1 &=& -9\,f_{i-3}f_{i-2}+4/3\,{f_{i-3}}^{2}-11/3\,f_{i-3}f_{i}+10\,f_{i-3}f_{i-1}+14\,f_{i-2}f_{i}\nonumber\\
&&+\,22\,{f_{i-1}}^{2}-17\,f_{i-1}f_{i}+10/3\,{f_{i}}^{2}+16\,{f_{i-2}}^{2}-37\,f_{i-2}f_{i-1},\nonumber
\end{eqnarray}
\begin{eqnarray}
\beta_2 &=& -7\,f_{i-2}f_{i-1}+4/3\,{f_{i-2}}^{2}-5/3\,f_{i-2}f_{i+1}+6\,f_{i-2}f_{i}+6\,f_{i-1}f_{i+1}\nonumber\\
&&+\,10\,{f_{i}}^{2}-7\,f_{i}f_{i+1}+4/3\,{f_{i+1}}^{2}+10\,{f_{i-1}}^{2}-19\,f_{i-1}f_{i},\nonumber
\end{eqnarray}
\begin{eqnarray}
\beta_3 &=& -17\,f_{i-1}f_{i}+10/3\,{f_{i-1}}^{2}-11/3\,f_{i-1}f_{i+2}+14\,f_{i-1}f_{i+1}+10\,f_{i}f_{i+2}\nonumber\\
&&+\,16\,{f_{i+1}}^{2}-9\,f_{i+1}f_{i+2}+4/3\,{f_{i+2}}^{2}+22\,{f_{i}}^{2}-37\,f_{i}f_{i+1}.\nonumber
\end{eqnarray}
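For concreteness, the following short Python sketch transcribes the sixth order WENO interpolation formulas above (candidate polynomials $P_m$, linear weights $\gamma_m$, smoothness indicators $\beta_m$, and nonlinear weights $\omega_m$); it is meant only as an illustration of the procedure, not as the code used for the numerical results reported below.
\begin{verbatim}
import numpy as np

def weno6_interpolate(fm3, fm2, fm1, f0, fp1, fp2, xi, eps=1e-6):
    """WENO interpolation at xi = (x - x_i)/dx in [-1, 0],
    given point values f_{i-3}, ..., f_{i+2}."""
    basis = np.array([1.0, xi, xi**2, xi**3])
    A1 = np.array([[0, -1/3, -1/2, -1/6], [0,  3/2,  2,   1/2],
                   [0, -3,   -5/2, -1/2], [1, 11/6,  1,   1/6]])
    A2 = np.array([[0,  1/6,  0,  -1/6],  [0, -1,    1/2, 1/2],
                   [1,  1/2, -1,  -1/2],  [0,  1/3,  1/2, 1/6]])
    A3 = np.array([[0, -1/3,  1/2, -1/6], [1, -1/2, -1,   1/2],
                   [0,  1,    1/2, -1/2], [0, -1/6,  0,   1/6]])
    P = np.array([np.array([fm3, fm2, fm1, f0]) @ A1 @ basis,
                  np.array([fm2, fm1, f0, fp1]) @ A2 @ basis,
                  np.array([fm1, f0, fp1, fp2]) @ A3 @ basis])
    gamma = np.array([(xi - 1) * (xi - 2) / 20,
                      -(xi + 3) * (xi - 2) / 10,
                      (xi + 3) * (xi + 2) / 20])
    beta1 = (-9*fm3*fm2 + 4/3*fm3**2 - 11/3*fm3*f0 + 10*fm3*fm1 + 14*fm2*f0
             + 22*fm1**2 - 17*fm1*f0 + 10/3*f0**2 + 16*fm2**2 - 37*fm2*fm1)
    beta2 = (-7*fm2*fm1 + 4/3*fm2**2 - 5/3*fm2*fp1 + 6*fm2*f0 + 6*fm1*fp1
             + 10*f0**2 - 7*f0*fp1 + 4/3*fp1**2 + 10*fm1**2 - 19*fm1*f0)
    beta3 = (-17*fm1*f0 + 10/3*fm1**2 - 11/3*fm1*fp2 + 14*fm1*fp1 + 10*f0*fp2
             + 16*fp1**2 - 9*fp1*fp2 + 4/3*fp2**2 + 22*f0**2 - 37*f0*fp1)
    w = gamma / (eps + np.array([beta1, beta2, beta3]))**2  # nonlinear weights
    w /= w.sum()
    return float(w @ P)
\end{verbatim}
For smooth data the nonlinear weights approach the linear ones, and the sixth order accuracy of the underlying linear interpolant is recovered.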
\subsection{Computational cost and savings}
One of the procedures in the proposed algorithm that takes up much of the computational time is tracing the feet of characteristics. Assuming $N = n_x = n_v$, the scheme involves solving Poisson's equation via FFT at a cost on the order of $N\log N$, and a high order 2-D WENO interpolation at a cost on the order of $C N^2$, where the constant $C$ is larger when the order of interpolation is higher. Since the 2-D WENO interpolation (compared with the 1-D Poisson solver) is the procedure that takes most of the computational time, we will use the number of 2-D WENO interpolations as a measure of computational cost.
For the first order scheme \eqref{eq: x_v_1}, one high order 2-D WENO interpolation is involved. The proposed second order scheme \eqref{eq: x_v_2} is based on the first order prediction: two high order 2-D WENO interpolations are involved, leading to twice the computational cost of the first order scheme. The third order scheme \eqref{eq: x_3} - \eqref{eq: v_3} is based on the second order prediction: three high order 2-D WENO interpolations are involved. We claim that the proposed high order procedures are computationally efficient: the computational cost grows roughly linearly with the order of approximation. To further save computational cost, we propose to use lower order 2-D WENO interpolations in the prediction steps. Specifically, in the third order scheme \eqref{eq: x_3} - \eqref{eq: v_3}, we propose to use a second order 2-D WENO interpolation in the first order prediction, a fourth order 2-D WENO interpolation in the second order prediction, and a sixth order 2-D WENO interpolation in the final update step.
\subsection{Discussion on mass conservative correction and stability}
\label{sec3}
\setcounter{equation}{0}
\setcounter{figure}{0}
\setcounter{table}{0}
The proposed scheme is not mass conservative. One possible remedy is a conservative correction procedure, which allows the construction of a conservative scheme starting from a non-conservative one.
This approach was first introduced in the context of the BGK model of rarefied gas dynamics by P.~Santagati in his PhD thesis \cite{Santagati07}, and illustrated in a preprint
\cite{Russo-Santagati-BGK-11}. Taking a simple linear convection equation in one space dimension as an example, the equation takes the form
\begin{equation}
\pad{f}{t} + \pad{f}{x} = 0, \quad f(x,0) = f^0(x),
\label{eq:scalar2}
\end{equation}
with periodic boundary conditions. \rf{eq:scalar2} is discretized on a spatial grid, $x_i = i\Delta x$, $i=1,\ldots,n$.
Following Osher and Shu \cite{ShuOsherEfficient}, we impose that the pointwise value $f_i^n\approx f(x_i,t^n)$ satisfies the equation
\[
f^{n+1}_i - f^n_i
= -\frac{\hat{F}_{i+1/2}-\hat{F}_{i-1/2}}{\Delta x},
\]
where the function $\hat{F}$ is reconstructed at the cell edges from the pointwise values of $F(x_i) = \int_{t^n}^{t^{n+1}} f(x_i, \tau)d\tau$, in the same way that pointwise values of a function $u(x\pm\Delta x)$ can be reconstructed from cell averages $\bar{u}_i$; see \cite{Jiang_Shu} for a detailed description of the WENO reconstruction procedure. Let $(c_\ell,b_\ell)$, $\ell = 1,\ldots,s$, be the nodes and weights of an accurate quadrature formula on the interval $[0,1]$.
To approximate $F(x_i)$, one can use a quadrature rule
\[
F(x_i) \approx \Delta t \sum_{\ell=1}^{s} b_\ell f(x_i, t^n + c_\ell \Delta t),
\]
where $f(x_i, t^n + c_\ell \Delta t)$ can be obtained by the characteristics tracing and WENO interpolation described earlier in this section. Such a procedure can be directly extended to two-dimensional problems, including the Vlasov-Poisson system, where the non-conservative semi-Lagrangian method previously proposed can be used to obtain the solution at the quadrature points. The 2-point Gauss-Legendre quadrature formula with $b_1 = b_2 = 1$ and $c_{1,2}= \frac{1}{2}\pm\frac{1}{2\sqrt{3}}$ is found to be a good choice with good stability properties. On the other hand, such a conservative correction is subject to a time step constraint, related to the spatial mesh size and similar to that of an Eulerian approach, arising from the spatial interpolation and reconstruction procedures. As a result, the advantage of using larger time steps in an SL method is lost. Investigating and improving this stability constraint is left for future research.
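To illustrate the structure of this correction, the following minimal Python sketch (ours, purely illustrative) performs one conservative update for the linear advection example \eqref{eq:scalar2}: the intermediate values $f(x_i,t^n+c_\ell\Delta t)$ are obtained here by an exact periodic shift of the data (playing the role of the semi-Lagrangian WENO evaluation), the 2-point Gauss-Legendre nodes on $[0,1]$ are used with weights normalized to sum to one, and a simple upwind value stands in for the high order WENO reconstruction of $\hat F$ at the cell edges.
\begin{verbatim}
import numpy as np

def conservative_step(f, dx, dt):
    """One conservative correction step for f_t + f_x = 0 with periodic BCs."""
    n = f.size
    L = n * dx
    x = dx * np.arange(n)
    c_nodes = np.array([0.5 - 0.5 / np.sqrt(3.0), 0.5 + 0.5 / np.sqrt(3.0)])
    b_weights = np.array([0.5, 0.5])   # Gauss-Legendre weights on [0, 1]
    # F(x_i) = int_{t^n}^{t^{n+1}} f(x_i, tau) dtau
    #        ~ dt * sum_l b_l f(x_i, t^n + c_l dt);
    # for unit-speed advection the value at the quadrature time is f(x_i - c_l dt, t^n).
    F = np.zeros(n)
    for b, c in zip(b_weights, c_nodes):
        F += dt * b * np.interp(x - c * dt, x, f, period=L)
    # Placeholder first order reconstruction: Fhat_{i+1/2} = F_i (upwind).
    flux_diff = F - np.roll(F, 1)      # Fhat_{i+1/2} - Fhat_{i-1/2}
    return f - flux_diff / dx
\end{verbatim}
Replacing the shift by the semi-Lagrangian WENO evaluation, and the upwind edge value by a high order WENO reconstruction, gives the conservative scheme described above; conservation follows since the flux differences telescope over the periodic grid.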
\section{Numerical tests: the Vlasov-Poisson system}
\label{sec4}
In this section, we examine the performance of the
proposed fully multi-dimensional semi-Lagrangian method for the VP system. Periodic
boundary conditions are imposed in the $x$-direction, while zero boundary
conditions are imposed in the $v$-direction. We recall several norms of the VP system
below, which should remain constant in time.
\begin{enumerate}
\item $L^p$ norm $1\leq p<\infty$:
\begin{equation}
\|f\|_p=\left(\int_v\int_x|f(x,v,t)|^pdxdv\right)^\frac1p.
\end{equation}
\item Energy:
\begin{equation}
\text{Energy}=\int_v\int_xf(x,v,t)v^2dxdv + \int_xE^2(x,t)dx,
\end{equation}
where $E(x,t)$ is the electric field.
\item Entropy:
\begin{equation}
\text{Entropy}=\int_v\int_xf(x,v,t)\log(f(x,v,t))dxdv.
\end{equation}
\end{enumerate}
Tracking relative deviations of these quantities numerically will be
a good measure of the quality of numerical schemes. The relative
deviation is defined to be the deviation away from the corresponding
initial value divided by the magnitude of the initial value.
In our numerical tests, we let the time step size $\Delta t = CFL \cdot \min(\Delta x/v_{max}, \Delta v/\max(E))$, where the $CFL$ number is specified for each run,
and we let $v_{max} = 6$ to minimize the error from truncating the domain in the $v$-direction.
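As a small illustration of these diagnostics (ours; variable names are assumptions, and simple rectangle sums stand in for whatever discrete quadrature an implementation uses), the conserved quantities, their relative deviations, and the time step above might be computed as follows.
\begin{verbatim}
import numpy as np

def diagnostics(f, E, dx, dv, v):
    """Discrete L1 and L2 norms, energy and entropy of f on an (nx, nv) grid."""
    L1 = np.sum(np.abs(f)) * dx * dv
    L2 = np.sqrt(np.sum(f ** 2) * dx * dv)
    energy = np.sum(f * v[None, :] ** 2) * dx * dv + np.sum(E ** 2) * dx
    entropy = np.sum(f * np.log(np.maximum(f, 1e-300))) * dx * dv
    return L1, L2, energy, entropy

def relative_deviation(value, initial_value):
    """Deviation from the initial value divided by its magnitude."""
    return (value - initial_value) / abs(initial_value)

def time_step(cfl, dx, dv, vmax, E):
    """dt = CFL * min(dx / v_max, dv / max|E|)."""
    return cfl * min(dx / vmax, dv / np.max(np.abs(E)))
\end{verbatim}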
We first present the example of the two stream instability. In this example, we will
demonstrate (1) the high order spatial accuracy and the high order
temporal accuracy of the proposed schemes; (2) the time evolution of the overall mass and other theoretically conserved physical norms for the proposed method; (3) the performance of the proposed method in resolving solution structures.
\begin{exa} Consider two stream instability
\cite{FilbetS}, with an unstable initial distribution
function:
\begin{equation}
f(x,v,t=0)=\frac{2}{7\sqrt{2\pi}}(1+5v^2)\Big(1+\alpha\big((\cos(2kx)+\cos(3kx))/1.2+
\cos(kx)\big)\Big)\exp\Big(-\frac{v^2}{2}\Big)
\end{equation}
with $\alpha=0.01$, $k=0.5$; the length of the domain in the $x$-direction
is $L=\frac{2\pi}{k}$, and the background ion distribution
function is fixed, uniform and chosen so that the total net charge
density for the system is zero.
\end{exa}
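A minimal sketch (ours; grid sizes and array names are illustrative assumptions) of how this initial distribution might be set up on the computational grid is the following.
\begin{verbatim}
import numpy as np

def two_stream_initial(nx, nv, alpha=0.01, k=0.5, vmax=6.0):
    """Two stream instability initial data on a uniform (nx, nv) grid."""
    L = 2 * np.pi / k
    x = np.linspace(0.0, L, nx, endpoint=False)
    v = np.linspace(-vmax, vmax, nv)
    X, V = np.meshgrid(x, v, indexing="ij")
    perturb = 1 + alpha * ((np.cos(2*k*X) + np.cos(3*k*X)) / 1.2 + np.cos(k*X))
    f0 = 2.0 / (7.0 * np.sqrt(2*np.pi)) * (1 + 5*V**2) * perturb * np.exp(-V**2 / 2)
    return x, v, f0
\end{verbatim}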
We test both spatial and temporal convergence of the proposed truly multi-dimensional semi-Lagrangian method.
We first test the spatial convergence by using a sequence of meshes with $n_x = n_v = \{210, 126, 90, 70\}$.
The meshes are designed so that the coarse mesh grid coincides with part of the reference fine mesh grid ($n_x=n_v=630$).
We set $CFL=0.01$ so that the spatial error is the dominant error.
Table~\ref{tab: spa_2stream} shows the spatial convergence of the proposed scheme with sixth order WENO interpolation. The expected fifth order convergence, globally in time, is observed.
We then test the temporal convergence of the proposed first, second and third order schemes.
Table~\ref{tab: tem_2stream} provides the temporal convergence rates for the schemes with first to third order temporal accuracy.
We use the sixth order WENO interpolation and a spatial mesh of $Nx = Nv=160$, so that the temporal error is the dominant error.
The expected first, second and third order temporal accuracy is observed. In Table~\ref{tab: tem_2stream}, the time step size is about $6$ to $10$ times that of an Eulerian method, yet highly accurate numerical results are achieved.
To compare the performance of schemes with different temporal orders, we numerically
track the time evolution of physically conserved quantities of the system. In our runs, we let $n_x=n_v=128$, $CFL=5$.
In Figure~\ref{fig: 2stream_norm}, the time evolution of the numerical $L^1$ norm, $L^2$ norm, energy and entropy for schemes
with different orders of temporal accuracy is plotted. In general, higher order temporal accuracy leads to better preservation of these physically conserved norms. The $L^1$ norm is not conserved since our scheme is neither mass conservative nor positivity preserving. In Figure \ref{fig: 2stream}, we show the contour plot of the numerical solution of the proposed SL WENO method with third order temporal accuracy at around $T=53$. The plot is comparable to our earlier work reported in \cite{Qiu_Christlieb, Qiu_Shu2}.
\begin{table}[htb]
\begin{center}
\begin{tabular}{|c | c c|}
\hline
\cline{1-3} $Nx \times Nv$ &$L^1$ error & order \\
\hline
{$70\times 70$} &7.01E-7 & -- \\
\hline
{$90\times 90$} &2.06E-7&4.88\\
\hline
{$126\times 126$} &3.96E-8&4.89\\
\hline
{$210\times 210$} &3.20E-9&4.95\\
\hline
\end{tabular}
\end{center}
\caption{Order of accuracy in space for the SL WENO schemes: two stream instability.
The scheme uses sixth order WENO interpolation and third order temporal accuracy in tracing characteristics.
$T=1$ and $CFL=0.01$.}
\label{tab: spa_2stream}
\end{table}
\begin{table}[htb]
\begin{center}
\begin{tabular}{|c | c c|c c|c c|}
\hline
\cline{1-7} &\multicolumn{2}{c|}{first order} &\multicolumn{2}{c|}{second order} &\multicolumn{2}{c|}{third order} \\
\hline
\cline{1-7} $CFL$& $L^1$ error&order& $L^1$ error&order& $L^1$ error&order\\
\hline
6 & 1.17E-4& -- &2.40E-6 & -- & 1.13E-7&--\\
\hline
7 & 1.40E-4&1.13 & 2.80E-6 & 2.04 & 1.79E-7&3.02\\
\hline
8 & 1.63E-4& 1.16 & 3.69E-6 &2.07 & 2.69E-7&3.02\\
\hline
9 & 1.87E-4& 1.16 & 4.69E-6 &2.04 & 3.84E-7&3.03\\
\hline
10 & 2.12E-4& 1.20 & 5.84E-6 &2.08 & 5.31E-7&3.06\\
\hline
\end{tabular}
\end{center}
\caption{Order of accuracy in time for the SL WENO schemes with sixth order WENO interpolation and various orders of temporal accuracy. Two stream instability. $Nx = Nv=160$ and $T=5$.}
\label{tab: tem_2stream}
\end{table}
\begin{figure}
\caption{Two stream instability. The SL WENO scheme with
sixth order WENO interpolation in space and various orders of temporal accuracy.
Time evolution of the relative
deviations of discrete $L^1$ norms (upper left), $L^2$ norms,
kinetic energy norms (lower left) and entropy (lower right).}
\label{fig: 2stream_norm}
\end{figure}
\begin{figure}
\caption{Two stream instability: $T=53$. The SL WENO scheme with the sixth order WENO interpolation and a third order temporal accuracy. The spatial mesh is $128 \times 128$ and $CFL=5$.}
\label{fig: 2stream}
\end{figure}
\begin{exa} Consider weak Landau damping for the
Vlasov-Poisson system with initial condition:
\begin{equation}
\label{landau}
f(x,v,t=0)=\frac{1}{\sqrt{2\pi}}(1+\alpha\cos(kx))\exp\Big(-\frac{v^2}{2}\Big),
\end{equation}
where $\alpha=0.01$.
When the perturbation magnitude is small enough ($\alpha=0.01$), the VP system
can be approximated by linearization around the Maxwellian equilibrium
$f^0(v)=\frac{1}{\sqrt{2\pi}}e^{-\frac{v^2}{2}}$.
The analytical damping rate of the electric field can be derived accordingly \cite{fried1961plasma}.
We compare the numerical damping rates with the theoretical values. We only present
the case of $k=0.5$. The spatial computational grid has $n_x=n_v=128$ and $CFL=5$.
For the scheme with first, second and third order accuracy in time and sixth order WENO interpolation in space, we plot the evolution of electric field in $L^2$ norm
benchmarked with theoretical values (solid black lines in the figure) in Figure \ref{fig403}. A better match with the theoretical decay rate of the electric field is observed for schemes with second and third order temporal accuracy. The time evolution of the discrete $L^1$ norm, $L^2$ norm, kinetic energy and
entropy for schemes with different temporal orders is reported in Figure \ref{fig404}. The $L^1$ and $L^2$ norms are better preserved by schemes with higher order temporal accuracy.
Note that the mass is not exactly preserved. Energy and entropy are better preserved by the schemes with second and third order accuracy than by the scheme with first order accuracy.
\begin{figure}
\caption{Weak Landau damping. Time evolution of electric field in
$L^2$ norm.}
\label{fig403}
\end{figure}
\begin{figure}
\caption{Weak Landau damping. The proposed SL WENO scheme with
first, second and third order accuracy in time and sixth order WENO interpolation in space.
Time evolution of the relative
deviations of discrete $L^1$ norms (upper left), $L^2$ norms,
kinetic energy norms (lower left) and entropy (lower right).}
\label{fig404}
\end{figure}
\end{exa}
\begin{exa} Consider strong Landau damping. The
initial condition is equation \eqref{landau}, with $\alpha=0.5$ and
$k=0.5$.
The evolution of the $L^2$ norm of the electric field is provided in Figure \ref{fig405}; it is comparable to existing results in the literature, see e.g. \cite{Guo_Qiu}.
The time evolution of discrete $L^1$ norm,
$L^2$ norm, kinetic energy and entropy are reported in Figure \ref{fig407}.
The $L^1$ norm, as expected, is not conserved.
Numerical solutions of the proposed scheme at different times are observed to be comparable
to those well reported in the literature, e.g. \cite{Qiu_Christlieb, Guo_Qiu} among many others. We therefore omit
those figures to save space.
\begin{figure}
\caption{Strong Landau damping. Time evolution of electric field in
$L^2$ norm.}
\label{fig405}
\end{figure}
\begin{figure}
\caption{Strong Landau damping. The SL WENO scheme with sixth order WENO interpolation in space and
various orders of temporal accuracy.
Time evolution of the relative
deviations of discrete $L^1$ norms (upper left), $L^2$ norms,
kinetic energy norms (lower left) and entropy (lower right).}
\label{fig407}
\end{figure}
\end{exa}
\begin{exa} Consider the symmetric two stream
instability
\cite{banks2010new},
with the initial
condition
\begin{equation}
f(x,v,t=0)=\frac{1}{\sqrt{8\pi}v_{th}}\left[\exp\left(-\frac{(v-u)^2}{2v_{th}^2}\right)+\exp\left(-\frac{(v+u)^2}{2v_{th}^2}\right)\right]\big
(1+0.0005\cos(kx)\big )
\end{equation}
with $u=5\sqrt{3}/4$, $v_{th}=0.5$ and $k=0.2$. The background ion
distribution function is fixed, uniform and chosen so that the total
net charge density for the system is zero.
Figure~\ref{fig: 2stream2_E} plots the evolution of the electric field for the proposed scheme,
benchmarked with a reference growth rate from linear theory, $\gamma = \frac{1}{\sqrt{8}}$, see \cite{banks2010new}.
Results consistent with the theory are observed.
Time evolution of discrete $L^1$ norm, $L^2$ norm, kinetic energy and entropy of
schemes with different temporal orders are reported in Figure
\ref{fig: 2stream2_norm}. Again, higher order schemes in general perform better
in preserving the conserved physical quantities than low order ones.
In Figure \ref{fig: 2stream2}, we report numerical
solutions from the SL WENO schemes with various orders of temporal accuracy in approximating the distribution function $f$.
It can be observed that, with the same time step size, the higher order schemes (e.g. the second and third order ones) perform better than the first order one.
\begin{figure}
\caption{Symmetric two stream instability: time evolution of electric field in
$L^2$ norm. The SL WENO scheme with sixth order WENO interpolation in space and various orders of temporal accuracy. }
\label{fig: 2stream2_E}
\end{figure}
\begin{figure}
\caption{Symmetric two stream instability. The SL WENO scheme with
sixth order WENO interpolation in space and various orders of temporal accuracy.
Time evolution of the relative
deviations of discrete $L^1$ norms (upper left), $L^2$ norms,
kinetic energy norms (lower left) and entropy (lower right).}
\label{fig: 2stream2_norm}
\end{figure}
\begin{figure}
\caption{Symmetric two stream instability: $T=50$. Results from schemes with first order temporal accuracy with $CFL=5$ (upper left), $CFL=0.1$ (upper right).
Results from schemes with second order temporal accuracy (lower left) and third order temporal accuracy (lower right) and $CFL=5$.}
\label{fig: 2stream2}
\end{figure}
\end{exa}
\section{Conclusion}
\label{sec5}
\setcounter{equation}{0}
\setcounter{figure}{0}
\setcounter{table}{0}
In this paper, we propose a systematic way of tracing characteristics with high order temporal accuracy for the Vlasov-Poisson system that is one-dimensional in space and one-dimensional in velocity. Based on this mechanism, a finite difference grid-based semi-Lagrangian approach coupled with WENO interpolation is proposed to evolve the system. It is numerically demonstrated that schemes with higher order temporal accuracy perform better in many aspects than the first order one. Designing mass conservative semi-Lagrangian schemes that are not subject to time step constraints remains challenging and is left for future research.
\end{document}
\begin{document}
\title{The fifth moment of Hecke $L$-functions in the weight aspect}
\author{Rizwanur Khan}
\address{
Science Program\\ Texas A\&M University at Qatar\\ PO Box 23874\\ Doha, Qatar}
\email{[email protected] }
\subjclass[2010]{11M99, 11F11}
\keywords{$L$-functions, moments, cusp forms, Kuznetsov trace formula}
\begin{abstract}
We prove an upper bound for the fifth moment of Hecke $L$-functions associated to holomorphic Hecke cusp forms of full level and weight $k$ in a dyadic interval $K\le k \le 2K$, as $K\to\infty$. The bound is sharp on Selberg's eigenvalue conjecture.
\end{abstract}
\maketitle
\section{Introduction}
Moments of $L$-functions, especially at the central point, are extensively studied. They yield valuable data about an $L$-function's distribution, and can be used for example to infer information about the size, non-vanishing and symmetry-type of the central values.
This article is inspired by the recent works of Kiral and Young \cite{kiryou} and Blomer and Khan \cite{blokha}. The former paper established, for the first time, an upper bound for the fifth moment of $L$-functions associated to holomorphic newforms of prime level $q$ and fixed small weight, as $q\to \infty$. The latter paper established a certain reciprocity-type formula for the twisted fourth moment of Hecke $L$-functions in the level aspect, which gave as a corollary an upper bound for the fifth moment, but with more general conditions and also allowing for Maass $L$-functions. In both papers, the upper bound for the fifth moment depends on the Ramanujan conjecture at the finite places, and when assuming the truth of this conjecture, the given upper bound is sharp (as strong as the Lindel\"{o}f bound on average).
The goal of the present paper is to fix the level (at 1) and prove a fifth moment estimate in the weight aspect (it should also be possible to work with Hecke Maass $L$-functions in the spectral aspect). Let $H_k$ denote the orthonormal set of holomorphic Hecke cusp forms $f$ of level $1$
and weight $k$. This has $k/12+O(1)$ elements and forms a basis of the space of cusp forms of level 1 and weight $k$. Let $\lambda_f(n)$ denote the (real) eigenvalue corresponding to $f\in H_k$ of the $n$-th Hecke operator (which satisfies Deligne's bound $\lambda_f(n)\ll n^\epsilon$). The $L$-function associated to $f$ is defined for $\Re(s)>1$ by
\begin{align*}
L(s, f)= \sum_{n=1}^{\infty}
\frac{\lambda_f(n)}{n^s}.
\end{align*}
The central point is $s=\half$ and by \cite{kohzag} the central value $L(\half, f)$ is known to be non-negative. Our main theorem is
\begin{theorem} \label{main} Let
\begin{align*}
\mathcal{F} = \bigcup_{\substack{K \le k \le 2K\\k\equiv 0 \bmod 2}} H_k,
\end{align*}
a set of $O(K^2)$ elements.
For any $\epsilon>0$, we have
\begin{align}
\label{fifthbound} \sum_{f\in \mathcal{F}} L(\thalf,f)^5 \ll K^{2+2\theta+\epsilon}
\end{align}
as $K\to \infty$, where $\theta= \frac{7}{64}$ is the current best bound towards the Selberg eigenvalue conjecture \cite[Appendix 2]{kim}.
\end{theorem}
\noindent The ``log of conductor to log of family size'' ratio in (\ref{fifthbound}) is 5/2, the same as in the level aspect fifth moment considered in \cite{kiryou} and \cite{blokha}. Thus our result should be considered an analogue of the level aspect estimate. Assuming the Selberg eigenvalue conjecture (which is a part of the Ramanujan conjecture at the infinite place), our bound is sharp. This seems to be the first time that a sharp bound has been proven (conditionally) for any moment higher than the fourth in the archimedean (weight or spectral) aspect. Jutila \cite{jut} proved a good upper bound for the twelfth moment of Hecke Maass $L$-functions in the spectral aspect, but that is not sharp.
Other authors \cite{ivi, jut2, pen} have proven sharp bounds for the third and fourth moments over smaller families. For example, in \cite{pen} Peng proved a sharp bound for the third moment over $H_k$, which yields the Weyl-quality bound $L(\half, f)\ll k^{\frac13+\epsilon}$. Since such a strong bound already exists, we do not pursue a twisted fourth moment and amplification, although our methods would permit it. The goal is not to obtain individual bounds, although our main theorem already implies a weaker subconvexity bound.
Our main ideas have a similar flavour to those of \cite{kiryou,blokha,li}, but our method is different -- for example, we apply ``reciprocity'' twice, while the other papers apply it once. Compared to \cite{kiryou}, our proof is simpler and shorter, and as already noted above, our method could also be used to prove a bound for the twisted fourth moment, while this is not the case in \cite{kiryou} (because as explained in section 2 of that paper, the assumption $m_1\le m_2$ is made at the outset and cancellation in the $m_1$ sum is used to deal with ``fake main terms''). We cannot really compare with \cite{blokha} because that paper was after a more general result. It might be possible to derive our result from \cite{blokha} by first understanding the relevant integral transforms in terms of the weight, but our paper is self-contained and has the advantage (depending on taste) of being more ``classical'' in its approach.
Throughout the paper, we will use the convention that $\epsilon$ denotes an arbitrarily small positive constant, but not necessarily the same one from one occurrence to the next.
\section{Rough Sketch}
The purpose of this sketch is to explain the main ideas, ignoring all technicalities. We will consider only the generic ranges of all sums.
Using approximate function equations, we can write the fifth moment as
\begin{align*}
\frac{1}{K^2} \sum_{f\in \mathcal{F}} L(\thalf,f)^5 &\approx \frac{1}{K^2} \sum_{f\in \mathcal{F}} \sum_{n_1\asymp K} \frac{\lambda_f(n_1)}{\sqrt{n_1}} \sum_{n_2,n_3,n_4,n_5\asymp K} \frac{\lambda_f(n_2n_3n_4n_5)}{\sqrt{n_2n_3n_4n_5} }\\
&\approx \frac{1}{K^{\frac72}} \sum_{K\le k\le 2K} \frac{1}{K} \sum_{f\in H_k} \sum_{n_1,n_2,n_3,n_4,n_5\asymp K} \lambda_f(n_1) \lambda_f(n_2n_3n_4n_5).
\end{align*}
We need an upper bound of $O(K^{2\theta+\epsilon})$. We will in fact find that this kind of grouping with $n_1$ on one side and $n_2,n_3,n_4,n_5$ on the other leads to cleaner calculations. Applying Petersson's trace formula, the off-diagonal part of this is
\begin{align*}
\frac{1}{K^{\frac72}} \sum_{K\le k\le 2K} \ \sum_{n_1,n_2,n_3,n_4,n_5 \asymp K} \ \sum_{c\ge 1} 2\pi i^k \frac{S(n_1, n_2n_3n_4n_5,c)}{c} J_{k-1} \Big(4\pi \frac{\sqrt{n_1n_2n_3n_4n_5}}{c}\Big).
\end{align*}
Summing over $k$ first, we will get that this is
\begin{align*}
& \frac{1}{K^{\frac72}} \sum_{n_1,n_2,n_3,n_4,n_5 \asymp K} \sum_{c\asymp K^\half } \frac{S(n_1, n_2n_3n_4n_5,c)}{c} e\Big( \frac{2\sqrt{n_1n_2n_3n_4n_5}}{c}\Big)\\
\approx &\frac{1}{K^{4}} \sum_{n_1,n_2,n_3,n_4,n_5 \asymp K} \sum_{c\asymp K^\half } S(n_1, n_2n_3n_4n_5,c) e\Big( \frac{2\sqrt{n_1n_2n_3n_4n_5}}{c}\Big),
\end{align*}
where as usual $e(z)$ denotes $e^{2\pi i z}$.
Splitting the sum over $n_1$ into residue classes mod $c$ and applying Poisson summation (denote the dual variable by $m_1$) we get
\begin{align*}
\frac{1}{K^{\frac72}} \sum_{\substack{n_2,n_3,n_4,n_5 \asymp K\\ c\asymp K^\half} } \sum_{-\infty < m_1 <\infty} \sum_{a\bmod c} S(a, n_2n_3n_4n_5,c) e\Big( \frac{a m_1}{c}\Big) \int_{x\asymp 1} e\Big( \frac{2\sqrt{ x K n_2n_3n_4n_5}}{c}\Big) e\Big(\frac{-xK m_1}{c}\Big) dx.
\end{align*}
The complete sum over residue classes evaluates to $ce(\frac{-n_2n_3n_4n_5\overline{m_1}}{c})$, and the integral is evaluated using the stationary phase method. We get
\begin{align}
\label{return} \frac{1}{K^{4}} \sum_{\substack{n_2,n_3,n_4,n_5\asymp K\\ c\asymp K^\half \\ m_1\asymp K^\frac32 }} e\Big( \frac{-n_2n_3n_4n_5 \overline{m_1}}{c}\Big) e\Big( \frac{n_2n_3n_4n_5}{m_1c}\Big) = \frac{1}{K^{4}} \sum_{\substack{n_2,n_3,n_4 ,n_5\asymp K\\ c\asymp K^\half \\ m_1\asymp K^\frac32 }} e\Big( \frac{n_2n_3n_4n_5\overline{c}}{m_1}\Big),
\end{align}
by reciprocity.
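For the reader's convenience (this remark is ours), the reciprocity relation invoked here, and again below, is the standard identity
\begin{align*}
\frac{\overline{m_1}}{c} + \frac{\overline{c}}{m_1} \equiv \frac{1}{m_1 c} \pmod 1 \qquad \text{for } (m_1,c)=1,
\end{align*}
which follows from $m_1\overline{m_1} + c\overline{c} \equiv 1 \pmod{m_1 c}$. Applied with the argument $n_2n_3n_4n_5$ (and ignoring coprimality conditions, as everywhere in this sketch), it converts $e\big( \tfrac{-n_2n_3n_4n_5 \overline{m_1}}{c}\big) e\big( \tfrac{n_2n_3n_4n_5}{m_1c}\big)$ into $e\big( \tfrac{n_2n_3n_4n_5\overline{c}}{m_1}\big)$.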
Next we apply Poisson summation (mod $m_1$) to the $n_2$ and $n_3$ sums (in the actual proof, we will apply Voronoi summation once instead of Poisson summation twice). Note that if we were following \cite{kiryou} step by step, we would have applied Poisson summation to $n_2$, $n_3$ and $n_4$, but this is not how we proceed. We get
\begin{align}
\label{return2} \frac{1}{K^{\frac72}} \sum_{\substack{n_4,n_5 \asymp K\\ c, m_2 ,m_3 \asymp K^\half \\ m_1\asymp K^\frac32 }} e\Big( \frac{m_2m_3 c \overline{n_4n_5}}{m_1}\Big).
\end{align}
This sum displays only the generic ranges of $m_2$ and $m_3$ (the dual variables). The zero frequencies $m_2=0$ or $m_3=0$, which are omitted, are in fact quite troublesome. For example, return to (\ref{return}) and consider the terms with $m_1|n_3n_4n_5$ (these terms correspond to $m_2=0$). The contribution of such terms is
\begin{align}
\label{fakelarge} \frac{1}{K^{4}} \sum_{\substack{n_2,n_3,n_4,n_5 \asymp K\\ c\asymp K^\half , m_1\asymp K^\frac32 \\ m_1| n_3n_4n_5}} 1 \asymp K^{\half},
\end{align}
while we need to prove a bound of $K^{2\theta+\epsilon}$. It seems that we cannot do better because there are no harmonics present to produce further cancellation. Of course, it is not possible (by the Lindel\"{o}f hypothesis) for the fifth moment to be so large, so a careful evaluation of the fifth moment must show that these ``fake main terms'' should cancel out somehow. But there is a shortcut. The weight functions from the approximate functional equations have been suppressed in (\ref{fakelarge}). If we take them into account, there is a way to design them carefully so that (\ref{fakelarge}) is not so large. This idea was used in \cite{bhm} and \cite{kiryou}, and section 2 of the latter paper contains a nice heuristic about how the idea works.
Back to (\ref{return2}), we can apply reciprocity again to get
\begin{align*}
\frac{1}{K^{\frac72}} \sum_{\substack{n_4,n_5 \asymp K\\ c, m_2 ,m_3 \asymp K^\half \\ m_1\asymp K^\frac32 }} e\Big( \frac{m_2m_3 c \overline{m_1}}{n_4n_5}\Big) e\Big( \frac{-m_2m_3 c}{m_1n_4n_5}\Big) \approx \frac{1}{K^{\frac72}} \sum_{\substack{n_4,n_5 \asymp K\\ c, m_2 ,m_3 \asymp K^\half \\ m_1\asymp K^\frac32 }} e\Big( \frac{m_2m_3 c \overline{m_1}}{n_4n_5}\Big).
\end{align*}
Applying Poisson summation (mod $n_4n_5$) to the $m_1$ sum (denote the dual variable by $l_1$), this is
\begin{align*}
\frac{1}{K^{4}} \sum_{\substack{n_4,n_5 \asymp K\\ c, m_2 ,m_3 , l_1 \asymp K^\half }} S(m_2m_3 c , l_1 ,n_4n_5).
\end{align*}
Now we can sum over $n_4$ using Kuznetsov's formula. The sum of Kloosterman sums is in the Linnik range as $n_4n_5\ge \sqrt{ m_2 m_3 c l_1}$. This leads to
\begin{align}
\label{sketchlast} \frac{1}{K^{2}} \sum_{n_5 \asymp K} \Big( \sum_{\substack{ c, m_2 ,m_3 , l_1 \asymp K^\half }} \sum_{t_j\asymp 1 } \frac{\lambda_j(m_2m_3 c) \lambda_j(l_1)}{\sqrt{m_2 m_3 c l_1}} + \ldots \Big)
\end{align}
where the sum is over an orthonormal basis of Maass cusp forms $\{ u_j \}$ of level $n_5$ and (essentially bounded) Laplacian eigenvalue $\frac{1}{4}+t_j^2$, and the ellipsis denotes the contribution of the Eisenstein series and holomorphic forms. Actually we lose $O(K^{2\theta+\epsilon})$ here due to the possibility of exceptional eigenvalues, but for the purposes of this sketch we ignore this issue.
The inner sum of (\ref{sketchlast}), given within the parentheses, looks like the fourth moment of $L(\half, u_j)$ in the level aspect, provided that we can decompose $\lambda_j(m_2m_3 c)$ by multiplicativity. For this, we need to work with a basis comprising lifts of newforms; such a basis is given in \cite{blomil} or \cite{blokha}. Then the expected bound for the fourth moment, which can be proved using the spectral large sieve, gives
\begin{align*}
\frac{1}{K^{2}} \sum_{n_5 \asymp K} (n_5 K^\epsilon) \ll K^\epsilon
\end{align*}
as desired. We never need any cancellation from the $n_5$-sum, which is why a twisted fourth moment bound would probably be possible in place of the main theorem.
\section{Background}
\subsection{Approximate functional equations}
For $f\in H_k$ we have the functional equation \cite[Theorem 14.7]{iwakow},
\begin{align}
\label{fe} \Lambda(s,f):= (2\pi)^{-s} \Gamma(s+\tfrac{k-1}{2})L(s,f) = i^k \Lambda(1-s,f).
\end{align}
Let $\tau(m)$ denote the number of divisors of $m$. We will use the following standard approximate functional equations. For any $f\in H_k$, we have
\begin{align}
\label{afe1} L(\thalf,f)^2 = 2\sum_{m\ge 1} \frac{\lambda_f(m)\tau(m)}{\sqrt{m}} V_k(m),
\end{align}
where
\begin{align*}
V_k(x)=\frac{1}{2\pi i} \int_{(A)} x^{-s} \mathcal{G}(s) \frac{\Gamma(s+\tfrac{k}{2})^2}{\Gamma(\tfrac{k}{2})^2} \zeta(1+2s) \frac{ds}{s}
\end{align*}
for any $A>0$ and
\begin{align}
\label{gdef} \mathcal{G}(s)= 2 e^{s^2} (\thalf-s^2).
\end{align}
This follows from the functional equation (\ref{fe}) and \cite[Theorem 5.3]{iwakow}. As explained in that theorem, we may insert in the integrand above any even function which is bounded in a fixed horizontal strip about $\Re(s)=0$, and has value 1 at $s=0$. Our function $\mathcal{G}(s)$ satisfies these properties and is chosen to decay exponentially in the vertical direction (this is convenient for convergence) and to vanish at $s=\half$ (this will be needed later to deal with the ``fake main terms'').
For $k\equiv 0 \bmod 4$, the root number in the functional equation is $1$, and we have
\begin{align}
\label{afe2} L(\thalf,f) = 2\sum_{n\ge 1} \frac{\lambda_f(n)}{\sqrt{n}} W_k(n),
\end{align}
where
\begin{align*}
W_k(x)=\frac{1}{2\pi i} \int_{(A)} x^{-s} e^{s^2} \frac{\Gamma(s+\tfrac{k}{2})}{\Gamma(\tfrac{k}{2})} \frac{ds}{s}.
\end{align*}
We have
\begin{align}
\label{vwbounds} V_k^{(j)}(x), W_k^{(j)}(x) \ll x^{-j}(1+x)^{-A}
\end{align}
for any $A>0$ and integer $j\ge 0$.
Using this for $j=0$, large $A$ and Stirling's estimates for the gamma function, the sums (\ref{afe1}) and (\ref{afe2}) may be restricted to $m\ll k^{2+\epsilon}$ and $n\ll k^{1+\epsilon}$ respectively, up to an error of $O(k^{-100})$. Taking $j=0$ and $A=\epsilon$ shows that $|V_k(x)|, |W_k(x)| <k^\epsilon$.
\subsection{Summation formulae}
We will need the Voronoi summation formula and the Poisson summation formula.
\begin{lemma} \label{vorsum} {\bf Voronoi summation.}
Given a compactly supported smooth function $\Phi$ with bounded derivatives, and coprime integers $h$ and $\ell$, we have
\begin{align}
\label{vorstate} \sum_{m\ge 1} \frac{\tau(m)}{m} e\Big(\frac{m\overline{h}}{\ell}\Big)\Phi\Big(\frac{m}{M}\Big)= \frac{1}{\ell} \int_{-\infty}^\infty \Big(\log\frac{ x}{\ell^2}+2\gamma\Big)\Phi \Big(\frac{x}{M}\Big)\frac{dx}{x} + \sum_\pm \frac{1}{\ell} \sum_{r\ge 1} \tau(r) e\Big(\frac{\pm rh}{\ell}\Big) \check{\Phi}_\pm \Big(\frac{Mr}{\ell^2}\Big),
\end{align}
where
\begin{align*}
\check{\Phi}_\pm(x)= \frac{1}{2\pi i} \int_{(A)} H_1^\pm(s) \tilde{\Phi}(-s) x^{-s} ds,
\end{align*}
$\tilde{\Phi}$ is the Mellin transform of $\Phi$,
\begin{align*}
H_1^\pm(s)= 2(2\pi i)^{-2s} \Gamma(s)^2 \cos^{(1\mp1)/2} (\pi s),
\end{align*}
and $A>0$.
\end{lemma}
\proof
See \cite[section 2.3]{bhm}. We can take any $A>0$ because $\tilde{\Phi}(-s)\ll (1+|s|)^{-B}$ for any $B\ge 0$ by integration by parts.
\endproof
\begin{lemma} \label{poiss} {\bf Poisson summation.} Given a compactly supported smooth function $\Phi$ with bounded derivatives, and an arithmetic function $S_q(n)$ with period $q$, we have
\begin{align}
\label{pois3} &\sum_{-\infty < n < \infty } \Phi\Big(\frac{n}{N}\Big) S_q(n) \\
\nonumber &= \frac{N}{q} \sum_{-\infty < l <\infty} \hat{\Phi}\Big(\frac{lN}{q}\Big) \sum_{a\bmod q} S_q(a) e\Big(\frac{a l}{q}\Big)\\
\nonumber &= \frac{N}{q} \hat{\Phi} (0) \sum_{a\bmod q} S_q(a) + \frac{N}{q} \sum_{-\infty < l <\infty} \ \sum_{a\bmod q} S_q(a) e\Big(\frac{a l}{q}\Big) \int_{-\infty}^{\infty} \frac{1}{2\pi i} \int_{(A)} \Big(\frac{-2\pi x lN}{q}\Big)^{-s} \Phi(x) H_2(s) ds dx,
\end{align}
where $\hat{\Phi}$ denotes the Fourier transform of $\Phi$,
\begin{align*}
H_2(s)= \Gamma(s) \exp \Big(\frac{i\pi s}{2}\Big)
\end{align*}
and $A>0$.
\end{lemma}
\proof
For the second line of (\ref{pois3}), separate the $n$ sum into sums over residue classes $a$ modulo $q$ and apply the usual Poisson summation formula to each sum. For the third line we keep aside the contribution of $l=0$, and for $l\neq 0$ we first compute the Mellin transform
\begin{align}
\label{mel} \int_0^\infty \hat{\Phi}(y) y^{s-1} dy = \int_0^\infty \int_{-\infty}^\infty \Phi(x) e(-yx) y^{s-1} dx dy =\int_{-\infty}^\infty \Phi(x) (-2\pi x)^{-s} H_2(s) dx.
\end{align}
This follows by swapping the order of integration, which we can do by the compact support of $\Phi$, and then using the Mellin transform
\begin{align*}
\int_0^\infty e^{iy} y^{s-1} dy = H_2(s)
\end{align*}
which holds for $0<\Re(s)<1$. But since $\hat{\Phi}(y)\ll (1+|y|)^{-B}$ for any $B\ge 0$ by integration by parts, we have that $\int_0^\infty \hat{\Phi}(y) y^{s-1} dy $ converges absolutely for $\Re(s)>0$. Thus the Mellin transform given in (\ref{mel}) analytically continues to $\Re(s)>0$, and by the Mellin inversion theorem we have
\begin{align*}
\hat{\Phi}\Big(\frac{lN}{q}\Big)= \frac{1}{2\pi i} \int_{(A)} \Big(\frac{lN}{q}\Big)^{-s} \int_{-\infty}^\infty \Phi(x) (-2\pi x )^{-s} H_2(s) dx ds
\end{align*}
for any $A>0$.
\endproof
\subsection{An average of the $J$-Bessel function} The following result can be found in \cite[Corollary 8.2]{ils}.
\begin{lemma} \label{javg}
Let $x>0$ and let $h$ be a smooth function compactly supported on the positive reals and possessing bounded derivatives. We have
\begin{align}
\label{avgmaint} \frac{1}{K} \sum_{k \equiv 0 \bmod 2} 2i^k
h\Big(\frac{k-1}{K}\Big)
J_{k-1}(x)
= -\frac{1}{\sqrt{x}} \Im \Big(e^{-2\pi i /8} e^{ix}
\hbar\Big(\frac{K^2}{2x}\Big) \Big) +
O\Big(\frac{x}{K^5}\int_{-\infty}^{\infty} v^4|\hat{h}(v)|dv\Big),
\end{align}
where for real $v$,
\begin{align*}
\hbar(v):=\int_{0}^{\infty} \frac{h(\sqrt{u})}{\sqrt{2\pi u}}
e^{iuv} du
\end{align*}
and $\hat{h}$ denotes the Fourier transform of $h$.
The implied constant is absolute.
\end{lemma}
\noindent By integrating by parts
several times we get that $\hbar(v)\ll |v|^{-B}$
for any $B\ge 0$. Thus the main
term of (\ref{avgmaint}) is not dominant if $x < K^{2-\epsilon}$.
For future use, define for any complex number $s$ the more general function
\begin{align*}
\hbar_{s}(v):=\int_{0}^{\infty}
\frac{h(\sqrt{u})}{\sqrt{2\pi u}} u^{s/2} e^{iuv} du.
\end{align*}
Integrating by parts, we get
\begin{align}
\label{hbar} \hbar_{s}^{(j)}(v)\ll_{\Re(s)} (1+|s|)^B |v|^{-B}
\end{align}
for any $B\ge 0$. Thus the Mellin transform
\begin{align*}
\tilde{\hbar}_{s}(w)= \int_0^\infty \hbar_{s}(v) v^{w-1} dv
\end{align*}
is holomorphic in the half plane $\Re(w)>0$, and we have by integrating by parts $j$ times:
\begin{align*}
\tilde{\hbar}_{s}(w) \ll_{\Re(s)} (1+|s|)^{j+\Re(w)+1} (1+|w|)^{-j}.
\end{align*}
\section{Hecke relations}
Define
\begin{align*}
\mathop{{\sum}^{P}}_{f\in H_k} \gamma_f := \sum_{f\in H_k}
\Big(\frac{2\pi^2}{(k-1)L(1, \mathrm{sym}^2 f )}\Big) \gamma_f
\end{align*}
for any complex numbers $\gamma_f$ depending on $f$. The average $\mathop{{\sum}^{P}}$ arises in the Petersson trace formula \cite[Proposition 14.5]{iwakow}:
\begin{align*}
\mathop{{\sum}^{P}}_{f\in H_k} \lambda_{f}(n) \lambda_f(m) =
\delta_{m,n}+2\pi i^k \sum_{c=1}^{\infty}
\frac{S(n,m,c)}{c}J_{k-1}\Big(\frac{4\pi\sqrt{mn}}{c}\Big),
\end{align*}
where the value of $\delta_{m,n}$ is $1$ if $m=n$ and $0$ otherwise,
$S(n,m,c)$ is the Kloosterman sum, and $J_{k-1}(x)$ is the $J$-Bessel
function.
The following lemma explains how we will group together variables in the fifth moment.
\begin{lemma} \label{reduce}
To prove the main theorem, it suffices to prove that for any smooth functions $h, U_1,U_2,U_3$ compactly supported on $(\half,\frac{5}{2})$ with bounded derivatives, and any
\begin{align*}
\alpha,\beta,\beta_1,\beta_2\ge 1, \ \ 1\le N_1,N_2,N_3 < K^{1+\epsilon},
\end{align*}
with
\begin{align}
\label{condition} N_3\ge N_2, \ \ N_1N_2<\frac{K^{2+\epsilon}}{\alpha}, \ \ \beta\ge \alpha,
\end{align}
we have
\begin{align}
\label{hrel}
\frac{1}{K} \sum_{k \equiv 0 \bmod 2} h\Big(\frac{k-1}{K}\Big) \mathop{{\sum}^{P}}_{f \in H_k} S_f \ll \sqrt{\alpha} K^{2\theta+\epsilon},
\end{align}
where
\begin{multline*}
S_f :=\\\sum_{n_1,n_2,n_3,m\ge 1} \frac{\lambda_f(n_1 n_2 m \alpha ) \lambda_f(n_3) \tau(m) }{ \sqrt{n_1 n_2 n_3m }}
W_k(n_1\beta_1)W_k(n_2\beta_2)W_k(n_3) V_k(m\beta) U_1\Big(\frac{n_1 }{N_1}\Big)U_2\Big(\frac{ n_2 }{N_2}\Big)U_3\Big(\frac{n_3}{N_3}\Big).
\end{multline*}
\end{lemma}
\proof
To prove the main theorem, it suffices to prove that
\begin{align*}
\frac{1}{K} \sum_{k \equiv 0 \bmod 2} h\Big(\frac{k-1}{K}\Big) \mathop{{\sum}^{P}}_{f \in H_k} L(\thalf,f)^5 \ll K^{2\theta+\epsilon},
\end{align*}
because we have $L(\half,f)\ge 0$ by \cite{kohzag} and $k^{-\epsilon}<L(1, \mathrm{sym}^2 f )<k^{\epsilon}$ by \cite[Appendix]{golhoflie}.
We claim that
\begin{align}
\label{claim} L(\thalf,f)^5 &= 8 \Big( \sum_{n \ge 1} \frac{\lambda_f(n)}{\sqrt{n}} W_k(n) \Big)^3 L(\thalf,f)^2.
\end{align}
This holds by (\ref{afe2}) when $k\equiv 0 \bmod 4$. But when $k\equiv 2 \bmod 4$, it also holds because then $L(\half,f)=0$ by the functional equation (\ref{fe}), so both sides of (\ref{claim}) vanish. Now we can insert the approximate functional equation for $L(\thalf,f)^2$ given in (\ref{afe1}) to get that
\begin{align*}
L(\thalf,f)^5 &= 16 \Big( \sum_{n \ge 1} \frac{\lambda_f(n)}{\sqrt{n}} W_k(n) \Big)^3 \Big(\sum_{m \ge 1} \frac{\lambda_f(m )\tau(m )}{\sqrt{m}} V_k(m) \Big).
\end{align*}
Expanding the cube and working in dyadic intervals, to establish the main theorem it suffices to prove that
\begin{align*}
\frac{1}{K} \sum_{k \equiv 0 \bmod 2} h\Big(\frac{k-1}{K}\Big) \mathop{{\sum}^{P}}_{f \in H_k} S_1 \ll K^{2\theta+\epsilon},
\end{align*}
where
\begin{align*}
S_1:= \prod_{i=1}^3\Big( \sum_{n_i \ge 1} \frac{\lambda_f(n_i)}{\sqrt{n_i}} W_i\Big(\frac{n_i}{N_i}\Big) \Big) \Big(\sum_{m \ge 1} \frac{\lambda_f(m )\tau(m )}{\sqrt{m}} V_k(m) \Big)
\end{align*}
for
\begin{align*}
W_i(x):=W_k(N_i x)\,U_i(x)
\end{align*}
and $1\le N_1,N_2,N_3<K^{1+\epsilon}$. By symmetry, we can suppose that $N_3\ge N_2$. By Hecke multiplicativity, we have
\begin{align*}
\lambda_f(m)\lambda_f(n_1)=\sum_{d|(m,n_1)} \lambda_f\Big(\frac{mn_1}{d^2}\Big),
\end{align*}
so replacing $m$ by $md$ and $n_1$ by $n_1d$, we get
\begin{align*}
S_1= \sum_{n_1,n_2,n_3,m,d\ge 1} \frac{\lambda_f(mn_1)\lambda_f(n_2)\lambda_f(n_3)\tau(md)}{d\sqrt{n_1n_2n_3m}} W_1\Big(\frac{n_1d}{N_1}\Big)W_2\Big(\frac{n_2}{N_2}\Big)W_3\Big(\frac{n_3}{N_3}\Big)V_k(md).
\end{align*}
Now we combine
\begin{align*}
\lambda_f(mn_1) \lambda_f(n_2) =\sum_{b|(mn_1,n_2)} \lambda_f\Big(\frac{mn_1n_2}{b^2}\Big)=\sum_{\substack{ n_2= b_1b \\ b|mn_1}} \lambda_f\Big(\frac{mn_1b_1}{b}\Big).
\end{align*}
Ordering by the gcd of $n_1$ and $b$, we have the disjoint union
\begin{align}
\label{proc1} \{ n_1,m : b |n_1m \} = \bigsqcup_{\substack{ b=b_2 b' \\ (b,n_1)=b_2 }} \{ n_1,m : b |n_1m\} = \bigsqcup_{\substack{ b=b_2b' }} \Big\{ n_1,m : b_2 | n_1, b' |m, \Big(\frac{n_1}{b_2},b'\Big)=1 \Big\},
\end{align}
and $(\frac{n_1}{b_2},b')=1$ can be detected using the M\"{o}bius function:
\begin{align}
\label{proc2} \sum_{\substack{b'=b_3b_4\\b_3|\frac{n_1}{b_2}}} \mu(b_3) = \begin{cases}
1 &\text{ if } (\frac{n_1}{b_2},b')=1\\
0 &\text{otherwise}.
\end{cases}
\end{align}
Thus replacing $b$ by $b_2b_3b_4$, $n_2$ by $b_1b_2b_3b_4$, $n_1$ by $n_1b_2b_3$, and $m$ by $mb_3b_4$, we get
\begin{multline*}
S_1= \sum_{\substack{n_1,b_1,n_3,m\ge 1\\ b_2,b_3,b_4,d\ge 1}} \frac{\lambda_f(mn_1 b_1 b_3) \lambda_f(n_3)\tau(mb_3b_4d) \mu(b_3) }{d b_2 b_3^\frac32 b_4 \sqrt{n_1b_1 n_3m}} \\W_1\Big(\frac{n_1 b_2 b_3d}{N_1}\Big)W_2\Big(\frac{b_1b_2b_3b_4}{N_2}\Big)W_3\Big(\frac{n_3}{N_3}\Big)V_k(mb_3b_4d).
\end{multline*}
Splitting the divisor function
\begin{align*}
\tau(mb_3b_4d)=\sum_{r|(m,b_3b_4d)} \mu(r) \tau\Big(\frac{m}{r}\Big) \tau\Big(\frac{b_3b_4 d}{r}\Big),
\end{align*}
replacing $m$ by $mr$, and renaming $b_1$ to $n_2$, we have
\begin{multline*}
S_1= \sum_{\substack{n_1,n_2,n_3,m\ge 1 \\ b_2,b_3,b_4,d\ge 1\\ r| b_3b_4 d }} \frac{\lambda_f(mn_1 n_2 b_3 r) \lambda_f(n_3)\tau(m)\tau(\frac{b_3b_4 d}{r}) \mu(b_3) \mu(r) }{d b_2 b_3^\frac32 b_4 \sqrt{n_1 n_2 n_3m r}} \\
W_1\Big(\frac{n_1 b_2 b_3d}{N_1}\Big)W_2\Big(\frac{ n_2 b_2b_3b_4}{N_2}\Big)W_3\Big(\frac{n_3}{N_3}\Big)V_k(mb_3b_4dr).
\end{multline*}
We plan to find cancellation in the sum over $n_1,n_2,n_3,m$ and to sum trivially over the remaining parameters $b_2,b_3,b_4,r,d$. Thus it suffices to prove that
\begin{align}
\label{need0} \sum_{\substack{b_2,b_3,b_4,d\ge 1 \\ r| b_3b_4 d }} \frac{1}{d b_2 b_3^\frac32 b_4 \sqrt{r}} \Bigg|\frac{1}{K} \sum_{k \equiv 0 \bmod 2} h\Big(\frac{k-1}{K}\Big) \mathop{{\sum}^{P}}_{f \in H_k} S_2 \Bigg| \ll K^{2\theta+\epsilon},
\end{align}
where
\begin{align*}
S_2:=\sum_{n_1,n_2,n_3,m\ge 1} \frac{\lambda_f(mn_1 n_2 b_3 r) \lambda_f(n_3)\tau(m) }{ \sqrt{n_1 n_2 n_3m }}
W_1\Big(\frac{n_1 b_2 b_3d}{N_1}\Big)W_2\Big(\frac{ n_2 b_2b_3b_4}{N_2}\Big)W_3\Big(\frac{n_3}{N_3}\Big)V_k(mb_3b_4dr).
\end{align*}
For (\ref{need0}) it suffices to show that
\begin{align*}
\frac{1}{K} \sum_{k \equiv 0 \bmod 2} h\Big(\frac{k-1}{K}\Big) \mathop{{\sum}^{P}}_{f \in H_k} S_2\ll K^{2\theta+\epsilon} \sqrt{b_3 r}.
\end{align*}
This is given by (\ref{hrel}), once in $S_f$ we replace $N_1$ by $N_1/b_2b_3d$ and $N_2$ by $N_2/b_2b_3b_4$, and take $\alpha=b_3 r$, $\beta=b_3b_4 d r$, $\beta_1= b_2b_3d$, $\beta_2= b_2b_3b_4$. Note that these substitutions lead to a smaller value of $N_2$, so that $N_3\ge N_2$ still holds. Since $\beta_1\beta_2\ge b_3(b_3b_4r)\ge \alpha$, we have $N_1N_2<K^{2+\epsilon}/\alpha$. Also note that $\beta\ge \alpha$.
\endproof
\section{Application of the trace formula}
Applying the Petersson trace formula to Lemma \ref{reduce}, we need to prove that
\begin{align*}
D + OD \ll \sqrt{\alpha}K^{2\theta+\epsilon},
\end{align*}
where the diagonal
\begin{multline*}
D:=\\ \sum_{\substack{n_1,n_2,n_3, m \ge 1\\ n_3=n_1n_2m\alpha}} \frac{\lambda_f(n_1 n_2 m \alpha ) \lambda_f(n_3) \tau(m) }{ \sqrt{n_1 n_2 n_3m }}
U_1\Big(\frac{n_1 }{N_1}\Big)U_2\Big(\frac{ n_2 }{N_2}\Big)U_3\Big(\frac{n_3}{N_3}\Big)W_k(n_1\beta_1)W_k(n_2\beta_2)W_k(n_3)V_k(m\beta)
\end{multline*}
trivially satisfies the required bound, and the off-diagonal is
\begin{multline*}
OD:= \sum_{\substack{n_1,n_2,n_3, m,c \ge 1\\ }} \frac{S(n_1 n_2 m \alpha , n_3 ,c) \tau(m) }{c \sqrt{n_1 n_2 n_3m }} U_1\Big(\frac{n_1 }{N_1}\Big)U_2\Big(\frac{ n_2 }{N_2}\Big)U_3\Big(\frac{n_3}{N_3}\Big) \\
\frac{1}{K} \sum_{k \equiv 0 \bmod 2} h\Big(\frac{k-1}{K}\Big) 2\pi i^k J_{k-1}\Big(\frac{4\pi \sqrt{n_1n_2n_3 m \alpha}}{c} \Big) W_k(n_1\beta_1) W_k(n_2\beta_2) W_k(n_3)V_k(m\beta).
\end{multline*}
At this point, we cannot absorb the $W_k$ functions into the arbitrary weight functions $U_i$ because $W_k$ depends on $k$ and we still need to average over $k$, which is what we do next. Applying Lemma \ref{javg}, the contribution of its error term is bounded by
\begin{align*}
\frac{1}{K^{5-\epsilon}} \sum_{\substack{n_1n_2<K^{2+\epsilon}/\alpha \\ n_2, n_3<K^{1+\epsilon}\\ m<K^{2+\epsilon} }} \ \sum_{c\ge 1} \ \frac{|S(n_1 n_2 m \alpha , n_3 ,c)| }{c \sqrt{n_1 n_2 n_3m }} \frac{ \sqrt{n_1 n_2 n_3m \alpha}}{c} \ll K^\epsilon,
\end{align*}
on using Weil's bound for the Kloosterman sum. Thus we need only consider the main term of Lemma \ref{javg}, and it suffices to prove
\begin{align*}
OD_1 :=\sum_{\substack{n_1,n_2,n_3, m,c \ge 1\\ }}& \frac{S(n_1 n_2 m \alpha , n_3 ,c) \tau(m) }{\sqrt{c} (n_1 n_2 n_3m)^\frac34 } e\Big(\frac{2 \sqrt{n_1n_2n_3 m \alpha}}{c} \Big) \\ &\Psi_K\Big(n_1\beta_1,n_2\beta_2,n_3, m\beta , \frac{K^2 c}{8\pi \sqrt{n_1n_2n_3 m \alpha}}\Big)\prod_{j=1}^{3} U_j\Big(\frac{n_j}{N_j}\Big) \ll \alpha^\frac34 K^{2\theta+\epsilon},
\end{align*}
where
\begin{multline*}
\Psi_K(x_1,x_2,x_3, x_4, v) := \frac{1}{(2\pi i)^4} \int_0^\infty \frac{h(\sqrt{u})}{\sqrt{2\pi u}} \int_{(A_4)} \frac{ \zeta(1+2s_4) \mathcal{G}(s_4) }{ x_4^{s_4}} \frac{\Gamma^2\Big(\frac{\sqrt{u}}{2}K+s_4+\frac{1}{2}\Big)}{\Gamma^2\Big(\frac{\sqrt{u}}{2}K+\frac{1}{2}\Big)} \\
\prod_{j=1}^{3} \int_{(A_j)} \frac{e^{s_j^2} }{x_j^{s_j} }
\frac{\Gamma\Big(\frac{\sqrt{u}}{2}K+s_j+\frac{1}{2}\Big)}{\Gamma\Big(\frac{\sqrt{u}}{2}K+\frac{1}{2}\Big)} \frac{ds_j}{s_j}\frac{ds_4}{s_4} e^{iuv} du.
\end{multline*}
By the rapid decay of the $s_1,s_2,s_3,s_4$ integrands in vertical lines, we may effectively truncate the integrals to $|\Im s_1|,|\Im s_2|,|\Im s_3|,|\Im s_4|<K^\epsilon$. For $|s|<K^\epsilon$, by Stirling's approximation we have
\begin{align*}
\frac{\Gamma\Big(\frac{\sqrt{u}}{2}K+s+\frac{1}{2}\Big)}{\Gamma\Big(\frac{\sqrt{u}}{2}K+\frac{1}{2}\Big)} = \Big(\frac{\sqrt{u}}{2} K \Big)^s\Big( 1 + \frac{P(s)}{\sqrt{u}K}+ O\Big(\frac{1}{K^{2-\epsilon}}\Big)\Big)
\end{align*}
for some polynomial $P$. Thus
\begin{align}
\label{wstir} \Psi_K(x_1,x_2,x_3,x_4,v) = \Psi\Big(\frac{x_1}{K},\frac{x_2}{K},\frac{x_3}{K},\frac{x_4}{K^2},v\Big) + \frac{1}{K} \Psi_0\Big(\frac{x_1}{K},\frac{x_2}{K},\frac{x_3}{K},\frac{x_4}{K^2},v\Big) + O(K^{-2+\epsilon}),
\end{align}
where for $\xi_i>0$ and real $v$ we define
\begin{multline}
\label{wdef} \Psi(\xi_1,\xi_2,\xi_3,\xi_4,v) \\ := \frac{1}{(2\pi i)^4} \int_{(A_4)} \int_{(A_3)} \int_{(A_2)} \int_{(A_1)} \frac{e^{s_1^2+s_2^2+s_3^2} \zeta(1+2s_4) \mathcal{G}(s_4) }{ 2^{s_1+s_2+s_3} 4^{s_4} \xi_1^{s_1} \xi_2^{s_2} \xi_3^{s_3} \xi_4^{s_4}} \hbar_{s_1+s_2+s_3+2s_4}(v) \frac{ds_1}{s_1}\frac{ds_2}{s_2}\frac{ds_3}{s_3}\frac{ds_4}{s_4}
\end{multline}
and $\Psi_0$ has the same definition except for the presence of an extra factor $\frac{P(s_1,s_2,s_3,s_4)}{\sqrt{u}}$ in the integrand for some polynomial $P$. It suffices to treat only the contribution of $\Psi$, as the treatment of the secondary term $\Psi_0$ will be similar. Thus we need to prove
\begin{align}
\label{defod2} OD_2 :=\sum_{\substack{n_1,n_2,n_3, m,c \ge 1\\ }}& \frac{S(n_1 n_2 m \alpha , n_3 ,c) \tau(m) }{\sqrt{c} (n_1 n_2 n_3m)^\frac34 } e\Big(\frac{2 \sqrt{n_1n_2n_3 m \alpha}}{c} \Big) \\
\nonumber &\Psi\Big(\frac{n_1\beta_1}{K},\frac{n_2\beta_2}{K},\frac{n_3}{K}, \frac{m\beta }{K^2}, \frac{K^2 c}{8\pi \sqrt{n_1n_2n_3 m \alpha}}\Big)\prod_{j=1}^{3} U_j\Big(\frac{n_j}{N_j}\Big) \ll \alpha^\frac34 K^{2\theta+\epsilon}.
\end{align}
By (\ref{hbar}) we may assume (up to negligible error) that
\begin{align}
\label{crange} c\ll \frac{\sqrt{n_1 n_2 n_3 m \alpha}}{K^{2-\epsilon}}.
\end{align}
By (\ref{vwbounds}) and (\ref{hbar}), we have that
\begin{align}
\label{wbound} \frac{\partial^{j_1}}{\partial \xi_1^{j_1} }\frac{\partial^{j_2}}{\partial \xi_2^{j_2} }\frac{\partial^{j_3}}{\partial \xi_3^{j_3} } \frac{\partial^{j_4}}{\partial \xi_4^{j_4} } \frac{\partial^{j}}{\partial v^{j} } \Psi( \xi_1, \xi_2, \xi_3, \xi_4, v) \ll K^\epsilon \xi_1^{-j_1-A_1} \xi_2^{-j_2-A_2} \xi_3^{-j_3-A_3} \xi_4^{-j_4-A_4} v^{-j-B}
\end{align}
for $\xi_1,\xi_2,\xi_3,\xi_4,v>0$, any integers $j_i,B\ge 0$ and any real $A_i>0$.
\section{Poisson summation and reciprocity}
In (\ref{defod2}), we sum over $n_3$ in residue classes mod $c$ and apply Poisson summation (Lemma \ref{poiss}), getting
\begin{multline}
\label{od2} OD_2 = \sum_{\substack{n_1,n_2,m,c \ge 1\\ -\infty < \ell < \infty} } \frac{N_3}{c} \sum_{a \bmod c} S(a,n_1 n_2 m \alpha,c) e\Big(\frac{a\ell}{c}\Big) \frac{\tau(m)}{ \sqrt{c} (n_1 n_2 N_3m)^\frac34 } U_1\Big(\frac{n_1}{N_1}\Big)U_2\Big(\frac{n_2}{N_2}\Big)\\
\end{multline}
Call the integral above $I$. We will evaluate it using stationary phase approximation.
\begin{lemma} \label{stationary} We have that $I\ll K^{-1000}$ unless $|\ell|\le K^{100}$, in which case
\begin{align*}
I &=\\
&\sqrt{\frac{2\ell c}{ n_1n_2 m \alpha}} e\Big(\frac{n_1 n_2m\alpha }{\ell c}- \frac{1}{8}\Big) \Psi \Big(\frac{n_1\beta_1}{K},\frac{n_2\beta_2}{K},
\frac{ n_1n_2m\alpha}{\ell^2 K}, \frac{m\beta }{K^2}, \frac{K^2 \ell c}{8\pi n_1n_2m \alpha}\Big) U_3\Big(\frac{n_1 n_2 m \alpha }{\ell ^2 N_3}\Big) \Big(\frac{n_1 n_2 m \alpha }{\ell ^2 N_3}\Big)^{\frac14} \\
&+O(K^{-1000}) + O\Big( \frac{1}{K^{3-\epsilon}} \delta_{|\ell| \asymp \frac{\sqrt{n_1n_2m\alpha} }{\sqrt{N_3} }} \Big),
\end{align*}
with the understanding that the main term vanishes if $\ell =0$. The delta function $\delta_{\text{P}}$ equals 1 if the statement P holds and 0 otherwise.
\end{lemma}
\proof
Suppose first that $\ell=0$. Then integrate by parts $j$ times the integral $I$ given in (\ref{od2}). Here we repeatedly integrate $e(\frac{2\sqrt{x n_1n_2N_3 m\alpha}}{c})$ after substituting $y=\sqrt{x}$ and differentiate the rest of the integrand. Using (\ref{wbound}) and (\ref{crange}) we have that
\begin{align*}
I\ll K^\epsilon \Big( \frac{ \sqrt{n_1n_2N_3 m \alpha}}{c} \Big)^{-j} \ll K^\epsilon \Big(\frac{1}{K^{2-\epsilon}}\Big)^j.
\end{align*}
Taking $j$ large enough, this is $O(K^{-1000})$.
Now suppose that $\ell \neq 0$ and $|\ell|> K^{100}$. We integrate by parts $j$ times the integral $I$ given in (\ref{od2}). This time we repeatedly integrate $e(\frac{-\ell N_3 x}{c}) $ and differentiate the rest of the integrand. We again get that $I\ll K^{-1000}$.
Henceforth assume $\ell \neq 0$ and $|\ell |\le K^{100}$. Making the substitution
\begin{align*}
y=\frac{\ell ^2 N_3}{n_1 n_2 m \alpha}x,
\end{align*}
the integral is
\begin{multline*}
I= \int_{-\infty}^{\infty} e\Big(\frac{n_1n_2m\alpha }{\ell c}(2\sqrt{y}-y)\Big) \Psi\Big(\frac{n_1\beta_1}{K},\frac{n_2\beta_2}{K},
\frac{y n_1n_2m\alpha}{\ell^2 K}, \frac{m\beta }{K^2}, \frac{K^2 \ell c}{8\pi \sqrt{y} n_1n_2 m \alpha}\Big)\\
\Big(\frac{n_1 n_2 m \alpha}{\ell ^2 N_3} \Big)^{\frac14} U_3\Big(\frac{n_1 n_2 m \alpha }{\ell ^2 N_3}y\Big) \frac{dy}{y^{\frac{3}{4}}}.
\end{multline*}
Define $U_0(x)=x^{\frac14} U_3(x)$, so that
\begin{align}
I\label{idef} = \int_{-\infty}^{\infty} e\Big(\frac{n_1n_2m\alpha }{\ell c}(2\sqrt{y}-y)\Big) \Psi\Big(\frac{n_1\beta_1}{K},\frac{n_2\beta_2}{K},
\frac{y n_1n_2m\alpha}{\ell^2 K}, \frac{m\beta }{K^2}, \frac{K^2 \ell c}{8\pi \sqrt{y} n_1n_2 m \alpha}\Big) U_0\Big(\frac{n_1 n_2 m \alpha }{\ell ^2 N_3}y\Big) \frac{dy}{y}.
\end{align}
The stationary point occurs at $y=1$. Let $\Omega(y)$ be a smooth function with bounded derivatives which is equal to 1 on $(1/2, 3/2)$ and 0 on $(-\infty,1/4)\cup(2,\infty)$. We write
\begin{align*}
I=I_1+I_2,
\end{align*}
where $I_1$ is defined as in (\ref{idef}) except that its integrand has an extra factor $1-\Omega(y)$, and $I_2$ is defined as in (\ref{idef}) except that its integrand has an extra factor $\Omega(y)$.
We first show that
\begin{align*}
I_1\ll K^{-1000}.
\end{align*}
For this we will use \cite[lemma 8.1]{bky} with
\begin{align*}
&h(y)= \frac{2\pi n_1n_2m\alpha }{\ell c}(2\sqrt{y}-y), \\
&w(y) = \frac{1-\Omega(y)}{y} \Psi \Big(\frac{n_1\beta_1}{K},\frac{n_2\beta_2}{K},
\frac{y n_1n_2m\alpha}{\ell^2 K}, \frac{m\beta }{K^2}, \frac{K^2 \ell c}{8\pi \sqrt{y} n_1n_2 m \alpha}\Big) U_0\Big(\frac{n_1 n_2 m \alpha }{\ell ^2 N_3}y\Big).
\end{align*}
The parameters in this lemma are
\begin{align*}
R= \frac{n_1 n_2 m \alpha}{|\ell | c} \Big( \Big(\frac{n_1 n_2 m\alpha }{\ell ^2 N_3}\Big)^\half +1\Big), \ \ U=Q= \frac{\ell ^2 N_3}{n_1 n_2 m \alpha}, \ \ Y= \frac{n_1 n_2 m\alpha }{|\ell | c} \Big( \frac{\ell ^2 N_3}{n_1 n_2 m \alpha}\Big)^\half.
\end{align*}
This is because
\begin{align*}
h'(y)=\frac{n_1n_2 m \alpha}{\ell c}\Big( \frac{1}{\sqrt{y}} -1 \Big), \ \ h^{(j)}(y) \ll \frac{n_1n_2m\alpha}{|\ell | c} y^\half y^{-j}
\end{align*}
for $j\ge 2$, and we can assume that $|y-1|\gg 1$ by the support of $1-\Omega(y)$, and that $y\asymp \frac{\ell ^2 N_3}{n_1 n_2 m \alpha}$ by the support of $U_0$. Further, by (\ref{wbound}), we have
\begin{align*}
w^{(j)}(y)\ll \frac{K^\epsilon}{y^{j+1}}.
\end{align*}
We don't need to specify the remaining parameters $\alpha, \beta, X$ given in \cite[lemma 8.1]{bky}, apart from noting that they are bounded by some power of $K$. The result of the lemma is
\begin{align*}
I_1\ll (\beta-\alpha)X( (QR/\sqrt{Y} )^{-A}+(RU)^{-A})
\end{align*}
for any $A\ge 0$. Thus it suffices to show that $QR/\sqrt{Y}>K^\epsilon$ and $RU>K^\epsilon$, and then to take $A$ large enough.
{\it Case 1.} Suppose that $ \frac{\ell ^2 N_3}{n_1 n_2 m \alpha}\ge 1$. Then $R\gg \frac{n_1 n_2 m \alpha}{|\ell | c}$ and so
\begin{align*}
&QR/\sqrt{Y} \gg \Big(\frac{n_1 n_2 m \alpha}{|\ell | c}\Big)^\half \Big( \frac{\ell ^2 N_3}{n_1 n_2 m \alpha} \Big)^\frac34 \gg \Big(\frac{n_1 n_2 m \alpha}{|\ell | c}\Big)^\half \Big( \frac{\ell ^2 N_3}{n_1 n_2 m \alpha} \Big)^\frac14 = \Big(\frac{
\sqrt{n_1 n_2 N_3 m \alpha}}{c} \Big)^\half,\\
&RU \gg \frac{n_1 n_2 m \alpha}{|\ell | c} \Big( \frac{\ell ^2 N_3}{n_1 n_2 m \alpha} \Big) \gg \frac{n_1 n_2 m \alpha}{|\ell | c} \Big( \frac{\ell ^2 N_3}{n_1 n_2 m \alpha} \Big) ^\frac12 = \frac{\sqrt{n_1 n_2 N_3 m \alpha}}{c}.
\end{align*}
By (\ref{crange}), we have $\frac{\sqrt{n_1 n_2 N_3 m \alpha}}{c} \gg K^{2-\epsilon}$.
{\it Case 2.} Suppose that $ \frac{\ell ^2 N_3}{n_1 n_2 m \alpha}< 1$. Then $R\gg \frac{n_1n_2 m \alpha}{|\ell | c} (\frac{n_1n_2 m \alpha}{\ell ^2 N_3})^\half$ and so
\begin{align*}
&QR/\sqrt{Y} \gg \Big(\frac{n_1 n_2 m \alpha}{|\ell | c}\Big)^\half \Big( \frac{\ell ^2 N_3}{n_1 n_2 m \alpha} \Big)^\frac14 = \Big(\frac{
\sqrt{n_1 n_2 N_3 m \alpha}}{c} \Big)^\half,\\
&RU \gg \frac{n_1 n_2 m \alpha}{|\ell | c} \Big( \frac{\ell ^2 N_3}{n_1 n_2 m \alpha} \Big) ^\frac12 = \frac{\sqrt{n_1 n_2 N_3 m \alpha}}{c},
\end{align*}
and the conclusion is the same.
Now consider $I_2$. We have
\begin{multline*}
I_2 = \int_{-\infty}^{\infty} e\Big(\frac{n_1n_2m\alpha }{\ell c}(2\sqrt{y}-y)\Big) \Psi\Big(\frac{n_1\beta_1}{K},\frac{n_2\beta_2 }{K},
\frac{y n_1n_2m\alpha}{\ell^2 K}, \frac{m\beta }{K^2}, \frac{K^2 \ell c}{8\pi \sqrt{y} n_1n_2 m \alpha}\Big) U_0\Big(\frac{n_1 n_2 m \alpha }{\ell ^2 N_3}y\Big) \Omega(y) \frac{dy}{y}.
\end{multline*}
We apply \cite[Proposition 8.2]{bky}, with
\begin{align*}
&h(y)= \frac{2\pi n_1n_2m\alpha }{\ell c}(2\sqrt{y}-y), \\
&w(y) = \frac{\Omega(y)}{y} \Psi \Big(\frac{n_1\beta_1}{K},\frac{n_2\beta_2}{K},
\frac{y n_1n_2m\alpha}{\ell^2 K}, \frac{m\beta }{K^2}, \frac{K^2 \ell c}{8\pi \sqrt{y} n_1n_2 m \alpha}\Big) U_0\Big(\frac{n_1 n_2 m \alpha }{\ell ^2 N_3}y\Big),\\
&X=K^\epsilon, V=V_1=Q=1, Y= \frac{n_1 n_2 m\alpha}{|\ell |c}\asymp \frac{ \sqrt{n_1 n_2 N_3 m \alpha}}{c}.
\end{align*}
The approximation to $Y$ is given by $\ell^2 \asymp \frac{n_1 n_2 m \alpha y}{N_3}$ and $y\asymp 1$ by the support of $U_0$ and $\Omega$ respectively. By (\ref{crange}), we have that $Y\gg K^{2-\epsilon}$. Thus the conditions \cite[line (8.7)]{bky} are satisfied for $\delta=1/5$ say, and we get (we have a factor of $e(-1/8)$ instead of $e(1/8)$ because the second derivative of $h$ is negative)
\begin{align*}
I_2 = \sqrt{\frac{2\ell c}{ n_1 n_2 m\alpha }} e\Big(\frac{n_1 n_2 m \alpha}{\ell c}-\frac{1}{8}\Big) \Psi \Big(\frac{n_1\beta_1}{K},\frac{n_2\beta_2}{K},
\frac{ n_1n_2m\alpha}{\ell^2 K}, \frac{m\beta }{K^2}, \frac{K^2 \ell c}{8\pi n_1n_2 m \alpha}\Big) U_0\Big(\frac{n_1 n_2 m \alpha }{\ell ^2 N_3}\Big) +\text{error},
\end{align*}
where
\begin{align*}
\text{error}=O\Big( \frac{1}{ \sqrt{Y}} \sum_{1\le n\le 10^6} p_n(1) + K^{-100}\Big),
\end{align*}
where
\begin{align*}
p_n(1)=\frac{1}{n!} \frac{|G^{(2n)}(1)| }{Y^n} , \ \ \ G(t)=w(t)e(H(t)), \ \ \ H(t) = h(t)-h(1)-\tfrac12 h''(1)(t-1)^2.
\end{align*}
Note that $H(1)=H'(1)=H''(1)=0$, and so $G^{(2n)}(1)\ll Y^{\lfloor \frac{2n}{3} \rfloor} \ll Y^{n-1}$. Thus
\begin{align*}
\text{error}= O\Big(Y^{-3/2} \delta_{|\ell| \asymp \frac{\sqrt{n_1 n_2 m\alpha} }{\sqrt{N_3} }}\Big) = O\Big( \frac{1}{K^{3-\epsilon}} \delta_{|\ell| \asymp \frac{\sqrt{n_1 n_2 m\alpha} }{\sqrt{N_3} }}\Big).
\end{align*}
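For completeness, here is a brief sketch of why $G^{(2n)}(1)\ll Y^{\lfloor 2n/3\rfloor}$ (up to harmless factors $K^{O(\epsilon)}$ coming from $w$): by the product and chain rules, $G^{(2n)}(1)$ is a finite linear combination of terms of the form $w^{(k_0)}(1)\prod_{i\ge 1} H^{(k_i)}(1)$ with $k_0+\sum_{i\ge 1} k_i=2n$ and each $k_i\ge 1$. Since $H(1)=H'(1)=H''(1)=0$, only products with every $k_i\ge 3$ contribute, so there are at most $\lfloor 2n/3\rfloor$ factors $H^{(k_i)}(1)$; each of these equals $h^{(k_i)}(1)\ll Y$ by the derivative bounds for $h$, while $w^{(k_0)}(1)\ll K^\epsilon$.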
\endproof
Now we are ready to return to (\ref{od2}). We evaluate the $a$-sum there as
\begin{align*}
\sum_{a \bmod c} S(a,n_1n_2m\alpha, c)e\Big(\frac{a\ell}{c}\Big) =
\begin{cases}
c e(\frac{-n_1n_2m\alpha\overline{\ell}}{c}) &\text{ if } (\ell,c)=1\\
0 &\text{ otherwise},
\end{cases}
\end{align*}
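For completeness, this standard evaluation can be seen as follows (writing $N=n_1n_2m\alpha$ for brevity): opening the Kloosterman sum and executing the $a$-sum first,
\begin{align*}
\sum_{a \bmod c} S(a,N, c)e\Big(\frac{a\ell}{c}\Big) = \sum_{\substack{d \bmod c\\ (d,c)=1}} e\Big(\frac{N\overline{d}}{c}\Big) \sum_{a \bmod c} e\Big(\frac{a(d+\ell)}{c}\Big) = c \sum_{\substack{d \bmod c, \ (d,c)=1 \\ d\equiv -\ell \bmod c}} e\Big(\frac{N\overline{d}}{c}\Big),
\end{align*}
and the last sum equals $e(-N\overline{\ell}/c)$ when $(\ell,c)=1$ and is empty otherwise.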
We then apply Lemma \ref{stationary} for the integral. The error term of this lemma contributes, using (\ref{condition}) and (\ref{crange}), at most
\begin{align*}
\frac{1}{K^{3-\epsilon}} \sum_{\substack{n_1\asymp N_1 ,n_2\asymp N_2 \\ m<K^{2+\epsilon}/\beta \\ c< \sqrt{N_1N_2N_3m \alpha}/K^{2-\epsilon} \\ |\ell|\asymp \sqrt{n_1 n_2 m\alpha / N_3} }} \frac{ N_3^\frac14}{ c^\half (n_1 n_2 m)^\frac34 } \ll K^\epsilon.
\end{align*}
Thus we only need to consider the contribution of the main term. It suffices to prove (we only treat the terms with $\ell >0$)
\begin{multline}
\label{reqod3} OD_3 := \sum_{\substack{n_1,n_2,m,c,\ell \ge 1} } e\Big(\frac{-n_1 n_2 m \alpha \overline{\ell }}{c}\Big) e\Big(\frac{n_1 n_2 m \alpha}{\ell c} \Big) \frac{\tau(m )}{ n_1 n_2 m } \\
\Psi \Big(\frac{n_1\beta_1}{K},\frac{n_2\beta_2}{K},
\frac{ n_1n_2m\alpha}{\ell^2 K}, \frac{m\beta }{K^2}, \frac{K^2 \ell c}{8\pi n_1n_2 m \alpha}\Big) U_1\Big(\frac{n_1 }{N_1}\Big) U_2\Big(\frac{n_2 }{N_2}\Big) U_3\Big(\frac{n_1 n_2 m \alpha }{\ell ^2 N_3}\Big) \ll \alpha K^{2\theta+\epsilon},
\end{multline}
where it is understood that the sum is restricted to $(\ell ,c)=1$. By the reciprocity relation for exponentials, we have
\begin{multline*}
OD_3 = \sum_{\substack{n_1,n_2,m,c,\ell \ge 1} } e\Big( \frac{n_1 n_2 m \alpha \overline{c }}{\ell} \Big) \frac{\tau(m )}{ n_1 n_2 m } \\
\Psi \Big(\frac{n_1\beta_1}{K},\frac{n_2\beta_2}{K},
\frac{ n_1n_2m\alpha}{\ell^2 K}, \frac{m\beta }{K^2}, \frac{K^2 \ell c}{8\pi n_1n_2 m \alpha}\Big) U_1\Big(\frac{n_1 }{N_1}\Big) U_2\Big(\frac{n_2 }{N_2}\Big) U_3\Big(\frac{n_1 n_2 m \alpha }{\ell ^2 N_3}\Big) .
\end{multline*}
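For clarity, the reciprocity relation invoked here is the elementary identity $\frac{\overline{\ell}}{c}+\frac{\overline{c}}{\ell}\equiv \frac{1}{\ell c} \pmod 1$, valid for $(\ell,c)=1$, which gives
\begin{align*}
e\Big(\frac{-n_1 n_2 m \alpha \overline{\ell }}{c}\Big) e\Big(\frac{n_1 n_2 m \alpha}{\ell c} \Big) = e\Big( \frac{n_1 n_2 m \alpha \overline{c }}{\ell} \Big).
\end{align*}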
\section{Voronoi summation and fake main terms}
The next goal is to perform Voronoi summation on $m$ but we cannot do so immediately because in the exponential $e(\frac{n_1 n_2 m \alpha \overline{c }}{\ell}) $, the integers $n_1n_2\alpha$ and $\ell $ may not be coprime. We first prepare by eliminating any common factors. Re-ordering the sum $OD_3$ by $b_1= (n_1,\ell )$, and replacing $n_1$ by $b_1 n_1$ and $\ell $ by $b_1 \ell $, we have
\begin{multline*}
OD_3 = \sum_{\substack{n_1,n_2,m,c,\ell \ge 1\\ b_1\ge1 \\ (n_1,\ell)=1}} e\Big( \frac{n_1 n_2 m \alpha \overline{c }}{\ell} \Big) \frac{\tau(m )}{ b_1 n_1 n_2 m } \\
\Psi \Big(\frac{b_1n_1\beta_1}{K},\frac{n_2\beta_2}{K}, \frac{ n_1n_2m\alpha}{b_1 \ell^2 K}, \frac{m\beta }{K^2}, \frac{K^2 \ell c}{8\pi n_1n_2 m \alpha}\Big) U_1\Big(\frac{b_1n_1 }{N_1}\Big) U_2\Big(\frac{n_2 }{N_2}\Big) U_3\Big(\frac{n_1 n_2 m \alpha }{b_1 \ell ^2 N_3}\Big).
\end{multline*}
Next we re-order the sum by $b_2= (n_2,\ell )$, and replace $n_2$ by $b_2 n_2$ and $\ell $ by $b_2 \ell $, then re-order the result by $b_3= (\alpha,\ell )$, and replace $\ell$ by $b_3 \ell$ and $\alpha$ by $b_3 \alpha$. In this way, the conditions (\ref{condition}) become
\begin{align}
\label{condition2} N_3\ge N_2, \ \ N_1N_2<\frac{K^{2+\epsilon}}{b_3 \alpha}, \ \ \ \beta\ge b_3\alpha,
\end{align}
and
\begin{multline}
\label{gcd} OD_3 = \sum_{\substack{n_1,n_2,m,c,\ell \ge 1\\ b_1,b_2\ge1,b_3|\alpha \\ (n_1n_2\alpha,\ell)=1}} e\Big( \frac{n_1 n_2 m \alpha \overline{c }}{\ell} \Big) \frac{\tau(m )}{ b_1 b_2 n_1 n_2 m } \\
\Psi \Big(\frac{b_1n_1\beta_1}{K},\frac{b_2 n_2\beta_2}{K}, \frac{ n_1n_2m\alpha}{b_1b_2b_3 \ell^2 K}, \frac{m\beta }{K^2}, \frac{K^2 \ell c}{8\pi n_1n_2 m \alpha}\Big) U_1\Big(\frac{b_1 n_1 }{N_1}\Big) U_2\Big(\frac{n_2 b_2 }{N_2}\Big) U_3\Big(\frac{n_1 n_2 m \alpha }{b_1 b_2 b_3\ell ^2 N_3}\Big),
\end{multline}
for which the required bound (\ref{reqod3}) becomes
\begin{align*}
OD_3\ll b_3 \alpha K^{2\theta+\epsilon}.
\end{align*}
Working in dyadic intervals of $m$ by taking a partition of unity, it suffices to show that
\begin{multline*}
OD_3 = \sum_{j} \sum_{\substack{n_1,n_2,m,c,\ell \ge 1\\ b_1,b_2\ge1,b_3|\alpha \\ (n_1n_2\alpha,\ell)=1}} e\Big( \frac{n_1 n_2 m \alpha \overline{c }}{\ell} \Big) \frac{\tau(m )}{ b_1 b_2 n_1 n_2 m } U_1\Big(\frac{b_1 n_1 }{N_1}\Big) U_2\Big(\frac{n_2 b_2 }{N_2}\Big) U_3\Big(\frac{n_1 n_2 m \alpha }{b_1 b_2 b_3\ell ^2 N_3}\Big) U_{4,j}\Big(\frac{m }{M_j}\Big)\\
\Psi \Big(\frac{b_1n_1\beta_1}{K},\frac{b_2 n_2\beta_2}{K}, \frac{ n_1n_2m\alpha}{b_1b_2b_3 \ell^2 K}, \frac{m\beta }{K^2}, \frac{K^2 \ell c}{8\pi n_1n_2 m \alpha}\Big) \ll b_3 \alpha K^{\theta+\epsilon}
\end{multline*}
for some smooth functions $U_{4,j}$ compactly supported on $(\half,\frac52)$, for $j\ll \log K$, and $M_j\asymp 2^j$. We now apply the Voronoi summation formula (Lemma \ref{vorsum}) to the $m$ sum, getting
\begin{align}
\label{od3def} OD_3:= FM + OD_4,
\end{align}
where the ``fake main term'' is
\begin{multline*}
FM := \int_{-\infty}^{\infty} \Big( \log \frac{x}{\ell ^2} +2\gamma \Big) \sum_{j} U_{4,j}\Big(\frac{x }{M_j}\Big) \sum_{\substack{n_1,n_2,c,\ell \ge 1\\ b_1,b_2\ge1,b_3|\alpha \\ (n_1n_2\alpha,\ell)=1}} \frac{1}{\ell b_1 b_2 n_1 n_2 } \\
\Psi \Big(\frac{b_1n_1\beta_1}{K},\frac{b_2 n_2\beta_2}{K}, \frac{ n_1n_2x\alpha}{b_1b_2b_3 \ell^2 K}, \frac{x\beta }{K^2}, \frac{K^2 \ell c}{8\pi n_1n_2 x \alpha}\Big) U_1\Big(\frac{b_1 n_1 }{N_1}\Big) U_2\Big(\frac{n_2 b_2 }{N_2}\Big) U_3\Big(\frac{n_1 n_2 x \alpha }{b_1 b_2 b_3\ell ^2 N_3}\Big) \frac{dx}{x},
\end{multline*}
and $OD_4$ is given in the next section. In the sum $FM$, we may re-patch the partition of unity and reverse the steps which led to (\ref{gcd}), getting that
\begin{multline*}
FM = \int_{-\infty}^{\infty} \Big( \log \frac{x}{\ell ^2} +2\gamma \Big) \sum_{\substack{n_1,n_2,c,\ell \ge 1} } \frac{1}{ \ell n_1 n_2 } \\
\Psi \Big(\frac{n_1\beta_1}{K},\frac{n_2\beta_2}{K},
\frac{ n_1n_2x\alpha}{\ell^2 K}, \frac{x\beta }{K^2}, \frac{K^2 \ell c}{8\pi n_1n_2 x \alpha}\Big) U_1\Big(\frac{n_1 }{N_1}\Big) U_2\Big(\frac{n_2 }{N_2}\Big) U_3\Big(\frac{n_1 n_2 x \alpha }{\ell ^2 N_3}\Big) \frac{dx}{x}.
\end{multline*}
The trivial bound for $FM$ is $O(K^{\half+\epsilon})$, from the length of the $c$-sum given by (\ref{crange}). It seems that we cannot do better because there are no exponentials or other harmonics present which might produce further cancellation (hence the name ``fake main term''). However, we can exploit our judicious choice of weight function in the approximate functional equation, as follows.
Making the substitution $y=\frac{x n_1 n_2 \alpha}{\ell ^2 N_3}$, we have
\begin{multline*}
FM = \int_{-\infty}^{\infty} \sum_{\substack{n_1,n_2,c,\ell \ge 1} } \Big( \log \frac{yN_3}{n_1n_2\alpha} +2\gamma \Big) \frac{1}{ \ell n_1 n_2 } \\
\Psi \Big(\frac{n_1\beta_1}{K},\frac{n_2\beta_2}{K},
\frac{ y N_3}{K}, \frac{y \ell^2 N_3 \beta }{K^2 n_1n_2\alpha}, \frac{K^2 c}{8\pi \ell N_3 y}\Big) U_1\Big(\frac{n_1 }{N_1}\Big) U_2\Big(\frac{n_2 }{N_2}\Big) U_3(y) \frac{dy}{y}.
\end{multline*}
It suffices to show that
\begin{align*}
FM':= \sum_{\substack{ c,\ell \ge 1} } \frac{1}{ \ell }
\Psi \Big(\frac{n_1\beta_1}{K},\frac{n_2\beta_2}{K},
\frac{ y N_3}{K}, \frac{y \ell^2 N_3 \beta }{K^2 n_1n_2\alpha}, \frac{K^2 c}{8\pi \ell N_3 y}\Big) \ll K^\epsilon
\end{align*}
for any $n_1\asymp N_1, n_2\asymp N_2, y\asymp 1$. Using (\ref{wdef}) and Mellin inversion, we have
\begin{multline*}
FM'= \frac{1}{(2\pi i)^5} \int_{(1+\epsilon)} \int_{(\half+\epsilon)} \int_{(\epsilon)} \int_{(\epsilon)} \int_{(\epsilon)} \frac{e^{s_1^2+s_2^2+s_3^2} \zeta(1+2s_4) \mathcal{G}(s_4) }{ 2^{s_1+s_2+s_3} 4^{s_4} } \zeta(1+2s_4-w) \zeta(w) \\
\Big(\frac{K}{n_1\beta_1}\Big)^{s_1}\Big(\frac{K}{n_2\beta_2}\Big)^{s_2} \Big(\frac{K}{yN_3}\Big)^{s_3} \Big( \frac{K^2 n_1n_2\alpha}{y N_3 \beta }\Big)^{s_4} \Big(\frac{8\pi N_3 y}{K^2 }\Big)^w
\tilde{\hbar}_{s_1+2s_2+2s_3}(w) \frac{ds_1}{s_1}\frac{ds_2}{s_2}\frac{ds_3}{s_3}\frac{ds_4}{s_4} dw.
\end{multline*}
Here $\zeta(1+2s_4-w)$ comes from the $\ell $-sum and $\zeta(w)$ comes from the $c$-sum. We must initially keep the lines of integration at $\Re(w)=1+\epsilon$ and $\Re(s_4)=\half+\epsilon$ in order to stay in the region of absolute convergence. The goal is to move all the lines of integration to $(\epsilon)$, and this would prove the claim.
We first move the $w$-integral to $\Re(w)=\epsilon$. This crosses a simple pole at $w=1$, with residue
\begin{multline*}
FM'':= \frac{1}{(2\pi i)^4 } \int_{(\half+\epsilon)} \int_{(\epsilon)} \int_{(\epsilon)} \int_{(\epsilon)} \frac{e^{s_1^2+s_2^2+s_3^2} \zeta(1+2s_4) \mathcal{G}(s_4) }{ 2^{s_1+s_2+s_3} 4^{s_4} } \zeta(2s_4) \\
\Big(\frac{K}{n_1\beta_1}\Big)^{s_1}\Big(\frac{K}{n_2\beta_2}\Big)^{s_2} \Big(\frac{K}{yN_3}\Big)^{s_3} \Big( \frac{K^2 n_1n_2\alpha}{y N_3 \beta }\Big)^{s_4} \Big(\frac{8\pi N_3 y}{K^2 }\Big)
\tilde{\hbar}_{s_1+2s_2+2s_3}(1) \frac{ds_1}{s_1}\frac{ds_2}{s_2}\frac{ds_3}{s_3}\frac{ds_4}{s_4} .
\end{multline*}
On the shifted integral at $\Re(w)=\epsilon$, which is not displayed, we may move the $s_4$-integral to $\Re(s_4)=\epsilon$ and then estimate (this does not cross any pole of $\zeta(1+2s_4-w)$, so this is straightforward). Thus the shifted integral is $O(K^{\epsilon})$ and we are left to estimate $FM''$. In the integral $FM''$, we move the line of integration to $\Re(s_4)=\epsilon$. This does not cross any poles because the simple pole of $\zeta(2s_4)$ at $s_4=\half$ is cancelled out by the zero at $s_4=\half$ of $\mathcal{G}(s_4)$; see the definition (\ref{gdef}). Thus $FM''$ is $O(K^{-1+\epsilon})$.
\section{Second application of reciprocity}
We now return to (\ref{od3def}) and give the definition of $OD_4$ corresponding to the sum on the right hand side of (\ref{vorstate}). We have ($r$ is the dual variable)
\begin{multline*}
OD_4 := \sum_{j}\sum_\pm \frac{1}{2\pi i} \int_{(A)} \int_0^\infty\sum_{\substack{n_1,n_2,r,c,\ell \ge 1\\ b_1,b_2\ge1,b_3|\alpha \\ (c,\ell)=1}} e\Big( \frac{\pm r c \overline{n_1 n_2 \alpha} }{\ell} \Big) \frac{\tau(r )}{ \ell b_1 b_2 n_1 n_2 } \Big(\frac{M_j r x}{\ell ^2}\Big)^{-w} H_1^{\pm}(w) U_1\Big(\frac{b_1 n_1 }{N_1}\Big)\\
U_2\Big(\frac{n_2 b_2 }{N_2}\Big) U_3\Big(\frac{n_1 n_2 x M_j \alpha }{b_1 b_2 b_3\ell ^2 N_3}\Big) U_{4,j}(x)
\Psi \Big(\frac{b_1n_1\beta_1}{K},\frac{b_2 n_2\beta_2}{K}, \frac{ n_1n_2xM_j\alpha}{b_1b_2b_3 \ell^2 K}, \frac{xM_j\beta }{K^2}, \frac{K^2 \ell c }{8\pi n_1 n_2 x M_j \alpha}\Big) \frac{dx}{x} dw,
\end{multline*}
where it is understood that the sum is restricted to $(n_1n_2\alpha,\ell)=1$ and we need $OD_4\ll b_3 \alpha K^{2\theta+\epsilon}$.
We first simplify the notation a bit (we did not do this earlier because we needed the exact form of the weight functions in order to deal with fake main terms). First, we observe that since there are $O(K^\epsilon)$ dyadic intervals, it is enough to consider any one smooth function $U_{4,j}=U_4$ and $M_j=M$. From the fourth component of $\Psi$ and the assumption $\beta\ge b_3 \alpha$ from (\ref{condition}), we can assume
\begin{align*}
M < \frac{K^{2+\epsilon}}{b_3\alpha}.
\end{align*}
We can also consider the sum in dyadic intervals $r\asymp R$ by inserting a smooth bump function $U_5(\frac{r}{R})$, where $U_5$ is supported on $(\half,\frac52)$. We can assume that
\begin{align*}
R<\frac{K^\epsilon \ell^2}{ M}
\end{align*}
because the contribution of $r \ge \frac{K^\epsilon \ell^2}{ M}$ is $O(K^{-100})$ say. This can be seen by moving the $w$-integral in $OD_4$ far to the right (taking $A$ large). By repeatedly integrating by parts the $x$-integral, we may restrict the $w$-integral to $|\Im w|<K^\epsilon$ (the real part is already fixed at $A$). Doing so, we may absorb $r^{-w}$ and $(\ell^2)^{-w}$ into $U_5$ and $U_3$ respectively. Similarly we may expand the function $\Psi$ using (\ref{wdef}), truncate the integrals there to $|\Im s_1|,|\Im s_2|,|\Im s_3|,|\Im s_4|<K^\epsilon$ (with $\Re(s_i)$ fixed of course) and absorb part of this function into the bump functions $U_1$, $U_2$, $U_3$, $U_4$. Thus it suffices to prove (we do not seek cancellation in the sum over $b_1,b_2,b_3$)
\begin{align*}
\sum_{\substack{n_1,n_2,r,c,\ell \ge 1\\ \\ (c,\ell)=1}} e\Big( \frac{\pm r c \overline{n_1 n_2 \alpha} }{\ell} \Big) \frac{\tau(r )}{ \ell n_1 n_2 } U_1\Big(\frac{b_1 n_1 }{N_1}\Big) U_2\Big(\frac{n_2 b_2 }{N_2}\Big)
U_3\Big(\frac{n_1 n_2 M \alpha }{b_1 b_2 b_3 \ell ^2 N_3}\Big) U_5\Big( \frac{r}{R}\Big) \hbar_s \Big(\frac{K^2 \ell c}{8\pi x n_1n_2 M \alpha}\Big) \\
\ll b_3 \alpha K^{\theta+\epsilon}
\end{align*}
for any $b_1,b_2,b_3\ge 1$, $x\asymp 1$, $|s|<K^\epsilon$ and any compactly supported functions $U_j$ with $j$-th derivative bounded by $(K^\epsilon)^j$. We simplify the notation a bit more. We suppress the factor $8\pi x$ in $\hbar_s$, rename $b_1b_2$ to $a$, $b_3$ to $b$, $M\alpha$ to $M$, $N_1/b_1$ to $N_1$ and $N_2/b_2$ to $N_2$. Thus it suffices to prove
\begin{align}
\label{b4recip} \sum_{\substack{n_1,n_2,r,c,\ell \ge 1\\ \\ (c,\ell)=1}} e\Big( \frac{\pm r c \overline{n_1 n_2 \alpha} }{\ell} \Big) \frac{\tau(r )}{ \ell n_1 n_2 } U_1\Big(\frac{ n_1 }{N_1}\Big) U_2\Big(\frac{ n_2 }{N_2}\Big)
U_3\Big(\frac{n_1 n_2 M }{ab \ell ^2 N_3}\Big) U_5\Big( \frac{r}{R}\Big) \hbar_s \Big(\frac{K^2 \ell c}{ n_1n_2 M}\Big) \ll b \alpha K^{\theta+\epsilon},
\end{align}
for any integers $a,b,\alpha$ and
\begin{align}
\label{newcond} N_1,N_2,N_3 < K^{1+\epsilon}, \ \ N_1N_2<\frac{K^{2+\epsilon}}{\alpha a b}, \ \ M<\frac{K^{2+\epsilon}}{b}, \ \
R<\frac{K^\epsilon \ell^2 \alpha}{ M } \asymp \frac{K^\epsilon N_1 N_2 \alpha}{ ab N_3},
\ \ N_3\ge N_2.
\end{align}
The approximation $\frac{K^\epsilon \ell^2 \alpha}{ M } \asymp \frac{K^\epsilon N_1 N_2 \alpha}{ ab N_3} $ follows from the support of $U_3$. This updates (\ref{condition2}).
Now using reciprocity for exponentials, we have
\begin{align*}
e\Big( \frac{ \pm r c \overline{n_1 n_2 \alpha} }{\ell} \Big) =e\Big( \frac{ \mp r c \overline{\ell} }{n_1 n_2 \alpha} \Big) e\Big( \frac{ \pm r c }{\ell n_1 n_2 \alpha} \Big) = e\Big( \frac{ \mp r c \overline{\ell} }{n_1 n_2 \alpha} \Big) \Big( 1+ O\Big( \frac{ \pm r c }{\ell n_1 n_2 \alpha}\Big) \Big).
\end{align*}
The contribution to (\ref{b4recip}) of this error term is less than
\begin{align*}
\sum_{\substack{n_1\asymp N_1\\ n_2\asymp N_2\\ \ell \asymp \frac{ \sqrt{ N_1 N_2 M}}{\sqrt{a b N_3}} }} \ \sum_{ r < \frac{K^\epsilon N_1 N_2 \alpha}{N_3 a b}} \ \sum_{c< \frac{ N_1N_2 M }{\ell K^{2-\epsilon}}} \frac{K^\epsilon}{\ell n_1n_2} \cdot \frac{ r c }{\ell n_1 n_2 \alpha}
\ll \frac{N_1^\frac32 N_2^\frac32 M^\half \alpha}{K^{4-\epsilon} N_3^\half}\ll \frac{1}{K^{\half-\epsilon}},
\end{align*}
by (\ref{newcond}).
So in (\ref{b4recip}) we can replace the exponential with $e( \frac{ \mp r c \overline{\ell} }{n_1 n_2 \alpha})$ and detect the condition $(\ell ,c)=1$ using the M\"{o}bius function:
\begin{align}
\sum_{l |(\ell,c)} \mu(l) =
\begin{cases}
1 &\text{ if } (\ell,c)=1\\
0 &\text{otherwise}.
\end{cases}
\end{align}
Thus replacing $\ell$ by $\ell l$ and $c$ by $cl$,
it suffices to prove
\begin{align*}
\sum_{\substack{n_1,n_2,r,c,\ell \ge 1\\ l\ge 1 } }e\Big( \frac{ \mp r c \overline{\ell} }{n_1 n_2 \alpha} \Big) \frac{\mu(l) \tau(r )}{ l \ell n_1 n_2 } U_1\Big(\frac{ n_1 }{N_1}\Big) U_2\Big(\frac{ n_2 }{N_2}\Big)
U_3\Big(\frac{n_1 n_2 M }{ab l^2 \ell ^2 N_3}\Big) U_5\Big( \frac{r}{R}\Big) \hbar_s \Big(\frac{K^2 l^2 \ell c}{ n_1n_2 M}\Big)
\nonumber \ll b \alpha K^{\theta+\epsilon}.
\end{align*}
We do not seek cancellation over the $l$-sum, so it suffices to prove
\begin{align*}
OD_5:= \sum_{\substack{n_1,n_2,r,c,\ell \ge 1 } } e\Big( \frac{ \mp r c \overline{\ell} }{n_1 n_2 \alpha} \Big) \frac{ \tau(r )}{ \ell n_1 n_2 } U_1\Big(\frac{ n_1 }{N_1}\Big) U_2\Big(\frac{ n_2 }{N_2}\Big)
U_3\Big(\frac{n_1 n_2 M }{ab l^2 \ell ^2 N_3}\Big) U_5\Big( \frac{r}{R}\Big) \hbar_s \Big(\frac{K^2 l^2 \ell c}{ n_1n_2 M}\Big)
\nonumber \ll b \alpha K^{\theta+\epsilon}
\end{align*}
for any integer $l\ge 1$ and assuming (\ref{newcond}). Also keep in mind that it is understood that the sum is restricted to $(\ell,n_1n_2\alpha)=1$.
\section{Second Poisson summation}
Now we split the $\ell $-sum in $OD_5$ into (primitive) residue classes mod $n_1 n_2 \alpha$ and apply Poisson summation (Lemma \ref{poiss}). Note that $\ell$ is supported in a compact interval of size ${\frac{\sqrt{n_1n_2M}}{l\sqrt{abN_3}}}$. The result is that (the dual variable is $d$)
\begin{align}
\label{od5pois} OD_5= &\sum_{\substack{n_1,n_2,r,c \ge 1 }} \frac{S( \mp r c, d, n_1 n_2 \alpha)}{n_1n_2\alpha} \frac{\tau(r ) }{ n_1 n_2 } U_1\Big(\frac{ n_1 }{N_1}\Big) U_2\Big(\frac{ n_2 }{N_2}\Big)
U_5\Big( \frac{r}{R}\Big) \\
\nonumber & \times \Bigg( \int_{-\infty}^{\infty} U_3\Big(\frac{1}{y}\Big) \hbar_s \Big(\frac{y K^2 c l }{ \sqrt{ab n_1n_2 N_3 M}}\Big) \frac{dy}{y} \\
\nonumber & + \sum_{d\neq 0}\frac{1}{2\pi i}\int_{(A)} \int_0^\infty H_2(z)
\Big( \frac{-2\pi yd\sqrt{M}}{l \alpha \sqrt{abn_1n_2N_3}} \Big)^{-z}
U_3\Big(\frac{1}{y}\Big) \hbar_s \Big(\frac{y K^2 c l}{ \sqrt{ab n_1n_2 N_3 M}}\Big) \frac{dy}{y} dz \Bigg).
\end{align}
We first consider the contribution of the second line of (\ref{od5pois}). This is the zero frequency contribution, and it is bounded by
\begin{align*}
\sum_{\substack{n_1\asymp N_1 \\ n_2\asymp N_2}} \sum_{ r < \frac{K^\epsilon N_1 N_2 \alpha}{N_3 a b}} \sum_{c< \frac{\sqrt{ab n_1n_2 N_3 M }}{l K^{2-\epsilon} } } \frac{K^\epsilon |S( \mp r c, 0, n_1 n_2 \alpha)|}{n_1^2 n_2^2 \alpha} \ll
\frac{\sqrt{N_1N_2M}}{K^{2-\epsilon}\sqrt{N_3}}\ll \frac{1}{K^{\half-\epsilon}},
\end{align*}
on using $N_3\ge N_2$ and that the Ramanujan sum is $O(K^\epsilon)$ on average.
Now we consider the contribution of the third line of (\ref{od5pois}), arising from the sum over $d\neq 0$. We consider this sum in dyadic intervals $d\asymp D$ (for simplicity, we restrict to only positive values of $d$) and $c\asymp C$ by inserting smooth bump functions $U_6(\frac{d}{D})$ and $U_7(\frac{c}{C})$ say. We can assume that
\begin{align}
\label{drange} D< \frac{ K^\epsilon l \alpha \sqrt{ab N_1 N_2 N_3} } { \sqrt{ M } }
\end{align}
because the contribution of $d>\frac{l \alpha \sqrt{ab N_1 N_2 N_3} } { \sqrt{ M } }$ is $O(K^{-100})$ by moving the $z$-integral in (\ref{od5pois}) far to the right. Restricting to $|\Im z|<K^\epsilon$ and $\Re(z)$ fixed, which we may do up to a negligible error by repeatedly integrating by parts with respect to $y$, we may absorb $d^{-z},n_1^{z},n_2^{z}$ into the existing weight functions. We can also assume that
\begin{align}
\label{crange2} C< \frac{\sqrt{ab N_1N_2 N_3 M }}{l K^{2-\epsilon} }
\end{align}
and absorb the function $\hbar_s$ into $U_7$, by using Mellin inversion and separating variables as above. Thus it suffices to prove
\begin{align*}
\sum_{\substack{n_1,n_2,r,c,d \ge 1 }} \frac{S( \mp r c, d, n_1 n_2 \alpha)}{n_1n_2\alpha} \frac{\tau(r ) }{ n_1 n_2 } U_1\Big(\frac{ n_1 }{N_1}\Big) U_2\Big(\frac{ n_2 }{N_2}\Big) U_5\Big( \frac{r}{R}\Big) U_6\Big( \frac{d}{D}\Big) U_7\Big( \frac{c}{C}\Big) \ll b\alpha K^{2\theta+\epsilon}.
\end{align*}
Finally, we need this to be in a form to which we can apply Kuznetsov's formula. To this end we define
\begin{align*}
X:=\frac{N_1N_2\alpha}{\sqrt{RDC}},
\end{align*}
and replace $U_2(\frac{n_2}{N_2})$ with a different bump function
\begin{align*}
Y_1\Big(\frac{ 4\pi X \sqrt{rcd}}{ n_1 n_2 \alpha }\Big)
\end{align*}
with properties given below. We can also replace $\frac{\tau(r ) }{ n_1 n_2 }$ with $\frac{\tau(r ) }{ N_1 N_2 }$. Thus it suffices to prove (we do not seek cancellation in the $n_1$ sum)
\begin{align*}
OD_6:= \frac{1}{ N_1 N_2 } \sum_{n_1\asymp N_1} \Big| \sum_{\substack{n_2 ,r,c,d \ge 1 }} \frac{S( \pm r c, d, n_1 n_2 \alpha) }{n_1 n_2 \alpha}
Y_1\Big(\frac{ 4\pi X \sqrt{rcd}}{ n_1 n_2 \alpha }\Big) Y_2\Big(\frac{d }{ D}\Big) Y_3\Big(\frac{r}{R}\Big) Y_4\Big(\frac{ c }{C}\Big) \Big| \ll b\alpha K^{2\theta+\epsilon},
\end{align*}
where the $Y_i$ are smooth functions compactly supported on $(\half,\frac{5}{2})$ with $\| Y_i ^{(j)} \|_\infty \ll (K^\epsilon)^j$ for all $j$, and we assume (\ref{newcond}), (\ref{drange}) and (\ref{crange2}).
\section{Kuznetsov's formula}
The goal now is to prove the required bound for $OD_6$ using Kuznetsov's formula and the spectral large sieve. We consider only the case of positive sign; the negative sign case is similar. By \cite[Theorem 16.5]{iwakow}, we have that
\begin{align}
\label{kuzback} \sum_{\substack{n_2 \ge 1 }} \frac{ S( r c, d, n_1 n_2 \alpha ) }{n_1 n_2 \alpha}
Y_1\Big(\frac{ 4\pi X \sqrt{rcd}}{ n_1 n_2 \alpha }\Big) = \text{Maass} + \text{Eis}+\text{Hol},
\end{align}
where these are the contributions of the Maass cusp forms, Eisenstein series, and holomorphic forms as given in the referenced theorem. For the Maass forms we have
\begin{align*}
\text{Maass} =\sum_{j\ge 1} \mathcal{M}_{Y_1}(t_j) \frac{\rho_j(rc)\overline{\rho}_j(d)}{\cosh (\pi t_j)},
\end{align*}
where the sum is over an orthonormal basis of Maass cusp forms of level $n_1\alpha$ with Fourier coefficients $\rho_j(n)$ and Laplacian eigenvalue $\frac{1}{4}+t_j^2$, and
\begin{align*}
\mathcal{M}_{Y_1}(t) = \frac{\pi i}{2 \sinh( \pi t)} \int_0^\infty (J_{2it}(x)-J_{-2it}(x)) Y_1(xX) \frac{dx}{x}.
\end{align*}
By \cite[Lemma 3.6]{butkha}, for example, we have that $ \mathcal{M}_{Y_1}(t) \ll K^{-100}$ if $|t|\ge K^\epsilon$, so we can restrict the sum $\text{Maass}$ to $|t_j|<K^\epsilon$, in which range $\mathcal{M}_{Y_1}(t_j) \ll X^{2\theta+\epsilon}$ by the same lemma.
We have $X\ll K^{1-\epsilon}$ by (\ref{rdc}), so it suffices to prove that
\begin{align*}
\sum_{n_1\asymp N_1} \frac{1}{N_1N_2} \sum_{|t_j|<K^\epsilon} \Big| \sum_{\substack{r,d,c\ge 1}} \frac{\rho_j(rc)\overline{\rho}_j(d)}{\cosh(\pi t_j)} \tau(r) Y_2\Big(\frac{d }{ D}\Big) Y_3\Big(\frac{r}{R}\Big) Y_4\Big(\frac{ c }{C}\Big) \Big| \ll b \alpha K^\epsilon.
\end{align*}
Now we would like to decompose $\rho_j(rc)$, so that Cauchy-Schwarz and the spectral large sieve may be applied. To do this we need to work with newforms, whose Fourier coefficients are multiplicative. We consult \cite[section 3]{blokha} to see how to choose a basis consisting of lifts of newforms. By \cite[equation (3.10)]{blokha}, and the $\cosh(\pi t_j)^\half$ normalization from the first display of \cite[section 3.2]{blokha}, it suffices to prove that
\begin{align*}
\sum_{n_1\asymp N_1} \frac{1}{N_1N_2} \sum_{|t_j|<K^\epsilon} \frac{K^\epsilon (uv)^\half}{N_1\alpha} \Big| \sum_{\substack{r,d,c\ge 1\\ u|rc \\ v|d }} \lambda_j\Big(\frac{rc}{u}\Big)\lambda_j\Big(\frac{d}{v}\Big) \tau(r) Y_2\Big(\frac{d }{ D}\Big) Y_3\Big(\frac{r}{R}\Big) Y_4\Big(\frac{ c }{C}\Big) \Big|\ll b\alpha K^\epsilon
\end{align*}
for any integers $u,v\ge 1$ and $N_0|n_1\alpha$, where $\lambda_j(n)$ are the Hecke eigenvalues corresponding to newforms of level $N_0$. We now replace $d$ by $dv$ and, proceeding exactly as in steps (\ref{proc1}) to (\ref{proc2}), we can write $u=u_1u_2u_3$ and replace $r$ by $ru_1u_2$ and $c$ by $cu_2u_3$ to see that it suffices to prove
\begin{multline*}
\sum_{n_1\asymp N_1} \frac{1}{N_1N_2} \sum_{|t_j|<K^\epsilon} \frac{K^\epsilon (u_1u_2u_3v)^\half}{N_1\alpha}\\ \Big| \sum_{\substack{r,d,c\ge 1}} \lambda_j(rcu_2)\lambda_j(d) \mu(u_2) \tau(r u_1 u_2) Y_2\Big(\frac{d v }{ D}\Big) Y_3\Big(\frac{r u_1 u_2 }{R}\Big) Y_4\Big(\frac{ cu_2 u_3 }{C}\Big) \Big|\ll b\alpha K^\epsilon.
\end{multline*}
To simplify notation, we may replace $r$ by $ru_2$. Thus it suffices to prove
\begin{align*}
\sum_{n_1\asymp N_1} \frac{1}{N_1N_2} \sum_{|t_j|<K^\epsilon} \frac{K^\epsilon (u_1u_2u_3v)^\half}{N_1\alpha} \Big| \sum_{\substack{r\asymp R/u_1\\ c\asymp C/u_2u_3\\ d\asymp D/v}} \lambda_j(rc)\lambda_j(d) \gamma_r \gamma_d \gamma_c \Big| \ll b\alpha K^\epsilon
\end{align*}
for any $\gamma_r,\gamma_c, \gamma_d \ll K^\epsilon$. By Hecke multiplicativity, we have
\begin{align*}
\lambda_j(rc)\lambda_j(d) = \sum_{\substack{s|(r,c) \\ (s,N_0)=1}} \mu(s) \lambda_j\Big(\frac{r}{s} \Big)\lambda_j\Big(\frac{c}{s} \Big) \lambda_j(d) = \sum_{\substack{s|(r,c) \\ w|(c/s,d) \\(sw,N_0)=1}} \mu(s) \lambda_j\Big(\frac{r}{s} \Big)\lambda_j\Big(\frac{cd}{sw^2} \Big),
\end{align*}
and so after replacing $r$ by $rs$, $c$ by $csw$, and $d$ by $dw$, it suffices to prove
\begin{align*}
OD_7:= \sum_{\substack{n_1\asymp N_1\\ s,w \le K^{10} }} \frac{1}{N_1N_2} \sum_{|t_j|<K^\epsilon} \frac{K^\epsilon (u_1u_2u_3v)^\half}{N_1\alpha} \Big| \sum_{\substack{r\asymp R/u_1 s \\ cd \asymp CD/u_2u_3 v sw^2}} \lambda_j(r) \lambda_j(cd) \gamma_{r} \gamma_{cd} \Big| \ll b\alpha K^\epsilon,
\end{align*}
for any $\gamma_r, \gamma_{cd}\ll K^\epsilon$.
By the Cauchy-Schwarz inequality and the spectral large sieve \cite[Theorem 2]{desiwa}, we have that $OD_7$ is bounded by
\begin{align*}
& \sum_{\substack{n_1\asymp N_1\\ s,w \le K^{10} }} \frac{1}{N_1N_2} \frac{K^\epsilon (u_1u_2u_3v)^\half}{N_1\alpha} \Big( \sum_{|t_j|<K^\epsilon} \Big| \sum_{\substack{r\asymp R/u_1s }} \lambda_j(r) \gamma_{r} \Big|^2\Big)^\half \Big( \sum_{|t_j|<K^\epsilon} \Big| \sum_{\substack{ cd \asymp CD/u_2u_3vsw^2 }} \lambda_j(cd) \gamma_{cd} \Big|^2\Big)^\half \\
&\ll \sum_{\substack{n_1\asymp N_1\\ s,w \le K^{10} }} \frac{1}{N_1N_2} \frac{K^\epsilon (u_1u_2u_3v)^\half}{N_1\alpha} \Big( \Big(N_1\alpha + \frac{R}{u_1s}\Big)\frac{R}{u_1s} \Big)^\half \Big( \Big(N_1\alpha + \frac{CD}{u_2 u_3 v s w^2}\Big)\frac{CD}{u_2 u_3 v s w^2} \Big)^\half.
\end{align*}
Thus it suffices to prove
\begin{align*}
\frac{K^\epsilon}{ N_1 N_2 \alpha } \big( (N_1\alpha + R)R \big)^\half \big((N_1\alpha + CD )CD \big)^\half \ll b\alpha K^\epsilon.
\end{align*}
By (\ref{newcond}),(\ref{drange}), and (\ref{crange2}), we have
\begin{align}
\label{rdc} (RCD)^\half \ll \frac{\alpha N_1 N_2}{K^{1-\epsilon}},
\end{align}
so it suffices to prove
\begin{align}
\label{last} \frac{1}{K} (N_1\alpha + R) ^\half (N_1\alpha + CD ) ^\half \ll b\alpha K^\epsilon.
\end{align}
We have
\begin{align*}
&\frac{N_1\alpha}{K} \ll K^\epsilon \alpha,\\
& \frac{(N_1\alpha CD )^\half}{K}\ll \frac{N_1\alpha ( a b N_2 N_3 )^\half}{K^2} \ll \frac{(N_1 N_3 \alpha)^\half ( a b \alpha N_1 N_2 )^\half}{K^2} \ll K^\epsilon \alpha^\half,\\
& \frac{(N_1\alpha R )^\half}{K}\ll\frac{N_1\alpha N_2 ^\half}{K N_3^\half } \ll K^\epsilon \alpha,
\end{align*}
where in the last inequality we use crucially that $N_3\ge N_2$. This establishes (\ref{last}).
It remains to consider Eis and Hol in (\ref{kuzback}). These are similarly treated using the large sieve, once we use the multiplicative Fourier coefficients provided explicitly in \cite[section 3]{blokha}.
\end{document}
\begin{document}
\title {The isoperimetric inequality on asymptotically flat manifolds with nonnegative scalar curvature}
\begin{abstract}
In this note, we consider the isoperimetric inequality on an asymptotically flat manifold with nonnegative scalar curvature, and improve it by using the Hawking mass. We also obtain a rigidity result when equality holds in the classical isoperimetric inequality on an asymptotically flat manifold with nonnegative scalar curvature.
\end{abstract}
\keywords{isoperimetric inequality; inverse mean curvature flow; Hawking mass; asymptotically flat manifold }
\renewcommand{\subjclassname}{\textup{2000} Mathematics Subject Classification}
\subjclass[2000]{Primary 83C57 ; Secondary 53C44}
\author{Yuguang Shi$^\dag$}
\address{Yuguang Shi, Key Laboratory of Pure and Applied Mathematics, School of Mathematical Sciences, Peking University,
Beijing, 100871, P.R. China.} \email{[email protected]}
\thanks{$^\dag$ Research partially supported by NSF of China grants 10725101 and 10990013.}
\date{2015}
\maketitle
\markboth {Yuguang Shi}{}
\section{Introduction}
The isoperimetric inequality and isoperimetric surfaces have a very long history and many important applications in mathematics; see e.g. \cite{B}, \cite{CY}. Huisken has observed that the ADM mass of an asymptotically flat manifold (see Definition \ref{defaf} below) appears in the expansion of the isoperimetric ratio when the enclosed volume is large enough; see \cite{Hu} and \cite{EM2} (for the case of coordinate spheres, see \cite{FST}). Inspired by these facts, it is natural to ask whether there is any relationship between the isoperimetric inequality and quasi-local mass for any fixed enclosed volume. In this short note, we are able to use the Hawking mass to improve the isoperimetric inequality in some cases. In order to present our result, we need some notions.
\begin{defi}\label{defaf}
A complete and connected three-manifold $(M^3,g)$ is said to be {\rm asymptotically flat}
(AF) (with one end) if there are a positive constant $C>0$ and a compact subset $K$
such that $M\setminus K$ is diffeomorphic to $\Bbb R^3\setminus B_R(0)$
for some $R>0$ and in the standard coordinates in $\Bbb R^3$, the metric
$g$ satisfies:
\begin{equation} \label{daf1}
g_{ij}=\delta_{ij}+\sigma_{ij}
\end{equation}
with
\begin{equation} \label{daf2}
|\sigma_{ij}|+r|\partial \sigma_{ij}|+r^2|\partial\partial\sigma_{ij}|\leq C r^{-1},
\end{equation}
where $r$ and $\partial$ denote the Euclidean distance and standard derivative operator on $\Bbb R^3$
respectively. The region $M\setminus K$ is called the end of $M$.
\end{defi}
An original idea of \cite{BC} is to use the weak solution of the inverse mean curvature flow (\ref{imcf}) to estimate the volumes of isoperimetric regions in an asymptotically hyperbolic manifold. Inspired by this, we use the same idea to investigate the same problem in the case of AF manifolds. More specifically, for any $x\in M$ there is a weak solution $(G_t)_{t>-\infty}$ of (\ref{imcf}), in the sense of \cite{HI}, with initial condition $\{x\}$. One important property of this weak solution is that for each $t\in \Bbb R$, $(G_t)$ has the least boundary area among all domains containing it, i.e. $(G_t)$ is a minimizing hull in $(M^3, g)$. Another interesting property is that the Hawking mass of $K_t =\partial G_t$, which is defined as
$$
m_H (t)=\frac{(Area (K_t))^\frac12}{(16\pi)^\frac32}\Big(16\pi - \int_{K_t}H^2\Big),
$$
is nondecreasing in $t$; here, $H$ is the mean curvature of $K_t(x) =\partial G_t$ with respect to the outward unit normal vector. By using this quantity, we are able to estimate the area of $K_t$ in terms of the volume of $G_t$, see (\ref{isoperineq1}) below; hence, we obtain Theorem \ref{comparisontheorem1}.
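For orientation (this is only a normalization check and is not used below): for a round sphere $S_\rho\subset \Bbb R^3$ of radius $\rho$ we have $H=\frac{2}{\rho}$ and $Area(S_\rho)=4\pi\rho^2$, so
$$
\int_{S_\rho}H^2 = 4\pi\rho^2\cdot\frac{4}{\rho^2}=16\pi \quad \text{and hence} \quad m_H=0;
$$
thus the Hawking mass measures the deviation of $\int H^2$ from its Euclidean value $16\pi$.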
To do that, we need to parametrize $t$ by $v$, which is the volume of $G_t$, and it turns out that this function $t(v)$ is Lipschitz; for details, see Lemma \ref{functiontofv} below. Let $m(v)= m_H (t(v))$, $B(v)= Area (K_{t(v)})$, and
\begin{equation}
\begin{split}
A(v)= \inf\{&\mathcal{H}^2(\partial^* \Omega): \Omega\subset M \text{ is a Borel set with finite perimeter, and }\\
&\mathcal{L}^3 (\Omega)=v\}.
\end{split}\nonumber
\end{equation}
Here, $\mathcal{H}^2$ is the $2$-dimensional Hausdorff measure of the reduced boundary $\partial^*\Omega$, and $\mathcal{L}^3 (\Omega)$ is the Lebesgue measure of $\Omega$ with respect to the metric $g$.
Then our main result can be stated as follows
\begin{theo}\label{comparisontheorem1}
Suppose $(M^3, g)$ is an asymptotically flat (AF) manifold with nonnegative scalar curvature. Fix a point $o\in M$. For every $v>0$, there is a $\rho>0$ so that for all $x\in M\setminus B_{\rho}(0)$ we have that
\begin{equation}\label{inequality1}
A(v)\leq (36\pi)^\frac13 \left(\int^v_0\big(1-(16\pi)^{\frac12}B^{-\frac12} (t) m(t)\big)^\frac12 dt\right)^\frac23 ,
\end{equation}
where $m(v)$ is defined as above.
\end{theo}
When the scalar curvature of $M$ is non-negative and $M$ satisfies some topological conditions, we have that $m(v)\geq 0$. We see that in this case $A(v)\leq (36\pi)^\frac13 v^\frac23$. Comparing this with the Euclidean case, in which $m(v)=0$, we observe the following heuristic phenomenon: {\it to enclose the same volume, isoperimetric surfaces in a manifold with bigger mass have smaller area.} We believe such a phenomenon can also be observed in the case of asymptotically hyperbolic manifolds, and we will discuss this problem in a future paper. With these facts in mind, it is natural to ask what happens if there is a $v_0>0$ with $A(v_0)=(36\pi)^\frac13 v_0 ^\frac23$. Our next theorem gives an answer to this question.
\begin{theo}\label{rigidity}
Suppose $(M^3, g)$ is an asymptotically flat manifold with nonnegative scalar curvature. Then there is a $v_0 >0$ with
$$
A(v_0)=(36\pi)^\frac13 v_0 ^\frac23
$$
if and only if $(M^3, g)$ is isometric to $\Bbb R^3$.
\end{theo}
Inequality (\ref{isoperineq1}) below is crucial in the proof of Theorem \ref{comparisontheorem1} and Theorem \ref{rigidity}; an equivalent version of it was first proved in \cite{BC} (see Proposition 3 in \cite{BC}), and the arguments here are quite similar to those in \cite{BC}.
Theorem \ref{comparisontheorem1} and Theorem \ref{rigidity} play important roles in the proof of the existence of isoperimetric regions in a non-flat AF manifold. There are many results that focus on large isoperimetric regions in an AF manifold, where the asymptotic regime plays an important role; see \cite{EM1} and references therein. However, there are very few results on the existence of isoperimetric regions of medium size in an AF manifold. One difficulty is that a minimizing sequence of isoperimetric regions may drift off to infinity, while Theorem \ref{comparisontheorem1} allows for control over such minimizing sequences in a certain sense, so that one may obtain the existence of isoperimetric regions with any given volume. This was observed in the very recent paper \cite{CCE}.
The outline of the paper is as follows. In Section 2, we introduce some notions and basic facts of weak solutions of inverse mean curvature flow from \cite{HI}; in Section 3, we prove the main results.
{\bf Acknowledgements} The author is grateful to Prof. Frank Morgan, Dr. Gang Li and Dr. Chao Bao for pointing out some typos and errors in the first version of the paper, and would also like to thank the referees for many useful comments and suggestions which made the paper clearer.
In particular, the author would like to thank one of the referees for pointing out that the assumptions of Theorem \ref{comparisontheorem1} and Theorem \ref{rigidity} can be relaxed by considering IMCF in the exterior region of $M$.
\section{Preliminaries}
In this section, we introduce some notions and present some facts from \cite{HI} that will be needed in the proof of Theorem \ref{comparisontheorem1} and Theorem \ref{rigidity}. As in \cite{HI}, a classical solution of the inverse mean curvature flow (IMCF) in $(M^3, g)$ is a smooth family $F: N\times [0, T]\to M$ of embedded hypersurfaces $N_t= F(N,t)$ satisfying the following evolution equation
\begin{equation}\label{imcf}
\frac{\partial F}{\partial t}=H^{-1}\nu, \quad 0\leq t\leq T,
\end{equation}
where $H$ is the mean curvature of $N_t$ at $F(x, t)$ with respect to the outward unit normal vector $\nu$ for any $x \in N$. In general, the evolution equation (\ref{imcf}) has no classical solution. In order to overcome this difficulty, a level set approach was established in \cite{HI}, i.e. these evolving surfaces were given as the level-sets of a scalar function $u$ via $N_t=\partial\{x\in M: u(x)<t\}$, where $u$ satisfies the following degenerate elliptic equation in the weak sense.
\begin{equation}\label{scalarimcf}
div_M (\frac{\nabla u}{|\nabla u|})=|\nabla u|.
\end{equation}
Here the left-hand side describes the mean curvature of level-sets and the right-hand side yields the inverse speed.
By the definition of AF manifolds, for any $x\in M\setminus K$, we may consider standard coordinates $x=(x^1, x^2, x^3)$ on $\Bbb R^3$. It was observed in \cite{HI} that $v(x)=C \log |x|$ is a weak subsolution of (\ref{scalarimcf}) on $M\setminus K$ (please see the precise definition of a weak subsolution of (\ref{scalarimcf}) on p. 365 in \cite{HI}), where $|x|=\sqrt{(x^1)^2 +(x^2)^2 +(x^3)^2 }$. With this weak subsolution one is able to prove the existence of the weak solution of (\ref{scalarimcf}) on $M$ with any nonempty precompact smooth open set $E_0$ as initial condition (see Theorem 3.1 in \cite{HI}). Let $u^\epsilon$ be the solution of the following elliptic regularization:
\begin{equation}\label{ellipticregularization}
\left\{
\begin{array}{ll}
E^\epsilon u^\epsilon= div(\frac{\nabla u^\epsilon}{\sqrt{|\nabla u^\epsilon|^2 +\epsilon^2}})-\sqrt{|\nabla u^\epsilon|^2 +\epsilon^2}=0, & \text{in $\Omega_L$} \\
u^\epsilon =0 , & \text{on $\partial E_0$} \\
u^\epsilon =L-2, & \text{on $\partial F_L$}.
\end{array}
\right.
\end{equation}
Here and in the sequel, $F_L = \{v<L\}$ for any large $L>0$, and $\Omega_L = F_L \setminus \bar E_0$. Let $W^\epsilon (x,z)= u^\epsilon (x)-\epsilon z$ be a function on $\Omega_L \times \Bbb R$; then we have
$$
div(\frac{\nabla W^\epsilon}{|\nabla W^\epsilon|})=|\nabla W^\epsilon|,
$$
or equivalently, the level set $N^\epsilon_t= \{(x,z)\in \Omega_L \times \Bbb R: W^\epsilon (x,z)=t\}$ is a slice of the inverse mean curvature flow in the domain $\Omega_L \times \Bbb R$ for any $t>0$, and actually it is the classical solution to (\ref{imcf}). We know from Lemma 3.5 in \cite{HI} that (\ref{ellipticregularization}) admits a classical solution. Also, we have the following compactness lemma, whose proof can be found on p. 398 in \cite{HI}.
\begin{lemm}\label{compactness1}
Let $(M^3, g)$ be an AF manifold, and $E_0$ be a precompact set of $M$ with smooth boundary. Then there are subsequences $\epsilon_i \rightarrow 0$, $L_i \rightarrow \infty$, $N^i_t=N^{\epsilon_i}_t$ such that
\begin{equation}
N^i_t \rightarrow \tilde{N_t}=N_t \times \Bbb R, \quad\text{locally in $C^1$}, \quad \text{for almost every $ t\geq 0$},
\end{equation}
where $N_t=\partial E_t$ and $(E_t)_{t>0}$ is the unique weak solution of (\ref{imcf}) with $E_0$ as the initial condition.
\end{lemm}
\section{Proof of the main theorems}
In this section, we first establish some lemmas and then prove our main results. Many arguments are from \cite{BC}. Lemma \ref{functiontofv} below plays an important role in the proof; meanwhile, we note that many quantities involved are not smooth along the weak solution of the inverse mean curvature flow (\ref{imcf}). To handle this difficulty, we first calculate the corresponding quantities along the solutions of the elliptic regularizations with suitable boundary conditions. Passing to the limit using Lemma \ref{compactness1}, we then obtain the desired statements.
Let $B_\mu(x)$ be any geodesic ball with radius $\mu>0$ and center $x$ in $(M, g)$, and let $E_0=B_\mu(x)$. We consider the following boundary value problem
\begin{equation}\label{ellipticregularization2}
\left\{
\begin{array}{ll}
E^\epsilon u^\epsilon= div(\frac{\nabla u^\epsilon}{\sqrt{|\nabla u^\epsilon|^2 +\epsilon^2}})-\sqrt{|\nabla u^\epsilon|^2 +\epsilon^2}=0, & \text{in $\Omega_L$} \\
u^\epsilon =0 , & \text{on $\partial E_0$} \\
u^\epsilon =L-2, & \text{on $\partial F_L$}.
\end{array}
\right.
\end{equation}
Using Lemma \ref{compactness1}, we know there are subsequences $\epsilon_i \rightarrow 0$, $L_i \rightarrow \infty$, $N^i_t=N^{\epsilon_i}_t$ such that
\begin{equation}
N^i_t \rightarrow \tilde{N_t}=N_t \times \Bbb R, \quad\text{locally in $C^1$}, \text{ for almost every $ t\geq 0$},
\end{equation}
where $N_t=\partial E_t$ and $(E_t)_{t>0}$ is the unique weak solution of (\ref{imcf}) with the initial condition $E_0=B_\mu(x)$. For simplicity, as in the proof of Lemma 8.1 in \cite{HI}, for each $\mu>0$ we may take a suitable transformation of $t$, so that the weak solution $(E_t)$ of the initial value problem (\ref{imcf}) is defined on $[-T(\mu), \infty)$. Here $T(\mu)\rightarrow \infty$ as $\mu$ approaches zero, and $(E_t)_{-T(\mu)\leq t <\infty}$ converges locally in $C^1$ to $(G_t)_{-\infty <t <\infty}$, which is the weak solution of (\ref{imcf}) with the single point $\{x\}$ as the initial condition.
Let $W^\epsilon$ be defined by (\ref{ellipticregularization2}), and
$$
V_\epsilon(t)= Vol(\{(x,z)\in \Omega_L \times \Bbb R: W^\epsilon (x,z)<t, \quad |z|\leq \frac12\}).
$$
Note that the level sets of $W^\epsilon$ form a classical solution to (\ref{imcf}). We see that $V_\epsilon(t)$ is a smooth function of $t$, and furthermore, we have the following result.
\begin{lemm}\label{vderivativet} Let $\chi_{\{|z|\leq \frac12\}}(x,z)$ be the characteristic function of the domain $\mathbb{D}=\{(x,z)\in \Omega_L \times \Bbb R: |z|\leq \frac12\}$. Then
$$
\frac{d V_\epsilon}{dt}=\int_{N^\epsilon_t}H^{-1}_\epsilon \chi_{\{|z|\leq \frac12\}}(x,z)dS >0.
$$
Here and in the sequel, $H_\epsilon$ denotes the mean curvature of $N^\epsilon_t$ in $\mathbb{D}$ with respect to the unit normal direction $\frac{\nabla W^\epsilon}{|\nabla W^\epsilon|}$.
\end{lemm}
\begin{proof}
Using the co-area formula, we see that
\begin{equation}
\begin{split}
V_\epsilon (t)&=\int_\mathbb{D}\chi_{\{|z|\leq \frac12\}}(x,z)\chi_{\{W^\epsilon <t\}}(x,z)dv\\
&=\int^\infty_{-\infty}\int_{\{W^\epsilon =\sigma\}}\frac{\chi_{\{|z|\leq \frac12\}}(x,z)\chi_{\{W^\epsilon <t\}}(x,z)}{|\nabla W^\epsilon|}dSd\sigma\\
&=\int^t_{-\infty}\int_{\{W^\epsilon =\sigma\}}\frac{\chi_{\{|z|\leq \frac12\}}(x,z)}{|\nabla W^\epsilon|}dSd\sigma,
\end{split}
\end{equation}
which, since $H_\epsilon=|\nabla W^\epsilon|$ on $N^\epsilon_t$, implies
$$
\frac{d V_\epsilon}{dt}=\int_{N^\epsilon_t}H^{-1}_\epsilon \chi_{\{|z|\leq \frac12\}}(x,z)dS >0.
$$
This finishes the proof of the lemma.
\end{proof}
A direct conclusion of Lemma \ref{vderivativet} is the following
\begin{coro}\label{tderivativev}
Let $W^\epsilon$ be a classical solution to (\ref{imcf}) on $\mathbb{D}$ and $v=Vol(\{(x,z)\in \Omega_L \times \Bbb R: W^\epsilon (x,z)<t, \quad |z|\leq \frac12\})$. Then $t$ is a smooth function of $v$ and
\begin{equation}
\begin{split}
\frac{dt}{dv}&=(\int_{N^\epsilon_t}H^{-1}_\epsilon \chi_{\{|z|\leq \frac12\}}(x,z)dS)^{-1}\\
&=(\int_{N^\epsilon_t \cap \{|z|\leq \frac12\}}H^{-1}_\epsilon dS)^{-1}.
\end{split}\nonumber
\end{equation}
\end{coro}
Let $(G_t)_{t>-\infty}$ be the weak solution of (\ref{imcf}). We have the following
\begin{lemm}\label{imcfvol}
For any $v>0$ either there is a time $t \in \mathbb{R}$ with $Vol (G_t)=v$ or $v$ is a jump volume for (\ref{imcf}), i.e. there exists a time $t_1>-\infty$ with
$$
Vol(G_{t_1})<v\leq Vol(G^+_{t_1}),
$$
where $G^+_{t_1}$ is the strictly minimizing hull for $G_{t_1}$.
\end{lemm}
\begin{proof}
Let
$$
t_0=\inf\{t \in \Bbb R: Vol(G_t)\geq v\},
$$
and
$$
\tau_0=\sup\{t \in \Bbb R: Vol(G_t)\leq v\}.
$$
Note that $t_0 \geq \tau_0$. By \cite{HI}, we know that $K_t=\partial G_t$ converges to $K^+ _{t_0}$ locally in $C^1$ when $t$ decreases to $t_0$ and $K_t$ converges to $K _{\tau_0}$ locally in $C^1$ when $t$ increases to $\tau_0$, so that $Vol(G^+ _{t_0})\geq v \geq Vol(G _{\tau_0})$. If $t_0 > \tau_0$, this contradicts the definition of $t_0$ or
$\tau_0$. Thus $t_0 =\tau_0$. Hence either $v$ satisfies $Vol(G_{t_0})=v$ or $Vol(G_{t_0})<v\leq Vol(G^+_{t_0})$. This finishes the proof of Lemma \ref{imcfvol}.
\end{proof}
The next lemma is on the relation between $t$ and the volumes of the level sets of a weak solution of (\ref{imcf}).
\begin{lemm}\label{functiontofv}
For any $v>0$, let
\begin{equation}
t(v)=\inf\{\tau: Vol(G_\tau)\geq v\}\nonumber.
\end{equation}
Then $t$ is a Lipschitz function and
\begin{equation}
\frac{dt}{dv}\leq (\int_{K_t}H^2 )^\frac12 \cdot (Area(K_t))^{-\frac32},\nonumber
\end{equation}
where $K_t=\partial G_t$.
\end{lemm}
\begin{proof}
For any fixed $v>0$, let $t^i(v) =t^i$ with $v=Vol(\{(x,z)\in \Omega_L \times \Bbb R: W^\epsilon (x,z)<t^i, \quad |z|\leq \frac12\})$. Then by Lemma \ref{compactness1}, we see that $t^i(v)$ converges to $t(v)$. (Here we assume without loss of generality that the initial condition $B_\mu(x)$ shrinks to $\{x\}$ as $i \to \infty$.) Next, according to Corollary \ref{tderivativev},
\begin{equation}
\begin{split}
\frac{dt^i}{dv}&=(\int_{N^i_t \cap \{|z|\leq \frac12\}}H^{-1}_i dS)^{-1}\\
&\leq (\int_{N^i_t \cap \{|z|\leq \frac12\}}H^2_i dS)^\frac12 (Area (N^i_t \cap \{|z|\leq \frac12\}))^{-\frac32}.
\end{split}\nonumber
\end{equation}
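The inequality in the last line is an instance of H\"{o}lder's inequality (recall $H_i>0$ here): writing $A_i= Area (N^i_t \cap \{|z|\leq \frac12\})$, we have
$$
A_i=\int_{N^i_t \cap \{|z|\leq \frac12\}} H_i^{-\frac23} H_i^{\frac23} dS \leq \Big(\int_{N^i_t \cap \{|z|\leq \frac12\}} H^{-1}_i dS\Big)^{\frac23} \Big(\int_{N^i_t \cap \{|z|\leq \frac12\}} H^2_i dS\Big)^{\frac13},
$$
which rearranges to $(\int_{N^i_t \cap \{|z|\leq \frac12\}} H^{-1}_i dS)^{-1}\leq (\int_{N^i_t \cap \{|z|\leq \frac12\}} H^2_i dS)^{\frac12} A_i^{-\frac32}$.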
Hence, for any $v_1 \geq v_2$, we have
$$
t^i (v_1)-t^i (v_2)\leq \int^{v_1}_{v_2}(\int_{N^i_t \cap \{|z|\leq \frac12\}}H^2_i dS)^\frac12 (Area (N^i_t \cap \{|z|\leq \frac12\}))^{-\frac32} dv.
$$
According to (5.6) in \cite{HI}, we see that for any $T>-T(\mu)$ and all $t\in [-T(\mu),T]$
$$\int_{N^i_t \cap \{|z|\leq \frac12\}}H^2_i dS\leq C(T),$$
here $C(T)$ is a constant that depends only on $T$. Using also (5.12) in \cite{HI}, we see that for almost every $t>-T(\mu)$, we have
$$
\int_{N^i_t\cap \{|z|\leq \frac12\}}H^2_i dS\rightarrow \int_{\tilde N_t\cap \{|z|\leq \frac12\}}H^2 dS.
$$
Letting $i \rightarrow \infty$ and using the bounded convergence theorem, we see that
\begin{equation}
\begin{split}
t(v_1)-t (v_2)&\leq \int^{v_1}_{v_2}(\int_{\tilde N_t\cap \{|z|\leq \frac12\}}H^2)^\frac12 (Area (\tilde N_t\cap \{|z|\leq \frac12\}))^{-\frac32} dv\\
&=\int^{v_1}_{v_2}(\int_{K_t}H^2 )^\frac12 \cdot (Area(K_t))^{-\frac32} dv.
\end{split}
\end{equation}
This finishes the proof of the lemma.
\end{proof}
Let $M_{ext}$ be the exterior region of $(M^3, g)$ defined in Lemma 4.1 of \cite{HI} (see p. 392 there). Let $\Omega \subset M$ be a Borel set with finite perimeter, and let $\Omega_{ext}= \Omega \cap M_{ext}$. Let
$$
A_{ext}(v)= \inf \{\mathcal{H}^2 (\partial^* \Omega_{ext}): \mathcal{L}^3 (\Omega_{ext})=v \}.
$$
Clearly, we have $A(v)\leq A_{ext}(v)$. In order to prove Theorem \ref{comparisontheorem1} and Theorem \ref{rigidity}, we need $A_{ext}(v)$ to be nondecreasing:
\begin{lemm}\label{nondecreasav}
Let $(M^3, g)$ be an AF manifold with nonnegative scalar curvature, and let $M_{ext}$ be the exterior region of $M$. Then $A_{ext}(v)$ is nondecreasing.
\end{lemm}
We will use an idea from \cite{B} to prove this lemma: we need to construct a compact manifold with compact boundary from $M_{ext}$. More precisely, note that $(M^3, g)$ is AF, hence we may take a large compact domain $\Omega \subset M_{ext}$ so that $M_{ext}\setminus \Omega$ is diffeomorphic to $\Bbb R^3 \setminus \mathbb{B}_{R+4}$; for simplicity, we just assume $\Omega \setminus K $ is diffeomorphic to $\mathbb{B}_{R+4} \setminus \mathbb{B}_{\frac R2}$, where $K$ is the compact domain of $M$ which appears in Definition \ref{defaf}. On the other hand, we observe that the standard sphere with radius $\frac\lambda 2$ can be expressed as $\mathbb{S}^3 (\lambda)=(\Bbb R^3, g_S=\frac{(dx^1)^2 + (dx^2)^2 +(dx^3)^2}{(1+\lambda^{-2}|x|^2)^2})$. Let
\begin{equation}
\bar g= \left\{
\begin{array}{ll}
g, & \text{inside $\mathbb{B}_{R+5}$} \\
\eta g +(1-\eta)g_S, & \text{on $\mathbb{B}_{R+6} \setminus\mathbb{B}_{R+5}$} \\
g_S, & \text{outside $\mathbb{B}_{R+6}$}
\end{array}
\right.
\end{equation}
where $\eta$ is a smooth function which is equal to $1$ in $\mathbb{B}_{R+5}$ and vanishes outside $\mathbb{B}_{R+6}$. Thus $(M_{ext} , \bar g)$ can be regarded as a compact manifold with compact boundary. We denote this manifold by $(\bar M, \bar g)$.
We also need the following result from \cite[Lemma 1]{MY}.
\begin{lemm}[Meeks-Yau]\label{areabound} Let $\iota$ be the infimum of the injectivity
radius of points in $\{x\in \bar M|\ d(x, S_{\frac R2})>\frac d4\}$. Let $K>0$ be the upper bound of the curvature of $\bar M$ outside $\mathbb{B}_{\frac R2}$, and let $S_{\frac R2}$ be the coordinate sphere with radius $\frac R2$. Suppose $N$ is a minimal surface and $x\in N$
is a point satisfying $d(x,S_{\frac R2})\ge \frac d2$. Then
\begin{equation}\label{area}
|N\cap B_x(r)|\ge 2\pi K^{-2}\int_0^r \tau^{-1}(\sin K\tau)^2d\tau,
\end{equation}
where $r=\min\{\frac d4,\iota\}$.
\end{lemm}
\begin{proof}[Proof of Lemma \ref{nondecreasav}]
Assume that $A_{ext}(v)$ is not nondecreasing. Then there are $v_1 < v_2$ with $A_{ext}(v_1)> A_{ext}(v_2)$. Using results from geometric measure theory (\cite{Si}), there is a compact domain $\Omega_0 \subset \bar M$ with compact boundary $\Sigma_0$ such that $\Sigma_0 \setminus \partial M_{ext}$ is smooth and
$$
Area_{\bar g}(\Sigma_0)=\inf \{Area_{\bar g} (\partial \Omega): \Omega \subset \bar M, Vol_{\bar g}(\Omega)\geq v_1\}.
$$
Here and in the sequel, $Area_{\bar g}$ and $Vol_{\bar g}$ denote the area and volume with respect to the metric $\bar g$, respectively.
{\it We claim that $Vol_{\bar g}(\Omega_0)>v_1$ provided $R$ and $\lambda$ are large enough, and that therefore $\Sigma_0 \setminus \partial M_{ext}$ is a stable minimal surface in $\bar M$}. In fact, suppose $Vol_{\bar g}(\Omega_0)=v_1$. For any $\epsilon>0$, we may choose a compact domain $\mathbb{D}_2 \subset M$ with $Vol_g (\mathbb{D}_2 )=v_2$ and $Area_g (\partial\mathbb{D}_2 )< A_{ext}(v_2)+\epsilon$, and without loss of generality, we assume $\mathbb{D}_2$ is contained in $\Omega$; then we have
\begin{equation}\label{ineq1}
Area_{\bar g}(\Sigma_0)\leq Area_{\bar g} (\partial\mathbb{D}_2 )=Area_g (\partial\mathbb{D}_2 )< A_{ext}(v_2)+\epsilon < A_{ext}(v_1),
\end{equation}
which implies that $\Omega_0$ cannot be contained in $\Omega$ completely.
If $\Omega_0$ is contained in the domain outside $\mathbb{B}_{R+6}$, then by the solution of the isoperimetric problem on the standard sphere, we see that when $R$ and $\lambda$ become large enough, the diameter of $\Omega_0$ in $\bar M $ is uniformly bounded, independently of $R$ and $\lambda$. Moreover, for any fixed $R$, taking $\lambda$ large enough, we see that the metric $\bar g$ restricted to $\Omega_0$ is almost Euclidean. Then, by a translation in $\Bbb R^3$, we may find a domain $\Omega_1$ which is contained in $\mathbb{B}_R \setminus \mathbb{B}_{\frac R2} \subset \Omega $ and isometric to $\Omega_0$ in $\Bbb R^3$. Hence, the volume of $\Omega_1$ and the area of the boundary of $\Omega_1$ are very close to those of $\Omega_0$ with respect to the metric $\bar g$, provided $R$ and $\lambda$ are large enough. By a small perturbation of $\Omega_1$ if necessary, we may assume $Vol_{\bar g} (\Omega_1)=Vol_{\bar g} (\Omega_0)$, and $A_{ext}(v_1) \leq Area_{\bar g}(\partial \Omega_1)\leq Area_{\bar g}(\Sigma_0)+\epsilon$, which is a contradiction to (\ref{ineq1}), provided that $\epsilon >0$ is sufficiently small.
For the remaining case, by the co-area formula, we see that we may find a coordinate sphere $S_\rho$ with $Area _{\bar g}(S_\rho \cap \Omega_0)<\epsilon$ and $R+6\leq \rho\leq 2R$. By the solution of the classical isoperimetric problem on the standard sphere, we may assume that the part of $\Omega_0$ which lies outside $\mathbb{B}_\rho$ has diameter uniformly bounded independently of $R$ and $\rho$. Therefore, by the same reasoning as above, we may translate the part of $\Omega_0$ which lies outside $\mathbb{B}_\rho$ entirely into $\mathbb{B}_{R}\setminus \mathbb{B}_{\frac{R}{2}}$ and obtain a new domain, denoted by $\Omega_2$, which may have several connected components and is contained in $\mathbb{B}_{R}$. Note that $g$ is asymptotically flat. We see that the volume of $\Omega_2$ and the area of the boundary of $\Omega_2$ with respect to $g$ are very close to those of $\Omega_0$. By a perturbation of $\Omega_2$ if necessary, we get a domain in $\Omega$, still denoted by $\Omega_2$, with $Vol_{\bar g}(\Omega_2)=Vol_g(\Omega_2)=v_1$. We again get $A_{ext}(v_1) \leq Area_{\bar g}(\partial \Omega_2)\leq Area_{\bar g}(\Sigma_0)+2\epsilon$, which is a contradiction to (\ref{ineq1}), provided $\epsilon >0$ is small enough. Therefore, $Vol_{\bar g}(\Omega_0)> v_1$, and hence, as claimed, $\Sigma_0\setminus \partial M_{ext}$ is a stable minimal surface in $\bar M$.
Finally, we want to prove that the minimal surface $\Sigma_0$ is contained in $\mathbb{B}_{R+1}$ when $R$ is large enough; in particular, it is in $\Omega$. Indeed, for any $x\in \Sigma_0 \setminus \mathbb{B}_{R+1}$, note that $(M,g)$ is AF; thus, we may assume that $\iota >\frac R2$ and $K\leq C R^{-3}$ outside $\mathbb{B}_{R+1}$. By (\ref{area}), we have that
$$
Area_{\bar g} (\Sigma_0)\geq C R^2,
$$
where $C$ is a constant independent of $R$ (indeed, here $r=\min\{\frac d4,\iota\}$ is at least a fixed multiple of $R$, while $K\tau\leq CR^{-2}$ is small on $[0,r]$, so the right-hand side of (\ref{area}) is comparable to $\pi r^2$, hence at least a fixed multiple of $R^2$). However, by (\ref{ineq1}), we see that this is a contradiction when $R$ is sufficiently large. Thus, $\Sigma_0\setminus \partial M_{ext}$ is contained in $\Omega$. Without loss of generality, we may assume that $M_{ext}$ is foliated by spheres of positive mean curvature; it follows that there are no minimal surfaces contained in $M_{ext}$. This finishes the proof of the lemma.
\end{proof}
Now, we can prove Theorem \ref{comparisontheorem1} and Theorem \ref{rigidity}.
\begin{proof}[Proof of Theorem \ref{comparisontheorem1} and Theorem \ref{rigidity}] For any $v>0$, we may choose a sufficiently large $\rho=\rho(v)$; for any $x\in M\setminus B_{\rho}(0)\subset M_{ext}$ we consider the IMCF (\ref{imcf}) with initial condition $\{x\}$. By choosing $\rho >0$ sufficiently large if necessary, we may assume that there is a $G_t$, a domain in the weak solution of (\ref{imcf}) with initial condition $\{x\}$, satisfying $Vol(G_t)>v$ and contained in the interior part of $M_{ext}$. By a direct computation and Lemma \ref{functiontofv}, we see that
$$
\frac{dB}{dv}\leq B^{-\frac12}(\int_{K_t} H^2)^\frac12.
$$
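A sketch of the computation behind this inequality (carried out at the level of the smooth approximations used in Lemma \ref{functiontofv}): along (\ref{imcf}) the area evolves by $\frac{d}{dt}Area(K_t)=\int_{K_t}H\cdot H^{-1}dS=Area(K_t)$, i.e. $\frac{dB}{dt}=B$, so by Lemma \ref{functiontofv},
$$
\frac{dB}{dv}=\frac{dB}{dt}\cdot \frac{dt}{dv}\leq B\cdot \Big(\int_{K_t}H^2 \Big)^{\frac12}(Area(K_t))^{-\frac32}=B^{-\frac12}\Big(\int_{K_t}H^2\Big)^{\frac12}.
$$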
By the definition of the Hawking mass of $K_{t(v)}$, we see that
$$
\int_{K_t} H^2= 16\pi -(16\pi)^\frac32 B^{-\frac12} m(v).
$$
In conjunction with the previous inequality, we obtain (see also Proposition 3 in \cite{BC})
\begin{equation}\label{isoperineq1}
B(v)\leq (36\pi)^{\frac13} \left(\int^v_0\big(1-(16\pi)^{\frac12}B^{-\frac12}(t)\, m(t)\big)^{\frac12}\, dt\right)^{\frac23} .
\end{equation}
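Indeed, combining the two displayed relations (and assuming, for this sketch, that $B$ is locally absolutely continuous in $v$ and that $B(v)\to 0$ as $v\to 0^+$, since the flow starts from a point), we have
$$
\frac{d}{dv}B^{\frac32}=\frac32 B^{\frac12}\frac{dB}{dv}\leq \frac32\Big(\int_{K_t}H^2\Big)^{\frac12}=6\sqrt{\pi}\,\big(1-(16\pi)^{\frac12}B^{-\frac12}(v)\, m(v)\big)^{\frac12},
$$
so integrating from $0$ to $v$ and using $(6\sqrt{\pi})^{\frac23}=(36\pi)^{\frac13}$ yields \eqref{isoperineq1}.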
If $v$ is not a jump volume, then there is a $G_t$ with $Vol(G_t)=v$, so in this case we have
\begin{equation}
\begin{split}
A(v)&\leq A_{ext}(v)\leq Area(K_t)=B(v)\\
&\leq (36\pi)^{\frac13} \left(\int^v_0\big(1-(16\pi)^{\frac12}B^{-\frac12}(t)\, m(t)\big)^{\frac12}\, dt\right)^{\frac23}.
\end{split}\nonumber
\end{equation}
If $v$ is a jump volume, then there is a $G_\tau$ with $v_1 =Vol(G_\tau)<v \leq Vol(G^+_\tau)=v_2$. Hence $t(v)=\tau$, and thus $B(v)=B(v_1)$ and
$$
A(v)\leq A_{ext}(v)\leq A_{ext}(v_2)\leq Area (K^+ _\tau)=Area(K_\tau)=B(v_1)=B(v).
$$
Here we have used Lemma \ref{nondecreasav} in the first inequality. This finishes the proof of Theorem \ref{comparisontheorem1}.
Suppose there is a $v_0>0$ with $A(v_0)=(36\pi)^{\frac13} v_0^{\frac23}$. We claim that in this case $v_0$ is not a jump volume. Suppose not; then we may find $v_1$, $v_2$ with $v_1 <v_0 \leq v_2$, and $Vol(G_{t_1})=v_1$ and $Vol(G^+_{t_1})=v_2$. Since $A_{ext}(v)$ is nondecreasing, we see that $A_{ext}(v_1) \leq A_{ext}(v_0)\leq A_{ext}(v_2)$. However,
$$
A_{ext}(v_1)\leq Area(K_{t_1})\leq (36\pi)^{\frac13} v_1^{\frac23},
$$
$$
A(v_0)=(36\pi)^{\frac13} v_0^{\frac23},
$$
$$
A_{ext}(v_2)\leq Area(K ^+ _{t_1} ) =Area (K _{t_1}).
$$
Combining these inequalities, we see that $v_0\leq v_1$, which is a contradiction. Thus $v_0$ is not a jump volume.
Suppose there is a non-flat point $x\in M\setminus B_{\rho}(0)$. We consider the weak solution of (\ref{imcf}) with initial condition $\{x\}$. By Lemma 8.1 in \cite{HI}, $m(v)>0$ for $v>0$; together with (\ref{isoperineq1}), we see that there is $t>-\infty$ with $Vol(G_t)=v_0$. Thus,
$$
A(v_0)\leq B(v_0)< (36\pi)^{\frac13} v_0^{\frac23},
$$
which is a contradiction. Thus $M\setminus B_{\rho}(0)$ is flat. It follows that the ADM mass of $(M,g)$ is zero, so that $(M,g)$ is flat by the positive mass theorem proved in \cite{SY}; hence $(M,g)=\mathbb{R}^3$. This finishes the proof of Theorem \ref{rigidity}.
\end{proof}
\begin{thebibliography}{999}
\bibitem{B} H.L. Bray, {\it The Penrose inequality in general relativity and volume comparison theorems involving scalar curvature} (thesis), [math.DG] (1998).
\bibitem{BC} S. Brendle \& O. Chodosh, {\it A volume comparison theorem for asymptotically hyperbolic manifolds}, Comm.
Math. Phys. 332 (2014), no. 2, 839-846. MR 3257665
\bibitem{CCE} A. Carlotto, O. Chodosh \& M. Eichmair, {\it Effective versions of the positive mass theorem}, preprint, http://arxiv.org/abs/1503.05910v1 (2015).
\bibitem{CY}D. Christodoulou \& S.-T. Yau, {\it Some remarks on the quasi-local mass, Mathematics and general relativity} (Santa Cruz, CA, 1986), Contemp. Math., vol. 71, Amer. Math. Soc., Providence, RI, 1988, pp. 9-14, MR 954405.
\bibitem{EM1} M. Eichmair \& J. Metzger, {\it Large isoperimetric surfaces in initial data sets}, J. Differential Geom. 94 (2013), 159-186, MR 3031863
\bibitem{EM2} M. Eichmair \& J. Metzger, {\it Unique isoperimetric foliations of asymptotically flat manifolds in all dimensions}, Invent. Math. 194 (2013), 591-630, MR 3127063
\bibitem{FST}X.-Q. Fan, Y.-S. Shi \& L.-F. Tam, {\it Large-sphere and small-sphere limits of the Brown-York mass}, Comm. Anal. Geom. 17 (2009), no. 1, 37-72, MR 2495833.
\bibitem{HI} G. Huisken \& T. Ilmanen, {\it The inverse mean curvature flow and the Riemannian Penrose Inequality}, J. Differential Geom. 59 (2001), 353-437, MR 1916951
\bibitem{Hu} G. Huisken, {\it An isoperimetric concept for mass and quasilocal mass}, Oberwolfach reports 3 (2006), no. 2, 87-88.
\bibitem{MY} W.H. Meeks \& S.-T. Yau, {\it Topology of three-dimensional manifolds and the embedding problems in minimal surface theory}, Ann. of Math. \textbf{112} (1980), 441--484, MR 595203
\bibitem{SY} R. Schoen \& S.-T. Yau, {\it On the proof of the positive mass conjecture in General Relativity}, Comm. Math. Phys. \textbf{65} (1979), 45-76. MR 526976
\bibitem{Si} L. M. Simon, {\it Lectures on Geometric Measure Theory}, Proc. of Centre for Math. Analysis, 3. Austr. Nat. Univ., 1983
\end{thebibliography}
\end{document}
|
\begin{document}
\begin{abstract}
Several important algorithms for machine learning and data analysis use pairwise distances as input. On Riemannian manifolds these distances may be prohibitively costly to compute, in particular for large datasets. To tackle this problem, we propose a distance approximation which requires only a linear number of geodesic boundary value problems to be solved. The approximation is constructed by fitting a two-dimensional model space with constant curvature to each pair of samples. We demonstrate the usefulness of our approach in the context of shape analysis on landmarks spaces.
\end{abstract}
\maketitle
\tableofcontents
\section{Introduction}
\subsection{Context}
Several important algorithms for machine learning and data analysis on manifolds use pairwise distances as an input.
For example, this is the case for multi-dimensional scaling and agglomerative clustering.
The main computational burden is typically not the algorithm per se, but the calculation of the pairwise distance matrix.
Indeed, each individual distance might be the solution of a costly optimization problem, as e.g.\@ in the case of Riemannian shape analysis, and the size of the distance matrix grows quadratically in the number of samples.
This can render distance-based learning prohibitively slow compared to alternative non-geometric methods.
\subsection{Relation to previous work}
This paper builds on the idea of \textcite{yang2011approximations} to use distance approximations for reducing the number of boundary value problems from quadratic down to linear in the number of samples.
The approximations in \cite{yang2011approximations} are based on the Baker--Campbell--Hausdorff formula and are therefore restricted to (quotients of) Lie groups with bi-invariant Riemannian metrics.
In particular, they do not apply to landmark spaces with kernel metrics.
Indeed, the numerical experiments in \cite{yang2011approximations} do not show any improvement of second over first order approximations.
This leads \textcite{yang2015diffeomorphic} to question if such improvements are possible at all.
The present paper answers this question affirmatively.
\subsection{Contribution}
We propose a new second order approximation of the distance function.
The approximation is obtained by fitting a two-dimensional model space with constant curvature to each pair of samples, represented by tangent vectors to the mean.
The reduction to two dimensions is crucial because higher-dimensional model spaces with prescribed sectional curvature and closed-form expressions for geodesic distances are not available, to the best of our knowledge.
As expected, the second-order approximation is more accurate than the first-order one in small-distance regimes, which cover typical applications in shape analysis.
Moreover, in contrast to second-order Taylor polynomials, our approximate distances are always non-negative and have more realistic large-distance asymptotics.
As an application, we present a numerical implementation of our distance approximation on landmark manifolds with kernel metrics and demonstrate its performance on some simple toy data.
\subsection{Outlook}
In the special case of landmark manifolds with kernel metrics, our sectional curvature computations could be sped up using Mario's formula \cite{micheli2012sectional}.
This will allow us to test our algorithm on high-dimensional data from real-world applications.
Pseudo-landmark data will be of particular interest because the small distances of adjacent pseudo-landmarks lead to large sectional curvatures, which highlights the importance of the curvature correction in the second-order distance approximations.
\section{Approximations of pairwise distances}
\label{sec:approximations}
The aim of this section is to construct approximations of the Riemannian distance function that are fast to compute and accurate for sufficiently concentrated sample points.
As an auxiliary tool we first describe Taylor expansions of geodesic distances.
Subsequently, we develop an alternative and better approximation using model spaces with constant curvature.
\subsection{Taylor approximation of squared distances}
\label{sec:taylor}
Recall that the squared Riemannian distance function on any Riemannian manifold is smooth away from the cut locus, including on a neighborhood of the diagonal.
This allows one to use Taylor approximations of squared distances between sufficiently close points.
More specifically, for any two points $y,z$ near $x$, one may calculate the Riemannian logarithms $u=\log_x(y)$ and $v=\log_x(z)$ and approximate the squared distance $\dist(y,z)^2$ by a Taylor polynomial of the function $(u,v) \mapsto \dist(\exp_x(u),\exp_x(v))^2$.
This is made precise in the following lemma, which also provides an explicit expression of the Taylor series up to order five.
\begin{lemma}\label{lem:taylor}
Let $(M,g)$ be a Riemannian manifold with exponential map $\exp$, distance function $\dist$, and curvature tensor $R$, let $x \in M$, and let $u,v \in T_xM$.
Then it holds for sufficiently small $u$ and $v$ that
\begin{equation}\label{eq:taylor}
\dist\big(\exp_{x}(u),\exp_{x}(v)\big)^2
=
\|u-v\|_{g_x}^2-\frac{1}{3}R_x(u,v,v,u)+O(\|u\|+\|v\|)^6.
\end{equation}
\end{lemma}
\begin{proof}
This expansion is well-known even for higher orders; see e.g.\@ \cite{nicolaescu2011jets} or \cite{pennec2018taylor}.
\end{proof}
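As a minimal numerical illustration of the expansion \eqref{eq:taylor} (the function name and calling convention are ours, and the curvature term is assumed to be supplied by the manifold implementation, e.g.\@ via automatic differentiation as in \autoref{sec:shape}), the fourth-order approximation can be evaluated as follows:
\begin{verbatim}
import numpy as np

def taylor_squared_distance(u, v, R_uvvu):
    # Fourth-order approximation of dist(exp_x(u), exp_x(v))^2, cf. eq:taylor:
    #   ||u - v||^2 - R_x(u, v, v, u) / 3,
    # with u, v expressed in a g_x-orthonormal basis and R_uvvu = R_x(u, v, v, u).
    u, v = np.asarray(u, dtype=float), np.asarray(v, dtype=float)
    return float(np.sum((u - v) ** 2) - R_uvvu / 3.0)
\end{verbatim}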
Note that the lowest-order term in the Taylor series \eqref{eq:taylor} is the Euclidean distance in normal coordinates.
The next term corrects for the influence of curvature and, by definition, improves the accuracy for small distances.
However, for large distances, it may worsen the accuracy, can lead to negative signs, and has unrealistic quartic growth; cf.\@ \autoref{fig:curvature}.
These problems are even worse for higher order Taylor expansions, which involve covariant derivatives of the curvature tensor.
To address these problems, we propose an alternative approximation in the next section.
\begin{figure}
\caption{Approximate distances between points $\exp_x(tu)$ and $\exp_x(tv)$ on a manifold with sectional curvature at $x$ given by $k_x(u,v) \in \{-1,-\tfrac12,0,\tfrac12,1\}$.}
\label{fig:curvature}
\end{figure}
\subsection{Approximation by constant curvature spaces}
\label{sec:curvature}
This section corrects some shortcomings of the Taylor approximation in the previous section.
The idea is to replace the Taylor polynomial \eqref{eq:taylor} by the squared distance function of a suitably selected constant curvature space.
This guarantees non-negativity and leads to more realistic large-distance asymptotics while retaining the same order of accuracy.
A visual comparison of the two approximations is presented in \autoref{fig:curvature}.
In more detail, the proposed approximation works as follows.
For any points $y,z$ near $x \in M$, calculate the Riemannian logarithms $u=\log_x(y)$ and $v=\log_x(z)$ and sectional curvature $k_x(u,v)$, find a surface with constant curvature equal to $k_x(u,v)$, and approximate the squared distance between $y$ and $z$ by the squared distance function of the constant curvature surface.
This is made precise in the following theorem.
\begin{theorem}\label{thm:curvature}
Let $(M,g)$ be a Riemannian manifold with Riemannian distance function $\dist$ and sectional curvature $k$,
let $x \in M$,
let $u,v\in T_xM$,
and let $(\tilde M,\tilde g)$ be a 2-dimensional constant curvature space with $\tilde x \in \tilde M$ and $\tilde u,\tilde v \in T_{\tilde x}\tilde M$ such that
\begin{align*}
g_x(u,u)=\tilde g_{\tilde x}(\tilde u,\tilde u),
\quad
g_x(v,v)=\tilde g_{\tilde x}(\tilde v,\tilde v),
\quad
g_x(u,v)=\tilde g_{\tilde x}(\tilde u,\tilde v),
\quad
k_x(u,v)=\tilde k_{\tilde x}(\tilde u,\tilde v).
\end{align*}
Then it holds for sufficiently small $u$ and $v$ that
\begin{equation}\label{eq:curvature}
\dist\big(\exp_{x}(u),\exp_{x}(v)\big)^2
=
\widetilde\dist\big(\exp_{\tilde x}(\tilde u),\exp_{\tilde x}(\tilde v)\big)^2+O(\|u\|+\|v\|)^6.
\end{equation}
\end{theorem}
\begin{proof}
The conditions ensure that $R_x(u,v,v,u)=\tilde R_{\tilde x}(\tilde u,\tilde v,\tilde v,\tilde u)$.
Therefore, the functions
\begin{align*}
(u,v)&\mapsto \dist\big(\exp_{x}(u),\exp_{x}(v)\big)^2,
&
(\tilde u,\tilde v) &\mapsto \widetilde\dist\big(\exp_{\tilde x}(\tilde u),\exp_{\tilde x}(\tilde v)\big)^2
\end{align*}
have the same Taylor expansion up to order 5 by \autoref{lem:taylor}.
\end{proof}
The approximation in \autoref{thm:curvature} can be computed efficiently thanks to the following closed-form expression of the geodesic distance on constant curvature surfaces (i.e., the sphere, plane, and hyperbolic plane).
\begin{lemma}
\label{lem:const_curv}
Let $(M, g)$ be a 2-dimensional manifold with constant sectional curvature $k\in\mathbb R$,
let $r=\mathbbm 1_{\{k\neq 0\}} |k|^{-1/2}$,
let $x \in M$,
let $\langle\cdot,\cdot\rangle$ and $\|\cdot\|$ denote the Riemannian metric and norm on $T_xM$, respectively,
let $u, v \in T_xM$,
and let $\phi \in \mathbb R$ satisfy $\langle u,v\rangle=\|u\|\|v\|\cos\phi$. Then
\begin{align*}
\hspace{2em}&\hspace{-2em}
\dist\big(\exp_x(u),\exp_x(v)\big)
\\&=
\left\{\begin{aligned}
&r\arccos\big(\cos\|u/r\|\cos\|v/r\|+\sin\|u/r\|\sin\|v/r\|\cos\phi\big),&&k>0,\\
&\|u-v\|,&& k=0,\\
&2r \arctanh\sqrt{1-\frac{2}{1+\cosh\|u/r\|\cosh\|v/r\|-\sinh\|u/r\|\sinh\|v/r\|\cos\phi}},&&k<0,
\end{aligned}\right.
\end{align*}
\end{lemma}
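The case distinction of \autoref{lem:const_curv} is straightforward to evaluate numerically; the following sketch (names are ours) takes the norms $\|u\|$, $\|v\|$, the cosine of the angle $\phi$, and the curvature $k$ as inputs:
\begin{verbatim}
import numpy as np

def model_space_distance(norm_u, norm_v, cos_phi, k):
    # Geodesic distance between exp_x(u) and exp_x(v) on the 2D model space
    # of constant curvature k, given ||u||, ||v|| and cos(phi).
    if k == 0.0:
        # Euclidean law of cosines
        return np.sqrt(max(norm_u**2 + norm_v**2
                           - 2.0 * norm_u * norm_v * cos_phi, 0.0))
    r = 1.0 / np.sqrt(abs(k))
    if k > 0:
        c = (np.cos(norm_u / r) * np.cos(norm_v / r)
             + np.sin(norm_u / r) * np.sin(norm_v / r) * cos_phi)
        return r * np.arccos(np.clip(c, -1.0, 1.0))
    # k < 0: hyperbolic law of cosines, written via arctanh as in the lemma
    c = (np.cosh(norm_u / r) * np.cosh(norm_v / r)
         - np.sinh(norm_u / r) * np.sinh(norm_v / r) * cos_phi)
    return 2.0 * r * np.arctanh(np.sqrt(max(1.0 - 2.0 / (1.0 + c), 0.0)))
\end{verbatim}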
In contrast to the Taylor approximations in \autoref{lem:taylor}, the constant-curvature approximations in \autoref{thm:curvature} behave well for large distances: they have asymptotically linear growth along geodesics emanating from the same point.
This is made precise in the following lemma.
\begin{lemma}
\label{lem:growth}
The distances in \autoref{lem:const_curv} have the following (sub)-linear growth rates:
\begin{align*}
\lim_{t\to\infty} \frac{\dist\big(\exp_x(tu),\exp_x(tv)\big)}{t}
=
\left\{\begin{aligned}
& 0, && k>0,
\\
& \|u-v\|, && k=0,
\\
&\|u\|+\|v\|, && k<0, \cos(\phi)\neq 1,
\\
&\big|\|u\|-\|v\|\big|,&& k<0, \cos(\phi)=1.
\end{aligned}\right.
\end{align*}
\end{lemma}
\begin{proof}
As the statement is clear for spherical and Euclidean distances, as well as for points moving along the same geodesic, the only non-trivial case is $k<0$, $u\neq 0$, $v\neq 0$, and $\cos(\phi)\neq 1$.
Let $D(t)$ be the denominator in the expression for the hyperbolic distance, i.e.,
\begin{align*}
D(t)
&=
1+\cosh\|tu/r\|\cosh\|tv/r\|-\sinh\|tu/r\|\sinh\|tv/r\|\cos\phi
\\&=
1+\cosh\big(\|tu/r\|-\|tv/r\|\big)+(1-\cos\phi)\sinh\|tu/r\|\sinh\|tv/r\|.
\end{align*}
The latter formula shows that $D(t)$ tends to infinity for large $t$.
Accordingly, the distance $\dist\big(\exp_x(tu),\exp_x(tv)\big)$ tends to infinity for large $t$.
This justifies the use of l'H\^opital's rule, and one obtains
\begin{align*}
\hspace{2em}&\hspace{-2em}
\lim_{t\to\infty} \frac1t \dist\big(\exp_x(tu),\exp_x(tv)\big)
=
\lim_{t\to\infty} \frac{d}{dt} 2r \arctanh\sqrt{1-\frac{2}{D(t)}}
=
\lim_{t\to\infty} \frac{r D'(t)}{D(t)},
\end{align*}
provided that the limit on the right-hand side exists.
One easily calculates
\begin{equation*}
rD'(t)
=
(\|u\| - \|v\| \cos\phi) \cosh\|tv/r\| \sinh\|tu/r\| + (\|v\| - \|u\| \cos\phi) \cosh\|tu/r\| \sinh\|tv/r\|.
\end{equation*}
Approximating all $\cosh$ and $\sinh$ by $\exp$, one obtains
\begin{align*}
\lim_{t\to\infty} \frac{r D'(t)}{D(t)}
&=
\lim_{t\to\infty} \frac{(\|u\| + \|v\|)(1- \cos\phi) \exp(\|tu/r\|+\|tv/r\|)}{1+\exp(\|tu/r\|+\|tv/r\|)(1-\cos\phi)}
=
\|u\| + \|v\|.
\qedhere
\end{align*}
\end{proof}
As an alternative to \autoref{thm:curvature}, it would be tempting to use $m$-dimensional instead of two-dimensional model spaces.
This would lead to approximate distances of the same order of accuracy with the additional benefit that the triangle inequality holds.
However, it is difficult to find model spaces with generically prescribed sectional curvatures at a point.
Moreover, the geodesic distance on such model spaces, if they exist, might not have a closed-form expression.
This points to the advantage of fitting model spaces separately for each pair of samples, as done in \autoref{thm:curvature}.
\section{Applications to shape analysis}
\label{sec:shape}
This section demonstrates the usefulness of distance approximations in the context of shape analysis.
More specifically, we consider shape analysis on landmark spaces with kernel metrics.
These metrics are widely used and provide an intuitive notion of similarity.
As the calculation of the Riemannian distance on these spaces is computationally intensive, there is a high potential for significant speed-ups via distance approximations.
\subsection{Landmark spaces with kernel metrics}
\label{sec:landmarks}
For any $d,m\in\mathbb N_{>0}$, landmark space $\Land^m(\mathbb R^d)$ is the set of all configurations of $m$ distinct points in $\mathbb R^d$.
Landmark space is an open subset of $\mathbb R^{d\times m}$ and therefore a manifold.
Any kernel $k\colon\mathbb R^d\times\mathbb R^d\to \mathbb R^{d\times d}$ defines a Riemannian co-metric on landmark space via
\begin{align*}
K_q(p,p) = \sum_{i,j=1}^m (p^i)^\top k(q^i,q^j)p^j\in\mathbb R,
\qquad
q\in \Land^m(\mathbb R^d), p \in T^*_q \Land^m(\mathbb R^d).
\end{align*}
The corresponding Riemannian metric is denoted by $G$.
If $k$ is the kernel of a reproducing kernel Hilbert space of vector fields on $\mathbb R^d$, then $k$ can be interpreted as a right-invariant co-metric on the diffeomorphism group $\Diff(\mathbb R^d)$ such that for any landmark configuration $\bar q \in \Land^m(\mathbb R^d)$, the following map is a Riemannian submersion:
\begin{equation*}
(\Diff(\mathbb R^d),k) \ni \phi \mapsto \big(\phi(\bar q^1),\dots,\phi(\bar q^m)\big) \in (\Land^m(\mathbb R^d),K).
\end{equation*}
In this sense the co-metric on landmark space is induced by a co-metric on the diffeomorphism group on ambient space.
A typical choice of kernel is the Gaussian kernel, which is given by
\begin{align*}
\forall x,y\in\mathbb R^d: \qquad k(x,y)=\exp\left(-\frac{\|x-y\|^2}{2\sigma^2}\right)I^{d\times d},
\end{align*}
where $\sigma\in(0,\infty)$ and $I^{d\times d}$ denotes the $d\times d$ identity matrix.
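For concreteness, the co-metric matrix of the Gaussian kernel can be assembled as follows (a sketch with illustrative names; the metric $G_q$ is the inverse of the returned matrix):
\begin{verbatim}
import numpy as np

def gaussian_cometric_matrix(q, sigma):
    # q: array of shape (m, d) with the landmark positions.
    # Returns the (m*d) x (m*d) matrix representing the co-metric K_q for the
    # Gaussian kernel k(x, y) = exp(-|x - y|^2 / (2 sigma^2)) I; the metric
    # G_q is the inverse of this matrix.
    m, d = q.shape
    sq_dists = np.sum((q[:, None, :] - q[None, :, :]) ** 2, axis=-1)  # (m, m)
    k = np.exp(-sq_dists / (2.0 * sigma ** 2))
    # Block structure: K[(i, a), (j, b)] = k(q_i, q_j) * delta_{ab}
    return np.kron(k, np.eye(d))
\end{verbatim}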
\subsection{Computation of sectional curvatures}
The sectional curvatures on general Riemannian manifolds can be computed numerically by taking advantage of the automatic differentiation features of modern machine learning software, as demonstrated by \textcite{kuehnel2017computational}.
This is also the approach we follow in our numerics.
Alternatively, in the specific case of landmark manifolds with kernel metrics, the sectional curvatures could also be computed directly thanks to Mario's formula, which provides closed-form expressions via the relation to the sectional curvatures on $\Diff(\mathbb R^d)$, as described by \textcite{micheli2012sectional}.
If the kernel metric on $\Diff(\mathbb R^d)$ was bi-invariant, which it is not, then the Riemannian exponential would coincide with the Lie group exponential, and the sectional curvatures could be computed using the Baker--Campbell--Hausdorff formula as suggested by \textcite{yang2011approximations}.
The computational complexity of the curvature computations is as follows.
Consider a dataset of $n$ landmark configurations $\{q_1,\dots,q_n\}$ in $\Land^m(\mathbb R^d)$, where the dimension $d$ is treated as a constant.
Then the initial registration to some fixed template configuration $\bar q$, i.e., the computation of $v_i=\log_{\bar q}(q_i)$ for each $i \in \{1,\dots,n\}$, has complexity $O(m^2n)$.
Most importantly, only a linear number $n$ of boundary value problems has to be solved.
Subsequently, the approximate pairwise distances can be computed at complexity $O(m^4n^2)$ from the full Riemannian curvature tensor or at complexity $O(m^2n^2)$ using Mario's formula.
While this quadratic-in-$n$ complexity is unavoidable when fully sampled distance matrices are required, the constants in the complexity bound can be very good.
For example, if $m$ is sufficiently small to allow the computation of the full Riemannian curvature tensor at $\bar q$, then each sectional curvature can be computed by a few matrix-times-vector operations, and the corresponding approximate distances \eqref{eq:curvature} by some additional trigonometric function evaluations.
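A sketch of the overall procedure, with placeholder callables for the manifold-specific ingredients (the inner product and the sectional curvature at the template $\bar q$, and a model-space distance such as the one sketched in \autoref{sec:curvature}), looks as follows:
\begin{verbatim}
import numpy as np

def approximate_pairwise_distances(logs, metric, sectional_curvature,
                                   model_distance):
    # logs: list of n tangent vectors v_i = log_{q_bar}(q_i) at the template
    #       (the only step requiring n geodesic boundary value problems)
    # metric(u, v): inner product g_{q_bar}(u, v)
    # sectional_curvature(u, v): k_{q_bar}(u, v)
    # model_distance(norm_u, norm_v, cos_phi, k): constant-curvature distance
    n = len(logs)
    dist = np.zeros((n, n))
    for i in range(n):
        for j in range(i + 1, n):
            u, v = logs[i], logs[j]
            nu, nv = np.sqrt(metric(u, u)), np.sqrt(metric(v, v))
            cos_phi = metric(u, v) / (nu * nv) if nu > 0 and nv > 0 else 1.0
            k = sectional_curvature(u, v)
            dist[i, j] = dist[j, i] = model_distance(nu, nv, cos_phi, k)
    return dist
\end{verbatim}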
\subsection{Numerical experiments}
We tested our distance approximation on some toy datasets of landmark configurations.
More extensive tests on larger and higher-dimensional real-world datasets are intended in future work.
\begin{figure}
\caption{Random shapes $q_i=\exp_{\bar q}(v_i)$.}
\label{fig:trapezoid_a}
\caption{Random shapes $q_i=\exp_{\bar q}(v_i)$.}
\label{fig:trapezoid_b}
\caption{Histogram of true minus approximate distances for the shapes in \subref{fig:trapezoid_a}.}
\label{fig:histograms3}
\caption{Histogram of true minus approximate distances for the shapes in \subref{fig:trapezoid_b}.}
\label{fig:histograms4}
\caption{Histogram of the sectional curvatures $k_{\bar q}(v_i,v_j)$.}
\label{fig:trapezoid_e}
\caption{Multi-dimensional scaling based on true ($\circ$) and approximate (colored) distances.}
\label{fig:trapezoid_f}
\caption{Improved accuracy of second-order (orange) over first-order (blue) distance approximations.}
\label{fig:trapezoid}
\end{figure}
A first observation of our numerical experiments is that the second-order approximation via constant-curvature spaces outperforms the first-order one, as shown in \autoref{fig:trapezoid} for standard normally distributed landmark data.
In this example the first-order approximations have a negative bias because the sectional curvatures are on average negative, but this bias is corrected by the second-order approximation.
This confirms our theoretical predictions and stands in contrast to the results of \cite{yang2011approximations}, where an alternative second-order term did not lead to improved accuracy.
Our findings are robust with respect to the variance of the noise and the width of the kernel: in all cases considered, the second-order approximation errors were smaller in mean and variance than the first-order ones.
A second observation is that even for moderately large datasets of approximately 15--20 shapes, the distance approximations are faster to compute than the true distances, despite the overhead of the curvature computations.
This holds true despite the fact that our numerics do not yet rely on the explicit curvature formulas of \textcite{micheli2012sectional}, which provide potential for further improvement.
A third observation is that the second-order correction can be highly beneficial to distance-based learning.
Indeed, \autoref{fig:trapezoid_f} shows that multi-dimensional scaling based on true distances is nearly identical to multi-dimensional scaling based on second-order approximations, whereas the result based on first-order approximations is significantly off.
\printbibliography
\end{document}
|
\begin{document}
\title{Data-pattern tomography of entangled states}
\author{Vadim Reut}
\affiliation{
\small{B. I. Stepanov Institute of Physics, National Academy of Science of Belarus, Nezavisimosti Ave. 68, Minsk 220072 Belarus}\\}
\affiliation{\small{Department of Theoretical Physics and Astrophysics, Belarusian State University, Nezavisimosty Ave. 4, Minsk 220030 Belarus}\\}
\author{Alexander Mikhalychev}
\affiliation{
\small{B. I. Stepanov Institute of Physics, National Academy of Science of Belarus, Nezavisimosti Ave. 68, Minsk 220072 Belarus}\\}
\author{Dmitri Mogilevtsev}
\affiliation{
\small{B. I. Stepanov Institute of Physics, National Academy of Science of Belarus, Nezavisimosti Ave. 68, Minsk 220072 Belarus}\\}
\date{\today}
\begin{abstract}
We discuss the data-pattern tomography for reconstruction of entangled states of light. We show that for a moderate number of probe coherent states it is possible to achieve high accuracy of representation not only for single-mode states but also for two-mode entangled states. We analyze the stability of these representations to the noise and demonstrate the conservation of the purity and entanglement. Simulating the probe and signal measurements, we show that systematic error inherent for representation of realistic signal response with finite sets of responses from probe states still allows one to infer reliably the signal states preserving entanglement.
\end{abstract}
\maketitle
\section{\label{sec:intro}Introduction}
Quantum tomography as a way of inferring a quantum state is potentially the most precise measuring tool available to a physicist \cite{paris0,nielsen_chuang,yongsiahbook}. However, this tool requires rather precise tuning. One needs to know characteristics of the measurement setup which necessarily involves a calibration of it. Generally, it is quite a nontrivial task equivalent to quantum process tomography of the detecting system. Provided that one can describe the setup with the few-parameter model (such as efficiency and dark count rate of detectors, etc.), it is possible in some cases to perform the calibration without a complete set of known probes by trading some information about the probe for knowledge about the detector. For example, with a twin-photon state, one can find the absolute value of the detecting setup efficiency ~\cite{klyshko-1980,malygin-1981}; entanglement also makes possible ``self-testing'' or ``blind tomography''~\cite{scarani-2012,scarani-2014}. Trading knowledge of probes (preferably of the most general nature, such as Gaussianity) for information about the measurement gave rise to the concept of self-sufficient, or self-calibrating tomography \cite{mogilevtsev-2009,mogilevtsev-2010calibration,branczyk-2012,mogilevtsev-2012,stark2016}.
However, there is a possibility of skipping the calibration stage altogether. This possibility is given by the data-pattern tomography~\cite{mogilevtsev-2010qt,mogilevtsev-2013}. The idea of this method is somewhat similar to that of optical image analysis with a known optical response function~\cite{gonzalez-2002}. An observer measures responses (the data patterns) for a set of known quantum probe states and matches them with the response obtained from the unknown signal of interest. The data-pattern tomography can also be understood as a search for the optimal state estimator over the subspace that is spanned by the probe states. This approach is naturally insensitive to imperfections of the measurement setup since all device imperfections are automatically incorporated into and accounted for by the measured data patterns. The data-pattern scheme was recently successfully realized with few-photon signals and coherent probes and was shown to be quite robust~\cite{cooper-2014,harder-2014}.
The efficiency of data-pattern tomography depends essentially on the choice of the basis set of probe states. It is highly desirable to use the smallest possible number of basis states. If the observer believes that the signal state is very likely residing in some operator subspace, he or she can make use of this insight to define the set of probe states that spans this subspace for data-pattern reconstruction~\cite{mogilevtsev-2010qt,mogilevtsev-2013}. Naturally, the accuracy of the method depends crucially on the accuracy of the signal representation. Systematic error intrinsic in the method is unavoidably amplified in the process of the signal inference since both probe and signal patterns are subject to statistical errors.
Here we show that such an amplification of errors would not lead to breaking of essentially quantum features of the state. Such a fragile feature as the entanglement survives the inference procedure, and for sufficiently high accuracy of the representation the fidelity of the signal reconstructions for the large number of signal state copies tends to the values close to the fidelity given by the representation. Our simulation shows that this number of copies stands well within the region of experimental feasibility. Moreover, entanglement is not broken even for a comparatively low number of signal copies, when infidelity is several times higher than the infidelity of the representation (which is rather remarkable if one takes into account the main feature of the data-pattern approach: the density matrix of the signal is approximated by the mixture of nonorthogonal projectors). So the data-pattern scheme is quite feasible and reliable also for the reconstruction of multimodal states. Also, we show that the representation of the signal in terms of the classical probe basis is quite robust with respect to the noise of the representation weights.
The outline of the article is as follows. In Sec.~\ref{sec:basis} we review the basics of the data-pattern scheme and discuss the selection criteria of optimal basis sets. After that, we analyze different basis sets of coherent states for the representation of single-mode states as the initial simple problem and entangled double-mode states with a small average number of photons. Next, in Sec.~\ref{sec:analysis}, the quality of such expansions based on the sets with optimal parameters is discussed. We analyze the stability of this procedure to the noise and evaluate entanglement and the purity of the represented states. Last, but not least, in Sec.~\ref{sec:reconstruction} we present simulations of the procedure of data-pattern reconstruction using optimal sets of coherent projectors for single-mode and entangled double-mode states and demonstrate the survival of entanglement.
\section{\label{sec:basis}Optimal basis sets}
In this section, we consider the representation of optical quantum states based on the discrete basis set in data-pattern tomography. From a practical point of view, it is essential to find optimal basis sets with the minimum possible number of coherent projectors for the representation. However, to our knowledge, the optimization of the basis choice has not yet been fully discussed and analyzed (here one can point to only two preliminary recent works \cite{mogilevtsev-2013,mogilevtsev-2014}). In experiments~\cite{cooper-2014,harder-2014} the reconstruction was done by considering only a set of probe states that was \textit{a priori} deemed sufficiently large (from $48$ to $150$). We analyze the applicability of different sets of probe states for data-pattern tomography. For the chosen basis set, the efficiency of the reconstruction could be enhanced using, for example, the adaptive Bayesian procedure \cite{holsby, straupe1,straupe2,mikhalychev-2015}.
\subsection{\label{sec:basis:principles}General principles}
First of all, let us consider the general principles of the data-pattern scheme. We assume that there is an appropriately chosen finite set of probe states which can be described by the density operators $\sigma_\xi$, where $\xi=1,\dots{},M$. We would like to reconstruct the true signal state described by the density operator $\rho$. The key point of the discussion is the possibility to fit the signal $\rho$ with a mixture of probes,
\begin{equation}
\label{eq:expansion}
\rho\approx\rho^{Appr}=\sum_{\xi=1}^{M}x_{\xi}\sigma_{\xi},
\end{equation}
where $x_{\xi}$ are real coefficients. In order to proceed with the reconstruction, an observer carries out a number of measurements on the unknown signal state $\rho$ and a predefined set of probe states $\sigma_{\xi}$. The outcome $k$ in the $j$-th measurement can be described by positive operator-valued measure (POVM) elements $\Pi_{jk}$,
\begin{eqnarray}
\label{eq:freqprob}
\begin{array}{lr}
p^{(\xi)}_{jk}=Tr(\Pi_{jk}\sigma_\xi),\\[3pt]
p^{(\rho)}_{jk}=Tr(\Pi_{jk}\rho),
\end{array}
\end{eqnarray}
where $p^{(\xi,\rho)}_{jk}$ are probabilities for the probe $\xi$ or signal $\rho$. Such measurements under a finite number of signal and probe copies result in frequency distributions $f^{(\rho)}_{jk}$ and $f^{(\xi)}_{jk}$ (see Fig.~\ref{fig:scheme}), which represent the data patterns for the signal state and the probe states, respectively.
\begin{figure}
\caption{Measurement scheme of data-pattern approach.}
\label{fig:scheme}
\end{figure}
So from Eqs.~\eqref{eq:expansion} and~\eqref{eq:freqprob} one has that the representation coefficients $x_{\xi}$ can be found by fitting the signal data pattern with probe data patterns:
\begin{equation}
\label{eq:freq}
f^{(\rho)}_{jk}\approx\sum_{\xi}x_{\xi}f^{(\xi)}_{jk},
\end{equation}
taking into account the physical constraints imposed on the density operator $\rho^{Appr}$ [$\rho^{Appr}=(\rho^{Appr})^{\dagger}$, $\Tr\rho^{Appr}=1$, $\rho^{Appr}\geq0$]. These constraints imply fulfillment of the following conditions for estimated coefficients:
\begin{equation}
\label{eq:constraints}
x_\xi=x_\xi^*,~\sum_{\xi}x_\xi=1,~\sum_{\xi}x_{\xi}\sigma_\xi\geq0.
\end{equation}
The possibility of accurate data-pattern reconstruction is closely related to the representation~\eqref{eq:expansion}. Having preliminary information or making a reasonable guess about the class of plausible signal states (such as the upper limit on the average photon number) allows one to make the reconstruction using an appropriately chosen set of probe states spanning the required subspace. The problem of the optimal basis-state selection naturally arises. To address this problem, the research presented here follows certain selection criteria imposed on the basis states $\{\sigma_\xi\}$.
First of all, for practical purposes, it is advisable to use probe states that can be easily generated in the laboratory and provide an accurate representation of an unknown signal state. It was shown in previous works on the data-pattern scheme in the single-mode case that the usual coherent states satisfy these practical requirements rather well for a wide class of signal states~\cite{mogilevtsev-2010qt,mogilevtsev-2013}. Notice that some time ago considerable attention was paid to representing nonclassical quantum states in terms of the nonorthogonal basis of pure quantum states (here one can mention, for example, classical works \cite{jansky1,jansky2}). More recently, a number of works on representing entangled states using such coherent bases has appeared (for example, \cite{mikhalychev1,horoshko}). In the current work, we are implementing a quite different approach: a representation of the density matrix of the signal in terms of coherent-state projectors.
Second, it is desirable to use the smallest possible number of probe states in order to minimize computational resources for reconstruction of an unknown signal state. In what follows we use the fidelity $F(\rho,\rho^{Appr})=\Tr\sqrt{\rho^{1/2}\rho^{Appr}\rho^{1/2}}$ as the measure of the quality of representation~\eqref{eq:expansion}~\cite{jozsa-1994}. Aiming for practical applications, we require the accuracy of representation~\eqref{eq:expansion} to be greater than the experimental measurement precision. It is not possible to evaluate the required accuracy precisely, but the errors in recent works~\cite{cooper-2014,harder-2014} on the data-pattern scheme may be taken as the reference point for our estimations. With relative experimental errors on the scale of several percent, we set the required precision of the expansion~\eqref{eq:expansion} an order of magnitude higher. Thus, the criterion of the required accuracy of the representation can be expressed as $F(\rho,\rho^{Appr})\geq0.999$.
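For concreteness, the fidelity can be evaluated numerically as follows (a minimal sketch; the function name is ours):
\begin{verbatim}
import numpy as np
from scipy.linalg import sqrtm

def fidelity(rho, rho_appr):
    # Uhlmann fidelity F = Tr sqrt( sqrt(rho) rho_appr sqrt(rho) )
    s = sqrtm(rho)
    return float(np.real(np.trace(sqrtm(s @ rho_appr @ s))))
\end{verbatim}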
Notice that states that are close in terms of fidelity may have rather different physical properties, as has been shown theoretically and experimentally, for example, in~\cite{paris1,paris2,paris3}. So, when judging the quality of the state representation, we also estimate the purity and the entanglement.
Third, the set of basis states must be suitable for representation of a wide range of quantum states with required precision. In this paper, we consider the broad class of states with small average number of photons which are widely used and applied in quantum cryptography and quantum computing~\cite{nielsen_chuang,gisin-2002}.
Next, we shall analyze the optimal basis sets of coherent projectors for the single-mode case as the initial simple problem. After that, based on the results obtained, we shall consider the case of entangled double-mode states.
\subsection{\label{sec:basis:singlemode}Single-mode case}
First, we consider the selection of the optimal basis sets for the single-mode case as the initial problem. For this case we analyze the expansion for the single-photon state, the coherent state with amplitude $\alpha=0.5$, the even coherent state (the so-called ``Schr\"odinger's kitten'' state) ${\psi\propto{}|\alpha=0.5\rangle+|\alpha=-0.5\rangle}$, and the superposition of the vacuum and the single-photon states.
We assume the basis sets $\{\sigma_\xi=|\alpha_\xi\rangle\langle\alpha_\xi|\}$, where $\alpha_\xi$ are amplitudes of coherent projectors.
To solve the formulated problem we use \textsc{cvx} for \textsc{matlab}, a package for specifying and solving convex programs~\cite{cvx,boyd-2008}. The disciplined convex programming methodology is implemented in this system. It is assumed that one follows certain rules specifying a problem. If we take into account joint concavity of the fidelity $F$ in two arguments~\cite{nielsen_chuang}, it can be verified that the problem of expansion formulated above satisfies the whole set of rules. To improve convergence, we impose additional constraints on the absolute values of the coefficients $x_\xi$: $|x_\xi|\leq{}\Const$. It is most natural to select the discrete sets of coherent states $\{\sigma_\xi=|\alpha_\xi\rangle\langle\alpha_\xi|\}$ by constructing \textit{the square lattice} near the origin on the phase plane with the axes representing the values of the real and imaginary parts of complex amplitudes $\{\alpha_\xi\}$. Let us discuss now the possibility of representing single-mode quantum states based on these states and find the optimal sets, which meet the above-mentioned criteria. The optimization parameters of these basis sets are the number of nodes along each axis $N$ and grid pitch $d$.
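As an illustration, the square lattice of probe amplitudes and the corresponding projectors in a truncated Fock basis can be generated as follows (a sketch; the truncation dimension and the centering of the grid at the origin are our assumptions):
\begin{verbatim}
import numpy as np
from math import factorial

def coherent_projector(alpha, dim):
    # |alpha><alpha| in the Fock basis |0>, ..., |dim-1> (truncation assumed
    # adequate for the small-photon-number states considered here)
    n = np.arange(dim)
    psi = (np.exp(-abs(alpha) ** 2 / 2) * alpha ** n
           / np.sqrt([float(factorial(j)) for j in n]))
    return np.outer(psi, psi.conj())

def square_lattice(N, d):
    # N x N grid of complex amplitudes with pitch d, centred at the origin
    axis = d * (np.arange(N) - (N - 1) / 2)
    return [x + 1j * y for x in axis for y in axis]
\end{verbatim}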
\begin{figure}
\caption{The fidelities of the representation of mixed states ${\rho=p|0\rangle\langle{}0|+(1-p)|1\rangle\langle{}1|}$ for the optimal basis sets of coherent projectors (square lattice and helical grid).}
\label{fig:mixed-single}
\end{figure}
The increase in the accuracy of representation is provided by the increase in the number of nodes $N\times{N}$ (the number of coherent projectors) and the decrease in grid pitch $d$, since the recognition of small-scale details on the phase plane requires higher resolution. On the other hand, filling a large area of the phase plane with a lattice of small $d$ requires an excessive number of basis states. This means that there are some optimal values of the number of nodes along each axis $N$ and grid pitch $d$. The dependencies of the fidelity on the parameters $N$ and $d$ enable us to find the optimal parameters of the square lattice taking into account the criterion of the required accuracy of the representation ($F\geq0.999$). The analysis of the representation~\eqref{eq:expansion} for the above-mentioned signal states enables us to determine optimal parameters in this case: $N\times{}N=6\times{}6$, $d=0.05-0.15$.
Notice that choosing probes not on the simple square grid but in a more sophisticated way may, in fact, lead to better accuracy with a smaller number of probes. For the signal with a small average number of photons we also consider the sets of coherent states chosen in the helical grid on the phase plane \cite{mogilevtsev-2014}. All probe states are equidistant in radius and angle; we optimize the number of nodes $N$, the step of the radial distance $\Delta{r}$, and the step of the angle $\Delta\varphi=2\pi(m/n)$, with $m,n\in{\mathds{N}}$.
The optimization procedure for this set of probes gives the following optimal parameters: $N=17$, $\Delta\varphi=\pi/4$, $\Delta{r}=0.009-0.016$. One can see that the use of the helical grid with optimal parameters requires indeed a smaller number of states in comparison with the square lattice.
The analysis of the representation~\eqref{eq:expansion} given above is for pure states. However, obviously, the expansion~\eqref{eq:expansion} holds for mixed states as well. Let us demonstrate how the accuracy of the representation fares with the mixed states. For this purpose, we analyze the fidelity of the representation for the mixed states ${\rho=p|0\rangle\langle{}0|+(1-p)|1\rangle\langle{}1|}$ ($p\in[0,1]$). Figure~\ref{fig:mixed-single} shows that the fidelities of the representation of mixed states exceed the minimum required accuracy for optimal basis sets of coherent projectors in the cases of the square lattice and the helical grid. This analysis confirms that the method considered works appropriately for mixed states as well.
\subsection{\label{sec:basis:doublemode}Double-mode case}
\begin{figure}
\caption{The fidelity of the representation of entangled double-mode states against the grid pitch $d$ of the square lattice of coherent projectors: (a) the number of nodes along each axis $N=6$; (b) the number of nodes along each axis $N=7$. The predefined double-mode states are ${\psi=\Const\times\big(|\alpha\rangle_1|-\alpha\rangle_2+|-\alpha\rangle_1|\alpha\rangle_2\big)}$ ($\alpha=0.5$) and the Bell-like states listed in the text.}
\label{fig:fidel-double}
\end{figure}
Now let us move to the consideration of the multimode case. Since we are aiming, primarily, to show how the entanglement survives the data-pattern inference, we restrict ourselves to the double-mode case. The representations are analyzed for the following states: ${\psi=\big(|0\rangle_1|1\rangle_2+|1\rangle_1|0\rangle_2\big)/\sqrt{2}}$, ${\psi=\big(|0\rangle_1|0\rangle_2+|1\rangle_1|1\rangle_2\big)/\sqrt{2}}$, $\psi=\Const\times\big(|\alpha\rangle_1|-\alpha\rangle_{2}+ |-\alpha\rangle_1|\alpha\rangle_2\big)$ (${\alpha=0.5}$).
We choose the basis sets ${\{\sigma_\xi=|\alpha_{\xi_1}\rangle_1\langle\alpha_{\xi_1}|\otimes|\alpha_{\xi_2}\rangle_2\langle\alpha_{\xi_2}|\}}$ as the tensor product of the coherent states selected in the nodes of the square lattice on the phase plane. The parameters to optimize are the number of nodes along each axis $N$ and grid pitch $d$ for the two modes.
Accurate representation~\eqref{eq:expansion} of states that are the tensor product of pure single-mode states is possible for the parameters found in the previous section ($N=6$, $d=0.05-0.15$) since ${F(\rho_1\otimes\rho_2,\rho_1^{Appr}\otimes\rho_2^{Appr})}={F(\rho_1,\rho_1^{Appr})F(\rho_2,\rho_2^{Appr})}$~\cite{jozsa-1994}. Intuitively, in the two-mode case it is natural to expect some nontrivial complication due to the presence of entanglement. We analyze the expansions~\eqref{eq:expansion} for the pure entangled states mentioned above in this section. The results presented in Fig.~\ref{fig:fidel-double} indicate that, indeed, the representation of entangled double-mode states requires a greater number of nodes along each axis $N$ than in the single-mode case. This figure demonstrates that a representation with fidelity $F\geq0.999$ is possible for the sets with the following optimal parameters: $N=7$, $d=0.05-0.20$.
It is also useful to demonstrate that the method considered works properly for mixed states in this case. To this end, we consider the expansion of the state with the density operator ${\rho=\frac{1-p}{2}\big(|0\rangle_1|1\rangle_2+|1\rangle_1|0\rangle_2\big)\big(\langle0|_1\langle1|_2+\langle1|_1\langle0|_2\big)+\frac{p}{2}\big(|0\rangle_1|0\rangle_2+|1\rangle_1|1\rangle_2\big)\big(\langle0|_1\langle0|_2+\langle1|_1\langle1|_2\big)}$ ($p\in[0,1]$). An analysis of the fidelities of the representation of these states confirms that optimal basis sets of coherent projectors are appropriate for the accurate expansion of mixed states as well (see Fig.~\ref{fig:mixed-double}).
We close this section by noting that the basis sets of coherent projectors found are applicable for the essentially accurate representation of single-mode and entangled double-mode states with a small average number of photons. We note that the achieved precision of the representation~\eqref{eq:expansion}, $F(\rho,\rho^{Appr})\geq0.999$, indicates that these optimal sets of probe states can reliably represent a wide class of signal states. This circumstance allows us to expect that these sets of coherent projectors may be successfully implemented in data-pattern tomography of single-mode and entangled double-mode states effectively.
\begin{figure}
\caption{The fidelities of the representation of the entangled mixed states ${\rho=\frac{1-p}{2}\big(|0\rangle_1|1\rangle_2+|1\rangle_1|0\rangle_2\big)\big(\langle0|_1\langle1|_2+\langle1|_1\langle0|_2\big)+\frac{p}{2}\big(|0\rangle_1|0\rangle_2+|1\rangle_1|1\rangle_2\big)\big(\langle0|_1\langle0|_2+\langle1|_1\langle1|_2\big)}$ for the optimal basis sets of coherent projectors.}
\label{fig:mixed-double}
\end{figure}
\section{\label{sec:analysis}Expansion analysis}
In this section we consider the quality of the expansion~\eqref{eq:expansion} using discrete sets of coherent projectors. Since any real measurements are connected with the noise of different sources, it is essential to analyze the stability of the representation using the basis sets considered. After that, we shall analyze the conservation of entanglement for the expansion based on the optimal discrete sets. Last, the purity of the expansions is investigated based on the analysis of their eigenvalues.
\subsection{\label{sec:analysis:stability}Stability}
\begin{figure}
\caption{The fidelities of the representation~\eqref{eq:expansionfluct} against the rms amplitude of the noise in the expansion coefficients for the optimal basis sets of coherent projectors.}
\label{fig:noise}
\end{figure}
Analyzing the stability of the reconstructions using the optimal basis sets, we compare quantum state $\rho$ with the result of the expansion containing fluctuations with normal distribution $N(0,\sigma^2)$ in the coefficients:
\begin{equation}
\label{eq:expansionfluct}
\rho^{Appr\prime}=\sum_{\xi=1}^{M}(x_\xi+\{\text{noise}\})\sigma_\xi.
\end{equation}
Notice that we enforce semipositivity and unit trace of $\rho^{Appr\prime}$. Plots of the fidelity $F\big(\rho,\rho^{Appr\prime}\big)$ against the rms amplitude error indicate that the representations using the optimal sets of coherent states are quite stable for both the single- and double-mode states. The plots for the coherent state with amplitude $\alpha=0.5$ in the single-mode case and for state ${\psi=\big(|0\rangle_1|1\rangle_2+|1\rangle_1|0\rangle_2\big)/\sqrt{2}}$ in the double-mode case are presented in Fig.~\ref{fig:noise}. One can see that the fidelity remains rather high over a quite large range of the rms amplitude error.
\subsection{\label{sec:analysis:entanglement}Entanglement estimation}
It is not obvious that the expansion~\eqref{eq:expansion} based on the set of coherent projectors conserves the entanglement. In order to demonstrate this, we estimate entanglement for the same states represented using the square lattice of basis states.
We use an entanglement witness (EW) to determine whether a state is separable or not. A density operator $\rho$ describes an entangled state iff there exists a Hermitian operator $W$ (called an EW) which detects its entanglement, i.e., $\Tr(W\rho)<0$ and $\Tr(W\sigma_{sep})\geq0$ for all separable $\sigma_{sep}$~\cite{Horodecki1996,brandao-2006}. We calculate the entanglement witness using a method proposed in Ref.~\cite{brandao-2004}. For the calculation of the operator $W$ we use \textsc{cvx}~\cite{cvx,boyd-2008}. Figure~\ref{fig:EW} shows the results of these calculations for the square lattice of basis states with $N=6$ and $N=7$. One can see that all values of the trace $\Tr(W\rho)$ estimated are negative. Thus, the representation~\eqref{eq:expansion} with these discrete sets of coherent projectors conserves entanglement. Figure~\ref{fig:EW}(b) demonstrates that the values of the trace $\Tr{(W\rho^{Appr})}$ for the expansions using optimal basis sets are very close to the values found for the precise density matrices, which indicates the closeness of their entanglements.
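As a complementary, lighter-weight sanity check (this is the PPT/negativity criterion, not the witness construction of Ref.~\cite{brandao-2004} used above), entanglement of a represented two-mode state truncated to $D$ Fock levels per mode can be tested directly from its density matrix:
\begin{verbatim}
import numpy as np

def negativity(rho, dA, dB):
    # PPT-based check for a bipartite state on C^dA (x) C^dB:
    # a strictly positive value certifies entanglement.
    r = rho.reshape(dA, dB, dA, dB)
    rho_pt = r.transpose(0, 3, 2, 1).reshape(dA * dB, dA * dB)  # transpose mode B
    eig = np.linalg.eigvalsh((rho_pt + rho_pt.conj().T) / 2)
    return float(np.sum(np.abs(eig[eig < 0])))
\end{verbatim}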
\begin{figure}
\caption{Plots of the trace $\Tr{(W\rho)}$ for the entangled double-mode states and their representations based on the square lattice of coherent projectors: (a) $N=6$; (b) $N=7$.}
\label{fig:EW}
\end{figure}
\subsection{\label{sec:analysis:purity}Purity}
At the end of this section we analyze the purity of the density matrices $\rho^{Appr}$, which is defined as ${\mu[\rho^{Appr}]=\Tr{(\rho^{Appr})^2}=\sum_{k}\lambda_{k}^2}$ ($\lambda_{k}$ are the eigenvalues of the density matrix $\rho^{Appr}$). We analyze the purity of the density matrices $\rho^{Appr}$ of the states considered earlier. The calculations for the expansions using the optimal basis sets show that the purity is well conserved in the single-mode case. Figure~\ref{fig:purity_entangled} shows the dependence of the purity of the representation of entangled double-mode states against the grid pitch $d$ of the square lattice of coherent projectors with $N=6$ and $N=7$. According to Fig.~\ref{fig:purity_entangled}(b) the expansions using the optimal basis sets are found to conserve the purity with great precision.
\begin{figure}
\caption{The purity of the representation of entangled double-mode states against the grid pitch $d$ of the square lattice of coherent projectors: (a) the number of nodes along each axis $N=6$; (b) the number of nodes along each axis $N=7$.}
\label{fig:purity_entangled}
\end{figure}
In conclusion, we can assert that the optimal basis sets considered in this section are applicable for a highly accurate representation of entangled states with a small average number of photons.
\section{\label{sec:reconstruction}Reconstruction using the optimal basis sets}
In this section, we demonstrate the possibility of accurate data-pattern reconstruction of single-mode and entangled double-mode states using the optimal basis sets of the coherent projectors found. To this end, we shall specify and simulate the set of measurements for this scheme.
\subsection{\label{sec:reconstruction:measurments}Set of measurements}
As a means of demonstrating the applicability of the discrete basis sets of coherent projectors in data-pattern tomography in the simplest and most straightforward way, let us take the intended measurements to be projections onto coherent states for single- and double-mode cases. These measurements are described by the POVM elements $\Pi_{j}=|\beta_j\rangle\langle\beta_j|$ and $\Pi_j=|\beta_{j_1}\rangle_j\langle\beta_{j_1}|\otimes|\beta_{j_2}\rangle_j\langle\beta_{j_2}|$ ($j=1,\dots{},K$) with amplitudes $\beta_j$, $\beta_{j_1}$, $\beta_{j_2}$. For the assumed ideal lossless detection, the probabilities $p^{(\xi)}_{j}$ and $p^{(\rho)}_{j}$ of observing the positive outcome in the $j$th measurement for a probe state $\sigma_{\xi}$ and a signal state $\rho$ are given by Eqs. (\ref{eq:freqprob}).
Amplitudes of the coherent projectors forming the POVM elements $\{\Pi_j\}$ are selected as equidistant phase-space points that form a square lattice in our simulations. We assume that the probabilities of observing every coherent-state setting $j$ for the probe state $\sigma_{\xi}$ and the signal state $\rho$ are measured with a finite number of state copies $N_{rep}$. The experimental frequencies $f^{(\xi)}_{j}$ and $f^{(\rho)}_{j}$ are simulated using a binomial distribution with parameters $N_{rep}$, $p^{(\xi)}_{j}$ and $N_{rep}$, $p^{(\rho)}_{j}$ for the sets of probe states and the signal state, respectively.
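A minimal sketch of this simulation step (names are ours) reads:
\begin{verbatim}
import numpy as np

def simulate_pattern(probabilities, n_rep, seed=0):
    # probabilities: outcome probabilities p_j for one (probe or signal) state
    # returns relative frequencies f_j obtained from n_rep independent copies
    rng = np.random.default_rng(seed)
    return rng.binomial(n_rep, np.asarray(probabilities)) / n_rep
\end{verbatim}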
One is able to carry out the reconstruction minimizing the distance
\begin{equation}
\label{eq:functional}
E[x_\xi]=\sum_{j}\Big(f^{(\rho)}_{j}-\sum_{\xi}x_{\xi}f^{(\xi)}_{j}\Big)^2.
\end{equation}
Minimization of the functional~\eqref{eq:functional} subject to the constraints imposed on the coefficients $\{x_{\xi}\}$ represents the semidefinite convex problem~\cite{boyd-2004} and can be solved using the package \textsc{cvx}~\cite{cvx,boyd-2008}.
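For completeness, here is a sketch of this constrained least-squares fit in Python with CVXPY (as an alternative to the \textsc{cvx} for \textsc{matlab} package we actually use); all names are illustrative, and the probe matrices are assumed real symmetric for simplicity (complex Hermitian projectors can be handled via the standard real embedding):
\begin{verbatim}
import numpy as np
import cvxpy as cp

def fit_data_pattern(f_signal, f_probe, sigmas):
    # f_signal: (K,) measured signal frequencies
    # f_probe:  (M, K) probe frequencies, one row per probe state
    # sigmas:   (M, D, D) probe density matrices in a truncated Fock basis,
    #           assumed here to be real symmetric (e.g. real grid amplitudes)
    M = f_probe.shape[0]
    x = cp.Variable(M)
    rho_appr = sum(x[i] * sigmas[i] for i in range(M))
    rho_sym = (rho_appr + rho_appr.T) / 2          # enforce symmetry structurally
    constraints = [cp.sum(x) == 1, rho_sym >> 0]   # physicality constraints
    objective = cp.Minimize(cp.sum_squares(f_signal - f_probe.T @ x))
    cp.Problem(objective, constraints).solve()
    return x.value
\end{verbatim}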
\begin{figure}
\caption{The fidelities of the reconstruction of single-mode states against the number of state copies $N_{rep}$.}
\label{fig:reconstruction-single}
\end{figure}
\subsection{\label{sec:reconstruction:results}Results}
We are now ready to demonstrate the possibility of accurate data-pattern reconstruction using the discussed optimal sets for the reconstruction of single-mode and entangled double-mode states. Typically, the number of employed measurement settings $K$ is not equal to the number of probe states $M$. However, for the sake of simplicity we let $K=M$ and take the square lattice for the POVM elements $\{\Pi_j\}$ to be the same as that for the set of basis states $\{\sigma_{\xi}\}$.
In the single-mode case we select the amplitudes of probe states and projectors $\{\Pi_j=|\beta_j\rangle\langle\beta_j|\}$ to be in the nodes of the square lattice with the optimal parameters $N=6$, $d=0.15$. Figure~\ref{fig:reconstruction-single} shows examples of fidelities of the single-mode states reconstructed against the number of state copies $N_{rep}$. In the same way we carry out the reconstruction of entangled double-mode states by selecting the amplitudes of the discrete sets of probe states and projectors $\{\Pi_j=|\beta_{j_1}\rangle_j\langle\beta_{j_1}|\otimes|\beta_{j_2}\rangle_j\langle\beta_{j_2}|\}$ as phase-space points that form the square lattice with optimal parameters $N=7$, $d=0.15$. Figure~\ref{fig:reconstruction-double} demonstrates the fidelities of this reconstruction process for entangled double-mode states. Figures~\ref{fig:reconstruction-single} and~\ref{fig:reconstruction-double} show that the fidelities $F(\rho,\rho^{Appr})$ for single-mode and entangled double-mode states are comparably close for reasonably large numbers of state copies ($N_{rep}>10^4$). One can see that an observer is able to reconstruct single-mode or entangled double-mode states with a fidelity that is arbitrarily close to the target fidelity $F(\rho,\rho^{Appr})=0.999$ by increasing the number of state copies $N_{rep}$. So for sufficiently high accuracy of the representation the fidelity of the signal reconstructions for a large number of signal state copies indeed tends to the values close to the fidelity given by the representation. Furthermore, this number of copies stands well within the region of experimental feasibility.
Finally, we analyze the conservation of entanglement for the signal states reconstructed with different numbers of state copies $N_{rep}$. For this purpose, we calculate the entanglement witness $W$ using a method proposed in Ref.~\cite{brandao-2004}. An entanglement witness operator calculated for the precise density matrix $\rho$ does not have to be EW for the states $\rho^{Appr}$ reconstructed with a relatively small number of state copies. Therefore, for the calculation of the entanglement witness $W$ we solve the problem of convex optimization for the density operators $\rho^{Appr}$ reconstructed. Figure~\ref{fig:EW-reconstruction} shows the results of these calculations for the same sets of probe states and projectors as in Fig.~\ref{fig:reconstruction-double} (we select them in the square lattice on the phase plane with parameters $N=7$, $d=0.15$). Figure~\ref{fig:EW-reconstruction} demonstrates that the entanglement survives the inference procedure even for a reasonably small number of state copies $N_{rep}$.
\begin{figure}
\caption{The fidelities of the reconstruction of entangled double-mode states against the number of state copies $N_{rep}$.}
\label{fig:reconstruction-double}
\end{figure}
\begin{figure}
\caption{Plot of the trace $\Tr{(W^{Appr}\rho^{Appr})}$ for the reconstructed entangled double-mode states against the number of state copies $N_{rep}$.}
\label{fig:EW-reconstruction}
\end{figure}
We conclude that using a discrete probe set of appropriately chosen coherent projectors in data-pattern tomography enables us to accurately reconstruct entangled optical quantum states.
\section{\label{sec:conclusions}Conclusions}
We have discussed the data-pattern approach to quantum tomography of entangled states. The efficiency of this procedure depends essentially on the choice of the basis set of the probe states. We substantiated the choice of the discrete set of coherent states by constructing a regular grid of basis states and finding the optimal expansion for given quantum states based on this basis set using the method considered in the paper. We have demonstrated the possibility of accurate representation of entangled states based on these discrete sets of coherent projectors, found the optimal ones for entangled double-mode states as well as single-mode states with a small average number of photons, and demonstrated the robustness of the representation with respect to added noise. The simulations of the reconstruction process demonstrated the feasibility and the effectiveness of data-pattern quantum tomography of entangled states using the discrete basis set of coherent states. The results presented show that the data-pattern approach may become an efficient tool in experimental quantum-state reconstruction of entangled states.\\[5pt]
\section*{ACKNOWLEDGMENTS}
This work was supported by
the National Academy of Sciences of Belarus through the
program “Convergence” and the European Commission through
the SUPERTWIN project (Contract No. 686731).
\begin{thebibliography}{99}
\makeatletter
\providecommand \@ifxundefined [1]{
\@ifx{#1\undefined}
}
\providecommand \@ifnum [1]{
\ifnum #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \@ifx [1]{
\ifx #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \natexlab [1]{#1}
\providecommand \enquote [1]{``#1''}
\providecommand \bibnamefont [1]{#1}
\providecommand \bibfnamefont [1]{#1}
\providecommand \citenamefont [1]{#1}
\providecommand \href@noop [0]{\@secondoftwo}
\providecommand \href [0]{\begingroup \@sanitize@url \@href}
\providecommand \@href[1]{\@@startlink{#1}\@@href}
\providecommand \@@href[1]{\endgroup#1\@@endlink}
\providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode
`\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax}
\providecommand \@@startlink[1]{}
\providecommand \@@endlink[0]{}
\providecommand \url [0]{\begingroup\@sanitize@url \@url }
\providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }}
\providecommand \urlprefix [0]{URL }
\providecommand \Eprint [0]{\href }
\providecommand \doibase [0]{http://dx.doi.org/}
\providecommand \selectlanguage [0]{\@gobble}
\providecommand \bibinfo [0]{\@secondoftwo}
\providecommand \bibfield [0]{\@secondoftwo}
\providecommand \translation [1]{[#1]}
\providecommand \BibitemOpen [0]{}
\providecommand \bibitemStop [0]{}
\providecommand \bibitemNoStop [0]{.\EOS\space}
\providecommand \EOS [0]{\spacefactor3000\relax}
\providecommand \BibitemShut [1]{\csname bibitem#1\endcsname}
\let\auto@bib@innerbib\@empty
\bibitem{paris0}\textit{Quantum State Estimation}, edited by M. Paris, J. {\v{R}}eh{\'a}{\v{c}}ek, Lecture Notes in Physics Vol. 649 (Springer, Berlin, 2004).
\bibitem [{\citenamefont {Nielsen}\ and\ \citenamefont
{Chuang}(2000)}]{nielsen_chuang}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.~A.}\ \bibnamefont
{Nielsen}}\ and\ \bibinfo {author} {\bibfnamefont {I.~L.}\ \bibnamefont
{Chuang}},\ }\href@noop {} {\emph {\bibinfo {title} {Quantum Computation and
Quantum Information}}}\ (\bibinfo {publisher} {Cambridge University Press, Cambridge},\
\bibinfo {year} {2000})\BibitemShut {NoStop}
\bibitem [{\citenamefont {Teo}(2016)}]{yongsiahbook}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.~S.}\ \bibnamefont
{Teo}},\ }\href@noop {} {\emph {\bibinfo {title} {Introduction to
Quantum-State Estimation}}}\ (\bibinfo {publisher} {World Scientific,
Singapore},\ \bibinfo {year} {2016})\BibitemShut {NoStop}
\bibitem [{\citenamefont {Klyshko}(1980)}]{klyshko-1980}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D. N.}~\bibnamefont
{Klyshko}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Quantum Electronics}\ }\textbf {\bibinfo {volume} {10}},\ \bibinfo {pages}
{1112} (\bibinfo {year} {1980})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Malygin}\ \emph {et~al.}(1981)\citenamefont
{Malygin}, \citenamefont {Penin},\ and\ \citenamefont
{Sergienko}}]{malygin-1981}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A. A.}~\bibnamefont
{Malygin}}, \bibinfo {author} {\bibfnamefont {A. N.}~\bibnamefont {Penin}}, \
and\ \bibinfo {author} {\bibfnamefont {A. V.}~\bibnamefont {Sergienko}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Sov. Phys. JETP
Lett}\ }\textbf {\bibinfo {volume} {33}},\ \bibinfo {pages} {477} (\bibinfo
{year} {1981})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Scarani}(2012)}]{scarani-2012}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {V.}~\bibnamefont
{Scarani}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Acta
Phys. Slovaca}\ }\textbf {\bibinfo {volume} {62}},\ \bibinfo {pages} {347}
(\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Yang}\ \emph {et~al.}(2014)\citenamefont {Yang},
\citenamefont {V{\'e}rtesi}, \citenamefont {Bancal}, \citenamefont
{Scarani},\ and\ \citenamefont {Navascu{\'e}s}}]{scarani-2014}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {T.~H.}\ \bibnamefont
{Yang}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {V{\'e}rtesi}},
\bibinfo {author} {\bibfnamefont {J.-D.}\ \bibnamefont {Bancal}}, \bibinfo
{author} {\bibfnamefont {V.}~\bibnamefont {Scarani}}, \ and\ \bibinfo
{author} {\bibfnamefont {M.}~\bibnamefont {Navascu{\'e}s}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\
}\textbf {\bibinfo {volume} {113}},\ \bibinfo {pages} {040401} (\bibinfo
{year} {2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Mogilevtsev}\ \emph {et~al.}(2009)\citenamefont
{Mogilevtsev}, \citenamefont {{\v{R}}eh{\'a}{\v{c}}ek},\ and\ \citenamefont
{Hradil}}]{mogilevtsev-2009}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont
{Mogilevtsev}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{{\v{R}}eh{\'a}{\v{c}}ek}}, \ and\ \bibinfo {author} {\bibfnamefont
{Z.}~\bibnamefont {Hradil}},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {79}},\ \bibinfo
{pages} {020101} (\bibinfo {year} {2009})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Mogilevtsev}(2010)}]{mogilevtsev-2010calibration}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont
{Mogilevtsev}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. A}\ }\textbf {\bibinfo {volume} {82}},\ \bibinfo {pages}
{021807} (\bibinfo {year} {2010})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Bra{\'n}czyk}\ \emph {et~al.}(2012)\citenamefont
{Bra{\'n}czyk}, \citenamefont {Mahler}, \citenamefont {Rozema}, \citenamefont
{Darabi}, \citenamefont {Steinberg},\ and\ \citenamefont
{James}}]{branczyk-2012}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Bra{\'n}czyk}}, \bibinfo {author} {\bibfnamefont {D.~H.}\ \bibnamefont
{Mahler}}, \bibinfo {author} {\bibfnamefont {L.~A.}\ \bibnamefont {Rozema}},
\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Darabi}}, \bibinfo
{author} {\bibfnamefont {A.~M.}\ \bibnamefont {Steinberg}}, \ and\ \bibinfo
{author} {\bibfnamefont {D.~F.}\ \bibnamefont {James}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {New J. Phys.}\ }\textbf
{\bibinfo {volume} {14}},\ \bibinfo {pages} {085003} (\bibinfo {year}
{2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Mogilevtsev}\ \emph {et~al.}(2012)\citenamefont
{Mogilevtsev}, \citenamefont {{\v{R}}eh{\'a}{\v{c}}ek},\ and\ \citenamefont
{Hradil}}]{mogilevtsev-2012}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont
{Mogilevtsev}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{{\v{R}}eh{\'a}{\v{c}}ek}}, \ and\ \bibinfo {author} {\bibfnamefont
{Z.}~\bibnamefont {Hradil}},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {New J. Phys.}\ }\textbf {\bibinfo {volume} {14}},\
\bibinfo {pages} {095001} (\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Stark}(2016)}]{stark2016}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {C.~J.}\ \bibnamefont
{Stark}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Commun.
Math. Phys.}\ }\textbf {\bibinfo {volume} {348}},\ \bibinfo {pages} {1}
(\bibinfo {year} {2016})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {{\v{R}}eh{\'a}{\v{c}}ek}\ \emph
{et~al.}(2010)\citenamefont {{\v{R}}eh{\'a}{\v{c}}ek}, \citenamefont
{Mogilevtsev},\ and\ \citenamefont {Hradil}}]{mogilevtsev-2010qt}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{{\v{R}}eh{\'a}{\v{c}}ek}}, \bibinfo {author} {\bibfnamefont
{D.}~\bibnamefont {Mogilevtsev}}, \ and\ \bibinfo {author} {\bibfnamefont
{Z.}~\bibnamefont {Hradil}},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {105}},\ \bibinfo
{pages} {010402} (\bibinfo {year} {2010})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Mogilevtsev}\ \emph {et~al.}(2013)\citenamefont
{Mogilevtsev}, \citenamefont {Ignatenko}, \citenamefont {Maloshtan},
\citenamefont {Stoklasa}, \citenamefont {\ifmmode \check{R}\else
\v{R}\fi{}eh\'a\ifmmode~\check{c}\else \v{c}\fi{}ek},\ and\ \citenamefont
{Hradil}}]{mogilevtsev-2013}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont
{Mogilevtsev}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Ignatenko}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Maloshtan}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Stoklasa}},
\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {\ifmmode \check{R}\else
\v{R}\fi{}eh\'a\ifmmode~\check{c}\else \v{c}\fi{}ek}}, \ and\ \bibinfo
{author} {\bibfnamefont {Z.}~\bibnamefont {Hradil}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {New J. Phys.}\ }\textbf {\bibinfo
{volume} {15}},\ \bibinfo {pages} {025038} (\bibinfo {year}
{2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Gonzalez}\ and\ \citenamefont
{Woods}(2002)}]{gonzalez-2002}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {R.~C.}\ \bibnamefont
{Gonzalez}}\ and\ \bibinfo {author} {\bibfnamefont {R.~E.}\ \bibnamefont
{Woods}},\ }\href@noop {} {\emph {\bibinfo {title} {Digital Image
Processing}}}\ (\bibinfo {publisher} {Prentice Hall, Upper Saddle River},\
\bibinfo {year} {2002})\BibitemShut {NoStop}
\bibitem [{\citenamefont {Cooper}\ \emph {et~al.}(2014)\citenamefont {Cooper},
\citenamefont {Karpi{\'n}ski},\ and\ \citenamefont {Smith}}]{cooper-2014}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Cooper}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Karpi{\'n}ski}}, \ and\ \bibinfo {author} {\bibfnamefont {B.~J.}\
\bibnamefont {Smith}},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Nat. Commun.}\ }\textbf {\bibinfo {volume} {5}},\
\bibinfo {pages} {4332} (\bibinfo
{year} {2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Harder}\ \emph {et~al.}(2014)\citenamefont {Harder},
\citenamefont {Silberhorn}, \citenamefont {{\v{R}}eh{\'a}{\v{c}}ek},
\citenamefont {Hradil}, \citenamefont {Motka}, \citenamefont {Stoklasa},\
and\ \citenamefont {S{\'a}nchez-Soto}}]{harder-2014}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Harder}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Silberhorn}},
\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{{\v{R}}eh{\'a}{\v{c}}ek}}, \bibinfo {author} {\bibfnamefont
{Z.}~\bibnamefont {Hradil}}, \bibinfo {author} {\bibfnamefont
{L.}~\bibnamefont {Motka}}, \bibinfo {author} {\bibfnamefont
{B.}~\bibnamefont {Stoklasa}}, \ and\ \bibinfo {author} {\bibfnamefont
{L.L.}~\bibnamefont {S{\'a}nchez-Soto}},\ }\href@noop {} {\bibfield {journal}
{\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {90}},\
\bibinfo {pages} {042105} (\bibinfo {year} {2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Motka}\ \emph {et~al.}(2014)\citenamefont {Motka},
\citenamefont {Stoklasa}, \citenamefont {Rehacek}, \citenamefont {Hradil},
\citenamefont {Karasek}, \citenamefont {Mogilevtsev}, \citenamefont {Harder},
\citenamefont {Silberhorn},\ and\ \citenamefont
{S{\'a}nchez-Soto}}]{mogilevtsev-2014}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont
{Motka}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Stoklasa}},
\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Rehacek}}, \bibinfo
{author} {\bibfnamefont {Z.}~\bibnamefont {Hradil}}, \bibinfo {author}
{\bibfnamefont {V.}~\bibnamefont {Karasek}}, \bibinfo {author} {\bibfnamefont
{D.}~\bibnamefont {Mogilevtsev}}, \bibinfo {author} {\bibfnamefont
{G.}~\bibnamefont {Harder}}, \bibinfo {author} {\bibfnamefont
{C.}~\bibnamefont {Silberhorn}}, \ and\ \bibinfo {author} {\bibfnamefont
{L.L.}~\bibnamefont {S{\'a}nchez-Soto}},\ }\href@noop {} {\bibfield {journal}
{\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {89}},\
\bibinfo {pages} {054102} (\bibinfo {year} {2014})}\BibitemShut {NoStop}
\bibitem{holsby} F. Huszar and N. M. T. Houlsby, Phys. Rev. A \textbf{85}, 052120 (2012).
\bibitem{straupe1} K. S. Kravtsov, S. S. Straupe, I. V. Radchenko, N. M. T. Houlsby, F. Huszar, and S. P. Kulik,
Phys. Rev. A \textbf{87}, 062122 (2013).
\bibitem{straupe2} S. S. Straupe, JETP Letters \textbf{104}, 510 (2016).
\bibitem [{\citenamefont {Mikhalychev}\ \emph {et~al.}(2015)\citenamefont
{Mikhalychev}, \citenamefont {Mogilevtsev}, \citenamefont {Teo},
\citenamefont {{\v{R}}eh{\'a}{\v{c}}ek},\ and\ \citenamefont
{Hradil}}]{mikhalychev-2015}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Mikhalychev}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont
{Mogilevtsev}}, \bibinfo {author} {\bibfnamefont {Y.~S.}\ \bibnamefont
{Teo}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{{\v{R}}eh{\'a}{\v{c}}ek}}, \ and\ \bibinfo {author} {\bibfnamefont
{Z.}~\bibnamefont {Hradil}},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {92}},\ \bibinfo
{pages} {052106} (\bibinfo {year} {2015})}\BibitemShut {NoStop}
\bibitem{jansky1} J. Janszky and An. V. Vinogradov, Phys. Rev. Lett. \textbf{64}, 2771 (1990).
\bibitem{jansky2} J. Janszky, P. Domokos, S. Szabo, and P. Adam, Phys. Rev. A \textbf{51}, 4191 (1995).
\bibitem{mikhalychev1} S. Ya. Kilin and A. B. Mikhalychev, Proc. SPIE \textbf{6726}, 67263D (2007);
S. Ya. Kilin and A. B. Mikhalychev, Phys. Rev. A \textbf{83}, 052303 (2011).
\bibitem{horoshko} D. B. Horoshko, S. De Bievre, M. I. Kolobov, and G. Patera, Phys. Rev. A \textbf{93}, 062323 (2016).
\bibitem [{\citenamefont {Jozsa}(1994)}]{jozsa-1994}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont
{Jozsa}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {J.
Mod. Opt.}\ }\textbf {\bibinfo {volume} {41}},\ \bibinfo {pages}
{2315} (\bibinfo {year} {1994})}\BibitemShut {NoStop}
\bibitem{paris1} C. Benedetti, F. Buscemi, P. Bordone, and M. G. A. Paris, Phys. Rev. A \textbf{87}, 052328 (2013).
\bibitem{paris2} M. Bina, A. Mandarino, S. Olivares, and M. G. A. Paris, Phys. Rev. A \textbf{89}, 012305 (2014).
\bibitem{paris3} A. Mandarino, M. Bina, C. Porto, S. Cialdi, S. Olivares, and M. G. A. Paris, Phys. Rev. A \textbf{93}, 062118 (2016).
\bibitem [{\citenamefont {Gisin}\ \emph {et~al.}(2002)\citenamefont {Gisin},
\citenamefont {Ribordy}, \citenamefont {Tittel},\ and\ \citenamefont
{Zbinden}}]{gisin-2002}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {N.}~\bibnamefont
{Gisin}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Ribordy}},
\bibinfo {author} {\bibfnamefont {W.}~\bibnamefont {Tittel}}, \ and\ \bibinfo
{author} {\bibfnamefont {H.}~\bibnamefont {Zbinden}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Rev. Mod. Phys.}\
}\textbf {\bibinfo {volume} {74}},\ \bibinfo {pages} {145} (\bibinfo {year}
{2002})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Grant}\ and\ \citenamefont {Boyd}(2016)}]{cvx}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Grant}}\ and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Boyd}},\
}\href@noop {} {{\bibinfo {title} {\textsc{cvx}: \textsc{matlab} software for
disciplined convex programming, version 2.1},}\ }\bibinfo {howpublished}
{\url{http://cvxr.com/cvx}}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Grant}\ and\ \citenamefont {Boyd}(2008)}]{boyd-2008}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Grant}}\ and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Boyd}},\
}in\ \href@noop {} {\emph {\bibinfo {booktitle} {Recent Advances in Learning
and Control}}},\ \bibinfo {editor} {edited by\ \bibinfo {editor}
{\bibfnamefont {V.}~\bibnamefont {Blondel}}, \bibinfo {editor} {\bibfnamefont
{S.}~\bibnamefont {Boyd}}, \ and\ \bibinfo {editor} {\bibfnamefont
{H.}~\bibnamefont {Kimura}}},\ \bibinfo {series and number} {Lecture Notes in Control and
Information Sciences Vol. 371},\ (\bibinfo {publisher} {Springer, London},\ \bibinfo {year} {2008}),\ pp.\ \bibinfo {pages} {95--110}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Horodecki}\ \emph {et~al.}(1996)\citenamefont
{Horodecki}, \citenamefont {Horodecki},\ and\ \citenamefont
{Horodecki}}]{Horodecki1996}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}\ \bibnamefont
{Horodecki}}, \bibinfo {author} {\bibfnamefont {P.}\ \bibnamefont
{Horodecki}}, \ and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont
{Horodecki}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Phys. Lett. A}\ }\textbf {\bibinfo {volume} {223}},\ \bibinfo {pages} {1 }
(\bibinfo {year} {1996})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Brandao}\ and\ \citenamefont
{Vianna}(2006)}]{brandao-2006}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {F.~G.}\ \bibnamefont
{Brandao}}\ and\ \bibinfo {author} {\bibfnamefont {R.~O.}\ \bibnamefont
{Vianna}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Int. J. Quantum Inf.}\ }\textbf {\bibinfo {volume}
{4}},\ \bibinfo {pages} {331} (\bibinfo {year} {2006})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Brandao}\ and\ \citenamefont
{Vianna}(2004)}]{brandao-2004}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {F.~G. S.~L.}\
\bibnamefont {Brandao}}\ and\ \bibinfo {author} {\bibfnamefont {R.~O.}\
\bibnamefont {Vianna}},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {70}},\ \bibinfo
{pages} {062309} (\bibinfo {year} {2004})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Boyd}\ and\ \citenamefont
{Vandenberghe}(2004)}]{boyd-2004}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Boyd}}\ and\ \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont
{Vandenberghe}},\ }\href@noop {} {\emph {\bibinfo {title} {Convex
Optimization}}}\ (\bibinfo {publisher} {Cambridge University Press, Cambridge},\
\bibinfo {year} {2004})\BibitemShut {NoStop}
\end{thebibliography}
\end{document}
\begin{document}
\title[New curvature tensors along Riemannian submersion]
{New curvature tensors along Riemannian submersions}
\author[M. A. Akyol]{Mehmet Akif Akyol}
\address{Bingol University\\ Faculty of Arts and Sciences,\\ Department of Mathematics\\ 12000, Bing\"{o}l, Turkey}
\email{[email protected]}
\author[G. Ayar]{G\"{u}lhan Ayar}
\address{Karaman Mehmet Bey University\\ Department of Mathematics \\ 70000, Karaman, Turkey}
\email{[email protected]}
\subjclass{Primary 53C15, 53B20}
\keywords{Riemannian submersion, Weyl projective curvature tensor, $M$-projective curvature tensor, concircular curvature tensor, conformal curvature tensor, conharmonic curvature tensor.}
\date{January 1, 2004}
\begin{abstract}
In 1966, B. O'Neill [The fundamental equations of a submersion, Michigan Math. J., Volume 13, Issue 4 (1966), 459-469.] obtained some fundamental equations and curvature relations between the total space, the base space and the fibres of a submersion. In the present paper, we define new curvature tensors along Riemannian submersions, namely the Weyl projective, concircular, conharmonic, conformal and $M$-projective curvature tensors. Finally, for each of these curvature tensors we obtain some results in the case where the Riemannian submersion has totally umbilical fibres.
\end{abstract}
\maketitle
\section{Introduction and Preliminaries}
In differential geometry, the Riemannian curvature tensor is an important tool for describing the curvature of $n$-dimensional spaces such as Riemannian manifolds. This tensor has played an important role in both general relativity and gravity. In this direction, Mishra \cite{mishra} defined some new curvature tensors on Riemannian manifolds, namely the concircular curvature tensor, the conharmonic curvature tensor and the conformal curvature tensor. Taking into account the work of Mishra, Pokhariyal and Mishra defined the Weyl projective curvature tensor on Riemannian manifolds \cite{PM}. Afterwards, Ojha defined the $M$-projective curvature tensor \cite{OJ}.
Curvature tensors also play an important role in physics, and several important studies focus on the geometric and physical properties of the tensors considered in our work. In 1988, conditions on the conharmonic curvature tensor of Kaehler hypersurfaces in complex space forms were analysed by M. Doric et al. \cite{doric1}. Moreover, the relativistic significance of the concircular curvature tensor was studied and explored by Ahsan \cite{Ahsan1}. Finally, in 2018, based on Einstein's geodesic postulate, projectively related connections on a space-time manifold and the closely related Weyl projective tensor were examined in detail by G. Hall \cite{Hall1}.
The differential geometry of Riemannian submersions was first studied by O'Neill in 1966 and by Gray in 1967 \cite{O}. We note that Riemannian submersions have been studied widely not only in mathematics but also in theoretical physics, because of their applications in the Yang-Mills theory, Kaluza-Klein theory, supergravity, relativity and superstring theories (see \cite{BL1}, \cite{BL}, \cite{IV}, \cite{IV1}, \cite{M}, \cite{W1}). Most of the studies related to Riemannian submersions can be found in the books \cite{FIP} and \cite{Sahin}. In his 1966 paper, O'Neill obtained some fundamental equations of a submersion and gave some curvature relations for Riemannian submersions.
In this study, in addition to the curvature relations previously obtained for Riemannian submersions, we investigate new curvature tensors along a Riemannian submersion and the curvature properties of these tensors.
The paper is organized as follows. The remainder of this section collects the basic definitions and theorems that we will use throughout the paper. Sections 2-6 present the relations of the Weyl projective, concircular, conharmonic, conformal and $M$-projective curvature tensors for a Riemannian submersion, respectively. Various results are also obtained by examining the case of totally umbilical fibres.
We now give, without proofs, the basic definitions and theorems that we will use throughout the paper.
\begin{definition}\rm
Let $(M,g)$ and $(N,g_{\text{\tiny$N$}})$ be Riemannian manifolds,
where $dim(M)>dim(N)$. A surjective mapping
$\pi:(M,g)\rightarrow(N,g_{N})$ is called a \emph{Riemannian
submersion}
\cite{O} if:\\
\textbf{(S1)}\quad The rank of $\pi$ equals $dim(N)$.\\
In this case, for each $q\in N$, $\pi^{-1}(q)=\pi_{q}^{-1}$ is a $k$-dimensional
submanifold of $M$ and called a \emph{fiber}, where $k=dim(M)-dim(N).$
A vector field on $M$ is called \emph{vertical} (resp.
\emph{horizontal}) if it is always tangent (resp. orthogonal) to
fibers. A vector field $X$ on $M$ is called \emph{basic} if $X$ is
horizontal and $\pi$-related to a vector field $X_{*}$ on $N,$ i.e.,
$\pi_{*}(X_{p})=X_{*\pi(p)}$ for all $p\in M,$ where $\pi_{*}$ is the derivative (differential) map of $\pi.$
We will denote by $\mathcal{V}$ and $\mathcal{H}$ the projections on the vertical
distribution $ker\pi_{*}$, and the horizontal distribution
$ker\pi_{*}^{\bot},$ respectively. As usual, the manifold $(M,g)$ is called \emph{total manifold} and
the manifold $(N,g_{N})$ is called \emph{base manifold} of the submersion $\pi:(M,g)\rightarrow(N,g_{N})$.\\
\textbf{(S2)}\quad $\pi_{*}$ preserves the lengths of the horizontal vectors.\\
This condition is equivalent to say that the derivative map $\pi_{*}$ of $\pi$, restricted to $ker\pi_{*}^{\bot},$ is a linear
isometry.
\end{definition}
If $X$ and $Y$ are the basic vector fields, $\pi$-related to $X_{N}, Y_{N}$, we have the following facts:
\begin{enumerate}
\item{} $g(X,Y)=g_{N}(X_{N},Y_{N})\circ\pi$,\\
\item{} $h[X,Y]$ is the basic vector field $\pi$-related to $[X_{N},Y_{N}]$,\\
\item{} $h(\nabla_{X}Y)$ is the basic vector field $\pi$-related to ${\nabla^{N}}_{{X}_{N}}Y_{N}$, \\
\item{} for any vertical vector field $V$, $[X,V]$ is vertical.
\end{enumerate}
\indent
The geometry of Riemannian
submersions is characterized by O'Neill's tensors $\mathcal{T}$ and
$\mathcal{A}$, defined as follows:
\begin{equation}\label{e1}
\mathcal{T}_{E}F=v\nabla_{vE}hF+h\nabla_{vE}vF,
\end{equation}
\begin{equation}\label{e2}
\mathcal{A}_{E}F=v\nabla_{hE}hF+h\nabla_{hE}vF
\end{equation}
for any vector fields $E$ and $F$ on $M,$ where $\nabla$ is the
Levi-Civita connection, $v$ and $h$ are orthogonal projections on vertical and horizontal spaces, respectively.
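We also recall, for later use, some standard properties of these tensors (see \cite{O}, \cite{FIP}): for vertical vector fields $U, V$ and horizontal vector fields $X, Y$ one has
\begin{equation*}
\mathcal{T}_{U}V=\mathcal{T}_{V}U, \qquad \mathcal{A}_{X}Y=-\mathcal{A}_{Y}X=\frac{1}{2}\,v[X,Y],
\end{equation*}
so that $\mathcal{T}$ vanishes identically if and only if every fibre is totally geodesic, and $\mathcal{A}$ vanishes identically if and only if the horizontal distribution is integrable.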
We now recall the following curvature relations for a Riemannian submersion from \cite{FIP} and \cite{O}.
\begin{theorem}\label{thm1}
Let $(M, g)$ and $(G, g^\prime)$ be Riemannian manifolds,
$$\pi: (M, g) \to (G, g^\prime)$$
a Riemannian submersion, and let $R^M$, $R^G$ and $\hat{R}$ be the Riemannian curvature tensors of $M$, $G$ and the fibre $(\pi^{-1}(x), \hat{g}_x)$, respectively. Then the following equations hold for any $U, V, W, F \in \chi^v (M)$ and $X, Y, Z, H \in \chi^h (M)$:
\begin{align}\label{g2}
g(R^M(X,Y)Z,H) &= g({R}^G(X,Y)Z,H) + 2g(A_X Y, A_Z H)\nonumber \\
&- g(A_Y Z, A_X H)+ g(A_X Z, A_Y H),
\end{align}
\begin{align}\label{g3}
g(R^M(X,Y)Z,V)&= -g((\nabla_Z A)_X Y, V) - g(A_X Y, T_V Z) \nonumber\\
&+g(A_Y Z, T_V X)- g(A_X Z, T_V Y),
\end{align}
\begin{align}\label{g4}
g(R^M(X,Y)V,W) &= g((\nabla_V A)_X Y,W)- g((\nabla_W A)_X Y,V)\nonumber \\
&+ g(A_X V, A_Y W) - g(A_X W, A_Y V) \nonumber\\
&- g(T_V X, T_W Y)+ g(T_W X, T_V Y),
\end{align}
\begin{align}\label{g5}
g(R^M(X,V)Y,W) &= g((\nabla_X T)_V W,Y)- g((\nabla_V A)_X Y,W)\nonumber \\
&- g(T_V X, T_W Y)+ g(A_X V, A_Y W),
\end{align}
\begin{align}\label{g6}
g(R^M(U,V)W,X)&=g((\nabla_U T)_V W, X) - g((\nabla_V T)_U W, X)
\end{align}
and
\begin{align}\label{g1}
g(R^M(U,V)W,F) &=g(\hat{R}(U,V)W,F) + g(T_U W, T_V F)- g(T_V W, T_U F).
\end{align}
\end{theorem}
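As an immediate illustration of \eqref{g2} (an observation recorded here only for orientation and not used in the sequel), if the tensor $A$ vanishes identically, i.e. the horizontal distribution is integrable, then \eqref{g2} reduces to
\begin{equation*}
g(R^M(X,Y)Z,H)=g(R^G(X,Y)Z,H),
\end{equation*}
so the curvatures of horizontal planes in the total space coincide with the corresponding curvatures of the base.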
\begin{definition}\rm\cite{FIP}
Let $(M, g)$ be a Riemannian manifold and let $\{U_j\}_{1\leq j\leq r}$ be a local orthonormal frame of the vertical distribution $\nu$.
Then the horizontal vector field $N$ on $(M, g)$ is locally defined by
$$N = \sum_{j=1}^{r} T_{U_j}U_j.$$
\end{definition}
\begin{proposition}\label{pro1}
Let $(M, g)$ and $(G, g^\prime)$ be Riemannian manifolds,
$$\pi: (M, g)\to (G, g^\prime) $$
a Riemannian submersion
and $\{X_i, U_j\}$ be a $\pi$-compatible frame.
In this case, for any $U, V \in \chi^v (M)$ and $X, Y \in \chi^h (M)$, the Ricci tensor $S^M$ satisfies the following equations \cite{FIP}:
\begin{align}\label{S1}
(i) \,\,\,\,\,\, S^M(U,V) &= \hat{S}(U,V) -g(N,T_U V) \\
&+ \sum_{i} \{g((\nabla_{X_i}T)_U V,X_i)+g(A_{X_i}U, A_{X_i}V)\}, \nonumber\\
(ii) \,\,\,\,\,\, S^M(X,Y) &=S^G (X', Y') \circ \pi + \frac{1}{2}
\{g(\nabla_X N,Y)+g(\nabla_Y N, X) \} \label{S2}\\
&-2 \sum_{i} g(A_X X_i, A_Y X_i) - \sum_{j} g(T_{U_j}X,T_{U_j}Y),\nonumber\\
(iii) \,\,\,\,\,\, S^M(U,X) &= g(\nabla_U N,X) -\sum_{j} g((\nabla_{U_j}T)_{U_j}U,X) \label{S3} \\
&+ \sum_{i} \{g((\nabla_{X_i}A)_{X_i} X,U)-2g(A_X {X_i}, T_U X_i)\}. \nonumber
\end{align}
\end{proposition}
\begin{proposition}\label{pro2} \cite{FIP}
Let $r^M$, $r^G$ and $\hat{r}$ denote the scalar curvatures of the Riemannian manifolds $(M, g)$, $(G, g^\prime)$ and of the fibre $\pi^{-1}(x)$, $x \in G$, respectively. For a Riemannian submersion
$$\pi: (M, g) \to (G, g^\prime)$$
the scalar curvature of $(M, g)$ is related to the scalar curvature $r^G$ of the base and the scalar curvature $\hat{r}$ of the fibres as follows:
\begin{equation}\label{Pro2}
r^M= \hat{r}+r^G \circ \pi - ||N||^2-||A||^2- ||T||^2+ 2 \sum_{i} g(\nabla_{X_i} N, X_i).
\end{equation}
\end{proposition}
\section{Weyl projective curvature tensor along a Riemannian submersion}
In this section, we examine the Weyl projective curvature tensor relations between the total space, the base space and the fibres of a Riemannian submersion. We also give a corollary in the case where the Riemannian submersion has totally umbilical fibres.
\begin{definition}\cite{mishra}
Let $M$ be an $n$-dimensional differentiable manifold of
differentiability class $C^{\infty }$. In the $n-$dimensional space $V_n$, the tensor
\begin{equation*}
P^*(X,Y)Z=R^M(X,Y)Z-\frac{1}{n-1}\{S^M(Y,Z)X-S^M(X,Z)Y\}. \label{5.3}
\end{equation*}
is called the Weyl projective curvature tensor, where the Ricci tensor of the total space is denoted by $S^{M}.$
\end{definition}
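We recall the classical fact (included here only for context) that, for $n\geq 3$, the Weyl projective curvature tensor vanishes identically if and only if the manifold is of constant curvature, i.e.
\begin{equation*}
R^M(X,Y)Z=\frac{r^M}{n(n-1)}\,\{g(Y,Z)X-g(X,Z)Y\}.
\end{equation*}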
Now, we have the following main theorem.
\begin{theorem}
Let $(M, g)$ and $(G, g^\prime)$ be Riemannian manifolds,
$$\pi: (M, g) \to(G, g^\prime)$$
a Riemannian submersion, and let $R^M$, $R^G$ and $\hat{R}$ be the Riemannian curvature tensors and $S^M$, $S^G$ and $\hat{S}$ the Ricci tensors of $M$, $G$ and the fibre, respectively. Then for any $U, V, W, F \in \chi^v (M)$ and $X, Y, Z, H\in \chi^h (M)$, we have the following relations for the Weyl projective curvature tensor:
\begin{align*}
&g(P^*(X,Y)Z, H)\!\!=\!\!g(R^G (X,Y)Z, H)\!+\!2g (A_{X}Y, A_{Z}H)\!-\!g (A_{Y}Z, A_{X}H)\!+\!g (A_{X}Z, A_{Y}H)\\
&- \frac{1}{n-1} \Bigg\{ g(X,H) \bigg[S^G (Y', Z') \circ \pi + \frac{1}{2} \left(g(\nabla_Y N,Z)+ g(\nabla_Z N,Y ) \right) \\
&-2 \sum_{i} g(A_Y X_i, A_Z X_i) - \sum_{j} g(T_{U_j}Y, T_{U_j}Z)\bigg] \\
&- g(Y,H) \bigg[S^G (X', Z') \circ \pi + \frac{1}{2} \left(g(\nabla_X N,Z)+ g(\nabla_Z N,X ) \right) \\
&-2 \sum_{i} g(A_X X_i, A_Z X_i) - \sum_{j} g(T_{U_j}X, T_{U_j}Z)\bigg] \Bigg\},
\end{align*}
\begin{align*}
g(P^*(X,Y)Z, V) &=-g((\nabla_Z A)_X Y, V) - g(A_X Y, T_V Z)+ g(A_Y Z, T_V X)\\
&- g(A_X Z, T_V Y),
\end{align*}
\begin{align*}
g(P^*(X,Y)V, W) &= g((\nabla_V A)_X Y, W) - g((\nabla_W A)_X Y, V) + g(A_X V, A_Y W)\\
& - g(A_X W, A_Y V)- g(T_V X, T_W Y)+ g(T_W X, T_V Y),
\end{align*}
\begin{align*}
g(P^*(X,V)Y, W) &= g((\nabla_X T)_V W, Y) + g((\nabla_V A)_X Y, W)
- g(T_V X, T_W Y)\\
& + g(A_X V, A_Y W),
\end{align*}
\begin{align*}
g(P^*(U,V)W, X)&=g((\nabla_U T)_V W, X) - g((\nabla_V T)_U W, X)
\end{align*}
and
\begin{align*}
g(P^*(U,V)W, F)&= g( \hat{R} (U,V)W , F)+ g (T_{U}W, T_{V}F )- g (T_{V}W, T_{U}F) \\
&- \frac{1}{n-1} \Bigg\{ g(F,U) \bigg[\hat{S} (V, W) - g(N,T_V W) \\
&+ \sum_{i} \left( g((\nabla_{X_i} T)_V W, X_i)+g(A_{X_i} V, A_{X_i} W)\right)\bigg] \\
&- g(F,V) \bigg[\hat{S} (U, W) - g(N,T_U W) \\
&+ \sum_{i} \left( g((\nabla_{X_i} T)_U W, X_i)+g(A_{X_i} U, A_{X_i} W)\right)\bigg]\Bigg\}.
\end{align*}
\end{theorem}
\begin{proof}
We only give the proof of the first equation of this theorem. The following equations are obtained by taking the inner product of $P^*$ with $H$ and using equations \eqref{g2} and \eqref{S2}.
\begin{align}
g(R^M(X,Y)Z,H)&=g(R^G(X,Y)Z,H)+ 2g(A_X Y, A_Z H)- g(A_Y Z,A_X H)\nonumber \\
&+g(A_X Z, A_Y H),\nonumber
\end{align}
\begin{align}
S^M(Y,Z) &= S^G (Y',Z') \circ \pi + \frac{1}{2} \left\{g(\nabla_Y N,Z)+g(\nabla_Z N, Y)\right\} \nonumber \\
&-2\sum_{i} g(A_Y Y_i, A_Z Y_i) -\sum_{j}(T_{U_j}Y, T_{U_j}Z)\nonumber
\end{align}
and
\begin{align}
S^M(X,Z) &= S^G (Y', Z') \circ \pi
+ \frac{1}{2} \left\{g(\nabla_X N,Z)+g(\nabla_Z N, X)\right\} \nonumber\\
&-2\sum_{i} g(A_X X_i, A_Z X_i) -\sum_{j}(T_{U_j}X, T_{U_j}Z).\nonumber
\end{align}
When these equations are substituted in $P^*$, the given result is obtained. Other equations are similarly proved by using Theorem \ref{thm1} and Proposition \ref{pro1}.
\end{proof}
\begin{corollary}
Let $\pi: (M, g) \to(G, g^\prime)$ be a Riemannian submersion, where $(M, g)$ and $(G, g^\prime)$ are Riemannian manifolds. If the Riemannian submersion has totally umbilical fibres, that is, $N = 0$, then the Weyl projective curvature tensor is given by
\begin{align*}
&g(P^*(X,Y)Z, H)\\
&=g(R^G (X,Y) Z , H )+2g (A_{X}Y, A_{Z}H)-g (A_{Y}Z, A_{X}H )+g (A_{X}Z, A_{Y}H )\\
&- \frac{1}{n-1} \Bigg\{ g(X,H) \bigg[S^G (Y', Z') \circ \pi
-2 \sum_{i} g(A_Y X_i, A_Z X_i) - \sum_{j} g(T_{U_j}Y, T_{U_j}Z)\bigg] \\
&- g(Y,H) \bigg[S^G (X', Z') \circ \pi
-2 \sum_{i} g(A_X X_i, A_Z X_i) - \sum_{j} g(T_{U_j}X, T_{U_j}Z)\bigg] \Bigg\},
\end{align*}
and
\begin{align*}
&g(P^*(U,V)W, F) \\
&= g( \hat{R} (U,V)W , F)+ g (T_{U}W, T_{V}F )- g (T_{V}W, T_{U}F) \\
&- \frac{1}{n-1} \Bigg\{ g(F,U) \bigg[\hat{S} (V, W)
+ \sum_{i} \left( g((\nabla_{X_i} T)_V W, X_i)+g(A_{X_i} V, A_{X_i} W)\right)\bigg] \\
&- g(F,V) \bigg[\hat{S} (U, W)
+ \sum_{i} \left( g((\nabla_{X_i} T)_U W, X_i)+g(A_{X_i} U, A_{X_i} W)\right)\bigg]\Bigg\}.
\end{align*}
Here $U, V, W, F \in \chi^v (M)$ and $X, Y, Z, H\in \chi^h (M)$.
\end{corollary}
\section{Concircular curvature tensor along a Riemannian submersion}
In this section, the curvature relations of the concircular curvature tensor for a Riemannian submersion are examined. In particular, we observe that the relations for the concircular curvature tensor do not simplify further in the case of totally umbilical fibres.
\begin{definition}\rm \label{defconcir}
In the $n-$dimensional space $V_n$, the tensor
\begin{align*}
C^*(X,Y,Z,H)&=R^M(X,Y,Z,H)- \frac{r^M}{n(n-1)} [g(X,H)g(Y,Z)-g(Y,H)g(X,Z)],
\end{align*}
is called the concircular curvature tensor, where the scalar curvature is denoted by $r^M$ \cite{mishra}.
\end{definition}
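It is well known (recalled here only for context) that, for $n\geq 3$, the concircular curvature tensor vanishes identically if and only if the manifold is of constant curvature, that is,
\begin{equation*}
R^M(X,Y,Z,H)=\frac{r^M}{n(n-1)}\,[g(X,H)g(Y,Z)-g(Y,H)g(X,Z)].
\end{equation*}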
Now, we have the following main theorem.
\begin{theorem}
Let $(M, g)$ and $(G, g^\prime)$ be Riemannian manifolds,
$$\pi: (M, g) \to(G, g^\prime)$$
a Riemannian submersion, and let $R^M$, $R^G$ and $\hat{R}$ be the Riemannian curvature tensors and $r^M$, $r^G$ and $\hat{r}$ the scalar curvatures of $M$, $G$ and the fibre, respectively. Then for any $U, V, W, F \in \chi^v (M)$ and $X, Y, Z, H\in \chi^h (M)$, we have the following relations:
\begin{align*}
g(C^*(X,Y)Z, H) &= g(R^G (X,Y) Z , H )+ 2g (A_{X}Y, A_{Z}H )\\
&- g (A_{Y}Z, A_{X}H )+ g (A_{X}Z, A_{Y}H )\\
&- \frac{r^M}{n(n-1)} \Bigg\{ g(Y,Z) g(X,H)- g(X,Z) g(Y,H) \Bigg\},
\end{align*}
\begin{align*}
g(C^*(X,Y)Z, V) &= -g((\nabla_Z A)_X Y, V) - g(A_X Y, T_V Z) \\
&+ g(A_Y Z, T_V X) - g(A_X Z, T_V Y),
\end{align*}
\begin{align*}
g(C^*(X,Y)V, W) &= g((\nabla_V A)_X Y, W) - g((\nabla_W A)_X Y, V) + g(A_X V, A_Y W) \\
&- g(A_X W, A_Y V)-g(T_V X, T_W Y)+ g(T_W X, T_V Y),
\end{align*}
\begin{align*}
g(C^*(X,V)Y, W) &= g((\nabla_X T)_V W, Y) + g((\nabla_V A)_X Y, W)
- g(T_V X, T_W Y) \\
&+ g(A_X Y, A_Y W)-\frac{r^M}{n(n-1)} \{-g(X,Y)g(V,W)\},
\end{align*}
\begin{align*}
g(C^*(U,V)W, X) &= g((\nabla_U T)_V W, X) - g((\nabla_V T)_U W, X)
\end{align*}
and
\begin{align*}
g(C^*(U,V)W, F)&= g( \hat{R} (U,V)W , F)+ g (T_{U}W, T_{V}F )- g (T_{V}W, T_{U}F) \\
&- \frac{r^M}{n(n-1)} \Bigg\{ g(V,W)g(U,F)- g(U,W)g(V,F)\Bigg\}
\end{align*}
where
\begin{equation*}
r^M= \hat{r}+r^G \circ \pi -||A||^2- ||T||^2.
\end{equation*}
\end{theorem}
\begin{proof}
We prove the second equation of this theorem. Taking the inner product of $C^*$ with $V$, we have
\begin{align*}
g(C^*(X,Y)Z,V) = g(R^M(X,Y)Z,V) -\frac{r^M}{n(n-1)} \{g(Y,Z)g(X,V)-g(X,Z)g(Y,V)\}.
\end{align*}
Since $X$ and $Y$ are horizontal and $V$ is vertical, we have $g(X,V)=g(Y,V)=0$, so the last term vanishes. Then using equation \eqref{g3}, we get
\begin{align*}
g(C^*(X,Y)Z,V)&=-g((\nabla_Z A)_X Y,V)- g(A_X Y, T_V Z) \\
&+g(A_Y Z, T_V X)- g(A_X Z, T_V Y).
\end{align*}
This completes the proof of the second equation. The other equations are proved similarly by using Theorem \ref{thm1}, Proposition \ref{pro1} and Proposition \ref{pro2}.
\end{proof}
\begin{corollary}
Let $\pi: (M, g) \to(G, g^\prime)$ be a Riemannian submersion, where $(M, g)$ and $(G, g^\prime)$ are Riemannian manifolds. Then
the relations of the concircular curvature tensor of the Riemannian submersion are not simplified further in the case of totally umbilical fibres.
\end{corollary}
\section{Conharmonic curvature tensor along a Riemannian submersion}
In this section, the curvature relations of the conharmonic curvature tensor for a Riemannian submersion are examined.
\begin{definition}\rm \label{defconhar}
In the $n-$dimensional space $V_n$, the tensor
\begin{align*}
L^*(X,Y,Z,H)&= R^M(X,Y,Z,H)- \frac{1}{n-2}[g(Y,Z)Ric(X,H)-g(X,Z)Ric(Y,H)\\
&+g(X,H)Ric(Y,Z)-g(Y,H)Ric(X,Z)],
\end{align*}
is called the conharmonic curvature tensor, where the Ricci tensor is denoted by $Ric$ \cite{mishra}.
\end{definition}
In a similar way, we have the following main theorem.
\begin{theorem}
Let $(M, g)$ and $(G, g^\prime)$ be Riemannian manifolds,
$$\pi: (M, g) \to(G, g^\prime)$$
a Riemannian submersion, and let $R^M$, $R^G$ and $\hat{R}$ be the Riemannian curvature tensors and $S^M$, $S^G$ and $\hat{S}$ the Ricci tensors of $M$, $G$ and the fibre, respectively. Then for any $U, V, W, F \in \chi^v (M)$ and $X, Y, Z, H\in \chi^h (M)$, we have the following relations:
\begin{align*}
&g(L^* (X,Y)Z, H)\\
&= g(R^G (X,Y)Z, H) +2 g (A_X Y, A_Z H) - g(A_Y Z, A_X H) + g(A_X Z, A_Y H) \\
&- \frac{1}{(n-2)} \Bigg\{g(Y,Z) \bigg[S^G (X',H') \circ \pi + \frac{1}{2} \left(g(\nabla_X N,H)+ g(\nabla_H N, X)\right) \\
&-2 \sum_{i} g(A_X X_i, A_H X_i)- \sum_{j}g(T_{U_j}X, T_{U_j}H) \bigg] \\
&-g(X,Z) \bigg[S^G (Y',H') \circ \pi + \frac{1}{2} \left(g(\nabla_Y N,H)+ g(\nabla_H N, Y)\right) \\
&-2 \sum_{i} g(A_Y X_i, A_H X_i)- \sum_{j}g(T_{U_j}Y, T_{U_j}H) \bigg] \\
&+g(X, H) \bigg[S^G (Y',Z') \circ \pi
+\frac{1}{2} \left(g(\nabla_Y N,Z)+g(\nabla_Z N, Y)\right) \\
&- 2 \sum_{i} g(A_Y X_i, A_Z X_i) - \sum_{j} g(T_{U_j}Y, T_{U_j}Z) \Bigg] \\
&-g(Y,H) \bigg[S^G (X',Z') \circ \pi + \frac{1}{2} \left(g(\nabla_X N,Z)+ g(\nabla_Z N, X)\right) \\
&-2 \sum_{i} g(A_X X_i, A_Z X_i)- \sum_{j}g(T_{U_j}X, T_{U_j}Z) \bigg] \Bigg\},
\end{align*}
\begin{align*}
&g(L^* (X,Y)Z, V) \\
&=-g((\nabla_Z A)_X Y,V) - g(A_X Y, T_V Z) + g(A_Y Z, T_V X)- g(A_X Z, T_V Y) \\
&- \frac{1}{(n-2)} \Bigg\{g(Y, Z) \bigg[g(\nabla_X N,V)
-\sum_{j} g((\nabla_{U_j} T)_{U_j} X, V) \\
&+\sum_{i}\left(g((\nabla_{X_i}A)_{X_i} X,V )- 2 g (A_V X_i, T_X X_i) \right) \bigg] \\
& -g(X,Z) \bigg[g(\nabla_Y N,V)
-\sum_{j} g((\nabla_{U_j} T)_{U_j} Y, V) \\
&+ \sum_{i}\left(g((\nabla_{X_i}A)_{X_i} Y,V ) - 2 g (A_V X_i, T_Y X_i) \right) \bigg] \Bigg\},
\end{align*}
\begin{align*}
g(L^* (X,Y)V, W)&= g((\nabla_V A)_X Y, W)- g((\nabla_W A)_X Y, V)+ g(A_X V, A_Y W)\\
&- g(A_X W, A_Y V) - g(T_V X, T_W Y) + g(T_W X, T_V Y),
\end{align*}
\begin{align*}
&g(L^* (X,V)Y ,W)\\
&=g((\nabla_X T)_V W, Y) + g((\nabla_V A)_X Y,W)- g(T_V X, T_W Y) + g(A_X Y, A_Y W)\\
&-\frac{1}{(n-2)} \bigg\{ - g(V,W) \Big[ S^G (X', Y') \circ \pi + \frac{1}{2} \left(g(\nabla_X N,Y)+ g(\nabla_Y N,X)\right) \\
&- 2 \sum_{i} g(A_X X_i, A_Y X_i) -\sum_{j} g(T_{U_j}X, T_{U_j}Y) \Big]\\
& - g(X,Y) \Big[\hat{S} (V,W) -g(N,T_V W) + \sum_{i}\left(g((\nabla_{X_i}T)_V W,X_i) \right.\\
& \left. + g(A_{X_i} V, A_{X_i}W) \right) \Big] \bigg\},
\end{align*}
\begin{align*}
g(L^* (U,V)W ,X)
&=g((\nabla_U T)_V W, X) - g((\nabla_V T)_U W, X)\\
&- \frac{1}{(n-2)} \bigg\{g(V,W) \Big[g(\nabla_U N,X) -\sum_{j} g(\nabla_{U_j}T)_{U_j}U,X) \\
&+ \sum_{i} \{g((\nabla_{X_i}A)_{X_i} X,U)-2g(A_X {X_i}, T_U X_i)\}\Big] \\
&- g(U,W) \Big[g(\nabla_V N,X) -\sum_{j} g(\nabla_{U_j}T)_{U_j}V,X) \\
&+ \sum_{i} \{g((\nabla_{X_i}A)_{X_i} X,V)-2g(A_X {X_i}, T_V X_i)\}\Big] \bigg\}
\end{align*}
and
\begin{align*}
g(L^* (U,V)W ,F)&= g(\hat{R} (U,V)W, F) + g(T_U W, T_V F) - g(T_V W, T_U F ) \\
&- \frac{1}{(n-2)} \bigg\{g(V,W) \Big[\hat{S}(U,F) -g(N, T_U F)\\
&+ \sum_{i}\left(g((\nabla_{X_i}T)_U F, X_i)+g(A_{X_i}U,A_{X_i}F)\right)\Big]\\
&-g(U,W) \Big[\hat{S}(V,F)-g(N,T_V F) \\
&+ \sum_{i}\left(g((\nabla_{X_i}T)_V F, X_i)+ g(A_{X_i} V, A_{X_i}F)\right)\Big] \\
&+g(F,U) \Big[ \hat{S}(U,V)- g(N, T_U V) \\
& + \sum_{i}\left(g((\nabla_{X_i}T)_U V, {X_i})+ g(A_{X_i}U, A_{X_i}V)\right) \Big] \\
&-g(F,V) \Big[ \hat{S}(U,W) -g(N,T_U W)\\
&+ \sum_{i}\left( g((\nabla_{X_i}T)_U W, X_i)+ g(A_{X_i}U, A_{X_i}W) \right) \Big] \bigg\}.
\end{align*}
\end{theorem}
\begin{proof}
We prove the third equation of this theorem. The following equation is obtained by taking the inner product of $L^*$ with $W$ and by using equation \eqref{g4}:
\begin{align*}
g(L^*(X,Y)V,W)&=g(R^M(X,Y)V,W)-\frac{1}{n-2} \{g(X,W)S(Y,V) \\
&-g(Y,W)S(X,V)+g(Y,V)S(X,W)-g(X,V)S(Y,W)\}. \nonumber
\end{align*}
One can easily obtain the other equations by using Theorem \ref{thm1} and Proposition \ref{pro1}.
\end{proof}
\begin{corollary}
Let $\pi: (M, g) \to(G, g^\prime)$ be a Riemannian submersion, where $(M, g)$ and $(G, g^\prime)$ are Riemannian manifolds. If the Riemannian submersion has totally umbilical fibres, that is, $N = 0$, then the conharmonic curvature tensor is given by
\begin{align*}
&g(L^* (X,Y)Z, H)\\
&= g(R^G (X,Y)Z, H) +2 g (A_X Y, A_Z H) - g(A_Y Z, A_X H) + g(A_X Z, A_Y H) \\
&- \frac{1}{(n-2)} \Bigg\{g(Y,Z) \bigg[S^G (X',H') \circ \pi
-2 \sum_{i} g(A_X X_i, A_H X_i)- \sum_{j}g(T_{U_j}X, T_{U_j}H) \bigg] \\
&-g(X,Z) \bigg[S^G (Y',H') \circ \pi
-2 \sum_{i} g(A_Y X_i, A_H X_i)- \sum_{j}g(T_{U_j}Y, T_{U_j}H) \bigg] \\
&+g(X, H) \bigg[S^G (Y',Z') \circ \pi
- 2 \sum_{i} g(A_Y X_i, A_Z X_i) - \sum_{j} g(T_{U_j}Y, T_{U_j}Z) \Bigg] \\
&-g(Y,H) \bigg[S^G (X',Z') \circ \pi
-2 \sum_{i} g(A_X X_i, A_Z X_i)- \sum_{j}g(T_{U_j}X, T_{U_j}Z) \bigg] \Bigg\},
\end{align*}
\begin{align*}
g(L^* (X,Y)Z, V) &=-g((\nabla_Z A)_X Y,V)\!-\!g(A_X Y, T_V Z) \!+\! g(A_Y Z, T_V X)\!- \!g(A_X Z, T_V Y) \\
&- \frac{1}{(n-2)} \Bigg\{g(Y, Z) \bigg[
-\sum_{j} g((\nabla_{U_j} T)_{U_j} X, V) \\
&+\sum_{i}\left(g((\nabla_{X_i}A)_{X_i} X,V )- 2 g (A_V X_i, T_X X_i) \right) \bigg] \\
& -g(X,Z) \bigg[-\sum_{j} g((\nabla_{U_j} T)_{U_j} Y, V) \\
&+ \sum_{i}\left(g((\nabla_{X_i}A)_{X_i} Y,V ) - 2 g (A_V X_i, T_Y X_i) \right) \bigg] \Bigg\},
\end{align*}
\begin{align*}
&g(L^* (X,V)Y ,W)\\
&= g((\nabla_X T)_V W, Y) + g((\nabla_V A)_X Y,W)- g(T_V X, T_W Y) + g(A_X Y, A_Y W)\\
&-\frac{1}{(n-2)} \bigg\{ - g(V,W) \Big[ S^G (X', Y') \circ \pi
- 2 \sum_{i} g(A_X X_i, A_Y X_i) -\sum_{j} g(T_{U_j}X, T_{U_j}Y) \Big]\\
& - g(X,Y) \Big[\hat{S} (V,W) + \sum_{i}\left(g((\nabla_{X_i}T)_V W,X_i)
+ g(A_{X_i} V, A_{X_i}W) \right) \Big] \bigg\},
\end{align*}
\begin{align*}
g(L^* (U,V)W ,X)&= g((\nabla_U T)_V W, X) - g((\nabla_V T)_U W, X)\\
&- \frac{1}{(n-2)} \bigg\{g(V,W) \Big[\sum_{j} g(\nabla_{U_j}T)_{U_j}U,X) \\
&+ \sum_{i} \{g((\nabla_{X_i}A)_{X_i} X,U)-2g(A_X {X_i}, T_U X_i)\}\Big] \\
&- g(U,W) \Big[\sum_{j} g(\nabla_{U_j}T)_{U_j}V,X) \\
&+ \sum_{i} \{g((\nabla_{X_i}A)_{X_i} X,V)-2g(A_X {X_i}, T_V X_i)\}\Big] \bigg\}
\end{align*}
and
\begin{align*}
&g(L^* (U,V)W ,F)\\
&= g(\hat{R} (U,V)W, F) + g(T_U W, T_V F) - g(T_V W, T_U F ) \\
&- \frac{1}{(n-2)} \bigg\{g(V,W) \Big[\hat{S}(U,F) \\
&+ \sum_{i}\left(g((\nabla_{X_i}T)_U F, X_i)+g(A_{X_i}U,A_{X_i}F)\right)\Big]\\
&-g(U,W) \Big[\hat{S}(V,F)
+ \sum_{i}\left(g((\nabla_{X_i}T)_V F, X_i)+ g(A_{X_i} V, A_{X_i}F)\right)\Big] \\
&+g(F,U) \Big[ \hat{S}(U,V)
+ \sum_{i}\left(g((\nabla_{X_i}T)_U V, {X_i})+ g(A_{X_i}U, A_{X_i}V)\right) \Big] \\
&-g(F,V) \Big[ \hat{S}(U,W)
+ \sum_{i}\left( g((\nabla_{X_i}T)_U W, X_i)+ g(A_{X_i}U, A_{X_i}W) \right) \Big] \bigg\}.
\end{align*}
\end{corollary}
\section{Conformal curvature tensor along a Riemannian submersion}
In this section, we obtain some curvature relations of the conformal curvature tensor for a Riemannian submersion and give a corollary in the case where the Riemannian submersion has totally umbilical fibres.
\begin{definition}\rm \label{def1}
In the $n-$dimensional space $V_n$, the tensor
\begin{align*}
V^*(X,Y,Z,H)&=R^M(X,Y,Z,H)-\frac{1}{n-2} [g(X,H)Ric(Y,Z)-g(Y,H)Ric(X,Z)\\
&+g(Y,Z)Ric(X,H)-g(X,Z)Ric(Y,H)]\\
&+\frac{r^M}{(n-1)(n-2)} [g(X,H)g(Y,Z)-g(Y,H)g(X,Z)],
\end{align*}
is called the conformal curvature tensor, where the Ricci tensor and the scalar curvature are denoted by $Ric$ and $r^M$, respectively \cite{mishra}.
\end{definition}
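We recall the classical facts about this tensor (included here only for context): the $(1,3)$-type conformal curvature tensor $V^*(X,Y)Z$ is invariant under a conformal change of the metric,
\begin{equation*}
\tilde{g}=e^{2\sigma}g \ \Longrightarrow\ \tilde{V}^{*}(X,Y)Z=V^{*}(X,Y)Z,
\end{equation*}
it vanishes identically when $n=3$, and for $n\geq 4$ it vanishes identically if and only if $(M,g)$ is conformally flat.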
\begin{theorem}
Let $(M, g)$ and $(G, g^\prime)$ be Riemannian manifolds,
$$\pi: (M, g) \to(G, g^\prime)$$
a Riemannian submersion, and let $R^M$, $R^G$ and $\hat{R}$ be the Riemannian curvature tensors, $S^M$, $S^G$ and $\hat{S}$ the Ricci tensors and $r^M$, $r^G$ and $\hat{r}$ the scalar curvatures of $M$, $G$ and the fibre, respectively. Then for any $U, V, W, F \in \chi^v (M)$ and $X, Y, Z, H\in \chi^h (M)$, we have the following relations:
\begin{align*}
&g(V^* (X,Y)Z, H) \\
&= g(R^G (X,Y)Z, H) +2 g (A_X Y, A_Z H) - g(A_Y Z, A_X H) + g(A_X Z, A_Y H) \\
&- \frac{1}{(n-2)} \Bigg\{g(X, H) \bigg[S^G (Y',Z') \circ \pi
+\frac{1}{2} \left(g(\nabla_Y N,Z)+g(\nabla_Z N, Y)\right) \\
&- 2 \sum_{i} g(A_Y X_i, A_Z X_i) - \sum_{j} g(T_{U_j}Y, T_{U_j}Z) \Bigg] \\
&-g(Y,H) \bigg[S^G (X',Z') \circ \pi + \frac{1}{2} \left(g(\nabla_X N,Z)+ g(\nabla_Z N, X)\right) \\
&-2 \sum_{i} g(A_X X_i, A_Z X_i)- \sum_{j}g(T_{U_j}X, T_{U_j}Z) \bigg] \\
&+g(Y,Z) \bigg[S^G (X',H') \circ \pi + \frac{1}{2} \left(g(\nabla_X N,H)+ g(\nabla_H N, X)\right) \\
&-2 \sum_{i} g(A_X X_i, A_H X_i)- \sum_{j}g(T_{U_j}X, T_{U_j}H) \bigg] \\
&-g(X,Z) \bigg[S^G (Y',H') \circ \pi + \frac{1}{2} \left(g(\nabla_Y N,H)+ g(\nabla_H N, Y)\right) \\
&-2 \sum_{i} g(A_Y X_i, A_H X_i)- \sum_{j}g(T_{U_j}Y, T_{U_j}H) \bigg]\Bigg\} \\
&+ \frac{r^M}{(n-1)(n-2)} \{g(Y,Z) g(X,H)- g(X,Z)g(Y,H)\},
\end{align*}
\begin{align*}
g(V^* (X,Y)Z, V) &\!=\! -g((\nabla_Z A)_X Y,V)\!\!-\!\! g(A_X Y, T_V Z)\!+\!g(A_Y Z, T_V X)\!-\!g(A_X Z, T_V Y) \\
&- \frac{1}{(n-2)} \Bigg\{g(Y, Z) \bigg[g(\nabla_X N,V)
-\sum_{j} g((\nabla_{U_j} T)_{U_j} X, V) \\
&+\sum_{i}\left(g((\nabla_{X_i}A)_{X_i} X,V )- 2 g (A_V X_i, T_X X_i) \right) \bigg] \\
& -g(X,Z) \bigg[g(\nabla_Y N,V)
-\sum_{j} g((\nabla_{U_j} T)_{U_j} Y, V) \\
&+ \sum_{i}\left(g((\nabla_{X_i}A)_{X_i} Y,V ) - 2 g (A_V X_i, T_Y X_i) \right) \bigg] \Bigg\},
\end{align*}
\begin{align*}
g(V^* (X,Y)V, W)&= g((\nabla_V A)_X Y, W)- g((\nabla_W A)_X Y, V)+ g(A_X V, A_Y W)\\
& - g(A_X W, A_Y V) - g(T_V X, T_W Y) + g(T_W X, T_V Y),
\end{align*}
\begin{align*}
&g(V^* (X,V)Y ,W)\\
&=g((\nabla_X T)_V W, Y) + g((\nabla_V A)_X Y,W)- g(T_V X, T_W Y) + g(A_X Y, A_Y W)\\
&-\frac{1}{(n-2)} \bigg\{ - g(V,W) \Big[ S^G (X', Y') \circ \pi + \frac{1}{2} \left(g(\nabla_X N,Y)+ g(\nabla_Y N,X)\right) \\
&- 2 \sum_{i} g(A_X X_i, A_Y X_i) -\sum_{j} g(T_{U_j}X, T_{U_j}Y) \Big]\\
& - g(X,Y) \Big[\hat{S} (V,W) -g(N,T_V W) + \sum_{i}\left(g((\nabla_{X_i}T)_V W,X_i) \right.\\
& \left. + g(A_{X_i} V, A_{X_i}W) \right) \Big] \bigg\},
\end{align*}
\begin{align*}
g(V^* (U,V)W ,X)&= g((\nabla_U T)_V W, X) - g((\nabla_V T)_U W, X)\\
&- \frac{1}{(n-2)} \bigg\{g(V,W) \Big[g(\nabla_U N,X) -\sum_{j} g(\nabla_{U_j}T)_{U_j}U,X) \\
&+ \sum_{i} \{g((\nabla_{X_i}A)_{X_i} X,U)-2g(A_X {X_i}, T_U X_i)\}\Big] \\
&- g(U,W) \Big[g(\nabla_V N,X) -\sum_{j} g(\nabla_{U_j}T)_{U_j}V,X) \\
&+ \sum_{i} \{g((\nabla_{X_i}A)_{X_i} X,V)-2g(A_X {X_i}, T_V X_i)\}\Big] \bigg\}
\end{align*}
and
\begin{align*}
&g(V^* (U,V)W ,F)=g(\hat{R} (U,V)W, F) + g(T_U W, T_V F) - g(T_V W, T_U F ) \\
&- \frac{1}{(n-2)} \bigg\{ g(F,U) \Big[ \hat{S}(V,W)- g(N, T_V W) \\
& + \sum_{i}\left(g(\nabla_{X_i}T)_V W, {X_i}+ g(A_{X_i}V, A_{X_i}W)\right) \Big] \\
&-g(F,V) \Big[ \hat{S}(U,W) -g(N,T_U W)+ \sum_{i} \left(g((\nabla_{X_i}T)_U W, X_i)+ g(A_{X_i}U, A_{X_i}W) \right)\Big]\\
&+g(V,W) \Big[\hat{S}(U,F) -g(N, T_U F)+ \sum_{i}\left(g((\nabla_{X_i}T)_U F, X_i)+g(A_{X_i}U,A_{X_i}F)\right)\Big]\\
&-g(U,W) \Big[\hat{S}(V,F)-g(N,T_V F) + \sum_{i}\left(g((\nabla_{X_i}T)_V F, X_i)+ g(A_{X_i} V, A_{X_i}F)\right)\Big] \bigg\}\\
&+\frac{r^M}{(n-1)(n-2)} \{g(V,W)g(U,F)-g(U,W)g(V,F)\}
\end{align*}
where
\begin{equation*}
r^M= \hat{r}+r^G \circ \pi - ||N||^2-||A||^2- ||T||^2+ 2 \sum_{i} g(\nabla_{X_i} N, X_i).
\end{equation*}
\end{theorem}
\begin{proof}
We prove the fourth equation of this theorem. The following equation is obtained by taking the inner product of $V^*$ with $W$:
\begin{align*}
g(V^*(X,V)Y,W)&=g(R^M(X,V)Y,W) -\frac{1}{n-2} \{g(X,W)S^M(V,Y) \\
&-g(V,W)S^M(X,Y)+g(V,Y)S^M(X,W)-g(X,Y)S^M(V,W)\} \nonumber\\
&+ \frac{r^M}{(n-1)(n-2)} \{g(X,W)g(Y,V)-g(X,V)g(Y,W)\}. \nonumber
\end{align*}
Then, using equations \eqref{g5}--\eqref{S2}, we obtain the desired result.
The remaining equations are obtained from Theorem \ref{thm1}, Proposition \ref{pro1} and Proposition \ref{pro2}.
\end{proof}
\begin{corollary}
Let $\pi: (M, g) \to(G, g^\prime)$ be a Riemannian submersion, where $(M, g)$ and $(G, g^\prime)$ are Riemannian manifolds. If the Riemannian submersion has totally umbilical fibres, that is, $N = 0$, then the conformal curvature tensor is given by
\begin{align*}
&g(V^* (X,Y)Z, H) \\
&=g(R^G (X,Y)Z, H) +2 g (A_X Y, A_Z H) - g(A_Y Z, A_X H) + g(A_X Z, A_Y H) \\
&- \frac{1}{(n-2)} \Bigg\{g(X, H) \bigg[S^G (Y',Z') \circ \pi
- 2 \sum_{i} g(A_Y X_i, A_Z X_i) - \sum_{j} g(T_{U_j}Y, T_{U_j}Z) \Bigg] \\
&-g(Y,H) \bigg[S^G (X',Z') \circ \pi
-2 \sum_{i} g(A_X X_i, A_Z X_i)- \sum_{j}g(T_{U_j}X, T_{U_j}Z) \bigg] \\
&+g(Y,Z) \bigg[S^G (X',H') \circ \pi
-2 \sum_{i} g(A_X X_i, A_H X_i)- \sum_{j}g(T_{U_j}X, T_{U_j}H) \bigg] \\
&-g(X,Z) \bigg[S^G (Y',H') \circ \pi
-2 \sum_{i} g(A_Y X_i, A_H X_i)- \sum_{j}g(T_{U_j}Y, T_{U_j}H) \bigg]\Bigg\} \\
&+ \frac{r^M}{(n-1)(n-2)} \{g(Y,Z) g(X,H)- g(X,Z)g(Y,H)\},
\end{align*}
\begin{align*}
&g(V^* (X,Y)Z,V) \\
= -g((\nabla_Z A)_X Y,V) - g(A_X Y, T_V Z) + g(A_Y Z, T_V X)- g(A_X Z, T_V Y) \\
&- \frac{1}{(n-2)} \Bigg\{g(Y, Z) \bigg[-\sum_{j} g((\nabla_{U_j} T)_{U_j} X, V)
+\sum_{i}\left(g((\nabla_{X_i}A)_{X_i} X,V )- 2 g (A_V X_i, T_X X_i) \right) \bigg] \\
&-g(X,Z) \bigg[-\sum_{j} g((\nabla_{U_j} T)_{U_j} Y, V)
+ \sum_{i}\left(g((\nabla_{X_i}A)_{X_i} Y,V ) - 2 g (A_V X_i, T_Y X_i) \right) \bigg] \Bigg\},
\end{align*}
\begin{align*}
&g(V^* (X,V)Y ,W)\\
&=g((\nabla_X T)_V W, Y) + g((\nabla_V A)_X Y,W)- g(T_V X, T_W Y) + g(A_X Y, A_Y W)\\
&-\frac{1}{(n-2)} \bigg\{ - g(V,W) \Big[ S^G (X', Y') \circ \pi
- 2 \sum_{i} g(A_X X_i, A_Y X_i) -\sum_{j} g(T_{U_j}X, T_{U_j}Y) \Big]\\
&- g(X,Y) \Big[\hat{S} (V,W) + \sum_{i}\left(g((\nabla_{X_i}T)_V W,X_i)
+ g(A_{X_i} V, A_{X_i}W) \right) \Big] \bigg\},
\end{align*}
\begin{align*}
g(V^* (U,V)W ,X)&= g((\nabla_U T)_V W, X) - g((\nabla_V T)_U W, X)\\
&- \frac{1}{(n-2)} \bigg\{g(V,W) \Big[\sum_{j} g(\nabla_{U_j}T)_{U_j}U,X) \\
&+ \sum_{i} \{g((\nabla_{X_i}A)_{X_i} X,U)-2g(A_X {X_i}, T_U X_i)\}\Big] \\
&- g(U,W) \Big[\sum_{j} g(\nabla_{U_j}T)_{U_j}V,X) \\
&+ \sum_{i} \{g((\nabla_{X_i}A)_{X_i} X,V)-2g(A_X {X_i}, T_V X_i)\}\Big] \bigg\}
\end{align*}
and
\begin{align*}
g(V^* (U,V)W ,F)&=g(\hat{R} (U,V)W, F) + g(T_U W, T_V F) - g(T_V W, T_U F ) \\
&- \frac{1}{(n-2)} \bigg\{ g(F,U) \Big[ \hat{S}(V,W) \\
&+ \sum_{i}\left(g(\nabla_{X_i}T)_V W, {X_i}+ g(A_{X_i}V, A_{X_i}W)\right) \Big] \\
&-g(F,V) \Big[ \hat{S}(U,W) + \sum_{i} \left(g((\nabla_{X_i}T)_U W, X_i)+ g(A_{X_i}U, A_{X_i}W) \right)\Big]\\
&+g(V,W) \Big[\hat{S}(U,F) + \sum_{i}\left(g((\nabla_{X_i}T)_U F, X_i)+g(A_{X_i}U,A_{X_i}F)\right)\Big]\\
&-g(U,W) \Big[\hat{S}(V,F) + \sum_{i}\left(g((\nabla_{X_i}T)_V F, X_i)+ g(A_{X_i} V, A_{X_i}F)\right)\Big] \bigg\}\\
&+\frac{r^M}{(n-1)(n-2)} \{g(V,W)g(U,F)-g(U,W)g(V,F)\}
\end{align*}
where
\begin{equation*}
r^M= \hat{r}+r^G \circ \pi - ||A||^2- ||T||^2.
\end{equation*}
\end{corollary}
Finally, we investigate the $M$-projective curvature tensor along a Riemannian submersion and give a corollary in the case of totally umbilical fibres.
\section{$M$-projective curvature tensor along a Riemannian submersion}
In this section, the curvature relations of the $M$-projective curvature tensor for a Riemannian submersion are examined and a corollary is obtained for this curvature tensor.
\begin{definition}
Let $M^n$ be an $n$-dimensional differentiable manifold of
differentiability class $C^{\infty }$. In 1971, on an $n$-dimensional
Riemannian manifold, the authors of \cite{PM} defined a tensor
field $W^{\ast }$ as
\begin{align*}
W^{\ast }(X,Y)Z&=R^M(X,Y)Z-\frac{1}{2(n-1)}[S^M(Y,Z)X\\
&-S^M(X,Z)Y+g(Y,Z)QX-g(X,Z)QY]\label{5.0}
\end{align*}
and called the tensor $W^{\ast }$ the $M$-projective curvature tensor.
\end{definition}
In addition, on an $n-$dimensional Riemannian manifold $M^n$ the Ricci operator $Q$ is defined by
\begin{align*}
S^M(X,Y) = g(QX,Y).
\end{align*}
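In particular (a standard observation recorded here for completeness), taking the trace of $Q$ with respect to a local orthonormal frame $\{e_i\}_{1\leq i\leq n}$ of $M^n$ recovers the scalar curvature:
\begin{equation*}
r^M=\sum_{i=1}^{n}S^M(e_i,e_i)=\sum_{i=1}^{n}g(Qe_i,e_i)=\mathrm{trace}\,(Q).
\end{equation*}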
\begin{theorem}
Let $(M, g)$ and $(G, g^\prime)$ be Riemannian manifolds,
$$\pi: (M, g) \to(G, g^\prime)$$
a Riemannian submersion, and let $R^M$, $R^G$ and $\hat{R}$ be the Riemannian curvature tensors and $S^M$, $S^G$ and $\hat{S}$ the Ricci tensors of $M$, $G$ and the fibre, respectively. Then for any $U, V, W, F \in \chi^v (M)$ and $X, Y, Z, H\in \chi^h (M)$, we have the following relations for the $M$-projective curvature tensor:
\begin{align*}
&g(W^* (X,Y)Z, H) \\
&=g(R^G (X,Y)Z, H) +2 g (A_X Y, A_Z H) - g(A_Y Z, A_X H) + g(A_X Z, A_Y H) \\
&- \frac{1}{2(n-1)} \Bigg\{g(X, H) \bigg[S^G (Y',Z') \circ \pi
+\frac{1}{2} \left(g(\nabla_Y N,Z)+g(\nabla_Z N, Y)\right) \\
&- 2 \sum_{i} g(A_Y X_i, A_Z X_i) - \sum_{j} g(T_{U_j}Y, T_{U_j}Z) \Bigg] \\
&-g(Y,H) \bigg[S^G (X',Z') \circ \pi + \frac{1}{2} \left(g(\nabla_X N,Z)+ g(\nabla_Z N, X)\right) \\
&-2 \sum_{i} g(A_X X_i, A_Z X_i)- \sum_{j}g(T_{U_j}X, T_{U_j}Z) \bigg] \\
&+g(Y,Z) \bigg[S^G (X',H') \circ \pi + \frac{1}{2} \left(g(\nabla_X N,H)+ g(\nabla_H N, X)\right) \\
&-2 \sum_{i} g(A_X X_i, A_H X_i)- \sum_{j}g(T_{U_j}X, T_{U_j}H) \bigg] \\
&-g(X,Z) \bigg[S^G (Y',H') \circ \pi + \frac{1}{2} \left(g(\nabla_Y N,H)+ g(\nabla_H N, Y)\right) \\
&-2 \sum_{i} g(A_Y X_i, A_H X_i)- \sum_{j}g(T_{U_j}Y, T_{U_j}H) \bigg]\Bigg\},
\end{align*}
\begin{align*}
g(W^* (X,Y)Z, V) &=-g((\nabla_Z A)_X Y,V) - g(A_X Y, T_V Z) + g(A_Y Z, T_V X)- g(A_X Z, T_V Y) \\
&- \frac{1}{2(n-1)} \Bigg\{g(Y, Z) \bigg[g(\nabla_X N,V)
-\sum_{j} g((\nabla_{U_j} T)_{U_j} X, V) \\
&+\sum_{i}\left(g((\nabla_{X_i}A)_{X_i} X,V )- 2 g (A_V X_i, T_X X_i) \right) \bigg] \\
& -g(X,Z) \bigg[g(\nabla_Y N,V)
-\sum_{j} g((\nabla_{U_j} T)_{U_j} Y, V) \\
&+ \sum_{i}\left(g((\nabla_{X_i}A)_{X_i} Y,V ) - 2 g (A_V X_i, T_Y X_i) \right) \bigg] \Bigg\},
\end{align*}
\begin{align*}
g(W^* (X,Y)V, W)&= g((\nabla_V A)_X Y, W)- g((\nabla_W A)_X Y, V)+ g(A_X V, A_Y W)\\
&- g(A_X W, A_Y V) - g(T_V X, T_W Y) + g(T_W X, T_V Y),
\end{align*}
\begin{align*}
&g(W^* (X,V)Y ,W)=g((\nabla_X T)_V W, Y) + g((\nabla_V A)_X Y,W)- g(T_V X, T_W Y) + g(A_X Y, A_Y W)\\
&-\frac{1}{2(n-1)} \bigg\{ - g(V,W) \Big[ S^G (X', Y') \circ \pi + \frac{1}{2} \left(g(\nabla_X N,Y)+ g(\nabla_Y N,X)\right) \\
&- 2 \sum_{i} g(A_X X_i, A_Y X_i) -\sum_{j} g(T_{U_j}X, T_{U_j}Y) \Big] - g(X,Y) \Big[\hat{S} (V,W) -g(N,T_V W) \\
&+ \sum_{i}\left(g((\nabla_{X_i}T)_V W,X_i)+ g(A_{X_i} V, A_{X_i}W) \right) \Big] \bigg\},
\end{align*}
\begin{align*}
g(W^* (U,V)W ,X)&=g((\nabla_U T)_V W, X) - g((\nabla_V T)_U W, X)\\
& - \frac{1}{2(n-1)} \bigg\{g(V,W) \Big[g(\nabla_U N,X) -\sum_{j} g(\nabla_{U_j}T)_{U_j}U,X) \\
&+ \sum_{i} \{g((\nabla_{X_i}A)_{X_i} X,U)-2g(A_X {X_i}, T_U X_i)\}\Big] \\
&- g(U,W) \Big[g(\nabla_V N,X) -\sum_{j} g(\nabla_{U_j}T)_{U_j}V,X) \\
&+ \sum_{i} \{g((\nabla_{X_i}A)_{X_i} X,V)-2g(A_X {X_i}, T_V X_i)\}\Big] \bigg\}
\end{align*}
and
\begin{align*}
&g(W^* (U,V)W ,F)= g(\hat{R} (U,V)W, F) + g(T_U W, T_V F) - g(T_V W, T_U F ) \\
&- \frac{1}{2(n-1)} \bigg\{ g(F,U) \Big[ \hat{S}(V,W)- g(N, T_V W) \\
& + \sum_{i}\left(g(\nabla_{X_i}T)_V W, {X_i}+ g(A_{X_i}V, A_{X_i}W)\right) \Big] \\
&-g(F,V) \Big[ \hat{S}(U,W) -g(N,T_U W)+ \sum_{i} g((\nabla_{X_i}T)_U W, X_i)+ g(A_{X_i}U, A_{X_i}W) \Big]\\
&+g(V,W) \Big[\hat{S}(U,F) -g(N, T_U F)+ \sum_{i}\left(g((\nabla_{X_i}T)_U F, X_i)+g(A_{X_i}U,A_{X_i}F)\right)\Big]\\
&-g(U,W) \Big[\hat{S}(V,F)-g(N,T_V F) + \sum_{i}\left(g((\nabla_{X_i}T)_V F, X_i)+ g(A_{X_i} V, A_{X_i}F)\right)\Big] \bigg\}.
\end{align*}
\end{theorem}
\begin{proof}
We prove the sixth equation of this theorem.
The following equations are obtained by taking the inner product of $W^*$ with $F$ and using equations \eqref{g1} and \eqref{S1}.
\begin{align*}
g(W^* (U,V)W,F)&=g(R^M(U,V)W,F)- \frac{1}{2(n-1)} \bigg\{g(F,U)S^M(V,W)\\
&-g(F,V)S^M(U,W) +g(V,W)S^M(U,F)-g(U,W)S^M(V,F) \bigg\}, \nonumber
\end{align*}
\begin{align*}
g(R^M(U,V)W,F)=g(\hat{R}(U,V)W,F)+g(T_U W, T_V F)- g(T_V W, T_U F)
\end{align*}
and
\begin{align*}
S^M(U,V)= \hat{S}(U,V)- g(N, T_U V) + \sum_{i}\left\{g((\nabla_{X_i}T)_U V, X_i)+g(A_{X_i}U, A_{X_i}V) \right\}.
\end{align*}
When these equations are substituted in $W^*$, the given result is obtained. Other equations are similarly proved by using Theorem \ref{thm1} and Proposition \ref{pro1}.
\end{proof}
\begin{corollary}
Let $\pi: (M, g) \to(G, g^\prime)$ be a Riemannian submersion, where $(M, g)$ and $(G, g^\prime)$ are Riemannian manifolds. If the Riemannian submersion has totally umbilical fibres, that is, $N = 0$, then the $M$-projective curvature tensor is given by
\begin{align*}
&g(W^* (X,Y)Z, H)\\
&= g(R^G (X,Y)Z, H) +2 g (A_X Y, A_Z H) - g(A_Y Z, A_X H) + g(A_X Z, A_Y H) \\
&- \frac{1}{2(n-1)} \Bigg\{g(X, H) \bigg[S^G (Y',Z') \circ \pi
- 2 \sum_{i} g(A_Y X_i, A_Z X_i) - \sum_{j} g(T_{U_j}Y, T_{U_j}Z) \bigg] \\
&-g(Y,H) \bigg[S^G (X',Z') \circ \pi
-2 \sum_{i} g(A_X X_i, A_Z X_i)- \sum_{j}g(T_{U_j}X, T_{U_j}Z) \bigg] \\
&+g(Y,Z) \bigg[S^G (X',H') \circ \pi
-2 \sum_{i} g(A_X X_i, A_H X_i)- \sum_{j}g(T_{U_j}X, T_{U_j}H) \bigg] \\
&-g(X,Z) \bigg[S^G (Y',H') \circ \pi
-2 \sum_{i} g(A_Y X_i, A_H X_i)- \sum_{j}g(T_{U_j}Y, T_{U_j}H) \bigg]\Bigg\},
\end{align*}
\begin{align*}
g(W^* (X,Y)Z, V) &=-g((\nabla_Z A)_X Y,V) - g(A_X Y, T_V Z) + g(A_Y Z, T_V X)- g(A_X Z, T_V Y) \\
&- \frac{1}{2(n-1)} \Bigg\{g(Y, Z) \bigg[
-\sum_{j} g((\nabla_{U_j} T)_{U_j} X, V) \\
&+\sum_{i}\left(g((\nabla_{X_i}A)_{X_i} X,V )- 2 g (A_V X_i, T_X X_i) \right) \bigg] \\
& -g(X,Z) \bigg[
-\sum_{j} g((\nabla_{U_j} T)_{U_j} Y, V) \\
&+ \sum_{i}\left(g((\nabla_{X_i}A)_{X_i} Y,V ) - 2 g (A_V X_i, T_Y X_i) \right) \bigg] \Bigg\},
\end{align*}
\begin{align*}
&g(W^* (X,V)Y ,W)\!=\!g((\nabla_X T)_V W, Y)\!+\!g((\nabla_V A)_X Y,W)\!-\! g(T_V X, T_W Y)\!+\!g(A_X Y, A_Y W)\\
&-\frac{1}{2(n-1)} \bigg\{ - g(V,W) \Big[ S^G (X', Y') \circ \pi
- 2 \sum_{i} g(A_X X_i, A_Y X_i) -\sum_{j} g(T_{U_j}X, T_{U_j}Y) \Big]\\
& - g(X,Y) \Big[\hat{S} (V,W) + \sum_{i}\left(g((\nabla_{X_i}T)_V W,X_i)
+ g(A_{X_i} V, A_{X_i}W) \right) \Big] \bigg\},
\end{align*}
\begin{align*}
g(W^* (U,V)W ,X)&=g((\nabla_U T)_V W, X) - g((\nabla_V T)_U W, X)\\
& - \frac{1}{2(n-1)} \bigg\{g(V,W) \Big[-\sum_{j} g((\nabla_{U_j}T)_{U_j}U,X) \\
&+ \sum_{i} \{g((\nabla_{X_i}A)_{X_i} X,U)-2g(A_X {X_i}, T_U X_i)\}\Big] \\
&- g(U,W) \Big[-\sum_{j} g((\nabla_{U_j}T)_{U_j}V,X) \\
&+ \sum_{i} \{g((\nabla_{X_i}A)_{X_i} X,V)-2g(A_X {X_i}, T_V X_i)\}\Big] \bigg\}
\end{align*}
and
\begin{align*}
&g(W^* (U,V)W ,F)\\
&=g(\hat{R} (U,V)W, F) + g(T_U W, T_V F) - g(T_V W, T_U F ) \\
&- \frac{1}{2(n-1)} \bigg\{ g(F,U) \Big[ \hat{S}(V,W)
+ \sum_{i}\left(g((\nabla_{X_i}T)_V W, X_i)+ g(A_{X_i}V, A_{X_i}W)\right) \Big] \\
&-g(F,V) \Big[ \hat{S}(U,W) + \sum_{i}\left(g((\nabla_{X_i}T)_U W, X_i)+ g(A_{X_i}U, A_{X_i}W)\right) \Big]\\
&+g(V,W) \Big[\hat{S}(U,F) + \sum_{i}\left(g((\nabla_{X_i}T)_U F, X_i)+g(A_{X_i}U,A_{X_i}F)\right)\Big]\\
&-g(U,W) \Big[\hat{S}(V,F) + \sum_{i}\left(g((\nabla_{X_i}T)_V F, X_i)+ g(A_{X_i} V, A_{X_i}F)\right)\Big] \bigg\}.
\end{align*}
\end{corollary}
\begin{rem}
In this paper the authors have investigated the $M$-projective curvature tensor along Riemannian submersions and obtained several results in the case of totally umbilical fibres. It would therefore be worthwhile to examine other curvature tensors along Riemannian submersions.
\end{rem}
\end{ack}
\end{document}
|
\begin{document}
\title{\bf\Large A note on zero-sum $5$-flows in regular graphs}
\begin{abstract}
Let $G$ be a graph. A {\it zero-sum flow} of $G$ is an assignment
of non-zero real numbers to the edges such that the sum of the
values of all edges incident with each vertex is zero. Let $k$ be
a natural number. A {\it zero-sum $k$-flow} is a flow with values
from the set \mbox{$\{\pm 1,\ldots ,\pm(k-1)\}$}. It has been
conjectured that every $r$-regular graph, $r\geq 3$, admits a zero-sum
$5$-flow. In this paper we give an affirmative answer to this
conjecture, except for $r=5$.
\end{abstract}
\date{}
\noindent{\bf\large 1. Introduction}
Nowhere-zero flows on graphs were introduced by Tutte \cite{tut} in
1949 and since then have been extensively studied by many
authors. A great deal of research in the area has been motivated
by Tutte's 5-Flow Conjecture which states that every $2$-edge connected
graph can have its edges directed and labeled by integers from
$\{1, 2, 3, 4\}$ in such a way that Kirchhoff’s current law is
satisfied at each vertex. In 1983, Bouchet \cite{bouchet}
generalized this concept to bidirected graphs. A \emph{bidirected graph} $G$ is a graph with vertex set $V(G)$
and edge set $E(G)$ such that each edge is oriented as one of
the four possibilities:
\begin{picture}(45,5)(0,-2)
\thicklines
\put(15,0){\vector(-1,0){4}}\put(27,0){\vector(1,0){4}}
\thinlines \put(0,0){\line(1,0){40}} \put(0,0){\circle*{4}}
\put(40,0){\circle*{4}}
\end{picture},
\begin{picture}(45,5)(0,-2)
\thicklines \put(15,0){\vector(1,0){4}}\put(27,0){\vector(1,0){4}}
\thinlines \put(0,0){\line(1,0){40}} \put(0,0){\circle*{4}}
\put(40,0){\circle*{4}}
\end{picture},
\begin{picture}(45,5)(0,-2)
\thicklines
\put(15,0){\vector(1,0){4}}\put(27,0){\vector(-1,0){4}}
\thinlines \put(0,0){\line(1,0){40}} \put(0,0){\circle*{4}}
\put(40,0){\circle*{4}}
\end{picture},
\begin{picture}(45,5)(0,-2)
\thicklines
\put(15,0){\vector(-1,0){4}}\put(27,0){\vector(-1,0){4}}
\thinlines \put(0,0){\line(1,0){40}} \put(0,0){\circle*{4}}
\put(40,0){\circle*{4}}
\end{picture}.
Let $G$ be a bidirected graph. For every $v \in V(G)$, the
set of all edges with tails (respectively, heads) at $v$ is denoted by
$E^+(v)$ (respectively, $E^-(v)$). The function $f:E(G) \longrightarrow
\mathbb{R}$ is a \emph{bidirected flow} of $G$ if for every $v
\in V(G)$, we have
$$\sum_{e \in E^+(v)} f(e) = \sum_{e \in E^-(v)} f(e).$$ If $f$ takes its values from the set \mbox{$\{\pm 1,\ldots ,\pm(k-1)\}$}, then it is called a \emph{nowhere-zero bidirected $k$-flow}.
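In particular, if every edge of $G$ is oriented so that both of its ends are heads, then $E^+(v)=\emptyset$ for every vertex $v$, and the condition above reduces to the sum of the values of all edges incident with $v$ being zero; that is, a bidirected flow for such an orientation is exactly a zero-sum flow of $G$ in the sense defined below.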
Consequently, Bouchet proposed the following interesting conjecture.
\noindent {\bf Bouchet's Conjecture.} {\rm \cite{bouchet, xui}}
\textit{Every bidirected graph that has a nowhere-zero bidirected
flow admits a nowhere-zero bidirected $6$-flow.}
Bouchet proved that his conjecture is true if $6$ is replaced by
$216$. Then Zyka reduced $216$ to $30$ \cite{zyk}.
Let $G$ be a graph. A {\it zero-sum flow} for $G$ is an assignment of non-zero real numbers
to the edges such that the sum of the values of all edges
incident with each vertex is zero. Let $k$ be a natural number. A {\it zero-sum $k$-flow} is a
flow with values from the set \mbox{$\{\pm 1,\ldots
,\pm(k-1)\}$}. The following conjecture was posed for zero-sum flows in graphs.
\noindent{\bf Zero-Sum Conjecture (ZSC).} {\rm \cite{zerosum1}} \textit{If $G$ is a graph
with a zero-sum flow, then $G$ admits a zero-sum $6$-flow.}
The following conjecture is an improved version of ZSC for regular graphs.
\noindent{\bf Conjecture A.} {\rm \cite{d}} \textit{ Every $r$-regular graph ($r\geq 3$)
admits a zero-sum $5$-flow.}
Recently, in connection with this conjecture the following two theorems were proved.
\begin{thm}{ \em \cite{zerosum1}} \label{evenreg}
\textit{ Let $r$ be an even integer with $r\geq 4$. Then every
$r$-regular graph has a zero-sum $3$-flow.}
\end{thm}
\begin{thm}{ \em \cite{d}}\label{3reg} \textit{ Let $G$ be an $r$-regular graph.
If $r$ is divisible by $3$, then $G$ has a zero-sum $5$-flow.}
\end{thm}
\begin{remark} {There are some regular graphs with no zero-sum $4$-flow. To see this,
consider the graph $G$ given in Figure $1$. Suppose, to the contrary, that $G$ has a zero-sum $4$-flow. Since
the sum of the values of all edges incident with each vertex is zero, for every $v \in V(G)$
a value $-2$ or $2$ must appear on some edge incident with $v$. On the other hand, two values with absolute
value $2$ cannot appear on edges incident with the same vertex. So all edges of $G$ with values $\pm 2$
form a perfect matching. But by Tutte's celebrated theorem \cite[p.76]{bondy}, $G$ has no perfect matching, a contradiction.}
\end{remark}
\special{psfile=3r.eps hscale=40 vscale=40 hoffset=90
voffset=-220}
$$\textmd{Figure $1$. A $3$-regular graph with no zero-sum $4$-flow}$$
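On the other hand, small regular graphs may admit zero-sum flows with very few values. For example, in $K_4$ assign $2$ to the two edges of a perfect matching and $-1$ to the remaining four edges; every vertex is incident with exactly one matching edge and two non-matching edges, so the sum of the values at each vertex is $2-1-1=0$. Hence $K_4$ admits a zero-sum $3$-flow.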
In $2010$, the following result was proved.
\begin{thm} {\em \cite{d}}\label{bou} \textit{Bouchet's Conjecture
and ZSC are equivalent.}
\end{thm}
Motivated by Bouchet's Conjecture and Theorem \ref{bou},
we focus our attention on establishing Conjecture A. We show that Conjecture A is true except for $r = 5$.
\noindent{\bf\large 2. The Main Result }
In this section we prove that every $r$-regular graph, $r\geq 3$,
$r\neq 5$, admits a zero-sum $5$-flow. Before establishing our main
result we need some notations and definitions.
Let $G$ be a finite undirected graph with vertex set $V(G)$ and edge set $E(G)$,
where multiple edges and loops are admissible. A {\it
$k$-regular} graph is a graph where each vertex is of degree $k$.
A subgraph $F$ of a graph $G$ is a
{\it factor} of $G$ if $F$ is a spanning subgraph of $G$. If a
factor $F$ has all of its degrees equal to $k$, it is called a
{\it$k$-factor}. Thus a
$2$-factor is a disjoint union of finitely many cycles that cover
all the vertices of $G$. A {\it $k$-factorization} of $G$ is a partition of the edges of $G$ into disjoint $k$-factors.
For integers $a$ and $b$, $1 \leq a \leq b$, an {\it $[a, b]$-factor} of
$G$ is defined to be a factor $F$ of $G$ such that
$a \leq d_F (v) \leq b$, for every $v \in V (G)$. For any vertex $v\in V(G)$, let
$N_{G}(v)=\{\,u \in V(G)\,|\,uv\in E(G)\,\}$.
The following two theorems are also needed.
\begin{thm}{ \em \cite{handbook}}\label{peterson}
\textit{Every $2k$-regular multigraph admits a $2$-factorization.}
\end{thm}
\begin{thm}{ \em \cite{kano}}\label{kano}
\textit{Let $r \geq 3$ be an odd integer and let $k$ be an integer such
that $1 \leq k \leq \frac{2r}{3}$. Then every $r$-regular graph
has a $[k-1, k]$-factor each component of which is regular.}
\end{thm}
\begin{lem}\label{qr}
\textit{Let $G$ be an $r$-regular graph. Then
for every even integer $q$, $ 2r \leq q \leq 4r$, there exists
a function $f: E(G) \rightarrow \{2, 3, 4\}$ such that for every
$u \in V(G)$, $\sum_{v \in N_G(u)}f(uv)=q$.}
\end{lem}
\begin{proof}
{First assume that $r$ is an odd integer. For every edge $e=uv$, we add a new edge $e'=uv$ to the graph $G$ and call the resulting graph $G'$.
Clearly, $G'$ is a $2r$-regular
multigraph. By Theorem \ref{peterson}, $G'$ admits a $2$-factorization with $2$-factors $F_1, \ldots, F_{r}$.
Now, for every $e \in F_i$, $1 \leq i \leq r$, we define a function $g: E(G') \rightarrow \{1,2\}$ as
follows:
$$g(e)=\left\{
\begin{array}{ll}
2, & \hbox{$1 \leq i \leq \frac{q-2r}{2}$;}\\
1, & \hbox{$ \frac{q-2r}{2}<i$.}
\end{array}
\right.$$
Therefore for each $u
\in V(G')$, $\sum_{v \in N_{G'}(u)}g(uv)=q$. Now, define a function $f: E(G) \rightarrow \{2,3,4\}$ such that
for every $e=uv \in E(G)$, $f(e)= g(e)+g(e')$, where $e'=uv$ in $G'$.
Then for every $u \in V(G)$, $\sum_{v \in N_G(u)}f(uv)=q$, as desired.
Now, let $r$ be an even integer. Since $G$ is an
$r$-regular graph, by Theorem \ref{peterson}, $G$ admits a
$2$-factorization with $2$-factors $F_1, \ldots, F_{\frac{r}{2}}$. Now, for every $e\in F_i$, $1 \leq i \leq\frac{ r}{2}$, we define a function
$f: E(G) \rightarrow \{2, 3, 4\}$ as follows:
$$f(e)=\left\{
\begin{array}{ll}
4, & \hbox{$1 \leq i \leq \lfloor \frac{q-2r}{4}\rfloor $;} \\
3, & \hbox{$\lfloor \frac{q-2r}{4} \rfloor < i\leq \lceil \frac{q-2r}{4} \rceil$;} \\
2, & \hbox{$ \lceil \frac{q-2r}{4} \rceil<i.$}
\end{array}
\right.$$
It is not hard to verify that for every $u \in V(G)$, $\sum_{v
\in N_G(u)}f(uv)=q$, as desired. }
\end{proof}
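To illustrate the construction in the odd case, let $G$ be a $3$-regular graph and let $q=8$. Then $G'$ is $6$-regular and admits a $2$-factorization $F_1, F_2, F_3$; since $\frac{q-2r}{2}=1$, we put $g=2$ on the edges of $F_1$ and $g=1$ on the edges of $F_2$ and $F_3$. Every vertex of $G'$ lies on exactly two edges of each $F_i$, so $\sum_{v \in N_{G'}(u)}g(uv)=2\cdot 2+2\cdot 1+2\cdot 1=8=q$, and $f(e)=g(e)+g(e')$ takes its values in $\{2, 3, 4\}$ with the same vertex sums in $G$.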
Now, we are in a position to prove our main theorem.
\begin{thm}
\textit{Let $r \geq 3$ and $r \neq 5$. Then
every $r$-regular graph has a zero-sum $5$-flow.}
\end{thm}
\begin{proof}
{
First we prove the theorem for $r=7$. Let $G$ be a $7$-regular
graph. Then by Theorem \ref{kano}, $G$ has a $[3, 4]$-factor, say $H$,
whose components are regular. Let $H_1$ be the union of the
$3$-regular components of $H$ and let $H_2$ be the union of
$4$-regular components of $H$. By Theorem \ref{peterson}, $H_2$
can be decomposed into two $2$-factors $H'_{2}$ and $H''_{2}$.
Assign $1$ and $2$ to all edges of $H'_{2}$ and $H''_{2}$, respectively.
By Lemma \ref{qr}, there exists a function $f: E(H_1)
\rightarrow \{2, 3, 4\}$ such that for every $u \in V(H_1)$,
$\sum_{v \in N_{H_1}(u)}f(uv)=8$. Now, assign $-2$ to every edge in $E(G)\setminus E(H)$ and we are done.
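Indeed, every vertex of a $4$-regular component of $H$ is incident with two edges of $H'_{2}$, two edges of $H''_{2}$ and three edges of $E(G)\setminus E(H)$, so the sum of the values at such a vertex is $2\cdot 1+2\cdot 2+3\cdot(-2)=0$; every vertex of a $3$-regular component of $H$ receives $8$ from the edges of $H_1$ and $4\cdot(-2)=-8$ from the four edges of $E(G)\setminus E(H)$, so the sum is again zero. Since all values lie in $\{\pm 1, \ldots, \pm 4\}$, this yields a zero-sum $5$-flow.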
Now, let $r\geq 9$ be an odd integer. By Theorem
\ref{kano}, for every $k$, $k \leq \frac{2r}{3}$, $G$ has a
$[k-1, k]$-factor whose components are regular. Let $k=\lfloor
\frac{2r}{3} \rfloor$, $k'= r-k$, and $H$ be a $[k-1,
k]$-factor of $G$ such that $H_1$ be the union of $(k-1)$-regular
subgraph of $H$ and $H_2=H \setminus H_1$. It can be easily
checked that $k \leq 2k' \leq 2k-4$. Hence by Lemma \ref{qr}, there
exists a function $f:E(H_1) \longrightarrow \{2, 3, 4\}$
such that for every $u \in V(H_1)$, $\sum_{v \in
N_{H_1}(u)}f(uv)= 4k'+4$. Also by Lemma \ref{qr}, there exists a
function $f:E(H_2) \longrightarrow \{2, 3, 4\}$ such that
for every $u \in V(H_2)$,
$\sum_{v \in N_{H_2}(u)}f(uv)= 4k'$.
Finally assign $-4$ to every edge of $E(G)\setminus E(H)$. Now, by Theorem \ref{evenreg} and
Theorem \ref{3reg} the proof is complete. }
\end{proof}
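We note that, in the case $r \geq 9$ above, both applications of Lemma \ref{qr} are legitimate: since $H_1$ is $(k-1)$-regular and $H_2$ is $k$-regular, the inequality $k \leq 2k' \leq 2k-4$ gives $2(k-1) \leq 4k'+4 \leq 4(k-1)$ and $2k \leq 4k' \leq 4k$. Moreover, a vertex of $H_1$ is incident with $r-(k-1)=k'+1$ edges of $E(G)\setminus E(H)$ and a vertex of $H_2$ with $r-k=k'$ such edges, so the vertex sums are $(4k'+4)+(k'+1)(-4)=0$ and $4k'+k'(-4)=0$, respectively, with all values in $\{\pm 1, \ldots, \pm 4\}$.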
\noindent {\bf Acknowledgements.} The authors are indebted to the
School of Mathematics, Institute for Research in Fundamental
Sciences (IPM) for the support. The research of the first author and the
second author was in part supported by grants from IPM (No.
88050212) and (No. 88050042), respectively.
\providecommand{\bysame}{\leavevmode\hbox
to3em{\hrulefill}\thinspace}
\end{document}
|
\begin{document}
\maketitle
\begin{abstract}
Jakobson and Nadirashvili \cite{JN} constructed a sequence of eigenfunctions
on $T^2$ with a bounded number of critical points, answering in the negative
the question raised by Yau \cite{Yau1} which asks whether the number of critical points
of eigenfunctions for the Laplacian increases with the corresponding
eigenvalues.
The present paper finds three interesting eigenfunctions
on the minimal isoparametric hypersurface $M^n$ in
$S^{n+1}(1)$. The corresponding
eigenvalues are $n$, $2n$ and $3n$, while their critical sets consist of
$8$ points, a submanifold (infinitely many points) and $8$ points, respectively. On one of
its focal submanifolds, a similar phenomenon occurs.
\end{abstract}
\section{Introduction}
Eigenvalues of Laplacian are very important intrinsic invariants,
which reflect the geometry of manifolds very precisely.
Unfortunately, there are few manifolds whose eigenvalues are clearly
known, not to mention the eigenfunctions. The numbers of critical
points of eigenfunctions are even more difficult to determine.
However, as S.T.Yau pointed out, this number is closely related to
many important questions, which makes it worthy of being studied
extensively. In this regard, S.~T.~Yau \cite{Yau1} raised a question:
is it true that the number of critical points of the $k$-th eigenfunction on a
compact Riemannian manifold increases with $k$? He also investigated this problem in
the surface case (cf. \cite{Yau2}).
In 1999, Jakobson and Nadirashvili \cite{JN} constructed
a metric on a $2$-dimensional torus and a sequence of eigenfunctions
such that the corresponding eigenvalues go to infinity while the
number of critical points remains bounded, in fact constant. Strictly speaking,
however, this remarkable example does not refute Yau's conjecture
in the sense of ``non-decreasing''.
In the present paper, by taking advantage of a natural
concept--isoparametric hypersurface, we find (based on \cite {Sol})
an isoparametric function, which is an eigenfunction on the minimal
isoparametric hypersurface $M^n$ of OT-FKM type in $S^{n+1}(1)$.
Combining with the other two well-known eigenfunctions, it
constitutes a sequence of eigenfunctions with increasing eigenvalues,
but the numbers of their critical points are not monotonic at all.
Similarly, another isoparametric function (indeed an eigenfunction)
expressed in the same form arises in one of the focal submanifolds
of $M^n$ mentioned before. Together with the other eigenfunction,
it constitutes a sequence of eigenfunctions with similar property
as that on $M^n$.
One of the main results of the present paper is the following:
\begin{thm}\label{thm1}
\emph{Let $M^n$ be the minimal isoparametric hypersurface of
OT-FKM type in the unit sphere $S^{n+1}(1)$. Then there exist three
eigenfunctions $\varphi_1$, $\varphi_2$ and $\varphi_3$ defined on
$M^n$, corresponding to eigenvalues $n$, $2n$ and $3n$, whose
critical sets consist of $8$ points, a submanifold and $8$ points,
respectively. To be specific, $\varphi_1$ and $\varphi_3$ are both
Morse functions; $\varphi_2$ is an isoparametric function on $M^n$,
whose critical set $C(\varphi_2)$ is:
\begin{equation}\label{varphi2}
C(\varphi_2)=N_+\cup N_-,\quad \dim N_+=\dim N_-= n-m ~(1\leq m <
n),
\end{equation}
where the number $m$ will be introduced in the definition of
OT-FKM type.
}
\end{thm}
\begin{rem}
The Morse number (the minimal number of critical points of all Morse
functions) of a compact isoparametric hypersurface with $g=4$
distinct principal curvatures in the unit sphere is equal to $2g=8$
(\emph{cf.} \cite{CR}).
\end{rem}
Firstly, to clarify notations, we denote the Laplacian on an
$n$-dimensional compact manifold $M^n$ by $\Delta f= \text{div}
\nabla f$, and say $\lambda_k$ its $k$-th eigenvalue with
multiplicity ($\lambda_0=0<\lambda_1<\lambda_2<...$) if $\Delta f_k
+ \lambda_k f_k =0$ for some $f_k: M^n\rightarrow \mathbb{R}$.
Correspondingly, $f_k$ is called the $k$-th eigenfunction. The
present paper is mainly concerned with the number of critical points
of the eigenfunction $f_k$.
Recall that a hypersurface $M^n$ in a Riemannian manifold
$\widetilde{M}^{n+1}$ is \emph{isoparametric} if it is a level
hypersurface of an isoparametric function $f$ on
$\widetilde{M}^{n+1}$, that is, a non-constant smooth function $f:
$\widetilde{M}^{n+1}\rightarrow \mathbb{R}$ satisfying (cf. \cite{Wan, GT2}):
\begin{equation}\label{ab}
\left\{ \begin{array}{ll}
~~|\widetilde{\nabla} f|^2= b(f)\\
\quad\widetilde{\triangle} f~~=a(f)
\end{array}\right.
\end{equation}
where $b$ and $a$ are smooth and continuous functions on $\mathbb{R}$, respectively.
In this meaning,
the \emph{focal varieties} are the preimages of the global maximum
and minimum values (if exist) of $f$, which we denote by $M_+$ and $M_-$,
respectively. They are in fact both minimal submanifolds of
$\widetilde{M}^{n+1}$ with codimensions $m_++1$ and $m_-+1$ in
$\widetilde{M}^{n+1}$, respectively (cf. \cite{Wan}, \cite{Th},
\cite{GT1}).
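A standard example, with $\widetilde{M}^{n+1}=S^{n+1}(1)$, is the height function $f(x)=\langle x, a\rangle$ for a fixed unit vector $a\in \mathbb{R}^{n+2}$: it satisfies $|\widetilde{\nabla} f|^2=1-f^2$ and $\widetilde{\triangle} f=-(n+1)f$, hence is isoparametric; its level hypersurfaces are geodesic hyperspheres and its focal varieties $M_{\pm}$ are the two points $\pm a$.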
As asserted by \'{E}lie Cartan, an isoparametric hypersurface in the
unit sphere is indeed a hypersurface with constant principal
curvatures. Let $g$ be the number of distinct principal curvatures with multiplicity $m_i$ ($i=1, \cdots, g$).
An elegant result of M\"{u}nzner states that $g$ can be only $1, 2, 3, 4$ or $6$, and $m_i=m_{i+2}$ (subscripts mod $g$).
To clarify the notations, we denote $m_+=:m_1$ and $m_-=:m_2$.
Up to now, the isoparametric hypersurfaces with $g = 1, 2, 3, 6$ are
completely classified (cf. \cite{DN} and \cite{Miy}). For
isoparametric hypersurfaces with $g=4$, Cecil-Chi-Jensen
(\cite{CCJ}), Immervoll (\cite{Imm}) and Chi (\cite{Chi, Chi2}) proved a
far reaching result that they are all of OT-FKM type except for the homogeneous case
with $(m_+, m_-)=(2, 2), (4, 5)$.
From now on, we are specifically concerned with the isoparametric hypersurfaces of OT-FKM type in $S^{n+1}(1)$
with four distinct principal curvatures. For a symmetric Clifford system $\{P_0,...,P_m\}$
on $\mathbb{R}^{2l}$, \emph{i.e.}, $P_i$'s are symmetric matrices
satisfying $P_iP_j+P_jP_i=2\delta_{ij}I_{2l}$, the \emph{OT-FKM type
isoparametric hypersurfaces} are level hypersurfaces of $f:=F|_{S^{2l-1}}$ with $F$ defined
by Ferus, Karcher and M\"{u}nzner (cf. \cite{FKM}):
\begin{eqnarray}\label{OT-FKM isop. poly.}
&& \quad F: \mathbb{R}^{2l} \rightarrow \mathbb{R}\nonumber\\
&& F(x) = |x|^4 - 2\displaystyle\sum_{\alpha = 0}^{m}{\langle
P_{\alpha}x,x\rangle^2}
\end{eqnarray}
The pairs $(m_+, m_-)$ of the OT-FKM type are $(m, l-m-1)$, provided
$m>0$ and $l-m-1>0$, where $l = k\delta(m)$ $(k=1,2,3,...)$, $\delta(m)$ is the dimension
of an irreducible module of the Clifford algebra $C_{m-1}$, which we list below:
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|c|c|}
\hline
$m$ & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & $m+8$ \\
\hline
$\delta(m)$ & 1 & 2 & 4 & 4 & 8 & 8 & 8 & 8 & $16\delta(m)$\\
\hline
\end{tabular}
\end{center}
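For instance, the last column of the table encodes the periodicity $\delta(m+8)=16\,\delta(m)$, so that $\delta(9)=16$, $\delta(10)=32$ and $\delta(17)=256$.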
We now fix $M^n$ to be the minimal isoparametric hypersurface of
OT-FKM type in $S^{n+1}(1)$, and $f$ to be $f:=F|_{S^{2l-1}}$ with $F$
defined in (\ref{OT-FKM isop. poly.}). Choosing a point $q_1\in
S^{n+1}(1)\backslash \{M_+, M_-, M^n\}$, we define three
eigenfunctions $\varphi_1$, $\varphi_2$ (following \cite{Sol}) and
$\varphi_3$ as follows:
\begin{eqnarray}
&& \varphi_1: M^n\rightarrow \mathbb{R},\qquad\qquad \varphi_2: M^n\rightarrow \mathbb{R}\qquad\qquad \varphi_3: M^n\rightarrow \mathbb{R}\nonumber\\
&& \qquad x \mapsto \langle x, q_1\rangle, \qquad\qquad\quad x \mapsto \langle Px, x\rangle \quad\qquad\qquad x \mapsto \langle \xi(x), q_1\rangle
\end{eqnarray}
where $\xi$ is a unit normal vector field on $M^n$;
$P\in \Sigma:=\Sigma (P_0,...,P_m)$, the unit sphere in
$\mathrm{\mathrm{Span}}\{P_0,...,P_m\}$, which is called \emph{the Clifford sphere}
(see Definition 3.6 of \cite{FKM}).
\noindent
\begin{rem}
It was proved by the authors that the first eigenvalue of the closed
minimal isoparametric hypersurface $M^n$ in $S^{n+1}(1)$ is just $n$
(cf. \cite{TY}, \cite{TXY}). As a corollary, the coordinate function
restricted on $M^n$, $\varphi_1$, is the first eigenfunction.
\end{rem}
With all the preconditions, a direct verification reveals that the
eigenvalues corresponding to $\varphi_1$, $\varphi_2$ and
$\varphi_3$ are $n$, $2n$ and $3n$, respectively.
Moreover, with our choice of $q_1\in S^{n+1}(1)\backslash \{M_+,
M_-, M^n\}$, a simple application of isoparametric geometry shows
that $\varphi_1$ and $\varphi_3$ are both Morse functions with
$2g=8$ critical points. The more fascinating result is that
$\varphi_2$ is indeed an isoparametric function on $M^n$, thus by
virtue of \cite{Wan}, the critical set of $\varphi_2$ are just the
union of its focal submanifolds $N_+$ and $N_-$. For the proof of
Theorem \ref{thm1}, we need the following lemma
\begin{lem}\label{N+N-}
\emph{For focal submanifolds $N_+$ and $N_-$ of $\varphi_2$ on
$M^n$, we have diffeomorphisms:
\begin{equation*}
N_+\underset{diff.}{\cong}N_-\underset{diff.}{\cong}M_+ =\{x\in
S^{n+1}(1)~|~\langle P_0x, x\rangle=\langle P_1x,
x\rangle=\cdots=\langle P_mx, x\rangle=0\}.
\end{equation*}
Particularly, in the case of $m=1$, each level
(isoparametric) hypersurface of $\varphi_2$ is minimal in $M^n$.}
\end{lem}
\begin{rem}
When $m=1$, the codimensions of $N_+$ and $N_-$ in $M^n$ are $1$; this is the so-called improper isoparametric case (cf. p.~165 of \cite{GT2}).
\end{rem}
As we stated before, another counterexample of Yau's
conjecture appears on the focal submanifold $M_-:=f^{-1}(-1)$ with
dimension $l+m-1$. In a similar way, we define two eigenfunctions
$\omega_1$, $\omega_2$ on $M_-$:
\begin{eqnarray}\label{omega12}
&& \omega_1: M_- \rightarrow \mathbb{R}\qquad\qquad\qquad\qquad \omega_2: M_-\rightarrow \mathbb{R}\nonumber\\
&& \qquad x \mapsto \langle Px, x\rangle\qquad\qquad\quad\qquad\qquad x \mapsto \langle x, q_2\rangle,
\end{eqnarray}
where $P\in \Sigma=\Sigma(P_0, P_1,...,P_m)$, $q_2\in
S^{n+1}(1)\backslash \{M_+, M_-\}$. Correspondingly, we have the
following theorem:
\begin{thm}\label{thm2}
\emph{Let $M_-:=f^{-1}(-1)$, a focal submanifold of OT-FKM type in
the unit sphere $S^{n+1}(1)$. Then there exist two eigenfunctions
$\omega_1$ and $\omega_2$ defined on $M_-$, corresponding to
eigenvalues $4m$ and $l+m-1$, whose critical sets consist of a
submanifold and $4$ points, respectively. To be specific, $\omega_2$
is a Morse function; $\omega_1$ is an isoparametric function on
$M_-$, whose critical set $C(\omega_1)$ is:
\begin{equation}\label{omega1}
C(\omega_1)=V_+\cup V_-, \quad \dim V_+=\dim V_-= l-1.
\end{equation}}
\end{thm}
\begin{rem}
The Morse number of each focal submanifold of a compact
isoparametric hypersurface with $g=4$ distinct principal curvatures
in the unit sphere is equal to $g=4$ (\emph{cf.} \cite{CR}).
\end{rem}
For the proof of Theorem \ref{thm2}, we need the following:
\begin{lem}\label{V+V-}
\emph{For focal submanifolds $V_+$ and $V_-$ of $\omega_1$ on $M_-$,
we have isometries:
\begin{equation*}
V_+\underset{isom.}{\cong}V_-\underset{isom.}{\cong}S^{l-1}(1).
\end{equation*}
Particularly, in the improper case, i.e. $m=1$, each level
(isoparametric) hypersurface of $\omega_1$ is minimal in $M_-$.}
\end{lem}
Comparing with the values of $\delta(m)$ in the previous table, we
observe that $4m<l+m-1$ in most cases. More precisely, $4m<l+m-1$ as
long as $k\geq 5$ and $m\leq 9$; $4m<l+m-1$ holds true for any $k$
when $m\geq 10$. Therefore, with an appropriate choice of $k$, we
can always make eigenfunctions $\omega_1$ and $\omega_2$ another
counterexample of Yau's conjecture.
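Indeed, $4m<l+m-1$ is equivalent to $l>3m+1$. For $m=1$ we have $l=k\delta(1)=k$, so the inequality holds precisely when $k\geq 5$, while for $m=10$ already $l=32k\geq 32>31=3m+1$ for every $k$, and the rapid growth of $\delta(m)$ ensures the same for all $m\geq 10$; this explains the bounds stated above.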
Bearing these examples in mind, we would like to raise the following
question:
\noindent \textbf{Question:} For a generic metric on a compact
manifold $M$, is the number of critical points of the first
eigenfunction (must be a Morse function, according to Uhlenbeck \cite{Uh})
equal to the Morse number of $M$?\footnote{\emph{Added in proof}. It was recently proved by A. Enciso and D. Peralta-Salas that on a compact manifold, there is a Riemannian metric such that the first nontrivial eigenfunction can have as many non-degenerate critical points as one wishes (bigger in particular than the Morse number of the manifold). Moreover, any other metric $C^{\infty}$ close to it carries the same property(cf. \cite{EP-S}).}
\section{Counterexamples on $M^n$}
This section will be committed to proving Theorem \ref{thm1} on the
minimal isoparametric hypersurface $M^n$ of OT-FKM type in
$S^{n+1}(1)$. At first, we denote the connections and Laplacians on
$M^n$, $S^{n+1}(1)$ and $\mathbb{R}^{n+2}$ respectively by:
\begin{eqnarray*}\label{connection}
&& M^n \subset S^{n+1}(1) \subset \mathbb{R}^{n+2}\nonumber\\
&& \nabla~ \triangle, \quad\overline{\nabla}~ \overline{\triangle}, \qquad\widetilde{\nabla} ~\widetilde{\triangle}.
\end{eqnarray*}
In order to facilitate the description, we state the following lemma in front of the proof of Theorem \ref{thm1}. The proof
is direct and will be omitted here.
\begin{lem}\label{laplacian relation}
\emph{Let $\xi$ be a (local) unit vector field on $S^{n+1}(1)$
extended from a unit normal vector field of $M^n$, $H$ be the mean
curvature vector field of $M^n$ in $S^{n+1}(1)$. For functions $\mathcal{G}$ on
$\mathbb{R}^{n+2}$, $G=\mathcal{G}|_{S^{n+1}}$ and $g=G|_{M^n}$, at
any $x\in M^{n}$ (as a position vector field) we have:
\begin{equation}\label{two laplacian}
\left\{ \begin{array}{ll}
\widetilde{\triangle}\mathcal{G}|_{S^{n+1}} = \overline{\triangle} G+nx(\mathcal{G})+xx(\mathcal{G})\\
\overline{\triangle}G|_{M^n} ~~= \triangle g -\xi(G)\langle H,
\xi\rangle + \xi\xi(G) -\overline{\nabla}_{\xi}\xi(G)
\end{array}\right.
\end{equation}}
\end{lem}
\noindent \textbf{\emph{Proof of Theorem \ref{thm1}.}}\quad We take
the first step by determining the eigenvalues corresponding to
$\varphi_i$ ($i=1, 2, 3$).
Clearly, based on Lemma \ref{laplacian relation}, a direct calculation depending on the
minimality of $M^n$ in $S^{n+1}(1)$ leads to
\begin{equation}\label{eigenvalue of varphi13}
\triangle \varphi_1= -n \varphi_1.
\end{equation}
Besides, in conjunction with Codazzi equation, we get another straightforward result:
\begin{equation}\label{eigenvalue of varphi31}
\triangle \varphi_3= -|B|^2 \varphi_3 = -(g-1)n \varphi_3 = -3n \varphi_3,
\end{equation}
where $B$ is the second fundamental form of $M^n$, and the second equality in (\ref{eigenvalue of varphi31}) is an assertion
of \cite{PT}.
According to Solomon \cite{Sol}, the eigenvalue corresponding to
$\varphi_2$ is equal to $2n$. As a matter of fact, this conclusion
can also be derived from a few basic facts and Lemma \ref{laplacian
relation}---some formulas in this process will be useful later:
It is well known that there exists a unique $c_0$ with $-1<c_0<1$
such that the minimal isoparametric hypersurface $M^n$ (of OT-FKM type)
is given by $M^n=f^{-1}(c_0)$ (the value of $c_0$ will be given in the proof of Lemma \ref{N+N-}). We can choose the unit normal vector
field to be
$$\xi=\frac{\overline{\nabla}f}{|\overline{\nabla}f|}\Big|_{M^n}=\frac{\widetilde{\nabla}F-4Fx}{4\sqrt{1-F^2}}\Big|_{M^n}.$$
Extending $\xi$ along the normal geodesics such that
$\overline{\nabla}_{\xi}\xi=0$, it follows that
\begin{equation}\label{xi varphi2} \xi(\varphi_2) = \langle \xi,
\overline{\nabla}\varphi_2 \rangle =
\langle\frac{\widetilde{\nabla}F-4Fx}{4\sqrt{1-F^2}},
2Px-2\varphi_2x \rangle = -2\sqrt{\frac{1+f}{1-f}}\varphi_2,
\end{equation}
and thus
\begin{equation*}
\xi\xi(\varphi_2) = \langle \xi,
\overline{\nabla}\xi(\varphi_2)\rangle = -4\varphi_2.
\end{equation*}
Here, we extended $\varphi_2$ to $S^{n+1}(1)$ and $\mathbb{R}^{n+2}$
in a natural way. Then combining with (\ref{two laplacian}) and
$H=0$, we arrive at
\begin{equation}\label{eigenvalue of varphi2}
\triangle \varphi_2= -2n \varphi_2.
\end{equation}
Next, we aim to investigate the critical sets of $\varphi_i$ ($i=1,
2, 3$). Let $e_1, e_2,...,e_n$ be an orthonormal tangent frame field
on $M^n$ with $A_{\xi}e_i=\mu_ie_i$ ($i=1,2,...,n$), where $A_{\xi}$
is the shape operator. According to M\"{u}nzner, the principal
curvature $\mu_i\in\{\cot \theta_j =
\cot(\theta_1+\frac{j-1}{4}\pi)~|~ 0<\theta_1<\frac{\pi}{4},~
j=1,2,3,4\}.$
\emph{\emph{(i)}}
For each $e_i\in T_xM^n$, we have
\begin{equation}\label{gradient varphi1}
\langle \nabla \varphi_1, e_i\rangle = e_i\langle x, q_1\rangle =
\langle e_i, q_1\rangle.
\end{equation}
It follows that $x$ is a critical point of $\varphi_1$ if and only
if $q_1\in \mathrm{Span}\{ x, \xi(x) \}$.
In other words, $q_1$ lies on some normal geodesic $v(t)$ ($-\pi\leq
t\leq \pi$) with $v(0)=x,~v^{\prime}(0)=\xi(x)$. Therefore the
number of critical points of $\varphi_1$ is
\begin{equation*}
\sharp C(\varphi_1)=\frac{2\pi}{\pi/g}=2g=8.
\end{equation*}
Here, we used the known fact that the distance between two focal
submanifolds is equal to $\pi/g$ (cf. \cite{CR}).
Furthermore, recall the formula of Hessian:
$$\mathrm{Hess}(\varphi_1)_{ij}=\langle e_i, \nabla_{e_j}\nabla\varphi_1\rangle.$$
Restricted to a critical point $x$, using (\ref{gradient varphi1})
we express it as
\begin{equation}\label{Hessian 1}
\mathrm{Hess} (\varphi_1)|_x=-\mathrm{diag}\{~\langle\mu_1\xi-x, q_1\rangle, ~\langle\mu_2\xi-x, q_1\rangle,...,~\langle\mu_n\xi-x, q_1\rangle~\}.
\end{equation}
Writing $q_1=\cos t~x+\sin t~\xi$ $(-\pi<t<\pi)$ for a fixed $x$, a
direct calculation leads to
\begin{eqnarray*}
\langle\mu_i\xi-x, q_1\rangle=0 &\Leftrightarrow& \sin t(\cot\theta_i-\cot t)=0\\
&\Leftrightarrow& q_1\in M_+\cup M_-\cup M^n.
\end{eqnarray*}
From the assumption $q_1\in S^{n+1}(1)\backslash\{M_+, M_-, M^n\}$,
we derive that $\varphi_1$ is a Morse function, as desired.
\emph{\emph{(ii)}} Similarly, for each $e_i\in T_xM^n$, we have
\begin{equation*}
\langle \nabla \varphi_3, e_i\rangle = e_i\langle \xi, q_1\rangle = -\langle A_{\xi}e_i, q_1\rangle = -\langle \mu_ie_i, q_1\rangle.
\end{equation*}
Since $\mu_i\in\{\cot \theta_j = \cot(\theta_1+\frac{j-1}{4}\pi)~|~
0<\theta_1<\frac{\pi}{4},~ j=1,2,3,4\},$ it is easy to see that
$\mu_i\neq 0~ ~\forall i$. Thus
$x$ is a critical point of $\varphi_3$ if and only if
$q_1\in \mathrm{Span}\{x, \xi(x) \}$.
Analogously, \begin{equation*}
\sharp C(\varphi_3)=\frac{2\pi}{\pi/g}=2g=8.
\end{equation*}
Furthermore, $\mathrm{Hess}(\varphi_3)$ at a critical point $x$ can be
expressed as
\begin{equation}\label{Hessian 3}
\mathrm{Hess}(\varphi_3)|_x=-\mathrm{diag}\{~\mu_1\langle\mu_1\xi-x, q_1\rangle, ~\mu_2\langle\mu_2\xi-x, q_1\rangle,...,~\mu_n\langle\mu_n\xi-x, q_1\rangle~\}.
\end{equation}
Again, our choice of $q_1$ guarantees that $\varphi_3$ is a Morse
function.
\emph{\emph{(iii)}} From the formula (\ref{xi varphi2}), we derive that
\begin{eqnarray}\label{nabla varphi2}
\nabla\varphi_2 &=& \widetilde{\nabla}\varphi_2-x(\varphi_2)x -\xi(\varphi_2)\xi \\
&=& 2(Px-\varphi_2x+\varphi_2\sqrt{\frac{1+c_0}{1-c_0}}\xi).\nonumber
\end{eqnarray}
Immediately, a simple calculation shows that $\varphi_2$ satisfies
\begin{equation}\label{isoparametric varphi2}
\left\{ \begin{array}{ll}
|\nabla \varphi_2|^2=4(1-\frac{2}{1-c_0}\varphi_2^2)\\
\quad\triangle \varphi_2 = -2n \varphi_2.
\end{array}\right.
\end{equation}
By definition, $\varphi_2$ is an isoparametric function on $M^n$.
Define the focal submanifolds by $N_{\pm}:=\{x\in
M^n~|~\varphi_2=\pm \sqrt{\frac{1-c_0}{2}}\}$. Therefore the
critical set of $\varphi_2$ is the union of its focal submanifolds:
$$C(\varphi_2)=N_+\cup N_-.$$
We are now in a position to complete the proof of Theorem \ref{thm1} by verifying Lemma \ref{N+N-}.
\noindent
\textbf{\emph{Proof of Lemma \ref{N+N-}.}}~
As indicated before, the focal submanifold $M_+$ of OT-FKM type is
$$M_+:=f^{-1}(+1)=\{x\in S^{n+1}(1)~|~\langle P_0x, x\rangle=\langle P_1x, x\rangle=\cdots=\langle P_mx, x\rangle=0\}.$$
Define a map:
\begin{eqnarray*}
&&h_+: M_+\rightarrow S^{n+1}(1) \\
&&\qquad\quad x\mapsto \cos t~ x+\sin t~ Px
\end{eqnarray*}
where $\cos t=\sqrt{\frac{1}{2}(1+\sqrt{\frac{1+c_0}{2}})}$, $\sin
t=\sqrt{\frac{1}{2}(1-\sqrt{\frac{1+c_0}{2}})}$. It is easy to show
that $$\langle Ph_+(x), h_+(x)\rangle=\sqrt{\frac{1-c_0}{2}},~
\emph{i.e.} ~~~~h_+(x)\in N_+.$$ Thus the image of $h_+$ is
contained in $N_+$. On the other hand, define another map:
\begin{eqnarray*}
&&j_+: N_+\rightarrow M_+ \\
&&\qquad\quad x\mapsto \cos t~ x+\sin t~ \xi(x)
\end{eqnarray*}
with the same values of $\cos t$ and $\sin t$, and
$\xi=\frac{\overline{\nabla}f}{|\overline{\nabla}f|}.$ Evidently,
$j_+$ is well defined and is just the inverse function of $h_+$.
This means that the focal submanifold $N_+$ of $\varphi_2$ on $M^n$
is diffeomorphic to the focal submanifold $M_+$ of $f$ on
$S^{n+1}(1)$.
We conclude the proof by investigating the mean curvatures of the
level hypersurfaces $N_t:=\varphi_2^{-1}(t)$, $t\in
(-\sqrt{\frac{1-c_0}{2}}, \sqrt{\frac{1-c_0}{2}})$. Following the
formula of the mean curvature $h(t)$ (cf. \cite{GT2}), we
have:
\begin{equation}\label{mean curvature}
h(t) = \frac{b^{\prime}(t)-2a(t)}{2\sqrt{b(t)}}
= \frac{n-\frac{4}{1-c_0}}{\sqrt{1-\frac{2t^2}{1-c_0}}}~t
\end{equation}
Obviously, the isoparametric hypersurface $N_0=\varphi_2^{-1}(0)$ is
minimal in $M^n$. In addition, the minimality of $M^n$ implies:
$$c_0=\frac{m_--m_+}{m_-+m_+}=\frac{l-2m-1}{l-1},\quad n=2l-2,$$
then we obtain that
$$n-\frac{4}{1-c_0}=0 \Leftrightarrow m=1 \quad \text{(the improper case, cf.\ \cite{GT2})}.$$
In conclusion, in the improper case, all the level hypersurfaces of $\varphi_2$ are minimal.
The same argument applies to $N_-$ with a little change of the
values:
$$\cos t=\sqrt{\frac{1}{2}(1+\sqrt{\frac{1+c_0}{2}})}, \quad \sin t=-\sqrt{\frac{1}{2}(1-\sqrt{\frac{1+c_0}{2}})}.$$
$\Box$
The proof of Theorem \ref{thm1} is now complete.
\section{Counterexamples on $M_-$}
\noindent \textbf{\emph{Proof of Theorem \ref{thm2}.}} Implementing
the previous arguments in Section $2$, it is not difficult to find
that $\omega_2$ on $M_-$ is an eigenfunction corresponding to the
eigenvalue $\dim M_-=l+m-1$, and the number of its critical points
is $\frac{2\pi}{2\pi/g}=g=4$ (cf. \cite{CR}). Therefore, in
order to complete the proof of Theorem \ref{thm2}, we need only to
confirm that $\omega_1$ is an isoparametric function on $M_-$ and
prove Lemma \ref{V+V-}.
Firstly, noticing the Euclidean gradient $\widetilde{\nabla}\omega_1$ can be expressed by
$$\widetilde{\nabla}\omega_1=2Px=2\langle Px, x\rangle x+2\Big(Px-\langle Px, x\rangle x\Big),$$
we claim that
\noindent
\textbf{Claim}: \emph{$y:=Px-\langle Px, x\rangle x\in T_xM_-.$}
Holding this claim, it follows that $\nabla \omega_1=2y=2(Px-\langle
Px, x\rangle x)$. Then a simple calculation leads to
\begin{equation}\label{isoparametric omega1}
\left\{\begin{array}{ll}
|\nabla \omega_1|^2=4(1-\omega_1^2) \\
\quad\triangle \omega_1=-4m\omega_1,
\end{array}\right.
\end{equation}
where the second equality is due to Solomon \cite{Sol}. Namely,
$\omega_1$ is an isoparametric function on $M_-$. Define the focal
submanifolds of $\omega_1$ by $V_{\pm}:=\{x\in M_-~|~\omega_1=\pm
1\}$. Then the critical set of $\omega_1$ is
$$C(\omega_1)=V_+\cup V_-.$$
\begin{rem}
The proof of $|\nabla \omega_1|^2=4(1-\omega_1^2)$ is recently used by \cite{QT} to
obtain a sequence of isoparametric functions (hypersurfaces).
\end{rem}
Now we are left to prove the previous Claim and Lemma \ref{V+V-}.
\noindent \textbf{\emph{Proof of \textbf{Claim}}}. Firstly, we
rewrite the focal submanifold
$$M_- := \{x\in S^{n+1}(1)~|~\displaystyle\sum_{\alpha=0}^{m}\langle P_{\alpha}x, x\rangle^2=1\}$$
as
\begin{eqnarray*}
M_- &=& \{x\in S^{n+1}(1)~|~x=\sum_{\alpha=0}^{m}\langle P_{\alpha}x, x\rangle P_{\alpha}x\}
\end{eqnarray*}
Define $\mathcal{P}:=\displaystyle\sum_{\alpha=0}^{m}\langle
P_{\alpha}x, x\rangle P_{\alpha}$, then for each $x\in M_-$ we have
\begin{equation}\label{Px=x}
\mathcal{P}\in \Sigma\quad \text{and} \quad\mathcal{P}x=x.
\end{equation}
Since $\mathcal{P}$ is an orthogonal symmetric matrix with vanishing trace,
we can decompose $\mathbb{R}^{2l}$ as
$$\mathbb{R}^{2l}=E_+(\mathcal{P})\oplus E_-(\mathcal{P}).$$
With respect to this decomposition, $2y\in\mathbb{R}^{2l}$ can be
written as
$$2y=(y+\mathcal{P} y)+(y-\mathcal{P}y).$$
Denoting $P=\displaystyle\sum_{\beta=0}^{m}a_{\beta}P_{\beta}$ with $\displaystyle\sum_{\beta=0}^{m}a_{\beta}^2=1$, we have
\begin{eqnarray*}
y+\mathcal{P} y &=& Px-\langle Px, x\rangle x + \mathcal{P}Px-\langle Px, x\rangle \mathcal{P}x\\
&=& P\mathcal{P}x + \mathcal{P}Px - 2\langle Px, x\rangle x\\
&=& \sum_{\beta=0}^{m}a_{\beta}P_{\beta}\Big(\sum_{\alpha=0}^{m}\langle P_{\alpha}x, x\rangle P_{\alpha}x\Big)+ \sum_{\alpha=0}^{m}\langle P_{\alpha}x, x\rangle P_{\alpha}\Big(\sum_{\beta=0}^{m}a_{\beta}P_{\beta}x\Big)- 2\langle Px, x\rangle x\\
&=& 2\sum_{\alpha=0}^{m}a_{\alpha}\langle P_{\alpha}x, x\rangle x-2 \sum_{\beta=0}^{m}a_{\beta}\langle P_{\beta}x, x \rangle x\\
&=& 0,
\end{eqnarray*}
which leaves $2y=y-\mathcal{P}y$, \emph{i.e.} $y\in E_-(\mathcal{P})$.
On the other hand, setting $y=Px-\langle Px, x\rangle x=Qx$, where
$$Q:=P-\langle Px, x\rangle\mathcal{P}\in \mathrm{Span}\{P_0, P_1,...,P_m\},$$
it is easy to find that
$$\langle Q, \mathcal{P}\rangle=0.$$
Comparing with (cf. Section 4.5(iii) of \cite{FKM})
\begin{equation*}
T_x^{\perp}M_-=\{\nu\in E_-(\mathcal{P})~|~\langle \nu, Qx\rangle=0,
~\forall~ \langle Q, \mathcal{P}\rangle=0\},
\end{equation*}
we get immediately the Claim.
$\Box$
Now we are in a position to prove Lemma \ref{V+V-}.
\noindent
\textbf{\emph{Proof of Lemma \ref{V+V-}.}}
Under an orthogonal transformation, we can express $P$ as
\begin{equation*}
P=T^t\left(
\begin{array}{cc}
I_l & 0 \\
0 & -I_l \\
\end{array}
\right)T,\quad \text{with } T^tT=I_{2l}.
\end{equation*}
Write $Tx=(z, w)\in \mathbb{R}^l\times \mathbb{R}^l$ for $x\in
S^{n+1}(1)$. The condition $\langle Px, x\rangle=1$ is equivalent to
\begin{equation*}
|z|^2-|w|^2=1,
\end{equation*}
which, together with $|z|^2+|w|^2=|Tx|^2=1$, implies $|z|^2=1$ and $|w|^2=0$. On the other hand, we observe that
\begin{eqnarray*}
V_+&:=&\{x\in M_-~|~\langle Px,
x\rangle=1\}\\
&=&\{x\in S^{2l-1}~|~\langle Px, x\rangle=1\}.
\end{eqnarray*}
Thus we get an isometry
\begin{equation*}
V_+\underset{isom.}{\cong}S^{l-1}(1).
\end{equation*}
Similarly, \begin{equation*} V_-\underset{isom.}{\cong}S^{l-1}(1).
\end{equation*}
Now the proof of Theorem \ref{thm2} is complete.
\end{document}
|
\begin{document}
\title{On injective resolutions of local cohomology modules}
\author{Tony J. Puthenpurakal}
\date{\today}
\address{Department of Mathematics, Indian Institute of Technology Bombay, Powai, Mumbai 400 076, India}
\email{[email protected]}
\subjclass{Primary 13D45; Secondary 13D02, 13H10}
\keywords{local cohomology, injective resolutions}
\begin{abstract}
Let $K$ be a field of characteristic zero and let $R = K[X_1,\ldots,X_n]$. Let $I$ be an ideal in $R$ and let $M = H^i_I(R)$ be the $i^{th}$-local
cohomology module of $R$ with respect to $I$.
Let $c = \operatorname{injdim} M$. We prove that if $P$
is a prime ideal in $R$ with Bass number $\mu_c(P,M) > 0$ then $P$ is a maximal ideal in $R$.
\end{abstract}
\maketitle
\section{Introduction}
Throughout this paper $R$ is a commutative Noetherian ring. If $M$ is an $R$-module and $Y$ is a locally closed subscheme of $\operatorname{Spec}(R)$, we denote by $H^i_Y(M)$ the $i^{th}$-local cohomology module of $M$ with support in $Y$. If $Y$ is closed in $\operatorname{Spec}(R)$ with defining ideal $I$ then $H^i_Y(M)$ is denoted by $H^i_I(M)$.
In a remarkable paper, \cite{HuSh}, Huneke and Sharp proved that if $R$ is a regular ring containing a field of characteristic $p > 0$, and $I$ is an ideal in $R$ then the local cohomology modules of $R$ with respect to \ $I$ have the following properties:
\begin{enumerate}[\rm(i)]
\item
$H^j_{\mathfrak{m} }(H^i_I(R))$ is injective, where $\mathfrak{m} $ is any maximal ideal of $R$.
\item
$\operatorname{injdim}_R H^i_I(R) \leq \dim \operatorname{Supp} H^i_I(R)$.
\item
The set of associated primes of $H^i_I(R)$ is finite.
\item
All the Bass numbers of $H^i_I(R)$ are finite.
\end{enumerate}
Here $\operatorname{injdim}_R H^i_I(R)$ denotes the injective dimension of $H^i_I(R)$. Also $\operatorname{Supp} M = \{ P \mid M_P \neq 0 \ \text{and $P$ is a prime in $R$}\}$ is the support of an $R$-module $M$. The $j^{th}$ Bass number of an $R$-module $M$ with respect to a prime ideal $P$ is defined as $\mu_j(P,M) = \dim_{k(P)} \operatorname{Ext}^j_{R_P}(k(P), M_P)$ where $k(P)$ is the residue field of $R_P$.
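For example, if $E(R/P)$ is the injective hull of $R/P$ then $0 \rightarrow E(R/P) \rightarrow E(R/P) \rightarrow 0$ is already a minimal injective resolution, so $\mu_0(P, E(R/P)) = 1$ and all other Bass numbers of $E(R/P)$ vanish.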
In another remarkable paper, for regular rings in characteristic zero, Lyubeznik was able to establish the above properties
for a considerably larger class of functors than just the local cohomology modules, see \cite{Lyu-1}. We call such functors \textit{Lyubeznik functors}; see section two for details.
If $\mathcal{T}$ is a Lyubeznik functor on $Mod(R)$ then $\mathcal{T}(R)$ satisfies the following properties:
\begin{enumerate}[\rm(i)]
\item
$H^j_{\mathfrak{m} }(\mathcal{T}(R))$ is injective, where $\mathfrak{m} $ is any maximal ideal of $R$.
\item
$\operatorname{injdim}_R \mathcal{T}(R) \leq \dim \operatorname{Supp} \mathcal{T}(R)$.
\item
For every maximal ideal $\mathfrak{m} $, the number of associated primes of $\mathcal{T}(R)$ contained in $\mathfrak{m} $
is finite.
\item
All the Bass numbers of $\mathcal{T}(R)$ are finite.
\end{enumerate}
We should note that if $R = K[X_1,\ldots,X_n]$ then the number of associated primes of $\mathcal{T}(R)$ is finite.
The results of Lyubeznik for characteristic zero raised the question of whether the results (i)--(iv) of Huneke and Sharp (in characteristic $p>0$) could be extended to this larger class of functors. In \cite{Lyu-2}, Lyubeznik proves that they can.
If $M$ is a finitely generated module over a Cohen-Macaulay \ ring $R$ and say $M$ has finite injective dimension $d = \dim R$, then it is elementary to prove that if $\mathfrak{m} u_d(P,M) > 0$ then $P$ is a maximal ideal in $R$, use \cite[3.1.13]{BH}. This fails for modules which are not finitely generated, for instance consider the injective hull $E(R/P)$ of $R/P$ where $P$ is a prime ideal which is not maximal.
Our main result is
\begin{theorem}\label{main}
Let $K$ be a field of characteristic zero and let $R = K[X_1,\ldots,X_n]$. Let $\mathcal{T}$ be a Lyubeznik functor on $Mod(R)$. Suppose $ \operatorname{injdim} \mathcal{T}(R) = c$. If $P$ is a prime ideal in $R$ with Bass number $\mu_c(P, \mathcal{T}(R)) > 0 $ then $P$ is a maximal ideal of $R$.
\end{theorem}
As an aside we note that to the best of our knowledge this is the first result whose proof uses the fact that $\operatorname{Ass} \mathcal{T}(R)$ is finite for any Lyubeznik functor $\mathcal{T}$.
A natural question is what we can say about $\mu_c(\mathfrak{m},\mathcal{T}(R))$ as $\mathfrak{m}$ varies over maximal ideals in $R$.
Our next result is essentially only an observation.
\begin{proposition}\label{main-2}
Let $K$ be an algebraically closed field of characteristic zero and let $R = K[X_1,\ldots,X_n]$. Let $\mathcal{T}$ be a Lyubeznik functor on $Mod(R)$. Suppose $ \operatorname{injdim} \mathcal{T}(R) = c$. Then for all $i = 0,\ldots, c$; the set
$$\{ \mu_i(\mathfrak{m}, \mathcal{T}(R)) \mid \mathfrak{m} \ \text{a maximal ideal of }\ R \}$$
is bounded.
\end{proposition}
The surprising thing about Proposition \ref{main-2} is that I do not know whether such a result holds for finitely generated modules over $R$.
A natural question is whether the results \ref{main} and \ref{main-2} hold in characteristic $p > 0$. Although we expect this to be true, our techniques do not work in positive characteristic. We are only able to extend Proposition \ref{main-2} to a subclass of Lyubeznik functors, see \ref{char-p}.
We now describe in brief the contents of this paper. In section two we define Lyubeznik functors and also a few preliminary results on holonomic modules which we need. In section three we discuss two Lemmas which will help in proving Theorem \ref{main}. In section four we prove Theorem \ref{main}. Finally in section five we prove Proposition \ref{main-2}.
\section{Preliminaries}
In this section we define Lyubeznik functors. We also prove a result on holonomic modules which we need.
\s \textit{Lyubeznik functors:}\\
Let $R$ be a commutative Noetherian ring and let $X = \operatorname{Spec}(R)$. Let $Y$ be a locally closed subset of $X$. If $M$ is an $R$-module, we denote by $H^i_Y(M)$ the $i^{th}$-local cohomology module of $M$ with support in $Y$. Suppose
$Y = Y_1 \setminus Y_2$, where $Y_2 \subseteq Y_1$ are two closed subsets of $X$; then we have an exact sequence of functors
\[
\cdots \rightarrow H^i_{Y_2}(-) \rightarrow H^i_{Y_1}(-) \rightarrow H^i_Y(-) \rightarrow H^{i+1}_{Y_2}(-) \rightarrow \cdots.
\]
A Lyubeznik functor $\mathcal{T}$ is any functor of the form $\mathcal{T} = \mathcal{T}_1\circ \mathcal{T}_2 \circ \cdots \circ \mathcal{T}_m$ where every functor $\mathcal{T}_j$ is either $H^i_Y(-)$ for some locally closed subset of $X$ or the kernel, image or
cokernel of some arrow in the previous long exact sequence for closed
subsets $Y_1,Y_2$ of $X$ such that $Y_2 \subseteq Y_1$.
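For example, for ideals $I_1,\ldots,I_r$ of $R$ the iterated local cohomology functor $H^{i_1}_{I_1}(H^{i_2}_{I_2}(\cdots H^{i_r}_{I_r}(-)\cdots))$ is a Lyubeznik functor, each $V(I_j)$ being a closed subset of $X$; in particular every local cohomology functor $H^i_I(-)$ is a Lyubeznik functor.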
We need the following result from \cite[3.1]{Lyu-1}.
\begin{proposition}\label{flat-L}
Let $\phi \colon R \rightarrow S$ be a flat homomorphism of Noetherian rings. Let $\mathcal{T}$ be a
Lyubeznik functor on $Mod(R)$. Then there exists a Lyubeznik functor $\widehat{\mathcal{T}}$ on $Mod(S)$ and isomorphisms $\widehat{\mathcal{T}}(M\otimes_R S) \cong \mathcal{T}(M)\otimes_R S$ which is functorial in $M$.
\end{proposition}
\s \textit{Lyubeznik functors and holonomicity:}\\
Let $K$ be a field of characteristic zero. Let $S = K[[X_1,\ldots,X_n]]$. Let $D$ be the ring of $K$-linear differential operators on $S$. Let $\mathcal{T} $ be a Lyubeznik functor on $Mod(S)$. If $M$ is any holonomic $D$-module then $\mathcal{T}(M)$ is a holonomic $D$-module; see \cite[2.2d]{Lyu-1}. In particular $\mathcal{T}(S)$ is a holonomic $D$-module.
Let $R = K[X_1,\ldots,X_n]$ and let $A_n(K)$ be the $n^{th}$-Weyl algebra over $K$.
Let $\mathcal{T}$ be a Lyubeznik functor on $Mod(R)$. If $M$ is any holonomic $A_n(K)$-module then $\mathcal{T}(M)$ is a holonomic $A_n(K)$-module; (the proof in \cite[2.2d]{Lyu-1} can be modified to prove this result). In particular $\mathcal{T}(R)$ is a holonomic $A_n(K)$-module.
\begin{remark}
In \cite{B} holonomic $A_n(K)$-modules are called modules belonging to the Bernstein class.
\end{remark}
\s\label{d-mod} Let $k$ be a field of characteristic zero and let $S = k[[Y_1,\ldots,Y_n]]$. Let $D$ be the ring of $k$-linear differential operators on $S$. Let $C$ be a simple holonomic $D$-module. Notice $\operatorname{Ass}_S C = \{ P \}$ for some prime $P$ in $S$. Also $C$ is $P$-torsion; see \cite[3.3.16-17]{B}. It follows from \cite[p.\ 109, lines 3-6]{B} that there exists $h \in (S/P)$ non-zero such that $\operatorname{Hom}_S(S/P, C)_h$ is a finitely generated $(S/P)_h$-module. Let $g$ be a pre-image of $h$ in $S$. Then clearly
$\operatorname{Hom}_S(S/P,C)_g$ is a finitely generated $S_g$-module. We now generalize this result.
\begin{proposition}\label{fin-gen}
(with hypotheses as in \ref{d-mod}) Let $M$ be a holonomic $D$-module. Assume $\operatorname{Ass}_S M = \{ P \}$ and $M$ is $P$-torsion. Then there exists $h \in S\setminus P$ such that $\operatorname{Hom}_S(S/P, M)_h$ is finitely generated as an $S_h$-module.
\end{proposition}
\begin{proof}
Let $ 0 = M_0 \varsubsetneq M_1\varsubsetneq M_2\varsubsetneq \cdots \varsubsetneq M_{n-1} \varsubsetneq M_n = M$ be a filtration of $M$ with $M_i/M_{i-1}$ simple $D$-module for $i = 1,\ldots,n$. By induction on $i$ we prove that there exists $h_i \in S\setminus P$ such that $\operatorname{Hom}_S(S/P, M_i)_{h_i}$ is finitely generated as a $S_{h_i}$-module.
For $i = 1$ note that $M_1$ is a simple holonomic $D$-module. Also $\operatorname{Ass}_S M_1 \subseteq \operatorname{Ass}_S M = \{ P \}$. Then by \ref{d-mod} we get the required assertion.
We assume the result for $i = r$ and prove it for $i = r + 1$.
Say $\operatorname{Hom}_S(S/P, M_r)_{h_r}$ is a finitely generated $S_{h_r}$-module. We consider the following two cases.
Case 1: $\operatorname{Ass}_S M_{r+1}/M_r = \{ P \}$. \\ By \ref{d-mod} there exists $g_r \in S \setminus P$ such that $\operatorname{Hom}_S(S/P, M_{r+1}/M_r)_{g_r}$ is a finitely generated $S_{g_r}$-module.
Consider the exact sequence
\[
0 \rightarrow \operatorname{Hom}_S(S/P, M_r) \rightarrow \operatorname{Hom}_S(S/P, M_{r+1}) \rightarrow \operatorname{Hom}_S(S/P, M_{r+1}/M_r).
\]
Localize at $h_{r+1} = h_rg_r \in S \setminus P$. Notice
\begin{enumerate}
\item
$\operatorname{Hom}_S(S/P, M_r)_{h_{r+1}} = \left(\operatorname{Hom}_S(S/P,M_r)_{h_r}\right)_{g_r}$ is finitely generated as a $S_{h_{r+1}}$-module.
\item
$\operatorname{Hom}_S(S/P, M_{r+1}/M_r)_{h_{r+1}} = \left( \operatorname{Hom}_S(S/P, M_{r+1}/M_r)_{g_r} \right)_{h_r}$ is finitely generated as a $S_{h_{r+1}}$-module.
\end{enumerate}
It follows that $\operatorname{Hom}_S(S/P, M_{r+1})_{h_{r+1}}$ is finitely generated as a $S_{h_{r+1}}$-module.
Case 2: $\operatorname{Ass}_S M_{r+1}/M_r = \{ Q \}$ with $Q \neq P$. \\ As $M$ is $P$-torsion we have that $Q\supsetneq P$. Take $g \in Q \setminus P$. Then $(M_{r+1}/M_r)_g = 0$. So $\operatorname{Hom}_S(S/P, M_{r+1}/M_r)_g = 0$. Put $h_{r+1} = h_rg \in S \setminus P$. Then note that
\[
\operatorname{Hom}_S(S/P, M_{r+1})_{h_{r+1}} \cong \operatorname{Hom}_S(S/P, M_r)_{h_{r+1}} = \left(\operatorname{Hom}_S(S/P,M_r)_{h_r}\right)_{g},
\]
is finitely generated as a $S_{h_{r+1}}$-module.
Thus by induction we get that there exists $h \in S \setminus P$ such that $\operatorname{Hom}_S(S/P, M)_h$ is finitely generated as a $S_h$-module.
\end{proof}
\s Finally we need the following well-known result regarding non-singular locus of affine domains.
\begin{theorem}\label{locus}
Let $A$ be an affine domain, finitely generated over a perfect field $k$. Then
\begin{enumerate}[\rm (1)]
\item
The non-singular locus of $A$ is non-empty and an open subset of $\operatorname{Spec}(A)$.
\item
There exists a maximal ideal $\mathfrak{m} $ of $A$ with $A_\mathfrak{m} $ regular local.
\item
If $\dim A \geq 1$ then there exist infinitely many maximal ideals $\mathfrak{m}$ of $A$ with $A_\mathfrak{m}$ regular local.
\item
Suppose $\dim A \geq 2$ and let $f \in A$. Then there exists a maximal ideal $\mathfrak{m}$ of $A$ with $f \notin \mathfrak{m}$ and $A_\mathfrak{m}$ regular local.
\end{enumerate}
\end{theorem}
\section{Two Lemmas}
In this section we establish two lemmas which will enable us to prove our main result. Let $K$ be a field of characteristic zero and let $P$ be a prime ideal of height $g$ in $R = K[X_1,\ldots,X_n]$. Let $E(R/P)$ denote the injective hull of $R/P$.
Recall that $E(R/P) = H^g_P(R)_P$. It follows that $E(R/P)$ is a $A_n(K)$-module and the natural inclusion $H^g_P(R) \rightarrow E(R/P)$ is $A_n(K)$-linear.
\begin{lemma}\label{lemma1} Let $K$ be a field of characteristic zero and let $P$ be a prime ideal of height $n-1$ in $R = K[X_1,\ldots,X_n]$. Then $E(R/P)$ is not a holonomic $A_n(K)$-module.
\end{lemma}
\begin{proof}
Suppose if possible $E(R/P)$ is a holonomic $A_n(K)$-module. We have an exact sequence of $A_n(K)$-modules
\[
0 \rightarrow H^{n-1}_P(R) \rightarrow E(R/P) \rightarrow C \rightarrow 0.
\]
As $E(R/P)$ is holonomic we have that $C$ is also a holonomic $A_n(K)$-module. Notice $C_P = 0$. It follows that $C$ is supported at only finitely many maximal ideals of $R$, say $\mathfrak{m}_1,\ldots,\mathfrak{m}_r$. By Theorem \ref{locus}(3) there exists a maximal ideal $\mathfrak{m}$ of $R$ such that $\mathfrak{m} \neq \mathfrak{m}_i$ for all $i$ and $(R/P)_\mathfrak{m}$ is regular local.
Note $H^{n-1}_P(R)_\mathfrak{m} = E(R/P)_\mathfrak{m} $ as $C_\mathfrak{m} = 0$. If $\mathfrak{m} R_\mathfrak{m} = (z_1,\ldots,z_n)$ then as $R_\mathfrak{m} /P R_\mathfrak{m} $ is regular we may assume that $P R_\mathfrak{m} = (z_1,\ldots,z_{n-1})$. In particular $H^n_{PR_\mathfrak{m} }(R_\mathfrak{m} ) = 0$.
Let $f \in \mathfrak{m} R_\mathfrak{m} \setminus P R_\mathfrak{m} $. Note that we have an exact sequence
\[
0 \rightarrow H^{n-1}_{PR_\mathfrak{m} }(R_\mathfrak{m} ) \rightarrow H^{n-1}_{PR_\mathfrak{m} }(R_\mathfrak{m} )_f \rightarrow H^n_{(PR_\mathfrak{m} ,f)}(R_\mathfrak{m} ) \rightarrow H^n_{PR_\mathfrak{m} }(R_\mathfrak{m} ) = 0
\]
As $H^{n-1}_{PR_\mathfrak{m} }(R_\mathfrak{m} ) = H^{n-1}_P(R)_\mathfrak{m} = E(R/P)_\mathfrak{m} $ it follows that the first map in the above exact sequence
is an isomorphism. It follows that $H^n_{(PR_\mathfrak{m} ,f)}(R_\mathfrak{m} ) = 0$. This contradicts Grothendieck's non-vanishing theorem as $\sqrt{(PR_\mathfrak{m} , f)} = \mathfrak{m} R_\mathfrak{m} $.
\end{proof}
Our next result is
\begin{lemma}\label{lemma2}
Let $K$ be a field of characteristic zero and let $P$ be a height $g$ prime in $R = K[X_1,\ldots,X_n]$ with $g \leq n-2$. Suppose $\mathfrak{m}$ is a maximal ideal in $R$ with $(R/P)_\mathfrak{m}$ a regular local ring. Let $\mathcal{T}$ be a Lyubeznik functor on $Mod(R_\mathfrak{m})$. Then $\mathcal{T}(R_\mathfrak{m}) \neq E(R/P)_\mathfrak{m}^c$ for any $c > 0$.
\end{lemma}
\begin{proof}
Suppose if possible $\mathcal{T}(R_\mathfrak{m}) = E(R/P)_\mathfrak{m}^c$ for some $c > 0$. Let $\widehat{R_\mathfrak{m}}$ be the completion of $R_\mathfrak{m}$ at $\mathfrak{m} R_\mathfrak{m}$. Note $\widehat{R_\mathfrak{m}} = K^\prime[[Z_1,\ldots,Z_n]]$ where $K^\prime \cong R/\mathfrak{m}$. Let $D$ be the ring of $K^\prime$-linear differential operators on $\widehat{R_\mathfrak{m}}$. Note by \ref{flat-L} there exists a Lyubeznik functor $\widehat{\mathcal{T}}$ on $Mod(\widehat{R_\mathfrak{m}})$ such that $ \widehat{\mathcal{T}}( \widehat{R_\mathfrak{m}}) = \mathcal{T}(R_\mathfrak{m})\otimes \widehat{R_\mathfrak{m}}$. In particular $E(R/P)_\mathfrak{m}^c \otimes \widehat{R_\mathfrak{m}}$ is a holonomic $D$-module. So $V = E(R/P)_\mathfrak{m} \otimes \widehat{R_\mathfrak{m}}$ is a holonomic $D$-module.
As $(R/P)_\mathfrak{m}$ is regular local we may assume that $PR_\mathfrak{m} = (Z_1,\ldots,Z_g)$. Note $n \geq g + 2$. In particular we have that $P\widehat{R_\mathfrak{m}}$ is a prime ideal in $\widehat{R_\mathfrak{m}}$. Notice $V$ is $P\widehat{R_\mathfrak{m}}$-torsion. Furthermore $\operatorname{Ass} V = \{ P\widehat{R_\mathfrak{m}} \}$. Using Proposition \ref{fin-gen} we get that there exists $h \in \widehat{R_\mathfrak{m}} \setminus P\widehat{R_\mathfrak{m}}$
such that $\operatorname{Hom}(\widehat{R_\mathfrak{m} }/P\widehat{R_\mathfrak{m} }, V)_h$ is a finitely generated $(\widehat{R_\mathfrak{m} })_h$-module.
Notice $\operatorname{Hom}_{R_\mathfrak{m} }(R_\mathfrak{m} /PR_\mathfrak{m} , E(R/P)_\mathfrak{m} ) = k(P)$ where $k(P)$ is the quotient field of $R_\mathfrak{m} /P R_\mathfrak{m} $. It follows that
\[
\operatorname{Hom}(\widehat{R_\mathfrak{m} }/P\widehat{R_\mathfrak{m} }, V) = \operatorname{Hom}_{R_\mathfrak{m} }(R_\mathfrak{m} /PR_\mathfrak{m} , E(R/P)_\mathfrak{m} ) \otimes \widehat{R_\mathfrak{m} }
= k(P)\otimes \widehat{R_\mathfrak{m} }.
\]
For $\lambda \in K$ let $\mathfrak{q}_\lambda = (Z_1,\ldots,Z_g, Z_{g+1} + \lambda Z_{g+2})$. Clearly $\mathfrak{q}_\lambda$ is a prime ideal of height $g+1$ in $R_\mathfrak{m}$ containing $PR_\mathfrak{m}$. Furthermore we have that $\mathfrak{q}_\lambda \widehat{R_\mathfrak{m}}$ is a prime ideal in $\widehat{R_\mathfrak{m}}$. If $\lambda_1 \neq \lambda_2$ then it is easy to show that $\mathfrak{q}_{\lambda_1} \neq \mathfrak{q}_{\lambda_2}$.
Now consider $\overline{h}$, the image of $h$ in $\widehat{R_\mathfrak{m}}/P\widehat{R_\mathfrak{m}}$. By considering a primary decomposition of $(\overline{h})$ it follows that infinitely many $\mathfrak{q}_\lambda \widehat{R_\mathfrak{m}}$ do not contain $h$. Choose one such $\lambda$. Thus we have that
$\operatorname{Hom}(\widehat{R_\mathfrak{m} }/P\widehat{R_\mathfrak{m} }, V)_{\mathfrak{q} _\lambda \widehat{R_\mathfrak{m} }} $ is a finitely generated
$(\widehat{R_\mathfrak{m} })_{\mathfrak{q} _\lambda \widehat{R_\mathfrak{m} }}$-module.
Notice we have a flat local map
$(R_\mathfrak{m} )_{\mathfrak{q} _\lambda} \rightarrow (\widehat{R_\mathfrak{m} })_{\mathfrak{q} _\lambda \widehat{R_\mathfrak{m} }}$.
Furthermore note that
\begin{align*}
\operatorname{Hom}(\widehat{R_\mathfrak{m} }/P\widehat{R_\mathfrak{m} }, V)_{\mathfrak{q} _\lambda \widehat{R_\mathfrak{m} }} &= k(P)\otimes_{R_\mathfrak{m} }\widehat{R_\mathfrak{m} } \otimes_{\widehat{R_\mathfrak{m} }} (\widehat{R_\mathfrak{m} })_{\mathfrak{q} _\lambda \widehat{R_\mathfrak{m} }}, \\
&= k(P)\otimes_{R_\mathfrak{m} } (\widehat{R_\mathfrak{m} })_{\mathfrak{q} _\lambda \widehat{R_\mathfrak{m} }}, \\
&= k(P)\otimes_{R_\mathfrak{m} } (R_\mathfrak{m} )_{\mathfrak{q} _\lambda} \otimes_{(R_\mathfrak{m} )_{\mathfrak{q} _\lambda}}(\widehat{R_\mathfrak{m} })_{\mathfrak{q} _\lambda \widehat{R_\mathfrak{m} }}, \\
&= k(P) \otimes_{(R_\mathfrak{m} )_{\mathfrak{q} _\lambda}}(\widehat{R_\mathfrak{m} })_{\mathfrak{q} _\lambda \widehat{R_\mathfrak{m} }}.
\end{align*}
In the last equation we have used that $k(P)_{\mathfrak{q} _\lambda} = k(P)$. By Proposition \ref{flat-inf-gen} we get that $k(P)$ is a finitely generated $(R_\mathfrak{m} )_{\mathfrak{q} _\lambda}$-module. This is a contradiction as $P(R_\mathfrak{m} )_{\mathfrak{q} _\lambda}$ is a non-maximal prime ideal in $(R_\mathfrak{m} )_{\mathfrak{q} _\lambda}$.
\end{proof}
We need the following result in the proof of Lemma \ref{lemma2}.
\begin{proposition}\label{flat-inf-gen}
Let $\phi \colon A \rightarrow B$ be a flat local map of Noetherian local rings. Let $L$ be an $A$-module.
Then $L$ is finitely generated as an $A$-module if and only if $L \otimes_A B$ is finitely generated as a $B$-module.
\end{proposition}
\begin{proof}
If $L$ is finitely generated as an $A$-module then clearly $L\otimes_A B$ is a finitely generated $B$-module.
Suppose now that $L$ is not a finitely generated $A$-module. Let
$$ L_1 \varsubsetneq L_2 \varsubsetneq \cdots \varsubsetneq L_n \varsubsetneq L_{n+1} \varsubsetneq \cdots $$
be a strictly ascending chain of submodules in $L$.
By faithful flatness we have that
$$ L_1\otimes B \varsubsetneq L_2\otimes B\varsubsetneq \cdots \varsubsetneq L_n\otimes B \varsubsetneq L_{n+1} \otimes B \varsubsetneq \cdots $$
is a strictly ascending chain of submodules of $L\otimes B$. It follows that $L\otimes B$ is not finitely generated.
\end{proof}
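For a concrete instance of Proposition \ref{flat-inf-gen} (an illustrative example added here, not needed in the sequel): take $A = K[X]_{(X)}$, $B = \widehat{A} = K[[X]]$ and $L = K(X)$, the fraction field of $A$. Then $L$ is not finitely generated as an $A$-module, and correspondingly $L \otimes_A B \cong K((X))$ is not finitely generated as a $K[[X]]$-module, since any finitely generated submodule of $K((X))$ has bounded pole order and so cannot contain $X^{-m}$ for all $m \geq 1$.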
\section{Proof of Theorem \ref{main}}
In this section we prove our main result. We need the following easily proved fact.
\begin{proposition}\label{local}
Let $A$ be a Noetherian ring
and let $T$ be an $A$-module. Let $f \in A$.
Then the natural map
$$\eta \colon T \rightarrow T_f \ \text{ is injective if and only if } \ f \notin \bigcup_{P \in \operatorname{Ass} T}P. $$
\end{proposition}
We now give
\begin{proof}[Proof of Theorem \ref{main}]
Set $M = \mathcal{T}(R)$. We prove that if $P$ is a prime ideal in $R$ and not maximal then $\mu_c(P, M) = 0$. Notice $\mu_c(P, M) = \mu_0(P, H^c_P(M))$, see \cite[1.4, 3.4(b)]{Lyu-1}. We consider two cases.
\textit{Case 1:} $\operatorname{height} P = n-1$.\\ Suppose if possible $\mu_0(P, H^c_P(M)) \neq 0$. Notice then $P$ is a minimal prime of $H^c_P(M)$. So if $\mathfrak{q} \in \operatorname{Ass}_R H^c_P(M)$ and $\mathfrak{q} \neq P$ then $\mathfrak{q}$ is a maximal ideal of $R$. In this case $\Gamma_\mathfrak{q}(H^c_P(M)) = E(R/\mathfrak{q})^r$ for some $r > 0$.
Since $\operatorname{Ass} H^c_P(M)$ is a finite set we can write $H^c_P(M) = L \oplus I$ \textit{as $R$-modules} where $\operatorname{Ass}_R L = \{ P \}$ and $I = E(R/\mathfrak{m}_1)^{r_1}\oplus E(R/\mathfrak{m}_2)^{r_2}\oplus \cdots \oplus E(R/\mathfrak{m}_s)^{r_s}$ for some maximal ideals $\mathfrak{m}_1,\ldots,\mathfrak{m}_s$ and finite numbers $r_1,\ldots,r_s$.
Thus $I$ is an injective $R$-module. Also note that both $L$ and $I$ are $P$-torsion. Further note that $I = \Gamma_{\mathfrak{m}_1\mathfrak{m}_2\cdots \mathfrak{m}_s}(H^c_P(M))$ is an $A_n(K)$-submodule of $H^c_P(M)$ and so $L \cong H^c_P(M)/I$ is a holonomic $A_n(K)$-module.
Let $f \in R\setminus P$. Recall $\operatorname{injdim} M = c$. We have an exact sequence
\[
H^c_{(P,f)}(M) \rightarrow H^c_P(M) \rightarrow H^c_P(M)_f \rightarrow H^{c+1}_{(P,f)}(M) = 0.
\]
Thus the natural map $\eta \colon L \rightarrow L_f$ is surjective. As $f \notin P$ and $\operatorname{Ass} L = \{ P \}$ we get that $\eta$ is also injective. Thus $L = L_f$ for every $f \in R\setminus P$. It follows that $L = L_P$. Also note that $L_P = H^c_P(M)_P$. By \cite[3.4(b)]{Lyu-1}, $L_P = E(R/P)^l$ for some finite $l > 0$. Thus we have that $E(R/P)$ is a holonomic $A_n(K)$-module. By \ref{lemma1} this is a contradiction.
\textit{Case 2:} $\operatorname{height} P \leq n-2$. \\ Suppose if possible $\mu_0(P, H^c_P(M)) \neq 0$. Let $\operatorname{Ass} H^c_P(M) = \{ P, Q_1,\ldots, Q_c \}$ where $Q_i \neq P$. As $H^c_P(M)$ is $P$-torsion we have that $Q_i \supsetneq P$ for all $i$. Let $f_i \in Q_i \setminus P$. Put $f = f_1\cdots f_c$. By Theorem \ref{locus}(4) there exists a maximal ideal $\mathfrak{m}$ of $R$ such that $f \notin \mathfrak{m}$ and $(R/P)_\mathfrak{m}$ is regular local. Localize at $\mathfrak{m}$. Notice $\operatorname{Ass}_{R_\mathfrak{m}} H^c_P(M)_\mathfrak{m} = \{ PR_\mathfrak{m} \}$.
Let $g \in R_\mathfrak{m} \setminus PR_\mathfrak{m} $. Notice $\operatorname{injdim}_{R_\mathfrak{m} } M_\mathfrak{m} \leq c$. So we have an exact sequence
\[
H^c_{(PR_\mathfrak{m} , g)}(M_\mathfrak{m} ) \rightarrow H^c_{PR_\mathfrak{m} }(M_\mathfrak{m} ) \rightarrow H^c_{PR_\mathfrak{m} }(M_\mathfrak{m} )_g \rightarrow H^{c+1}_{(PR_\mathfrak{m} , g)}(M_\mathfrak{m} ) = 0.
\]
Thus the natural map $\eta \colon H^c_{PR_\mathfrak{m}}(M_\mathfrak{m}) \rightarrow H^c_{PR_\mathfrak{m}}(M_\mathfrak{m})_g$ is surjective. By Proposition \ref{local} it is also injective as $\operatorname{Ass} H^c_{PR_\mathfrak{m}}(M_\mathfrak{m}) = \{ PR_\mathfrak{m} \}$. It follows that $H^c_{PR_\mathfrak{m}}(M_\mathfrak{m}) = H^c_{PR_\mathfrak{m}}(M_\mathfrak{m})_g$.
So $H^c_{PR_\mathfrak{m} }(M_\mathfrak{m} )= H^c_{PR_\mathfrak{m} }(M_\mathfrak{m} )_{PR_\mathfrak{m} }$. By \cite[1.4, 3.4(b)]{Lyu-1}, we get that $H^c_{PR_\mathfrak{m} }(M_\mathfrak{m} )_P \cong E(R_\mathfrak{m} /PR_\mathfrak{m} )^s$ for some finite $s > 0$.
By \ref{flat-L} there exists a Lyubeznik functor $\mathcal{T}^\prime$ on $Mod(R_\mathfrak{m})$ with
$\mathcal{T}^\prime(R_\mathfrak{m}) = \mathcal{T}(R)\otimes R_\mathfrak{m} = M_\mathfrak{m}$. Observe that
$\mathcal{G} = H^c_{PR_\mathfrak{m}}\circ \mathcal{T}^\prime$ is a Lyubeznik functor on $R_\mathfrak{m}$. We have $\mathcal{G}(R_\mathfrak{m}) = E(R_\mathfrak{m}/PR_\mathfrak{m})^s$. This contradicts Lemma \ref{lemma2}.
\end{proof}
\section{Proof of Proposition \ref{main-2}}
In this section we prove Proposition \ref{main-2}. Throughout $K$ is an algebraically closed field of characteristic zero. Let $R = K[X_1,\ldots, X_n]$ and let $A_n(K)$ be the $n^{th}$-Weyl algebra over $K$. We use notions developed in \cite[Chapter 1]{B}, in particular we use the notion of Bernstein filtration of $A_n(K)$, good filtration, multiplicity and dimension of a finitely generated $A_n(K)$-module.
We will use the fact that for any holonomic module $M$ we have $\ell(M) \leq e(M)$; here $\ell(M)$ denotes the length of $M$ as an $A_n(K)$-module and $e(M)$ denotes its multiplicity.
The following result is well-known. So we just sketch an argument.
\begin{proposition}\label{mult-inj}
Let $\mathfrak{m} $ be a maximal ideal of $R$. Then $e(E(R/\mathfrak{m} )) = 1$. In particular $E(R/\mathfrak{m} )$ is a simple $A_n(K)$-module.
\end{proposition}
\begin{proof}(Sketch)
As $K$ is algebraically closed $\mathfrak{m} = (X_1-a_1,\ldots,X_n-a_n)$ for some $a_1,\ldots,a_n \in K$. After a change of variables we may assume $a_1= \cdots = a_n = 0$.
Note $E(R/\mathfrak{m} ) = K[\partial_1,\ldots,\partial_n]$. The obvious filtration on $E(R/\mathfrak{m} )$ is compatible with the Bernstein filtration and is good. So $e(E(R/\mathfrak{m} )) = 1$.
\end{proof}
\s\label{bound} Let $M$ be a holonomic $A_n(K)$-module. Let $f \in R$ be a polynomial of degree $d$. Then by the proof of Theorem 5.19 in Chapter 1 of \cite{B} we have
$$e(M_f) \leq e(M)(1+ \deg f )^n.$$
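For example, when $n = 1$, $M = R = K[X_1]$ and $f = X_1$ this bound is sharp: from the exact sequence $0 \rightarrow R \rightarrow R_f \rightarrow H^1_{(X_1)}(R) \rightarrow 0$, the isomorphism $H^1_{(X_1)}(R) \cong E(R/(X_1))$ and additivity of multiplicity we get $e(R_f) = e(R) + e(E(R/(X_1))) = 1 + 1 = 2 = e(R)(1+\deg f)^1$.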
We now give
\begin{proof}[Proof of Proposition \ref{main-2}]
Set $M = \mathcal{T}(R)$. Let $\mathfrak{m} = (X_1-a_1,\ldots,X_n-a_n)$ be a maximal ideal of $R$. Fix $i$ with $0 \leq i \leq c$. Notice $\mu_i(\mathfrak{m}, M) = \mu_0(\mathfrak{m}, H^i_\mathfrak{m}(M))$, see \cite[1.4, 3.4(b)]{Lyu-1}. If $H^i_\mathfrak{m}(M) = E(R/\mathfrak{m})^{r_i}$ then $\mu_i(\mathfrak{m}, M) = r_i = \ell(H^i_\mathfrak{m}(M))$.
To compute $H^i_\mathfrak{m} (M)$ we use the \v{C}ech-complex:
\[
\mathbf{C} \colon 0 \rightarrow M \rightarrow \bigoplus_{j = 1}^{n}M_{(X_j-a_j)} \rightarrow \cdots \rightarrow M_{(X_1-a_1)\cdots(X_n-a_n)} \rightarrow 0.
\]
In particular we have that $\ell(H^i_\mathfrak{m}(M)) \leq \ell(\mathbf{C}^i)$. Notice $\mathbf{C}^i$ has $\binom{n}{i}$ copies of modules of the form $M_f$ where $f$ is a product of $i$ distinct polynomials among $X_1-a_1,\ldots,X_n-a_n$. In particular $\deg f = i$. So by
\ref{bound} we have $e(M_f) \leq e(M)(1+ i)^n$. Thus
$$r_i \leq e(\mathbf{C} ^i) \leq \binom{n}{i}e(M)(1+ i)^n.$$
\end{proof}
\begin{remark}\label{char-p}
If $K_p$ is an algebraically closed field of characteristic $p$ and $S = K_p[X_1,\ldots,X_n]$ then Proposition \ref{main-2} holds for functors of the form
$$\mathcal{G}(-) = H^{i_1}_{I_1}(H^{i_2}_{I_2}(\cdots (H^{i_r}_{I_r}(-))\cdots ).$$
The point is that $\mathcal{G}(R)$ is a holonomic $D$-module where $D$ is the ring of
$K_p$-linear differential operators over $S$. Here we use the notion of holonomicity by V. Bavula \cite{VB}. In this case
the bound $\ell(M_f) \leq n!\ell(M)(1+\deg f)^n$ holds, see \cite[Proof of 3.6]{Lyu-3}. The proof then follows by the same argument as before.
\end{remark}
\end{document}
\begin{document}
\title{Experimental realization of para-particle oscillators}
\author{C. Huerta Alderete}
\email[e-mail: ]{[email protected]}
\affiliation{Joint Quantum Institute, Department of Physics, University of Maryland, College Park, MD 20742, USA}
\author{Alaina M. Green}
\affiliation{Joint Quantum Institute, Department of Physics, University of Maryland, College Park, MD 20742, USA}
\author{Nhung H. Nguyen}
\affiliation{Joint Quantum Institute, Department of Physics, University of Maryland, College Park, MD 20742, USA}
\author{Yingyue Zhu}
\affiliation{Joint Quantum Institute, Department of Physics, University of Maryland, College Park, MD 20742, USA}
\author{B. M. Rodr\'iguez-Lara}
\affiliation{Tecnologico de Monterrey, Escuela de Ingenier\'ia y Ciencias, Ave. Eugenio Garza Sada 2501, Monterrey, N. L., Mexico, 64849}
\author{Norbert M. Linke}
\affiliation{Joint Quantum Institute, Department of Physics, University of Maryland, College Park, MD 20742, USA}
\date{\today}
\begin{abstract}
Para-particles are fascinating because they are neither bosons nor fermions.
While unlikely to be found in nature, they might represent accurate descriptions of physical phenomena like topological phases of matter.
We report the quantum simulation of para-particle oscillators by tailoring the native couplings of two orthogonal motional modes of a trapped ion.
Our system reproduces the dynamics of para-bosons and para-fermions of even order very accurately.
These results represent the first experimental analogy of para-particle dynamics in any physical system and demonstrate full control of para-particle oscillators.
\end{abstract}
\maketitle
\section{Introduction}
Investigating particles that are neither bosons nor fermions is of fundamental interest in physics.
One example is the so-called para-particles, which were formally introduced in the early years of quantum mechanics as a generalization of bosons and fermions \cite{Green1953p270,Greenberg1965pB1155,Plyushchay1997p619}.
They fall into two categories, para-bosons and para-fermions, and are further classified by a parameter of deformation, or order of para-quantization, $p\geq 1$.
Like ordinary particles, para-particles are characterized by their spin and dimensionality in Hilbert space; para-bosons have integer spin and an infinite-dimensional representation, while para-fermions have half-integer spin and a finite-dimensional representation.
Standard bosons can be thought of as para-particles of order $p=1$.
During the initial development of the theory of para-particles, there was great interest in finding out if there are matter/field candidates for them in nature.
However, it was shown that it is not possible to distinguish a para-particle from a collection of standard particles by measurement \cite{Hartle1969p2043}.
As a result, no particle from the standard model is currently considered a para-particle candidate \cite{Greenberg1965pB1155,Baker2015p929}.
The field has become purely theoretical but still has produced a string of remarkable results.
Para-bosons and para-fermions were considered as candidates for the particles of dark matter/dark energy \cite{Ebadi2013p057,Nelson2016p034039,Kitabayashi2018p043504} and as a tool to describe excitations in solids \cite{Safonov1991p109}.
In para-quark models quarks were considered as a para-fermion of order $p=3$ before the color model was introduced \cite{Bracken1973p1784}.
Proposals for the quantum simulation of para-bosons \cite{HuertaAlderete2017p013820} and para-fermions \cite{HuertaAlderete2018p11572} renewed the interest of the community in uncovering potential uses of para-particles, mainly from the vantage of statistical thermodynamics \cite{Hama1992p149,Stoilva2020p126421}, and their utility in the characterization of non-classical properties of light \cite{HuertaAlderete2017p043835,Mojaveri2018p529,Mojaveri2018p346,Mojaveri2018p1850134,Wei_Min2001p283} as well as possible applications in optics \cite{RodriguezWalton2020p043840}.
Despite this wave of new activity, to the best of our knowledge, para-particle dynamics have not been realized experimentally, nor have the many theoretical predictions associated with them been verified.
Many physical systems have been put forward to produce experimental evidence, such as laser-inscribed arrays of evanescently coupled waveguides \cite{RodriguezWalton2020p043840} or hybrid systems involving two transmission line resonators controlled by a superconducting qubit \cite{Li2012p014303,Ma2014p062342,Strauch2010p050501}.
Following the path laid out in Refs. \cite{HuertaAlderete2017p013820,HuertaAlderete2018p11572}, a quantum system involving a spin$-1/2$ degree of freedom coupled to two bosonic modes can be engineered to simulate para-bosons and para-fermions \cite{Chilingaryan2015p245501, HuertaAlderete2016p414001}.
We choose a trapped ion system \cite{Liebfried2003p281,Haffner2008p155,Blatt2008p1476,Monroe2021p025001} for this experimental realization for its track record of motional state control \cite{Meekhof1996p1796,An2015p193,Um2016p11410,Debnath2018p073001,Jost2009p683,Ohira2019p060301}, tailored spin-motion interactions \cite{Chen2021p060311,Lemmer2018p073002}, and high fidelity operations including reliable state preparation and measurement.
Trapped ions provide a highly controllable quantum environment, which grants access to phenomena in regimes that are not otherwise accessible in nature \cite{Schutzhold2007p201301,Lv2018p021027}.
In this work, we report the analog quantum simulation of both kinds of para-particle oscillators by using the spin of a trapped atomic ion and two of its bosonic modes of motion in the trap, tailoring laser-induced couplings between them.
Our system precisely reproduces the relevant dynamics for para-bosons and para-fermions of even order.
These results represent the first experimental realization of para-particle dynamics in any physical system.
They also demonstrate full control of para-particle oscillators using a trapped-ion experiment and open the door for the verification of past proposals and future applications for para-particle models.
\section{Para-particle oscillators}
A para-particle oscillator may be understood as a parity--deformed oscillator that generalizes the standard Fermi--Dirac and Bose--Einstein statistics associated with fermions and bosons \cite{Macfarlane1994p1054, Dunne1995p3889, Plyushchay1997p619}.
In the interaction picture, the Hamiltonian for a driven para-particle oscillator,
\begin{eqnarray}\label{eq:pP_Interaction}
\hat{H}_{\vartheta} = \frac{\hbar g}{2} \left(\hat{A}_{\vartheta} + \hat{A}_{\vartheta}^{\dagger}\right),
\end{eqnarray}
is given in terms of the raising (lowering) operators, $\hat{A}^{\dagger}_{\vartheta}~(\hat{A}_{\vartheta})$, of para-fermions, $\vartheta=pF$, and para-bosons, $\vartheta=pB$, that define a para-particle algebra, see Appendix \ref{app:representation} for more details.
This can be related to an ion system coupled to two motional modes through the effective operators,
\begin{eqnarray}\label{eq:pF_operators}
\hat{A}_{pF} &=& \sqrt{2} \left( \hat{a}_{x} \hat{\sigma}_{+} + \hat{a}_{y}^{\dagger} \hat{\sigma}_{-} \right),\\ \label{eq:pB_operators}
\hat{A}_{pB} &=& \sqrt{2} \left( \hat{a}_{x} \hat{\sigma}_{-} - \hat{a}_{y} \hat{\sigma}_{+} \right),
\end{eqnarray}
where $\hat{\sigma}_{+}~(\hat{\sigma}_{-})$ is the spin-raising (lowering) operator and $\hat{a}^{\dagger}_{j}~\left(\hat{a}_{j}\right)$, with $j=x,y$, is the phonon creation (annihilation) operator.
In order to simulate this oscillator, it is necessary to engineer an effective Hamiltonian where both field modes couple simultaneously to the spin under the Jaynes--Cummings (JC) dynamics, Eq. (\ref{eq:pF_operators}), equivalent to a para-Fermi oscillator of even order.
When the $y~(x)$ motional mode is coupled to the spin system under (anti-) JC dynamics, Eq. (\ref{eq:pB_operators}), we create a system that represents a driven para-Bose oscillator, also of even order.
See Appendix \ref{app:mapping} or Refs. \cite{HuertaAlderete2018p11572,HuertaAlderete2017p013820} for a complete derivation.
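To make this mapping concrete, the following minimal sketch (not part of the experiment; it assumes the open-source QuTiP package, and the truncation, coupling strength, time grid and initial state are illustrative placeholders) builds the operators of Eqs. (\ref{eq:pF_operators})-(\ref{eq:pB_operators}), the driven Hamiltonian of Eq. (\ref{eq:pP_Interaction}), and evolves the order-$2$ para-Fermi vacuum:
\begin{verbatim}
import numpy as np
from qutip import (basis, destroy, mesolve, num, qeye,
                   sigmam, sigmap, sigmaz, tensor)

N = 8                      # Fock truncation per motional mode (placeholder)
g = 2 * np.pi * 1.0        # coupling in arbitrary units (placeholder)

ax = tensor(destroy(N), qeye(N), qeye(2))      # mode x annihilation
ay = tensor(qeye(N), destroy(N), qeye(2))      # mode y annihilation
sp = tensor(qeye(N), qeye(N), sigmap())        # spin raising
sm = tensor(qeye(N), qeye(N), sigmam())        # spin lowering

A_pF = np.sqrt(2) * (ax * sp + ay.dag() * sm)  # para-Fermi lowering operator
A_pB = np.sqrt(2) * (ax * sm - ay * sp)        # para-Bose lowering operator
H_pF = 0.5 * g * (A_pF + A_pF.dag())           # driven para-Fermi oscillator
H_pB = 0.5 * g * (A_pB + A_pB.dag())           # driven para-Bose oscillator

psi0 = tensor(basis(N, 0), basis(N, 1), basis(2, 1))   # |down, n_x=0, n_y=1>
times = np.linspace(0.0, 10.0, 200)
e_ops = [tensor(qeye(N), qeye(N), sigmaz()),
         tensor(num(N), qeye(N), qeye(2)),
         tensor(qeye(N), num(N), qeye(2))]
result = mesolve(H_pF, psi0, times, [], e_ops)
N_pF = result.expect[1] - result.expect[2] + 1.0       # <n_x> - <n_y> + p/2
\end{verbatim}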
\begin{figure}
\caption{{\bf Ladder of para-particle states.}}
\label{fig:Ladder_States}
\end{figure}
The representation implemented in our experiment allows for vacuum states such that $\hat{A}_{\vartheta} \vert p;~\text{vac} \rangle = 0 $. The para-Fermi vacuum state of order $2n$ takes the form $\vert 2n;~ \text{vac}\rangle \equiv \vert \downarrow, 0, n \rangle$, and it is part of a $(2n+1)$-dimensional Hilbert space.
In the para-Bose case of order $2(n+1)$, there can be two vacuum states, $\vert 2(n+1);~\text{vac} \rangle \equiv \vert \downarrow, n, 0 \rangle $ and $\vert 2(n+1);~\text{vac} \rangle \equiv \vert \uparrow, 0, n \rangle $, each part of an infinite-dimensional Hilbert space.
Figure \ref{fig:Ladder_States} illustrates the ladder of para-particle states of order $2$; the states $\left\{\vert \downarrow, 1, 0 \rangle, \vert \uparrow, 0, 0 \rangle, \vert \downarrow, 0, 1 \rangle\right\}$ and $\left\{\vert \downarrow, 0, 0 \rangle, \vert \uparrow, 1, 0 \rangle, \vert \downarrow, 1, 1 \rangle, \ldots\right\}$ form an orthonormal basis for para-fermions and para-bosons, respectively.
By choosing an adequate initial state, we can simulate any para-particle oscillator of even order on a trapped ion system. Measurements of accessible observables in the ion frame, such as the spin $\langle \hat{\sigma}_{z}\rangle$ and the average phonon number $\langle \hat{n}_{j}\rangle$, can be related to physical quantities in the para-particle frame, such as the expectation value of the para-particle number $\langle \hat{\mathcal{N}}_{\vartheta} \rangle$,
\begin{eqnarray} \label{eq:N_pF}
\langle \hat{\mathcal{N}}_{pF} \rangle &=& \langle \hat{n}_{x} \rangle - \langle\hat{n}_{y} \rangle + \frac{p}{2} , \\ \label{eq:N_pB}
\langle \hat{\mathcal{N}}_{pB} \rangle &=& \langle \hat{n}_{x} \rangle + \langle\hat{n}_{y} \rangle + 1 - \frac{p}{2},
\end{eqnarray}
where $p$ is the para-particle order.
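For instance, for the order-$2$ vacuum states shown in Fig.~\ref{fig:Ladder_States}, Eqs. (\ref{eq:N_pF})-(\ref{eq:N_pB}) give $\langle \hat{\mathcal{N}}_{pF} \rangle = 0 - 1 + 1 = 0$ for $\vert \downarrow, 0, 1 \rangle$ and $\langle \hat{\mathcal{N}}_{pB} \rangle = 0 + 0 + 1 - 1 = 0$ for $\vert \downarrow, 0, 0 \rangle$, i.e. both vacua carry zero para-particle excitations, as they should.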
\section{Experimental setup}
We demonstrate the experimental realization of para-particles using a trapped $^{171}$Yb$^+$ ion confined in a linear Paul trap.
The ion is more tightly confined along the two transverse directions, $x$ and $y$, with mode frequencies $\omega_{x} = 2\pi \times 3.05$ MHz and $\omega_{y} = 2\pi \times 2.88$ MHz, respectively, which are used as the bosonic modes for the simulation, see Fig. \ref{fig:pP_Schema}(a).
\begin{figure}
\caption{{\bf Experimental scheme.}}
\label{fig:pP_Schema}
\end{figure}
The ion is laser-cooled close to the motional ground state of both of these modes by Doppler cooling and subsequent Raman sideband cooling.
The spin-$1/2$ system is encoded in the hyperfine-split $^{2}S_{1/2}$ manifold, where we choose $\vert \downarrow \rangle \equiv \vert F=0; m_{F}=0 \rangle$ and $\vert \uparrow \rangle \equiv \vert F=1; m_{F}=0 \rangle$ with a frequency difference $\omega_{HF}=2\pi \times 12.642821$ GHz, which is insensitive to magnetic field fluctuations to first order.
The spin is initialized by optical pumping and read out using state-dependent fluorescence detection \cite{Olmschenk2007p052314}.
A pair of counter-propagating Raman beams from a single $355$-nm mode-locked laser \cite{Islam2014p3238} are used to manipulate the spin of the ion.
These beams can also impart momentum on the ion to change the motional state in the trap by addressing the motional sidebands $\omega_{HF} \pm \omega_{x(y)}$. In the interaction picture, those Raman laser operations can be described by the following Hamiltonians,
\begin{eqnarray}\label{eq:rsb}
\hat{H}_{JC} &=& \frac{\hbar \eta \Omega_r}{2} \left( \hat{a} \hat{\sigma}_{+} e^{i \phi } + \hat{a}^{\dagger} \hat{\sigma}_{-} e^{-i \phi}\right),\\ \label{eq:bsb}
\hat{H}_{aJC} &=& \frac{\hbar \eta \Omega_b}{2} \left( \hat{a} \hat{\sigma}_{-} e^{i \phi } + \hat{a}^{\dagger} \hat{\sigma}_{+} e^{-i \phi}\right).
\end{eqnarray}
Here again, $\hat{\sigma}_{+}~(\hat{\sigma}_{-})$ is the spin-raising (lowering) operator, $\hat{a}^\dagger~(\hat{a})$ is the phonon creation (annihilation) operator, $\Omega_{r(b)}$ is the sideband Rabi frequency of the (anti-) JC model or red (blue) sideband, $\eta= \Delta k \sqrt{\hbar/M \omega_{j}}$ is the Lamb-Dicke parameter where $\Delta k$ is the net wave vector of the Raman laser beams, $M$ is the mass of the $^{171}$Yb$^+$ ion, and $\phi$ is the phase difference between the Raman laser beams.
During the experiment, we drive a combination of those operations, Eqs. (\ref{eq:rsb})-(\ref{eq:bsb}), on both modes, $x$ and $y$, simultaneously, see Fig. \ref{fig:pP_Schema}(b), to realize the dynamics of a para-particle oscillator \cite{HuertaAlderete2017p013820,HuertaAlderete2018p11572}.
A typical experimental sequence is shown in Fig. \ref{fig:pP_Schema}(c).
First, we perform cooling of the motion and optical pumping to prepare the system in the state $\vert \downarrow, 0, 0 \rangle$ \cite{Monroe1995p4011}, and then transfer the system to a specific initial state $\vert \psi, n_{x}, n_{y} \rangle$ based on the choice of para-particle order.
To initialize the motional modes in a particular Fock state, phonon excitations are introduced by resonantly driving a combination of blue-sideband, red-sideband and carrier $\pi$-pulse to prepare the desired state, see Appendix \ref{App:FSPrep} for more details.
Then, we off-resonantly drive the two sideband transitions responsible for the para-particle dynamics with precisely defined intensities and detunings.
After the para-particle evolution, we read out either the spin or the motional state.
While the spin measurement is made directly with the expectation value calculated by averaging over many repetitions, the motional readout requires the spin as an intermediary.
First, we reset the spin to $\vert \downarrow \rangle$ by applying an optical pumping pulse with negligible effect on the motional state population since our system is in the Lamb-Dicke regime.
Subsequently, a blue-sideband pulse with frequency $\omega_{x(y)}$ is applied to probe the Fock state distribution.
This is repeated with increasing pulse duration resulting in Rabi oscillations with frequency components whose amplitudes depend on the Fock state populations \cite{Meekhof1996p1796}.
The resulting signal is fitted to a sum of sinusoids to obtain their relative amplitudes, with the frequencies and decay rates determined by an independent measurement of the Rabi frequency; more details are given in Appendix \ref{App:MotionalAnalysis}.
\section{Results}
\subsection{Para-fermions}
We first simulate a para-fermion of order $2$, hence we prepare the vacuum state $\vert 2; 0 \rangle \equiv \vert \downarrow, 0,1 \rangle$.
The simulation consists of driving the red sidebands of both motional modes with equal Rabi frequency, Eq. (\ref{eq:pF_operators}).
The spin oscillates with amplitude $0.5$, Fig. \ref{fig:parafermions}(a), and facilitates excitation exchange between $x$ (blue line) and $y$ (yellow line) motional modes, Fig. \ref{fig:parafermions}(b). After a full oscillation of the spin, the population of the $y$ mode switches coherently to the $x$ mode. This represents the evolution of the para-Fermi number operator, $\langle \hat{\mathcal{N}}_{pF} \rangle$ in Eq. (\ref{eq:N_pF}), in the para-Fermi frame, shown in Fig. \ref{fig:parafermions}(c).
The dynamics become more complex as we increase the order, and hence scale up the number of occupied Fock states in the trapped-ion system.
We also realize a system of order $10$, starting from the vacuum state $\vert 10; 0\rangle \equiv\vert \downarrow, 0,5 \rangle$.
The spin evolution exhibits oscillations that collapse and then revive partially, Fig. \ref{fig:parafermions}(d), a feature also seen in the excitation exchange between motional modes, Fig. \ref{fig:parafermions}(e).
This translates into the expected para-Fermi number excitation, Fig. \ref{fig:parafermions}(f).
\begin{figure}
\caption{{\bf Para-Fermi dynamics.}}
\label{fig:parafermions}
\end{figure}
The data points in Figure \ref{fig:parafermions} correspond to experimental results.
Every data point is acquired by averaging over 300 measurements of the experimental sequence, and the error bars are statistical for the spin population and a result of the fitting procedure for the motional modes.
Continuous lines correspond to a numerical simulation with up to eight Fock states per motional mode.
We investigate effects of motional heating in the trap as a potential source of error by including a Lindblad term in our numerical simulation, see Appendix \ref{app:Lindblad}. We find them to be small, especially for shorter times of the evolution, see Fig. \ref{fig:parafermions}.
We attribute the additional deviations visible in some places to fluctuations in the experimental parameters, especially the intensity, which is subject to beam pointing drift.
We note that the evolution of the lowest energy state under driven para-Fermi oscillator dynamics resembles a binomial state in the ion frame \cite{HuertaAlderete2018p11572}.
This produces single frequency oscillations of the spin for small para-particle order, Fig. \ref{fig:parafermions}(a), and a multiple-frequency beat as the order increases, Fig. \ref{fig:parafermions}(d).
To the best of our knowledge, this is the first time that binomial states have been created in a physical system since their theoretical introduction \cite{Stoler1985p345,VidellaBarranco1994p5233}.
\subsection{Para-bosons}
We simulate the para-Bose oscillator of order $2$ by first preparing the vacuum state, $\vert 2;0\rangle \equiv \vert \downarrow, 0, 0 \rangle$. We choose a low order due to the fast increase of the Fock state occupation from the simulation dynamics.
The simulation consists of driving near the blue sideband on the $x$ motional mode and the red sideband on the $y$ motional mode with equal Rabi frequencies. The spin evolution corresponds to a damped oscillation, Fig. \ref{fig:pb_p2}(a), while the motional excitations increase, Fig. \ref{fig:pb_p2}(b), reproducing the expected dynamics of the para-Bose number operator, Fig. \ref{fig:pb_p2}(c).
The experimental outcome is directly compared to a numerical simulation, continuous lines, with a Hilbert space of up to 35 Fock states per motional mode; motional heating is investigated by including a Lindblad term on the simulations, see Appendix \ref{app:Lindblad}.
The para-Bose simulation is more sensitive to anisotropy in the red and blue sideband driving \cite{HuertaAlderete2017p013820}, so we take as an upper (lower) bound the frequency $g_{\pm} = \Omega_{pB} \pm \delta$ with $\Omega_{pB}= (\Omega_{r}+\Omega_{b})/2$ and $\delta= \vert \Omega_{r} - \Omega_{b}\vert/2$, where $\Omega_{r(b)}$ comes from calibration measurements on the experiment. The shadings in Fig. \ref{fig:pb_p2} correspond to these bounds.
As in the para-Fermi case, every data point is acquired by averaging over 300 measurements of the experimental sequence and error bars are statistical for spin population and a result of the fitting procedure for the motional modes.
We note that this time evolution is related to a coherent state \cite{HuertaAlderete2017p013820}, with interesting non-classical properties; the nature of its statistics (sub/super- Poissonian) is controlled by the Rabi frequency of the drive and the para-particle order \cite{HuertaAlderete2017p043835}.
\begin{figure}
\caption{{\bf Para-Bose dynamics.}}
\label{fig:pb_p2}
\end{figure}
\section{Conclusion}
Driving dynamics with trapped ions that involve the Hilbert spaces of multiple orthogonal vibrational modes simultaneously is a promising experimental capability for the simulation of physical systems \cite{Davoudi2021p, Chen2021p060311}.
We demonstrate highly controllable experimental realizations of both kinds of para-particle oscillators for the first time.
Our work is a first step towards the more detailed studies of para-particles that involve single and many body interactions, as well as the search for new properties such as topological phases \cite{Cai2020pnwaa196} or multi-frequency conversion \cite{Kockum2017p2045}.
\begin{acknowledgments}
We thank Elijah Keene Kaimiola Mossman for his help with the experiment.
This work is supported by the National Science Foundation via the Physics Frontier Center at the Joint Quantum Institute (PHY-1430094), the Maryland-ARL Quantum Partnership (W911NF1920181), and the DOE Office of Science, Office of Nuclear Physics (DE-SC0021143). A. M. G. is supported by a Joint Quantum Institute Postdoctoral Fellowship.
\end{acknowledgments}
\nocite{*}
\appendix
\section{Para-particle representation}\label{app:representation}
Para-particle creation (annihilation) operators $\hat{A}_{\vartheta}~(\hat{A}^{\dagger}_{\vartheta})$ follow the so-called tri-linear commutation relations,
\begin{eqnarray}
\left[ \left[ \hat{A}_{pF}, \hat{A}^{\dagger}_{pF} \right] , \hat{A}_{pF} \right] = -2\hat{A}_{pF}, &\qquad& \left[ \left[ \hat{A}_{pF}, \hat{A}^{\dagger}_{pF} \right] , \hat{A}_{pF}^{\dagger} \right] = 2\hat{A}_{pF}^{\dagger},\\
\left[ \left\{ \hat{A}_{pB}, \hat{A}^{\dagger}_{pB} \right\} , \hat{A}_{pB} \right] = -2\hat{A}_{pB}, &\qquad& \left[ \left\{ \hat{A}_{pB}, \hat{A}^{\dagger}_{pB} \right\} , \hat{A}_{pB}^{\dagger} \right] = 2\hat{A}_{pB}^{\dagger}.
\end{eqnarray}
These relations include the usual bosons and fermions. We consider a driven oscillator Hamiltonian,
\begin{eqnarray}
\hat{H}_{d} &=& \hat{H}_{0} + \hbar g \left( \hat{A}_{\vartheta} + \hat{A}_{\vartheta}^{\dagger}\right) \cos \omega_{d}t,
\end{eqnarray}
where $\hat{H}_{0}$ is the free para-particle Hamiltonian defined as $\hat{H}_{0} =\frac{\hbar \omega}{2}\left\{ \hat{A}_{pB}, \hat{A}_{pB}^{\dagger}\right\}$ for para-bosons and $\hat{H}_{0} =\frac{\hbar\omega}{2}\left[ \hat{A}_{pF}, \hat{A}_{pF}^{\dagger}\right]$ for para-fermions.
We move to the frame given by the free para-particle Hamiltonian to obtain,
\begin{eqnarray}
\hat{H}_{\vartheta} &=& e^{i \hat{H}_{0}t/\hbar} \hat{H}_{d} e^{-i \hat{H}_{0}t/\hbar} \nonumber \\
&=& \hbar g \left( \hat{A}_{\vartheta} e^{-i\omega t} + \hat{A}_{\vartheta}^{\dagger} e^{i\omega t}\right) \cos \omega_{d}t.
\end{eqnarray}
Assuming resonant driving, $\vert \omega - \omega_{d}\vert = 0$, and weak coupling, $\vert \omega + \omega_{d}\vert \gg g$, allows us to apply a rotating wave approximation to simplify this interaction Hamiltonian,
\begin{eqnarray} \nonumber
\hat{H}_{\vartheta} &=& \frac{\hbar g}{2} \left( \hat{A}_{\vartheta} + \hat{A}_{\vartheta}^{\dagger} \right).
\end{eqnarray}
We use a parity-deformed representation \cite{Plyushchay1997p619},
\begin{eqnarray} \label{eq:pF_commutations}
\left\{ \hat{A}^{\dagger}_{pF}, \hat{A}_{pF} \right\} &=& (p+1) -\hat{\mathcal{R}}_{pF}, \qquad \left[ \hat{A}^{\dagger}_{pF}, \hat{A}_{pF} \right] = 2\left( \hat{\mathcal{N}}_{pF} -\frac{p}{2} \right)\hat{\mathcal{R}}_{pF}, \\
\left[ \hat{A}_{pB}, \hat{A}^{\dagger}_{pB}\right] &=& 1 + (p-1)\hat{\mathcal{R}}_{pB}, \qquad \frac{1}{2}\left\{ \hat{A}_{pB}, \hat{A}^{\dagger}_{pB}\right\} = \hat{\mathcal{N}}_{pB} + \frac{p}{2},\label{eq:pB_commutations} \\\nonumber
&&\left\{ \hat{\mathcal{R}}_{\vartheta}, \hat{A}_{\vartheta} \right\} = \left\{ \hat{\mathcal{R}}_{\vartheta}, \hat{A}^{\dagger}_{\vartheta} \right\} = 0,
\end{eqnarray}
where $\hat{\mathcal{R}}_{\vartheta} = e^{-i \pi \hat{\mathcal{N}}_{\vartheta}}
$ is the parity operator, such that $\hat{\mathcal{R}}_{\vartheta}^{2} = 1$, and $\hat{\mathcal{N}}_{\vartheta}$ is the para-particle number operator. For a given order we can define an orthonormal Hilbert space where the para-particle order is given by the action,
\begin{eqnarray}
\hat{A}_{\vartheta} \hat{A}^{\dagger}_{\vartheta} \vert p; \text{vac} \rangle = p \vert p; \text{vac} \rangle,
\end{eqnarray}
on the vacuum state.
\section{Mapping}\label{app:mapping}
Our simulation starts with the interaction Hamiltonian in the ion frame,
\begin{eqnarray}
\hat{H}_{ion}^{pF} &=& \frac{\hbar\Omega}{2} \left[\left( \hat{a}_{x} + \hat{a}_{y} \right) \hat{\sigma}_{+} + \left( \hat{a}_{x}^{\dagger} + \hat{a}_{y}^{\dagger} \right) \hat{\sigma}_{-} \right], \\
\hat{H}_{ion}^{pB} &=& \frac{\hbar\Omega}{2}\left[\left( \hat{a}_{x}^{\dagger} - \hat{a}_{y} \right) \hat{\sigma}_{+} + \left( \hat{a}_{x} - \hat{a}_{y}^{\dagger} \right) \hat{\sigma}_{-} \right],
\end{eqnarray}
where $\Omega=\Omega_{r}=\Omega_{b}$ is determined by experimental parameters. Then, we identify the para-particle operators, Eqs. (\ref{eq:pF_operators})-(\ref{eq:pB_operators}), such that we recover the interaction Hamiltonian for a driven para-particle oscillator, Eq. (\ref{eq:pP_Interaction}). Table \ref{tab:pP} summarizes the relations between the two frames.
\begin{table}[h!]
\centering
\begin{tabular}{c c c}
\hline\hline
Para-particle frame & \multicolumn{2}{c}{Ion frame} \\
 & para-fermions & para-bosons \\
\hline\hline
$\hat{A}_{\vartheta}$ & $\sqrt{2} \left( \hat{a}_{x} \hat{\sigma}_{+} + \hat{a}_{y}^{\dagger} \hat{\sigma}_{-} \right)$ & $\sqrt{2} \left( \hat{a}_{x} \hat{\sigma}_{-} - \hat{a}_{y} \hat{\sigma}_{+} \right)$ \\
$\left[ \hat{A}_{\vartheta}, \hat{A}^{\dagger}_{\vartheta}\right]$ & $2 \left(\hat{n}_{x} - \hat{n}_{y} \right)\hat{\sigma}_{z}$ & $2 \left(\hat{n}_{y} - \hat{n}_{x}\right) \hat{\sigma}_{z} + 2$ \\
$\left\{ \hat{A}^{\dagger}_{\vartheta}, \hat{A}_{\vartheta}\right\}$ & $2 \left(\hat{n}_{x} + \hat{n}_{y} + 1 + \hat{\sigma}_{z}\right)$ & $2 \left(\hat{n}_{x} + \hat{n}_{y} + 1 \right)$ \\
Parity & $-\hat{\sigma}_{z}$ & $\hat{\sigma}_{z}$ \\
$\hat{\mathcal{N}}_{\vartheta}$ & $\hat{n}_{x} - \hat{n}_{y} + \frac{p}{2}$ & $\hat{n}_{x} + \hat{n}_{y} + 1 - \frac{p}{2}$ \\
\hline
\end{tabular}
\caption{Relations between operators in the para-particle frame and in the ion frame.}
\label{tab:pP}
\end{table}
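As a quick consistency check of Table \ref{tab:pP}: writing $\hat{A}_{\vartheta}\hat{A}^{\dagger}_{\vartheta} = \frac{1}{2}\left\{ \hat{A}^{\dagger}_{\vartheta}, \hat{A}_{\vartheta}\right\} + \frac{1}{2}\left[ \hat{A}_{\vartheta}, \hat{A}^{\dagger}_{\vartheta}\right]$ and using the para-Fermi entries gives $\hat{A}_{pF}\hat{A}^{\dagger}_{pF} = \left(\hat{n}_{x} + \hat{n}_{y} + 1 + \hat{\sigma}_{z}\right) + \left(\hat{n}_{x} - \hat{n}_{y}\right)\hat{\sigma}_{z}$, which on the vacuum $\vert \downarrow, 0, n \rangle$ evaluates to $n + n = 2n = p$, in agreement with the defining relation $\hat{A}_{\vartheta} \hat{A}^{\dagger}_{\vartheta} \vert p; \text{vac} \rangle = p \vert p; \text{vac} \rangle$ given above.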
\section{Fock state preparation}\label{App:FSPrep}
The blue sideband, or anti-Jaynes--Cummings, interaction induces transitions between the states $\vert \downarrow , n \rangle$ and $\vert \uparrow , n + 1 \rangle$ with Rabi frequencies proportional to $\sqrt{n+1}$.
The red sideband, or Jaynes--Cummings, interaction drives transitions between the states $\vert \downarrow, n \rangle$ and $\vert \uparrow, n - 1 \rangle$ with Rabi frequencies proportional to $\sqrt{n}$.
The carrier transition changes the state of the spin without changing the Fock state.
Our system is initially cooled to the ground state $\vert \downarrow, 0\rangle$.
We use a series of sideband and carrier $\pi$-pulses to initialize different Fock states, Fig. \ref{fig:App_FSprep}(a).
For example, to prepare the Fock state $n=3$, the pulse sequence we need is blue sideband--red sideband--blue sideband--carrier, which will walk the excitation through the states $\vert \downarrow, 0 \rangle \rightarrow \vert \uparrow, 1\rangle \rightarrow \vert \downarrow, 2 \rangle \rightarrow \vert \uparrow, 3\rangle \rightarrow \vert \downarrow, 3\rangle$.
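The bookkeeping of this alternating sequence can be captured by a short helper routine (an illustrative Python sketch, not the actual experiment-control code; the pulse labels are placeholders):
\begin{verbatim}
def fock_prep_sequence(n):
    """Pi-pulse sequence that walks |down,0> up to Fock state n, ending in |down,n>."""
    pulses = ["BSB" if k % 2 == 0 else "RSB" for k in range(n)]
    if n % 2 == 1:
        pulses.append("CARRIER")   # flip the spin back to |down> when n is odd
    return pulses

# fock_prep_sequence(3) == ["BSB", "RSB", "BSB", "CARRIER"], as in the example above
\end{verbatim}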
\begin{figure}
\caption{{\bf Fock State Preparation}}
\label{fig:App_FSprep}
\end{figure}
Each measured probability for $\vert \downarrow, n\rangle$ with $n=0,1,\ldots,5$ is fitted to $P_{\uparrow}(t) = e^{-\gamma_{n} t}\cos^{2} \Omega_{n,n+1} t$, where $\Omega_{n,n+1}$ is the Rabi frequency and $\gamma_{n}$ is the decoherence rate between levels $\vert n\rangle$ and $\vert n+1 \rangle$.
The measured Rabi frequency ratios $\Omega_{n,n+1} /\Omega_{0,1}$ are plotted in Figure \ref{fig:App_FSprep}(b), and compared to the expected $\sqrt{n+1}$.
We observe no inherent cross-coupling between excitations on $x$ and $y$ modes.
\section{Fock state population measurement}\label{App:MotionalAnalysis}
\begin{figure}
\caption{{\bf Fock state population analysis.}
\label{fig:pF_detection}
\end{figure}
Fock state populations are analyzed by first setting the spin to $\vert \downarrow \rangle$ by optical pumping and then applying a blue sideband pulse for various interaction times before reading out the spin state \cite{Meekhof1996p1796}. The resulting curve is described by the function,
\begin{eqnarray}\label{eq:Prob_down}
P_{\uparrow}(t) = \frac{1}{2} \left(1 + \sum_{n=0}^{\infty} P_{n} e^{-\gamma_{n}t} \cos \Omega_{n,n+1}t\right),
\end{eqnarray}
from which the desired Fock state populations $P_{n}$ can be obtained by a fit. The $\gamma_{n}$ are phenomenological decay constants assumed to obey $\gamma_{n}= \gamma \sqrt{n+1}$. Notice that the more populated the motional Hilbert space, the greater the number of frequency components required for the fitting procedure. Typical examples of this measurement and fitting procedure are shown in Fig. \ref{fig:pF_detection} for para-Fermi and Fig. \ref{fig:pB_detection} for para-Bose at various times.
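A minimal version of this fitting step, assuming the SciPy library, could look as follows (all calibration values are placeholders and the data are synthetic; in the experiment they are the measured probe durations and spin populations):
\begin{verbatim}
import numpy as np
from scipy.optimize import curve_fit

def make_model(omegas, gamma):
    """P_up(t; P_0,...,P_{N-1}) with Omega_{n,n+1} and gamma_n = gamma*sqrt(n+1) fixed."""
    def model(t, *pops):
        signal = np.zeros_like(t, dtype=float)
        for n, p_n in enumerate(pops):
            signal += p_n * np.exp(-gamma * np.sqrt(n + 1) * t) * np.cos(omegas[n] * t)
        return 0.5 * (1.0 + signal)
    return model

# placeholder calibration: Omega_{n,n+1} ~ sqrt(n+1) * Omega_{0,1}
omega01 = 2 * np.pi * 20e3
omegas = omega01 * np.sqrt(np.arange(1, 5))     # n = 0, 1, 2, 3
model = make_model(omegas, gamma=2e3)

t = np.linspace(0.0, 5e-4, 200)                 # probe durations (s)
p_data = model(t, 0.5, 0.3, 0.15, 0.05)         # synthetic "measured" signal
pops, _ = curve_fit(model, t, p_data, p0=[0.25] * 4, bounds=(0.0, 1.0))
\end{verbatim}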
\section{Numerical Simulations}\label{app:Lindblad}
We investigate effects of motional heating by computing the time evolution of the system according to a Lindblad master equation,
\begin{eqnarray}
\partial_{t} \hat{\rho} = -i \left[ \hat{H}^{\vartheta}_{ion}, \hat{\rho}\right] + \sum_{j=x,y}\left( \gamma_{j} n_{th} \hat{\mathcal{L}}[\hat{a}^{\dagger}_{j}]\hat{\rho} + \gamma_{j}(n_{th}
+1) \hat{\mathcal{L}}[\hat{a}_{j}]\hat{\rho}\right),
\end{eqnarray}
where,
\begin{eqnarray}
\hat{\mathcal{L}}[\hat{O}]\hat{\rho} = 2 \hat{O} \hat{\rho} \hat{O}^{\dagger} - \hat{O}^{\dagger}\hat{O}\hat{\rho} - \hat{\rho} \hat{O}^{\dagger} \hat{O}
\end{eqnarray}
is the Lindblad operator and $\gamma n_{th} \approx \gamma(n_{th}+1)$ is the motional heating rate, which is measured to be $70$ phonons$/s$ in our system.
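A compact sketch of this check, assuming the QuTiP package, is shown below (all values are placeholders except the $70$ phonons$/s$ heating rate quoted above; the factor of two in the collapse operators absorbs the difference between the Lindblad term written above and QuTiP's dissipator convention):
\begin{verbatim}
import numpy as np
from qutip import basis, destroy, mesolve, num, qeye, sigmam, sigmap, tensor

N = 8                                    # Fock truncation per mode (placeholder)
g = 2 * np.pi * 1.0e3                    # placeholder coupling (rad/s)
heating = 70.0                           # gamma * n_th, phonons/s (from the text)

ax = tensor(destroy(N), qeye(N), qeye(2))
ay = tensor(qeye(N), destroy(N), qeye(2))
sp = tensor(qeye(N), qeye(N), sigmap())
sm = tensor(qeye(N), qeye(N), sigmam())

A_pF = np.sqrt(2) * (ax * sp + ay.dag() * sm)
H = 0.5 * g * (A_pF + A_pF.dag())        # driven para-Fermi oscillator

# heating (a_dag) and cooling (a) channels on each mode
c_ops = []
for a in (ax, ay):
    c_ops += [np.sqrt(2 * heating) * a.dag(), np.sqrt(2 * heating) * a]

psi0 = tensor(basis(N, 0), basis(N, 1), basis(2, 1))   # |down, 0, 1>
times = np.linspace(0.0, 5e-3, 100)
e_ops = [tensor(num(N), qeye(N), qeye(2)), tensor(qeye(N), num(N), qeye(2))]
result = mesolve(H, psi0, times, c_ops, e_ops)
\end{verbatim}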
\begin{figure}
\caption{{\bf Fock state population analysis.}}
\label{fig:pB_detection}
\end{figure}
\end{document}
\begin{document}
\begin{abstract}
In this paper we introduce {\it critical surfaces}, which are described via a 1-complex whose definition is reminiscent of the curve complex. Our main result is that if the minimal genus common stabilization of a pair of strongly irreducible Heegaard splittings of a 3-manifold is not critical, then the manifold contains an incompressible surface. Conversely, we also show that if a non-Haken 3-manifold admits at most one Heegaard splitting of each genus, then it does not contain a critical Heegaard surface. In the final section we discuss how this work leads to a natural metric on the space of strongly irreducible Heegaard splittings, as well as many new and interesting open questions.
\end{abstract}
\title{Critical Heegaard surfaces}
\footnotetext[1]{To appear in Transactions of the AMS.}
\section{Introduction.}
It is a standard exercise in 3-manifold topology to show that every manifold admits Heegaard splittings of arbitrarily high genus. Hence, a ``random" Heegaard splitting does not say much about the topology of the manifold in which it sits. To use Heegaard splittings to prove interesting theorems, one needs to make some kind of non-triviality assumption. The most obvious such assumption is that the splitting is minimal genus. However, this assumption alone is apparently very difficult to use.
In \cite{cg:87}, Casson and Gordon define a new notion of triviality for a Heegaard splitting, called {\it weak reducibility}. A Heegaard splitting which is not weakly reducible, then, is said to be {\it strongly irreducible}. The assumption that a Heegaard splitting is strongly irreducible has proved to be much more useful than the assumption that it is minimal genus. In fact, in \cite{cg:87}, Casson and Gordon show that in a non-Haken 3-manifold, minimal genus Heegaard splittings {\it are} strongly irreducible.
The moral here seems to be this: since the assumption of minimal genus is difficult to make use of, one should pass to a larger class of Heegaard splittings, which is still restrictive enough that one can prove non-trivial theorems.
Now we switch gears a little. It is a Theorem of Reidemeister and Singer (see \cite{am:90}) that given two Heegaard splittings, one can always stabilize the higher genus one some number of times to obtain a stabilization of the lower genus one (see the next section for definitions of these terms). However, this immediately implies that any two Heegaard splittings have a common stabilization of arbitrarily high genus. Hence, the assumption that one has a ``random" common stabilization cannot be terribly useful. What is of interest, of course, is the minimal genus common stabilization. As before though, the assumption of minimal genus has turned out to be very difficult to use.
In this paper, we propose a new class of Heegaard splittings, which we call {\it critical}, and prove that at least in the non-Haken case, this class includes the minimal genus common stabilizations. As one would hope, the assumption that a splitting is critical is more useful than the assumption that it is a minimal genus common stabilization.
We define the term {\it critical} via a 1-complex associated with any embedded, separating surface in a 3-manifold, which is reminiscent of the curve complex. After the preliminary definitions we present a section of Lemmas which lead up to the following Theorem:
\noindent {\bf Theorem 4.6.} {\it Suppose $M$ is an irreducible 3-manifold with no closed incompressible surfaces, and at most one Heegaard splitting (up to isotopy) of each genus. Then $M$ does not contain a critical Heegaard surface.}
The remainder of the paper is concerned with the converse of this Theorem. That is, we answer precisely when a (non-Haken) 3-manifold {\it does} contain a critical Heegaard surface.
The main technical theorem which starts us off in this direction is:
\noindent {\bf Theorem 5.1.} {\it Let $M$ be a 3-manifold with critical surface, $F$, and incompressible surface, $S$. Then there is an incompressible surface, $S'$, homeomorphic to $S$, such that every loop of $F \cap S'$ is essential on both surfaces. Furthermore, if $M$ is irreducible, then there is such an $S'$ which is isotopic to $S$.}
As immediate corollaries to this, we obtain:
\noindent {\bf Corollary 5.6.} {\it If $M_1 \# M_2$ contains a critical surface, then either $M_1$ or $M_2$ contains a critical surface.}
\noindent {\bf Corollary 5.7.} {\it A reducible 3-manifold does not admit a critical Heegaard splitting.}
\noindent {\bf Corollary 5.8.} {\it Suppose $M$ is a 3-manifold which admits a critical Heegaard splitting, such that $\partial M \ne \emptyset$. Then $\partial M$ is essential in $M$.}
It is this last corollary which we combine with a considerable amount of new machinery to yield:
\noindent {\bf Theorem 7.1.$'$} {\it Suppose $F$ and $F'$ are distinct strongly irreducible Heegaard splittings of some closed 3-manifold, $M$. If the minimal genus common stabilization of $F$ and $F'$ is not critical, then $M$ contains an incompressible surface.}
We actually prove a slightly stronger version of this Theorem, that holds for manifolds with non-empty boundary.
Compare Theorem 7.1.$'$ to that of Casson and Gordon \cite{cg:87}: If the minimal genus Heegaard splitting of a 3-manifold, $M$, is not strongly irreducible, then $M$ contains an incompressible surface. In this light, we see that critical Heegaard surfaces are a natural follow-up to strongly irreducible Heegaard surfaces.
In the last section, we show how these results lead to a natural metric on the space of strongly irreducible Heegaard splittings of a non-Haken 3-manifold. It is the belief of the author that more information about this space would be of great interest. We also discuss how a better understanding of critical surfaces may help answer several open questions, including the {\it stabilization conjecture}. We end with a few new questions and conjectures generated by this work which the reader may find interesting.
The author would like to thank several people for their input during the preparation of this paper: the referee, for many helpful suggestions regarding earlier versions; Cameron Gordon, for guidance during the author's thesis work, from which this paper grew; Saul Schleimer, for an introduction to the beauty of the curve complex; and Eric Sedgwick, for helpful conversations regarding Lemma \ref{l:reducesphere}.
\section{Basic Definitions.}
In this section, we give some of the standard definitions that will be used throughout the paper. The expert in 3-manifold theory can easily skip this.
A 2-sphere in a 3-manifold which does not bound a 3-ball on either side is called {\it essential}. If a manifold does not contain an essential 2-sphere, then it is referred to as {\it irreducible}.
A loop on a surface is called {\it essential} if it does not bound a disk in the surface. Given a surface, $F$, in a 3-manifold, $M$, a {\it compressing disk} for $F$ is a disk, $D \subset M$, such that $F \cap D=\partial D$, and such that $\partial D$ is essential on $F$. If we let $D \times I$ denote a thickening of $D$ in $M$, then to {\it compress $F$ along $D$} is to remove $(\partial D) \times I$ from $F$, and replace it with $D \times \partial I$.
A {\it compression body} is a 3-manifold which can be obtained by starting with some surface, $F$, forming the product, $F \times I$, attaching some number of 2-handles to $F \times \{1\}$, and capping off any remaining 2-sphere boundary components with 3-balls. The boundary component, $F \times \{0\}$, is often referred to as $\partial _+$. The other boundary component is referred to as $\partial _-$. If $\partial _-=\emptyset$, then we say the compression body is a {\it handlebody}.
A surface, $F$, in a 3-manifold, $M$, is a {\it Heegaard splitting of M}, if $F$ separates $M$ into two compression bodies, $W$, and $W'$, such that $F=\partial _+W=\partial _+ W'$. Such a splitting is {\it non-trivial} if neither $W$ nor $W'$ are products. A {\it stabilization} of $F$ is a new Heegaard splitting which is the connected sum of the standard genus 1 Heegaard splitting of $S^3$ and $F$. Another way to define a stabilization is by ``tunneling" a 1-handle out of $W$, and attaching it to $\partial _+ W'$. If this is done in such a way so as to make the definition symmetric in $W$ and $W'$, then one arrives at a stabilization. The Reidemeister--Singer theorem (see \cite{am:90}) states that given any two Heegaard splittings, $F$ and $F'$, there is always a stabilization of $F$ which is isotopic to a stabilization of $F'$.
\section{The isotopy-invariant disk complex.}
For every surface, $F$, we can define a complex, $G(F)$, as follows: for each isotopy class of essential simple closed curve in $F$, there is a vertex of $G(F)$. There is an edge connecting two vertices if and only if there are representatives of the corresponding equivalence classes that are disjoint. $G(F)$ is the 1-skeleton of a complex which is usually referred to as the {\it curve complex} of $F$, and is an object that many mathematicians have made use of. We now generalize the 1-complex, $G(F)$.
The new 1-complex we define here will be only for embedded, orientable, compact, separating (but not necessarily connected) surfaces in 3-manifolds. Suppose $F$ is such a surface, separating a 3-manifold, $M$, into a ``red" and a ``blue" side. If $D$ and $D'$ are compressing disks for $F$, then we say $D$ is equivalent to $D'$ if there is an isotopy of $M$ taking $F$ to $F$, and $D$ to $D'$ (we do allow $D$ and $D'$ to be on opposite sides of $F$). Note that this equivalence relation is stronger than $D$ and $D'$ being isotopic in $M$ (rel $F$). Indeed, if there is an isotopy which takes $D$ to $D'$, sweeping out a subset $B \subset M$, then there is an isotopy of $M$, taking $D$ to $D'$, which fixes every point outside of a neighborhood of $B$.
We now define a 1-complex, $\Gamma (F)$. For each equivalence class of compressing disk for $F$, there is a vertex of $\Gamma (F)$. Two (not necessarily distinct) vertices are connected by an edge if there are representatives of the corresponding equivalence classes on opposite sides of $F$, which intersect in at most a point. $\Gamma (F)$ is thus an ``isotopy-invariant disk complex" for $F$.
For example, if $F$ is the genus 1 Heegaard splitting of $S^3$, then there is an isotopy of $S^3$ which takes $F$ back to itself, but switches the sides of $F$. Such an isotopy takes a compressing disk on one side of $F$ to a compressing disk on the other. Hence, $\Gamma (F)$ has a single vertex. However, there are representatives of the equivalence class that corresponds to this vertex which are on opposite sides of $F$, and intersect in a point. Hence, there is an edge of $\Gamma (F)$ which connects the vertex to itself.
We can now exploit this terminology to give concise definitions of some of the standard terms in 3-manifold topology, as well as one important new one.
\begin{dfn}
We say $F$ is {\it incompressible} if $\Gamma (F)=\emptyset$. If an irreducible 3-manifold contains no incompressible surfaces, then we say it is {\it non-Haken}. Otherwise, it is {\it Haken}.
\end{dfn}
\begin{dfn}
$F$ is {\it reducible} if there are compressing disks on opposite sides of $F$ with the same boundary. The union of these disks is called a {\it reducing sphere} for $F$.
\end{dfn}
It is a standard exercise to show that if a Heegaard splitting of an irreducible 3-manifold is reducible, then there is a lower genus Heegaard splitting.
\begin{dfn}
$F$ is {\it strongly irreducible} if there are compressing disks on opposite sides of $F$, but $\Gamma (F)$ contains no edges.
\end{dfn}
In \cite{cg:87}, Casson and Gordon show that if $F$ is a Heegaard splitting of a non-Haken 3-manifold which is not strongly irreducible, then $F$ is reducible. Hence, any minimal genus Heegaard splitting of a non-Haken 3-manifold must be strongly irreducible.
\begin{dfn}
A vertex of $\Gamma (F)$ is {\it isolated} if it is not the endpoint of any edge.
\end{dfn}
\begin{dfn}
If we remove the isolated vertices from $\Gamma (F)$ and are left with a disconnected 1-complex, then we say $F$ is {\it critical}.
\end{dfn}
Equivalently, $F$ is critical if there exist two edges of $\Gamma(F)$ that cannot be connected by a path.
\section{Preliminary Lemmas}
Before proceeding further, we introduce some notation. Suppose $D$ and $E$ are compressing disks on opposite sides of $F$, such that $|D \cap E| \le 1$. Then we denote the edge of $\Gamma (F)$ which connects the equivalence class of $D$ to the equivalence class of $E$ as $D-E$. If $D$ and $D'$ are disks in the same equivalence class, then we write $D \sim D'$. Hence, a chain of edges in $\Gamma (F)$ may look something like
\[D_1-E_2 \sim E_3-D_4 \sim D_5 - E_6 -D_7\]
Many of the proofs of this paper follow by producing such chains. Note that we will always denote disks on the red side of $F$ (i.e. ``red disks") with the letter ``$D$" (usually with some subscript), and blue disks with the letter ``$E$".
We begin with a simple Lemma.
\begin{lem}
\label{l:3site}
Let $F$ be a critical surface, and suppose $D_1 - E_1$ and $D_2 - E_2$ are edges which lie in different components of $\Gamma (F)$. Then for each red disk, $D$, there is an $i=1$ or $2$ such that $|D \cap E| >1$, whenever $E$ is a blue disk equivalent to $E_i$. Similarly, for each blue disk, $E$, there is an $i$ such that $|D \cap E| >1$, whenever $D$ is a red disk equivalent to $D_i$.
\end{lem}
\begin{proof}
If the Lemma is false, then we have the following chain of edges: $D_1 - E_1 - D - E_2 - D_2 $, which clearly contradicts the assumption that the edges $D_1 - E_1$ and $D_2 - E_2$ are in different components of $\Gamma (F)$.
\end{proof}
{\bf Note.} Lemma \ref{l:3site} is closely related to the ``3-site property" of Pitts and Rubinstein (see, for example, \cite{rubinstein:96}). In our language, the 3-site property says that if $D_1 - E_1$ and $D_2 - E_2$ are edges in different components of $\Gamma (F)$, then there cannot be a third edge, $D - E$, where $D$ misses $E_1$ and $E_2$, and $E$ misses $D_1$ and $D_2$. It is trivial to show that this is implied by Lemma \ref{l:3site}.
The next Lemma is a nice self-contained result about the components of a disconnected critical surface. It is presented only to help the reader get used to our definitions, and will not be used in the remainder of the paper.
\begin{lem}
\label{l:components}
Suppose $F$ is a critical surface such that every compressing disk for every component of $F$ is isotopic (rel $\partial$) to a compressing disk for $F$, and such that each component of $F$ is separating. Then either
\begin{itemize}
\item exactly one component of $F$ is critical, and the rest are incompressible, or
\item exactly two components of $F$ are strongly irreducible, and the rest are incompressible.
\end{itemize}
\end{lem}
\noindent {\bf Note.} It is possible to define the terms {\it strongly irreducible} and {\it critical} for non-separating surfaces as well, which makes the statement of Lemma \ref{l:components} a little nicer. We do not bother here, since we will not need such generality.
\begin{proof}
Suppose that $D_1-E_1$ and $D_2-E_2$ are edges in different components of $\Gamma (F)$. By Lemma \ref{l:3site}, $\partial D_2 \cap \partial E_1 \ne \emptyset$. Hence, $D_2$ and $E_1$ are compressing disks for the same component of $F$, which we refer to as $F_1$. Similarly, $D_1$ and $E_2$ must be compressing disks for the same component, $F_2$. Any compressing disk for any other component would violate Lemma \ref{l:3site}. Hence, any component of $F$ other than $F_1$ or $F_2$ is incompressible.
Now suppose that $F_1=F_2$. If $D_1-E_1$ and $D_2-E_2$ are in the same component of $\Gamma (F_1)$, then there is a chain of edges that connects them. But every compressing disk for $F_1$ is a compressing disk for $F$, so the same chain would connect $D_1-E_1$ and $D_2-E_2$ in $\Gamma (F)$, a contradiction. Hence, $F_1$ is critical.
If $F_1 \ne F_2$, and $F_2$ is not strongly irreducible, then let $D - E$ be an edge of $\Gamma (F_2)$. Then we have the chain:
\[ D_1 -E_1 - D-E-D_2-E_2\]
Since this is a contradiction, $\Gamma (F_2)$ contains no edges, and $F_2$ is strongly irreducible. A symmetric argument shows that $F_1$ is strongly irreducible.
\end{proof}
For the proof of the next Lemma, we will need to define a partial ordering on embedded surfaces in a 3-manifold. This ordering will play a more crucial role in the next section.
\begin{dfn}
\label{d:order}
For any surface, $F$, let $c(F)=\sum \limits _n (2-\chi(F^n))^2$, where $\{F^n\}$ are the components of $F$. If $F_1$ and $F_2$ denote compact, embedded surfaces in a 3-manifold, $M$,
then we say $F_1 < F_2$ if $c(F_1) < c(F_2)$.
\end{dfn}
Note that this ordering is defined so that if $F_1$ is obtained from $F_2$ by a compression, then $F_1 < F_2$. In fact, we could have used any complexity we wished (and there are several) which induced this partial ordering. The one used here was introduced to the author by Peter Shalen.
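For example, if $F_2$ is a closed genus two surface then $c(F_2)=(2-(-2))^2=16$; compressing along a non-separating curve yields a genus one surface with $c=(2-0)^2=4$, while compressing along a separating curve yields two tori with $c=4+4=8$. In either case the complexity drops, as claimed.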
\begin{lem}
\label{l:reducesphere}
Suppose $F$ is a Heegaard splitting of an irreducible 3-manifold, $M$, which does not contain any closed incompressible surfaces. If $D_0$ and $E_0$ are red and blue disks such that $D_0 \cap E_0=\emptyset$, then there is a reducing sphere for $F$ which corresponds to an edge of $\Gamma (F)$ in the same component as $D_0-E_0$.
\end{lem}
\begin{proof}
The proof is a restatement of Casson and Gordon's proof from \cite{cg:87} that there exists some reducing sphere for $F$. By being careful, we can guarantee that this reducing sphere corresponds to an edge of $\Gamma (F)$ in the same component as $D_0-E_0$.
Choose non-empty collections of red and blue disks, $\bf D$ and $\bf E$, subject to the following constraints:
\begin{enumerate}
\item For all $D \in \bf D$ and $E \in \bf E$, $D \cap E=\emptyset$.
\item For all $D \in \bf D$ and $E \in \bf E$, $D - E$ is in the same component of $\Gamma (F)$ as $D_0-E_0$.
\item The surface, $F_0$, obtained from $F$ by compressing along all disks of $\bf D$ and $\bf E$ is minimal (in the sense of Definition \ref{d:order}), with respect to the above two constraints.
\end{enumerate}
Note that the existence of $D_0$ and $E_0$ is what guarantees that we may find such collections, $\bf D$ and $\bf E$, which are non-empty.
We claim that each component of the surface, $F_0$, is a 2-sphere. First, let $F_+$ be the surface obtained from $F$ by compressing along all the disks of $\bf D$, and let $F_-$ be the surface obtained by compressing along all disks of $\bf E$. Let $M_+$ be the closure of the component of $M-F_0$ which contains $F_+$, and let $M_-$ be the closure of the other component. Notice that $F_+$ and $F_-$ are Heegaard splittings of $M_+$ and $M_-$.
If some component of $F_0$ is not a 2-sphere, then it is compressible into $M_+$ or $M_-$ (since $M$ contains no closed incompressible surfaces). Suppose the former. Let $W$ denote the closure of the component of $M_+-F_+$ which contains $F_0$, and let $W'$ denote the other component. By the Lemma of Haken (see, for example, \cite{cg:87}), there is a complete collection, $\bf E'$, of compressing disks for $F_+$ in $W$, and a compressing disk, $\hat D$, for $F_0$ in $M_+$, such that $\hat D \cap F_+$ is a single loop, and $\hat D \cap E'=\emptyset$ for all $E' \in \bf E'$. Notice that $\hat D$ contains a compressing disk, $\hat D'$, for $F_+$ in $W'$. But since $F_+$ is obtained from $F$ by compression along $\bf D$, we can consider $\hat D'$ and $\bf E'$ to be compressing disks for $F$, which are disjoint from all $D \in \bf D$.
Now, we replace the original collections, $\bf D$ and $\bf E$, with the collections ${\bf D'}= {\bf D} \cup \hat D'$ and $\bf E'$. By construction, for any $D' \in \bf D'$ and $E' \in \bf E'$, $D' \cap E'=\emptyset$. For any $E' \in \bf E'$ and $D \in \bf D$ (and hence in $\bf D'$), $D-E'$ is an edge in $\Gamma (F)$ which is in the same component as $D-E$, for any $E \in \bf E$, and hence in the same component as $D_0-E_0$. Furthermore, compressing $F$ along all disks of $\bf D'$ and $\bf E'$ yields a surface that can be obtained from $F_0$ by compressing along $\hat D'$. But this surface is then smaller than $F_0$, contradicting our minimality assumption. We conclude that $F_0$ is a collection of 2-spheres.
The remainder of the proof follows that of Rubinstein from \cite{rubinstein:96}. Notice that $F_0$ consists of a subsurface, $P$, of $F$, together with two copies of each disk in $\bf D$ and $\bf E$. These disks are colored red and blue, so we can picture $F_0$ as a collection of spheres with a bunch of red and blue subdisks. Some sphere in this collection, $F_0'$, must have both red and blue subdisks, because otherwise $F$ would not be connected. Let $\gamma$ be a loop on $F_0'$ which separates the red disks from the blue disks. Note that $\gamma \subset P \subset F$. Let $R$ be the component of $F'_0-\gamma$ which contains the red disks, and let $B$ be the other component. Then we can push $P \cap R$ slightly into the red side, and $P \cap B$ into the blue side. This turns $F'_0$ into a sphere that intersects $F$ in the single essential simple closed curve, $\gamma$. Furthermore, $R \cap E=\emptyset$ for all $E \in \bf E$, so for any $D \in \bf D$ and $E \in \bf E$ we have the chain $B-R-E-D$ in $\Gamma (F)$; hence the edge $R-B$ is in the same component of $\Gamma (F)$ as $D_0-E_0$.
\end{proof}
\begin{lem}
\label{l:stab}
Suppose $F$ is a Heegaard splitting of an irreducible 3-manifold, $M$, which does not contain any closed incompressible surfaces. For each edge, $D-E$ of $\Gamma (F)$, there is some edge, $D'-E'$, in the same component as $D-E$, such that $|D' \cap E'|=1$.
\end{lem}
\begin{proof}
Since $D-E$ is an edge of $\Gamma (F)$, either $|D \cap E|=1$, or $D \cap E=\emptyset$. In the former case there is nothing to prove. In the latter case, we may apply Lemma \ref{l:reducesphere} to obtain an edge, $D''-E''$, in the same component as $D-E$, such that $D'' \cup E''$ is a reducing sphere for $F$. By the irreducibility of $M$, $D'' \cup E''$ bounds a ball, $B$. By \cite{waldhausen:68}, we know that $F \cap B$ is standard. So inside $B$ we can find a pair of disks on opposite sides, $D'$ and $E'$, such that $|D' \cap E'|=1$. But then $D' \cap E''=\emptyset$, so the chain $D''-E''-D'-E'$ implies $D'-E'$ is in the same component of $\Gamma (F)$ as $D-E$.
\end{proof}
\begin{thm}
\label{t:nocrit}
Suppose $M$ is an irreducible 3-manifold with no closed incompressible surfaces, and at most one Heegaard splitting (up to isotopy) of each genus. Then $M$ does not contain a critical Heegaard surface.
\end{thm}
Examples of such 3-manifolds include $S^3$ and $B^3$ \cite{waldhausen:68}, $L(p,q)$ \cite{bo:83}, \cite{bonahon:83}, and handlebodies \cite{waldhausen:68}.
\begin{proof}
Let $F$ be a Heegaard splitting of $M$, and let $D-E$ and $D'-E'$ denote edges of $\Gamma (F)$. Our goal is to show that these two edges lie in the same component. By Lemma \ref{l:stab}, we may assume that $|D \cap E|=|D' \cap E'|=1$. As $F$ cannot be a torus, the boundary of a neighborhood of $D \cup E$ is a sphere which intersects $F$ in a single essential simple closed curve. This curve divides the sphere into a red disk, $R$, and a blue disk, $B$, such that $\partial R=\partial B$. Similarly, $D' \cup E'$ gives rise to disks, $R'$ and $B'$, such that $\partial R' =\partial B'$.
Now, $\partial R$ separates $F$ into two components, at least one of which, $T$, is a punctured torus. Furthermore, $(F-T) \cup R$ is a Heegaard surface of lower genus. Similarly, $\partial R'$ separates $F$ into two components, at least one of which, $T'$, is a punctured torus, and $(F-T') \cup R'$ is a Heegaard surface of lower genus.
By assumption, $(F-T) \cup R$ is isotopic to $(F-T') \cup R'$. This isotopy can be realized as an isotopy of $M$, which takes $(F-T) \cup R$ to $(F-T') \cup R'$. Furthermore, since $B$ and $B'$ are isotopic to $R$ and $R'$, we may assume that the isotopy of $M$ takes $B$ to $B'$, as long as $B$ and $B'$ do not end up on opposite sides. Since $T$ and $T'$ lie inside the balls bounded by $R \cup B$ and $R' \cup B'$ (and in fact, $T \cup R$ and $T' \cup R'$ are Heegaard splittings for these balls), we can also arrange the isotopy so that it takes $T$ to $T'$. What we have now produced is an isotopy of $M$, taking $F$ to $F$, and $R$ to $R'$. This implies the following chain:
\[E-D-B-R \sim R'-B'-D'-E'\]
If, on the other hand, the isotopy of $M$ which takes $(F-T) \cup R$ to $(F-T') \cup R'$ ``switches sides", so that $B$ and $B'$ end up on opposite sides, then there must be an isotopy which takes $(F-T) \cup R$ to $(F-T') \cup B'$, and $(F-T) \cup B$ to $(F-T') \cup R'$. Hence, we conclude that there is an isotopy of $M$ taking $F$ to $F$, and $R$ to $B'$. This implies the chain:
\[E-D-B-R \sim B'-R'-D'-E'\]
\end{proof}
\section{Incompressible Surfaces}
\label{s:incomp}
Our goal in this section is to examine the interplay between critical surfaces and incompressible surfaces. Our main result is the following:
\begin{thm}
\label{t:essential_intersection}
Let $M$ be a 3-manifold with critical surface, $F$, and incompressible surface, $S$. Then there is an incompressible surface, $S'$, homeomorphic to $S$, such that every loop of $F \cap S'$ is essential on both surfaces. Furthermore, if $M$ is irreducible, then there is such an $S'$ which is isotopic to $S$.
\end{thm}
\begin{proof}
The proof is in several stages. First, we construct a map, $\Phi$, from $S \times D^2$ into $M$. We then use $\Phi$ to break up $D^2$ into regions, and label them in such a way that if any region remains unlabelled, then the conclusion of the theorem follows. Finally, we construct a map from $D^2$ to a labelled 2-complex, $\Pi$, which has non-trivial first homology, and show that if there is no unlabelled region, then the induced map on homology is nontrivial, a contradiction. This general strategy is somewhat similar to that used in \cite{rs:96}, although the details have very little in common.
\noindent \underline{Stage 1:} {\it Constructing the map, $\Phi :S \times D^2 \rightarrow M$}.
Let $D_0-E_0$ and $D_1-E_1$ denote edges in different components of $\Gamma (F)$, such that $D_0 \cap E_0=D_1 \cap E_1=\emptyset$. Such edges are guaranteed to exist in every component. Indeed, if $D-E$ is an edge where $|\partial D \cap \partial E|=1$, then the boundary of a neighborhood of $D \cup E$ is a sphere, which intersects $F$ in a single loop. This loop cuts the sphere into disks, $D'$ and $E'$, which can be isotoped to be disjoint. Furthermore, we have the chain $D'-E'-D-E$ in $\Gamma (F)$, which shows that $D'-E'$ is in the same component as $D-E$.
We now produce a sequence of compressing disks for $F$, $\{D_{\frac {i}{n}}\}_{i=1,...,n-1}$, such that $D_{\frac {i}{n}} \cap D_{\frac {i+1}{n}} =\emptyset$, for all $i$ between 0 and $n-1$. Begin by isotoping $D_0$ and $D_1$ so that $|D_0 \cap D_1|$ is minimal. If $D_0 \cap D_1=\emptyset$, then we are done. If not, then let $\gamma$ be some arc of intersection which is outermost on $D_1$. So, $\gamma$ cuts off a subdisk, $D_1'$, of $D_1$, whose interior is disjoint from $D_0$. $\gamma$ also cuts $D_0$ into two subdisks. Choose one to be $D_0'$. Since $|D_0 \cap D_1|$ is assumed to be minimal, the disk $D^1=D_1' \cup D_0'$ must be a compressing disk for $F$. Furthermore, $D^1$ can be isotoped so that it misses $D_0$, and so that $|D^1 \cap D_1|$ is strictly less than $|D_0 \cap D_1|$. Continue now in the same fashion, using $D^1$ and $D_1$ to construct $D^2$, where $D^1 \cap D^2 =\emptyset$, and $|D^2 \cap D_1|<|D^1 \cap D_1|$. Eventually, we come to a disk, $D^{n-1}$, where $D^{n-1} \cap D_1=\emptyset$. Now, for all $i$ between 1 and $n-1$, let $D_{\frac {i}{n}}=D^i$, and we are done.
We can apply a symmetric argument to produce a sequence of disks, $\{E_{\frac {i}{m}}\}_{i=1,...,m-1}$, such that $E_{\frac {i}{m}} \cap E_{\frac {i+1}{m}} =\emptyset$, for all $i$ between 0 and $m-1$.
Our goal now is to use all of these disks to define $\Phi$. The definition we give may seem overly technical, but there are certain features that are worth pointing out beforehand. If $\Phi$ is any map from $S \times D^2$ into $M$, then let $\Phi _x (s)=\Phi (s,x)$. If $x$ is the center of $D^2$, we would like $\Phi _x$ to be the identity on $S$. That is, at the center of $D^2$, we do nothing to $S$. For $x$ near the boundary of $D^2$, we would like $\Phi _x(S)$ to be disjoint from at least one of the disks, $\{D_{\frac {i}{n}}\}$, or $\{E_{\frac {i}{m}}\}$. In particular, there should be two points, $\theta _0$ and $\theta _1$, on $\partial D^2$ such that for all $x \in D^2$ near $\theta _i$, $\Phi _x (S)$ is disjoint from both $D_i$ and $E_i$, for $i=0,1$. In fact, any map $\Phi$ which fits this description will work for the remainder of our proof. We give an explicit construction of one here for concreteness.
Let $\{U_i\}_{i=0,...,n}$ and $\{V_i\}_{i=0,...,m}$ denote neighborhoods of the disks, $\{D_{\frac {i}{n}}\}$ and $\{E_{\frac {i}{m}}\}$, such that
\begin{enumerate}
\item $U_i \cap U_{i+1}=\emptyset$, for $0 \le i <n$
\item $V_i \cap V_{i+1}=\emptyset$, for $0 \le i <m$
\item $U_0 \cap V_0=\emptyset$
\item $U_n \cap V_m=\emptyset$
\end{enumerate}
For each $i$ between 0 and $n$, let $\gamma ^i :M \times I \rightarrow M$ be an isotopy, such that $\gamma ^i _0 (x)=x$ for all $x \in M$, $\gamma ^i _t (x)=x$ for all $t$, and all $x$ outside of $U_i$, and $\gamma ^i_1(S) \cap D_{\frac {i}{n}} =\emptyset$ (where $\gamma ^i _t(x)$ is short-hand for $\gamma ^i (x,t)$). In other words, $\gamma ^i$ is an isotopy which pushes $S$ off of $D_{\frac {i}{n}}$, inside $U_i$. Similarly, for each $i$ between 0 and $m$, let $\delta ^i$ be an isotopy which pushes $S$ off of $E_{\frac {i}{m}}$, inside $V_i$.
Now choose $n+m+2$ points on $S^1$, which are labelled and cyclically ordered as follows: $x_0,...,x_n,y_m,...,y_0$. Let $\{f_i\}_{i=0,...,n}$ and $\{g_i\}_{i=0,...,m}$ be sets of continuous functions from $S^1$ to $I$, such that
\begin{enumerate}
\item $supp(f_i)=(x_{i-1},x_{i+1})$, for $0<i<n$,
\item $supp(g_i)=(y_{i+1},y_{i-1})$, for $0<i<m$,
\item $supp(f_0)=(y_0,x_1)$, $supp(f_n)=(x_{n-1},y_m)$,
\item $supp(g_0)=(y_1,x_0)$, $supp(g_m)=(x_n,y_{m-1})$,
\item for each $\theta \in S^1$, there is an $i$ such that $f_i(\theta)=1$ or $g_i(\theta)=1$,
\item there are points, $\theta _0 \in (y_0,x_0)$ and $\theta _1 \in (x_n,y_m)$, such that $f_0(\theta _0)=g_0(\theta _0)=f_n(\theta _1)=g_m(\theta _1)=1$.
\end{enumerate}
The existence of such functions is easily verified by the reader.
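For concreteness, here is one such choice, described as a short Python sketch (purely illustrative; nothing later depends on it). Place the marked points at the integers $0,1,\dots,n+m+1$ on a circle of circumference $n+m+2$, and let the function attached to each marked point equal $1$ on the arc between the midpoints of the two adjacent gaps, tapering linearly to $0$ at the neighbouring marked points.
\begin{verbatim}
# Illustrative sketch only.  Identify S^1 with R/(n+m+2)Z and place the
# points x_0,...,x_n,y_m,...,y_0 at 0,1,...,n+m+1.
n, m = 4, 3                                  # sample sizes, chosen arbitrarily
N = n + m + 2                                # circumference = number of points

def bump(theta, p):
    # Equals 1 on the arc between the midpoints of the two gaps adjacent to
    # the marked point p, tapers linearly to 0 at the neighbouring points.
    d = (theta - p) % N
    d = min(d, N - d)                        # cyclic distance from theta to p
    if d <= 0.5:
        return 1.0
    if d >= 1.0:
        return 0.0
    return 2.0 * (1.0 - d)

f = [lambda t, i=i: bump(t, i) for i in range(n + 1)]          # f_i at x_i = i
g = [lambda t, j=j: bump(t, N - 1 - j) for j in range(m + 1)]  # g_j at y_j

# Condition (5): at every sampled point of S^1 some f_i or g_j equals 1.
assert all(max([fi(t) for fi in f] + [gj(t) for gj in g]) == 1.0
           for t in [k / 10 for k in range(10 * N)])
# Condition (6): f_0 = g_0 = 1 at theta_0, and f_n = g_m = 1 at theta_1.
theta0, theta1 = (N - 0.5) % N, n + 0.5
assert f[0](theta0) == g[0](theta0) == 1.0
assert f[n](theta1) == g[m](theta1) == 1.0
\end{verbatim}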
Finally, we define $\Phi :S \times D^2 \rightarrow M$. For each $x \in S$ and $(r,\theta)\in D^2$, let
\[\Phi _{(r,\theta)}(x)=\prod \limits _{i=0} ^{n} \gamma ^i _{rf_i(\theta)}(x) \prod \limits _{i=0} ^{m} \delta ^i _{rg_i(\theta)}(x)\]
Note that it is a consequence of how we defined the functions $\{f_i\}$ and $\{g_i\}$ that for each value of $\theta$ at most two of the factors in the above product differ from the identity. Since these two isotopies are supported on disjoint subsets of $M$, the order of the product does not matter. For example, suppose $n=7$. Let $\theta '$ be some value of $S^1$ between $x_3$ and $x_4$. Then $f_3$ and $f_4$ are the only functions that can be non-zero at $\theta '$. Hence, the above product simplifies to:
\[\Phi _{(r,\theta')}(x)=\gamma ^3 _{rf_3(\theta ')}(x) \cdot \gamma ^4 _{rf_4(\theta ')}(x)\]
But $\gamma ^3$ is the identity outside $U_3$, $\gamma ^4$ is the identity outside $U_4$, and $U_3 \cap U_4 =\emptyset$. Hence, $\gamma ^3$ and $\gamma ^4$ commute.
We now perturb $\Phi$ slightly so that it is in general position with respect to $S$, and again denote the new function as $\Phi$. Consider the set $\Sigma=\{x \in D^2 | \Phi _x (S)$ is not transverse to $F\}$. If $\Phi$ is in general position with respect to $S$, then Cerf theory (see \cite{cerf:68}) tells us that $\Sigma$ is homeomorphic to a graph, and the maximum valence of each vertex of this graph is 4. We will use these facts later.
\noindent \underline{Stage 2:} {\it Labelling $D^2$}.
Let $C_0$ and $C_1$ denote sets of components of $\Gamma (F)$ such that
\begin{enumerate}
\item any isolated vertex is in both sets,
\item a component which is not an isolated vertex (i.e. it contains at least one edge) is in exactly one of the sets, and
\item the component containing the edge $D_i-E_i$ is an element of $C_i$, for $i=0,1$.
\end{enumerate}
\noindent {\bf Note:} The salient feature of these sets is that if $v$ is a vertex of $\Gamma (F)$ in a component, $A \in C_0$, and $v'$ is some other vertex, in a component, $B \in C_1$, then $A \ne B$. Hence, $v-v'$ cannot be an edge of $\Gamma (F)$. We may conclude then that if $D$ is a red disk representing $v$, and $E$ is a blue disk representing $v'$, then $D \cap E \ne \emptyset$. We will make extensive use of these facts.
As at the end of Stage 1, let $\Sigma=\{x \in D^2 | \Phi _x (S)$ is not transverse to $F\}$. A {\it region} of $D^2$ is a component of $D^2 - \Sigma$. Let $x$ be any point in the interior of some region. Label this region with a ``$D_i$" (``$E_i$") if there is a disk, $\Delta$, in $\Phi _x (S)$ such that $\partial \Delta$ is an essential loop on $F$, $int (\Delta) \cap F$ is a (possibly empty) union of inessential loops on $F$, and $\Delta$ is isotopic rel $\partial$ to a red (blue) compressing disk for $F$ which corresponds to a vertex of $\Gamma (F)$ in $C_i$. It is possible that some regions will have more than one label. For example, if there is a red disk in $\Phi _x (S)$ which corresponds to an isolated vertex of $\Gamma (F)$, then it will have both of the labels ``$D_0$" and ``$D_1$".
\noindent {\bf Note:} We will be consistent about using quotation marks to denote labels. Hence, the label ``$D_0$", for example, should not be confused with the disk, $D_0$.
The point of our labelling is that if there is any compressing disk for $F$ contained in $\Phi _x (S)$, then the region containing $x$ will be assigned a label. Hence, the existence of an unlabelled region would imply that all components of intersection of $\Phi _x (S)$ and $F$ are either essential on both surfaces, or inessential on both. From there it is easy to prove the Theorem. We now proceed under the assumption that there are no unlabelled regions.
\begin{clm}
\label{c:same}
No region can have both of the labels ``$D_i$" and ``$E_{1-i}$".
\end{clm}
\begin{proof}
Let $x$ be a point in the interior of a region with the labels ``$D_0$" and ``$E_1$". Then there is a red disk, $D$, and a blue disk, $E$, in $\Phi _x (S)$. Furthermore, according to our labelling, the equivalence class of $D$ represents a vertex of $\Gamma (F)$ in a component $A \in C_0$. Similarly, $[E] \in B$, for some $B \in C_1$. But since $D$ and $E$ are both subsets of $\Phi _x (S)$ they must be disjoint, a contradiction (see the above note after the definitions of $C_0$ and $C_1$).
\end{proof}
\begin{clm}
\label{c:adjacent}
If a region has the label ``$D_i$", then no adjacent region can have the label ``$E_{1-i}$".
\end{clm}
{\bf Note.} The proof of this claim is essentially the same as that of Lemma 4.4 of \cite{gabai:87}.
\begin{proof}
Suppose the region, $\mathcal R_0$, has the label ``$D_0$", and an adjacent region, $\mathcal R_1$, has the label ``$E_1$". For each $x \in \mathcal R_0$, there is a red disk, $D$, contained in $\Phi _x (S)$. Similarly, for each $x \in \mathcal R_1$, there is a blue disk, $E$, contained in $\Phi _x (S)$.
Let $x_i$ be some point in the interior of $\mathcal R_i$. Let $p:I \rightarrow D^2$ be an embedded path connecting $x_0$ to $x_1$, which does not wander into any region other than $\mathcal R_0$ or $\mathcal R_1$. As $t$ increases from $0$ to $1$, we see a moment, $t_*$, when $\Phi _{p(t_*)}(S)$ does not meet $F$ transversely (i.e. $t_*=p^{-1}(\partial \mathcal R_1)$). At $t_*$ we simultaneously see the disappearance of $D$, and the appearance of $E$. (Otherwise, $\mathcal R_1$ would have both the labels ``$D_0$" and ``$E_1$", which we ruled out in Claim \ref{c:same}).
Let $\gamma _t$ be the components of $\Phi _{p(t)}(S) \cap F$, whose intersection with $D$ is nontrivial if $t \le t_*$, and whose intersection with $E$ is nontrivial for $t>t_*$. As $t$ approaches $t_*$ from either side, we see a tangency occur for $\gamma _t$ (either with itself, or with some other component of $\Phi _{p(t)}(S) \cap F$). Since generically only one such tangency occurs at $t_*$, we see that $\lim \limits _{t \rightarrow t_*^-} \gamma_t \cap \lim \limits _{t \rightarrow t_*^+} \gamma_t \ne \emptyset$, as in Figure \ref{f:gamma}.
\begin{figure}
\caption{$\gamma _t$, before and after $t_*$.}
\label{f:gamma}
\end{figure}
Since $D$ and $E$ are on opposite sides of $F$, we see from Figure \ref{f:gamma} that they can be made disjoint (since $F$ is orientable), and hence, $D - E$ is an edge of $\Gamma (F)$. As in the proof of Claim \ref{c:same}, this contradicts our labelling.
\end{proof}
\noindent \underline{Stage 3:} {\it The 2-complex, $\Pi$, and a map from $D^2$ to $\Pi$}.
Let $\Pi$ be the labelled 2-complex depicted in Figure \ref{f:pi}. Let $\Sigma '$ be the dual graph of $\Sigma$. Map each vertex of $\Sigma '$ to the point of $\Pi$ with the same label(s) as the region of $D^2$ in which it sits. Claim \ref{c:same} assures that this map is well defined on the vertices of $\Sigma '$.
\begin{figure}
\caption{The 2-complex, $\Pi$.}
\label{f:pi}
\end{figure}
Similarly, map each edge of $\Sigma '$ to the 1-simplex of $\Pi$ whose endpoints are labelled the same. Claim \ref{c:adjacent} guarantees that this, too, is well defined.
We now claim that the map to $\Pi$ extends to all of $D^2$. Note that the maximum valence of a vertex of $\Sigma$ is four. Hence, the boundary of each region in the complement of $\Sigma '$ gets mapped to a 1-cycle with at most four vertices in $\Pi$. Inspection of Figure \ref{f:pi} shows that there is only one such cycle which is not null homologous. Hence, to show the map extends, it suffices to prove the following:
\begin{clm}
Not all four labels can occur around a vertex of $\Sigma$.
\end{clm}
\begin{proof}
The proof is somewhat similar to that of Claim \ref{c:adjacent}. Let $x_*$ denote a vertex of $\Sigma$, around which we see all four labels. Claims \ref{c:same} and \ref{c:adjacent} imply that each region around such a vertex must have a unique label. If $x$ is any point in a region labelled ``$D_i$" (for $i=0,1$), then there is a red disk, $D'_i$, contained in $\Phi _x (S)$. Similarly, if $x$ is in a region labelled ``$E_i$", then there is a blue disk, $E'_i$, contained in $\Phi _x (S)$.
Let $\gamma _x$ denote the components of $\Phi _x (S) \cap F$ whose intersection with $D'_i$ is nontrivial for $x$ in a region with the label ``$D_i$", and whose intersection with $E'_i$ is nontrivial for $x$ in a region with the label ``$E_i$". Let $\gamma _{x_*}$ denote the union of all possible limits of $\gamma _x$, as $x$ approaches $x_*$ from different directions. Then $\gamma _{x_*}$ is some graph with exactly 2 vertices, each of valence four. If $\mathcal R$ is a region which meets $x_*$, and $x \in \mathcal R$, then $\gamma _x$ is one component of the 1-manifold obtained from $\gamma _{x_*}$ by resolving both vertices in some way. (To resolve a vertex is to replace a part of $\gamma _{x_*}$ that looks like ``$\times$" by a part that looks like ``$)($".)
Since there are exactly four ways to resolve two vertices, and there are four regions with different labels around $x_*$, we see all four resolutions of $\gamma _{x_*}$ appear around $x_*$. However, the orientability of $F$ ensures that {\it some} resolution of $\gamma _{x_*}$ always produces a 1-manifold, $\alpha$, that can be made disjoint from all components of all other resolutions. (In fact, one can show that there are always two resolutions of $\gamma _{x_*}$ which have this property, but this is irrelevant for us). Some component of $\alpha$ is the boundary of one of the disks, $D'_0$, $D'_1$, $E'_0$, or $E'_1$. Suppose $\partial D'_0 \subset \alpha$. Then $\partial D'_0$ can be made disjoint from $\partial E'_1$, since $\partial E'_1$ is a component of some other resolution of $\gamma _{x_*}$. This now contradicts our labelling.
\end{proof}
\noindent \underline{Stage 4:} {\it Finding an unlabelled subregion}.
To obtain a contradiction, it suffices to prove that the map from $D^2$ to $\Pi$, when restricted to $\partial D^2$, induces a non-trivial map on homology. To this end, we must examine the possibilities for the labels of the regions adjacent to $\partial D^2$.
Recall from Stage 1 the points, $\{x_i\}$ and $\{y_i\}$ on $S^1=\partial D^2$, and the disks, $\{D _{\frac{i}{n}}\}$ and $\{E _{\frac{i}{m}}\}$.
\begin{clm}
\label{c:boundary1}
If $\mathcal R$ is a region such that some point, $p$, of $\partial \mathcal R$ lies between $x_i$ and $x_{i+1}$ on $\partial D^2$ (for some $i$), then $\mathcal R$ cannot have both of the labels ``$E_0$" and ``$E_1$".
\end{clm}
\begin{proof}
As discussed in Stage 1, at $p$ the definition of $\Phi$ simplifies to
\[\Phi _p(x)=\gamma ^i _{f_i(p)}(x) \cdot \gamma ^{i+1} _{f_{i+1}(p)}(x)\]
By our construction of the functions, $\{f_i\}$, we know that either $f_i (p)=1$ or $f_{i+1}(p)=1$. Suppose the former is true. Then $\Phi$ further simplifies to
\[\Phi _p(x)=\gamma ^i _1(x) \cdot \gamma ^{i+1} _{f_{i+1}(p)}(x)\]
By construction, $\gamma ^i _1(S) \cap D_{\frac {i}{n}}=\emptyset$, and hence $\Phi _p (S) \cap D_{\frac {i}{n}}=\emptyset$. By our definition of a {\it region}, we conclude that for every point, $p \in \mathcal R$, $\Phi _p (S) \cap D_{\frac {i}{n}}=\emptyset$.
Now, if $\mathcal R$ has both of the labels ``$E_0$" and ``$E_1$", then either
\begin{enumerate}
\item there are blue disks, $E$ and $E'$, in $\Phi _p (S)$, which correspond to vertices in different components of $\Gamma (F)$, or
\item there is a blue disk, $E \subset \Phi _p (S)$, which corresponds to an isolated vertex of $\Gamma (F)$.
\end{enumerate}
In either case, there is a blue disk (say $E$) which corresponds to a vertex of $\Gamma (F)$ which is in a component other than the one that contains $D_{\frac {i}{n}}$. However, $\Phi _p (S) \cap D_{\frac {i}{n}}=\emptyset$ and $E \subset \Phi _p (S)$ imply that $D_{\frac {i}{n}}-E$ is an edge of $\Gamma (F)$, a contradiction.
\end{proof}
By a symmetric argument, we can show that a region adjacent to a point of $\partial D^2$ between $y_0$ and $y_m$ cannot have both of the labels ``$D_0$" and ``$D_1$".
\begin{clm}
\label{c:boundary2}
If $\mathcal R_0$ and $\mathcal R_1$ are regions adjacent to $\partial D^2$, with labels ``$E_0$" and ``$E_1$", respectively, then no point of $\mathcal R_0 \cap \mathcal R_1$ can lie between $x_i$ and $x_{i+1}$ on $\partial D^2$ (for any $i$).
\end{clm}
\begin{proof}
If the Claim is not true, then some point, $p$, of $\mathcal R_0 \cap \mathcal R_1$ lies between $x_i$ and $x_{i+1}$ on $\partial D^2$, for some $i$. As in the proof of Claim \ref{c:boundary1}, we may conclude that $\Phi _p (S)$ is disjoint from the red compressing disk, $D_{\frac {i}{n}}$. However, this implies that for all $x \in D^2$ near $p$, $\Phi _x (S) \cap D_{\frac {i}{n}}=\emptyset$. In particular, there are points, $p_j \in \mathcal R_j$ (for $j=0,1$), such that $\Phi _{p_j} (S) \cap D_{\frac {i}{n}}=\emptyset$.
Since $\mathcal R_j$ has only the label ``$E_j"$ (for $j=0,1$), there are blue disks, $E'_j \subset \Phi _{p_j}(S)$, which are in different components of $\Gamma (F)$. But $E'_0 \cap D_{\frac {i}{n}}=E'_1 \cap D_{\frac {i}{n}}=\emptyset$ implies that $E'_0 - D_{\frac {i}{n}} - E'_1$ is a chain of edges in $\Gamma (F)$, a contradiction.
\end{proof}
Again by a symmetric argument, we can show that there cannot be a pair of regions with the labels ``$D_0$" and ``$D_1$", whose boundaries both contain a point of $\partial D^2$ between $y_0$ and $y_m$.
Now, recall from our definition of the functions $\{f_i\}$ and $\{g_i\}$ that there is a point, $\theta _0 \in (y_0,x_0)$, such that $f_0(\theta _0)=g_0(\theta _0)=1$.
\begin{clm}
\label{c:boundary3}
If $\mathcal R_0$ is the region whose boundary contains $\theta _0$, then $\mathcal R_0$ cannot have either of the labels ``$D_1$" or ``$E_1$".
\end{clm}
\begin{proof}
At $\theta _0$, the definition of $\Phi$ simplifies to
\[\Phi _{\theta _0}(x)=\gamma ^0 _1(x) \cdot \delta ^0 _1(x)\]
Hence, $\Phi _{\theta _0}(S)$ is disjoint from both $D_0$ and $E_0$. If $\mathcal R_0$ had the label ``$D_1$", then there would be some red disk, $D \subset \Phi _{\theta _0}(S)$, which corresponds to a vertex of $\Gamma (F)$ in some component, $B \in C_1$. Any such disk must intersect $E_0$, since $E_0$ corresponds to a vertex of $\Gamma (F)$ in some component, $A \in C_0$. However, $D \cap E_0 =\emptyset$, a contradiction. A symmetric argument rules out the possibility of the label ``$E_1$" for $\mathcal R_0$.
\end{proof}
Similarly, we can show that if $\mathcal R_1$ is the region whose boundary contains $\theta _1$, then $\mathcal R_1$ cannot have either of the labels ``$D_0$" or ``$E_0$".
\begin{clm}
The map from $D^2$ to $\Pi$, when restricted to $\partial D^2$, induces a non-trivial map on homology.
\end{clm}
\begin{proof}
Claims \ref{c:boundary1} and \ref{c:boundary2} imply that no point of the arc of $\partial D^2$ between $x_0$ and $x_n$ gets mapped to the lower triangle of $\Pi$. Similarly, we can show that no point of the arc of $\partial D^2$ between $y_0$ and $y_m$ gets mapped to the upper triangle. Finally, Claim \ref{c:boundary3} implies that the region, $\mathcal R_0$, whose boundary contains the point, $\theta _0$, gets mapped to the left triangle of $\Pi$, while the region, $\mathcal R_1$, whose boundary contains the point, $\theta _1$, gets mapped to the right triangle.
All of this directly implies the Claim, as illustrated in Figure \ref{f:rpi}.
\begin{figure}
\caption{The map from $\partial D^2$ to $\Pi$ is non-trivial on homology.}
\label{f:rpi}
\end{figure}
\end{proof}
We now complete the proof of Theorem \ref{t:essential_intersection}. Since $H_1(\Pi) \ne 0$, the map to $\Pi$ cannot extend, so there must be a region with no labels. Now, let $x$ be a point in the interior of an unlabelled region. Then there are no compressions in $\Phi _x(S)$ for $F$. Hence, any loop of $F \cap \Phi _x(S)$ is either essential on both surfaces, or inessential on both. It is now a routine matter to use an innermost disk argument to show that the inessential loops can be removed by a sequence of disk swaps, or by a further isotopy of $S$, if $M$ is irreducible.
\end{proof}
We now present some immediate corollaries to Theorem \ref{t:essential_intersection}.
\begin{cor}
If $M_1 \# M_2$ contains a critical surface, then either $M_1$ or $M_2$ contains a critical surface.
\end{cor}
\begin{cor}
A reducible 3-manifold does not admit a critical Heegaard splitting.
\end{cor}
\begin{cor}
\label{c:essential_boundary}
Suppose $M$ is a 3-manifold which admits a critical Heegaard splitting, such that $\partial M \ne \emptyset$. Then $\partial M$ is essential in $M$.
\end{cor}
\begin{proof}
If $\partial M \cong S^2$, then either $\partial M$ is essential, or $M \cong B^3$. However, Theorem \ref{t:nocrit} implies that $B^3$ does not contain a critical Heegaard surface.
If $\partial M$ contains a component of non-zero genus, then we claim it is incompressible. If not, then Theorem \ref{t:essential_intersection} implies that there is a compressing disk for $\partial M$ which misses the critical Heegaard splitting. However, this would imply that there is a compression body, $W$, with $\partial _- W$ compressible in $W$, a contradiction.
\end{proof}
\section{Generalized Heegaard Splittings}
In this section we cover the background material that we will need for the proof of our main result, Theorem \ref{t:common_stab}.
\begin{dfn}
(Scharlemann--Thompson \cite{st:94})
A {\it Generalized Heegaard Splitting} (GHS) of a 3-manifold, $M$, is a sequence of closed, embedded, pairwise disjoint surfaces, $\{F_i\}_{i=0}^{2n}$, such that for each $i$ between 1 and $n$, $F_{2i-1}$ is a non-trivial Heegaard splitting, or a union of Heegaard splittings (at least one of which is non-trivial), of the submanifold of $M$ co-bounded by $F_{2i-2}$ and $F_{2i}$, and such that $\partial M=F_0 \amalg F_{2n}$.
\end{dfn}
{\it Notes:} (1) If $M$ is closed, then $F_0=F_{2n}=\emptyset$. (2) We allow $F_{2i-1}$ to be a union of Heegaard splittings only when the submanifold of $M$ co-bounded by $F_{2i-2}$ and $F_{2i}$ is disconnected.
We will sometimes depict a GHS schematically as in Figure \ref{f:ghs}. Often when we do this we will also need to represent compressing disks for $F_{2i-2}$, for various values of $i$. Examples of this are the curved arcs depicted in the figure.
\begin{figure}
\caption{Schematic depicting a Generalized Heegaard Splitting.}
\label{f:ghs}
\end{figure}
Recall the ordering of embedded surfaces given in Definition \ref{d:order}. We now use this to define a partial ordering on GHSs.
\begin{dfn}
Let $F^1=\{F^1_i\}$ and $F^2=\{F^2_j\}$ be two GHSs of a 3-manifold, $M$. We say $F^1<F^2$ if $\{F^1_i | i \ {\rm odd}\} <\{F^2_j | j \ \rm odd\}$, where each set is put in non-increasing order, and then the comparison is made lexicographically.
\end{dfn}
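To illustrate the lexicographic nature of the comparison: if the odd-index surfaces of $F^1$ are three surfaces of genus two, while the only odd-index surface of $F^2$ has genus three, then the associated complexity sequences are $(16,16,16)$ and $(36)$. Since $16<36$ in the first entry, $F^1<F^2$, even though the total complexity of $F^1$ is larger; the ordering rewards shrinking the largest splitting surface rather than the sum of the complexities.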
We now define two ways to get from one GHS to a smaller one. Suppose $\{F_i\}$ is a GHS. Suppose further that $D-E$ is an edge in $\Gamma (F_{2i-1})$, where $D$ and $E$ are disks in the submanifold of $M$ co-bounded by $F_{2i-2}$ and $F_{2i}$, for some $i$. Let $F_D$ denote the surface obtained from $F_{2i-1}$ by compression along $D$, and $F_E$ denote the surface obtained from $F_{2i-1}$ by compression along $E$. If $D \cap E =\emptyset$, then let $F_{DE}$ denote the surface obtained from $F_{2i-1}$ by compression along both $D$ and $E$. There are now two cases, with several subcases:
\begin{enumerate}
\item $D \cap E =\emptyset$
\begin{enumerate}
\item $F_D \ne F_{2i-2}$, $F_E \ne F_{2i}$.
\\ Remove $F_{2i-1}$ from $\{F_i\}$. In its place, insert $\{F_D, F_{DE}, F_E\}$ and reindex.
\item $F_D = F_{2i-2}$, $F_E \ne F_{2i}$.
\\ Replace $\{F_{2i-2}, F_{2i-1}\}$ with $\{F_{DE},F_E\}$.
\item $F_D \ne F_{2i-2}$, $F_E = F_{2i}$.
\\ Replace $\{F_{2i-1}, F_{2i}\}$ with $\{F_D,F_{DE}\}$.
\item $F_D = F_{2i-2}$, $F_E = F_{2i}$.
\\ Replace $\{F_{2i-2}, F_{2i-1}, F_{2i}\}$ with $F_{DE}$ and reindex.
\end{enumerate}
\item $|D \cap E|=1$ (In this case $F_D$ and $F_E$ co-bound a product region of $M$)
\begin{enumerate}
\item $F_D \ne F_{2i-2}$, $F_E \ne F_{2i}$.
\\ Replace $F_{2i-1}$ in $\{F_i\}$ with $F_D$ or $F_E$.
\item $F_D = F_{2i-2}$, $F_E \ne F_{2i}$.
\\ Remove $\{F_{2i-2}, F_{2i-1}\}$ and reindex.
\item $F_D \ne F_{2i-2}$, $F_E = F_{2i}$.
\\ Remove $\{F_{2i-1}, F_{2i}\}$ and reindex.
\item $F_D = F_{2i-2}$, $F_E = F_{2i}$.
\\ Remove $\{F_{2i-1}, F_{2i}\}$ {\it or} $\{F_{2i-2}, F_{2i-1}\}$ and reindex.
\end{enumerate}
\end{enumerate}
If $\partial D \cup \partial E$ bounds an annulus on $F_{2i-1}$, then some element of the sequence of surfaces that we get after one of the above operations contains a 2-sphere component. In this case, we remove the 2-sphere component.
In Case 1 above ($D \cap E =\emptyset$), we say the new sequence was obtained from the old one by the {\it weak reduction}, $D-E$. In Case 2 ($|D \cap E|=1$), we say the new sequence was obtained by a {\it destabilization}. Each of these operations is represented schematically in Figure \ref{f:reddfn}. In either case we leave it as an exercise to show that the new sequence is a GHS, provided $M$ is irreducible. Note that if the GHS, $F^1$, is obtained from the GHS, $F^2$, by weak reduction or destabilization, then $F^1<F^2$.
\begin{figure}
\caption{(a) A destabilization. (b) A weak reduction.}
\label{f:reddfn}
\end{figure}
For readers unfamiliar with Generalized Heegaard Splittings, we pause here for a moment to tie these concepts to more familiar ones. Suppose $M$ is a closed 3-manifold, and $\{F_i\}_{i=0}^{2n}$ is a GHS of $M$. Since $M$ is closed, we must have $F_0=F_{2n}=\emptyset$. If, in addition, $n=1$, then our GHS looks like $\{\emptyset, F_1, \emptyset\}$. By definition, $F_1$ is a Heegaard splitting of $M$. If $\{\emptyset, F'_1, \emptyset\}$ was obtained from $\{\emptyset, F_1, \emptyset\}$ by a destabilization, then the Heegaard splitting, $F_1$, is a stabilization of the Heegaard splitting, $F'_1$.
\section{Minimal Genus Common Stabilizations}
In this section we prove the main result of this paper:
\begin{thm}
\label{t:common_stab}
Suppose $M$ is a 3-manifold whose boundary, if non-empty, is incompressible, and $F$ and $F'$ are distinct strongly irreducible Heegaard splittings of $M$ which induce the same partition of $\partial M$. If the minimal genus common stabilization of $F$ and $F'$ is not critical, then $M$ contains a non-boundary parallel incompressible surface.
\end{thm}
We will actually prove the contrapositive of this Theorem. That is, we show that if $F$ and $F'$ are strongly irreducible Heegaard splittings of a small 3-manifold, $M$, then their minimal genus common stabilization is critical. A {\it small} 3-manifold is one which is either closed and non-Haken, or is one with incompressible boundary, in which every incompressible surface is boundary parallel.
The key technique that will be used in the proof is a careful analysis of the ``best" way one can transform $F$ into $F'$ by a sequence of intermediate GHSs.
\begin{dfn}
A {\it Sequence Of GHSs} (SOG) of a 3-manifold, $M$, is a sequence, $\{F^j\}_{j=1}^n$, such that for each $k$ between 1 and $n-1$, one of the GHSs, $F^k$ or $F^{k+1}$, is obtained from the other by a weak reduction or destabilization.
\end{dfn}
{\it Notation:} We will always use subscripts to denote surfaces, superscripts to denote GHSs, and a boldface font to denote an entire SOG. Hence, $F_i^j$ is the $i$th surface of the $j$th GHS of the SOG, $\bf F$. If $\bf F$ is a SOG of a 3-manifold, $M$, then $M^k_i$ will always denote the submanifold of $M$ cobounded by $F^k_{i-1}$ and $F^k_{i+1}$.
\begin{dfn}
If $\bf F$ is a SOG of $M$, and $k$ is such that $F^{k-1}$ and $F^{k+1}$ are both obtained from $F^k$ by weak reduction or destabilization, then we say the GHS, $F^k$, is {\it maximal} in $\bf F$. Similarly, if $k$ is such that $F^k$ is obtained from both $F^{k-1}$ and $F^{k+1}$ by weak reduction or destabilization, then we say it is {\it minimal}.
\end{dfn}
We now define a partial ordering on SOGs.
\begin{dfn}
Let $\bf F$ and $\bf G$ be two SOGs of some 3-manifold, $M$. Let $MAX(\bf F)$ and $MAX(\bf G)$ denote the sets of maximal GHSs of $\bf F$ and $\bf G$. If we reorder each of these sets in non-increasing order, then we say $\bf F<\bf G$ if $MAX({\bf F})<MAX({\bf G})$, where all comparisons are made lexicographically. If $\bf F$ is minimal among all SOGs of some collection, then we say it is {\it flattened}.
\end{dfn}
This definition is a bit hard to slog through. After all, $MAX(\bf F)$ is a set of sets of surfaces. At every level we make a lexicographic comparison. Unfortunately, there does not seem to be much that can be done to make this any simpler. Perhaps the only thing that will convince the reader that this is the right definition is to see it used in the proofs of a few Lemmas.
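For the reader who finds a procedural description easier to parse, the following short Python sketch (purely illustrative; a surface is recorded only by the list of Euler characteristics of its components, and the maximal GHSs are supplied by hand rather than detected from the SOG) implements the three nested comparisons: the complexity $c$ of Definition \ref{d:order}, the induced ordering on GHSs, and the ordering on SOGs by their maximal GHSs.
\begin{verbatim}
def c(surface):
    # Complexity of Definition d:order: sum of (2 - chi)^2 over the components.
    return sum((2 - chi) ** 2 for chi in surface)

def ghs_key(ghs):
    # Non-increasing sequence of complexities of the odd-index surfaces.
    return sorted((c(F) for i, F in enumerate(ghs) if i % 2 == 1), reverse=True)

def sog_key(sog, maximal_indices):
    # Non-increasing sequence of GHS keys of the maximal GHSs of the SOG.
    return sorted((ghs_key(sog[k]) for k in maximal_indices), reverse=True)

# Example: a genus-2 splitting {emptyset, F, emptyset} versus the GHS obtained
# from it by a weak reduction into two tori with a sphere between them.
G2 = [[], [-2], []]
WR = [[], [0], [2], [0], []]
print(ghs_key(WR) < ghs_key(G2))   # True: [4, 4] < [16], so WR is the smaller GHS
\end{verbatim}
Comparing two SOGs then amounts to comparing their \verb|sog_key| values, exactly as in the definition above.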
Let us not forget that our eventual goal is to prove Theorem \ref{t:common_stab}. To this end, we suppose for the remainder of the paper that $M$ is a small 3-manifold that contains non-isotopic strongly irreducible Heegaard splittings, $F$ and $F'$. One subtle technical point here is that if $M$ is not closed, then we will assume further that $F$ and $F'$ induce the same partition of $\partial M$. That is, we assume that $\partial M=\partial _1 M \cup \partial _2 M$ (either of which may be empty), and that all components of $\partial _1 M$ are on one side of both $F$ and $F'$, and all components of $\partial _2 M$ are on the other side of $F$ and $F'$.
Among all SOGs of $M$ whose first GHS is $\{\partial _1 M, F, \partial _2 M \}$, and whose last GHS is $\{\partial _1 M, F', \partial _2 M \}$, let $\bf F$ denote one which is flattened. We now prove several Lemmas that tell us precisely what $\bf F$ looks like.
\begin{lem}
\label{l:oddstrirr}
If $F^k$ is maximal in $\bf F$, then for every odd value of $i$ except for exactly one, $F^k_i$ is strongly irreducible in $M^k_i$.
\end{lem}
\begin{proof}
Since $F^k$ is maximal in $\bf F$, there is some odd $p$ such that $F^{k-1}$ is obtained from $F^k$ by a weak reduction or destabilization corresponding to an edge, $D-E$, in $\Gamma (F^k_p)$. Similarly, there is an odd $q$ such that $F^{k+1}$ is obtained from $F^k$ by a weak reduction or destabilization corresponding to an edge, $D'-E'$, in $\Gamma (F^k_q)$. We first claim that $p=q$. If not, then we can replace $F^k$ in ${\bf F}$ with the GHS obtained from $F^{k-1}$ by the weak reduction or destabilization, $D'-E'$. Since the new GHS can also be obtained from $F^{k+1}$ by the weak reduction or destabilization, $D-E$, our substitution has defined a new SOG of $M$. In general, this replaces one maximal GHS of ${\bf F}$ with two smaller maximal GHSs, reducing $MAX({\bf F})$, and therefore contradicting our assumption that ${\bf F}$ was flattened.
Now, suppose $F^k_r$ is not strongly irreducible, for some odd $r \ne p$, and let $D^*-E^*$ be an edge in $\Gamma (F^k_r)$. Let $G^-$, $G^0$, and $G^+$ denote the GHSs obtained from $F^{k-1}$, $F^k$, and $F^{k+1}$ by the weak reduction or destabilization, $D^*-E^*$. Now replace $F^k$ in ${\bf F}$ with $\{G^-,G^0,G^+\}$ and reindex. In general, this also replaces one maximal GHS of ${\bf F}$ with two smaller maximal GHSs, reducing $MAX({\bf F})$.
\end{proof}
\begin{lem}
\label{l:critmax}
If $F^k$ is maximal in ${\bf F}$, then there is exactly one odd number, $p$, such that $F^k_p$ is critical in $M^k_p$.
\end{lem}
\begin{proof}
Let $p$, $D-E$, and $D'-E'$ be as defined in the proof of Lemma \ref{l:oddstrirr}. To establish the Lemma we show that $D-E$ and $D'-E'$ are in different components of $\Gamma (F^k_p)$. Suppose this is not true. Then there is a chain of edges in $\Gamma (F^k_p)$ connecting $D-E$ to $D'-E'$. That is, there is a sequence of disks, $\{C_l\}_{l=1}^w$, such that $C_1=D$, $C_2=E$, $C_{w-1}=D'$, and $C_w=E'$, and for each $l$, $[C_{l-1}]-[C_l]$ is an edge of $\Gamma (F^k_p)$ ($[C_l]$ denotes the equivalence class of $C_l$). In other words, the following is a chain connecting $D-E$ to $D'-E'$:
\[ [D]-[E]-[C_3]-[C_4]-...-[C_{w-2}]-[D']-[E']\]
\begin{clm}
\label{c:chain}
There are disks, $\{D_{2l+1}\}$ and $\{E_{2l}\}$, such that the following is a chain in $\Gamma (F^k_p)$:
\[D_1-E_2-D_3-E_4-...-D_{w-1}-E_w\]
where $D_1=D$, $E_2=E$, and the pair $(D_{w-1},E_w)$ is isotopic to the pair $(D',E')$.
\end{clm}
\begin{proof}
Let $D_1=D$, and $E_2=E$. By definition there exist representatives, $U \in [E_2]$, and $V \in [C_3]$ such that $|U \cap V|\le 1$. Since $E_2$ and $U$ are in $[E_2]$, there is an isotopy, $\Phi$, of $M$ taking $F^k_p$ to $F^k_p$, and $E_2$ to $U$. If we apply $\Phi ^{-1}$ to $V$, we obtain a disk, on the opposite side of $F^k_p$ as $E_2$, which intersects $E_2$ at most once. Let this disk be $D_3$. Then $E_2-D_3$ is an edge of $\Gamma (F^k_p)$, and $D_3 \in [C_3]$. Continue in this way, using $D_3$ now to construct $E_4$, etc.
\end{proof}
\begin{clm}
\label{c:isotopy}
The GHS, $F'$, obtained from $F^k$ by the weak reduction or destabilization, $D_{w-1}-E_w$, is the same as $F^{k+1}$ (up to isotopy).
\end{clm}
\begin{proof}
Let $\Phi$ denote the isotopy of $M$ which takes the pair, $(D_{w-1},E_w)$, to the pair, $(D',E')$. Then $\Phi$ also takes $F'$ to $F^{k+1}$.
\end{proof}
\begin{clm}
\label{c:disjoint}
For each even $l$, we may assume that $D_{l-1} \cap D_{l+1}=\emptyset$. Similarly, for each odd $l$, we may assume that $E_{l-1} \cap E_{l+1}=\emptyset$.
\end{clm}
\begin{proof}
If the statement of the claim is not true, then we show that we can replace the chain with a chain where it is true. Let us assume then that for some even $l$, $D_{l-1} \cap D_{l+1} \ne \emptyset$.
Begin by isotoping $D_{l-1}$ and $D_{l+1}$ so that $|D_{l-1} \cap D_{l+1}|$ is minimal. Let $\alpha$ be an outermost arc of $D_{l-1} \cap D_{l+1}$ on $D_{l+1}$. Then $\alpha$ cuts off a subdisk, $V$, of $D_{l+1}$, such that $V \cap D_{l-1} = \alpha$. Furthermore, $\alpha$ cuts $D_{l-1}$ into two disks, $V'$ and $V''$. Since $D_{l-1}$ and $D_{l+1}$ each meet $E_l$ in at most a point, we can conclude that either $V' \cup V$ or $V'' \cup V$ meets $E_l$ in at most a point. Let's assume that this is true of $D^1=V' \cup V$. The minimality of $|D_{l-1} \cap D_{l+1}|$ ensures that $D^1$ is in fact a compressing disk for $F^k_p$. We may also isotope $D^1$ so that it misses $D_{l-1}$ entirely, and so that $|D^1 \cap D_{l+1}|<|D_{l-1} \cap D_{l+1}|$. Hence, we have the following chain in $\Gamma (F^k_p)$:
\[D_{l-1}-E_l-D^1-E_l-D_{l+1}\]
We now repeat the construction, using $D^1$ and $D_{l+1}$ to define a disk, $D^2$, such that $D^1 \cap D^2=\emptyset$, and $|D^2 \cap D_{l+1}|<|D^1 \cap D_{l+1}|$, yielding the chain:
\[D_{l-1}-E_l-D^1-E_l-D^2-E_l-D_{l+1}\]
Eventually, this process must terminate with the desired chain.
\end{proof}
We now proceed to show that our original choice of ${\bf F}$ was not flattened. We do this in two stages. First, for all odd $l$ between 1 and $w-1$, define $B^l$ to be the GHS obtained from $F^k$ by the weak reduction or destabilization, $D_l-E_{l+1}$. For all even $l$, define $B^l$ to be the GHS obtained from $F^k$ by the weak reduction or destabilization, $E_l-D_{l+1}$. Note that since $D_1=D$ and $E_2=E$, it follows that $B^1=F^{k-1}$. Furthermore, Claim \ref{c:isotopy} implies that $B^{w-1}=F^{k+1}$.
Let $A^l$ be a copy of the GHS, $F^k$, for all $l$ between 1 and $w-2$. Now, insert into ${\bf F}$ the sequence of GHSs, $\{B^1,A^1,B^2, A^2, ..., A^{w-2}, B^{w-1}\}$, where $\{F^{k-1},F^k,F^{k+1}\}$ occurred. The new SOG thus defined looks like
\[ ..., F^{k-2}, B^1, A^1, B^2, A^2, ..., B^{w-2}, A^{w-2}, B^{w-1}, F^{k+2}, ...\]
The construction of ${\bf F}'$ from ${\bf F}$ is illustrated in Figure \ref{f:stage1}.
\begin{figure}
\caption{Constructing ${\bf F}'$ from ${\bf F}$.}
\label{f:stage1}
\end{figure}
To see that this defines a SOG, ${\bf F}'$, note that for each odd $l$ (say), $B^l$ is obtained from both $A^{l-1}$ and $A^l$ by the weak reduction or destabilization, $D_l-E_{l+1}$. Note also that each of the $A^l$s that we have inserted becomes a maximal GHS of ${\bf F}'$.
Now, for each $l$ we perform some operation on ${\bf F}'$ that will result in a new SOG, ``flatter" than ${\bf F}$. We assume that $l$ is even, so that $C_{l \pm 1}=D_{l \pm 1}$, and $C_l=E_l$ (the cases when $l$ is odd are completely symmetric). By Claim \ref{c:disjoint}, we may assume $D_{l-1} \cap D_{l+1}=\emptyset$. Up to symmetry, there are now three cases to consider:
{\bf Case 1.} $D_{l-1} \cap E_l = \emptyset$, and $E_l \cap D_{l+1} = \emptyset$.
In this case, $D_{l-1}-E_l$ and $E_l - D_{l+1}$ persist as weak reductions for $B^l$ and $B^{l-1}$, respectively. Performing either of these weak reductions on the corresponding GHS yields the same GHS, which we call $\bar A^{l-1}$. We now replace $A^{l-1}$ with $\bar A^{l-1}$ in ${\bf F}'$. This is illustrated schematically in Figure \ref{f:case1}.
\begin{figure}
\caption{Replacing $A^{l-1}$ with $\bar A^{l-1}$.}
\label{f:case1}
\end{figure}
{\bf Case 2.} $D_{l-1} \cap E_l = \emptyset$, and $|E_l\cap D_{l+1}| = 1$.
In this case, $E_l - D_{l+1}$ persists as a destabilization for $B^{l-1}$. Performing this destabilization on $B^{l-1}$ yields the GHS, $B^l$. Hence, we can remove $A^{l-1}$ from ${\bf F}'$, and still be left with a SOG. This is illustrated in Figure \ref{f:case2}.
\begin{figure}
\caption{Removing $A^{l-1}$ from ${\bf F}'$.}
\label{f:case2}
\end{figure}
{\bf Case 3.} $|D_{l-1} \cap E_l| = |E_l\cap D_{l+1}| = 1$.
Consider the neighborhood of $D_{l-1} \cup E_l \cup D_{l+1}$. This is a ball, which intersects $F^k_p$ in a standardly embedded, twice punctured torus. Hence, the result of the destabilization, $D_{l-1}-E_l$, is isotopic to the result of the destabilization, $E_l-D_{l+1}$. In other words, $B^{l-1}=B^l$, up to isotopy. Hence, we can simply remove $\{B^l,A^l\}$ from ${\bf F}'$, and still be left with a SOG.
After performing one of the above three operations for each $l$, we eliminate all occurrences of $A^l$. The overall effect is a new SOG in which there are many more maximal GHSs than there were in ${\bf F}$, but where each one is smaller than $F^k$. This shows that $\bf F$ was not flattened, a contradiction.
\end{proof}
We pause here for a moment to note that the proof of Lemma \ref{l:critmax} never used our assumption that $M$ is small. Hence, as an immediate Corollary we obtain the following:
\begin{cor}
If a 3-manifold contains non-isotopic Heegaard splittings of some genus, then it contains a critical surface.
\end{cor}
This isn't quite the converse of Theorem \ref{t:nocrit}, since we do not conclude that there is a critical {\it Heegaard} surface. For this, we will need our assumption that $M$ is small.
\begin{lem}
\label{l:critsfce}
If $F^k$ is maximal in ${\bf F}$, then $F^k=\{F^k_0, F^k_1, F^k_2\}$, for some critical Heegaard surface, $F^k_1$.
\end{lem}
{\bf Note:} Much of this proof is similar to one which appears in \cite{st:94}.
\begin{proof}
Suppose that $F^k$ is maximal in ${\bf F}$. By Lemma \ref{l:critmax} there is some odd number, $p$, such that $F^k_p$ is critical in $M^k_p$. We now claim that for each even $i$, $F^k_i$ is incompressible in $M$. Suppose this is not true for $F^k_i$. Then there is a compressing disk, $D$, for $F^k_i$ in $M$. Choose $D$ so as to minimize $w=|D \cap \bigcup \limits _{j\ {\rm even}} F^k_j|$. Let $\alpha$ be a loop of $D \cap \bigcup \limits _{j\ {\rm even}} F^k_j$ which is innermost on $D$, and suppose $\alpha$ lies in $F^k_j$. Then $\alpha$ bounds a subdisk, $D'$, of $D$. If $\alpha$ were inessential on $F^k_j$, then we could swap a subdisk of $F^k_j$ with a subdisk of $D$ to lower $w$, a contradiction. So it must be the case that $D'$ is a compressing disk for $F^k_j$.
The disk $D'$ lies in either $M^k_{j-1}$ or $M^k_{j+1}$. Without loss of generality, assume the former. By definition, $F^k_{j-1}$ is a Heegaard splitting for $M^k_{j-1}$. However, if $j-1=p$, then $F^k_{j-1}$ is a critical Heegaard splitting for $M^k_{j-1}$, and Corollary \ref{c:essential_boundary} implies that $\partial M^k_{j-1}$ is incompressible in $M^k_{j-1}$. On the other hand, if $j-1 \ne p$, then Lemma \ref{l:oddstrirr} implies $F^k_{j-1}$ is a strongly irreducible Heegaard splitting for $M^k_{j-1}$, and we know that $\partial M^k_{j-1}$ is incompressible in $M^k_{j-1}$ by \cite{cg:87}.
Since $M$ is assumed to be small, we conclude that there are no non-empty, non-boundary parallel surfaces of even subscript in $F^k$, other than possibly the first or last surface of $F^k$.
Let $F^k_0$ denote the first surface of $F^k$ and $F^k_m$ the last. Suppose then that $F^k_i$ is boundary parallel, where $i$ is some even number not equal to $0$ or $m$. Let $F_0$ denote the components of $F^k_i$ that are parallel to $F^k_0$, and $F_m$ denote the components of $F^k_i$ that are parallel to $F^k_m$. Assume $F_0$ is non-empty. If $F_m$ is also non-empty, then choose $x \in F_0$, and $y \in F_m$. We claim that there is no path in $M$ connecting $x$ to $y$. If $c$ is such a path, then let $c'$ denote the closure of a component of $c - F^k_i$ which connects $F_0$ to $F_m$. If $c'$ sits in the submanifold of $M$ between $F^k_0$ and $F^k_i$, then, since $c' \cap F_0 \ne \emptyset$, and $F_0$ is parallel into $F^k_0$, the other endpoint of $c'$ must lie on either $F_0$ or $F^k_0$, a contradiction. Similarly, if $c'$ sits in the submanifold of $M$ between $F^k_i$ and $F^k_m$, then, since $c' \cap F_m \ne \emptyset$, and $F_m$ is parallel into $F^k_m$, the other endpoint of $c'$ must lie on either $F_m$ or $F^k_m$, a contradiction.
We conclude that $F_m =\emptyset$, i.e. that every component of $F^k_i$ is parallel to some component of $F^k_0$. But then this must also be true for $F^k_j$, for {\it all} even $j$ between $0$ and $i$ (since they are all incompressible, and the only such surfaces in a product are boundary parallel). In particular, $F^k_2$ is parallel into $F^k_0$, and so $F^k_1$ is a strongly irreducible Heegaard splitting of a product. As the only such splittings are trivial by \cite{st:93}, and trivial Heegaard splittings do not occur among the surfaces of odd subindex of a GHS, we conclude that $m=2$, i.e. that there are no non-empty surfaces of even subscript in $F^k$, except for possibly $F^k_0$ and $F^k_2$.
\end{proof}
\begin{lem}
\label{l:minHeegaard}
There is a flattened SOG, $\bf F$, whose minimal GHSs are of the form $\{F^k_0, F^k_1, F^k_2\}$, for some strongly irreducible Heegaard surface, $F^k_1$.
\end{lem}
\begin{proof}
Let $\bf F$ first denote any flattened SOG, and $F^k$ a GHS which is minimal in $\bf F$. If, for some odd $i$, $F^k_i$ is not strongly irreducible in $M^k_i$, then there is some edge in $\Gamma(F^k_i)$. We can now form a new GHS, $F^*$, from $F^k$ by doing the corresponding weak reduction or destabilization, and replace $F^k$ in ${\bf F}$ with the sequence $F^k, F^*, F^k$. This replaces one minimal GHS of ${\bf F}$ with a smaller one, without changing any other maximal or minimal GHS. We may now repeat this process until we arrive at a flattened SOG in which all odd surfaces of all minimal GHSs are strongly irreducible.
Scharlemann and Thompson show in \cite{st:94} that if $F^k_i$ is strongly irreducible in $M^k_i$, for all odd $i$, then $F^k_i$ is incompressible in $M$ for all even $i$. The proof of the Lemma is finished by noting that since $M$ is small it contains no non-boundary parallel incompressible surfaces. Hence, as in the proof of Lemma \ref{l:critsfce}, $F^k$ can contain no non-empty surfaces with even subscript, aside from the first and last.
\end{proof}
\begin{lem}
If $F^k=\{F^k_0, F^k_1, F^k_2\}$ is maximal in ${\bf F}$, and $F^l=\{F^l_0, F^l_1,F^l_2\}$ is the next (or previous) minimal GHS in ${\bf F}$, then $F^k_1$ is a stabilization of $F^l_1$, $F^k_0=F^l_0$, and $F^k_2=F^l_2$.
\end{lem}
\begin{proof}
Let $\Theta$ denote all of the possibilities for the sequence of GHSs between $F^k$ and $F^l$. Suppose that $\{X^i\}_{i=1}^{n} \in \Theta$ (so that $X^1=F^k$ and $X^n=F^l$). Recall that for each $i$ between $1$ and $n-1$, $X^{i+1}$ is obtained from $X^i$ by either a weak reduction or a destabilization. Note that $X^n$ must be obtained from $X^{n-1}$ by a destabilization, because a weak reduction can never produce a GHS of the form $\{F^l_0, F^l_1,F^l_2\}$ (because the number of surfaces of odd subindex in a GHS which results from a weak reduction is always greater than 1).
We first claim that $F^k_1$ is a stabilization of $F^l_1$. For each $X' \in \Theta$, let $DS(X')$ denote the number of destabilizations which occur after the last weak reduction in $X'$ (If there are no weak reductions in some $X'$, then our claim is proved). By the last sentence of the above paragraph, we know that $DS(X') \ge 1$, for all $X' \in \Theta$. Let $WR(X')$ denote the total number of weak reductions in $X'$. Finally, let $X$ denote the element of $\Theta$ for which the pair $(WR(X),DS(X))$ is minimal, where such pairs are compared lexicographically.
Suppose $X^i$ is the last GHS in $X$ obtained from $X^{i-1}$ by weak reduction. This weak reduction involves compressing disks, $D$ and $E$, for some odd surface, $X^{i-1}_p$, of $X^{i-1}$ such that $D \cap E=\emptyset$. By assumption, $X^{i+1}$ is obtained from $X^i$ by a destabilization. This destabilization involves compressing disks, $D'$ and $E'$, for some odd surface of $X^i$ such that $|D' \cap E'|=1$. Now it is a matter of enumerating all possible cases, and checking the definitions of weak reduction and destabilization. In each case we find that we can either reduce the total number of weak reductions by one, or we can switch the order of the last weak reduction and the next destabilization. Either of these will reduce our complexity, providing a contradiction. We will do the most difficult (and illustrative) case here, and leave the others as an exercise for the reader.
Let $X^{i-1}_D$, $X^{i-1}_E$, and $X^{i-1}_{DE}$ be the surfaces obtained from $X^{i-1}_p$ by compression along $D$, $E$ and both $D$ and $E$. The first case of the definition of a weak reduction is when $X^{i-1}_D \ne X^{i-1}_{p-1}$, and $X^{i-1}_E \ne X^{i-1}_{p+1}$. In this case, $X^i$ is obtained from $X^{i-1}$ by removing the surface, $X^{i-1}_p$, inserting $\{X^{i-1}_D, X^{i-1}_{DE}, X^{i-1}_E\}$ in its place, and reindexing. The matter of reindexing is just for notational convenience, so we may choose to hold off on this for a moment. Hence, for now the surfaces of $X^i$ are $\{..., X^{i-1}_{p-1}, X^{i-1}_D, X^{i-1}_{DE}, X^{i-1}_E, X^{i-1}_{p+1}, ...\}$.
If $D'$ and $E'$ are compressing disks for any surface, $X^{i-1}_j$, where $j$ is an odd number not equal to $p$, then clearly we could have done the destabilization, $D'-E'$ before the weak reduction, $D-E$. Such a switch reduces $DS(X')$, keeping $WR(X')$ fixed, contradicting our minimality assumption. We conclude then that $D'$ and $E'$ are compressing disks for either $X^{i-1}_D$ or $X^{i-1}_E$. Assume the former.
Now we check the definition of what it means to perform the destabilization, $D'-E'$. The relevant surfaces of $X^i$ are $\{..., X^{i-1}_{p-1}, X^{i-1}_D, X^{i-1}_{DE}, ...\}$. By definition, $D'$ lies in the submanifold of $M$ cobounded by $X^{i-1}_{p-1}$ and $X^{i-1}_D$, while $E'$ lies in the submanifold cobounded by $X^{i-1}_D$ and $X^{i-1}_{DE}$. But $X^{i-1}_{DE}$ was obtained from $X^{i-1}_D$ by compression along $E$. Hence, it must be the case that $E'=E$.
To form the GHS, $X^{i+1}$, we are now instructed to create the surfaces, $X^i_{D'}$ and $X^i_{E'}$, which are obtained from $X^{i-1}_D$ by compression along $D'$ and $E'$. Since $E'=E$, it must be that $X^i_{E'}=X^{i-1}_{DE}$. Assume that $X^i_{D'} \ne X^{i-1}_{p-1}$ (if this is not true, then there is another case to check). To form $X^{i+1}$, we are instructed to simply remove $\{X^{i-1}_D, X^{i-1}_{DE}\}$ from $X^i$, and reindex. The resulting GHS now looks like $\{..., X^{i-1}_{p-1}, X^{i-1}_E, X^{i-1}_{p+1}, ...\}$. We now claim that there was a destabilization of $X^{i-1}$ which would have resulted in precisely this GHS.
Since the surface, $X^i_{D'}$, was obtained from $X^{i-1}_D$ by compression along the red disk, $D'$, and $X^{i-1}_D$ was obtained from $X^{i-1}_p$ by compression along the red disk, $D$, we can identify $D'$ with a red compressing disk (which we will continue to call $D'$) for $X^{i-1}_p$. That is, there is at least one compressing disk for $X^{i-1}_p$ which will become isotopic to $D'$ after we compress along $D$. Choose one, and call it $D'$ also. As $E'=E$, and $|D' \cap E'|=1$, we have $|D' \cap E|=1$. Hence, we can perform the destabilization, $D'-E$, on the GHS, $X^{i-1}$. Let's do this, and see what we get.
One more time, the first step in performing a destabilization is to form the surfaces, $X^{i-1}_{D'}$, and $X^{i-1}_E$, obtained from $X^{i-1}_p$ by compression along $D'$ and $E$. We have already assumed that $X^{i-1}_E \ne X^{i-1}_{p+1}$. Since neither $X^{i-1}_D$ nor $X^i_{D'}$ were equal to $X^{i-1}_{p-1}$, it must be that $X^{i-1}_{D'} \ne X^{i-1}_{p-1}$. Now, checking the definition of destabilization one last time, to perform $D'-E$ on $X^{i-1}$, we are instructed to remove $X^{i-1}_p$, and in its place insert $X^{i-1}_E$. The resulting GHS now looks like $\{..., X^{i-1}_{p-1}, X^{i-1}_E, X^{i-1}_{p+1}, ...\}$, which we have already seen is precisely the GHS, $X^{i+1}$. What we have shown is that we can reduce the total number of weak reductions by one, again contradicting our minimality assumption on $X$.
The above argument shows that $F^k_1$ is a stabilization of $F^l_1$. The proof is now complete by noting that stabilization does not change the induced partition of the boundary components of $M$, and so it must be that $F^k_0=F^l_0$, and $F^k_2=F^l_2$.
\end{proof}
By the previous Lemmas, we may assume that for all $k$, $F^k=\{\partial _1 M, F^k_1, \partial _2 M\}$, for some Heegaard surface, $F^k_1$. If $F^k$ is minimal in ${\bf F}$, then $F^k_1$ is strongly irreducible. If $F^k$ is maximal in ${\bf F}$, then $F^k_1$ is critical. Furthermore, for all $k$ either $F^k$ or $F^{k+1}$ is obtained from the other by a destabilization.
\begin{lem}
There is exactly one maximal GHS in ${\bf F}$.
\end{lem}
\begin{proof}
Suppose not. Let $F^a$ and $F^c$ be consecutive maximal GHSs of ${\bf F}$, and $F^b$ be the minimal GHS such that $a<b<c$. We assume, without loss of generality, that $c-b \ge b-a$. Note that $F^b$ is obtained from both $F^a$ and $F^{2b-a}$ by a sequence of $b-a$ destabilizations. In other words, the Heegaard splittings, $F^a_1$ and $F^{2b-a}_1$, can be obtained from the Heegaard splitting, $F^b_1$, by doing $b-a$ stabilizations. However, stabilization is unique, so $F^a=F^{2b-a}$. Hence, if we remove the GHSs between $F^{a+1}$ and $F^{2b-a}$ (inclusive) from ${\bf F}$, we are left with a new SOG, with one fewer maximal GHS ($F^a$ will no longer be maximal, unless $c-b=b-a$, in which case we will have removed $F^b$), where every other maximal GHS has remained unchanged. This contradicts our minimality assumption on ${\bf F}$.
\end{proof}
We summarize all of our results as follows: there is a SOG, ${\bf F}$, of $M$ such that
\begin{enumerate}
\item $F^1=\{\partial _1 M, F, \partial _2 M\}$,
\item $F^n=\{\partial _1 M, F', \partial _2 M\}$,
\item For all $k$, $F^k=\{\partial _1 M, F^k_1, \partial _2 M\}$, for some Heegaard surface, $F^k_1$. If $F^k$ is maximal in ${\bf F}$, then $F^k_1$ is critical.
\item There is a unique maximal GHS, $F^k$, in ${\bf F}$.
\end{enumerate}
We can say all this in a much simpler way: $F^k_1$ is a critical Heegaard splitting of $M$, which is a common stabilization of $F$ and $F'$. Our assumption that ${\bf F}$ is flattened ensures that $F^k_1$ is a {\it minimal genus} common stabilization, and hence the proof of Theorem \ref{t:common_stab} is complete.
Now, if we combine Theorem \ref{t:common_stab} with Theorem \ref{t:nocrit}, we obtain the following:
\begin{cor}
A small 3-manifold contains a critical Heegaard surface if and only if it contains non-isotopic Heegaard splittings of some genus.
\end{cor}
\section{Open Questions}
\label{s:questions}
We conclude with some open questions about critical surfaces.
\subsection{A metric on the space of strongly irreducible Heegaard splittings}
We now show how our results lead to a natural metric on the space of strongly irreducible Heegaard splittings of a non-Haken 3-manifold. The author believes that it would be of interest to understand this space better.
First, given a critical surface, $F$, we can define a larger 1-complex, $\Lambda(F)$, that contains $\Gamma(F)$ as follows: the vertices of $\Lambda(F)$ are equivalence classes of loops on $F$, where two loops are considered equivalent if there is an isotopy of $M$ taking $F$ to $F$, and one loop to the other. There is an edge connecting two vertices if there are representatives of the corresponding equivalence classes which intersect in at most a point. Recall that a vertex of $\Gamma(F)$ corresponds to an equivalence class of compressing disks for $F$. Thus, we can identify each vertex of $\Gamma(F)$ with the vertex of $\Lambda(F)$ which corresponds to the boundary of any representative disk.
Now, suppose $e_1$ and $e_2$ are two edges in $\Gamma (F)$. Define $d(e_1,e_2)$ to be the minimal length of any chain connecting $e_1$ to $e_2$ in $\Lambda(F)$. Now, given two components, $C_1$ and $C_2$, of $\Gamma (F)$, we can define $d(C_1,C_2)$, the {\it distance} between $C_1$ and $C_2$, as $\min \{d(e_1,e_2)|e_1$ is an edge in $C_1$, and $e_2$ is an edge of $C_2\}$.
Finally, suppose $H$ and $H'$ are strongly irreducible Heegaard splittings of a 3-manifold, $M$, and $F$ is their minimal genus common stabilization. If $D-E$ is a destabilization which leads from $F$ to $H$, and $D'-E'$ is a destabilization leading to $H'$, then the results of this paper show that $D-E$ and $D'-E'$ are in different components, $C$ and $C'$, of $\Gamma (F)$. We can therefore define the {\it distance} between $H$ and $H'$ as $d(C,C')$.
\begin{quest}
Can the distance between strongly irreducible Heegaard splittings be arbitrarily high? If not, is there a bound in terms of the genera of the splittings, or perhaps a universal bound?
\end{quest}
\begin{quest}
Is there an algorithm to compute the distance between two given strongly irreducible Heegaard splittings?
\end{quest}
\begin{quest}
Is there a relationship between the distance between two strongly irreducible Heegaard splittings, and the number of times one needs to stabilize the higher genus one to obtain a stabilization of the lower genus one?
\end{quest}
\begin{quest}
Is there a relationship between the distance between two strongly irreducible Heegaard splittings, and the distances of each individual splitting, in the sense of Hempel \cite{hempel:01}?
\end{quest}
\subsection{The Algorithms}
In \cite{crit2} we produce an algorithm which enumerates isotopy classes of critical surfaces. This ties critical surfaces to several open algorithmic questions in 3-manifold topology.
\noindent \underline {\it An algorithm to determine stabilization bounds.}
If one could give an algorithm which takes a critical surface, and returns all of the strongly irreducible Heegaard splittings that it destabilizes to, one may be able to produce an algorithm which takes two Heegaard splittings, and says {\it exactly} how many times they have to be stabilized before they become equivalent.
\noindent \underline {\it An algorithm to find examples where many stabilizations are required.}
Since the results of \cite{crit2} give us a way to enumerate critical surfaces, we can look for examples of manifolds with multiple minimal genus Heegaard splittings, but no critical surfaces of genus one higher. This would imply that the minimal genus Heegaard splittings require more than one stabilization to be equivalent. There is currently no known example of this.
\subsection{Topology}
As with any sort of surface in a 3-manifold, it would be nice to have restrictions on how critical surfaces may intersect other submanifolds. Here are some examples of these types of questions:
\noindent \underline {\it The stabilization conjecture.}
The stabilization conjecture asserts that if we start with two non-isotopic Heegaard splittings and stabilize the higher genus one once, we obtain a stabilization of the other. For non-Haken 3-manifolds, Theorem \ref{t:common_stab} allows us to rephrase this as follows: Suppose $F$ is a critical Heegaard surface, and $D_0-E_0$ and $D_1-E_1$ are edges in different components of $\Gamma (F)$. Let $B_i$ be a ball which contains $D_i$ and $E_i$, such that the genus of $F \cap B_i$ is maximal. If the genus of $F \cap B_0$ is greater than or equal to 2, then the genus of $F \cap B_1$ is 1.
\noindent \underline {\it Local Detection of critical surfaces.}
In \cite{scharlemann:97}, Scharlemann proves that under the right hypotheses, the intersection of a strongly irreducible Heegaard splitting with a ball must be a ``nicely embedded" punctured sphere. Is there a restriction on the topology of the intersection of a critical surface with a ball?
\noindent \underline {\it Critical surfaces as index 2 minimal surfaces.}
In \cite{fhs:83}, Freedman, Hass and Scott show that any incompressible surface can be isotoped to be a least area surface. Such surfaces are minimal surfaces of index 0. In \cite{pr:87}, Pitts and Rubinstein show that strongly irreducible surfaces can be isotoped to minimal surfaces of index 1. This motivates us to make the following conjecture:
\begin{cnj}
Any critical surface can be isotoped to be a minimal surface of index 2.
\end{cnj}
In \cite{crit2}, we prove a Piecewise-Linear analogue of this.
\end{document}
\begin{document}
\title{Bounds for modified Struve functions of the first kind and their ratios}
\author{Robert E. Gaunt\footnote{School of Mathematics, The University of Manchester, Manchester M13 9PL, UK}}
\date{\today}
\maketitle
\begin{abstract}We obtain a simple two-sided inequality for the ratio $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$ in terms of the ratio $I_\nu(x)/I_{\nu-1}(x)$, where $\mathbf{L}_\nu(x)$ is the modified Struve function of the first kind and $I_\nu(x)$ is the modified Bessel function of the first kind. This result allows one to use the extensive literature on bounds for $I_\nu(x)/I_{\nu-1}(x)$ to immediately deduce bounds for $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$. We note some consequences and obtain further bounds for $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$ by adapting techniques used to bound the ratio $I_\nu(x)/I_{\nu-1}(x)$. We apply these results to obtain new bounds for the condition numbers $x\mathbf{L}_\nu'(x)/\mathbf{L}_\nu(x)$, the ratio $\mathbf{L}_\nu(x)/\mathbf{L}_\nu(y)$ and the modified Struve function $\mathbf{L}_\nu(x)$ itself. Amongst other results, we obtain two-sided inequalities for $x\mathbf{L}_\nu'(x)/\mathbf{L}_\nu(x)$ and $\mathbf{L}_\nu(x)/\mathbf{L}_\nu(y)$ that are given in terms of $xI_\nu'(x)/I_\nu(x)$ and $I_\nu(x)/I_\nu(y)$, respectively, which again allows one to exploit the substantial literature on bounds for these quantities. The results obtained in this paper complement and improve existing bounds in the literature.
\end{abstract}
\noindent{{\bf{Keywords:}}} Modified Struve function of the first kind; bounds; ratios of modified Struve functions; condition numbers; modified Bessel function of the first kind
\noindent{{\bf{AMS 2010 Subject Classification:}}} Primary 33C20; 26D07. Secondary 33C10
\section{Introduction}\label{intro}
The ratios of modified Bessel functions $I_{\nu}(x)/I_{\nu-1}(x)$ and $K_{\nu-1}(x)/K_{\nu}(x)$ arise in many areas of the applied sciences, including epidemiology \cite{nh73}, chemical kinetics \cite{lbf03} and signal processing \cite{ks08}; see \cite{segura} and references therein for further applications. These ratios are also key computational tools in the construction of numerical algorithms for computing modified Bessel functions (see, for example, Algorithms 12.6 and 12.7 of \cite{ast07}). There is now an extensive literature on lower and upper bounds for these ratios; see \cite{amos74,b15,g32,kg13,ifantis,il07,im78, ln10,lorch, nasell,nasell2,pm50, rs16,segura,s12,soni}. There is also a considerable literature on lower and upper bounds for the ratios $I_\nu(x)/I_\nu(y)$ and $K_\nu(x)/K_\nu(y)$; see \cite{amos74, b09, baricz2, bs09, bord, ifantis, jb96, jbb, l91,lm89, paris, ross73, si95}, which has been used, for example, to obtain tight bounds for the generalized Marcum Q-function, which arises in radar signal processing \cite{b09, bs09}.
The modified Struve functions are related to the modified Bessel functions. Likewise, they arise in many applications, including leakage inductance in transformer windings \cite{hw94}, perturbation approximations of lee waves in a stratified flow \cite{mh69}, scattering of plane waves by circular cylinders \cite{s84} and lift and downwash distributions of oscillating wings in subsonic and supersonic flow \cite{w55,w54}; see \cite{bp13} for a list of further application areas.
The first detailed study of inequalities for modified Struve functions was \cite{jn98}, in which two-sided inequalities for modified Struve functions and their ratios were obtained, together with Tur\'{a}n and Wronski type inequalities. Recently, \cite{bp14} used a classical result on the monotonicity of quotients of Maclaurin series and techniques developed in the extensive study of modified Bessel functions and their ratios to obtain monotonicity results and, as a consequence, functional inequalities for the modified Struve function of the first kind $\mathbf{L}_\nu(x)$ that complement and improve the results of \cite{jn98}. Further results and a new proof of a Tur\'{a}n-type inequality for the modified Struve function of the first kind are given in \cite{bps17}, and monotonicity results and functional inequalities for the modified Struve function of the second kind $\mathbf{M}_\nu(x)=\mathbf{L}_\nu(x)-I_\nu(x)$ are given in \cite{bp142}. It should be noted that the techniques used in \cite{bp14} and \cite{bp142} to obtain functional inequalities for $\mathbf{L}_\nu(x)$ and $\mathbf{M}_\nu(x)$ are quite different (this is also commented on in \cite{g18}), which is in contrast to the literature on modified Bessel functions in which functional inequalities for $I_\nu(x)$ and $K_\nu(x)$ are often developed in parallel. For this reason, in this paper, with the exception of Remark \ref{rtyy}, we restrict our attention to the modified Struve function $\mathbf{L}_\nu(x)$.
In this paper, we obtain new bounds for the ratios $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$ and $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu}(y)$, the condition numbers $x\mathbf{L}_\nu'(x)/\mathbf{L}_\nu(x)$ and the modified Struve function $\mathbf{L}_\nu(x)$ itself. These results complement and, at least in some cases, improve those given in \cite{bp14,jn98}. Our approach is quite different, though. In Section \ref{sec2}, we obtain a simple but accurate two-sided inequality for the ratio $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$ in terms of the ratio $I_\nu(x)/I_{\nu-1}(x)$. This result is quite powerful because it allows one to exploit the extensive literature on bounds for $I_\nu(x)/I_{\nu-1}(x)$ to bound $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$. We give some examples, and complement these bounds by showing that some of the techniques from the literature used to bound $I_\nu(x)/I_{\nu-1}(x)$ can be easily adapted to bound the ratio $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$. In Section \ref{sec3}, we apply these bounds to obtain new bounds for the quantities $x\mathbf{L}_\nu'(x)/\mathbf{L}_\nu(x)$, $\mathbf{L}_\nu(x)/\mathbf{L}_\nu(y)$ and the modified Struve function $\mathbf{L}_\nu(x)$. Amongst other results, we obtain two-sided inequalities for $x\mathbf{L}_\nu'(x)/\mathbf{L}_\nu(x)$ and $\mathbf{L}_\nu(x)/\mathbf{L}_\nu(y)$ that are given in terms of $xI_\nu'(x)/I_\nu(x)$ and $I_\nu(x)/I_\nu(y)$, respectively, which again allows one to exploit the substantial literature on these quantities. Through a combination of asymptotic analysis of the bounds and numerical results, we find that, in spite of their simple form, the bounds obtained in this paper are quite accurate and often tight in certain limits.
\section{Upper and lower bounds for the ratio $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$}\label{sec2}
\subsection{Bounding $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$ via bounds for $I_\nu(x)/I_{\nu-1}(x)$}\label{sec2.2}
In this section, we obtain a simple but accurate double inequality for the ratio $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$ in terms of the ratio $I_\nu(x)/I_{\nu-1}(x)$. The modified Bessel and modified Struve functions $I_\nu(x)$ and $\mathbf{L}_\nu(x)$ are closely related functions that have the following power series representations (see \cite{olver} for these and the forthcoming properties):
\begin{align}\label{idefn}I_\nu(x)&=\sum_{n=0}^\infty\frac{(\frac{1}{2}x)^{2n+\nu}}{n!\Gamma(n+\nu+1)}, \\
\label{ldefn}\mathbf{L}_\nu(x)&=\sum_{n=0}^\infty\frac{(\frac{1}{2}x)^{2n+\nu+1}}{\Gamma(n+\frac{3}{2})\Gamma(n+\nu+\frac{3}{2})}.
\end{align}
It is immediate from these series representations, and the standard formulas $\Gamma(\frac{3}{2})=\frac{\sqrt{\pi}}{2}$ and $t\Gamma(t)=\Gamma(t+1)$ that, as $x\downarrow0$,
\begin{align}\label{itend0}I_\nu(x)&\sim \frac{x^\nu}{2^\nu\Gamma(\nu+1)}, \quad \nu>-1,\\
\label{ltend0}\mathbf{L}_\nu(x)&\sim \frac{x^{\nu+1}}{\sqrt{\pi}2^{\nu}\Gamma(\nu+\frac{3}{2})}\bigg(1+\frac{x^2}{3(2\nu+3)}\bigg), \quad \nu>-\tfrac{3}{2},
\end{align}
and both functions have very similar behaviour as $x\rightarrow\infty$:
\begin{align}\label{iinfty}I_\nu(x)&\sim\frac{\mathrm{e}^x}{\sqrt{2\pi x}}\bigg(1-\frac{4\nu^2-1}{8x}+\frac{(4\nu^2-1)(4\nu^2-9)}{128x^2}\bigg), \quad \nu\in\mathbb{R}, \\
\label{linfty}\mathbf{L}_\nu(x)&\sim\frac{\mathrm{e}^x}{\sqrt{2\pi x}}\bigg(1-\frac{4\nu^2-1}{8x}+\frac{(4\nu^2-1)(4\nu^2-9)}{128x^2}\bigg),\quad \nu\in\mathbb{R}.
\end{align}
The modified Struve function $\mathbf{L}_\nu(x)$ satisfies the relations
\begin{align}\label{struveid1}\mathbf{L}_{\nu-1}(x)-\mathbf{L}_{\nu+1}(x)&=\frac{2\nu}{x}\mathbf{L}_\nu(x)+a_\nu(x), \\
\label{struveid2}\mathbf{L}_{\nu-1}(x)+\mathbf{L}_{\nu+1}(x)&=2\mathbf{L}_\nu'(x)-a_\nu(x),
\end{align}
where $a_\nu(x)=\frac{(\frac{1}{2}x)^\nu}{\sqrt{\pi}\Gamma(\nu+\frac{3}{2})}$. The modified Bessel function $I_\nu(x)$ satisfies the same relations but without the $a_\nu(x)$ term. For $\nu>-\frac{3}{2}$, it will be useful to define the function $b_\nu(x):(0,\infty)\rightarrow(0,\frac{1}{2})$ by
\begin{equation}\label{bdefn}b_\nu(x):=\frac{xa_\nu(x)}{2\mathbf{L}_\nu(x)}=\frac{(\frac{1}{2}x)^{\nu+1}}{\sqrt{\pi}\Gamma(\nu+\frac{3}{2})\mathbf{L}_\nu(x)}.
\end{equation}
This function will appear throughout this paper, and we collect some useful basic properties in the following lemma.
\begin{lemma}\label{blemma}The following assertions are true:
\noindent (i) For $\nu>-\frac{3}{2}$,
\begin{align}\label{bnu0}b_\nu(x)&\sim\frac{1}{2}-\frac{x^2}{6(2\nu+3)}, \quad x\downarrow0, \\
\label{bnu1}b_\nu(x)&\sim \frac{x^{\nu+3/2}\mathrm{e}^{-x}}{2^{\nu+1/2}\Gamma(\nu+\frac{3}{2})}, \quad x\rightarrow\infty.
\end{align}
\noindent (ii) For fixed $x>0$, $b_\nu(x)$ increases as $\nu$ increases in the interval $(-\frac{3}{2},\infty)$.
\noindent (iii) If $\nu>-\frac{3}{2}$, then $b_\nu(x)$ is a decreasing function of $x$ in $(0,\infty)$. Therefore, for $x>0$,
\begin{equation*}b_\nu(x)<b_\nu(0^+)=\frac{1}{2}, \quad \nu>-\tfrac{3}{2}.
\end{equation*}
This inequality can be improved further to
\begin{equation}\label{bcrude2}b_\nu(x)<\frac{1}{2}\bigg(1+\frac{x^2}{3(2\nu+3)}\bigg)^{-1}, \quad \nu>-\tfrac{3}{2}.
\end{equation}
\noindent (iv) For $x>0$,
\begin{equation}\frac{x}{2}\mathrm{csch}(x)\label{star5}\leq b_\nu(x)<\frac{x}{4}\mathrm{csch}\bigg(\frac{x}{2\nu+3}\bigg),
\end{equation}
where the lower and upper bounds are valid for $\nu\geq-\frac{1}{2}$ and $\nu>-1$, respectively. We have equality in the lower bound if and only if $\nu=-\frac{1}{2}$, and the lower bound is reversed if $-\frac{3}{2}<\nu<-\frac{1}{2}$.
\end{lemma}
\begin{proof}(i) The expansion (\ref{bnu0}) can be obtained by using the asymptotic expansion (\ref{ltend0}), and (\ref{bnu1}) follows because $\mathbf{L}_\nu(x)\sim\frac{\mathrm{e}^x}{\sqrt{2\pi x}}$, as $x\rightarrow\infty$.
\noindent (ii) This is immediate from part (v) of Theorem 2.2 of \cite{bp14}.
\noindent (iii) It is clear from the series representation (\ref{ldefn}) that $1/b_\nu(x)$ is an increasing function of $x$ in $(0,\infty)$, and so $b_\nu(x)$ is a decreasing function of $x$ in $(0,\infty)$. That $b_\nu(0^+)=\frac{1}{2}$ also follows from (\ref{ldefn}). Inequality (\ref{bcrude2}) follows from truncating the series expansion of $\mathbf{L}_\nu(x)$ at the second term.
\noindent (iv) It was shown in \cite{bp14} that
\begin{equation*}\mathbf{L}_\nu(x)>\frac{x^\nu\sinh\big(\frac{x}{2\nu+3}\big)}{\sqrt{\pi}2^{\nu-1}\Gamma(\nu+\frac{3}{2})},\quad\nu>-1 \quad \text{and} \quad \mathbf{L}_{\nu}(x)\leq \frac{x^\nu\sinh(x)}{\sqrt{\pi}2^\nu\Gamma(\nu+\frac{3}{2})}, \quad \nu\geq-\tfrac{1}{2},
\end{equation*}
where there is equality in the second inequality if and only if $\nu=-\frac{1}{2}$ and the inequality is reversed if $-\frac{3}{2}<\nu<-\frac{1}{2}$. Applying these inequalities to (\ref{bdefn}) yields inequality (\ref{star5}).
\end{proof}
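The behaviour of $b_\nu(x)$ described in Lemma \ref{blemma} is also easy to examine numerically. The following short Python sketch (using SciPy's implementation of $\mathbf{L}_\nu$, \texttt{scipy.special.modstruve}; it is an illustration only and plays no role in any of the proofs) evaluates $b_\nu(x)$ directly from (\ref{bdefn}) and spot-checks the monotonicity in $x$ and the bound $b_\nu(x)<\frac{1}{2}$ of part (iii).
\begin{verbatim}
# Spot-check of b_nu(x) = (x/2)^(nu+1) / (sqrt(pi) Gamma(nu+3/2) L_nu(x)):
# it should be strictly decreasing in x and bounded above by 1/2.
import numpy as np
from scipy.special import gamma, modstruve

def b(nu, x):
    return (0.5 * x) ** (nu + 1) / (np.sqrt(np.pi) * gamma(nu + 1.5)
                                    * modstruve(nu, x))

for nu in (0.0, 0.5, 2.5):
    vals = [b(nu, x) for x in (0.1, 1.0, 5.0, 10.0)]
    assert all(v < 0.5 for v in vals)                  # part (iii)
    assert all(u > v for u, v in zip(vals, vals[1:]))  # decreasing in x
\end{verbatim}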
We now move on to the problem of bounding the ratio $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$. To this end, we prove the following theorem, which gives a two-sided inequality for $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$ in terms of the ratio $I_\nu(x)/I_{\nu-1}(x)$.
\begin{theorem}\label{thmil}(i) For $x>0$,
\begin{align}\label{bounddo}I_{\nu}(x)\mathbf{L}_{\nu-1}(x)-I_{\nu-1}(x)\mathbf{L}_{\nu}(x)>0, \quad \nu\geq\tfrac{1}{2},
\end{align}
and
\begin{align}\label{bound456}I_{\nu}(x)\mathbf{L}_{\nu-1}(x)-I_{\nu-1}(x)\mathbf{L}_{\nu}(x)& <\frac{(\frac{1}{2}x)^{\nu}I_{\nu}(x)}{\sqrt{\pi}\Gamma(\nu+\frac{3}{2})}, \quad \nu\geq-\tfrac{1}{2},\\
\label{bound789}I_{\nu}(x)\mathbf{L}_{\nu-1}(x)-I_{\nu-1}(x)\mathbf{L}_{\nu}(x)&<\frac{(\frac{1}{2}x)^{\nu-1}I_{\nu-1}(x)}{\sqrt{\pi}\Gamma(\nu+\frac{1}{2})}, \quad \nu\geq\tfrac{3}{2}.
\end{align}
\noindent (ii) For $x>0$,
\begin{equation}\label{firstcor1}\bigg(\frac{I_{\nu-1}(x)}{I_\nu(x)}+\frac{2b_\nu(x)}{x}\bigg)^{-1}<\frac{\mathbf{L}_{\nu}(x)}{\mathbf{L}_{\nu-1}(x)}<\frac{I_{\nu}(x)}{I_{\nu-1}(x)},
\end{equation}
where the lower bound is valid for $\nu\geq0$ and the upper bound is valid for $\nu\geq\frac{1}{2}$.
\end{theorem}
In our proof, we shall make use of the following standard result on stochastic ordering of random variables \cite{stoch}.
\begin{lemma}\label{dfbgdf}Let $X$ and $Y$ be real-valued random variables. Suppose that $\mathbb{P}(X>x)\leq\mathbb{P}(Y>x)$ for all $x\in\mathbb{R}$, and that additionally $\mathbb{P}(X>y)<\mathbb{P}(Y>y)$ for some $y\in\mathbb{R}$. That is, $X$ is \emph{stochastically strictly less than} $Y$. Then, for all bounded, strictly increasing functions $f:\mathbb{R}\rightarrow\mathbb{R}$,
\begin{equation*}\mathbb{E}[f(X)]<\mathbb{E}[f(Y)].
\end{equation*}
In particular, if additionally $X$ and $Y$ are nonnegative with bounded support, then
\begin{equation*}\mathbb{E}[X^2]<\mathbb{E}[Y^2].
\end{equation*}
\end{lemma}
\noindent{\emph{Proof of Theorem \ref{thmil}}.} (i) We first prove inequality (\ref{bounddo}). Suppose $\nu>\frac{1}{2}$; we shall deal with the case $\nu=\frac{1}{2}$ later. From \cite[10.32.2, 11.5.6]{olver} we have the integral representations, for $\nu>-\frac{1}{2}$,
\begin{align}\label{Iint5}I_\nu(x)&=\frac{2(\frac{1}{2}x)^\nu}{\sqrt{\pi}\Gamma(\nu+\frac{1}{2})}\int_0^1(1-t^2)^{\nu-\frac{1}{2}}\cosh(xt)\,\mathrm{d}t , \\
\label{Lint5}\mathbf{L}_{\nu}(x)&=\frac{2(\frac{1}{2}x)^\nu}{\sqrt{\pi}\Gamma(\nu+\frac{1}{2})}\int_0^1(1-t^2)^{\nu-\frac{1}{2}}\sinh(xt)\,\mathrm{d}t .
\end{align}
We thus obtain, for $\nu>\frac{1}{2}$,
\begin{align*}&I_{\nu}(x)\mathbf{L}_{\nu-1}(x)-I_{\nu-1}(x)\mathbf{L}_{\nu}(x) \\
&=\frac{4(\frac{1}{2}x)^{2\nu-1}}{\pi\Gamma(\nu-\frac{1}{2})\Gamma(\nu+\frac{1}{2})}\bigg[\int_0^1\!\int_0^1(1-t^2)^{\nu-\frac{1}{2}}(1-s^2)^{\nu-\frac{3}{2}}\cosh(xt)\sinh(xs)\,\mathrm{d}t\,\mathrm{d}s \\
&\quad-\int_0^1\!\int_0^1(1-t^2)^{\nu-\frac{3}{2}}(1-s^2)^{\nu-\frac{1}{2}}\cosh(xt)\sinh(xs)\,\mathrm{d}t\,\mathrm{d}s\bigg] \\
&=\frac{4(\frac{1}{2}x)^{2\nu-1}}{\pi\Gamma(\nu-\frac{1}{2})\Gamma(\nu+\frac{1}{2})}\int_0^1\!\int_0^1(1-t^2)^{\nu-\frac{3}{2}}(1-s^2)^{\nu-\frac{3}{2}}(s^2-t^2)\cosh(xt)\sinh(xs)\,\mathrm{d}t\,\mathrm{d}s \\
&=\frac{4(\frac{1}{2}x)^{2\nu-1}}{\pi\Gamma(\nu-\frac{1}{2})\Gamma(\nu+\frac{1}{2})}\bigg[\int_0^1f_\nu(x,t)\,\mathrm{d}t\int_0^1s^2g_\nu(x,s)\,\mathrm{d}s-\int_0^1t^2f_\nu(x,t)\,\mathrm{d}t\int_0^1g_\nu(x,s)\,\mathrm{d}s\bigg],
\end{align*}
where
\begin{align*}f_\nu(x,t):=(1-t^2)^{\nu-\frac{3}{2}}\cosh(xt), \quad g_\nu(x,t):=(1-t^2)^{\nu-\frac{3}{2}}\sinh(xt).
\end{align*}
Proving that $I_{\nu}(x)\mathbf{L}_{\nu-1}(x)-I_{\nu-1}(x)\mathbf{L}_{\nu}(x)>0$ for all $x>0$ is thus equivalent to proving that
\begin{align}\label{stovb}\frac{\int_0^1 t^2f_\nu(x,t)\,\mathrm{d}t}{\int_0^1f_\nu(x,t)\,\mathrm{d}t}<\frac{\int_0^1 t^2g_\nu(x,t)\,\mathrm{d}t}{\int_0^1g_\nu(x,t)\,\mathrm{d}t}, \quad \text{for all $x>0$.}
\end{align}
Now let $X$ and $Y$ be random variables supported on $(0,1)$, with probability density functions $f_\nu(x,t)/\int_0^1f_\nu(x,s)\,\mathrm{d}s$ and $g_\nu(x,t)/\int_0^1g_\nu(x,s)\,\mathrm{d}s$, respectively. Therefore (\ref{stovb}) can be written as
\begin{equation}\label{bvhsb6}\mathbb{E}[X^2]<\mathbb{E}[Y^2].
\end{equation}
But, since, for fixed $x>0$ and $\nu>\frac{1}{2}$, the ratio $f_\nu(x,t)/g_\nu(x,t)$ is strictly decreasing in $t$ on the interval $(0,1)$ (in the probability literature, one would say that $X$ is less than $Y$ according to likelihood ratio ordering \cite{stoch}), we have, for $z\in(0,1)$,
\begin{align*}\mathbb{P}(X>z)=\frac{\int_z^1 f_\nu(x,t)\,\mathrm{d}t}{\int_0^1f_\nu(x,t)\,\mathrm{d}t}<\frac{\int_z^1 g_\nu(x,t)\,\mathrm{d}t}{\int_0^1g_\nu(x,t)\,\mathrm{d}t}=\mathbb{P}(Y>z).
\end{align*}
Therefore (\ref{bvhsb6}) holds due to Lemma \ref{dfbgdf}, meaning that $I_{\nu}(x)\mathbf{L}_{\nu-1}(x)-I_{\nu-1}(x)\mathbf{L}_{\nu}(x)>0$ for all $x>0$, if $\nu>\frac{1}{2}$.
Now, we consider the case $\nu=\frac{1}{2}$. From \cite[10.49(ii), 11.4(i)]{olver} we have the formulas
\begin{align}&I_{\frac{1}{2}}(x)=\sqrt{\frac{2}{\pi x}}\sinh(x), \quad \mathbf{L}_{\frac{1}{2}}(x)=\sqrt{\frac{2}{\pi x}}\big(\cosh(x)-1\big), \nonumber \\
\label{sph9}&I_{-\frac{1}{2}}(x)=\sqrt{\frac{2}{\pi x}}\cosh(x), \quad \mathbf{L}_{-\frac{1}{2}}(x)=\sqrt{\frac{2}{\pi x}}\sinh(x).
\end{align}
Using these formulas and the standard identity $\cosh^2(x)-\sinh^2(x)=1$ we have that
\[I_{\frac{1}{2}}(x)\mathbf{L}_{-\frac{1}{2}}(x)-I_{-\frac{1}{2}}(x)\mathbf{L}_{\frac{1}{2}}(x)=\frac{2}{\pi x}\big(\cosh(x)-1\big)>0.\]
We now deduce inequalities (\ref{bound456}) and (\ref{bound789}) from (\ref{bounddo}). Substituting the relations
\begin{align*}I_{\nu+1}(x)=I_{\nu-1}(x)-\frac{2\nu}{x}I_{\nu}(x),\quad \mathbf{L}_{\nu+1}(x)=\mathbf{L}_{\nu-1}(x)-\frac{2\nu}{x}\mathbf{L}_{\nu}(x)-\frac{(\frac{1}{2}x)^{\nu}}{\sqrt{\pi} \Gamma(\nu+\frac{3}{2})}
\end{align*}
into the inequality $I_{\nu+1}(x)\mathbf{L}_{\nu}(x)-I_{\nu}(x)\mathbf{L}_{\nu+1}(x)>0$ gives inequality (\ref{bound456}). Similarly, on substituting the relations
\begin{align*}I_{\nu-1}(x)=I_{\nu+1}(x)+\frac{2\nu}{x}I_{\nu}(x), \quad \mathbf{L}_{\nu-1}(x)=\mathbf{L}_{\nu+1}(x)+\frac{2\nu}{x}\mathbf{L}_\nu(x)+\frac{(\frac{1}{2}x)^{\nu}}{\sqrt{\pi} \Gamma(\nu+\frac{3}{2})}
\end{align*}
into the inequality $I_{\nu}(x)\mathbf{L}_{\nu-1}(x)-I_{\nu-1}(x)\mathbf{L}_{\nu}(x)>0,$ and then replacing $\nu$ by $\nu-1$, we deduce inequality (\ref{bound789}).
\noindent (ii) The two-sided inequality follows from rearranging inequalities (\ref{bounddo}) and (\ref{bound456}) using the facts that $I_\nu(x)>0$ and $\mathbf{L}_\nu(x)>0$ for all $x>0$ if $\nu\geq-1$, and that $b_\nu(x)=\frac{xa_\nu(x)}{2\mathbf{L}_\nu(x)}$.
$\Box$
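Before moving on, we record a small numerical sanity check of part (i). The following Python sketch (via SciPy's \texttt{iv} and \texttt{modstruve}; it is illustrative only and forms no part of the argument) verifies (\ref{bounddo}) and (\ref{bound456}) at a few sample points within the stated ranges of $\nu$.
\begin{verbatim}
# Sample check of 0 < I_nu L_{nu-1} - I_{nu-1} L_nu < a_nu(x) I_nu(x),
# with a_nu(x) = (x/2)^nu / (sqrt(pi) Gamma(nu+3/2)).
import numpy as np
from scipy.special import gamma, iv, modstruve

def a(nu, x):
    return (0.5 * x) ** nu / (np.sqrt(np.pi) * gamma(nu + 1.5))

for nu in (1.0, 1.5, 4.0):
    for x in (0.5, 3.0, 12.0):
        diff = (iv(nu, x) * modstruve(nu - 1, x)
                - iv(nu - 1, x) * modstruve(nu, x))
        assert 0.0 < diff < a(nu, x) * iv(nu, x)
\end{verbatim}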
\begin{remark}
Numerical experiments carried out with \emph{Mathematica} suggest that inequality (\ref{bounddo}) of Theorem \ref{thmil} holds for all $\nu\geq-\frac{1}{2}$ (which would mean that the upper bound of (\ref{firstcor1}) would hold for all $\nu\geq0$). It should be noted that the parameter range in the two-sided inequality (\ref{firstcor1}) is sufficient for the purposes of this paper, but extending the range may be useful in other applications. An alternative method will be needed for $-\frac{1}{2}\leq\nu<\frac{1}{2}$, because the integral representations (\ref{Iint5}) and (\ref{Lint5}) are not valid for such $\nu$. The case $\nu=-\frac{1}{2}$ is straightforward to verify, though. From \cite[10.49(ii), 11.4(i)]{olver} we have the formulas
\begin{align*}I_{-\frac{3}{2}}(x)=\sqrt{\frac{2}{\pi x}}\bigg(\sinh(x)-\frac{\cosh(x)}{x}\bigg), \quad \mathbf{L}_{-\frac{3}{2}}(x)=\sqrt{\frac{2}{\pi x}}\bigg(\cosh(x)-\frac{\sinh(x)}{x}\bigg).
\end{align*}
Using (\ref{sph9}), these formulas and the standard identity $\cosh^2(x)-\sinh^2(x)=1$ gives
\[I_{-\frac{1}{2}}(x)\mathbf{L}_{-\frac{3}{2}}(x)-I_{-\frac{3}{2}}(x)\mathbf{L}_{-\frac{1}{2}}(x)=\frac{2}{\pi x}>0.\]
\end{remark}
\begin{remark}\label{rtyy}We can use the formula $\mathbf{M}_\nu(x)=\mathbf{L}_\nu(x)-I_\nu(x)$ to write
\[I_{\nu}(x)\mathbf{L}_{\nu-1}(x)-I_{\nu-1}(x)\mathbf{L}_{\nu}(x)=I_{\nu}(x)\mathbf{M}_{\nu-1}(x)-I_{\nu-1}(x)\mathbf{M}_{\nu}(x).\]
Therefore we also have the double inequality
\begin{equation*}0<I_{\nu}(x)\mathbf{M}_{\nu-1}(x)-I_{\nu-1}(x)\mathbf{M}_{\nu}(x)<\frac{(\frac{1}{2}x)^{\nu}I_{\nu}(x)}{\sqrt{\pi}\Gamma(\nu+\frac{3}{2})},
\end{equation*}
where the lower bound is valid for $\nu\geq\frac{1}{2}$ and the upper bound is valid for $\nu\geq-\frac{1}{2}$. Now, if $\nu\geq-\frac{1}{2}$, we have $\mathbf{M}_\nu(x)<0$ for all $x>0$ (see \cite[11.5.4]{olver} for the case $\nu>-\frac{1}{2}$, and the formulas in (\ref{sph9}) for the case $\nu=-\frac{1}{2}$). Therefore, rearranging the above lower bound gives that, for $x>0$,
\begin{equation*}\frac{\mathbf{M}_{\nu}(x)}{\mathbf{M}_{\nu-1}(x)}>\frac{I_{\nu}(x)}{I_{\nu-1}(x)}, \quad \nu\geq\tfrac{1}{2}.
\end{equation*}
Since $\mathbf{M}_\nu(x)=\mathbf{L}_\nu(x)-I_\nu(x)$ and $I_\nu(x)/\mathbf{L}_\nu(x)\gg1$, as $x\downarrow0$, for $\nu\geq-\frac{1}{2}$, it follows that this inequality is tight as $x\downarrow0$. However, from the asymptotic formula \cite[11.6.2]{olver}
\begin{equation*}\mathbf{M}_\nu(x)\sim-\frac{(\frac{1}{2}x)^{\nu-1}}{\sqrt{\pi}\Gamma(\nu+\frac{1}{2})}, \quad \nu>-\tfrac{1}{2},
\end{equation*}
as $x\rightarrow\infty$, and the asymptotic formula (\ref{iinfty}), we have that, as $x\rightarrow\infty$, $\mathbf{M}_\nu(x)/\mathbf{M}_{\nu-1}(x)=O(x)$ for $\nu\geq-\frac{1}{2}$, whereas $I_\nu(x)/I_{\nu-1}(x)=O(1)$ for all $\nu\in\mathbb{R}$.
\end{remark}
\begin{remark}Let $l_\nu^a(x)$, $u_\nu^a(x)$ denote the lower and upper bounds of the double inequality (\ref{firstcor1}) and let $h_\nu(x)=\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$. The double inequality (\ref{firstcor1}) is tight as $x\rightarrow\infty$. Indeed, from the asymptotic formulas (\ref{iinfty}) and (\ref{bnu1}), we have, as $x\rightarrow\infty$,
\begin{equation*}\frac{u_\nu^a(x)}{l_\nu^a(x)}-1=\frac{2b_\nu(x)}{x}\frac{I_\nu(x)}{I_{\nu-1}(x)}=O(x^{\nu+1/2}\mathrm{e}^{-x}).
\end{equation*}
From the asymptotic formulas (\ref{itend0}) and (\ref{ltend0}) and the fact that $b_\nu(0^+)=\frac{1}{2}$, we have
\begin{equation*}1-\lim_{x\downarrow0}\frac{l_\nu^a(x)}{h_\nu(x)}=0, \quad \lim_{x\downarrow0}\frac{u_\nu^a(x)}{h_\nu(x)}-1=\frac{1}{2\nu},
\end{equation*}
and so the relative error in approximating $h_\nu(x)$ by $l_\nu^a(x)$ is 0 in the limit $x\downarrow0$, and the relative error in approximating $h_\nu(x)$ by $u_\nu^a(x)$ in the limit $x\downarrow0$ decreases as $\nu$ increases, and the bound is tight as $\nu\rightarrow\infty$. Further insight into the accuracy of these bounds can be gained from Tables \ref{table1} and \ref{table11}. Despite their simple form, the bounds can be seen to be quite accurate, with a relative error of less than $0.01$ for both bounds when $x\geq10$ for the values of $\nu$ we considered. This accuracy is a consequence of the exponential decay of $b_\nu(x)$.
\end{remark}
\begin{table}[h]
\centering
\caption{\footnotesize{Relative error in approximating $\mathbf{L}_{\nu}(x)/\mathbf{L}_{\nu-1}(x)$ by $\big(I_{\nu-1}(x)/I_\nu(x)+2b_\nu(x)/x\big)^{-1}$.}}
\label{table1}
{\scriptsize
\begin{tabular}{|c|rrrrrrrrr|}
\hline
\backslashbox{$\nu$}{$x$} & 0 & 0.5 & 1 & 2.5 & 5 & 7.5 & 10 & 15 & 25 \\
\hline
$0$ & 0.0000 & 0.0355 & 0.0947 & 0.1073 & 0.0196 & 0.0022 & 0.0002 & 0.0000 & 0.0000 \\
$0.5$ & 0.0000 & 0.0097 & 0.0312 & 0.0623 & 0.0200 & 0.0030 & 0.0003 & 0.0000 & 0.0000 \\
1 & 0.0000 & 0.0040 & 0.0138 & 0.0377 & 0.0186 & 0.0037 & 0.0005 & 0.0000 & 0.0000 \\
2.5 & 0.0000 & 0.0007 & 0.0027 & 0.0112 & 0.0122 & 0.0047 & 0.0011 & 0.0000 & 0.0000 \\
5 & 0.0000 & 0.0001 & 0.0006 & 0.0028 & 0.0054 & 0.0039 & 0.0016 & 0.0001 & 0.0000 \\
7.5 & 0.0000 & 0.0000 & 0.0002 & 0.0011 & 0.0026 & 0.0026 & 0.0016 & 0.0002 & 0.0000 \\
10 & 0.0000 & 0.0000 & 0.0001 & 0.0005 & 0.0014 & 0.0018 & 0.0013 & 0.0003 & 0.0000 \\
\hline
\end{tabular}}
\end{table}
\begin{table}[h]
\centering
\caption{\footnotesize{Relative error in approximating $\mathbf{L}_{\nu}(x)/\mathbf{L}_{\nu-1}(x)$ by $I_{\nu}(x)/I_{\nu-1}(x)$.}}
\label{table11}
{\scriptsize
\begin{tabular}{|c|rrrrrrrrr|}
\hline
\backslashbox{$\nu$}{$x$} & 0 & 0.5 & 1 & 2.5 & 5 & 7.5 & 10 & 15 & 25 \\
\hline
$0$ & $\infty$ & 7.7021 & 1.7232 & 0.1394 & 0.0061 & 0.0004 & 0.0000 & 0.0000 & 0.0000 \\
$0.5$ & 1.0000 & 0.8868 & 0.6481 & 0.1631 & 0.0135 & 0.0011 & 0.0001 & 0.0000 & 0.0000 \\
1 & 0.5000 & 0.4711 & 0.3981 & 0.1587 & 0.0206 & 0.0022 & 0.0002 & 0.0000 & 0.0000 \\
2.5 & 0.2000 & 0.1957 & 0.1833 & 0.1200 & 0.0344 & 0.0066 & 0.0010 & 0.0000 & 0.0000 \\
5 & 0.1000 & 0.0990 & 0.0961 & 0.0780 & 0.0387 & 0.0132 & 0.0034 & 0.0001 & 0.0000 \\
7.5 & 0.0667 & 0.0662 & 0.0650 & 0.0568 & 0.0354 & 0.0165 & 0.0059 & 0.0004 & 0.0000 \\
10 & 0.0500 & 0.0498 & 0.0491 & 0.0445 & 0.0313 & 0.0175 & 0.0078 & 0.0009 & 0.0000 \\
\hline
\end{tabular}}
\end{table}
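The entries of Tables \ref{table1} and \ref{table11} can be reproduced with a few lines of Python. The sketch below is illustrative only and assumes that the tabulated relative error is $|\mathrm{bound}/h_\nu(x)-1|$, where $h_\nu(x)=\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$; it recomputes the entries for $\nu=1$, $x=2.5$.
\begin{verbatim}
# Reproducing the nu = 1, x = 2.5 entries of Tables 1 and 2; under the
# stated convention the printed values should agree with 0.0377 and
# 0.1587 up to rounding.
import numpy as np
from scipy.special import gamma, iv, modstruve

def b(nu, x):
    return (0.5 * x) ** (nu + 1) / (np.sqrt(np.pi) * gamma(nu + 1.5)
                                    * modstruve(nu, x))

nu, x = 1.0, 2.5
h = modstruve(nu, x) / modstruve(nu - 1, x)
lower = 1.0 / (iv(nu - 1, x) / iv(nu, x) + 2.0 * b(nu, x) / x)
upper = iv(nu, x) / iv(nu - 1, x)
print(abs(lower / h - 1.0), abs(upper / h - 1.0))
\end{verbatim}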
The double inequality (\ref{firstcor1}) allows one to exploit the substantial literature on bounds for the ratio $I_\nu(x)/I_{\nu-1}(x)$ to bound the ratio $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$. Because of the accuracy of the double inequality (\ref{firstcor1}), the accuracy of the resulting bounds for $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$ will be similar to that of the initial bounds for $I_\nu(x)/I_{\nu-1}(x)$. In the following corollary, we note some examples.
\begin{corollary}\label{bghj}For $x>0$,
\begin{equation}\label{upper1}\frac{x}{\nu-\frac{1}{2}+2b_{\nu}(x)+\sqrt{\big(\nu+\frac{1}{2}\big)^2+x^2}}<\frac{\mathbf{L}_{\nu}(x)}{\mathbf{L}_{\nu-1}(x)}<\frac{x}{\nu-\frac{1}{2}+\sqrt{\big(\nu-\frac{1}{2}\big)^2+x^2}},
\end{equation}
where the lower bound holds for $\nu\geq0$ and the upper bound holds for $\nu\geq\frac{1}{2}$. Also,
\begin{equation}\label{lowtanh}\frac{\mathbf{L}_{\nu}(x)}{\mathbf{L}_{\nu-1}(x)}>\frac{x\tanh(x)}{x+(2\nu-1)\tanh(x)+2b_\nu(x)\tanh(x)}, \quad \nu>\tfrac{1}{2}.
\end{equation}
\end{corollary}
\begin{proof}It was shown in \cite{segura} (see also \cite{amos74,ln10,rs16}) that, for $x>0$,
\[\frac{x}{\nu-\frac{1}{2}+\sqrt{\big(\nu+\frac{1}{2}\big)^2+x^2}}<\frac{I_{\nu}(x)}{I_{\nu-1}(x)}<\frac{x}{\nu-\frac{1}{2}+\sqrt{\big(\nu-\frac{1}{2}\big)^2+x^2}},\]
where the lower bound holds for $\nu\geq0$ and the upper bound holds for $\nu\geq\frac{1}{2}$, and it was shown in \cite{ifantis} that
\[\frac{I_{\nu}(x)}{I_{\nu-1}(x)}>\frac{x\tanh(x)}{x+(2\nu-1)\tanh(x)}, \quad \nu>\tfrac{1}{2}.\]
Now combine these bounds with inequality (\ref{firstcor1}) of Theorem \ref{thmil}.
\end{proof}
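As with the bounds of Section \ref{sec2.2}, the inequalities of Corollary \ref{bghj} are easy to spot-check numerically; a minimal Python sketch (illustrative only, again via SciPy) is given below.
\begin{verbatim}
# Sample check of the two-sided inequality (upper1) and of the lower
# bound (lowtanh) for the ratio L_nu(x)/L_{nu-1}(x).
import numpy as np
from scipy.special import gamma, iv, modstruve

def b(nu, x):
    return (0.5 * x) ** (nu + 1) / (np.sqrt(np.pi) * gamma(nu + 1.5)
                                    * modstruve(nu, x))

for nu in (1.0, 2.5, 5.0):
    for x in (0.5, 5.0, 20.0):
        h = modstruve(nu, x) / modstruve(nu - 1, x)
        lo = x / (nu - 0.5 + 2.0 * b(nu, x)
                  + np.sqrt((nu + 0.5) ** 2 + x ** 2))
        hi = x / (nu - 0.5 + np.sqrt((nu - 0.5) ** 2 + x ** 2))
        t = np.tanh(x)
        lo2 = x * t / (x + (2 * nu - 1) * t + 2.0 * b(nu, x) * t)
        assert lo < h < hi and lo2 < h
\end{verbatim}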
\begin{remark}The lower bound (\ref{lowtanh}) complements the following upper bound of \cite{bp14}:
\begin{equation}\label{bpineq3}\frac{\mathbf{L}_{\nu}(x)}{\mathbf{L}_{\nu-1}(x)}\leq\frac{\cosh(x)-1}{\sinh(x)}=\tanh\bigg(\frac{x}{2}\bigg), \quad
x>0,\:\nu\geq\tfrac{1}{2},
\end{equation}
where we have equality if and only if $\nu=\frac{1}{2}$. It should also be noted that a more complicated bound, valid for $\nu\geq\frac{3}{2}$, which improves on (\ref{bpineq3}) is given by inequality (3.1) of \cite{bp14}.
Let us compare our bound (\ref{upper1}) with (\ref{bpineq3}). We used \emph{Mathematica} to observe that if $\nu>\frac{3}{2}$, then $x/({\nu-\frac{1}{2}+\sqrt{(\nu-\frac{1}{2})^2+x^2}})<\tanh\big(\frac{x}{2}\big)$ for all $x>0$. (We checked the case $\nu=\frac{3}{2}$, since if the inequality holds for this value of $\nu$ then it must hold for all $\nu>\frac{3}{2}$.) An asymptotic analysis also shows that when $\frac{1}{2}<\nu<\frac{3}{2}$ inequality (\ref{bpineq3}) performs better in the limit $x\downarrow0$, whilst inequality (\ref{upper1}) is better as $x\rightarrow\infty$. We used \emph{Mathematica} to find the value $x_\nu^*$ at which the two upper bounds are equal. We find $x_{\frac{5}{8}}^*=4.21$, $x_{\frac{3}{4}}^*=3.26$, $x_{\frac{7}{8}}^*=2.66$, $x_{1}^*=2.18$, $x_{\frac{9}{8}}^*=1.76$, $x_{\frac{5}{4}}^*=1.35$, $x_{\frac{11}{8}}^*=0.91$.
\end{remark}
\begin{remark}Since $b_\nu(x)<\frac{1}{2}$ for all $x>0$, we have the following simpler two-sided inequality:
\begin{equation*}\frac{x}{\nu+\frac{1}{2}+\sqrt{\big(\nu+\frac{1}{2}\big)^2+x^2}}<\frac{\mathbf{L}_{\nu}(x)}{\mathbf{L}_{\nu-1}(x)}<\frac{x}{\nu-\frac{1}{2}+\sqrt{\big(\nu-\frac{1}{2}\big)^2+x^2}},
\end{equation*}
with the same range of validity as (\ref{upper1}). Similar such simplifications can be made to all bounds given in this paper.
\end{remark}
\begin{remark}We will use the lower and upper bounds of (\ref{upper1}) throughout this paper. It is therefore useful to gain some insight into the quality of the approximation. Denote the lower and upper bounds by $l_\nu^b(x)$ and $u_\nu^b(x)$, respectively, and write $h_\nu(x)=\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$. From the asymptotic formula (\ref{ltend0}) and the fact that $b_\nu(0^+)=\frac{1}{2}$, we have
\begin{equation*}1-\lim_{x\downarrow0}\frac{l_\nu^b(x)}{h_\nu(x)}=0, \quad \lim_{x\downarrow0}\frac{u_\nu^b(x)}{h_\nu(x)}-1=\frac{2}{2\nu-1},
\end{equation*}
and so the relative error in approximating $h_\nu(x)$ by $l_\nu^b(x)$ is 0 in the limit $x\downarrow0$. The relative error in approximating $h_\nu(x)$ by $u_\nu^b(x)$ in the limit $x\downarrow0$ blows up as $\nu\downarrow\frac{1}{2}$, but decreases as $\nu$ increases, and the bound is tight as $\nu\rightarrow\infty$. Furthermore, as $x\downarrow0$,
\[l_\nu^b(x)\sim\frac{x}{2\nu+1}-\frac{4(\nu+2)x^3}{3(2\nu+1)^3(2\nu+3)}, \quad h_\nu(x)\sim\frac{x}{2\nu+1}-\frac{2x^3}{3(2\nu+1)^2(2\nu+3)},\]
and so, as $\nu\rightarrow\infty$, the second term in the $x\downarrow0$ expansion of $l_\nu^b(x)$ approaches the second term of the expansion of $h_\nu(x)$. Hence, both the lower and upper bounds improve for `small' $x$ as $\nu$ increases.
From the asymptotic formulas (\ref{linfty}) and (\ref{bnu1}), we have, as $x\rightarrow\infty$,
\begin{align*}l_\nu^b(x)&\sim1-\frac{2\nu-1}{2x}+\frac{4\nu^2-4\nu+1}{8x^2}, \quad h_\nu(x)\sim1-\frac{2\nu-1}{2x}+\frac{4\nu^2-8\nu+3}{8x^2}, \\
u_\nu^b(x)&\sim1-\frac{2\nu-1}{2x}+\frac{4\nu^2-12\nu+1}{8x^2}.
\end{align*}
All of $l_\nu^b(x)$, $h_\nu(x)$ and $u_\nu^b(x)$ have the same first two terms in the $x\rightarrow\infty$ expansion, but differ in the third term. We see that, for `large' $x$, the quality of both the lower and upper bound approximations decreases as $\nu$ increases. The $O(x^{-2})$ error in the approximation is much larger than the $O(x^{\nu+1/2}\mathrm{e}^{-x})$ error of the double inequality (\ref{firstcor1}). The comments given in this remark are supported by numerical results obtained using \emph{Mathematica}, which are reported in Tables \ref{table3} and \ref{table4}.
\end{remark}
\begin{table}[h]
\centering
\caption{\footnotesize{Relative error in approximating $\mathbf{L}_{\nu}(x)/\mathbf{L}_{\nu-1}(x)$ by the lower bound of (\ref{upper1}).}}
\label{table3}
{\scriptsize
\begin{tabular}{|c|rrrrrrrrrr|}
\hline
\backslashbox{$\nu$}{$x$} & 0 & 0.5 & 1 & 2.5 & 5 & 7.5 & 10 & 15 & 25 & 50 \\
\hline
0 & 0.0000 & 0.1057 & 0.1973 & 0.1545 & 0.0319 & 0.0073 & 0.0030 & 0.0012 & 0.0004 & 0.0001 \\
0.5 & 0.0000 & 0.0267 & 0.0732 & 0.1073 & 0.0383 & 0.0117 & 0.0053 & 0.0022 & 0.0008 & 0.0002 \\
1 & 0.0000 & 0.0102 & 0.0329 & 0.0725 & 0.0390 & 0.0147 & 0.0071 & 0.0031 & 0.0011 & 0.0003 \\
2.5 & 0.0000 & 0.0017 & 0.0063 & 0.0243 & 0.0287 & 0.0173 & 0.0100 & 0.0049 & 0.0020 & 0.0006 \\
5 & 0.0000 & 0.0003 & 0.0012 & 0.0062 & 0.0132 & 0.0128 & 0.0098 & 0.0059 & 0.0029 & 0.0009 \\
7.5 & 0.0000 & 0.0001 & 0.0004 & 0.0024 & 0.0063 & 0.0081 & 0.0076 & 0.0056 & 0.0033 & 0.0012 \\
10 & 0.0000 & 0.0000 & 0.0002 & 0.0011 & 0.0034 & 0.0051 & 0.0055 & 0.0048 & 0.0033 & 0.0014 \\
\hline
\end{tabular}}
\end{table}
\begin{table}[h]
\centering
\caption{\footnotesize{Relative error in approximating $\mathbf{L}_{\nu}(x)/\mathbf{L}_{\nu-1}(x)$ by the upper bound of (\ref{upper1}).}}
\label{table4}
{\scriptsize
\begin{tabular}{|c|rrrrrrrrrr|}
\hline
\backslashbox{$\nu$}{$x$} & 0 & 0.5 & 1 & 2.5 & 5 & 7.5 & 10 & 15 & 25 & 50 \\
\hline
0.5 & $\infty$ & 3.0830 & 1.1640 & 0.1789 & 0.0136 & 0.0011 & 0.0001 & 0.0000 & 0.0000 & 0.0000 \\
1 & 2.0000 & 1.5128 & 0.9357 & 0.2417 & 0.0338 & 0.0074 & 0.0030 & 0.0012 & 0.0004 & 0.0001 \\
2.5 & 0.5000 & 0.4824 & 0.4360 & 0.2524 & 0.0777 & 0.0259 & 0.0117 & 0.0047 & 0.0017 & 0.0004 \\
5 & 0.2222 & 0.2199 & 0.2131 & 0.1736 & 0.0950 & 0.0460 & 0.0239 & 0.0099 & 0.0036 & 0.0009 \\
7.5 & 0.1429 & 0.1421 & 0.1397 & 0.1247 & 0.0864 & 0.0523 & 0.0310 & 0.0139 & 0.0054 & 0.0014 \\
10 & 0.1053 & 0.1049 & 0.1037 & 0.0962 & 0.0747 & 0.0516 & 0.0341 & 0.0166 & 0.0069 & 0.0019 \\
\hline
\end{tabular}}
\end{table}
\subsection{Further bounds for the ratio $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$}\label{sec2.4}
Since the modified Struve function $\mathbf{L}_\nu(x)$ and modified Bessel function $I_\nu(x)$ are closely related, some of the techniques used in the literature to bound the ratio $I_\nu(x)/I_{\nu-1}(x)$ can be easily adapted to bound the ratio $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$. In the following theorem, we give two such examples that complement the bounds we gave in Corollary \ref{bghj}. The first bound is obtained by adapting the method used to prove Theorem 1.1 of \cite{ln10}, and we adapt the approach used to prove inequality (1.9) of \cite{ifantis} to establish the second.
\begin{theorem}For $x>0$,
\begin{equation}\label{lower1}\frac{\mathbf{L}_{\nu}(x)}{\mathbf{L}_{\nu-1}(x)}>\frac{x}{\nu+b_\nu(x)+\sqrt{(\nu+b_\nu(x))^2+x^2}}, \quad \nu\geq-\tfrac{1}{2},
\end{equation}
and
\begin{equation}\label{bibidt}\frac{\mathbf{L}_{\nu}(x)}{\mathbf{L}_{\nu-1}(x)}>\frac{x\tanh\big(\frac{1}{2}x\big)}{x+(2\nu-1)\tanh\big(\frac{1}{2}x\big)+2\big(b_\nu(x)-b_{\frac{1}{2}}(x)\big)\tanh\big(\frac{1}{2}x\big)}, \quad \nu>\tfrac{1}{2}.
\end{equation}
We have equality in (\ref{bibidt}) if $\nu=\frac{1}{2}$.
\end{theorem}
\begin{proof}We begin by noting a Tur\'{a}n-type inequality, which was proved by \cite{bp14, jn98}. For $x>0$ and $\nu>-\frac{3}{2}$, we have $\mathbf{L}_{\nu-1}(x)\mathbf{L}_{\nu+1}(x)<\mathbf{L}_\nu^2(x)$. From the relation (\ref{struveid1}), we thus obtain
\begin{equation*}\mathbf{L}_{\nu-1}(x)\bigg[\mathbf{L}_{\nu-1}(x)-\frac{2\nu}{x}\mathbf{L}_\nu(x)-a_\nu(x)\bigg]<\mathbf{L}_\nu^2(x).
\end{equation*}
Dividing both sides by $\mathbf{L}_{\nu-1}^2(x)$ and defining $h_\nu(x)=\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$, we obtain
\begin{equation*}1-\bigg(\frac{2\nu}{x}+\frac{2b_\nu(x)}{x}\bigg)h_\nu(x)<h_\nu^2(x).
\end{equation*}
Solving this quadratic inequality gives, for $\nu\geq-\frac{1}{2}$,
\begin{align*}h_\nu(x)&>-\frac{\nu}{x}-\frac{b_\nu(x)}{x}+\sqrt{\bigg(\frac{\nu}{x}+\frac{b_\nu(x)}{x}\bigg)^2+1} =\frac{x}{\nu+b_\nu(x)+\sqrt{(\nu+b_\nu(x))^2+x^2}}.
\end{align*}
Moving on to inequality (\ref{bibidt}), from relation (\ref{struveid1}), we have
\begin{equation*}\frac{\mathbf{L}_{\nu-1}(x)}{\mathbf{L}_{\nu}(x)}-\frac{2\nu}{x}-\frac{2b_\nu(x)}{x}=\frac{\mathbf{L}_{\nu+1}(x)}{\mathbf{L}_{\nu}(x)}.
\end{equation*}
Now, by part (vi) of Theorem 2.2 of \cite{bp14}, we have that, for all $x>0$, the function $\mathbf{L}_{\nu+1}(x)/\mathbf{L}_{\nu}(x)$ decreases as $\nu$ increases in the interval $(\frac{1}{2},\infty)$, and therefore the function $\frac{\mathbf{L}_{\nu-1}(x)}{\mathbf{L}_{\nu}(x)}-\frac{2\nu}{x}-\frac{2b_\nu(x)}{x}$ also decreases as $\nu$ increases in the interval $(\frac{1}{2},\infty)$. Using the standard formulas (see \cite[11.4(i)]{olver})
\begin{equation*}\mathbf{L}_{-\frac{1}{2}}(x)=\sqrt{\frac{2}{\pi x}}\sinh(x), \quad \mathbf{L}_{\frac{1}{2}}(x)=\sqrt{\frac{2}{\pi x}}\big(\cosh(x)-1\big)
\end{equation*}
we have
\begin{equation*}\frac{\mathbf{L}_{-\frac{1}{2}}(x)}{\mathbf{L}_{\frac{1}{2}}(x)}=\frac{\sinh(x)}{\cosh(x)-1}=\coth\bigg(\frac{x}{2}\bigg).
\end{equation*}
From the monotonicity property we thus deduce that, for $\nu>\frac{1}{2}$,
\begin{equation*}\frac{\mathbf{L}_{\nu-1}(x)}{\mathbf{L}_{\nu}(x)}-\frac{2\nu}{x}-\frac{2b_\nu(x)}{x}<\frac{1}{\tanh\big(\frac{1}{2}x\big)}-\frac{1}{x}-\frac{2b_{\frac{1}{2}}(x)}{x},
\end{equation*}
whence on rearranging we obtain (\ref{bibidt}), as required.
\end{proof}
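Once again, both bounds are easily spot-checked numerically; the sketch below (Python/SciPy, illustrative only) does so at a few points within the stated parameter ranges.
\begin{verbatim}
# Sample check of the lower bounds (lower1) (valid for nu >= -1/2) and
# (bibidt) (valid for nu > 1/2) for the ratio L_nu(x)/L_{nu-1}(x).
import numpy as np
from scipy.special import gamma, modstruve

def b(nu, x):
    return (0.5 * x) ** (nu + 1) / (np.sqrt(np.pi) * gamma(nu + 1.5)
                                    * modstruve(nu, x))

for nu in (1.0, 3.0):
    for x in (0.5, 5.0, 20.0):
        h = modstruve(nu, x) / modstruve(nu - 1, x)
        lb1 = x / (nu + b(nu, x)
                   + np.sqrt((nu + b(nu, x)) ** 2 + x ** 2))
        t = np.tanh(0.5 * x)
        lb2 = x * t / (x + (2 * nu - 1) * t
                       + 2.0 * (b(nu, x) - b(0.5, x)) * t)
        assert lb1 < h and lb2 < h
\end{verbatim}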
\begin{remark}Inequality (\ref{lower1}) has a larger range of validity than the lower bound of (\ref{upper1}), but is outperformed by (\ref{upper1}) for all $x>0$ if $\nu\geq0$. However, as we shall see shortly, in some situations, for reasons of simplicity, (\ref{lower1}) may be preferable to the lower bound of (\ref{upper1}). Inequality (\ref{bibidt}) also improves inequality (\ref{lowtanh}) for all $x>0$ if $\nu>\frac{1}{2}$. Together with the upper bound (\ref{bpineq3}) it forms a two-sided inequality for $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$, involving hyperbolic functions, that is exact for $\nu=\frac{1}{2}$.
\end{remark}
We end this section by noting that it is possible to obtain further bounds for the ratio $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$ from the relation
\begin{equation}\label{iteqn1}\frac{\mathbf{L}_{\nu}(x)}{\mathbf{L}_{\nu-1}(x)}=\frac{1}{\displaystyle \frac{2\nu}{x}+\frac{2b_\nu(x)}{x}+\frac{\mathbf{L}_{\nu+1}(x)}{\mathbf{L}_{\nu}(x)}}.
\end{equation}
An analogous relation for the ratio $I_\nu(x)/I_{\nu-1}(x)$ has been used by \cite{amos74,rs16,segura} to obtain a sequence of iteratively refined upper and lower bounds that converge to the ratio $I_\nu(x)/I_{\nu-1}(x)$. We do not undertake such an investigation in this paper, and we content ourselves with the following simple illustration of the approach.
\begin{corollary}\label{thmr2}For $x>0$,
\begin{align}\label{upperlower}\frac{\mathbf{L}_{\nu}(x)}{\mathbf{L}_{\nu-1}(x)}<\frac{x}{\nu-1+2b_\nu(x)-b_{\nu+1}(x)+\sqrt{\big(\nu+1+b_{\nu+1}(x)\big)^2+x^2}}, \quad \nu\geq 0.
\end{align}
\end{corollary}
\begin{proof}Applying inequality (\ref{lower1}) to the relation (\ref{iteqn1}) gives the inequality
\begin{align}\frac{\mathbf{L}_{\nu}(x)}{\mathbf{L}_{\nu-1}(x)}&<\frac{1}{\displaystyle\frac{2\nu}{x}+\frac{2b_\nu(x)}{x}+\frac{x}{\nu+1+b_{\nu+1}(x)+\sqrt{(\nu+1+b_{\nu+1}(x))^2+x^2}}} \nonumber\\
\label{jko}&=\frac{1}{\displaystyle\frac{2\nu}{x}+\frac{2b_\nu(x)}{x}+\frac{-\nu-1-b_{\nu+1}(x)+\sqrt{(\nu+1+b_{\nu+1}(x))^2+x^2}}{x}} \\
&=\frac{x}{\displaystyle \nu-1+2b_\nu(x)-b_{\nu+1}(x)+\sqrt{(\nu+1+b_{\nu+1}(x))^2+x^2}}.\nonumber
\end{align}
\end{proof}
\begin{remark}On applying the upper bound of (\ref{upper1}) of Corollary \ref{bghj} to the relation (\ref{iteqn1}) we recover the lower bound of (\ref{upper1}), through an alternative method.
We could have used the lower bound of (\ref{upper1}), instead of the lower bound (\ref{lower1}), to obtain an upper bound for $\mathbf{L}_{\nu}(x)/\mathbf{L}_{\nu-1}(x)$ that is less than (\ref{upperlower}) for all $x>0$ and $\nu\geq0$. However, this bound would not take such a simple form, because the form of the lower bound of (\ref{upper1}) would not allow for such a neat simplification as the one used to obtain the equality (\ref{jko}).
A straightforward asymptotic analysis shows that inequality (\ref{upperlower}) improves on the upper bound of (\ref{upper1}) in the limit $x\downarrow0$ (in fact the relative error in approximating $\mathbf{L}_{\nu}(x)/\mathbf{L}_{\nu-1}(x)$ is 0 in this limit), whereas the reverse is true in the limit $x\rightarrow\infty$ (the first two terms in the $x\rightarrow\infty$ expansion are given by $1-(\nu-1)/x$). Letting $x_\nu^*>0$ denote the point at which the two inequalities are equal, we used \emph{Mathematica} to find that $x_1^*=5.34$, $x_{2.5}^*=8.42$, $x_5^*=14.9$. For $\nu\geq5$, we find that $x_\nu^*\approx 2\sqrt{\nu(2\nu+1)}$ (for example, $2\sqrt{5\times11}=14.83$). This follows from setting $b_\nu(x)$ to be equal to 0 in (\ref{upperlower}) and then solving $\sqrt{(\nu+1)^2+(x_\nu^*)^2}\approx1/2+\sqrt{(\nu-1/2)^2+(x_\nu^*)^2}$, which, on account of the exponential decay of $b_\nu(x)$, is a reasonable approximation.
\end{remark}
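The crossover points $x_\nu^*$ quoted above were found with \emph{Mathematica}, but they can equally well be located with a standard root finder; the following Python sketch (illustrative only) recovers $x_5^*$ and compares it with the heuristic $2\sqrt{\nu(2\nu+1)}$.
\begin{verbatim}
# Locate the crossover of the two upper bounds for nu = 5 and compare
# it with 2*sqrt(nu*(2*nu+1)); the printed values should be roughly
# 14.9 and 14.83.
import numpy as np
from scipy.special import gamma, modstruve
from scipy.optimize import brentq

def b(nu, x):
    return (0.5 * x) ** (nu + 1) / (np.sqrt(np.pi) * gamma(nu + 1.5)
                                    * modstruve(nu, x))

def u_simple(nu, x):      # upper bound of (upper1)
    return x / (nu - 0.5 + np.sqrt((nu - 0.5) ** 2 + x ** 2))

def u_iter(nu, x):        # upper bound of (upperlower)
    return x / (nu - 1 + 2 * b(nu, x) - b(nu + 1, x)
                + np.sqrt((nu + 1 + b(nu + 1, x)) ** 2 + x ** 2))

nu = 5.0
x_star = brentq(lambda x: u_iter(nu, x) - u_simple(nu, x), 5.0, 25.0)
print(x_star, 2 * np.sqrt(nu * (2 * nu + 1)))
\end{verbatim}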
\section{Further bounds for modified Struve functions of the first kind and their ratios}\label{sec3}
In this section, we apply the bounds of Section \ref{sec2} for the ratio $\mathbf{L}_{\nu}(x)/\mathbf{L}_{\nu-1}(x)$ to obtain further functional inequalities for the modified Struve function of the first kind.
\subsection{Bounds for the condition numbers}\label{sec3.2}
We shall follow the notation of \cite{segura} and write $C\big(\mathbf{L}_\nu(x)\big)=x\mathbf{L}'_\nu(x)/\mathbf{L}_\nu(x)$ and $C\big(I_\nu(x)\big)=xI'_\nu(x)/I_\nu(x)$. These are positive quantities if $\nu\geq-1$ and $\nu\geq0$, respectively. See also \cite{segura} for comments regarding the utility of condition numbers $C\big(f(x)\big)=|xf'(x)/f(x)|$ in comparing functions. The first inequalities for $C\big(I_\nu(x)\big)$, due to \cite{g32}, were motivated by a problem in wave mechanics.
From the relations (\ref{struveid1}) and (\ref{struveid2}) we obtain the relations
\begin{align}\label{star60}C\big(\mathbf{L}_\nu(x)\big)&=\frac{x\mathbf{L}_{\nu-1}(x)}{\mathbf{L}_\nu(x)}-\nu, \\
\label{star61}C\big(\mathbf{L}_\nu(x)\big)&=\frac{x\mathbf{L}_{\nu+1}(x)}{\mathbf{L}_\nu(x)}+\nu+2b_\nu(x),
\end{align}
and thus bounds on the ratio $\mathbf{L}_\nu(x)/\mathbf{L}_{\nu-1}(x)$ immediately lead to bounds on the condition number $C\big(\mathbf{L}_\nu(x)\big)$. (For the modified Bessel function $I_\nu(x)$, the same relations hold but without the $2b_\nu(x)$ term.)
This approach was used by \cite{bp14} to obtain the lower bounds
\begin{equation*}C\big(\mathbf{L}_\nu(x)\big)>\nu+1, \quad \nu>-\tfrac{3}{2}, \quad C\big(\mathbf{L}_\nu(x)\big)>x-\nu, \quad \nu\geq-\tfrac{1}{2},
\end{equation*}
and the following improvement of the second bound (this follows from inequality (\ref{bpineq3})):
\begin{equation*}C\big(\mathbf{L}_\nu(x)\big)\geq\frac{x\sinh(x)}{\cosh(x)-1}-\nu=x\coth\bigg(\frac{x}{2}\bigg)-\nu,
\end{equation*}
with equality if and only if $\nu=\frac{1}{2}$. Also, rearranging inequality (2.5) of \cite{bp14} and using the notation of this paper gives the upper bound
\begin{equation}\label{apti}C\big(\mathbf{L}_\nu(x)\big)<\sqrt{x^2+\nu^2+2(2\nu+1) b_\nu(x)}, \quad \nu>-\tfrac{3}{2}.
\end{equation}
In the following theorem, we give a two-sided inequality for $C\big(\mathbf{L}_\nu(x)\big)$ in terms of $C\big(I_\nu(x)\big)$, which parallels the two-sided inequality (\ref{firstcor1}) of Theorem \ref{thmil} in that it allows one to make use of the literature on bounds for $C\big(I_\nu(x)\big)$ (see \cite{amos74,b092,g32,ln10,p99,pm50,segura}) to bound $C\big(\mathbf{L}_\nu(x)\big)$. We also obtain a number of lower and upper bounds which complement inequality (\ref{apti}).
\begin{theorem}The following inequalities hold:
\noindent (i) For $x>0$,
\begin{equation}\label{condlo}C\big(I_\nu(x)\big)<C\big(\mathbf{L}_\nu(x)\big)<C\big(I_\nu(x)\big)+2b_\nu(x),
\end{equation}
where the lower bound is valid for $\nu\geq\frac{1}{2}$ and the upper bound is valid for $\nu\geq-\frac{1}{2}$.
\noindent (ii) For $x>0$,
\begin{align}\label{star10}\sqrt{\big(\nu-\tfrac{1}{2}\big)^2+x^2}-\tfrac{1}{2}<C\big(\mathbf{L}_\nu(x)\big)<\sqrt{\big(\nu+b_\nu(x)\big)^2+x^2}+b_\nu(x),
\end{align}
where the lower bound holds for $\nu\geq\frac{1}{2}$ and the upper bound holds for $\nu\geq-\frac{1}{2}$;
\begin{align}&\sqrt{\big(\nu+1+b_{\nu+1}(x)\big)^2+x^2}+2b_\nu(x)-b_{\nu+1}(x)-1<C\big(\mathbf{L}_\nu(x)\big)\nonumber \\
\label{star11}&\quad\quad\quad\quad\quad<\sqrt{\big(\nu+\tfrac{1}{2}\big)^2+x^2}+2b_{\nu}(x)-\tfrac{1}{2},
\end{align}
where the lower bound holds for $\nu\geq-1$ and the upper bound holds for $\nu\geq-\frac{1}{2}$;
\begin{align}\label{star40}C\big(\mathbf{L}_\nu(x)\big)>\nu+2b_{\nu}(x)+\frac{x^2}{\nu+\frac{1}{2}+2b_{\nu+1}(x)+\sqrt{\big(\nu+\frac{3}{2}\big)^2+x^2}},
\end{align}
which is valid for $\nu\geq-1$.
\end{theorem}
\begin{proof}\noindent (i) From the relations (\ref{star60}) and (\ref{star61}) for $\mathbf{L}_\nu(x)$, as well as the corresponding ones for $I_\nu(x)$, and the upper bound of inequality (\ref{firstcor1}) of Theorem \ref{thmil}, we have, for $\nu\geq\frac{1}{2}$,
\begin{align*}\frac{x\mathbf{L}_\nu'(x)}{\mathbf{L}_\nu(x)}=\frac{x\mathbf{L}_{\nu-1}(x)}{\mathbf{L}_\nu(x)}-\nu>\frac{xI_{\nu-1}(x)}{I_\nu(x)}-\nu=\frac{xI_\nu'(x)}{I_\nu(x)},
\end{align*}
and, for $\nu\geq-\frac{1}{2}$,
\begin{align*}\frac{x\mathbf{L}_\nu'(x)}{\mathbf{L}_\nu(x)}=\frac{x\mathbf{L}_{\nu+1}(x)}{\mathbf{L}_\nu(x)}+\nu+2b_\nu(x)<\frac{xI_{\nu+1}(x)}{I_\nu(x)}+\nu+2b_\nu(x)=\frac{xI_\nu'(x)}{I_\nu(x)}+2b_\nu(x).
\end{align*}
\noindent (ii) We obtain the double inequality (\ref{star10}) by combining the upper bound of (\ref{upper1}) and inequality (\ref{lower1}) with (\ref{star60}). The double inequality (\ref{star11}) follows from combining inequality (\ref{lower1}) and the upper bound of (\ref{upper1}) with (\ref{star61}). Finally, inequality (\ref{star40}) follows from applying the lower bound of (\ref{upper1}) to (\ref{star61}).
Alternatively, one can obtain the lower bound of (\ref{star10}) and the upper bound of (\ref{star11}) by combining part (i) with the following inequality (see inequalities (71) and (72) of \cite{segura}):
\[\sqrt{\big(\nu-\tfrac{1}{2}\big)^2+x^2}-\tfrac{1}{2}<C\big(I_\nu(x)\big)<\sqrt{\big(\nu+\tfrac{1}{2}\big)^2+x^2}-\tfrac{1}{2},\]
where the lower bound holds for $\nu\geq\frac{1}{2}$ and the upper bound holds for $\nu\geq-\frac{1}{2}$.
\end{proof}
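A quick numerical illustration of the double inequality (\ref{condlo}) is given below (Python/SciPy, illustrative only); the condition numbers are computed through the relation (\ref{star60}) and its modified Bessel analogue.
\begin{verbatim}
# Sample check of C(I_nu(x)) < C(L_nu(x)) < C(I_nu(x)) + 2 b_nu(x),
# with C(L_nu(x)) = x L_{nu-1}(x)/L_nu(x) - nu, and similarly for I_nu.
import numpy as np
from scipy.special import gamma, iv, modstruve

def b(nu, x):
    return (0.5 * x) ** (nu + 1) / (np.sqrt(np.pi) * gamma(nu + 1.5)
                                    * modstruve(nu, x))

for nu in (1.0, 2.0, 6.0):
    for x in (0.5, 4.0, 15.0):
        CL = x * modstruve(nu - 1, x) / modstruve(nu, x) - nu
        CI = x * iv(nu - 1, x) / iv(nu, x) - nu
        assert CI < CL < CI + 2.0 * b(nu, x)
\end{verbatim}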
\begin{remark}Since the lower bound of (\ref{upper1}) is greater than the lower bound (\ref{lower1}), it follows that the lower bound (\ref{star40}) for the condition number $C\big(\mathbf{L}_\nu(x)\big)$ is greater than the lower bound of (\ref{star11}). Also, the upper bound (\ref{star11}) is less than the upper bound (\ref{star10}). However, the comparison between the lower bounds (\ref{star10}) and (\ref{star40}), and the upper bounds (\ref{apti}) and (\ref{star11}) is more involved. Denote the lower bounds (\ref{star10}) and (\ref{star40}) by $l_\nu^{c,1}(x)$ and $l_\nu^{c,2}(x)$, respectively, and the upper bounds (\ref{apti}) and (\ref{star11}) by $u_\nu^{c,1}(x)$ and $u_\nu^{c,2}(x)$, respectively. We shall compare their asymptotic behaviour as $x\downarrow0$ and $x\rightarrow\infty$. For reference, using the asymptotic formulas (\ref{ltend0}) and (\ref{linfty}) gives that
\begin{align*}C\big(\mathbf{L}_\nu(x)\big)\sim \nu+1+\frac{2x^2}{3(2\nu+3)},\:\: x\downarrow0, \quad C\big(\mathbf{L}_\nu(x)\big)\sim x-\frac{1}{2}+\frac{4\nu^2-1}{8x}, \:\: x\rightarrow\infty.
\end{align*}
From the asymptotic formulas (\ref{bnu0}) and (\ref{bnu1}) we obtain
\begin{align*}&l_\nu^{c,1}(x)\sim \nu-1+\frac{x^2}{2\nu-1},\:\: x\downarrow0,\: (\nu>-\tfrac{1}{2}), \quad l_\nu^{c,1}(x)\sim x-\frac{1}{2}+\frac{(2\nu-1)^2}{8x}, \:\: x\rightarrow\infty, \\
&l_\nu^{c,2}(x)\sim \nu+1+\frac{2x^2}{3(2\nu+3)},\:\: x\downarrow0, \quad l_\nu^{c,2}(x)\sim x-\frac{1}{2}+\bigg(\frac{(2\nu-1)^2}{8}-1\bigg)\frac{1}{x}, \:\: x\rightarrow\infty,
\end{align*}
\begin{align*}
&u_\nu^{c,1}(x)\sim \nu+1+\frac{2(\nu+2)x^2}{3(\nu+1)(2\nu+3)},\:\: x\downarrow0, \quad u_\nu^{c,1}(x)\sim x+\frac{\nu^2}{2x}, \:\: x\rightarrow\infty, \\
&u_\nu^{c,2}(x)\sim \nu+1+\frac{(10\nu+17)x^2}{6(2\nu+1)(2\nu+3)},\:\: x\downarrow0, \quad u_\nu^{c,2}(x)\sim x-\frac{1}{2}+\frac{(2\nu+1)^2}{8x}, \:\: x\rightarrow\infty.
\end{align*}
Other than $l_\nu^{c,1}(x)$ in the limit $x\downarrow0$, the bounds are tight as $x\downarrow0$ and $x\rightarrow\infty$. Also, numerical experiments suggest that, for all $\nu$ in the ranges of validity, $l_\nu^{c,1}(x)<l_\nu^{c,2}(x)$ for all $x\in(0,x_\nu^*)$ and $l_\nu^{c,1}(x)>l_\nu^{c,2}(x)$ for all $x>x_\nu^*$, for some $x_\nu^*>0$, and $u_\nu^{c,1}(x)<u_\nu^{c,2}(x)$ for all $x\in(0,x_\nu^{**})$ and $u_\nu^{c,1}(x)>u_\nu^{c,2}(x)$ for all $x>x_\nu^{**}$, for some $x_\nu^{**}>0$.
\end{remark}
\subsection{Upper and lower bounds for the ratio $\mathbf{L}_\nu(x)/\mathbf{L}_\nu(y)$ and the modified Struve function $\mathbf{L}_\nu(x)$}\label{sec3.3}
The results of Section \ref{sec3.2} have the following immediate application. If $A_\nu(t)<C\big(\mathbf{L}_\nu(t)\big)<B_\nu(t)$, then dividing by $t$ and integrating between $x$ and $y$ gives the two-sided inequality
\begin{equation}\label{abcineq}\exp\bigg(-\int_x^y\frac{B_\nu(t)}{t}\,\mathrm{d}t\bigg)<\frac{\mathbf{L}_\nu(x)}{\mathbf{L}_\nu(y)}<\exp\bigg(-\int_x^y\frac{A_\nu(t)}{t}\,\mathrm{d}t\bigg).
\end{equation}
This approach was used by \cite{bp14} to prove that, for $0<x<y$,
\begin{equation}\label{nvnvj}\frac{\mathbf{L}_\nu(x)}{\mathbf{L}_\nu(y)}<\bigg(\frac{x}{y}\bigg)^{\nu+1}, \quad \nu>-\tfrac{3}{2} \quad \text{and} \quad \frac{\mathbf{L}_\nu(x)}{\mathbf{L}_\nu(y)}<\mathrm{e}^{x-y}\bigg(\frac{y}{x}\bigg)^\nu, \quad \nu\geq\tfrac{1}{2},
\end{equation}
where the first inequality of (\ref{nvnvj}) was proved for $\nu>-\frac{1}{2}$ by \cite{jn98}, and it was also shown by \cite{bp14} that the second inequality of (\ref{nvnvj}) can be improved to
\begin{equation}\label{bpineq5}\frac{\mathbf{L}_\nu(x)}{\mathbf{L}_\nu(y)}\leq\bigg(\frac{\cosh(x)-1}{\cosh(y)-1}\bigg)\bigg(\frac{y}{x}\bigg)^\nu,
\end{equation}
with equality if and only if $\nu=\frac{1}{2}$.
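This improvement is straightforward to check numerically; the following minimal script (an illustration only, not the computation of \cite{bp14}) spot-checks both upper bounds for sample values $\nu\geq\frac{1}{2}$ and $0<x<y$, using the SciPy implementation \texttt{scipy.special.modstruve} of $\mathbf{L}_\nu$:
\begin{verbatim}
# Spot-check (illustrative only) of the two upper bounds on L_nu(x)/L_nu(y):
#   e^{x-y} (y/x)^nu   and   ((cosh x - 1)/(cosh y - 1)) (y/x)^nu,
# for sample nu >= 1/2 and 0 < x < y.
import numpy as np
from scipy.special import modstruve   # modified Struve function L_nu

for nu in (0.5, 1.0, 2.5, 5.0):
    for x, y in ((0.5, 1.0), (1.0, 3.0), (2.0, 10.0)):
        ratio = modstruve(nu, x) / modstruve(nu, y)
        bound_old = np.exp(x - y) * (y / x)**nu
        bound_new = (np.cosh(x) - 1) / (np.cosh(y) - 1) * (y / x)**nu
        # equality holds in the improved bound when nu = 1/2, hence the tolerance
        assert ratio <= bound_old and ratio <= bound_new * (1 + 1e-12)
        print(nu, x, y, round(ratio, 5), round(bound_new, 5), round(bound_old, 5))
\end{verbatim}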
We do not further explore combining the bounds of Section \ref{sec3.2} with (\ref{abcineq}), but instead note that, in a similar manner, one can integrate the relations
\begin{align*}\frac{\mathbf{L}_\nu'(t)}{\mathbf{L}_\nu(t)}=\frac{\mathbf{L}_{\nu-1}(t)}{\mathbf{L}_\nu(t)}-\frac{\nu}{t}, \quad
\frac{\mathbf{L}_\nu'(t)}{\mathbf{L}_\nu(t)}=\frac{\mathbf{L}_{\nu+1}(t)}{\mathbf{L}_\nu(t)}+\frac{\nu}{t}+\frac{a_\nu(t)}{\mathbf{L}_\nu(t)}
\end{align*}
between $x$ and $y$ to obtain
\begin{align}\label{star30}\frac{\mathbf{L}_\nu(x)}{\mathbf{L}_\nu(y)}&=\bigg(\frac{y}{x}\bigg)^\nu\exp\bigg(-\int_x^y\frac{\mathbf{L}_{\nu-1}(t)}{\mathbf{L}_\nu(t)}\,\mathrm{d}t\bigg), \\
\label{star31}\frac{\mathbf{L}_\nu(x)}{\mathbf{L}_\nu(y)}&=\bigg(\frac{x}{y}\bigg)^\nu\exp\bigg(-2\int_x^y\frac{b_\nu(t)}{t}\,\mathrm{d}t\bigg)\exp\bigg(-\int_x^y\frac{\mathbf{L}_{\nu+1}(t)}{\mathbf{L}_\nu(t)}\,\mathrm{d}t\bigg).
\end{align}
We combine these formulas and the bounds of Section \ref{sec2} to prove the following theorem.
\begin{theorem}\label{thmalm}The following inequalities hold:
\noindent (i) For $0<x<y$,
\begin{equation}\label{thmalm1}\frac{x}{y}\sqrt{\frac{3(2\nu+3)+y^2}{3(2\nu+3)+x^2}}\frac{I_\nu(x)}{I_\nu(y)}<\frac{\mathbf{L}_\nu(x)}{\mathbf{L}_\nu(y)}<\frac{I_\nu(x)}{I_\nu(y)},
\end{equation}
where the lower bound holds for $\nu\geq-\frac{1}{2}$ and the upper bound holds for $\nu\geq\frac{1}{2}$.
\noindent (ii) Let $\nu\geq-\frac{1}{2}$. Then, for $0<x<y$,
\begin{align}&\frac{\mathrm{e}^{\sqrt{(\nu+1/2)^2+x^2}}}{\mathrm{e}^{\sqrt{(\nu+1/2)^2+y^2}}}\bigg(\frac{x}{y}\bigg)^{\nu+1}\sqrt{\frac{3(2\nu+3)+y^2}{3(2\nu+3)+x^2}}\Bigg(\frac{\nu+\frac{1}{2}+\sqrt{(\nu+\frac{1}{2})^2+y^2}}{\nu+\frac{1}{2}+\sqrt{(\nu+\frac{1}{2})^2+x^2}}\Bigg)^{\nu+\frac{1}{2}}<\frac{\mathbf{L}_\nu(x)}{\mathbf{L}_\nu(y)}<\nonumber\\
\label{thmu9}&\quad<\frac{\mathrm{e}^{\sqrt{(\nu+3/2)^2+x^2}}}{\mathrm{e}^{\sqrt{(\nu+3/2)^2+y^2}}}\frac{\tanh\big(\frac{1}{2}x\big)}{\tanh\big(\frac{1}{2}y\big)}\bigg(\frac{x}{y}\bigg)^{\nu}\Bigg(\frac{\nu+\frac{3}{2}+\sqrt{(\nu+\frac{3}{2})^2+y^2}}{\nu+\frac{3}{2}+\sqrt{(\nu+\frac{3}{2})^2+x^2}}\Bigg)^{\nu+\frac{3}{2}}.
\end{align}
\noindent (iii) Let $\nu\geq-\frac{1}{2}$. Then, for $x>0$,
\begin{align}\label{coru1}&\frac{\mathrm{e}^{\sqrt{(\nu+3/2)^2+x^2}-\nu-3/2}}{\sqrt{\pi}2^{\nu-1}\Gamma(\nu+\frac{3}{2})}x^\nu\tanh\Big(\frac{x}{2}\Big)\Bigg(\frac{2\nu+3}{\nu+\frac{3}{2}+\sqrt{\big(\nu+\frac{3}{2}\big)^2+x^2}}\Bigg)^{\nu+\frac{3}{2}}<\mathbf{L}_\nu(x)<\nonumber \\
&\quad<\frac{\mathrm{e}^{\sqrt{(\nu+1/2)^2+x^2}-\nu-1/2}}{\sqrt{\pi}2^{\nu}\Gamma(\nu+\frac{3}{2})}x^{\nu+1}\sqrt{\frac{3(2\nu+3)}{3(2\nu+3)+x^2}}\Bigg(\frac{2\nu+1}{\nu+\frac{1}{2}+\sqrt{\big(\nu+\frac{1}{2}\big)^2+x^2}}\Bigg)^{\nu+\frac{1}{2}}.
\end{align}
\end{theorem}
\begin{proof}(i) We begin by noting two integral formulas. From the relations $I_\nu'(t)=I_{\nu-1}(t)-\frac{\nu}{t}I_\nu(t)$ and $I_\nu'(t)=I_{\nu+1}(t)+\frac{\nu}{t}I_\nu(t)$, we have
\begin{align*}\int\frac{I_{\nu}(t)}{I_{\nu-1}(t)}\,\mathrm{d}t&=\int\bigg(\frac{I_{\nu-1}'(t)}{I_{\nu-1}(t)}-\frac{\nu-1}{t}\bigg)\,\mathrm{d}t=\log\big(I_{\nu-1}(t)\big)-(\nu-1)\log(t),\\
\int\frac{I_{\nu-1}(t)}{I_{\nu}(t)}\,\mathrm{d}t&=\int\bigg(\frac{I_{\nu}'(t)}{I_{\nu}(t)}+\frac{\nu}{t}\bigg)\,\mathrm{d}t=\log\big(I_{\nu}(t)\big)+\nu\log(t).
\end{align*}
Now, let us prove the upper bound. By the upper bound of (\ref{firstcor1}), we have, for $\nu\geq\frac{1}{2}$,
\begin{align*}\int_x^y\frac{\mathbf{L}_{\nu-1}(t)}{\mathbf{L}_{\nu}(t)}\,\mathrm{d}t>\int_x^y\frac{I_{\nu-1}(t)}{I_{\nu}(t)}\,\mathrm{d}t=\log\bigg(\frac{I_{\nu}(y)}{I_{\nu}(x)}\bigg)+\nu\log\bigg(\frac{y}{x}\bigg).
\end{align*}
Combining this inequality with (\ref{star30}) yields the upper bound. For the lower bound, we use the upper bound of (\ref{firstcor1}) to obtain, for $\nu\geq-\frac{1}{2}$,
\begin{align*}\int_x^y\frac{\mathbf{L}_{\nu+1}(t)}{\mathbf{L}_{\nu}(t)}\,\mathrm{d}t<\int_x^y\frac{I_{\nu+1}(t)}{I_{\nu}(t)}\,\mathrm{d}t=\log\bigg(\frac{I_{\nu}(y)}{I_{\nu}(x)}\bigg)-\nu\log\bigg(\frac{y}{x}\bigg).
\end{align*}
Also, by inequality (\ref{bcrude2}),
\begin{equation}\label{mncz}2\int_x^y\frac{b_\nu(t)}{t}\,\mathrm{d}t<\int_x^y\frac{1}{t\big(1+\frac{1}{3(2\nu+3)}t^2\big)}\,\mathrm{d}t=\log\Bigg(\frac{y}{x}\sqrt{\frac{3(2\nu+3)+x^2}{3(2\nu+3)+y^2}}\Bigg).
\end{equation}
On combining these inequalities with (\ref{star31}) we obtain the lower bound.
\noindent (ii) Let $\nu\geq-\frac{1}{2}$. We first prove the upper bound. From the lower bound of (\ref{upper1}) and the fact that $b_\nu(t)<\frac{1}{2}$ we have
\begin{align}\label{intlp}&\int_x^y\frac{\mathbf{L}_{\nu+1}(t)}{\mathbf{L}_\nu(t)}\,\mathrm{d}t>\int_x^y\frac{t}{\nu+\frac{1}{2}+2b_{\nu+1}(t)+\sqrt{\big(\nu+\frac{3}{2}\big)^2+t^2}}\,\mathrm{d}t \\
&\quad\quad>\int_x^y\frac{t}{\nu+\frac{3}{2}+\sqrt{\big(\nu+\frac{3}{2}\big)^2+t^2}}\,\mathrm{d}t \nonumber \\
&\quad\quad=\sqrt{(\nu+\tfrac{3}{2})^2+y^2}-\sqrt{(\nu+\tfrac{3}{2})^2+x^2} +(\nu+\tfrac{3}{2})\log\Bigg(\frac{\nu+\frac{3}{2}+\sqrt{(\nu+\frac{3}{2})^2+x^2}}{\nu+\frac{3}{2}+\sqrt{(\nu+\frac{3}{2})^2+y^2}}\Bigg). \nonumber
\end{align}
Also, from the lower bound of (\ref{star5}), we obtain
\begin{equation*}2\int_x^y\frac{b_\nu(t)}{t}\,\mathrm{d}t>\int_x^y\mathrm{csch}(t)\,\mathrm{d}t=\log\Bigg(\frac{\tanh\big(\frac{1}{2}y\big)}{\tanh\big(\frac{1}{2}x\big)}\Bigg).
\end{equation*}
On combining these inequalities with (\ref{star31}) we obtain the upper bound, as required.
Now, we prove the lower bound. From the upper bound of (\ref{upper1}) we have
\begin{align*}&\int_x^y\frac{\mathbf{L}_{\nu+1}(t)}{\mathbf{L}_\nu(t)}\,\mathrm{d}t<\int_x^y\frac{t}{\nu+\frac{1}{2}+\sqrt{(\nu+\frac{1}{2})^2+t^2}}\,\mathrm{d}t \\
&\quad\quad=\sqrt{(\nu+\tfrac{1}{2})^2+y^2}-\sqrt{(\nu+\tfrac{1}{2})^2+x^2}+(\nu+\tfrac{1}{2})\log\Bigg(\frac{\nu+\frac{1}{2}+\sqrt{(\nu+\frac{1}{2})^2+x^2}}{\nu+\frac{1}{2}+\sqrt{(\nu+\frac{1}{2})^2+y^2}}\Bigg).
\end{align*}
On combining this bound and inequality (\ref{mncz}) with (\ref{star31}) we obtain the lower bound, as required.
\noindent (iii) Let $x\downarrow0$ in (\ref{thmu9}) and use the limits $\lim_{x\downarrow0}\frac{\mathbf{L}_\nu(x)}{x^{\nu+1}}=\frac{1}{\sqrt{\pi}2^{\nu}\Gamma(\nu+\frac{3}{2})}$ and $\lim_{x\downarrow0}\frac{1}{x}\tanh(\frac{x}{2})=\frac{1}{2}$. Then replace $y$ by $x$.
\end{proof}
\begin{remark}The double inequality in part (i) of Theorem \ref{thmalm} allows one to take advantage of the substantial literature (as given in the Introduction) on bounds for the ratio $I_\nu(x)/I_\nu(y)$. For example, inequality (2.19) of \cite{baricz2} and inequality (1.6) of \cite{jb96}, respectively, give that, for $0<x<y$,
\begin{align}\label{hbv}\frac{I_\nu(x)}{I_\nu(y)}&>\frac{\cosh(x)}{\cosh(y)}\bigg(\frac{x}{y}\bigg)^{\nu}, \quad \nu>-\tfrac{1}{2}, \\
\label{johl} \frac{I_\nu(x)}{I_\nu(y)}&>\mathrm{e}^{x-y}\bigg(\frac{y+\nu}{x+\nu}\bigg)^{\nu}\bigg(\frac{x}{y}\bigg)^{\nu}, \quad \nu\geq0,
\end{align}
where (\ref{johl}) is also valid for $\nu>-1$ (see \cite{jb96}) provided suitable restrictions are imposed on $x$ and $y$. Combining (\ref{hbv}) with the lower bound of (\ref{thmalm1}) gives
\begin{equation*}\frac{\mathbf{L}_\nu(x)}{\mathbf{L}_\nu(y)}>\frac{\cosh(x)}{\cosh(y)}\bigg(\frac{x}{y}\bigg)^{\nu+1}\sqrt{\frac{3(2\nu+3)+y^2}{3(2\nu+3)+x^2}}, \quad \nu>-\tfrac{1}{2}, \:0<x<y,
\end{equation*}
which complements the upper bound (\ref{bpineq5}) that was proved by \cite{bp14}. Now, from (\ref{johl}), we have
\begin{equation}\label{setit}\frac{\mathbf{L}_\nu(x)}{\mathbf{L}_\nu(y)}>\mathrm{e}^{x-y}\bigg(\frac{y+\nu}{x+\nu}\bigg)^{\nu}\bigg(\frac{x}{y}\bigg)^{\nu+1}\sqrt{\frac{3(2\nu+3)+y^2}{3(2\nu+3)+x^2}}, \quad \nu\geq0, \:0<x<y.
\end{equation}
Arguing as we did in the proof of part (iii) of Theorem \ref{thmalm}, we obtain the inequality
\begin{equation}\label{vivi}\mathbf{L}_\nu(x)<\frac{1}{\sqrt{\pi}2^\nu\Gamma(\nu+\frac{3}{2})}\bigg(\frac{\nu}{x+\nu}\bigg)^\nu\sqrt{\frac{3(2\nu+3)}{3(2\nu+3)+x^2}} x^{\nu+1}\mathrm{e}^x, \quad \nu\geq0, \:x>0.
\end{equation}
Inequalities (\ref{setit}) and (\ref{vivi}) are outperformed by the more complicated corresponding bounds from the two-sided inequalities (\ref{thmu9}) and (\ref{coru1}), respectively.
\end{remark}
\begin{remark}For fixed $y>0$, both the lower bound and upper bound of (\ref{thmu9}) are $O(x^{\nu+1})$, as $x\downarrow0$. For fixed $x>0$, as $y\rightarrow\infty$, the lower bound is $O(y^{1/2}\mathrm{e}^{-y})$, which is the correct order (see (\ref{linfty})). However, the upper bound is $O(y^{3/2}\mathrm{e}^{-y})$, as $y\rightarrow\infty$. That the upper bound is not of the correct order as $y\rightarrow\infty$ can be at least partly traced back to the use of the inequality $b_\nu(t)<\frac{1}{2}$ in its derivation. This inequality is tight as $t\downarrow0$, but very crude for large $t$ (see (\ref{bnu1})). Using the refined inequality (\ref{bcrude2}) in the derivation of the lower bound (\ref{thmu9}) enabled us to obtain the correct order as $y\rightarrow\infty$, but using this inequality to bound the integral (\ref{intlp}) leads to a considerably less tractable integral.
To the best of this author's knowledge, our lower bound for the ratio $\mathbf{L}_\nu(x)/\mathbf{L}_\nu(y)$ is the first such bound to appear in the literature. The upper bound improves on the second inequality of (\ref{nvnvj}) for all $\nu\geq-\frac{1}{2}$. The comparison between inequality (\ref{bpineq5}) and our upper bound is more involved. Denote these bounds by $u_\nu^{d,1}(x,y)$ and $u_\nu^{d,2}(x,y)$, respectively. In fact, $u_{\frac{1}{2}}^{d,1}(x,y)=\mathbf{L}_{\frac{1}{2}}(x)/\mathbf{L}_{\frac{1}{2}}(y)$. For fixed $y$, as $x\downarrow0$, we have $u_\nu^{d,1}(x,y)=O(x^{2-\nu})$ and therefore, for `small' $x$, $u_\nu^{d,2}(x,y)$ outperforms $u_\nu^{d,1}(x,y)$ for all $\nu>\frac{1}{2}$. For fixed $x$, as $y\rightarrow\infty$, $u_\nu^{d,2}(x,y)=O(y^{3/2}\mathrm{e}^{-y})$ and $u_\nu^{d,1}(x,y)=O(y^{\nu}\mathrm{e}^{-y})$, and so, in this regime, $u_\nu^{d,2}(x,y)\gg u_\nu^{d,1}(x,y)$ if $\nu<\frac{3}{2}$ and $u_\nu^{d,2}(x,y)\ll u_\nu^{d,1}(x,y)$ if $\nu>\frac{3}{2}$.
\end{remark}
\begin{remark}\label{rem12}The lower and upper bounds of (\ref{coru1}) for $\mathbf{L}_\nu(x)$ are both tight in the limit $x\downarrow0$. As $x\rightarrow\infty$, the upper bound is $O(x^{-1/2}\mathrm{e}^x)$ and the lower bound is $O(x^{-3/2}\mathrm{e}^x)$, whereas $\mathbf{L}_\nu(x)=O(x^{-1/2}\mathrm{e}^x)$. That the lower bound is not of the correct order as $x\rightarrow\infty$ is a consequence of the fact that the upper bound of (\ref{thmu9}) is not of the correct order as $y\rightarrow\infty$, as discussed in the previous remark.
We examine the upper bound of (\ref{coru1}) in further detail. Denote this bound by $u_\nu^{e,1}(x)$. It has the following asymptotic behaviour:
\begin{align*}u_\nu^{e,1}(x)&\sim\frac{x^{\nu+1}}{\sqrt{\pi}2^{\nu}\Gamma(\nu+\frac{3}{2})}\bigg(1+\frac{2(\nu+2)x^2}{3(2\nu+1)(2\nu+3)}\bigg), \quad x\downarrow0, \\
u_\nu^{e,1}(x) &\sim a_\nu\frac{\mathrm{e}^x}{\sqrt{x}}, \quad x\rightarrow\infty,
\end{align*}
where
\[a_\nu=\sqrt{\frac{12}{\pi}}\frac{\sqrt{\nu+\frac{3}{2}}}{\Gamma(\nu+\frac{3}{2})}(\nu+\tfrac{1}{2})^{\nu+1/2}\mathrm{e}^{-\nu-1/2},\]
which, by an application of Stirling's inequality \cite[5.6.1]{olver}, can be bounded by
\begin{equation}\label{aplm}\frac{\sqrt{6}}{\pi}\sqrt{\frac{2\nu+3}{2\nu+1}}\mathrm{e}^{-\frac{1}{6(2\nu+1)}}<a_\nu<\frac{\sqrt{6}}{\pi}\sqrt{\frac{2\nu+3}{2\nu+1}}, \quad \nu>-\tfrac{1}{2}.
\end{equation}
We see that the second term in the $x\downarrow0$ expansion of $u_\nu^{e,1}(x)$ approaches that of $\mathbf{L}_\nu(x)$ as $\nu\rightarrow\infty$. It is straightforward to show that there exists a constant $C>0$, independent of $\nu$, such that $a_\nu<C$ for all $\nu\geq-\frac{1}{2}$, which means that, whilst $a_\nu>\frac{1}{\sqrt{2\pi}}$ (recall that $\mathbf{L}_\nu(x)\sim\frac{1}{\sqrt{2\pi x}}\mathrm{e}^x$, as $x\rightarrow\infty$), the upper bound is always within an absolute constant multiple of $\mathbf{L}_\nu(x)$ for `large' $x$. Numerical results (see Table \ref{table5}) obtained using \emph{Mathematica} support this analysis and suggest that: for fixed $x>0$, the relative error in approximating $\mathbf{L}_\nu(x)$ decreases as $\nu$ increases, and, for fixed $\nu$, the relative error increases from an initial value of 0 at $x=0$ up to a maximum $\sqrt{2\pi}a_\nu-1$ as $x$ increases.
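The following minimal script (an illustration only; the helper name \texttt{upper\_bound} is illustrative and SciPy's \texttt{modstruve} is used for $\mathbf{L}_\nu$) recomputes the relative error $u_\nu^{e,1}(x)/\mathbf{L}_\nu(x)-1$ at a few of the points appearing in Table \ref{table5}:
\begin{verbatim}
# Sketch (illustrative only): relative error of the upper bound of the
# two-sided inequality for L_nu(x) above, computed with SciPy.
import numpy as np
from scipy.special import modstruve
from scipy.special import gamma as Gamma

def upper_bound(nu, x):
    s = np.sqrt((nu + 0.5)**2 + x**2)
    pref = np.exp(s - nu - 0.5) / (np.sqrt(np.pi) * 2**nu * Gamma(nu + 1.5))
    return (pref * x**(nu + 1)
            * np.sqrt(3*(2*nu + 3) / (3*(2*nu + 3) + x**2))
            * ((2*nu + 1) / (nu + 0.5 + s))**(nu + 0.5))

for nu in (0, 1, 2.5, 5, 10):
    errs = [upper_bound(nu, x) / modstruve(nu, x) - 1 for x in (0.5, 5, 50, 200)]
    print(nu, ["%.4f" % e for e in errs])
\end{verbatim}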
Several simpler upper bounds for $\mathbf{L}_\nu(x)$ in terms of elementary functions were given in Section 3 of \cite{bp14}, although none of these bounds are of the correct order as $x\rightarrow\infty$. In addition, \cite{bp14} obtained the following useful inequality:
\begin{equation}\label{bpstu}\mathbf{L}_\nu(x)<\frac{2\Gamma(\nu+2)}{\sqrt{\pi}\Gamma(\nu+\frac{3}{2})}I_{\nu+1}(x), \quad x>0, \:\nu>-\tfrac{1}{2}.
\end{equation}
One can thus exploit the extensive literature on inequalities for $I_\nu(x)$ to upper bound $\mathbf{L}_\nu(x)$. For example, applying Theorem 2 of \cite{p99} to (\ref{bpstu}) gives the bound
\begin{align}\label{nearr}\mathbf{L}_\nu(x)<\frac{\sqrt{2}\Gamma(\nu+2)}{\pi\Gamma(\nu+\frac{3}{2})}\frac{\mathrm{e}^{\sqrt{x^2+(\nu+1)^2}+\frac{2}{\sqrt{x^2+(\nu+1)^2}}}}{(x^2+(\nu+1)^2)^{1/4}}\bigg(\frac{x}{\nu+1+\sqrt{x^2+(\nu+1)^2}}\bigg)^{\nu+1},
\end{align}
which holds for all $\nu>-\frac{1}{2}$ and $x>0$. The upper bound (\ref{nearr}), which we denote by $u_\nu^{e,2}(x)$, is $O(x^{\nu+1})$, as $x\downarrow0$, and $O(x^{-1/2}\mathrm{e}^x)$, as $x\rightarrow\infty$, which, as is the case for $u_\nu^{e,1}(x)$, is in agreement with the asymptotic behaviour of $\mathbf{L}_\nu(x)$. However, the multiplicative constant in the leading term of the $x\downarrow0$ expansion is larger than that of $\mathbf{L}_\nu(x)$. As $x\rightarrow\infty$, $u_\nu^{e,2}(x)\sim b_\nu x^{-1/2}\mathrm{e}^x$, where $b_\nu=\frac{\sqrt{2}\Gamma(\nu+2)}{\pi\Gamma(\nu+\frac{3}{2})}$. Unlike $a_\nu$, the constant $b_\nu$ increases at rate $\sqrt{\nu}$ as $\nu\rightarrow\infty$ (see \cite[5.6.4]{olver}). However, for small enough $\nu$ we have $b_\nu<a_\nu$; we used \emph{Mathematica} to find that $b_\nu=a_\nu$ when $\nu=2.521$. Further numerical experiments suggest that $u_\nu^{e,1}(x)<u_\nu^{e,2}(x)$ for all $x>0$ if $\nu>2.521$. Some results are reported in Table \ref{table6}. Notice that up to $x=100$ we have $u_{2.5}^{e,1}(x)<u_{2.5}^{e,2}(x)$, and then, as would be expected, this inequality is reversed for `large' values of $x$.
\begin{table}[h]
\centering
\caption{\footnotesize{Relative error in approximating $\mathbf{L}_{\nu}(x)$ by the upper bound of (\ref{coru1}).}}
\label{table5}
{\scriptsize
\begin{tabular}{|c|rrrrrrrrrr|}
\hline
\backslashbox{$\nu$}{$x$} & 0.5 & 1 & 2.5 & 5 & 10 & 15 & 25 & 50 & 100 & 200 \\
\hline
0 & 0.0743 & 0.2403 & 0.8053 & 1.3722 & 1.7107 & 1.7994 & 1.8540 & 1.8839 & 1.8951 & 1.9000 \\
1 & 0.0163 & 0.0618 & 0.2928 & 0.6854 & 1.0716 & 1.2020 & 1.2914 & 1.3462 & 1.3690 & 1.3792 \\
2.5 & 0.0052 & 0.0204 & 0.1151 & 0.3523 & 0.7301 & 0.9026 & 1.0340 & 1.1214 & 1.1602 & 1.1782 \\
5 & 0.0017 & 0.0070 & 0.0431 & 0.1612 & 0.4601 & 0.6600 & 0.8388 & 0.9706 & 1.0333 & 1.0635 \\
10 & 0.0005 & 0.0021 & 0.0135 & 0.0582 & 0.2302 & 0.4133 & 0.6309 & 0.8216 & 0.9238 & 0.9762 \\
\hline
\end{tabular}}
\end{table}
\begin{table}[h]
\centering
\caption{\footnotesize{Relative error in approximating $\mathbf{L}_{\nu}(x)$ by the upper bound (\ref{nearr}).}}
\label{table6}
{\scriptsize
\begin{tabular}{|c|rrrrrrrrrr|}
\hline
\backslashbox{$\nu$}{$x$} & 0.5 & 1 & 2.5 & 5 & 10 & 15 & 25 & 50 & 100 & 200 \\
\hline
0 & 5.3417 & 3.2145 & 1.1605 & 0.6502 & 0.4549 & 0.3931 & 0.3445 & 0.3086 & 0.2908 & 0.2820 \\
1 & 1.7475 & 1.5473 & 1.0183 & 0.7830 & 0.7437 & 0.7328 & 0.7207 & 0.7098 & 0.7039 & 0.7008 \\
2.5 & 0.8072 & 0.7908 & 0.7309 & 0.7459 & 0.9201 & 1.0127 & 1.0853 & 1.1374 & 1.1627 & 1.1751 \\
5 & 0.4167 & 0.4215 & 0.4563 & 0.5838 & 0.9410 & 1.1928 & 1.4306 & 1.6218 & 1.7208 & 1.7712 \\
10 & 0.2102 & 0.2151 & 0.2491 & 0.3664 & 0.7552 & 1.1596 & 1.6780 & 2.1815 & 2.4709 & 2.6250 \\
\hline
\end{tabular}}
\end{table}
\end{remark}
\end{document}
\begin{document}
\title{Realisations of posets and tameness}
\author{Wojciech Chach\'olski}
\thanks{Mathematics, KTH, S-10044
Stockholm, Sweden\email{[email protected]} \email{[email protected]} \email{[email protected]} }
\author{ Alvin Jin }
\author{Francesca Tombari}
\maketitle
\begin{abstract}
We introduce a construction called realisation which transforms posets into posets.
We show that realisations share several key features with upper semilattices
which are essential in persistence. For example, we define local dimensions of points in a poset and show that these numbers for realisations behave in a similar way as they do for upper semilattices. Furthermore, similarly to upper semilattices, realisations
have well behaved discrete approximations which are suitable for capturing homological properties of functors indexed by them. These discretisations are convenient and effective for describing tameness of functors.
Homotopical and homological properties of tame functors,
particularly those indexed by realisations, are discussed.
\end{abstract}
\section{Introduction}
An input for persistent homology can be organised into two ingredients:
a function $f\colon X\to Y$, between a topological space $X$ and a set $Y$, and a
functor $\alpha\colon [0,\infty)^r\to 2^Y$, between the poset of $r$-tuples of non-negative real numbers and the inclusion poset of all subsets of $Y$, representing an $r$-multifiltration of subsets of $Y$.
Persistent homology transforms such an input into a functor, indexed by the poset $[0,\infty)^r$, assigning to an element
$a$ the homology of the inverse image
$f^{-1}(\alpha(a))$. For example, the homologies of the sublevel sets of a function $X\to [0,\infty)^r$ form a persistent homology functor, and so do the homologies of Vietoris-Rips (multi) filtrations (see~\cite{MR2476414}).
Encoding information in form of functors indexed by the poset $[0,\infty)^r$
is attractive for three reasons:
\begin{itemize}
\item metric properties of $[0,\infty)^r$ can be used to define and study distances on
functors indexed by $[0,\infty)^r$ (see for example~\cite{algebraic_stability, Lesnick2015}), which are essential for addressing stability of various invariants and can be used for hierarchical stabilisation constructions (see~\cite{MR3735858, OliverWojtek, MR4057607});
\item the poset $[0,\infty)^r$ has well behaved discrete approximations given by sublattices of the form ${\mathbb N}^r\hookrightarrow [0,\infty)^r$, which can be used
to provide finite approximations of functors indexed by $[0,\infty)^r$;
\item the mentioned discretisations and approximations have well studied algebraic and homological properties, as the path algebra of the poset ${\mathbb N}^r$ is
isomorphic to the multigraded polynomial ring in $r$ variables.
\end{itemize}
There has been a lot of research
focused on understanding the interplay between the mentioned three aspects of
functors indexed by the poset $[0,\infty)^r$.
For example, the study of this interplay for the homologies of the sublevel sets of $X\to [0,\infty)^r$ has been truly beneficial for understanding geometrical and topological properties of $X$,
for instance properties described by Morse functions.
This interplay has been also central in applied topology (see for example~\cite{adamsatlanasovcarlsson, MR1991588}).
For instance, the rank invariant (see~\cite{MR3083259, MR2506738}) is an algebraic invariant, is stable with respect to natural choices of distances, and can be effectively calculated using the discretisations. The tight relation between these three
aspects of the rank invariant makes it informative and
attractive for data analysis purposes.
Generalising these results to functors indexed by other posets
is a growing research direction in the applied topology
community, reflected by an increasing number of publications on this subject, see for example~\cite{EMiller1,MR4323617,Botnan2020ART, MR4334502, MR3975559}. However, there seems to be a lack of explicit examples of posets, not directly related to $[0,\infty)^r$, for which the mentioned three aspects are tightly intertwined. The aim of this article
is to introduce a rich family of such posets.
We define a construction called \textbf{realisation}
(see~\ref{drjfhhj}) which transforms posets into posets, and show that the realisations of
finite type posets (posets where down sets of elements are finite) have natural discretisations
tightly related to homological properties of functors indexed by them. Metric aspects of functors indexed by realisations are going to be the subject of a follow-up paper; however, we hope that the ``continuity'' properties of realisations, related to their metric properties, will be at least intuitively clear.
The realisation of a poset $I$ is assembled from posets
of the form $(-1,0)^s$ in the following way.
For every element $a$ in $I$, and for every finite subset $S$ of parents of $a$ (a parent is an element covered by $a$) that has a common ancestor, consider the poset
$(-1,0)^{|S|}$. The realisation $\mathcal{R}(I)$ is the disjoint union of
all these posets $(-1,0)^{|S|}$ for all $a$ in $I$ and all $S$.
For example, if $S$ is empty, then $(-1,0)^0$ is of size $1$ and we identify its element with $a$. In this way
we obtain an inclusion $I\subset \mathcal{R}(I)$. If $S=\{p\}$ is of size $1$, then we think of the associated subposet $(-1,0)\subset \mathcal{R}(I)$ as the time of going back from $a$ to its parent $p$.
Figure~\ref{intro} illustrates the realisations
of the posets $[1]:=\{0<1\}$ and $[1]^2$ with colors indicating some of the summands $(-1,0)^s$, identifying these realisations with, respectively, $[0,1]$ and $[0,1]^2$.
A similar description of the realisation can be obtained if $I$ is a finite type distributive (or more generally
consistent, see~\ref{adgdgfjdghj}) upper semilattice. For example, $[0,\infty)^r$ is the realisation of
$\mathbb{N}^r$. For a general poset,
however, defining the poset relation on its realisation is non-trivial. Our strategy is to identify the realisation as a subposet of the Grothendieck construction of a certain lax functor, which may fail to be an honest functor in the case $I$ is not a consistent upper semilattice.
The fact that, for an arbitrary poset, this lax functor may fail to be a functor is the main difficulty in expressing the poset relation in the realisation in simple terms.
The subposet inclusion $I\subset \mathcal{R}(I)$, of which
the standard inclusion $\mathbb{N}^r\subset [0,\infty)^r $ is an example, has the following property. Assume a distance is chosen on a set $Y$. Then every poset functor $U\colon I\to 2^Y $ with non-empty values can be extended to the realisation $\mathcal{R}(I)$ and induce a commutative
triangle of poset functors (see~\ref{sasDFGSDGJFHG}):
\[\begin{tikzcd}
I\ar{dr}{U}\ar[hook]{rr} & & \mathcal{R}(I)\ar{ld}[swap]{\overline{U}}\\
& 2^Y
\end{tikzcd}\]
For example, consider a pair of non-empty subsets $U_0\subset U_1\subset Y$, which describes a functor $[1]\to 2^Y$. Choose a distance $d$ on $Y$ whose maximum value is finite.
For $t$ in $(-1,0)$, consider $\overline{U}(t):= U_1\cap B\left(U_0,(1+t)\text{max}(d)\right)$, where $B(U_0,s)$ denotes the set $\{y\in Y\ |\ d(x,y)< s\text{ for some $x$ in $U_0$}\}$.
All these sets form a functor $\overline{U}\colon\mathcal{R}([1])\to 2^Y$
which is a desired extension.
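For a finite set $Y$ this extension can be computed directly. The following is a minimal sketch (an illustration only; the function and variable names are ours) of the value $\overline{U}(t)=U_1\cap B\big(U_0,(1+t)\text{max}(d)\big)$ for $t$ in $(-1,0)$:
\begin{verbatim}
# Sketch (illustrative only): the value of the extension at t in (-1,0),
# for a finite set Y with a distance d given as a dictionary of pairs.
def extension(t, U0, U1, Y, d):
    max_d = max(d[a, b] for a in Y for b in Y)             # max(d), assumed finite
    s = (1 + t) * max_d
    ball = {y for y in Y if any(d[x, y] < s for x in U0)}  # B(U_0, s)
    return set(U1) & ball

# toy example: four points on a line, U_0 = {0} and U_1 = {0, 1, 2}
Y = (0, 1, 2, 3)
d = {(a, b): abs(a - b) for a in Y for b in Y}
print([sorted(extension(t, {0}, {0, 1, 2}, Y, d)) for t in (-0.9, -0.5, -0.1)])
# [[0], [0, 1], [0, 1, 2]] -- the subsets grow monotonically with t
\end{verbatim}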
The functor $\overline{U}\colon \mathcal{R}(I)\to 2^Y$ can then be used to transform
a function $f\colon X\to Y$ into a functor, indexed by $\mathcal{R}(I)$,
assigning to an element $a$ the homology of the preimage $f^{-1}(\overline{U}(a))$.
This procedure is analogous to the way persistent homology is constructed (see the first paragraph of this introduction), and it is a rich and important source of examples of functors indexed by realisations.
Our story about realisations is divided into two parts. In the first part, the internal properties of realisations as posets are discussed.
This part starts with describing a certain regularity property of realisations of finite type posets. This regularity is expressed in terms of two natural numbers
assigned to every element in a poset (see Section~\ref{dsfsefseff}). These numbers, called
dimension and parental dimension, are intended to capture the complexity of expressing an element as a sup of subsets of the poset.
For every element, its dimension is never bigger than its parental dimension (see Proposition \ref{adgfdsfhgf}). Although in general these numbers might differ, they are the same for every element in a distributive
upper semilattice, which we regard as a regular poset.
It turns out that these numbers coincide also for every element in the realisation of a finite type poset (Corollary~\ref{sdfwefwefwsghj}).
The parental dimension is important since it often bounds the homological dimension of vector space valued functors indexed by the poset.
This is the case if for example the indexing poset is an upper semilattice (see~\ref{fjgdkvfkdcmv}). The same
is also true for realisations of finite type posets (see~\ref{sdrtyhgf}), again an illustration of their regularity.
Recall that the realisation $\mathcal{R}(I)$ is assembled from posets of the form $(-1,0)^s$.
If we choose a finite subposet $V\subset (-1,0)$ and use $V^s$, instead of
$(-1,0)^s$, we obtain a subposet denoted by $\mathcal{R}(I,V)\subset \mathcal{R}(I)$ (see~\ref{sdgdfhkjkl}).
For instance, if $V=\emptyset$, then $\mathcal{R}(I,V)=I$. These subposets, for various $V$, form discrete approximations of the realisation. These approximations are informative since, for example, we prove that, for every element $x$ in $\mathcal{R}(I)$, there is $V$ for which $x$ belongs to $\mathcal{R}(I,V)$ and the dimension of $x$ as an element of $\mathcal{R}(I,V)$ equals its dimension as an element of $\mathcal{R}(I)$ (see Theorem~\ref{afashgrjh}). Furthermore, if $\mathcal{R}(I)$ is a (distributive) upper semilattice, then so is $\mathcal{R}(I,V)$ and it is a sublattice of the realisation. The intuition is that the denser $V$ is in $(-1,0)$, the denser $\mathcal{R}(I,V)$ is in $\mathcal{R}(I)$ in the following sense: every tame functor (see Definition~\ref{sdfubbeoru}) indexed by $\mathcal{R}(I)$ is discretised by some $\mathcal{R}(I,V)\subset \mathcal{R}(I)$ (Proposition~\ref{asdsfgadfshfgjh}).
In the first part, we also discuss assumptions under which the realisation of an upper semilattice is again an upper semilattice. We show that this is guaranteed under a consistency assumption (see~\ref{adgdgfjdghj}). Key examples of consistent upper semilattices are distributive upper semilattices. In this context, our main result is Theorem~\ref{dasghfgjk} which states that the realisation of a finite type distributive upper semilattice is a distributive upper semilattice.
The reason we care about
upper semilattices is that finite type functions out of them admit transfers (see Section~\ref{asfsdfhfhgjmd}).
Let $I$ be an upper semilattice
and $f\colon I\to J$ be a function of finite type (not necessarily a functor) (see~\ref{dsdgfsfghjdghj}). The
transfer of $f$ is a functor of the form $f^{!}\colon J_{\ast}\to I_{\ast}$, where ${-}_\ast$ denotes the operation of adding a global minimum to a poset. It assigns to an element $a$ in $J_{\ast}$ the coproduct of $(f_\ast\leq a)$ in $I_\ast$ (see~\ref{sDGSDFHFGJ} and compare with~\cite{Botnan2020ART}). In the case $f$ is a homomorphism (it maps sup elements to sup elements, see~\ref{koikik}), its transfer can be characterised by the following universal property:
for every category $\mathcal{C}$ with an initial object, the functor
$(-)^{f^!}\colon \text{Fun}_\ast(I_\ast,\mathcal C)\to \text{Fun}_\ast(J_\ast,\mathcal C)$, given by precomposing with
$f^!$, is left adjoint to $(-)^{f_\ast}\colon \text{Fun}_\ast(J_\ast,\mathcal C)\to \text{Fun}_\ast(I_\ast,\mathcal C)$ (see~\ref{sdgdfhfh}). Thus, in this case, $(-)^{f^!}$ coincides with the functor of left Kan extension along $f_\ast$ (see~\cite{MR1712872}). Recall that left Kan extensions commute with colimits but do not in general commute with limits. Since the precomposition operation does commute with limits, we obtain an important property of
homomorphisms of finite type out of upper semilattices: left Kan extensions along them commute with both colimits and limits (see Propositions~\ref{asdsfgadfshfgjh} and~\ref{afsadagdfhbg}).
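For instance, for the subposet inclusion $f\colon \mathbb{N}\subset [0,\infty)$, the transfer $f^!\colon [0,\infty)_\ast\to \mathbb{N}_\ast$ sends $-\infty$ to $-\infty$ and a real number $t\geq 0$ to the coproduct in $\mathbb{N}_\ast$ of $\{-\infty\}\cup\{n\in\mathbb{N}\ |\ n\leq t\}$, which is $\lfloor t\rfloor$; its fibres are therefore $\{-\infty\}$ and the intervals $[k,k+1)$ for $k$ in $\mathbb{N}$.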
Left Kan extensions play an essential role
in the second part of our article whose focus is on tame functors. The notion of tameness of functors indexed by the posets $[0,\infty)^r$ has been central in both geometry and applied topology, as finiteness properties of such functors guarantee that various invariants can be defined and calculated.
By definition (see Definition~\ref{sdfubbeoru}), a functor $F\colon J\to \mathcal{C}$ is tame if
there is a finite poset $I$ and functors
$G\colon I\to \mathcal{C}$ and $f\colon I\to J$ for which
$F$ is isomorphic to the left Kan extension of $G$ along $f$.
The notion of tameness is restrictive and meaningful only when $J$ is an infinite poset, for example if it is a realisation.
Every tame functor is finitely encoded in the following sense (compare with~\cite{EMiller2}):
there is a finite poset $I$ and functors $G\colon I\to \mathcal{C}$ and
$g\colon J\to I$ for which $F$ is isomorphic to the composition $Gg$. The reverse implication is not true and finitely encoded functors may not be tame in general. However,
in the case $I$ is a finite consistent upper semilattice, we prove that a functor $F\colon \mathcal{R}(I)\to \mathcal{C}$ is tame if and only if there is a finite subposet
$V\subset (-1,0)$ and a functor $G\colon \mathcal{R}(I,V)_\ast\to \mathcal{C}$
for which $F$ is isomorphic to $Gf^!$ where $f^!\colon \mathcal{R}(I)\to \mathcal{R}(I,V)_\ast$ is the transfer of the inclusion $\mathcal{R}(I,V)\subset \mathcal{R}(I)$ (see~\ref{wertgf}). Thus tame functors indexed by $\mathcal{R}(I)$, when
$I$ is a finite consistent upper semilattice,
are exactly the functors which are constant on the fibers of the transfer
$f^!\colon \mathcal{R}(I)\to \mathcal{R}(I,V)_\ast$ for some $V$.
For example if $V=\emptyset$, then the fibers of the transfer $f^!\colon \mathcal{R}([0,n)^r)\to \mathcal{R}([0,n)^r,\emptyset)_\ast = [0,n)^r_\ast$ are of the form $[k_1,k_1+1)\times\cdots\times [k_r,k_r+1)$, where $k_i$ belongs to $[0,n)$.
This is a standard way of describing tame functors indexed by $[0,n)^r$ (see~\cite{MR3873177, MR3735858, MR3975559}).
Tame functors indexed by an upper semilattice form a particularly nice category.
For example, if the values have finite colimits and limits, then
so does the category of tame functors indexed by an upper semilattice. More generally,
we prove (see Theorem~\ref{afadfhsfgh}) that a model structure on the category $\mathcal{C}$ naturally extends to a model structure on tame functors indexed by an upper semilattice with values in $\mathcal{C}$.
This enables the use of homotopical and homological algebra tools to study tame functors indexed by upper semilattices.
There are situations, however, when we would like to understand homological properties of tame functors indexed by realisations which are not upper semilattices. Surprisingly, a lot can be described in such cases as well. The last Section~\ref{sdgadfhsgdb} is devoted to presenting how one might construct minimal resolutions and Betti diagrams, and how to calculate them using Koszul complexes, for tame functors indexed by rather general posets with values in the category of vector spaces. These results become particularly transparent in the case of functors indexed by realisations, which are described in Theorem~\ref{sdrtyhgf}. For example, the parental dimensions of elements of the realisation can be used to bound their homological dimensions.
\noindent
\textbf{Part I, posets.}
\section{Basic notions}\label{adsgehg}
In this section, we recall and introduce some basic concepts that might already be familiar. The reader may choose to skip this section and come back to it whenever the text refers to a specific notion.
\begin{point}\label{afadfhfgsh}
The standard poset of real numbers is denoted by $\mathbb{R}$.
Its subposet of non-negative real numbers is denoted by $[0,\infty)$, of
natural numbers $\{0,1,\ldots\}$ by $\mathbb{N}$, the first $n+1$ natural
numbers by $[n]$, $[a,b]:=\{x\in\mathbb{R} \ |\ a\leq x\leq b\}$,
and $(a,b]:=\{x\in\mathbb{R} \ |\ a< x\leq b\}$.
The product of posets $(I,\leq )$ and $(J,\leq)$ is the poset $(I\times J, \leq)$, where $(x,y)\leq (x_1,y_1)$ if $x\leq x_1$ and $y\leq y_1$.
For a set $S$ and a poset $I$, the symbol $I^S$ denotes the poset of all functions $f\colon S\to I$ with $f\leq g$ if $f(x)\leq g(x)$ for every $x$ in $S$. The poset $I^S$ is isomorphic to the $|S|$-fold product $I^{|S|}$.
The poset $[1]^S$ is called the \textbf{discrete cube} of dimension $|S|$
and the poset $[-1,0]^S$ is called the \textbf{geometric cube} of dimension $|S|$.
The inclusion poset of all subsets of $S$ is denoted by $2^S$. The function mapping $f\colon S\to [1]$ in $[1]^S$ to the subset $f^{-1}(1)\subset S$ is an isomorphism between the posets $[1]^S$ and $2^S$. This function is used to identify these two posets, and $2^S$ is also referred to as the discrete cube of dimension $|S|$.
Enlarge a set $S$ with two additional elements (denoted by $\bigwedge S$ and $\bigvee S$) to form a disjoint union $S\coprod\{\bigwedge S,\bigvee S\}$, and consider the following relation on this enlarged set:
elements in $S$ are incomparable and $\bigwedge S\leq s\leq \bigvee S$ for all $s$ in $S$.
This poset is denoted by $\Sigma S$ and called the \textbf{suspension} of $S$.
\end{point}
\begin{point}\label{dsdgfsfghjdghj}
Let $I$ be a poset and $a$ be its element.
For a subset $S\subset I$:
$S\leq a:=\{s\in S\ |\ s\leq a\}$, $S< a:=\{s\in S\ |\ s< a\}$, and $a\leq S:=\{s\in S\ |\ a\leq s\}$.
A poset $I$ is called of \textbf{finite type} if $I\leq a$ is finite for every $a$ in $I$.
A finite poset is of finite type. The poset $\mathbb{N}$ is infinite and of finite type. The poset $\mathbb{R}$ is not of finite type.
An element $x$ in $I$ is called a \textbf{parent} of $a$ if $x<a$ and
there is no element $y$ in $I$ such that $x<y<a$.
The phrase ``$a$ \textbf{covers} $x$'' is also commonly used
to describe the situation that $x$ is a parent of $a$.
The symbol ${\mathcal P}_I(a)$ or ${\mathcal P}(a)$, if $I$ is understood from the context, denotes the set of all parents of $a$.
For example, every element in $\mathbb{R}$ has an empty set of parents. In the poset $[1]$,
${\mathcal P}_{[1]}(0)=\emptyset$ and ${\mathcal P}_{[1]}(1)=\{0\}$. A poset $I$ is of finite type if and only if, for every $a$ in $I$,
the sets ${\mathcal P}_I(a)$ and $\{n\ |\ \text{there is a sequence } x_0<\cdots < x_n =a\text{ in $I$}\}$ are finite.
Let $I$ and $J$ be posets and $f\colon I\to J$ be a function (not necessarily preserving the poset relations).
For an element $a$ in $J$:
\[f \leq a:=\{x\in I\ |\ f(x)\leq a\}\ \ \ \ \ \ \ a\leq f :=\{x\in I\ |\ a\leq f(x)\}.\]
For example $(\text{id}_I\leq a)\subset I$ coincides with
$(I\leq a)\subset I$.
If $f\leq a$ is finite for every $a$ in $J$, then $f\colon I\to J$ is called of \textbf{finite type}. For example, if $I$ is finite, then every function $f\colon I\to J$ is of finite type.
\end{point}
\begin{point}\label{dsdgfsgjhdgh}
Let $S\subset I$ be a subset of a poset $I$.
If $(a\leq S)=S$ (i.e. $a\leq s$ for all $s$ in $S$), then $a$ is called an \textbf{ancestor} of $S$. If $(a< S)=S$ (i.e. $a< s$ for all $s$ in $S$), then $a$ is called a \textbf{proper ancestor} of $S$. Every proper ancestor of $S$ is its ancestor. Every element is a proper ancestor of the empty subset.
If $a$ is an ancestor of $S$ for which there is no other ancestor $b$ of $S$ such that $a<b$, then $a$ is called an \textbf{inf} of $S$.
For example, an element $a$ in $I$ is an inf of the empty subset if and only if
there is no $b$ in $I$ for which $a<b$. Such an element is also called \textbf{maximal} in $I$. In general, an element is an inf of $S$ if and only if it is maximal in
the subposet $\{a\in I\ |\ (a\leq S)=S\}\subset I$.
An element $a$ in $I$ is an inf of the entire $I$ if and only if $a\leq x$ for every $x$ in $I$. If such an element
exists, then it is unique and is called the \textbf{global minimum} of $I$.
Two different inf elements of a subset are not comparable.
In general, $S$ can have many inf elements. In the case $S$ has only one inf element, this element is called the \textbf{product} of $S$ and is denoted either by $\bigwedge_I S$ or $\bigwedge S$, if $I$ is clear from the context. Explicitly, the product of $S$ is an element $\nu$ in $I$ such that $(\nu \leq S)=S$ and, for every $a$ in $I$ for which $(a\leq S)=S$, the relation $a\leq \nu$ holds. The product $\bigwedge_I\{x,y\}$ is also denoted as $x\wedge_I y$ or $x\wedge y$. For example, if the product of the empty subset of $I$ exists, then $I$ has a unique maximal element given by $\bigwedge_I\emptyset $. This element may, however, fail to be the global maximum of $I$.
The element $\bigwedge_I I $, if it exists, is the global minimum of $I$.
\end{point}
\begin{point}\label{dsdgdfhfgjh}
Let $S\subset I$ be a subset of a poset $I$.
If $(S\leq a)=S$ ($s\leq a$ for all $s$ in $S$), then $a$ is called a \textbf{descendent} of $S$.
For example, every element is a descendent of the empty subset.
If $a$ is a descendent of $S$ for which there is no other descendent $b$ of $S$ such that $b<a$, then $a$ is called a \textbf{sup} of $S$. For example, an element $a$ in $I$ is a sup of the empty subset if and only if there is no $b$ in $I$ for which $b<a$. Such an element is also called \textbf{minimal} in $I$. In general, an element is a sup of $S$ if and only if it is minimal in the subposet
$\{a\in I\ |\ (S\leq a)=S\}\subset I$.
An element $a$ in $I$ is a sup of the entire $I$ if and only if $x\leq a$ for every $x$ in $I$. If such an element
exists, then it is unique and is called the \textbf{global maximum} of $I$.
Two different sup elements of a subset are not comparable.
In general, $S$ can have many sup elements. In the case $S$ has only one sup element, this element is called the \textbf{coproduct} of $S$ and is denoted either by $\bigvee_I S$ or
$\bigvee S$, if $I$ is clear from the context.
Explicitly, the coproduct of $S$ is an element $\nu$ in $I$ such that $(S\leq\nu)=S$, and for every $a$ in $I$ for which $(S\leq a)=S$, the relation $ \nu\leq a$ holds. The coproduct $\bigvee_I\{x,y\}$ is also denoted as $x\vee_I y$ or $x\vee y$. For example, if
the coproduct of the empty subset of $I$ exists, then $I$ has a unique minimal element given by $\bigvee_I\emptyset$. This element may fail however to be the global minimum of $I$.
The element $\bigvee_I I$, if it exists, is the global maximum of $I$.
The product can be expressed as a coproduct $\bigwedge_I S = \bigvee_I \left(\bigcap _{x\in S}(I\le x)\right)$, where the equality should be read as follows: the coproduct on the
right exists if and only if the product on the left exists, in which case they are equal.
Thus, if every subset of $I$ has the coproduct, then every subset has also the product.
\end{point}
\begin{point}\label{sdfgbsdhgfsh}
Every subset of $[0,\infty)$ has the product. Every subset of $[-1,0]$ has both the product and the coproduct.
Let $S$ be a set.
If all subsets of a poset $I$ have products, then the same is true for all subsets of
$I^S$, where the product of $T\subset I^S$ is given by the function
mapping $x$ in $S$ to the product $\bigwedge_{I}\{f(x)\ |\ f\in T\}$ in $I$.
If all subsets of $I$ have coproducts, then the same is true for all subsets of
$I^S$, where the coproduct of $T\subset I^S$ is given by the function
mapping $x$ in $S$ to the coproduct $\bigvee_{I}\{f(x)\ |\ f\in T\}$ in $I$.
In the suspension $\Sigma S$ (see~\ref{afadfhfgsh}), the element
$\bigwedge S$ is its global minimum, and the element $\bigvee S$ is its global maximum.
The elements $\bigwedge S$ and $\bigvee S$ are respectively the product and coproduct of any subset $U\subset S$ of size at least $2$. The suspension $\Sigma S$ is of finite type if and only if $S$ is finite.
The discrete cube $2^S$ (see~\ref{afadfhfgsh}) is a poset whose every subset $Z$ has the coproduct and consequently the product which are given respectively by the union $\bigvee Z=\bigcup_{\sigma\in Z}\sigma$ and the intersection $\bigwedge Z=\bigcap_{\sigma\in Z}\sigma$. The subset $\emptyset\subset S$ is the global minimum of $2^S$
and $S\subset S$ is the global maximum.
Note also that every parent of $U\subset S$ in $2^S$
is of the form $U\setminus \{x\}$ for $x$ in $U$. Thus,
the function $x\mapsto U\setminus \{x\}$ is a bijection between
$U$ and $\mathcal{P}_{2^S}(U)$ (see~\ref{dsdgfsfghjdghj}).
\end{point}
\begin{point}\label{dasgfsdfhhfk}
A poset that has a global minimum is called \textbf{unital}. For example $[-1,0]$ is unital and
$\mathbb{R}$ is not. If $I$ is unital, then so is $I^S$, for every set $S$, with its global minimum given by the constant function mapping every $s$ in $S$ to the global minimum of $I$.
A function $f\colon I\to J$ between unital posets is called \textbf{unital} if it maps the global minimum in $I$ to the global minimum in $J$.
For a poset $I$, the symbol $I_{\ast}$ denotes the poset formed by adding an additional element $-\infty$ to $I$ and setting
$-\infty< x$ for all $x$ in $I$. The element $-\infty$ in $I_\ast$ is its global minimum.
Any function $f\colon I\to J$ extends uniquely
to a unital function $f_\ast\colon I_\ast\to J_\ast$ making the following diagram commutative:
\[\begin{tikzcd}[row sep=small]
I\ar{r}{f} \ar[hook]{d} & J\ar[hook]{d}\\
I_\ast\ar{r}{f_\ast} & J_\ast
\end{tikzcd}\]
Note that $(f_\ast\leq -\infty)=\{-\infty\}$, and $(f_\ast\leq a) = (f \leq a )\cup \{-\infty\}$ if $a$ is in $J$.
Thus, a function $f\colon I\to J$ is of finite type if and only if $f_\ast\colon I_\ast\to J_\ast$ is of finite type. Furthermore, $f_\ast\leq a$ is non-empty for every $a$ in $J_\ast$.
The function $J^I\to J_{\ast}^{I_{\ast}}$, mapping $f$ to $f_\ast$, is injective and preserves the poset relations: if $f\leq g$, then $f_\ast \leq g_\ast$.
\end{point}
\begin{point}\label{sdgsdgjdghkfhjk}
For a poset $(I,\leq)$, the same symbol $I$ denotes also the category whose set of
objects is $I$ and where
$\text{mor}_I(x, y)$ is either empty if $x\not\leq y$, or has cardinality $1$ in case
$x\leq y$.
A \textbf{functor} between poset categories $I$ and $J$ is a function $f\colon I\to J$ preserving the poset relations:
if $x\leq y$ in $I$, then $f(x)\leq f(y)$ in $J$. For example
the function $J^{I}\to J_{\ast}^{I_{\ast}}$, described in~\ref{dasgfsdfhhfk}, is a functor.
A poset isomorphism is also a functor.
The category whose objects are posets and morphisms are functors is denoted by $\text{Posets}$.
A function $f\colon I\to J$ is called a \textbf{subposet inclusion} if, for every $x$ and $y$ in $I$, $x\leq y$ if and only if $f(x)\leq f(y)$ in $J$. A subposet inclusion is always an injective functor. Not all injective functors however are subposet inclusions. If $I\subset J$ is a subposet of $J$, then this inclusion is a subposet inclusion. If $f\colon I\to J$ is a subposet inclusion, then it induces a poset isomorphism between $I$ and its image subposet $f(I)\subset J$.
The symbol $\text{Fun}(I,J)$ denotes the subposet of $J^{I}$ (see~\ref{afadfhfgsh})
whose elements are functors. Since in this poset $f\leq g$ if $f(x)\leq g(x)$ for all $x$ in $I$, the category $\text{Fun}(I,J)$ coincides with the category whose morphisms are natural transformations.
More generally, for a poset $I$ and a category $\mathcal{C}$, the symbol $\text{Fun}(I,\mathcal{C})$ denotes the category of functors $I\to\mathcal{C}$ with natural transformations as morphisms.
If $f\colon I\to J$ is a functor of posets, then the functor given by precomposition with $f$ is denoted by
$(-)^f\colon \text{Fun}(J,\mathcal{C})\to \text{Fun}(I,\mathcal{C})$.
The global minimum (see~\ref{dsdgfsgjhdgh}) of a poset coincides with the initial object in the associated category.
If $I$ is a unital poset (it has the global minimum) and $\mathcal{C}$ is a category with an initial object, then a functor $f\colon I\to \mathcal{C}$ is called \textbf{unital} if it maps the global minimum in $I$ to an initial object
in $ \mathcal{C}$. The symbol $\text{Fun}_\ast(I, \mathcal{C})$ denotes the category whose objects are unital functors from $I$ to $\mathcal{C}$ and morphisms are natural transformations.
Let $I$ be a poset and $\mathcal{C}$ be a category with a unique initial object. Any functor $F\colon I\to \mathcal{C}$ can be extended uniquely to a unital functor
$F_\ast\colon I_\ast\to \mathcal{C}$ whose composition with $I\subset I_\ast$ is $F$.
The association $F\mapsto F_\ast$ is an isomorphism of categories between $\text{Fun}(I, \mathcal{C})$ and $\text{Fun}_{\ast}(I_\ast, \mathcal{C})$.
We use this isomorphism to identify these categories.
\end{point}
\begin{point}\label{asdfsgadfsfhjejk}
Let $I$ be a poset. A \textbf{lax functor} $T\colon I\rightsquigarrow \text{Posets}$
(\cite[Definition 1]{MR347936})
assigns to every element $a$ in $I$ a poset $T_a$ and to every relation $a\leq b$ in $I$
a functor $T_{a\leq b}\colon T_a\to T_b $. These functors are required to satisfy two conditions. First, $T_{a\leq a}$ is the identity for all $a$ in $I$. Second,
$T_{a\leq c}\leq T_{b\leq c}T_{a\leq b}$
for all $a\leq b\leq c$ in $I$. For example, every functor is a lax functor,
as in this case the equality $T_{a\leq c}= T_{b\leq c}T_{a\leq b}$ holds.
Let $T\colon I\rightsquigarrow \text{Posets}$ be a lax functor.
Define $\text{Gr}_I T : =\{(a,f)\ |\ a\in I, f\in T_a\}$. For $(a,f)$ and $(b,g)$ in $\text{Gr}_IT $, set $(a,f)\leq (b,g)$ if $a\leq b$ in $I$ and
$T_{a\leq b}(f)\leq g$ in $T_b$. For example, $(a,f)\leq (a,g)$ if and only if
$f\leq g$ in $T_a$. The conditions required to be satisfied by lax functors guarantee the transitivity of the relation $\leq$ on
$\text{Gr}_IT$.
The poset $(\text{Gr}_I T,\leq)$
is called the \textbf{Grothendieck construction} of $T$. In the case $T$ is the constant functor with value $J$, the poset $\text{Gr}_I T$ is isomorphic to the product $I\times J$.
The function $\pi\colon \text{Gr}_IT\to I$, mapping $(a,f)$ to $a$, is a functor called the \textbf{standard projection}. For $a$ in $I$, the function $\text{in}_a\colon T_a\to \text{Gr}_IT$, mapping $f$ to $(a,f)$, is also a functor called
the \textbf{standard inclusion}. The functor $\text{in}_a$ is a subposet inclusion (see~\ref{sdgsdgjdghkfhjk}).
\end{point}
\begin{prop}\label{adfgsgdhjdg} Let $I$ be a poset, $T\colon I\rightsquigarrow \text{\rm Posets}$ a lax functor, and $S\subset \text{\rm Gr}_IT$ a non-empty subset.
\begin{enumerate}
\item Assume $\pi(S)=\{a\}$. Then an element $(b,g)$ is a sup of
$S$ in $\text{\rm Gr}_IT$ if and only if $b=a$ and $g$ is a sup of $\{f\ |\ (a,f)\in S\}$ in $T_a$.
\item Let $b$ be a sup of $\pi(S)$ in $I$. Then an element $(b,m)$ is a sup of $S$
in $\text{\rm Gr}_IT$ if and only if $m$ is a sup of
$\{T_{a\leq b}f\ |\ (a,f) \text{ in } S\}$ in $T_b$.
\end{enumerate}
\end{prop}
\begin{proof}
Statement (1) is a particular case of (2).
Statement (2) is a consequence of the equivalence: $T_{a\leq b}f\leq g\leq m$ in $T_b$, for every $(a,f)$ in $S$, if and only if $(a,f)\leq (b,g)\leq (b,m)$ in $\text{\rm Gr}_IT$,
for every $(a,f)$ in $S$.
\end{proof}
\begin{point}\label{dadgdfhg}
Let $f\colon I\to J$ be a function (not necessarily a functor). Define
$J[f]\subset 2^I$ to be the subposet whose elements are
subsets of the form $(f\leq a)\subset I$, for $a$ in $J$.
Let $f\!\!\leq\colon J\to J[f]$ be the function mapping $a$ to $f\leq a$. Note that if $a\leq b$ in $J$, then $(f\leq a)\subset (f\leq b)$,
which means $f\!\!\leq$ is a functor. Consequently, its fibers $(f\!\!\leq)^{-1}(f\leq a)\subset J$, for $a$ in $J$, have the following property: if $x\leq y$ in $J$ belong to $(f\!\!\leq)^{-1}(f\leq a)$, then so does any $z$ in $J$ such that $x\leq z\leq y$. Recall that such subposets of $J$ are called \textbf{intervals} or \textbf{convex}.
\end{point}
\begin{point}\label{werqergdfghjg}
Let $f\colon I\to J$ be a functor of posets and $\mathcal{C}$ a category.
Recall that a functor $F\colon J\to \mathcal{C}$ is called a \textbf{left Kan extension} of $G\colon I\to \mathcal{C}$ along $f$ (see~\cite{MR1712872}), if there is a natural transformation
$\alpha\colon G\to Ff$ satisfying the following universal property: for every functor $H\colon J\to \mathcal{C}$,
the function $\text{Nat}_{J}(F,H)\to \text{Nat}_I(G,Hf)$,
mapping $\phi\colon F\to H$ to the following composition, called the \textbf{adjoint} to $\phi$, is a bijection:
\[
\begin{tikzcd}
G\ar{r}{\alpha} & Ff\ar{r}{\phi^f} &
Hf
\end{tikzcd}
\]
This universal property has two consequences. The first is uniqueness: if $F$ and $F'$ are two left Kan extensions of $G$ along $f\colon I\to J$,
and $\alpha\colon G\to Ff$ and $\alpha'\colon G\to F'f$ are natural transformations satisfying the above universal property, then
there is a unique isomorphism $\psi\colon F\to F'$ for which
$\alpha' =\psi^f \alpha$. Because of this uniqueness, if it exists, a left Kan extension
of $G\colon I\to \mathcal{C}$ along $f\colon I\to J$ is denoted by $f^kG\colon J\to \mathcal{C}$.
Functoriality of left Kan extensions is another consequence of the universal property. Let $\phi\colon G\to G'$ be a natural transformation between two functors $G,G'\colon I\to\mathcal{C}$. If these functors admit the left Kan extensions along $f$, then there is a unique
natural transformation $f^k\phi\colon f^kG\to f^kG'$ for which the following square commutes:
\[\begin{tikzcd}
G\ar{d}[swap]{\alpha}\ar{rr}{\phi} && G'\ar{d}{\alpha'}\\
(f^kG)f \ar{rr}{(f^k\phi)^f}&& (f^kG')f
\end{tikzcd}\]
If all functors in $ \text{Fun}(I,\mathcal{C})$ admit
left Kan extensions along $f$, then the association
$f^k\colon \text{Fun}(I,\mathcal{C})\to \text{Fun}(J,\mathcal{C})$ is a functor which is left adjoint to the precomposition with $f$ functor
$(-)^f\colon \text{Fun}(J,\mathcal{C})\to \text{Fun}(I,\mathcal{C})$. For example, this happens if
$\mathcal{C}$ is closed under finite colimits and
$f\colon I\to J$ is of finite type. In this case,
the left Kan extension of $G$ along $f$ is given by
$a\mapsto \text{colim}_{f\leq a} G$ (see~\cite{MR1712872}).
This description of the left Kan extension, in the case $f$ is of finite type and $\mathcal{C}$ is closed under finite colimits,
has several consequences. For example, $f^kG$
is isomorphic to a functor given by a composition (see~\ref{dadgdfhg}):
\[
\begin{tikzcd}
J\ar{r}{f\leq} &J[f]\ar{r} &\mathcal{C}
\end{tikzcd}
\]
The restrictions of $f^kG$ to the fibres $(f\!\!\leq)^{-1}(f\leq a)= \{b\in J\ |\ (f\leq b) =(f\leq a)\}\subset J$, for $a$ in $J$, are therefore isomorphic to constant functors.
When $I$ is finite, the poset $J[f]$ is also finite, and so the left Kan extension $f^kG$ factors through the finite poset $J[f]$. Such functors have been considered for example in~\cite[Definition 2.11]{EMiller1}.
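For instance, if $I=\{a,b\}$ is the poset with two incomparable elements, $J=[1]$, and $f\colon I\to J$ maps both elements to $1$, then, for any $G\colon I\to \mathcal{C}$, the formula above gives $(f^kG)(0)=\text{colim}_{\emptyset}\, G$, an initial object of $\mathcal{C}$, and $(f^kG)(1)=G(a)\sqcup G(b)$.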
\end{point}
\begin{point}\label{asfdfhgdhj}
Let $\mathcal{C}$ be a category closed under finite colimits, $f\colon I\subset J$ a subposet inclusion (see~\ref{sdgsdgjdghkfhjk}) of finite type, and $G\colon I\to \mathcal{C}$ a functor.
Since every $a$ in $I$ is the terminal object in $f\leq f(a)$, the morphism $G(a)\to \text{colim}_{f\leq a} G$ is an isomorphism. Consequently the natural transformation $\alpha\colon G\to (f^k G)f$, adjoint to $\text{id}\colon f^kG\to f^kG$, is an isomorphism.
This implies that, for every pair of functors $G,H\colon I\to \mathcal{C}$, the function $f^k\colon \text{Nat}_I(H,G)\to
\text{Nat}_J(f^kH,f^kG)$, mapping $\phi$ to $f^k\phi$, is a bijection. Injectivity is clear from what has already been stated. For surjectivity, consider a natural transformation $\psi\colon f^kH\to f^kG$. Let $\phi\colon H\to G$ be the unique natural transformation which makes the following left triangle commutative, and where the horizontal arrow $\psi'$ represents the adjoint to $\psi$ (such $\phi$ exists since $\alpha$ is an isomorphism):
\[\begin{tikzcd}
H\ar{r}{\psi'}\ar{dr}[swap]{\phi} & (f^kG)f\\
& G\ar{u}[swap]{\alpha}
\end{tikzcd}
\ \ \ \ \ \ \ \ \ \ \
\begin{tikzcd}
f^kH\ar{r}{\psi}\ar{dr}[swap]{f^k\phi} & f^kG\\
& f^kG\ar{u}[swap]{\text{id}}
\end{tikzcd}\]
By taking adjoints to $\psi'$ and $\alpha$ we obtain a commutative triangle depicted
on the right above, showing that $\psi=f^k\phi$.
\end{point}
\section{Dimension and parental dimension}\label{dsfsefseff}
Let $(I,\leq)$ be a poset and $x$ be an element in $I$.
In this section we propose two ways of measuring complexity of expressing $x$ as a sup (see~\ref{dsdgdfhfgjh}).
\begin{point}\label{adsgdgfjhgh}
Let
\[\Phi_I(x):=\left\{U\ |\
\begin{subarray}{c} U\subset I \text{ is finite, has a proper ancestor, $x$ is a sup of it, and }\\
\text{ $x$ is not a sup of any $S$ such that $\emptyset\not= S\subsetneq U$}\end{subarray}
\right\}.\]
Since $\emptyset$ belongs to $\Phi_I(x)$, this collection is non-empty.
The following extended (containing $\infty$) number is called the \textbf{dimension} of $x$:
\[ \text{dim}_I(x):= \text{sup}\{ |U|\ |\ U\in \Phi_I(x)\}.\]
An element $U$ in $\Phi_I(x)$, for which $\text{dim}_I(x)=|U|$, is said to \textbf{realise} $\text{dim}_I(x)$.
Since any subset $U\subset I$, for which $x$ is a sup, is contained in $I\leq x$, there is an inequality $\text{dim}_I(x)\leq |I\leq x|$.
In particular, if $I$ is of finite type (see~\ref{dsdgfsfghjdghj}), then $\text{dim}_I(x)$ is finite for every $x$.
The empty set realises $\text{dim}_I(x)$ if and only if
$\text{dim}_I(x)=0$, which happens if and only if $x$ is minimal in $I$ (see~\ref{dsdgdfhfgjh}).
The set $\{x\}$ is the only set realising $\text{dim}_I(x)$ if and only if $\text{dim}_I(x)=1$.
If $\text{dim}_I(x)>1$, then no set realising $\text{dim}_I(x)$ contains $x$.
Here are some examples:
\begin{itemize}
\item In the poset $[n]$,
$\text{dim}_{[n]}(k)=\begin{cases}
0 &\text{ if } k=0\\
1 & \text{ if } k>0
\end{cases}$.
\item In the poset $[-1,0]$,
$\text{dim}_{[-1,0]}(x)=\begin{cases}
0 &\text{ if } x=-1\\
1 & \text{ if } x>-1
\end{cases}$.
\item In the poset $\mathbb{R}$, $\text{dim}_{\mathbb{R}}(x)=1$ for every $x$ in $\mathbb{R}$.
\item In the poset $2^S$ (see~\ref{afadfhfgsh}), $\text{dim}_{2^S}(\sigma)=|\sigma|$ and
this dimension is realised by, for example, $\{\{x\}\ |\ x\in \sigma\}$.
\item In the suspension poset $\Sigma S$ (see~\ref{afadfhfgsh}),
\[\text{dim}_{\Sigma S}(x)=\begin{cases}
2 &\text{ if $x =\bigvee S$ and $ |S|\geq 2$} \\
1 & \text{ if either $x\in S$, or $x =\bigvee S$ and $ |S|< 2$}\\
0 & \text{ if $x= \bigwedge S$}
\end{cases}\]
If $|S|\geq 2$, then every subset of $S$ of size $2$ realises
$\text{dim}_{\Sigma S}(\bigvee S)=2$.
\end{itemize}
\end{point}
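The definition can be checked by brute force on small posets. The following minimal sketch (an illustration only; the encoding of posets and all names are ours) computes $\text{dim}_I(x)$ directly from the definition for a finite poset, given as a list of elements together with a relation predicate, and confirms that $\text{dim}_{2^S}(\sigma)=|\sigma|$ for $S=\{0,1,2\}$:
\begin{verbatim}
# Brute-force sketch (illustrative only): dim_I(x) for a small finite poset,
# given as a list of elements P and a predicate leq(a, b) meaning a <= b in I.
from itertools import combinations

def is_sup(P, leq, x, U):
    # x is a sup of U: x is a descendent of U, and no descendent of U lies strictly below x
    if not all(leq(u, x) for u in U):
        return False
    return not any(b != x and leq(b, x) and all(leq(u, b) for u in U) for b in P)

def dim(P, leq, x):
    below = [a for a in P if leq(a, x)]
    best = 0
    for r in range(len(below) + 1):
        for U in map(set, combinations(below, r)):
            if U and not any(all(leq(a, u) and a != u for u in U) for a in P):
                continue               # U has no proper ancestor
            if not is_sup(P, leq, x, U):
                continue
            if any(is_sup(P, leq, x, set(V)) for k in range(1, r)
                   for V in combinations(U, k)):
                continue               # x is a sup of a non-empty proper subset of U
            best = max(best, r)
    return best

# the discrete cube 2^S for S = {0, 1, 2}: the dimension of a subset is its size
S = {0, 1, 2}
cube = [frozenset(T) for r in range(len(S) + 1) for T in combinations(S, r)]
print([dim(cube, lambda a, b: a <= b, sigma) for sigma in cube])
\end{verbatim}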
The dimension of an element $x$ depends on the global properties of the subposet $I\leq x$. This dimension can be approximated by a more tractable parental dimension which for elements in a finite type poset depends only on their parents (see Proposition~\ref{sfgasg}).
\begin{point}\label{aDFGSDFFHDHGJ}
Let
\[\Psi_I(x):=
\left\{ U\ |\ \begin{subarray}{c} U\subset (I<x) \text{ is finite, has an ancestor, and }\\
\text{$x$ is a sup of every two-element subset of $U$}\end{subarray}\right\}.
\]
Since $\emptyset$ belongs to $\Psi_I(x)$, this collection is non-empty.
The following extended number is called the \textbf{parental dimension} of $x$:
\[ \text{par-dim}_I(x):= \text{sup}\{ |U|\ |\ U\in \Psi_I(x)\}.\]
An element $U$ in $\Psi_I(x)$, for which $\text{par-dim}_I(x)=|U|$, is said to \textbf{realise} $\text{par-dim}_I(x)$.
Here are some examples:
\begin{itemize}
\item If $I$ is $[n]$, or $[-1,0]$, or $\mathbb{R}$, then $\text{par-dim}_{I}(x)=\text{dim}_{I}(x)$
for all $x$ in $I$.
\item In the poset $2^S$, for every $\sigma\subset S$,
$\text{par-dim}_{2^S}(\sigma)=|\sigma|=\text{dim}_{2^S}(\sigma)$ and the parental dimension is realised
by, for example,
$\{\sigma\setminus \{x\}\ |\ x\in \sigma\}$.
\item In the suspension poset $\Sigma S$, $\text{par-dim}_{\Sigma S}(\bigvee S) = \text{max}\{|S|,1\}$, and, if $S$ is non-empty, then $S$ realises
$\text{par-dim}_{\Sigma S}(\bigvee S)$. If $S$ has at least three elements, then
there is a strict inequality
$\text{dim}_{\Sigma S}(\bigvee S)=2< |S|=\text{par-dim}_{\Sigma S}(\bigvee S)$.
\end{itemize}
The empty set realises $\text{par-dim}_I(x)$ if and only if $\text{par-dim}_I(x)=0$, which happens if and only if $x$ is minimal in $I$.
Thus, $\text{par-dim}_I(x)=0$ if and only if $\text{dim}_I(x)=0$.
The equality $\text{par-dim}_I(x)=1$ holds if and only if there is $y< x$ in $I$ and $x$ is not a sup of any two-element subset of $I<x$ that has an ancestor.
Assume this is the case and suppose there is $S$ in $\textbf Phi_I(x)$ (see~\ref{adsgdgfjhgh}) such that $|S|\geq 2$.
Choose $s$ in $S$. Since $x$ is not a sup of $S\setminus\{s\}$, there is
$z<x$ for which $(S\setminus \{s\})\subset (I\leq z)$. Moreover $s<x$, the proper ancestor of $S$ is an ancestor of $\{s,z\}$, and
$x$ is a sup of $\{s,z\}$, which contradicts the assumption $\text{par-dim}_I(x)=1$. Thus every element of
$\textbf Phi_I(x)$ is of size at most $1$, and hence $\text{dim}_I(x)=1$.
We have just proved:
\end{point}
\begin{prop} \label{hryfjhxfh}
Let $I$ be a poset.
\begin{enumerate}
\item $\text{\rm dim}_I(x)=0$ if and only if $\text{\rm par-dim}_I(x)=0$.
\item $\text{\rm dim}_I(x)=1$ if and only if $\text{\rm par-dim}_I(x)=1$.
\item $\text{\rm dim}_I(x)\geq 2$ if and only if $\text{\rm par-dim}_I(x)\geq 2$.
\end{enumerate}
\end{prop}
One reason why parental dimension is easier to calculate is:
\begin{lemma}\label{dadgsdsgfhn}
Let $I$ be a poset, $x$ its element, and $U$ be in $\textbf Psi_I(x)$.
Assume $\alpha\colon U\to I$ is a function such that $u\leq \alpha(u)<x$,
for every $u$ in $U$. Then $\alpha$ is injective, in particular $|\alpha(U)|=|U|$, and
its image $\alpha(U)$ belongs to $\textbf Psi_I(x)$.
\end{lemma}
\begin{proof}
Let $s$ and $u$ be elements in $U$. If $\alpha(s)=\alpha(u)$, then the relations $s\leq \alpha(s)=\alpha(u)\geq u$ and $\alpha(s)<x$ imply that $x$ is not a sup of $\{s,u\}$. This can happen only if $s=u$, as $U$ belongs to $\textbf Psi_I(x)$.
Thus
$\alpha$ is injective
and $|U|=|\alpha(U)|$.
If $\alpha(s)\not = \alpha(u)$, then the relations $s\leq \alpha(s)<x>\alpha(u)\geq u$, together with the fact that $x$ is a sup of $\{s,u\}$, imply that $x$ is also a sup of
$\{\alpha(s),\alpha(u)\}$. Furthermore, any ancestor of $U$ is also an ancestor of $\alpha(U)$. The set $\alpha(U)$ therefore belongs to $\textbf Psi_I(x)$.
\end{proof}
Lemma~\ref{dadgsdsgfhn} can be used to prove the following proposition which
is the reason behind choosing the name parental dimension.
\begin{prop}\label{sfgasg}
If $I$ is a poset of finite type, then, for every $x$ in $I$:
\[
\text{\rm par-dim}_I(x)=\text{\rm max}\{|S|\ |\ S\subset {\mathcal P}(x)\text{ and $S$ has an ancestor}\}
\]
\end{prop}
\begin{proof}
Since $x$ is a sup of every two-element subset of ${\mathcal P}(x)$, the right side of the claimed equality is less than or equal to the left side.
Let $U$ be in $\textbf Psi_I(x)$. Since $I$ is of finite type, for every $u$ in $U$ there is $\alpha(u)$ in $\mathcal{P}(x)$ for which $u\leq \alpha(u)$. According to Lemma~\ref{dadgsdsgfhn},
$|\alpha(U)|=|U|$ and $\alpha(U)$ belongs to $\textbf Psi_I(x)$, so the left side of the claimed equality is less than or equal to the right side.
\end{proof}
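Proposition~\ref{sfgasg} suggests a direct computation of the parental dimension for finite posets. The following Python sketch (same illustrative encoding as before; all helper names are ours) lists the parents of $x$ and searches for the largest subset of parents admitting a common ancestor.
\begin{verbatim}
from itertools import combinations

def parents(leq, elements, x):
    below = [y for y in elements if leq(y, x) and y != x]
    # p is a parent of x if nothing in the poset lies strictly between p and x.
    return [p for p in below
            if not any(leq(p, z) and leq(z, x) and z not in (p, x)
                       for z in below)]

def par_dim(leq, elements, x):
    # The largest set of parents of x admitting a common ancestor.
    P = parents(leq, elements, x)
    best = 0
    for r in range(1, len(P) + 1):
        for S in combinations(P, r):
            if any(all(leq(w, s) for s in S) for w in elements):
                best = max(best, r)
    return best

# Example: in 2^{1,2} the top element has the two singletons as parents and the
# empty set as their common ancestor, so its parental dimension is 2.
elements = [frozenset(), frozenset({1}), frozenset({2}), frozenset({1, 2})]
print(par_dim(lambda a, b: a <= b, elements, frozenset({1, 2})))  # 2
\end{verbatim}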
There are two numbers assigned to $x$ in $I$, its dimension $\text{dim}_I(x)$ and
its parental dimension $\text{par-dim}_I(x)$. According to Proposition~\ref{hryfjhxfh}, if one of these dimensions is $0$ or $1$ then so is the other. In general the parental dimension always bounds the dimension:
\begin{prop}\label{adgfdsfhgf}
For every $x$ in a poset $I$, $\text{\rm dim}_I(x)\le \text{\rm par-dim}_I(x)$.
\end{prop}
\begin{proof}
The cases when $\text{\rm dim}_I(x)$ is $0$ and $1$ follow from Proposition~\ref{hryfjhxfh}.
Let $U$ be in $\textbf Phi_I(x)$, with $\lvert U\rvert \ge2$. Since, for every $u$ in $U$, the element $x$
is not a sup of $U\setminus\{u\}$, there is $s_u$ in $I$ such that
$u'\leq s_u<x$ for every $u'$ in $U\setminus\{u\}$. If $u_0$ and $u_1$ are different elements in
$U$, then $x$ being a sup of $U$ implies that $s_{u_0}$ and $s_{u_1}$ are also different and $x$ is a sup of $\{s_{u_0},s_{u_1}\}$. Moreover, any proper ancestor of $U$
is also an ancestor of $S:=\{s_u\ |\ u\in U\}$.
Thus, $S$ has the same size as $U$ and belongs to $\textbf Psi_I(x)$.
For every element $U$ in $\textbf Phi_I(x)$,
we have constructed an element $S$ in $\textbf Psi_I(x)$ of the same size as $U$.
\end{proof}
Note that the inequality in~\ref{adgfdsfhgf} can be strict (see~\ref{aDFGSDFFHDHGJ}).
\begin{prop}\label{ghjkghjk}
Let $I$ and $J$ be posets. For every $x$ in $I$ and $y$ in $J$:
\begin{enumerate}
\item $\text{\rm dim}_{I \times J}(x,y) = \text{\rm dim}_I(x) + \text{\rm dim}_J(y)$,
\item $\text{\rm par-dim}_{I \times J}(x,y) = \text{\rm par-dim}_I(x) + \text{\rm par-dim}_J(y)$.
\end{enumerate}
\end{prop}
\begin{proof}
(1):\quad
First, we show $\text{\rm dim}_{I \times J}(x,y) \ge \text{\rm dim}_I(x) + \text{\rm dim}_J(y)$.
If $\text{\rm dim}_I(x)=0$, then $x$ is minimal in $I$. In this case every
element of $\textbf Phi_{I\times J}(x,y)$ is of the form $\{x\}\times U$, where $U$ belongs to $\textbf Phi_{J}(y)$, and hence the inequality is clear. The case $\text{\rm dim}_J(y)=0$ is symmetric.
Assume $\text{\rm dim}_I(x)\geq 1$ and $\text{\rm dim}_J(y)\geq 1$.
Let $S$ and $U$ belong respectively to $\textbf Phi_{I}(x)$ and $\textbf Phi_{J}(y)$. Let $x'$ be a proper ancestor of $S$ and $y'$ be a proper ancestor of $U$. Then
$\lvert S\rvert +\lvert U\rvert =\lvert (S\times \{y'\})\cup (\{x'\}\times U)\rvert$. Since $(S\times \{y'\})\cup (\{x'\}\times U)\subset I\times J$ belongs to $\textbf Phi_{I\times J}(x,y)$, we get $\text{\rm dim}_{I \times J}(x,y)\geq \lvert S\rvert +\lvert U\rvert$, which gives the desired inequality.
To show $\text{\rm dim}_{I \times J}(x,y) \leq \text{\rm dim}_I(x) + \text{\rm dim}_J(y)$, consider $W$ in $\textbf Phi_{I\times J}(x,y)$. For every element $(a,b)$ in $W$, the set $W$
cannot contain a subset of the form $\{(a',b),(a,b')\}$, where $a'\not=a$
and $b'\not = b$, otherwise $(x,y)$ would be a sup of $W\setminus\{(a,b)\}$.
Consequently, $\lvert W\rvert \le \lvert \text{pr}_I(W)\rvert + \lvert \text{pr}_J(W)\rvert$, where
$\text{pr}_I$ and $\text{pr}_J$ denote the projections. Moreover, $\text{pr}_I(W)$ and $\text{pr}_J(W)$ have, respectively, $x$ and $y$ as sup, so $\lvert \text{pr}_I(W)\rvert + \lvert \text{pr}_J(W)\rvert\le \text{dim}_I(x)+\text{dim}_J(y)$.
\noindent
(2):\quad As before, we start by showing $\text{\rm par-dim}_{I \times J}(x,y) \ge \text{\rm par-dim}_I(x) + \text{\rm par-dim}_J(y)$. We argue in the same way.
Let $S$ and $U$ belong respectively to $\textbf Psi_{I}(x)$ and $\textbf Psi_{J}(y)$.
Then $(S\times \{y\})\cup (\{x\}\times U)\subset I\times J$ belongs to $\textbf Psi_{I\times J}(x,y)$ and is of size $|S|+|U|$, which gives the desired inequality.
To show the other inequality $\text{\rm par-dim}_{I \times J}(x,y) \leq \text{\rm par-dim}_I(x) + \text{\rm par-dim}_J(y)$ consider $W$ in $\textbf Psi_{I\times J}(x,y)$. Then every $(a,b)$ in $W$ is such that $(a,b)< (x,y)$ and we define $\alpha(a,b):=(a,y)$ if $a<x$, and $\alpha(a,b):=(a,b)$ if $a=x$. These elements are chosen so that
$(a,b)\le \alpha(a,b)<(x,y)$ for every $(a,b)$ in $W$.
By Lemma~\ref{dadgsdsgfhn}, $\alpha (W)$ also belongs to $\textbf Psi_{I\times J}(x,y)$. Since $\lvert \alpha(W)\rvert\le \lvert \text{pr}_{I} \alpha (W)\rvert+\lvert \text{pr} _{J}\alpha(W)\rvert$, we get the desired inequality.
\end{proof}
Let $S$ be a finite set. Since $[-1,0]^{ S}$
and $[-1,0]^{ |S|}$ are isomorphic (see~\ref{afadfhfgsh}), Proposition~\ref{ghjkghjk} gives $\text{dim}_{[-1,0]^{ S}}f=\text{par-dim}_{[-1,0]^{S}}f=
\lvert \{s\in S\mid f(s)>-1\}\rvert$.
\begin{point}
Neither the dimension nor the parental dimension are monotonic in the following sense. If $I\subset J$ is a subposet inclusion (see~\ref{sdgsdgjdghkfhjk}) and $x\in I$, then in general the following inequalities may fail: $\text{dim}_I(x)\leq \text{dim}_J(x) $ and $\text{par-dim}_I(x)\leq \text{par-dim}_J(x)$.
For example, consider:
\[I=\{(0,0),(1,0),(0,1),(2,2)\}\subset \mathbb{N}^2\]
\[J=\{(0,0),(1,0),(0,1),(1,1), (2,2)\}\subset \mathbb{N}^2\] Then
$\text{dim}_I(2,2)= \text{par-dim}_I(2,2)=2$, $\text{dim}_J(2,2)=\text{par-dim}_J(2,2)=1$. For the monotonicity, additional assumptions need to be made, for example:
\end{point}
\begin{prop}\label{aDFDFHFHJ}
Let $I\subset J$ be a subposet inclusion (see~\ref{sdgsdgjdghkfhjk}).
Assume an element $x$ in $I$ has the following property: for every finite subset $S\subset I$, if $x$ is a sup of $S$ in $I$, then $x$ is a sup of $S$ in $J$. Under this assumption
$\text{\rm dim}_I(x)\leq \text{\rm dim}_J(x)$ and $\text{\rm par-dim}_I(x)\leq \text{\rm par-dim}_J(x)$.
\end{prop}
\begin{proof}
The assumption on $x$ implies that $\textbf Phi_I(x)\subset \textbf Phi_J(x)$ (see~\ref{adsgdgfjhgh}) and $\textbf Psi_I(x)\subset \textbf Psi_J(x)$ (see~\ref{aDFGSDFFHDHGJ}), which gives the claimed inequalities.
\end{proof}
\section{Realising posets}
In this section we introduce a construction that transforms posets into posets, mimicking
the relation between $\mathbb{N}^r$ and $[0,\infty)^r$.
Let $(I,\leq )$ be a poset.
\begin{point}\label{adgsdgfhjhg}
Consider $a$ in $I$ and its set of parents $\mathcal{P}(a)$.
Table~\ref{zdfhsfgjhdgjdgh} describes how $a$ partitions $I$ into three disjoint subsets of $a$-\textbf{inconsistent}, $a$-\textbf{dependent}, and $a$-\textbf{independent} elements. For example, $x$ is $a$-independent if it is not $a$-dependent ($a\not\leq x$) but is $p$-dependent ($p\leq x$) for some $p$ in $\mathcal{P}(a)$. Observe that every parent $p$ of $a$ is $a$-independent. An element which is either $a$-dependent or $a$-independent is called $a$-\textbf{consistent}.
This terminology is inspired by linear algebra.
We think of this partition as solutions of certain poset conditions, which in the case of an upper semilattice (see~\ref{adfgsgfjfjk}) behave as solutions of a linear system: an inconsistent system has no solutions, an independent system has a unique solution, and a dependent system has many solutions.
\begin{table}[H]
\centering
\begin{tabular}{|c|c|c|}
\hline
\multirow{ 2}{*}{$x$ is $a$-consistent} & $x$ is $a$-dependent & $a\leq x$ \\ \cline{2-3}
& $x$ is $a$-independent & $a\not\leq x$ and $(\mathcal{P}(a)\leq x)\not = \emptyset$\\ \hline
\multicolumn{2}{|c|}{$a$-inconsistent}& $a\not\leq x$ and $(\mathcal{P}(a)\leq x) = \emptyset$ \\ \hline
\end{tabular}
\caption{}
\label{zdfhsfgjhdgjdgh}
\end{table}
Figure~\ref{partition} illustrates the partitions
of $\mathbb{N}^2$, $\Sigma S$, and a poset of size $5$ (as illustrated in (c)), into $a$-inconsistent, $a$-dependent, and $a$-independent blocks for some choices of $a$.
\begin{figure}
\caption{Black triangles are $a$-dependent, dark grey squares are $a$-independent, and light grey dots are $a$-inconsistent.}
\label{partition}
\end{figure}
A consequence of transitivity is that being dependent is preserved by the poset relation: if $a\leq b$, then a $b$-dependent element is also
$a$-dependent. Being independent does not have this property: a $b$-independent element may fail to be $a$-independent for some $a\leq b$.
For example, consider the subposet $I=\{(0,0), (2,0), (3,0), (0,2), (3,2)\}\subset [0,\infty)^2$. Then, in $I$, the element $(0,2)$ is $(3,2)$-independent but not $(3,0)$-independent. The element $(0,2)$ is not $(3,0)$-dependent either, and hence consistency is in general not preserved by the poset relation.
\end{point}
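For a finite poset the partition of Table~\ref{zdfhsfgjhdgjdgh} can be computed directly from the order relation. The Python sketch below (same illustrative encoding; the names are ours) reproduces the example above: in the subposet $I=\{(0,0), (2,0), (3,0), (0,2), (3,2)\}$ of $[0,\infty)^2$, the element $(0,2)$ is $(3,2)$-independent and $(3,0)$-inconsistent.
\begin{verbatim}
def parents(leq, elements, a):
    below = [y for y in elements if leq(y, a) and y != a]
    return [p for p in below
            if not any(leq(p, z) and leq(z, a) and z not in (p, a)
                       for z in below)]

def classify(leq, elements, a, x):
    if leq(a, x):
        return "a-dependent"
    if any(leq(p, x) for p in parents(leq, elements, a)):
        return "a-independent"
    return "a-inconsistent"

I = [(0, 0), (2, 0), (3, 0), (0, 2), (3, 2)]
leq = lambda u, v: u[0] <= v[0] and u[1] <= v[1]
print(classify(leq, I, (3, 2), (0, 2)))  # a-independent
print(classify(leq, I, (3, 0), (0, 2)))  # a-inconsistent
\end{verbatim}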
\begin{point}\label{adgdgfjdghj}
Define $I$ to be \textbf{consistent} if, for every $a\leq b$ in $I$,
every $b$-independent element that has a common ancestor with $a$ is also $a$-consistent.
Thus, $I$ being consistent is equivalent to the following condition for every $a\leq b$ in $I$:
a $b$-consistent element $x$ is $a$-consistent if and only if there is an element $y$ in $I$ such that $x\geq y\leq a$.
Distributive upper semilattices of finite type are key examples of
consistent posets (see~\ref{asfsgdhgfhjnhgn}). For example,
the poset $\mathbb{N}^r$ is consistent. If $|S|\geq 3$, then
the suspension $\Sigma S$ (see~\ref{afadfhfgsh}) is an example of a consistent
upper semilattice which is not distributive (see~\ref{dfhieir}).
\end{point}
\begin{point}\label{sadsfgdfghjhn}
Define $\mathcal{G}(I):=\coprod_{a\in I} [-1,0]^{\mathcal{P}(a)}$ (see~\ref{afadfhfgsh} and~\ref{dsdgfsfghjdghj}). We are going to identify elements of $\mathcal{G}(I)$ with pairs $(a,f)$ consisting of an element $a$ in $I$ and a function $f\colon \mathcal{P}(a)\to [-1,0]$.
The set $\text{supp}(f):=\{p\in \mathcal{P}(a)\ |\ f(p)<0\}$ is called the
\textbf{support} of $f$.
If $\mathcal{P}(a)$ is empty, then the set $[-1,0]^{\mathcal{P}(a)}$
contains only one element which we denote by $0$, in which case $(a,0)$
is the only element in $\mathcal{G}(I)$ with the first coordinate $a$.
For $(a,f)$ and $(b,g)$ in $\mathcal{G}(I)$, set $(a,f)\leq (b,g)$ if:
\begin{itemize}
\item[(a)] $a\leq b$ in $I$;
\item[(b)] all elements in $\text{supp}(g)$ are $a$-consistent: if $g(x)<0$, then either
$x$ is $a$-dependent ($a\leq x$) or $x$ is $a$-independent ($a\not\leq x$ and $ (\mathcal{P}(a)\leq x) \not = \emptyset$);
\item[(c)]
$\bigwedge_{[-1,0]}\{f(y)\ |\ y\in(\mathcal{P}(a)\leq x)\}\leq g(x)$ for all
$a$-independent $x$ in $\mathcal{P}(b)$.
\end{itemize}
For example, $(a,f)\leq (a,g)$ in $\mathcal{G}(I)$ if and only if $f\leq g$ in $[-1,0]^{\mathcal{P}(a)}$. Thus the inclusion
$[-1,0]^{\mathcal{P}(a)}\subset \mathcal{G}(I)$, assigning to $f$ the pair $(a,f)$, is a subposet inclusion
(\ref{sdgsdgjdghkfhjk}).
If $0$ is the constant function with value $0$, then $(a,0)\leq (b,0)$ in $\mathcal{G}(I)$ if and only if $a\leq b$ in $I$. Thus the inclusion $I\subset \mathcal{G}(I)$, assigning to $a$ the pair $(a,0)$, is also a subposet inclusion. The projection $\pi\colon \mathcal{G}(I)\to I$, assigning to a pair $(a,f)$ the element $a$, is a functor of posets.
We think about an element $(a,f)$ in $\mathcal{G}(I)$ as an intermediate point
between $a$ and its parents, where the value of $f(p)$ describes the time needed to go back in the direction of the parent $p$.
\end{point}
\begin{prop}\label{asdgsfghg}
If $(a,f)\leq (b,g)$ in $\mathcal{G}(I)$, then every $w\leq a$ which is an ancestor of $\text{\rm supp}(f)$ is also
an ancestor of $\text{\rm supp}(g)$.
\end{prop}
\begin{proof}
Let $w\leq a$ be an ancestor of $\text{\rm supp}(f)$, and
$x$ be in $\text{supp}(g)$. Then $x$ is $a$-consistent, and, thus, either $a\le x$, implying $w\le x$, or
$\bigwedge\{f(y)\ |\ y\in(\mathcal{P}(a)\leq x)\}\leq g(x)<0$,
implying
the existence of $y$ in $\mathcal{P}(a)\le x$ with $f(y)< 0$. For such a $y$,
$w\le y\le x$.
\end{proof}
To understand the relation $\leq$ on $\mathcal{G}(I)$ it is
convenient to describe it in an alternative way using translations.
\begin{Def}\label{asfadfhs}
For $a$ in $I$, define $T_a:=[-1,0]^{\mathcal{P}(a)}$ (see~\ref{afadfhfgsh}).
For $a\leq b$ in $I$, define $T_{a\leq b}\colon T_a\to T_b$ to map
$f\colon \mathcal{P}(a)\to [-1,0]$ to $T _{a\le b}f\colon \mathcal{P}(b)\to [-1,0]$ where:
\[
(T _{a\le b}f)(x):=\begin{cases} -1 & \text{if $x$ is $a$-dependent,} \\
\bigwedge_{[-1,0]}\{f(y)\ |\ y\in (\mathcal{P}(a)\leq x)\} & \text{if $x$ is $a$-independent,} \\
0 & \text{if $x$ is $a$-inconsistent.}
\end{cases}
\]
The function $T_{a\leq b}$ is called \textbf{translation} along $a\leq b$.
\end{Def}
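The three cases of Definition~\ref{asfadfhs} translate into a short computation for finite posets. The following Python sketch (same illustrative encoding; a function on $\mathcal{P}(a)$ is stored as a dictionary whose missing keys are read as $0$, a choice of ours) reproduces the calculation of the example following Lemma~\ref{asdgdsgshdgf}, illustrating that translations need not compose strictly.
\begin{verbatim}
def parents(leq, elements, a):
    below = [y for y in elements if leq(y, a) and y != a]
    return [p for p in below
            if not any(leq(p, z) and leq(z, a) and z not in (p, a)
                       for z in below)]

def translate(leq, elements, a, b, f):
    # f is a dict on the parents of a (missing keys are read as 0).
    Pa, Pb = parents(leq, elements, a), parents(leq, elements, b)
    g = {}
    for x in Pb:
        if leq(a, x):                                  # x is a-dependent
            g[x] = -1.0
        else:
            vals = [f.get(y, 0.0) for y in Pa if leq(y, x)]
            g[x] = min(vals) if vals else 0.0          # a-independent / a-inconsistent
    return g

I = [(0, 0), (2, 0), (3, 0), (0, 2), (3, 2)]
leq = lambda u, v: u[0] <= v[0] and u[1] <= v[1]
f = {p: -0.5 for p in parents(leq, I, (2, 0))}
print(translate(leq, I, (2, 0), (3, 2), f))
# {(3, 0): -1.0, (0, 2): -0.5}
print(translate(leq, I, (3, 0), (3, 2), translate(leq, I, (2, 0), (3, 0), f)))
# {(3, 0): -1.0, (0, 2): 0.0}
\end{verbatim}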
Translations form a lax functor (see~\ref{asdfsgadfsfhjejk}) which is the content of:
\begin{lemma}\label{asdgdsgshdgf}
Let $a\le b \le c$ be in $I$.
\begin{enumerate}
\item $T_{a\leq a}\colon T_a\to T_a$ is the identity,
\item $T_{a\leq b}\colon [-1,0]^{\mathcal{P}(a)}\to [-1,0]^{\mathcal{P}(b)}$
is a functor of posets (see~\ref{sdgsdgjdghkfhjk}),
\item $T_{a\le c} \leq T _{b\le c}T_{a\le b}$.
\end{enumerate}
\end{lemma}
\begin{proof}
Statements (1) and (2) are direct consequences of the definition.
To prove statement (3) choose a function $f\colon\mathcal{P}(a)\to [-1,0]$ and an element $x$ in $\mathcal{P}(c)$.
If $x$ is $a$-dependent, then $(T_{a\le c}f)(x) =-1\leq (T _{b\le c}T_{a\le b}f)(x)$.
Assume $x$ is not $a$-dependent. Then $x$ is not $b$-dependent.
If $x$ is $b$-inconsistent, then $(T_{a\le c}f)(x)\leq 0= (T _{b\le c}T_{a\le b}f)(x)$. Let $x$ be $b$-independent. Then
$(T _{b\le c}T_{a\le b}f)(x) = \bigwedge\{(T_{a\le b}f)(y)\ |\ y\in (\mathcal{P}(b)\leq x)\} $, and the desired inequality $(T_{a\le c}f )(x) \leq (T _{b\le c}T_{a\le b}f)(x)$ would then follow if $(T_{a\le c}f )(x)\leq (T_{a\le b}f)(y)$ for all $y$ in $\mathcal{P}(b)\leq x$. Let $y$ be in $\mathcal{P}(b)\leq x$. Then $y$ cannot be $a$-dependent, since $x$ is not $a$-dependent. If $y$ is $a$-independent, then $x$ is also $a$-independent, and since
$(\mathcal{P}(a)\leq y)\subset (\mathcal{P}(a)\leq x)$, then:
\[\scalebox{0.92}{$(T_{a\le c}f)(x) = \bigwedge\{f(z)\ |\ z\in (\mathcal{P}(a)\leq x)\} \leq \bigwedge\{f(z)\ |\ z\in (\mathcal{P}(a)\leq y)\} =(T_{a\le b}f)(y)$} \]
Finally, if $y$ is $a$-inconsistent, then $(T_{a\le c}f)(x)\leq 0 = (T_{a\le b}f)(y)$.
\end{proof}
According to Lemma~\ref{asdgdsgshdgf}, translations form a lax functor which we denote by
$T\colon I\rightsquigarrow \text{Posets}$ and name \textbf{$I$-translation}.
One should be aware that in general $T$ may fail to be a functor. For example:
\begin{example}
Consider the subposet $I=\{x=(0,0), a =(2,0), b=(3,0), y=(0,2), c=(3,2)\}\subset [0,\infty)^2$.
Then $\mathcal{P}(a)=\{x\}$, $\mathcal{P}(b)=\{a\}$, and $\mathcal{P}(c)=\{y,b\}$. Furthermore, $y$ is $b$-inconsistent.
Then:
\begin{itemize}
\item $T_{a\le b}(-0.5)\colon \mathcal{P}(b)\to [-1,0]$ is the constant function $-1$.
\item $T_{b\le c}(T_{a\le b}(-0.5))
\colon \mathcal{P}(c)\to [-1,0]$ maps $y$ to $0$ and $b$ to $-1$.
\item $T_{a\le c}(-0.5)\colon \mathcal{P}(c)\to [-1,0]$ maps $y$ to $-0.5$
and $b$ to $-1$.
\end{itemize}
In this case $T_{a\le c}(-0.5)<T_{b\le c}T_{a\le b}(-0.5)$.
\end{example}
Since $T\colon I\rightsquigarrow \text{Posets}$ is a lax functor, we can form its
Grothendieck construction $\text{Gr}_IT$ (see~\ref{asdfsgadfsfhjejk}).
As sets, $\mathcal{G}(I)$ and $\text{Gr}_IT$
are identical. The next lemma states that the relation $\leq$ on $\mathcal{G}(I)$
coincides with the poset relation on the Grothendieck construction $\text{Gr}_IT$ described in~\ref{asdfsgadfsfhjejk}.
\begin{lemma}\label{asdgdfhfgjhg}
Let $(a,f)$ and $(b,g)$ be elements in $\mathcal{G}(I)$.
\begin{enumerate}
\item $(a,f)\leq (b,g)$ if and only if $a\leq b$ in $I$ and $T _{a\le b}f\leq g$ in $[-1,0]^{\mathcal{P}(b)}$.
\item If $a\le b$, then $(a,f)\leq (b,T_{a\leq b}f)$.
\item Elements in $\text{\rm supp}(T_{a\leq b}f) $ are $a$-consistent.
\end{enumerate}
\end{lemma}
\begin{proof}
Statement (3) is a direct consequence of (2) and (2) is a direct consequence of (1).
To show (1), choose $x$ in $\mathcal{P}(b)$. If $x$ is $a$-dependent, then $(T _{a\le b}f)(x)=-1\leq g(x)$.
If $x$ is $a$-independent, then $(T _{a\le b}f)(x)=\bigwedge\{f(y)\ |\ y\in (\mathcal{P}(a)\leq x)\}$ and hence $(T _{a\le b}f)(x)\leq g(x)$ is equivalent to condition (c) in~\ref{sadsfgdfghjhn}. If $x$ is $a$-inconsistent, then
$T _{a\le b}f(x)=0$ and hence $T _{a\le b}f(x)\leq g(x)$ if and only if $x$ is not in the support of $g$. These equivalences are what is needed to show (1).
\end{proof}
Since the posets $\mathcal{G}(I)$ and $\text{Gr}_IT$ coincide,
Proposition~\ref{adfgsgdhjdg} gives the following explicit description
of some sup elements in $\mathcal{G}(I)$.
\begin{cor}\label{dbhfjgyhdklsadj}
Let $S\subset \mathcal{G}(I)$ be a non-empty subset.
\begin{enumerate}
\item If $\pi(S)=\{a\}$, then $S$ has a coproduct and $\bigvee_{\mathcal{G}(I)} S=(a,g)$, where $g =\bigvee_{[-1,0]^{\mathcal{P}(a)}}\{f\ |\ (a,f)\in S\}$.
\item If $b$ is a sup of $\pi(S)$ in $I$, then there is a unique sup of $S$ in
$\mathcal{G}(I)$ of the form $(b,m)$, and $m=\bigvee_{[-1,0]^{\mathcal{P}(b)}}\{T_{a\leq b}f\ |\ (a,f) \text{ in } S\}$.
\end{enumerate}
\end{cor}
In this article, we are primarily interested not in $\mathcal{G}(I)$, but in its subposet called the realisation of $I$:
\begin{Def}\label{drjfhhj}
For an element $a$ in $I$, define $\mathcal{R}_a(I)$ to be the
subposet of $ \mathcal{G}(I)$ consisting of all the pairs $(a,f)$ satisfying the following conditions:
\begin{itemize}
\item $f(x)>-1$, for every $x$ in $\mathcal{P}(a)$;
\item $\text{supp}(f)$ is finite;
\item $\text{supp}(f)$ has an ancestor in $I$.
\end{itemize}
The subposet $\mathcal{R}(I):=\bigcup_{a\in I}\mathcal{R}_a(I)\subset \mathcal{G}(I)$
is called the \textbf{realisation} of $I$.
\end{Def}
For example, for every $a$ in $I$, the element $(a,0)$ belongs to $\mathcal{R}_a(I)\subset \mathcal{R}(I)$. The function $I\to \mathcal{R}(I)$, mapping
$a$ to $(a,0)$, is a subposet inclusion, and we identify $I$ with its image in $\mathcal{R}(I)$.
Let $(a,f)$ be in $\mathcal{R}(I)$ and $(a,f)\leq (b,g)$ in $\mathcal{G}(I)$. Then, according to Proposition~\ref{asdgsfghg}, $\text{supp}(g)$ has an ancestor. Thus to show $(b,g)$ is in $\mathcal{R}(I)$ only the first two conditions
in Definition~\ref{drjfhhj} need to be verified, which are: the values of $g$ are strictly bigger than $-1$ and the support of $g$ is finite.
Let $a$ be in $I$. For a finite subset $S\subset \mathcal{P}(a)$ which has an ancestor and a function $f\colon S\to (-1,0)$, denote by $\overline{f}\colon \mathcal{P}(a)\to (-1,0]$
the extension:
\[\overline{f}(x):=\begin{cases}
f(x) &\text{ if } x\in S\\
0 &\text{ if } x\not\in S
\end{cases}\]
The element $(a,\overline{f})$ belongs to $\mathcal{R}_a(I)\subset \mathcal{R}(I)$. The function $(-1,0)^{S}\to \mathcal{R}_a(I)$,
mapping $f$ to $(a,\overline{f})$, is a subposet inclusion. Since these subposets are disjoint, as a set, $\mathcal{R}_a(I)$ can be identified with the disjoint union
$\coprod_S (-1,0)^{S}$ where $S$ ranges over finite subsets of $\mathcal{P}(a)$ that have ancestors. Thus $\mathcal{R}(I)$ can be identified with the disjoint union
$\coprod_{a} \coprod_{S}(-1,0)^{S}$ where $a$ ranges over all elements of $I$ and, for each such $a$, $S$ ranges over all finite subsets of $\mathcal{P}(a)$ that have ancestors.
Here are two families of examples of realisations:
\begin{point}\label{dsgsdfhfj}
The reason we are interested in the realisation is that it generalises the relation $\mathbb{N}^r\subset [0,\infty)^r$.
Consider $\alpha \colon \mathcal{R}(\mathbb{N}^r) \to [0,\infty)^r$ mapping $(a,f)$
to $a+\sum_{(a-e_i)\in \mathcal{P}(a)}f(a-e_i)e_i$
where $e_i$ is the $i$-th vector in the standard basis
of $\mathbb{R}^r$.
This function is an isomorphism of posets. We use this to identify
the realisation $\mathcal{R}(\mathbb{N}^r)$ with $[0,\infty)^r$. The composition
of the function $\mathbb{N}^r\to \mathcal{R}(\mathbb{N}^r)$, mapping $a$ to $(a,0)$, with $\alpha$ is the inclusion $\mathbb{N}^r\subset [0,\infty)^r$.
The same formula $(a,f)\mapsto a+\sum_{(a-e_i)\in \mathcal{P}(a)}f(a-e_i)e_i$ gives a poset isomorphism between $\mathcal{R}([1]^r)$ and $[0,1]^r$.
\end{point}
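In coordinates this identification is a one-line formula. The following Python sketch is illustrative only; the decision to key the values of $f$ by the coordinate index $i$ of the parent $a-e_i$ is ours.
\begin{verbatim}
def alpha(a, f):
    # a is a point of N^r; f records the realisation coordinate at the parent
    # a - e_i, keyed here by the index i (missing keys are read as 0).
    return tuple(a[i] + f.get(i, 0.0) for i in range(len(a)))

# The pair ((2, 1), f) with f(a - e_1) = -0.25 and f(a - e_2) = -0.5
# corresponds to the point (1.75, 0.5) of [0, infinity)^2.
print(alpha((2, 1), {0: -0.25, 1: -0.5}))  # (1.75, 0.5)
\end{verbatim}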
\begin{point}
Let $P$ be a set. Consider the inclusion poset $2^P$ of subsets of $P$ (see~\ref{afadfhfgsh}). Since $2^P$ has a global minimum (see~\ref{dsdgfsgjhdgh}), given by the empty subset,
all subsets of $2^P$ have an ancestor.
Every parent of $S$ in $2^P$ is of the form $S\setminus\{x\}$ for $x$ in $S$, and hence the
function mapping $x$ in $S$ to $S\setminus\{x\}$ is a bijection between $S$ and the set of its parents $\mathcal{P}_{2^P}(S)$ in $2^P$.
We use this bijection to identify $\mathcal{P}_{2^P}(S)$ with $S$ (see~\ref{sdfgbsdhgfsh}).
The realisation $\mathcal{R}(2^P)$ can be identified with a subposet of $[-1,0]^P$.
By definition, $\mathcal{R}(2^P)$ consists of pairs
$(S,f\colon S\to (-1,0])$ (here we use the
identification between $S$ and $\mathcal{P}_{2^P}(S)$) where the support of $f$ is finite. For such an element in $\mathcal{R}(2^P)$, define $\underline{f}\colon P\to [-1,0]$:
\[\underline{f}(x) :=\begin{cases}
f(x) &\text{ if } x\in S\\
-1 &\text{ if } x\not\in S
\end{cases}\]
The function $\mathcal{R}(2^P) \to [-1,0]^P$, mapping $(S,f\colon S\to (-1,0])$ to $\underline{f}$, is a subposet inclusion (see~\ref{sdgsdgjdghkfhjk}).
Its image consists of those functions $g\colon P\to [-1,0]$ for which the set $g^{-1}((-1,0))=\{x\in P\ |\ -1<g(x)<0\}$ is finite.
The subposet in $ [-1,0]^P$ of such functions is therefore isomorphic to $\mathcal{R}(2^P)$. For example, if $P$ is finite, then
$\mathcal{R}(2^P)$ is isomorphic to $ [-1,0]^P$: the realisation of the discrete cube of finite dimension is
isomorphic to the geometric cube of the same dimension (see~\ref{afadfhfgsh}).
\end{point}
\begin{point}\label{sadgsfghb}
The realisation $\mathcal{R}(I)$ has the following extension property, which is convenient for constructing functors indexed by it.
Let $U\colon I\to 2^Y$ be a functor of posets (see~\ref{sdgsdgjdghkfhjk}) where $Y$ is a set and $2^Y$ is the inclusion poset of all subsets of $Y$ (see~\ref{afadfhfgsh}). Choose a distance $d$ on $Y$ whose values are bounded by a real number $m$.
When all the values of $U$ are non-empty, we are going to use this distance to extend $U$ along $I\subset \mathcal{R}(I)$ to form a commutative diagram of poset functors:
\[\begin{tikzcd}
I\ar[hook]{rr}\ar{rd}{U} & & \mathcal{R}(I)\ar{ld}[swap]{\overline{U}}\\
& 2^{Y}
\end{tikzcd}\]
For $(a,f)$ in $\mathcal{R}(I)$, define:
\[\overline{U}(a,f):=U(a)\cap\bigcap_{p\in \mathcal{P}(a)}
B(U(p),(1+f(p))m)\]
where $B(V,r)=\{y\in Y\ |\ d(x,y)<r\text{ for some
$x$ in $V$}\}$. Note that $\overline{U}(a,0)=U(a)$.
To prove the functoriality of $\overline{U}$ all the conditions
describing the poset relation on $\mathcal{R}(I)$ (see~\ref{sadsfgdfghjhn}) are needed.
\end{point}
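For finite data the extension $\overline{U}$ can be evaluated directly from its defining formula. The following Python sketch is a minimal illustration, with $U$ encoded as a dictionary, $Y$ a finite set carrying a distance bounded by $m$, and all helper names ours.
\begin{verbatim}
def extend(U, parents_of_a, d, m, Y, a, f):
    # overline{U}(a, f) = U(a) intersected with B(U(p), (1 + f(p)) m)
    # over all parents p of a; missing keys of f are read as 0.
    result = set(U[a])
    for p in parents_of_a:
        radius = (1 + f.get(p, 0.0)) * m
        ball = {y for y in Y if any(d(x, y) < radius for x in U[p])}
        result &= ball
    return result

# Example: Y = {0,1,2,3} with d(x,y) = |x-y| and m = 4; the poset is the chain
# 0 < 1 with U(0) = {0}, U(1) = Y; the unique parent of 1 is 0 and f(0) = -0.5.
Y = {0, 1, 2, 3}
U = {0: {0}, 1: {0, 1, 2, 3}}
print(extend(U, [0], lambda x, y: abs(x - y), 4, Y, 1, {0: -0.5}))  # {0, 1}
\end{verbatim}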
\begin{prop}\label{sasDFGSDGJFHG}
Let $Y$ be a set and $U\colon I\to 2^Y$ be a functor of posets whose values are non-empty subsets of $Y$.
If $(a,f)\leq (b,g)$ in
$\mathcal{R}(I)$, then $\overline{U}(a,f)\subset \overline{U}(b,g)$ in $2^Y$.
Moreover all values of $\overline{U}$ are non-empty.
\end{prop}
\begin{proof}
Let $u$ be in $\overline{U}(a,f)$. By definition, this means:
$u$ is in $U(a)$ and, for all $p$ in $\mathcal{P}(a)$, there is $u_p$ in $U(p)$ for which $d(u,u_p) < (1+f(p))m$.
We need to show that $u$ is in $\overline{U}(b,g)$, which is equivalent to: $u$ being in $U(b)$ and, for all $x$ in $\mathcal{P}(b)$, there is $v_x$ in $U(x)$ for which $d(u,v_x) < (1+g(x))m$.
The relation $(a,f)\leq (b,g)$ yields $a\leq b$ (condition (a) in~\ref{sadsfgdfghjhn}), and hence $U(a)\subset U(b)$,
implying $u$ is in $U(b)$.
Choose $x$ in $\mathcal{P}(b)$. If $g(x)=0$, then since all distances in $Y$ are bounded by $m$, we can choose $v_x$ to be any element in $U(x)$ (which exists
since $U(x)$ is non-empty by assumption).
Assume $g(x)<0$.
By condition (b) in~\ref{sadsfgdfghjhn}, either $x$ is $a$-dependent ($a\leq x$) or $x$ is $a$-independent ($a\not\leq x$ and $(\mathcal{P}(a)\leq x)\not=\emptyset$).
If $a\leq x$, then we can take $v_x=u$, since $U(a)\subset U(x)$.
If $x$ is $a$-independent, then the relation $(a,f)\leq (b,g)$ implies the existence of
$p$ in $\mathcal{P}(a)\leq x$ for which $f(p)\leq g(x)$ (here we use the fact that
$\text{supp}(f)$ is finite so that $\bigwedge_{[-1,0]}\{f(y)\ |\ y\in(\mathcal{P}(a)\leq x)\}$ is realised by some $p$, see condition (c) in~\ref{sadsfgdfghjhn}). In this case we can take $v_x= u_p$, as
$d(u,u_p)< (1+f(p))m\leq (1+g(x))m$.
To show non-emptiness of $\overline{U}(a,f)$, note that $\text{supp}(f)$ has an ancestor $w$. Thus $U(w)\subset U(a)$ and $U(w)\subset U(p)$ for every
$p$ in $\text{supp}(f)$ and consequently $U(w)\subset \overline{U}(a,f)$.
\end{proof}
Since $\mathcal{R}(I)$ is a subposet of $\mathcal{G}(I)$, one may also construct some sup elements in $\mathcal{R}(I)$ analogously to
Corollary~\ref{dbhfjgyhdklsadj}.
\begin{cor}\label{bcvfhdxncvb}
Let $S\subset \mathcal{R}(I)$ be a non-empty subset.
\begin{enumerate}
\item If $\pi(S)=\{a\}$, then
$S$ has a coproduct in $\mathcal{R}(I)$ and $\bigvee_{\mathcal{R}(I)} S=(a,g)$ where $g =\bigvee_{[-1,0]^{\mathcal{P}(a)}}\{f\ |\ (a,f)\in S\}$.
\item Assume $I$ is a poset whose every element has a finite set of parents.
If $b$ is a sup of $\pi(S)$ in $I$, then there is a unique sup of $S$ in
$\mathcal{R}(I)$ of the form $(b,m)$ and $m=\bigvee_{[-1,0]^{\mathcal{P}(b)}}\{T_{a\leq b}f\ |\ (a,f) \text{ in } S\}$.
\end{enumerate}
\end{cor}
\begin{proof}
Since $\mathcal{R}(I)$ is a subposet of $\mathcal{G}(I)$, to prove the result it is enough to show $(a,g)$ and $(b,m)$
belong to $\mathcal{R}(I)$.
\noindent
(1):\quad For every $(a,f)$ in $S$
we have $f\leq g$, hence $g(x)>-1$ for all $x$ in $\mathcal{P}(a)$ and $\text{supp}(g)\subset \text{supp}(f)$. Thus, $\text{supp}(g)$ is finite and every
ancestor of $\text{supp}(f)$ is also an ancestor of $\text{supp}(g)$.
\noindent
(2):\quad
The equality $m(x)=-1$ holds if and only if $(T_{a\leq b}f)(x)=-1$
for all $(a,f)$ in $S$. Thus, $m(x)=-1$ if and only if
$a\leq x$ ($x$ is $a$-dependent), for every $a$ in $\pi(S)$. The equality $m(x)=-1$ would then contradict the assumption that $b$ is a sup of $\pi(S)$, since $x$ is in $\mathcal{P}(b)$.
The values of the function $m$ belong, therefore, to $(-1,0]$.
The finiteness of $\text{supp}(m)$ is guaranteed by the finiteness assumption on the sets of parents in $I$, and the existence of its ancestor is guaranteed by
Proposition~\ref{asdgsfghg}.
\end{proof}
According to Corollary~\ref{bcvfhdxncvb}, if the set of parents of every element in $I$ is finite,
then elements in $\mathcal{R}(I)$ of the form $(b,\bigvee\{T_{a\leq b}f\ |\ (a,f) \text{ in } S\})$, where $b$ is a sup of $\pi(S)$ in $I$, are sups of $S\subset \mathcal{R}(I)$.
One should be aware however that an element $(c,h)$ in $\mathcal{R}(I)$ may be a sup of $S$ even though $c$ is not a sup of $\pi(S)$ in $I$ as Example~\ref{svdgsfghb} illustrates.
\begin{point}\label{sdgdfhkjkl}
An important aspect of realisations is that they admit
explicit grid-like discretisations. These discretisations are particularly useful for describing
properties of tame functors, such as
their homological dimension (see~\ref{sdrtyhgf}).
For subposets $D\subset I$ and $V\subset (-1,0)$, denote by
$\mathcal{R}_D(I, V)\subset \mathcal{R}(I)$ the following subposet:
\[\mathcal{R}_D(I, V):=\{(a,f)\in \mathcal{R}(I)\ |\ a\in D\text{ and } f(\text{supp}(f))\subset V\}.\]
An element $(a,f)$ in $\mathcal{R}(I)$ belongs to $\mathcal{R}_D(I, V)$ if and only if $a$ is in $D$ and
all non-zero values of $f$ belong to $V$. In particular
$(a,0)$ belongs to $\mathcal{R}_D(I, V)$ if and only if $a$ is in $D$, which means the following inclusions hold:
\[\begin{tikzcd}
D\ar[hook]{r}\ar[hook]{d} & \mathcal{R}_D(I, V)\ar[hook]{d}\\
I\ar[hook]{r} & \mathcal{R}(I)
\end{tikzcd}
\]
Recall $\mathcal{R}(I)$ can be identified with the disjoint union
$\coprod_{a} \coprod_{S}(-1,0)^{S}$, where $a$ ranges over all elements of $I$ and, for each such $a$, $S$ ranges over all finite subsets of $\mathcal{P}(a)$ that have ancestors (see the paragraph after~\ref{drjfhhj}). Via this identification,
$ \mathcal{R}_D(I, V)$ corresponds to $\coprod_{a} \coprod_{S}V^{S}$
where $a$ ranges over all elements in $D$ and, for each such $a$, $S$ ranges over all finite subsets of $\mathcal{P}(a)$ that have ancestors.
If $D=\{a\}$, then $\mathcal{R}_D(I, V)$ is also denoted by $\mathcal{R}_a(I, V)$; if $D=I$, then $\mathcal{R}_D(I, V)$ is also denoted by $\mathcal{R}(I, V)$; and if $V=(-1,0)$, then $\mathcal{R}_D(I, V)$ is also denoted by $\mathcal{R}_D(I)$.
For example, choose
$V=\{-0.5\}\subset (-1,0)$. Recall that the realisation
$\mathcal{R}(\mathbb{N}^r)$ can be identified with
the poset
$[0,\infty)^r$
(see~\ref{dsgsdfhfj}).
Via this identification,
$\mathcal{R}(\mathbb{N}^r, V)\subset \mathcal{R}(\mathbb{N}^r) $
corresponds to $0.5 \mathbb{N}^r\subset
[0,\infty)^r.$
If $D$ and $V$ are finite, then so is $\mathcal{R}_D(I, V)$.
Furthermore, for any finite subposet $S\subset \mathcal{R}(I)$, there is a finite $D\subset I$
and a finite $V\subset (-1,0)$ for which $S\subset \mathcal{R}_D(I, V)$.
In case $I$ is of finite type we could choose $D$ to be of the form $I\leq a$ for some $a$ in $I$.
\end{point}
If $I $ is of finite type, then the dimension and the parental dimension of every element in the realisation $\mathcal{R}(I)$ coincide and can be calculated in an analogous way as in Proposition~\ref{sfgasg}.
\begin{thm}\label{afashgrjh}
Let $V\subset (-1,0)$ be a subset, $I$ be a poset of finite type, and $D\subset I$ a subposet such that $(I\leq d)\subset D$ for every $d$ in $D$.
Assume $(a,f)$ is in $\mathcal{R}_{D}(I, V)$ for which
there is $\varepsilon$ in $V$ such that $\varepsilon<f(x)$ for all $x$ in $\mathcal{P}(a)$. Then the numbers
$\text{\rm par-dim}_{\mathcal{R}_a(I,V)}(a,f)$,
$\text{\rm par-dim}_{\mathcal{R}_{D}(I,V)}(a,f)$,
$\text{\rm dim}_{\mathcal{R}_a(I,V)}(a,f)$, and
$\text{\rm dim}_{\mathcal{R}_{D}(I,V)}(a,f)$
coincide and are equal to:
\[
\text{\rm max}\{|S|\ |\ \text{\rm supp}(f)\subset S\subset \mathcal{P}(a) \text{ and $S$ has an ancestor}\}.
\]
\end{thm}
\begin{proof}
The vertical inequalities in the following diagram follow from Proposition~\ref{adgfdsfhgf}. The horizontal inequalities are a consequence of
Proposition~\ref{aDFDFHFHJ}, where the required assumption is given by Corollary~\ref{bcvfhdxncvb}.(1):
\[\begin{tikzcd}[column sep = 0.7em, row sep = 0.7em]
\text{\rm par-dim}_{\mathcal{R}_{D}(I,V)}(a,f)\arrow[r,symbol=\geq]\arrow[d,symbol=\geq] & \text{\rm par-dim}_{\mathcal{R}_a(I,V)}(a,f)\arrow[d,symbol=\geq] \\
\text{\rm dim}_{\mathcal{R}_{D}(I,V)}(a,f)\arrow[r,symbol=\geq] &\text{\rm dim}_{\mathcal{R}_a(I,V)}(a,f)
\end{tikzcd}\]
Let $\mathcal{A}:=\{S\ |\ \text{\rm supp}(f)\subset S\subset \mathcal{P}(a) \text{ and $S$ has an ancestor}\}$.
Next we show $\text{\rm dim}_{\mathcal{R}_a(I,V)}(a,f)\geq \text{\rm max}\{|S|\ |\ S\in \mathcal{A}\}$. Since $I$ is of finite type, the collection $\mathcal{A}$ is finite and all its elements are finite.
Let $S$ be in $\mathcal{A}$.
Define $\varepsilon_S\colon \mathcal{P}(a)\to (-1,0]$ and $f_p\colon \mathcal{P}(a)\to (-1,0]$, for every $p$ in $S$, by the formulas:
\[
\varepsilon_S(x) = \begin{cases}
\varepsilon & \text{ if } x\in S\\
0 & \text{ if } x\not\in S
\end{cases}
\ \ \ \ \ \ \ \ \
f_p(x)=
\begin{cases}
f(x) & \text{ if } x=p\\
\varepsilon &\text{ if } x\in S\setminus\{p\}\\
0 & \text{ if } x\not\in S
\end{cases}
\]
These functions are chosen so that, for every $p$ in $S$, $-1<\varepsilon_S< f_p\leq f$.
Since $\text{supp}(f_p)\subset S= \text{supp}(\varepsilon_S)$, both sets
$\text{supp}(f_p)$ and $\text{supp}(\varepsilon_S)$ have ancestors in $I$ and thus
$(a,f_p)$ and $(a,\varepsilon_S)$ belong to $\mathcal{R}_a(I,V)$.
The relation $\varepsilon_S<f_p$, for every $p$, implies $(a,\varepsilon_S)$ is a proper ancestor of $U:=\{(a,f_p)\ |\ p\in S\}$ in $\mathcal{R}_a(I,V)$. If $p\not = q$ in $S$, then $f_p\not=f_q$,
and hence $|U|=|S|$. Furthermore, $\bigvee_{\mathcal{R}_a(I,V)} U = (a,f)$ and
$\bigvee_{\mathcal{R}_a(I,V)} (U\setminus\{(a,f_p)\}) < (a,f)$ for every $p$ in $S$. The set $U$ belongs, therefore, to
the collection $\textbf Phi_{\mathcal{R}_a(I,V)}(a,f)$ used to define the dimension (see~\ref{adsgdgfjhgh}), and, consequently, $\text{dim}_{\mathcal{R}_a(I,V)}(a,f)\geq |U|=|S|$. As this happens for every $S$ in $\mathcal{A}$, we get the desired inequality.
To finish the proof we show
$\text{\rm max}\{|S|\ |\ S\in \mathcal{A}\}\geq \text{\rm par-dim}_{\mathcal{R}_{D}(I,V)}(a,f)$.
Let $U$ be an element in $\textbf Psi_{\mathcal{R}_D(I,V)}(a,f)$ (see~\ref{aDFGSDFFHDHGJ}).
In particular, $U$ has an ancestor $(c,h)$ in $\mathcal{R}_D(I,V)$.
For $(b,g)$ in $U$ consider $\alpha(b,g):=(T_{b\leq a}g)\vee \varepsilon$, where $\varepsilon$ also denotes the constant function on $\mathcal{P}(a)$ with value $\varepsilon$.
We claim $(a,\alpha(b,g))$ belongs to $\mathcal{R}_D(I,V)$ and
$\alpha(b,g)< f$. This is clear if $b=a$. If $b<a$, since $I$ is of finite type, there is a parent $p$ of $a$ such that $b\leq p<a$. In this case $T_{b\leq a}g(p)=-1$, and, consequently, $\alpha(b,g)(p)=\varepsilon<f(p)$.
Furthermore,
any ancestor of $\text{supp}(h)$ is also an ancestor of $\text{supp}(\alpha(b,g))$. In particular, $S:=\bigcup_{(b,g)\in U} \text{supp}(\alpha(b,g))$ has an ancestor, and it contains $\text{supp($f$)}$, as $(T_{b\le a}g\vee \varepsilon) \le f$. Thus, $S$ is in $\mathcal{A}$.
For every $(b,g)$ in $U$, let $p_{(b,g)}$ be a parent of $a$ in $I$ for which $\alpha(b,g)(p_{(b,g)})<f(p_{(b,g)})$.
If $\alpha(b,g)\not =\alpha(b',g')$, then, since $\alpha(b,g)\vee\alpha(b',g') = f$, the elements
$p_{(b,g)}$ and $p_{(b',g')}$ have to be different. Thus $|\{p_{(b,g)}\ |\ (b,g)\in U\}|=|U|$, and consequently $|S|\geq |U|$ as $\{p_{(b,g)}\ |\ (b,g)\in U\}\subset S$. As this happens for every $U$ in $\textbf Psi_{\mathcal{R}_D(I,V)}(a,f)$, we get $\text{max}\{|S|\ |\ S\in \mathcal{A}\}\geq \text{par-dim}_{\mathcal{R}_D(I,V)}(a,f)$.
\end{proof}
Note that the considered dimensions in Theorem~\ref{afashgrjh} do not depend on
the choice of $D$ and $V$. Consequently:
\begin{cor}\label{sdfwefwefwsghj}
Let $I$ be a poset of finite type. Then for every element $(a,f)$ in $\mathcal{R}(I)$, the following numbers
$\text{\rm par-dim}_{\mathcal{R}_a(I)}(a,f)$,
$\text{\rm par-dim}_{\mathcal{R}(I)}(a,f)$,
$\text{\rm dim}_{\mathcal{R}_a(I)}(a,f)$, and
$\text{\rm dim}_{\mathcal{R}(I)}(a,f)$
coincide and are equal to:
\[
\text{\rm max}\{|S|\ |\ \text{\rm supp}(f)\subset S\subset \mathcal{P}(a) \text{ and $S$ has an ancestor}\}\]
\end{cor}
\begin{point}
Let $I$ be a poset of finite type and $(a,f)$ be in $\mathcal{R}(I)$. According to Theorem~\ref{afashgrjh}, $ \text{par-dim}_I(a)=
\text{par-dim}_{\mathcal{R}(I)}(a,0)$ and
$\text{par-dim}_{\mathcal{R}(I)}(a,f)\leq \text{par-dim}_I(a)$. This last inequality can be strict. For example, consider the subposet $I=\{(1,0,0), (1,1,0), (1,0,1), (0,1,1), (1,1,1)\}\subset \mathbb{R}^3$. Let $a=(1,1,1)$ and $f\colon\mathcal{P}(a)\to (-1,0]$
be the function which is $0$ except that $f(0,1,1)<0$. In this case,
$\text{ par-dim}_{\mathcal{R}(I)}(a,f)=1$ and $\text{par-dim}_I(a)=2$.
\end{point}
\begin{point}\label{ertyuiuygf}
We now aim to give a visual representation of realisations of some finite posets.
Consider the following elements in $\mathbb{R}^2$:
$a=(0,0)$, $b=(3,0)$, $c=(0,2)$, $d=(3,2)$, $h=(2,0)$, and $k=(1,2)$, and the following subposets of $\mathbb{R}^2$: $I_1 =\{a,b,c,d\}$, $I_2=\{a,b,c,d,h\}$, and
$I_3=\{a,b,c,d,h,k\}$. Then their realisations are isomorphic to subposets of
$\mathbb{R}^2$ illustrated in Figure~\ref{dasgsdghsdfhjgdhj}. Note that in the second subfigure, the points in the grey square are not comparable to the points in the line segment from $h$ to $b$. In the third subfigure, the points in the grey square are not comparable to the points in the line segments from $c$ to $k$ and from $h$ to $b$.
\begin{figure}
\caption{The dashed lines are not part of the realisation poset.}
\label{dasgsdghsdfhjgdhj}
\end{figure}
\end{point}
\section{Upper semilattices}
\begin{point}\label{dfhieir}
By definition an \textbf{upper semilattice} is a poset whose every non-empty finite subset has a coproduct (see~\ref{dsdgdfhfgjh}). To verify if a poset is an upper semilattice it is enough to prove the existence of the coproduct of every two non-comparable elements.
An upper semilattice $I$ is called \textbf{distributive} if, for every $a$, $b$, $x$ in $I$ for which the products $a\wedge x$ and $b\wedge x$ exist, the product
$(a\vee b)\wedge x$ also exists and $(a\vee b)\wedge x=(a\wedge x)\vee (b\wedge x)$.
A poset $I$ is an upper semilattice if
$I_\ast$ (see~\ref{dasgfsdfhhfk}) is an upper semilattice. Moreover, an upper semilattice $I$ is distributive if and only if $I_\ast$ is distributive.
The product $I\times J$ of posets (see~\ref{afadfhfgsh}) is an upper semilattice if and only if $I$ and $J$
are upper semilattices. If $I$ and $J$
are upper semilattices, then $(x_1,y_1) \vee (x_2,y_2) =(x_1 \vee x_2,\ y_1\vee y_2)$ for $(x_1,y_1)$, $ (x_2,y_2)$ in $I\times J$. The product $I\times J$ is a distributive upper semilattice if and only if $I$ and $J$
are distributive upper semilattices (compare with~\ref{jfsilss}).
If $I$ is a (distributive) upper semilattice, then so is $I^S$ for every set $S$.
The poset $\mathbb{R}$ is a distributive upper semilattice which is not unital. The posets $[n]$, $\mathbb{N}^r$, $[-1,0]^S$, and $[0,\infty)^r$ are unital distributive upper semilattices.
Let $S$ be a set. The suspension $\Sigma S$ (see~\ref{afadfhfgsh}) is a consistent and unital upper semilattice.
It is distributive if and only if $|S|\leq 2$.
For example, if $S=\{s_1,s_2,s_3\}$, then $(s_1\vee s_2)\wedge s_3=s_3$, while $(s_1\wedge s_3)\vee (s_2\wedge s_3)=\bigwedge S$.
Thus, if $|S|>2$, then $\Sigma S$ is a consistent unital
upper semilattice which is not distributive.
For every element $a$ in a poset $I$, the poset $\mathcal{R}_a(I)$ (see~\ref{drjfhhj})
is a distributive upper semilattice.
Thus, the realisation
$\mathcal{R}(I)=\bigcup_{a\in I}\mathcal{R}_a(I)$ is a union of distributive upper semilattices. The realisation
$\mathcal{R}(I)$, however, may fail to be an upper semilattice, even if $I$ is an
upper semilattice (see Example~\ref{svdgsfghb}).
\end{point}
\begin{point}\label{koikik}
Let $I$ be an upper semilattice and $J$ be a poset. A function $f\colon I\to J$ is called
a \textbf{homomorphism} if, for every non-empty finite subset $S\subset I$, the element $f(\bigvee_I S)$ is a sup of $f(S)$ in $J$.
If $f\colon I\to J$ is a homomorphism, then it is a functor since if $a\leq b$ in $I$, then
$f(b)=f(a\vee b)$ is a sup of $\{f(a),f(b)\}$, which implies $f(a)\leq f(b)$ in $J$.
If $J$ is an upper semilattice, then a function $f\colon I\to J$ is a homomorphism if and only if it is a functor and $f(x\vee y) =f(x)\vee f(y)$ for all non-comparable elements $x$ and $y$ in $I$.
If every two elements in $I$ are comparable, then a function $f\colon I\to J$ is a homomorphism if and only if it is a functor.
For example, $f\colon\mathbb{N}\to [0,\infty)^r$ is a homomorphism if and only if it is a functor.
The inclusion $I\subset I_\ast$ is a homomorphism (see~\ref{dasgfsdfhhfk}). Furthermore,
a function $f\colon I\to J$ is a homomorphism if and only if
$f_\ast\colon I_\ast\to J_\ast$ is a homomorphism. The standard inclusion $\text{in}\colon I\to \mathcal{R}(I)$ (see~\ref{drjfhhj}) is also a homomorphism.
For every $a$ in $J$, the poset $\mathcal{R}_a(J)$ is
an upper semilattice (see~\ref{dfhieir}) and
according to Corollary~\ref{dbhfjgyhdklsadj} the inclusion
$\mathcal{R}_a(J)\subset \mathcal{R}(J)$ is a homomorphism. For every $a$ in $J$, $T_a=[-1,0]^{\mathcal{P}(a)}$ is an upper semilattice and, for every $a\le b$ in $J$, the translation $T_{a\le b}$ (see~\ref{asfadfhs}) is a homomorphism.
\end{point}
\begin{example}\label{asdfghg} Let $f\colon [1]^2\to [0,\infty)^2$ be a function defined as follows:
\[f(0,0)=(0,0),\ \ f(1,0)=(1,0),\ \ f(0,1) = (0,1),\ \ f(1,1)=(2,2)\]
This function is a unital functor, however it is not a homomorphism since:
\begin{center}$\bigvee_{[1]^2}\{(1,0),(0,1)\} =(1,1)\ \ \ \ \ \ \ \bigvee_{[0,\infty)^2}\{(1,0),(0,1)\}=(1,1)$\end{center}
\begin{center}$f(\bigvee_{[1]^2}\{(1,0),(0,1)\})=f(1,1)=(2,2)\not= (1,1) = \bigvee_{[0,\infty)^2}\{(1,0),(0,1)\}
$\end{center}
\end{example}
\begin{point}\label{jfsilss}
To construct upper semilattices,
the Grothendieck construction (see~\ref{asdfsgadfsfhjejk}) can be used.
Let $I$ be an upper semilattice and $T\colon I\to \text{Posets}$ be a functor (and not just a lax functor) such that $T_a$ is an upper semilattice and $T_{a\le b}$ is a homomorphism, for every $a\le b$ in $I$.
Then $\text{Gr}_I T$ is also an upper semilattice.
To see this, consider $(a,x)$ and $(b,y)$ in $\text{Gr}_I T$.
We claim $(a\vee b, T_{a\le a\vee b}x\vee T_{b\le a\vee b}y)$ is their coproduct in $\text{Gr}_I T$. Let $ (a,x)\leq (c,z)\ge (b,y)$, which means $a\leq c\geq b$ in $I$ and $T_{a\le c}x\le z\geq T_{b\le c}y$ in $T_c$. Since $I$ is an upper semilattice, $a\vee b\le c$, and, since $T$ is a functor, $T_{a\vee b\le c}T_{a\le a\vee b}x=T_{a\le c}x\le z\geq T_{b\le c}y=T_{a\vee b\le c}T_{b\le a\vee b}y$. Consequently, $(T_{a\vee b\le c}T_{a\le a\vee b}x)\vee (T_{a\vee b\le c}T_{b\le a\vee b}y)\le z$. Using the fact
$T_{a\vee b\le c}$ is a homomorphism, we get $T_{a\vee b\le c}(T_{a\le a\vee b}x \vee T_{b\le a\vee b}y)\le z$, which implies $(a\vee b, T_{a\le a\vee b}x \vee T_{b\le a\vee b}y)\le (c,z)$.
\end{point}
\begin{point}
The dimension (see~\ref{adsgdgfjhgh}) and the parental dimension
(see~\ref{aDFGSDFFHDHGJ}) of an element $x$ in an upper semilattice $I$ can be described using coproducts:
\begin{align*}
\text{dim}_I(x) &=\text{sup}\left\{ |U| \ |\ \begin{subarray}{c} U\subset I \text{ is finite, has a proper ancestor, $\bigvee U=x$,}\\
\text{and $\bigvee S< x$ for every set $S$ such that $\emptyset\not= S\subsetneq U$}\end{subarray}\right\}\\
\text{par-dim}_I(x) &=\text{sup}\left\{ |U|\ |\ \begin{subarray}{c} U\subset (I<x) \text{ is finite, has an ancestor,}\\
\text{and $a\vee b= x$, for every $a\not= b$ in $U$}\end{subarray} \right\}
\end{align*}
\end{point}
\begin{point}\label{afdafhghj}
Let $I$ be an upper semilattice of finite type (see~\ref{dsdgfsfghjdghj}) and $S\subset I$ be non-empty.
If the product of $S$ exists, then it is an ancestor of $S$. On the other hand, if $S$ has an ancestor, then this ancestor belongs to
$\bigcap _{x\in S}(I\le x)$, which is finite by the finite type assumption. Thus, if $S$ has an ancestor, then the coproduct
$\bigvee \left(\bigcap _{x\in S}(I\le x)\right)$
exists ($I$ is an upper semilattice), which implies the existence of the product $\bigwedge S$ and the equality $\bigwedge S=\bigvee \left(\bigcap _{x\in S}(I\le x)\right)$ (see the end of~\ref{dsdgfsfghjdghj}). In conclusion, the product of a subset of a finite type upper semilattice exists if and only if this subset has an ancestor. In particular, if $\bigwedge S$ exists in $I$, then so does $\bigwedge U$ for every non-empty subset $U\subset S$.
From this discussion and Proposition~\ref{sfgasg} it follows that the parental dimension
of an element $x$ in $I$ can be described using products:
\begin{center}
$\text{par-dim}_I(x)=\text{max}\{|S|\ |\ S\subset {\mathcal P}(x)\text{ for which $\bigwedge S$ exists}\}$
\end{center}
\end{point}
\begin{point}\label{afsgsdfhdfgjhdhgj}
Let $J$ be a poset. A subposet inclusion $f\colon I\subset J$ is called a \textbf{sublattice} if
$I$ is an upper semilattice and $f$ is a homomorphism: for every finite non-empty subset $S\subset I$, the coproduct
$\bigvee _I S$ is a sup of $S$ in $J$. For example $[0,\infty)\subset \mathbb{R} \supset \mathbb{N}$ are sublattices. If $J$ is
an upper semilattice, then
for every $a$ in $J$, the inclusion
$(J\leq a)\subset J$ is a sublattice.
A sublattice $I\subset J$ satisfies the assumption of Proposition~\ref{aDFDFHFHJ} and hence $\text{\rm dim}_I(x)\leq \text{\rm dim}_J(x)$ and $\text{\rm par-dim}_I(x)\leq \text{\rm par-dim}_J(x)$, for all $x$ in $I$.
If $J$ is an upper semilattice, then
the intersection of sublattices in $J$ is also a sublattice (note that this may fail if $J$ is not an upper semilattice).
Thus, if $J$ is an upper semilattice, then
the intersection of all the sublattices of $J$ containing a subset $U\subset J$ is the smallest sublattice of $J$ containing $U$.
This intersection is denoted by $\langle U\rangle$ and called the sublattice \textbf{generated} by $U$. A sublattice generated by a subset
$U\subset J$ is only defined when $J$ is an upper semilattice, and this is automatically assumed whenever $\langle U\rangle$ is discussed. The sublattice $\langle U\rangle$ can be described explicitly as $\langle U\rangle= U\cup\{\bigvee_J S\ |\ S\subset U \text{ is finite and non-empty}\}$.
Thus if $U$ is finite, then so is $\langle U\rangle$.
\end{point}
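The explicit description of $\langle U\rangle$ gives an immediate procedure for computing generated sublattices of finite upper semilattices. The following Python sketch is illustrative only; the encoding of the upper semilattice by a binary join \texttt{vee} is ours.
\begin{verbatim}
from itertools import combinations
from functools import reduce

def generated_sublattice(vee, U):
    # <U> = U together with the joins of all finite non-empty subsets of U.
    result = set(U)
    for r in range(1, len(U) + 1):
        for S in combinations(U, r):
            result.add(reduce(vee, S))
    return result

# Example in N^2 with coordinatewise maximum as the join.
vee = lambda a, b: (max(a[0], b[0]), max(a[1], b[1]))
print(sorted(generated_sublattice(vee, [(1, 0), (0, 1), (2, 0)])))
# [(0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
\end{verbatim}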
In Proposition~\ref{adgfdsfhgf} it was shown that the parental dimension of an element bounds its dimension. This bound can be strict as the suspension example
(see~\ref{aDFGSDFFHDHGJ}) illustrates.
However, for distributive upper semilattices of finite type:
\begin{prop}\label{gtyuyhgfdt}
Assume $I$ is a finite type distributive upper semilattice. Then $\text{\rm dim}_I(x)= \text{\rm par-dim}_I(x)$ for every $x$ in $I$.
\end{prop}
\begin{proof}
The cases when $\text{\rm dim}_I(x)$ is $0$ and $1$ follow from Proposition~\ref{hryfjhxfh}.
Assume $\text{par-dim}_I(x)\geq 2$.
Let $S\subset \mathcal{P}(x)$ be a subset realising $\text{par-dim}_I(x)$; this means (see~\ref{afdafhghj}) that $S$ is a subset of $\mathcal{P}(x)$ of maximal size whose
product $\bigwedge S$ exists. The assumption $\text{par-dim}_I(x)\geq 2$ guarantees $\lvert S\rvert\ge 2$.
For $s$ in $S$, set $u_s:=\bigwedge(S\setminus \{s\})$ and $U:=\{u_s\ | \ s\in S\}$. We claim: (a) $\bigwedge U=\bigwedge S$, (b) $(\bigvee U)=x$, (c) $|U|=|S|$, and (d) $(\bigvee U')<x$ if $\emptyset\not=U'\subsetneq U$.
These properties imply that $U$ belongs to $\textbf Phi_I(x)$ (see~\ref{adsgdgfjhgh}) and hence
$\text{dim}_I(x)\geq |U|=|S|=\text{par-dim}_I(x)$, which together with~\ref{adgfdsfhgf} gives the desired equality.
Property (a) is clear. Assume (b) does not hold. Then $\bigvee U<x$ and, since $I$ is of finite type, there is $p$ in $\mathcal{P}(x)$ such that $\bigvee U\le p<x$, which means
$u_s\leq p$ for every $s$ in $S$. If $p$ is not in $S$, then $\bigwedge S\le u_s\le p$ would imply the existence of the product $\bigwedge (S\cup\{p\})$ (see~\ref{afdafhghj}), contradicting the maximality of $S$. The element $p$ is therefore in $S$, and $\bigwedge(S\setminus \{p\})=u_p\le p$,
leading to $u_p\vee p=p$. Since $s\vee p=x$ for every $s$ in $S\setminus \{p\}$, distributivity gives a contradiction
$x=\bigwedge_{s\in S\setminus \{p\}} (s\vee p)\leq u_p\vee p =p$.
To prove property (c) we need to show that if $s\not=s'$, then $u_s\not= u_{s'}$. Assume this is not the case and $u_s= u_{s'}$ for some $s\not= s'$.
Distributivity and the already proven property (b) lead to a contradiction:
\begin{center}
$s'=x\wedge s' = (\bigvee U)\wedge s' = \bigvee_{s} (u_s \wedge s')= (u_{s'}\wedge s') \vee (\bigvee _{s\neq s'}(u_s\wedge s')) = (\bigwedge S) \vee (\bigvee _{s\neq s'}u_s)=\bigvee _{s\neq s'}u_s= \bigvee _{s}u_s=\bigvee U=x
$\end{center}
Thus, $U$ and $S$ have the same size, which is property (c).
It remains to show (d): $\bigvee U'<x$ if $\emptyset\not=U'\subsetneq U$. Since every such $U'$ is contained in $U\setminus\{u_{s'}\}$ for some $s'$ in $S$, it is enough to consider subsets of this form.
Assume by contradiction that there is $s'$ in $S$ such that
$\bigvee _{s\neq s'}u_s=x$. Then again distributivity and $\bigvee U =x$ lead to a contradiction:
\[
s' = x\wedge s'=(\bigvee _{s\neq s'}u_s)\wedge s' = \bigvee _{s\neq s'}(u_s\wedge s') = \bigvee _{s\neq s'}u_s=x. \qedhere
\]
\end{proof}
\begin{point}
We finish this section with
a characterisation of
finite type posets whose elements have dimension at most one. A poset $I$ is called
a \textbf{forest} if every pair of non-comparable elements $x$ and $y$ in $I$ has no common ancestor. Every subposet of a forest is still a forest. A forest is called a \textbf{tree} if it is connected. Therefore, every connected subposet of a forest is a tree.
\end{point}
\begin{prop}\label{sdagdsfjthj}
\begin{enumerate}
\item An upper semilattice whose every element has dimension (see~\ref{adsgdgfjhgh}) at most $1$ is a tree.
\item A tree of finite type is an upper semilattice whose every element has dimension at most $1$.
\end{enumerate}
\end{prop}
\begin{proof}
\noindent
(1):\quad Let $x$ and $y$ be non-comparable elements in an upper semilattice $I$ whose every element has dimension at most $1$. If $x$ and $y$ had a common ancestor, then
$\{x,y\}$ would belong to $\textbf Phi_I(x\vee y)$ (see~\ref{adsgdgfjhgh}) leading to a contradiction $\text{dim}_I(x\vee y)\ge 2$.
\noindent
(2):\quad
Let $I$ be a tree of finite type and $x$, $y$ be its elements. We need to show the existence of $x\vee y$. First, assume $x$ and $y$ have a common descendent. Then, since $I$ is of finite type, $\{x,y\}$ has a sup. We claim that this sup is unique, otherwise two such sup elements would be non-comparable and have $x$ as a common ancestor contradicting
the assumption $I$ is a tree.
Assume $x$ and $y$ do not have a common descendent.
Since $I$ is connected, there is a sequence $C=\{c_0=x,c_1,\dots, c_n=y\}$ in $I$ where
$c_i$ is related to $c_{i+1}$ for every $0\leq i<n$. This set $C$ does not have a descendent, otherwise $\{x,y\}$ would also have it. Thus
$C$ must have at least two maximal elements.
Among these, there are $c_i$ and $c_j$ such that $(C\le c_i)\cap (C\le c_j)\neq \emptyset$, otherwise $C$ would not be connected. The elements
$c_i$ and $c_j$ are not comparable and have a common ancestor. This contradicts again the assumption that $I$ is a tree.
Finally, for every $x$ in $I$, $\text{dim}_I(x)\leq 1$: the elements of any set in $\textbf Phi_I(x)$ of size at least $2$ are pairwise non-comparable (otherwise $x$ would be a sup of a proper non-empty subset of it), so, $I$ being a tree, such a set cannot have a proper ancestor.
\end{proof}
\section{Realisations of upper semilattices}
The realisation
of an upper semilattice may fail to be an upper semilattice (see Example~\ref{svdgsfghb}).
This section is devoted to discussing some of the reasons for this and what assumptions can eliminate them. Our aim is to prove:
\begin{thm}\label{dasghfgjk}
If $I$ is a distributive upper semilattice of finite type, then its
realisation $\mathcal{R}(I)$ (see~\ref{drjfhhj}) is also a distributive upper semilattice.
\end{thm}
For example, an upper semilattice whose elements have dimension at most $1$ is distributive.
In particular finite type trees are distributive upper semilattices (see~\ref{sdagdsfjthj}.(2)). Theorem~\ref{dasghfgjk} implies therefore that the realisation of a finite type tree is a distributive upper semilattice, and since its elements have dimensions at most $1$ (see~\ref{afashgrjh}), it is a tree (see~\ref{sdagdsfjthj}.(1)).
\begin{point}\label{adfgsgfjfjk}
Let $I$ be an upper semilattice and $a$, $x$ be in $I$.
If $\mathcal{P}(a)\leq x$ contains two different elements
$y_1$ and $y_2$, then $y_1\vee y_2=a$, which implies $a\le x$, and hence $(\mathcal{P}(a)\leq x)=\mathcal{P}(a)$. Thus, there are three possibilities: the set $\mathcal{P}(a)\leq x$ might be
empty, or consist of only one element, or be the entire set $\mathcal{P}(a)$.
Furthermore, if $\mathcal{P}(a)\leq x$ contains only one element and $a\not\leq x$, then this element has to be the product $a\wedge x$.
These facts are used in Table~\ref{vabsfgdbnsfgnds} to characterise the blocks of the partition of $I$ described in~\ref{adgsdgfhjhg}.
\begin{table}[htbp]
\centering
\begin{tabular}{|c|c|c|}
\hline
\multirow{ 2}{*}{$x$ is $a$-consistent} & $x$ is $a$-dependent & \parbox[t]{6cm}{ \centering $a\leq x.$ \\
In this case $(\mathcal{P}(a)\leq x) = \mathcal{P}(a)$}\\ \cline{2-3}
& $x$ is $a$-independent & \parbox[t]{6cm}{\centering $a\not\leq x$ and the product $a\wedge x$ exists and belongs to $\mathcal{P}(a).$
\\ In this case $\lvert\mathcal{P}(a)\leq x\rvert =1.$}\\ \hline
\multicolumn{2}{|c|}{$a$-inconsistent}& \parbox[t]{6cm}{\centering$a\not\leq x$ and either the product $a\wedge x$ does not exist or it exists but does not belong to $\mathcal{P}(a).$\\
In this case $(\mathcal{P}(a)\leq x)=\emptyset.$} \\ \hline
\end{tabular}
\caption{}
\label{vabsfgdbnsfgnds}
\end{table}
This table can be used to describe the translation operation (see~\ref{asfadfhs}) more explicitly.
Let $a\leq b$ in $I$ and $f\colon\mathcal{P}(a)\to [-1,0]$ be a function. Then:
\[
(T _{a\le b}f)(x)=\begin{cases} -1 & \text{if $x$ is $a$-dependent,} \\
f(a\wedge x) & \text{if $x$ is $a$-independent,} \\
0 & \text{if $x$ is $a$-inconsistent.}
\end{cases}
\]
\end{point}
Similarly, a simplified characterisation of when an upper semilattice of finite type is
consistent (see~\ref{adgdgfjdghj}) can be given.
\begin{lemma}\label{sadfgadfhg}
Let $I$ be an upper semilattice of finite type. Then
$I$ is consistent if and only if the following condition is satisfied for every $a\leq b$ in $I$:
if $x$ is $b$-independent and the product $a\wedge x$ exists, then $x$ is $a$-consistent, in which case either
$a\wedge x=a$ ($x$ is $a$-dependent) or $a\wedge x$ is a parent of $a$ ($x$ is $a$-independent).
\end{lemma}
\begin{proof}
Consider elements $a\leq b$ in $I$.
Assume $I$ is a consistent upper semilattice and $x$ is $b$-independent.
If the product $a\wedge x$ exists, then $a\wedge x$ is a common ancestor of $a$ and $x$,
and consistency of $I$ implies that $x$ is $a$-consistent.
Assume the condition in the lemma holds. Let $x$ be $b$-independent having a common ancestor with $a$. Since $I$ is of finite type, the product $a\wedge x$ exists.
The $a$-consistency of $x$ is then given by the assumed condition.
\end{proof}
Table~\ref{sdfbsfghnegfhnbd} gives a characterisation, based on~\ref{sadfgadfhg}, of dependent, independent and inconsistent
elements in a consistent upper semilattice of finite type $I$.
\begin{table}[htbp]
\centering
\begin{tabular}{|c|c|c|}
\hline
\multirow{ 2}{*}{$x$ is $a$-consistent} & $x$ is $a$-dependent & {\centering $a\leq x$}\\ \cline{2-3}
& $x$ is $a$-independent & \parbox[t]{5.7cm}{\centering $a\not\leq x$ and the product $a\wedge x$ exists, in which case $a\wedge x$ is a parent of $a$}\\ \hline
\multicolumn{2}{|c|}{$a$-inconsistent}& {\centering the product $a\wedge x$ does not exist} \\ \hline
\end{tabular}
\caption{}
\label{sdfbsfghnegfhnbd}
\end{table}
\begin{prop}\label{asfsgdhgfhjnhgn}
A distributive upper semilattice of finite type is consistent.
\end{prop}
\begin{proof}
To prove the proposition we use Lemma~\ref{sadfgadfhg}.
Let $a\leq b$ and $x$ be an element which is $b$-independent
for which the product $a\wedge x$ exists. Let $p$ be a parent of $b$ such that $p\leq x$.
Then $p\leq p \vee (a\wedge x) \leq b$, and since $b\not\leq x$, the element $b$ cannot be equal to $p \vee (a\wedge x)$. Thus,
$a\wedge x\leq p\vee (a\wedge x)=p$. There are three options: either $a\wedge x=p$, or $a\wedge x$ is a parent of $a$, or there is $y$ such that
$a\wedge x<y<a$. The first two options give $a$-consistency of $x$ proving the proposition.
We claim that the third option is not possible. For $y$ such that $a\wedge x<y<a$, we have $y\not\leq p$, since otherwise $y\leq x$, implying $y\leq a\wedge x$. Since $y\not\leq p$, then
$p\vee y =b$. We can then use distributivity to obtain a contradiction $a = b\wedge a = (p\vee y)\wedge a = (p\wedge a) \vee (y\wedge a)\leq (a\wedge x)\vee y\leq y$.
\end{proof}
The key reason why we are interested in consistent upper semilattices is:
\begin{lemma}\label{ddsgadsgh}
If $I$ is a consistent upper semilattice, then the $I$-translation is a functor: $T_{a\leq c}=T_{b\leq c}T_{a\leq b}$ for every $a\leq b\leq c$ in $I$.
\end{lemma}
\begin{proof}
Consider a function $f\colon\mathcal{P}(a)\to[-1,0]$.
According to Lemma~\ref{asdgdsgshdgf} we need to show $(T_{a\leq c}f)(x)\geq (T_{b\leq c}T_{a\leq b}f)(x)$ for all $x$ in $\mathcal{P}(c)$. Let $x$ be in $\mathcal{P}(c)$.
If $x$ is $a$-inconsistent, then $(T_{a\leq c}f)(x)=0$ and the desired inequality is clear.
Assume $x$ is $a$-consistent. Then $a\wedge x$ exists and either $a=a\wedge x$ or $a\wedge x$ is a parent of $a$.
Since $x$ is a parent of $c$, it is $c$-consistent. Consistency of $I$ applied to $b\leq c$
implies the $b$-consistency of $x$. Thus, either $x$ is $b$-dependent, or
$b$-independent. If $x$ is $b$-dependent, then $(T_{b\leq c}T_{a\leq b}f)(x)=-1$ and the desired inequality is clear.
Assume $x$ is $b$-independent. Then $(T_{b\leq c}T_{a\leq b}f)(x)=(T_{a\leq b}f)(b\wedge x)$.
The product $b\wedge x$ is $a$-dependent if and only if $x$ is $a$-dependent. If this happens, then
$(T_{a\leq c}f)(x)=-1=(T_{a\leq b}f)(b\wedge x)$ and the desired inequality holds in this case as well.
The product $b\wedge x$ is $a$-independent if and only if $x$ is $a$-independent. If this happens, then
$(T_{a\leq c}f)(x)=f(a\wedge x)=f(a\wedge b\wedge x)=(T_{a\leq b}f)(b\wedge x)$ and the desired inequality also holds.
\end{proof}
For every $a\leq b$ in a poset $I$, the translation $T_{a\leq b}$ is a homomorphism of upper semilattices. If, in addition, $I$ is a consistent upper semilattice, then
by Lemma~\ref{ddsgadsgh}, the $I$-translation is a functor, and consequently (see~\ref{jfsilss}):
\begin{prop}\label{asfdfjk}
If $I$ is a consistent upper semilattice, then the Grothendieck construction $\text{\rm Gr}_IT=\mathcal{G}(I)$ is an upper semilattice.
\end{prop}
For the realisation $\mathcal{R}(I)\subset \mathcal{G}(I)$ (see~\ref{sadsfgdfghjhn}) to be an upper semilattice
we need a further finiteness restriction:
\begin{prop}\label{sdfgfds}
Let $I$ be a consistent upper semilattice whose every element has a finite set of parents. Then $\mathcal{R}(I)$ is an upper semilattice.
\end{prop}
\begin{proof}
Let $(a,f)$, $(b,g)$ be in $\mathcal{R}(I)$ and $m:=\bigvee\{T_{a\leq a\vee b}f,T_{b\leq a\vee b}g\}$. The poset $\mathcal{G}(I)$ is an upper semilattice (see~\ref{asfdfjk}). Thus
$(a\vee b,m)$ is the coproduct of $(a,f)$ and $(b,g)$ in $\mathcal{G}(I)$ (see~\ref{dbhfjgyhdklsadj}).
Since
$(a\vee b,m)$ belongs to $\mathcal{R}(I)$ (see~\ref{bcvfhdxncvb}),
it is the coproduct of $(a,f)$ and $(b,g)$ in $\mathcal{R}(I)$.
\end{proof}
\begin{cor}\label{asfhgfgkkl}
Let $I$ be a consistent upper semilattice whose every element has a finite set of parents. Choose an element $d$ in $I$ and a subposet $V\subset (-1,0)$. Then the inclusion $\alpha\colon \mathcal{R}_{I\leq d}(I,V)\subset \mathcal{R}(I)
$ is a sublattice (see~\ref{afsgsdfhdfgjhdhgj}), i.e.,
$\mathcal{R}_{I\leq d}(I,V)$ is an upper semilattice, and $\alpha$ is a
homomorphism.
\end{cor}
\begin{proof}
Just note that if $(a,f)$ and $(b,g)$ belong to $\mathcal{R}_{I\leq d}(I,V)$, then so does their coproduct $(a\vee b, m)$ described in the proof of~\ref{sdfgfds}.
\end{proof}
We are now ready to prove Theorem~\ref{dasghfgjk}.
\begin{proof}[Proof of Theorem~\ref{dasghfgjk}]
Let $I$ be a distributive upper semilattice of finite type.
Its realisation $\mathcal{R}(I)$ is an upper semilattice because
of propositions~\ref{asfsgdhgfhjnhgn} and~\ref{sdfgfds}.
It remains to show $\mathcal{R}(I)$ is distributive.
This requires understanding of products in $\mathcal{R}(I)$.
Consider elements $(a,f)\geq (c,m)\leq (x,h)$ in $\mathcal{R}(I)$.
Then $a\geq c\leq x$ and hence the product $a\wedge x$ exists (see discussion in~\ref{afdafhghj}).
Let $y$ be a parent of $a\wedge x$. If $p_1$ and $p_2$ are different parents of
$a$ for which $p_1 \wedge x = y =p_2\wedge x$, then, by the distributivity, we would get a contradiction
$a\wedge x= (p_1\vee p_2)\wedge x =(p_1\wedge x)\vee (p_2\wedge x)=y$.
Thus, the set $\{p\in \mathcal{P}(a)\ |\ p\wedge x=y\}$
contains at most one element. If $\{p\in \mathcal{P}(a)\ |\ p\wedge x=y\}$ is non-empty, then its unique element is denoted by $yx^{-1}$.
Define $R(f)\colon\mathcal{P}(a\wedge x)\to(-1,0]$
\[R(f)(y):=
\begin{cases}
f(yx^{-1}) &\text{ if }\{p\in \mathcal{P}(a)\ |\ p\wedge x=y\}\not=\emptyset\\
0&\text{ otherwise}
\end{cases}\]
Let $w$ be an ancestor of $\text{supp}(f)$.
For every $y$ in $\text{supp}(R(f))$, the set $\{p\in \mathcal{P}(a)\ |\ p\wedge x=y\}$ is non-empty and
$R(f)(y)=f(yx^{-1})<0$. Thus $w\leq yx^{-1}$ which implies $w\wedge x\leq y$. The element $w\wedge x$
is therefore an ancestor of $\text{supp}(R(f))$ and $(a\wedge x, R(f))$ is in $\mathcal{R}(I)$.
We claim that the following relations hold $(c,m)\leq (a\wedge x,R(f))\leq (a,f)$.
Let $y$ be in $\text{supp}(R(f))$. In particular, $f(yx^{-1})<0$. The relation
$(c,m)\leq (a,f)$ has two consequences. First, the product $yx^{-1}\wedge c=y\wedge c$ exists and
$y$ is $c$-consistent (see Table~\ref{sdfbsfghnegfhnbd}). All the elements of $\text{supp}(R(f))$ are
therefore $c$-consistent. Second, if $y$ in addition is $c$-independent,
then $yx^{-1}$ is also $c$-independent and $m(y\wedge c)=m(yx^{-1}\wedge c)\leq f(yx^{-1})=R(f)(y)$. These two consequences give the relation $( c,m)\leq (a\wedge x,R(f))$.
By direct calculation:
\[T_{a\wedge x\leq a}R(f)(p)=
\begin{cases}
-1 &\text{ if $p$ is $a\wedge x$-dependent}\\
f(p)&\text{ if $p$ is $a\wedge x$-independent}\\
0 & \text{ if $p$ is $a\wedge x$-inconsistent}
\end{cases}
\]
If $p$ in $\mathcal{P}(a)$ is $a\wedge x$-inconsistent, then it is also $c$-inconsistent (see Table~\ref{sdfbsfghnegfhnbd}). Thus, $T_{a\wedge x\leq a}R(f)\leq f$, as all the elements in $\text{supp}(f)$ are $c$-consistent. This proves the relation $(a\wedge x,R(f))\leq (a,f)$ (see~\ref{asdgdfhfgjhg}).
In an analogous way
$(c,m)\leq (a\wedge x,R(h))\leq (x,h)$ and the obtained relations
imply:
\[\begin{tikzcd}[column sep=10,row sep=10]
(a,f) \arrow[r,symbol=\geq] & (a\wedge x,\bigwedge\{R(f),R(h)\})\arrow[r,symbol=\leq] \arrow[d,symbol=\geq] & (x,h)\\
& (c,m)
\end{tikzcd}\]
This proves $(a\wedge x,\bigwedge\{R(f),R(h)\})$ is the product of $(a,f)$ and $(x,h)$.
In particular, the product of two elements in $\mathcal{R}(I)$ exists if and only if these
elements have a common ancestor.
We are now ready to prove distributivity of $\mathcal{R}(I)$.
Let $(a,f)$, $(b,g)$, $(x,h)$ be in $\mathcal{R}(I)$ for which the products
$(a,f)\wedge (x,h)$ and $(b,g)\wedge (x,h)$
exist. Since $(a,f)\wedge (x,h)$ is an ancestor of both $(a,f)\vee (b,g)$ and $(x,h)$,
the product $((a,f)\vee (b,g))\wedge (x,h)$ exists. Moreover, this product is of the form
$((a\wedge x)\vee (b\wedge x),m)$, where:
\[m= \bigwedge\{R(\bigvee\{T_{a\leq a\vee b}f, T_{b\leq a\vee b}g\}), R(h) \}.\]
By direct verification $m= \bigvee \{\bigwedge\{R(T_{a\leq a\vee b}f), R(h)\},
\bigwedge\{R(T_{b\leq a\vee b}g), R(h)\}\}$, which gives the desired
equality $((a,f)\vee (b,g))\wedge (x,h)= ((a,f)\wedge (x,h)) \vee ((b,g)\wedge (x,h))$.
\end{proof}
\begin{examples}\label{svdgsfghb}
The subposet $I=\{a=(0,0), b=(3,0), c=(0,2), h=(2,0), d=(3,2)\}\subset [0,\infty)^2$ is an example of a non-consistent upper semilattice, which, according to~\ref{asfsgdhgfhjnhgn}, cannot be distributive. Indeed, $b$ is $d$-independent, but it is not $c$-consistent, even though $c\le d$ and $b\wedge c=a$ exists. However, its realisation is still an upper semilattice (see~\ref{ertyuiuygf}).
Consider the subposet $I\subset [0,\infty)^3$ given by the points $\{a\wedge x=(1,0,0), b\wedge x=(0,1,0), a=(3,0,0), b=(0,3,0), x\wedge z=(1,1,0), z=(2,2,0), a\vee b=(3,3,0), x=(1,1,2), c=(3,3,2)\}$, where the names of the points are chosen to help understanding the relations between them (see Figure~\ref{cons}).
Analogously, the poset $I$ is a non-consistent upper semilattice. Furthermore, its realisation $\mathcal{R}(I)$ is not an upper semilattice. This can be seen by taking the points $(a,f\colon\{a\wedge x\}\to (-1,0])$ and $(b,g\colon\{b\wedge x\}\to (-1,0])$ of $\mathcal{R}(I)$, where $f(a\wedge x)=-0.25$ and $g(b\wedge x)=-0.5$. We can define $(a\vee b,m\colon\{a,b,z\}\to (-1,0])$, where $m(a)=0$, $m(b)=0$ and $m(z)=f(a\wedge x)\vee g(b\wedge x)=-0.25$. Consider also $(c,h\colon\{a\vee b,x\}\to (-1,0])$, with $h(a\vee b)=0$ and $h(x)=-0.25$. Both $(a\vee b,m)$ and $(c,h)$ are upper bounds of $\{(a,f),(b,g)\}$ in $\mathcal{R}(I)$, and a coproduct of $\{(a,f),(b,g)\}$, if it existed, would have to coincide with $(a\vee b,m)$ (its first coordinate would have to be $a\vee b$, and $m$ is the smallest admissible second coordinate). However, $(a\vee b,m)\not\leq (c,h)$, since $x$ in $\text{supp}(h)$ is $(a\vee b)$-inconsistent, and hence $\{(a,f),(b,g)\}$ has no coproduct in $\mathcal{R}(I)$.
\end{examples}
\begin{figure}
\caption{The arrows represent relations between elements in this representation of an upper semilattice.}
\label{cons}
\end{figure}
\noindent
\textbf{Part II, tameness.} In this part we introduce tame functors indexed by posets. An important property of a tame functor is that only a finite amount of information
is needed to describe it. For example, every functor indexed by a finite poset is tame. Tameness is therefore an interesting property primarily for functors indexed by infinite posets, for example by the realisations.
\section{Transfers and left Kan extensions}\label{asfsdfhfhgjmd}
Constructions recalled in this section also play a prominent role in~\cite{Botnan2020ART}.
\begin{point}\label{sDGSDFHFGJ}
Let $I$ be an upper semilattice (see~\ref{dfhieir}) and $f\colon I\to J$ a function of finite type (see~\ref{dsdgfsfghjdghj}).
Consider the unital function $f_\ast\colon I_\ast\to J_\ast$ (see~\ref{dasgfsdfhhfk}).
Since $I_\ast$ is also an upper semilattice and $f_\ast \leq a$ is non-empty and finite,
we can form the coproduct
$\bigvee_{I_\ast} (f_\ast \leq a)$ for all $a$ in $J_\ast$. Note that:
\[\text{$\textstyle \bigvee_{I_\ast} (f_\ast \leq a)$}=
\begin{cases}
-\infty &\text{ if $a=-\infty$, or $a$ is in $J$ and $(f\leq a)=\emptyset$}\\
\bigvee_{I} (f \leq a) & \text{ if $a$ is in $J$ and $(f\leq a)\not=\emptyset$}
\end{cases}\]
If $a\leq b$ in $J_\ast$, then $\bigvee_{I_\ast} (f_\ast \leq a)\leq \bigvee_{I_\ast} (f_\ast \leq b)$ since
$(f_\ast \leq a)\subset (f_\ast \leq b)$.
The function mapping $a$ in $J_\ast$ to $\bigvee_{I_\ast} (f_{\ast} \leq a)$ in $I_\ast$ is therefore a unital functor, which we denote by $f^{!}\colon J_\ast\to I_\ast$ and call the \textbf{transfer} of $f$. The transfer $f^{!}\colon J_\ast\to I_\ast$ is only defined when
$f\colon I\to J$ is a function of finite type between an upper semilattice $I$ and a poset $J$. Whenever the transfer
$f^{!}$ is considered, we automatically assume that these conditions are satisfied.
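As a small illustration of the transfer (a sketch): let $I=\{0,2\}$, a finite chain and hence an upper semilattice, let $J=\mathbb{R}$, and let $f\colon I\to J$ be the inclusion, which is a function of finite type. Then, for $a$ in $J_\ast$:
\[
f^{!}(a)=\begin{cases}
-\infty &\text{ if $a=-\infty$ or $a<0$}\\
0 & \text{ if $0\leq a<2$}\\
2 & \text{ if $a\geq 2$}
\end{cases}
\]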
Since $f_{\ast} \leq f_{\ast}(a)$ contains $a$,
the relation $a\leq \bigvee_{I_\ast} (f_\ast \leq f_\ast (a))=f^{!}f_\ast (a)$
holds for every $a$ in $I_\ast$. Consequently, $\text{id}_{I_\ast}\leq f^{!} f_\ast$ holds in the poset ${I_\ast}^{I_\ast}$.
In general, $f^!(a)= \bigvee_{I_\ast} (f_\ast\leq a)$ may fail to belong to $f_\ast\leq a$, i.e., the relation $f_\ast f^{!}(a)=f_\ast ( \bigvee_{I_\ast} (f_\ast \leq a))\leq a$ may fail to hold. This happens, for example, when $f\colon [1]^2\to [0,\infty)^2$ is given as in Example~\ref{asdfghg} and $a=(1,1)$ in $[0,\infty)^2$.
However, if $f$ is a homomorphism of finite type, then:
\end{point}
\begin{prop}\label{asfadfhgfgmn}
Let $f\colon I\to J$ be a homomorphism of finite type between
an upper semilattice $I$ and a poset $J$ (see~\ref{koikik}).
\begin{enumerate}
\item $f_\ast f^{!}\leq \text{\rm id}_{J_\ast}$ in ${J_\ast}^{J_\ast}$.
\item For every $a$ in $J_\ast$, $f^!(a)$ belongs to $f_\ast\leq a$ and is its global maximum.
\item $f^{!}f_\ast f^{!} = f^{!}$.
\item $f_\ast f^{!}f_\ast = f_\ast $.
\item For every $a$ in $J_\ast$, $f_\ast f^!(a)$ belongs to $\{b\in J_\ast\ |\ (f_\ast\leq a)=(f_\ast\leq b)\}$ and it is its global minimum.
\item For $a$ and $b$ in $J$, $(f\leq a)=(f\leq b)$ if and only if $f^!(a)=f^!(b)$.
\item If $f$ is injective, then $f^{!}f_\ast= \text{\rm id}_{I_\ast}$.
\item If $f$ is surjective, then $f_\ast f^{!} = \text{\rm id}_{J_\ast}$.
\end{enumerate}
\end{prop}
\begin{proof}
\noindent
(1):\quad Let $a$ be in $J_\ast$. Since $f$ is a homomorphism, then so is $f_\ast$, and therefore
$f_\ast f^{!}(a) = f_\ast (\bigvee_{I_\ast} (f_\ast \leq a))$ is a sup of
$\{f_\ast (x)\ |\ x\in ( f_\ast \leq a)\}$ in $J_{\ast}$. The desired relation,
$f_\ast f^{!}(a)\leq a$, is then
a consequence of the inclusion
$\{f_\ast (x)\ |\ x\in ( f_\ast \leq a)\}\subset (J_\ast\leq a)$.
\noindent
(2):\quad By (1), $f_\ast f^!(a)\leq a$, and hence $f^!(a)$ is in $f_\ast\leq a$.
The element $f^!(a)$ is the global maximum of $f_\ast\leq a$
since it belongs to it and it is its coproduct.
\noindent
(3):\quad
The relation $ f^{!}\leq f^{!}f_\ast f^{!}$ holds in general and does not require $f$ to be a homomorphism. The reverse relation
$f^{!}f_\ast f^{!}\leq f^{!}$ follows from (1) and the fact that $f^{!}$ is a functor (see~\ref{koikik}).
\noindent
(4):\quad
The relation $\text{id}_{I_\ast}\leq f^{!}f_\ast $ and $f_\ast$ being a functor imply $f_\ast\leq f_\ast f^{!}f_\ast$.
The reverse relation $f_\ast f^{!}f_\ast \leq f_\ast$ is a particular case of (1).
\noindent
(5):\quad
The inclusion $(f_\ast \leq f_\ast f^!(a))\subset (f_\ast \leq a)$
follows from (1). If $x$ in $I_\ast$ is such that $f_{\ast}(x)\leq a$, then, since $f^!$ and $f_\ast$ are homomorphisms, $f_\ast f^!f_{\ast}(x)\leq f_\ast f^!(a)$.
Statement (4) implies therefore $f_{\ast}(x)\leq f_\ast f^!(a)$.
This shows $(f_\ast \leq a)\subset (f_\ast \leq f_\ast f^!(a))$ and consequently $(f_\ast \leq a)= (f_\ast \leq f_\ast f^!(a))$, proving the first part of the statement.
According to (2), if $(f\leq a)=(f\leq b)$, then $f^!(a)$ belongs to $f\leq b$,
and consequently $f_\ast f^!(a)\leq b$, showing the minimality of $f_\ast f^!(a)$.
\noindent
(6):\quad If $(f\leq a)=(f\leq b)$, then $(f_\ast\leq a)=(f_\ast\leq b)$ and hence the coproducts
$f^!(a)=\bigvee_I(f_\ast\leq a)$ and $f^!(b)=\bigvee_I(f_\ast\leq b)$
are equal. If $f^!(a)=f^!(b)$, then $f_\ast f^!(a)=f_\ast f^!(b)$
and consequently $(f_\ast\leq a)=(f_\ast\leq f_\ast f^!(a))$ and
$(f_\ast\leq b)=(f_\ast\leq f_\ast f^!(b))$ are also equal, which implies
$(f\leq a)=(f\leq b)$.
\noindent
(7, 8):\quad These two statements are direct consequences of (4).
\end{proof}
We can use Proposition~\ref{asfadfhgfgmn} to interpret
the transfer of a homomorphism of
finite type as a localization, that is, a process of
inverting morphisms.
\begin{cor}\label{asdagsdfhjjk}
Let $f\colon I\to J$ be a homomorphism of finite type between an upper semilattice $I$ and a poset $J$, and
$\mathcal{C}$ a category with an initial object.
Then the following statements about a functor
$F\colon J_\ast\to \mathcal{C}$ are equivalent:
\begin{itemize}
\item for all $a$ in $J$, the morphism $F(f_\ast f^!(a)\leq a)$ is an isomorphism ($F$ inverts morphisms of the form $f_\ast f^!(a)\leq a$);
\item there is a functor $G\colon I_\ast\to\mathcal{C}$ for which $F$ is isomorphic to $G f^!$.
\end{itemize}
\end{cor}
\begin{proof}
Assume $F(f_\ast f^!(a)\leq a)$ is an isomorphism for all $a$ in $J$. The natural transformation $(Ff_\ast)f^!\to F$, given by the morphisms $\{F(f_\ast f^!(a)\leq a)\}_{a\in J}$, is therefore an isomorphism. In this case we can take $G=Ff_\ast$.
Assume $F$ is isomorphic to $Gf^!$. Since $f^!f_\ast f^!=f^!$ (see~\ref{asfadfhgfgmn}.(3)), for all $a$ in $J$, the morphism $Gf^!(f_\ast f^!(a)\leq a)$ is the identity and hence an isomorphism. The same is therefore true for $F(f_\ast f^!(a)\leq a)$.
\end{proof}
The transfer is convenient for constructing certain left adjoints
even in cases when colimits cannot be performed.
\begin{prop}\label{sdgdfhfh}
Let $\mathcal{C}$ be a category with an initial object and $f\colon I\to J$ a homomorphism of finite type between
an upper semilattice $I$ and a poset $J$. Then the functor $(-)^{f^{!}}\colon
\text{\rm Fun}_{\ast}(I_\ast,\mathcal{C})\to \text{\rm Fun}_{\ast}(J_\ast,\mathcal{C})$ is left adjoint to
$(-)^{f_\ast}\colon \text{\rm Fun}_{\ast}(J_\ast,\mathcal{C})\to \text{\rm Fun}_{\ast}(I_\ast,\mathcal{C})$.
\end{prop}
\begin{proof}
Let $G\colon I_\ast\to \mathcal{C}$ and $F\colon J_\ast\to \mathcal{C}$ be unital functors.
We need to show there is a bijection, functorial in $G$ and $F$, between the sets of natural transformations $\text{Nat}_{J_\ast}(Gf^{!},F)$ and $\text{Nat}_{I_\ast}(G,Ff_\ast)$. For a natural transformation $\phi=\{\phi_a\colon Gf^{!}(a)\to F(a)\}_{a\in J_\ast}$ between $Gf^{!}$ and $F$
in $\text{Fun}_{\ast}(J_\ast, \mathcal{C})$, define:
\[\overline{\phi} :=\left\{\begin{tikzcd}[column sep = 5em]
G(x)\ar{r}{G(x\leq f^{!}f_\ast (x))} & Gf^{!}f_\ast (x)\ar{r}{\phi_{f_\ast (x)}} & Ff_\ast (x)
\end{tikzcd}\right\}_{x\in I_\ast}\]
Then $\overline{\phi} $ is a natural transformation between $G$ and $Ff_\ast$ in $\text{Fun}_{\ast}(I_\ast, \mathcal{C})$.
For a natural transformation $\psi=\{\psi_x\colon G(x)\to Ff_\ast (x)\}_{x\in I_\ast}$ between $G$ and $Ff_\ast$
in $\text{Fun}_{\ast}(I_\ast, \mathcal{C})$, define:
\[\widehat{\psi} :=\left\{\begin{tikzcd}[column sep = 5em]
Gf^{!}(a) \ar{r}{\psi_{f^{!}(a)}} & Ff_\ast f^{!}(a) \ar{r}{F(f_\ast f^{!}(a)\leq a)} & F(a)
\end{tikzcd}\right\}_{a\in J_\ast}\]
Then $\widehat{\psi} $ is a natural transformation between $Gf^{!}$ and $F$ in $\text{Fun}_{\ast}(J_\ast, \mathcal{C})$.
Statements (2) and (3) of Proposition~\ref{asfadfhgfgmn} imply $\widehat{\overline{\phi}} = \phi$ and $\overline{\widehat{\psi}} =\psi$.
The assignments $\phi\mapsto \overline{\phi}$ and $\psi\mapsto \widehat{\psi} $ are therefore inverse bijections.
\end{proof}
Recall that there is a commutative square of functors in which the vertical functors are isomorphisms of categories (see~\ref{sdgsdgjdghkfhjk}):
\[\begin{tikzcd}[column sep = 5em]
\text{Fun}(J,\mathcal{C})\ar{r}{(-)^f}
\ar{d}[swap]{-_\ast}& \text{Fun}(I,\mathcal{C})\ar{d}{-_\ast}\\
\text{\rm Fun}_{\ast}(J_\ast,\mathcal{C}) \ar{r}{(-)^{f_\ast}} & \text{\rm Fun}_{\ast}(I_\ast,\mathcal{C})
\end{tikzcd}\]
Using these vertical
isomorphisms, Proposition~\ref{sdgdfhfh} can be rephrased as:
\begin{cor}\label{afdsfgjjk}
Let $\mathcal{C}$ be a category with an initial object and $f\colon I\to J$ a homomorphism of finite type between
an upper semilattice $I$ and a poset $J$. Then
$(-)^f\colon \text{\rm Fun}(J,\mathcal{C})\to \text{\rm Fun}(I,\mathcal{C})$ has a left adjoint given by
$(-)^{f^{!}}$.
\end{cor}
\begin{point}\label{sdfadfhfghdfgj}
The left adjoint to $(-)^f\colon \text{\rm Fun}(J,\mathcal{C})\to \text{\rm Fun}(I,\mathcal{C})$
is also called the \textbf{left Kan extension} along $f$ (see~\ref{werqergdfghjg}).
Typically such left adjoints are constructed by performing colimits in $\mathcal{C}$.
However, according to~\ref{afdsfgjjk}, when $\mathcal{C}$ has an initial object $e$ and $f\colon I\to J$ is a homomorphism of finite type between
an upper semilattice $I$ and a poset $J$, the left Kan extension $f^kG\colon J\to \mathcal{C}$ of $G\colon I\to \mathcal{C}$ along $f$ can be constructed explicitly using the transfer
(see also~\cite[Proposition 5.6]{Botnan2020ART}):
\[f^kG(a) \text{ is isomorphic to } \begin{cases}
e &\text{ if } (f\leq a)= \emptyset\\
G(\bigvee_I(f\leq a)) & \text{ if } (f\leq a)\not=\emptyset
\end{cases}
\]
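As a small illustration (a sketch): let $f$ be the inclusion of the upper semilattice $I=\{0,2\}$ into $J=\mathbb{R}$, a homomorphism of finite type, and let $G\colon I\to \mathcal{C}$ be any functor. The above description gives:
\[f^kG(a) \text{ is isomorphic to } \begin{cases}
e &\text{ if } a<0\\
G(0) & \text{ if } 0\leq a<2\\
G(2) & \text{ if } a\geq 2
\end{cases}
\]
In particular, $f^kG$ is, up to isomorphism, constant on each of the intervals $(-\infty,0)$, $[0,2)$ and $[2,\infty)$.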
This explicit description of the left Kan extension $f^kG$ has consequences which are generally not true for arbitrary left Kan extensions:
\end{point}
\begin{prop}\label{aSFEATGERYUU}
Let $f\colon I\to J$ be a homomorphism of finite type between
an upper semilattice $I$ and a poset $J$, and $\mathcal{C}$ a category with an initial object.
\begin{enumerate}
\item If $f$ is injective, then the morphism $G\to (f^{k}G)f$, adjoint to the identity $\text{\rm id}\colon f^kG\to f^kG$,
is an isomorphism for every functor $G\colon I\to \mathcal{C}$.
\item If $f$ is surjective, then the morphism $f^{k}(Ff)\to F$, adjoint to the identity $\text{\rm id}\colon Ff\to Ff$,
is an isomorphism for every functor $F\colon J\to \mathcal{C}$.
\item If $\mathcal{C}$ is closed under taking (finite) limits, then the left Kan extension functor $f^k\colon \text{\rm Fun}(I,\mathcal{C})\to \text{\rm Fun}(J,\mathcal{C})$
commutes with (finite) limits.
\end{enumerate}
\end{prop}
\begin{proof}
Statement (1) follows from~\ref{asfadfhgfgmn}.(7), and (2) from~\ref{asfadfhgfgmn}.(8).
To prove (3), consider a functor
$\phi\colon D \to \text{\rm Fun}(I,\mathcal{C})$
indexed by a (finite) small category $D$. Since $\mathcal{C}$ is closed under taking (finite) limits, then so is
$\text{\rm Fun}(I,\mathcal{C})$.
According to the above description of the left Kan extension and the fact that the limits in $\text{\rm Fun}(I,\mathcal{C})$ are constructed objectwise:
\begin{align*} f^k(\text{lim}_D\phi)(a) & \text{ is isomorphic to }\begin{cases}
e &\text{ if } (f\leq a)= \emptyset\\
(\text{lim}_D\phi)(\bigvee_I (f\leq a)) & \text{ if } (f\leq a)\not=\emptyset
\end{cases}\\
\text{lim}_D (f^k\phi)(a) &\text{ is isomorphic to }
\begin{cases}
e &\text{ if } (f\leq a)= \emptyset\\
\text{lim}_D(\phi(\bigvee_I(f\leq a)) ) & \text{ if } (f\leq a)\not=\emptyset
\end{cases}
\end{align*}
from which it follows that $ \text{lim}_D (f^k\phi)$ and $ f^k(\text{lim}_D\phi)$ are isomorphic.
\end{proof}
Here is a characterisation, based on Corollary~\ref{asdagsdfhjjk}, of functors isomorphic to left Kan extensions along a homomorphism of finite type:
\begin{cor}\label{dsgsdghf}
Let $f\colon I\to J$ be a homomorphism of finite type between
an upper semilattice $I$ and a poset $J$, and $\mathcal{C}$ a category with an initial object $e$.
Then the following statements about a functor
$F\colon J\to \mathcal{C}$ are equivalent:
\begin{itemize}
\item for all $a$ in $J$,
if $(f\leq a)\not=\emptyset$, then
$F(f(\bigvee_I(f\leq a))\leq a)$ is an isomorphism, and if
$(f\leq a)=\emptyset$, then
$F(a) $ is isomorphic to $e$;
\item there is a functor $G\colon I\to\mathcal{C}$ for which
$F$ is isomorphic to $f^k G$;
\item $F$ is isomorphic to $f^k(Ff)$;
\item there is a unital functor $G\colon I_\ast\to\mathcal{C}$
for which $F$ is isomorphic to the composition
$\begin{tikzcd}
J\ar[hook]{r} & J_\ast\ar{r}{f^!} &
I_\ast\ar{r}{G} & \mathcal{C}.
\end{tikzcd}$
\end{itemize}
\end{cor}
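For instance (a sketch, continuing the example of the inclusion $f\colon\{0,2\}\subset\mathbb{R}$ considered in~\ref{sDGSDFHFGJ}): a functor $F\colon\mathbb{R}\to\mathcal{C}$ is isomorphic to a left Kan extension along $f$ if and only if $F(a)$ is isomorphic to $e$ for every $a<0$, the morphism $F(0\leq a)$ is an isomorphism for every $0\leq a<2$, and the morphism $F(2\leq a)$ is an isomorphism for every $a\geq 2$.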
\begin{point}
Recall that every function $f\colon I\to J$ leads to a functor $f\!\!\leq\colon J\to J[f]$, where $J[f]\subset 2^I$ is the subposet consisting of
subsets of the form $(f\leq a)\subset I$ for $a$ in $J$, and $f\!\!\leq$ maps $a$ to $f\leq a$ (see~\ref{dadgdfhg}).
Since $f\!\!\leq $ is a functor, its fibers
$(f\!\!\leq)^{-1}(f\leq a)\subset J$, for $a$ in $J$, are convex
subposets of $J$ (see~\ref{dadgdfhg}).
As noted in~\ref{werqergdfghjg}, if $f$ is a
functor of finite type and $\mathcal{C}$ is a category closed under finite colimits, then, for every $G\colon I\to \mathcal{C}$,
the left Kan extension $f^k G\colon J\to \mathcal{C}$ is isomorphic to a functor that factors through
$f\!\!\leq \colon J\to J[f]$. This implies that the restrictions of $f^k G$ to the fibers $(f\!\!\leq)^{-1}(f\leq a)\subset J$ of $f\!\!\leq$, for $a$ in $J$, are isomorphic to constant functors.
In general these fibers, although being convex, can still be rather complex subposets
of $J$.
Assume $f\colon I\to J$ is a homomorphism of finite type between an upper semilattice $I$ and a poset $J$. In this case, the only assumption we need to make about $\mathcal{C}$ is that it has an initial object $e$.
Under these assumptions the fibers of $f\!\!\leq$ can be described using the transfer. Both functors,
the transfer $f^!\colon J_\ast\to I_\ast$
and $f\!\!\leq\colon J\to J[f]$, fit into the following commutative diagram:
\[ \begin{tikzcd}
I\ar{r}{f} & J\ar{r}{f\leq}\ar[hook]{d} & J[f]\ar{d}{V}\\
& J_\ast\ar{r}{f^!} & I_\ast
\end{tikzcd}\]
where $V(f\leq a):= f^!(a)=\bigvee_{I_\ast}(f_{\ast}\leq a)$. According to Proposition~\ref{asfadfhgfgmn}.(6), $V\colon J[f]\to I_\ast$ is a monomorphism. Since
$f\!\!\leq$ is a surjection, $V$ is a poset isomorphism between $J[f]$ and the image $f^!(J)\subset I_\ast$.
Moreover, the fibers of $f\!\!\leq$ can be expressed using the fibers of the transfer:
\[(f\!\!\leq)^{-1}(f\leq a)=
\begin{cases}
(f^!)^{-1}(-\infty)\cap J & \text{ if } (f\leq a)=\emptyset\\
(f^!)^{-1}(f^!(a)) & \text{ if } (f\leq a)\not=\emptyset
\end{cases}\]
Thus, if $(f\leq a)\not=\emptyset$, then, according to~\ref{asfadfhgfgmn}.(5), the fiber
$(f\!\!\leq)^{-1}(f\leq a)=(f^!)^{-1}(f^!(a))$
is a poset with a global minimum given by $f(\bigvee_I(f\leq a))$.
In this case, for every $G\colon I\to \mathcal{C}$, the left Kan extension $f^kG\colon J\to \mathcal{C}$ is constant on
subposets of $J$ that are not only convex but also have global minima.
\end{point}
\begin{point}\label{wertgf}
Assume $I$ is a consistent upper semilattice of finite type. According to~\ref{sdfgfds}, its realisation $\mathcal{R}(I)$ is also an upper semilattice. Choose an
element $d$ in $I$ and a finite subset $V\subset (-1,0)$. According to Corollary~\ref{asfhgfgkkl}, the inclusion $\alpha\colon\mathcal{R}_{I\leq d}(I, V)\subset \mathcal{R}(I)$ is a sublattice
(see~\ref{afsgsdfhdfgjhdhgj}). It consists of those pairs $(a,f)$ for which $a\leq d$ and the non-zero values of $f$ are in $V$.
We are going to describe the transfer $\alpha^{!}\colon \mathcal{R}(I)_{\ast}\to \mathcal{R}_{I\leq d}(I, V)_{\ast}$.
Let $(a,f)$ be in $\mathcal{R}(I)$. By definition (see~\ref{sDGSDFHFGJ}):
\[\alpha^{!}(a,f)=\begin{cases}
-\infty & \text{ if } \mathcal{R}_{I\leq d}(I, V)\leq (a,f)\text{ is empty}\\
\bigvee \left( \mathcal{R}_{I\leq d}(I, V)\leq (a,f)\right) & \text{ otherwise }
\end{cases}\]
Thus, to describe $\alpha^{!}(a,f)$, the set
$\mathcal{R}_{I\leq d}(I, V)\leq (a,f)$ needs to be discussed.
We claim that it is non-empty if and only if $d$ and $\text{supp}(f)$ have a common ancestor. Here is a proof.
If there is
$(b,g)$ in $\mathcal{R}_{I\leq d}(I, V)$ for which $(b,g)\leq (a,f)$, then,
according to Proposition~\ref{asdgsfghg}, any ancestor of $\text{supp}(g)$ is also
an ancestor of $\text{supp}(f)$. The relation $b\leq d$
implies therefore that any common ancestor
of $b$ and $\text{supp}(g)$ is also a common ancestor of $d$ and $\text{supp}(f)$.
Assume $d$ and $\text{supp}(f)$ have a common ancestor.
If $V$ is empty, set $v_0:=0$; if $V$ is non-empty, set
$v_0$ to be the minimal element of $V$. Define
$S:=\{x\in \mathcal{P}(a)\ |\ f(x)< v_0\}$. Since $S$ is a subset of $\text{supp}(f)$, it has a common ancestor with $d$, and consequently
the following definition of an element $c$ in $I$ makes sense as the necessary products exist due to $I$ being an upper semilattice of finite type:
\[c :=\begin{cases}
(\bigwedge S)\wedge d & \text{if }S\neq \emptyset\\
a\wedge d & \text{if }S=\emptyset
\end{cases}
\]
Any ancestor of $\{d,\text{supp}(f),a\}$ is also an ancestor of $c$.
Let $y$ be a parent of $c$. Consider the subset $(y\leq \mathcal{P}(a))\subset \mathcal{P}(a)$.
There are two possibilities, either $c$ is an ancestor of the set $y\leq \mathcal{P}(a)$ or not.
In the second case, for every $x$ in $(y\leq \mathcal{P}(a))\setminus (c\leq I)$, two things happen:
first $f(x)\geq v_0$ ($x$ does not belong to $S$) and second
there is an equality $y = c\wedge x$. We use the first inequality to
define a function $h\colon\mathcal{P}(c)\to (-1,0] $ by the following formula:
\[
h(y) :=\begin{cases}
0 & \hspace{-37mm} \text{if $c$ is an ancestor of } y\le \mathcal{P}(a)\\
\text{max}\{v\in V\ |\ v\leq f\left((y\leq\mathcal{P}(a))\setminus (c\le I)\right) \} & \text{otherwise }
\end{cases}
\]
According to the above formula,
if $h(y)<0$, then there is $x$ in
$\mathcal{P}(a)$ for which $y=c\wedge x$ and $f(x)<0$. Any common ancestor of $c$ and $\text{supp}(f)$ is therefore also an ancestor of $y$. As this happens for every $y$ for which
$h(y)<0$,
the support $\text{supp}(h)$ has an ancestor, and hence $(c,h)$ belongs to
the realisation $\mathcal{R}(I)$.
Since $h$ has values in $V,$ the element $(c,h)$ belongs to $\mathcal{R}_{I\leq d}(I, V)$.
Consider the translation $T_{c\leq a}h$. Let $x$ be a parent of $a$ for which $f(x)<0$.
Consistency of $I$ and the fact $c$ and $\text{supp}(f)$ have a common ancestor, imply that the product $c\wedge x$ exists and is either $c$ or is a parent of $c$.
If $c\leq x$,
then $T_{c\leq a}h(x)=-1\leq f(x)$, and, if $c\wedge x$ is a parent of $c$, then
$T_{c\leq a}h(x)= h(c\wedge x)\leq f(x)$ by the formula defining $h$. These relations imply
$(c,h)\leq (a,f)$, and hence
the set $\mathcal{R}_{I\leq d}(I, V)\leq (a,f)$ is non-empty.
We are going to prove $(c,h)= \bigvee \left( \mathcal{R}_{I\leq d}(I, V)\leq (a,f)\right)$,
which gives:
\[\alpha^{!}(a,f)=(c,h)\]
Let $(b,g)$ be in $\mathcal{R}_{I\leq d}(I, V)\leq (a,f)$. We need to show $(b,g)\leq (c,h)$. The relation $b\leq c$ holds since $b$ is an ancestor of $\{d, \text{supp}(f), a\}$.
Consider
$T_{b\leq c}g$. Let $y$ be a parent of $c$ for which $h(y)<0$.
Then there is $x$ in $\mathcal{P}(a)$ such that $y=c\wedge x$ and
$f(x)<0$. By the assumption $T_{b\leq c}g(y)\leq f(x)$. Since the values of
$T_{b\leq c}g$ are in $V$, then $T_{b\leq c}g(y)\leq h(y)$, which gives $(b,g)\leq (c,h)$.
As an illustration, consider $I=\mathbb{N}^2$, $V=\{-0.5\}$ and $d=(2,2)$. Recall that the subposet inclusion $\alpha\colon \mathcal{R}_{\mathbb{N}^2\le d}(\mathbb{N}^2,V)\hookrightarrow \mathcal{R}(\mathbb{N}^2)$ can be identified with $0.5[2]^2\subset [0,\infty)^2$ (see~\ref{sdgdfhkjkl}).
Figure~\ref{transfer} illustrates the effect of the transfer of
$\alpha$ via this identification.
\begin{figure}
\caption{The intersections of the solid lines correspond to points in $\mathbb{N}^2$.}
\label{transfer}
\end{figure}
\end{point}
\section{Tame functors}
\begin{Def}\label{sdfubbeoru}
Let $\mathcal{C}$ be a category and $J$ a poset.
A functor $F\colon J\to \mathcal{C}$ is called \textbf{tame} or \textbf{discretisable}
if there is a finite poset $I$ and functors $f\colon I\to J$ and $G\colon I\to\mathcal{C}$ for which $F$ is isomorphic to the left Kan extension $f^kG$ of $G$ along $f$ (see~\ref{werqergdfghjg}), in which case $F$ is also said to be \textbf{discretised} by $f$, and $G$ is called a \textbf{discretisation} of $F$.
The symbol $\text{Tame}(J, \mathcal{C})$ denotes the full subcategory of $\text{Fun}(J, \mathcal{C})$
whose objects are tame functors.
\end{Def}
If $J$ is a finite poset, then any functor $F\colon J\to \mathcal{C}$ is tame. The notion of tameness is therefore restrictive and meaningful only in case $J$ is
infinite.
To determine whether $F\colon J\to \mathcal{C}$ is tame we need to look for two things: a functor $f\colon I\to J$ that discretises $F$ and a discretisation $G\colon I\to \mathcal{C}$ of $F$.
How can we search for a discretising $f$? In many situations it is enough to consider only finite subposet inclusions $I\subset J$. This is the case, for example, if $\mathcal{C}$ is closed under finite colimits, such as the category of vector spaces over a given field. The closure of $\mathcal{C}$ under finite colimits guarantees the existence of the left Kan extension $f^k\colon \text{Fun}(I,\mathcal{C})\to \text{Fun}(J,\mathcal{C})$ for every functor $f\colon I\to J$ with finite $I$. This has the following consequences:
\begin{prop}\label{asdsfgadfshfgjh}
Let $\mathcal{C}$ be a category closed under finite colimits and $J$ a poset.
\begin{enumerate}
\item Consider the following commutative diagram of poset functors with $I_0$ and $I_1$ finite: \[\begin{tikzcd}
I_0\ar{rr}{g}\ar[bend right]{dr}{f_0} & & I_1\ar[bend left]{dl}[swap]{f_1} \\
& J
\end{tikzcd}\]
If $F\colon J\to\mathcal{C}$ is discretised by $f_0$, then it is also discretised by $f_1$.
\item If $F\colon J\to\mathcal{C}$ is discretised by $f\colon I\to J$, then
it is also discretised by every subposet $I'\subset J$ for which $f(I)\subset I'$.
\item Let $F_1,\ldots, F_n\colon J\to \mathcal{C}$ be a finite sequence of tame functors. Then there is a finite subposet $I\subset J$
that discretises $F_i$ for all $i$.
\item Let $L$ be a finite category, and $\phi\colon L \to \text{\rm Fun}(J,\mathcal{C})$ a functor. Assume
$\phi(l)$ is tame for every object $l$ in $L$. Then the functor $\text{\rm colim}_L\phi$ is also tame.
\end{enumerate}
\end{prop}
\begin{proof}
\noindent (1):\quad As long as they exist, Kan extensions commute with compositions:
$f_0^k$ and $f_1^kg^k$ are naturally isomorphic. Thus if $F$ is isomorphic to
$f_0^kG$ (is discretised by $f_0$), then it is also isomorphic to $f_1^k(g^kG)$.
\noindent (2):\quad It is a direct consequence of (1).
\noindent (3):\quad Let $I_i\subset J$ be a finite subposet discretising $F_i$. Then,
according to (2),
the union $I:=\cup_{i=1}^{n}I_i\subset J$ discretises $F_i$ for every $i$.
\noindent (4):\quad
Use (3) to choose a finite subposet $f\colon I\subset J$ that discretises $\phi(l)$ for every $l$ in $L$
(see~\ref{sdgfgjhdgh}.(3)).
Since $f$ is an injection, the natural transformation $f^k (\phi f)\to \phi $, adjoint to the identity $\text{id}\colon \phi f\to \phi f$, is an isomorphism (see the end of~\ref{werqergdfghjg}). Consequently $\text{\rm colim}_L\phi$ is isomorphic to $\text{\rm colim}_Lf^k (\phi f)$. Kan extensions commute with colimits and hence we can conclude
$\text{\rm colim}_L\phi$ is isomorphic to
$f^k\text{\rm colim}_L (\phi f)$ proving its tameness.
\end{proof}
According to Proposition~\ref{asdsfgadfshfgjh}, if $\mathcal{C}$ is closed under finite colimits, the search for discretising functors can be
restricted to finite subposet inclusions $I\subset J$, in which case
a discretisation is given by restricting to $I$ (see~\ref{werqergdfghjg}).
That is a considerable simplification.
For example, a tame functor indexed by a realisation $\mathcal{R}(I)$ with values in a category closed under finite colimits can always be discretised by a subposet of the form $\mathcal{R}_D(I,V)$, for some finite $D\subset I$ and a finite $V\subset (-1,0)$ (see~\ref{sdgdfhkjkl}).
However not all the categories $\mathcal{C}$ for which we would like to have a simpler way of verifying tameness
are closed under finite colimits. For example, the category of simplicial complexes fails to have this property.
In such a case Corollary~\ref{dsgsdghf} can be used where the only assumption on $\mathcal{C}$ is that it has an initial object $e$. It gives a characterisation of functors which are discretised by a homomorphism $f\colon I\to J$ out of a finite upper semilattice $I$. According to this corollary, a functor
$F\colon J\to\mathcal{C}$ is discretised by
$f$ if and only if, for every $a$ in $J$, if $(f\leq a)\not=\emptyset$, then $F(f(\bigvee_I(f\leq a))\leq a)$ is an isomorphism, and if
$(f\leq a)=\emptyset$, then $F(a) $ is isomorphic to $e$.
For an arbitrary $J$, not all tame functors are discretised by homomorphisms. To make sure all tame functors are discretised by a homomorphism,
$J$ itself needs to be an upper semilattice.
The rest of this section is devoted to discussing tameness under this assumption on $J$ and when
$\mathcal{C}$ is a category with an initial object $e$.
We start with a statement analogous to Proposition~\ref{asdsfgadfshfgjh}:
\begin{prop}\label{sdgfgjhdgh}
Let $J$ be an upper semilattice and $\mathcal{C}$ a category with an initial object.
\begin{enumerate}
\item
A functor $F\colon J\to \mathcal{C}$ is tame if and only if it is
discretised by a finite sublattice $I\subset J$ (see~\ref{afsgsdfhdfgjhdhgj}).
\item Let $I_0\subset I_1\subset J$ be finite sublattices of $J$. If $F\colon J\to \mathcal{C}$ is discretised by $I_0\subset J$, then it is also discretised by $I_1\subset J$.
\item Let $F_1,\ldots, F_n\colon J\to \mathcal{C}$ be a finite sequence of tame functors. Then there is a finite sublattice $I\subset J$
that discretises $F_i$ for all $i$.
\end{enumerate}
\end{prop}
\begin{proof}
\noindent
(1):\quad
Let $F\colon J\to\mathcal{C}$ be tame. Choose a finite poset $I'$ and functors $f\colon I'\to J$ and $G\colon I'\to \mathcal{C}$ for which there is an isomorphism
$f^kG\to F$. Let $\phi\colon G\to Ff$ be the natural transformation adjoint to this isomorphism. Since $f(I')\subset J$ is finite, the sublattice generated by this image $I:=\langle f(I')\rangle\subset J$ is also finite (see~\ref{afsgsdfhdfgjhdhgj}).
Denote the inclusion $I\subset J$ by $g$ and by $h\colon I'\to I$ the functor
that maps $x$ to $f(x)$.
Since $g$ is a homomorphism of finite type between upper semilattices, the left Kan extension $g^k\colon \text{Fun}(I,\mathcal{C})\to \text{Fun}(J,\mathcal{C})$
exists (see Section~\ref{asfsdfhfhgjmd}). We claim the natural transformation $g^k(Fg)\to F$, adjoint to the identity $\text{id}\colon Fg\to Fg$, is an isomorphism. This would mean $F$ is
discretised by the finite sublattice $I\subset J$, proving statement (1). According to Proposition~\ref{aSFEATGERYUU}.(1), we have
$g^k(Fg)f=g^k(Fg)gh=Fgh=Ff$. Thus the natural transformation $F\to g^k(Fg)$,
whose adjoint is $\phi\colon G\to Ff =g^k(Fg)f$, is the inverse to
$g^k(Fg)\to F$.
\noindent
(2):\quad Exactly the same argument as in~\ref{asdsfgadfshfgjh}.(2) can be used to prove this statement since
the left Kan extensions along $I_0\subset I_1$, $I_0\subset J$, and $I_1\subset J$ exist by~\ref{afdsfgjjk}.
\noindent
(3):\quad
For every $i$ choose a finite sublattice $ I_i\subset J$ discretising $F_i$.
Define $I:=\langle I_1\cup\cdots \cup I_n\rangle\subset J$ (see~\ref{afsgsdfhdfgjhdhgj}).
Since $I$ is finite, according to (2), $F_i$ is discretised by the inclusion $I\subset J$ for every $i$.
\end{proof}
\begin{point}
For example, let $J$ be an upper semilattice of finite type.
Then for every finite subset $I\subset J$, there is $b$ (for example $\bigvee_JI$) for which $I\subset (J\leq b)\subset J$. Consequently, according to Proposition~\ref{sdgfgjhdgh},
a functor $F\colon J\to \mathcal{C}$ is tame if and only if
it is discretised by the sublattice $(J\leq b)\subset J$, for some $b$ in $J$. Thus, according to Corollary~\ref{dsgsdghf},
$F$ is tame if and only if there is $b$ in $J$ such that, for every $a$ in $J$: if $(J\leq b)\cap(J\leq a)\not=\emptyset$, then
$F\left(\bigvee\left((J\leq b)\cap(J\leq a)\right)\leq a\right)$ is an isomorphism, and if $(J\leq b)\cap(J\leq a)=\emptyset$, then $F(a)$ is isomorphic to $e$.
\end{point}
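For instance (a sketch): take $J=\mathbb{N}^2$, an upper semilattice of finite type. For all $a$ and $b$ in $\mathbb{N}^2$ the set $(J\leq b)\cap(J\leq a)$ contains $(0,0)$ and its coproduct is $a\wedge b$. A functor $F\colon\mathbb{N}^2\to\mathcal{C}$ is therefore tame if and only if there is $b$ in $\mathbb{N}^2$ for which $F(a\wedge b\leq a)$ is an isomorphism for every $a$ in $\mathbb{N}^2$.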
If $ \mathcal{C}$ is closed under taking finite colimits and limits, then so is the category
of all functors $\text{Fun}(J,\mathcal{C})$ for every poset $J$.
In this case the category $\text{Tame}(J,\mathcal{C})$ is also closed under finite colimits (see~\ref{asdsfgadfshfgjh}.(4)). In general, we do not know if
$\text{Tame}(J,\mathcal{C})$ is closed under finite limits.
A sufficient assumption to guarantee this is again $J$ being an upper semilattice.
\begin{prop}\label{afsadagdfhbg}
Let $J$ be an upper semilattice, $ \mathcal{C}$ a category closed under finite limits, $L$ a finite category, and $\phi\colon L \to \text{\rm Fun}(J,\mathcal{C})$ a functor. Assume
$\phi(l)$ is tame for every object $l$ in $L$. Then the functor $\text{\rm lim}_L\phi$ is also tame.
\end{prop}
\begin{proof}
Let $f\colon I\subset J$ be a finite sublattice that discretises $\phi(l)$ for every object $l$ in $L$
(see~\ref{sdgfgjhdgh}.(3)). Then the natural transformation $f^k (\phi f)\to \phi $, adjoint to the identity $\text{id}\colon \phi f\to \phi f$, is an isomorphism (see~\ref{dsgsdghf}). Consequently $\text{\rm lim}_L\phi$ is isomorphic to $\text{\rm lim}_Lf^k (\phi f)$ and hence,
by Proposition~\ref{aSFEATGERYUU}, it is also isomorphic to $f^k (\text{\rm lim}_L\phi f)$.
\end{proof}
According to~\ref{asdsfgadfshfgjh}.(4) and~\ref{afsadagdfhbg},
if $J$ is an upper semilattice and $ \mathcal{C}$ is closed under finite limits and colimits, then so is
the category of tame functors $\text{Tame}(J, \mathcal{C})$. Furthermore the inclusion $\text{Tame}(J, \mathcal{C})\subset \text{Fun}(J, \mathcal{C})$ preserves finite limits and colimits.
\section{Homotopy theory of tame functors}
\label{afasdfdghfdgjh}
Let $\mathcal{M}$ be a model category as defined in~\cite{MR1361887}. This means three classes of morphisms in $\mathcal{M}$ are chosen:
{\bf weak equivalences} ($\xrightarrow{\sim}$), {\bf fibrations} ($\twoheadrightarrow$), and {\bf cofibrations}
($\hookrightarrow$), which
are required to satisfy the following axioms:
\begin{enumerate}
\item[MC1] Finite limits and colimits exist in $\mathcal{M}$.
\item[MC2] If $f$ and $g$ are morphisms in $\mathcal{M}$ for which $g f$ is defined and if two of the three morphisms $f$, $g$, $gf$ are weak equivalences, then so is the third.
\item[MC3] The three classes of morphisms are
preserved by retracts.
\item[MC4] Consider a commutative square
in $\mathcal M$ consisting of the solid morphisms:
\[
\begin{tikzcd}[row sep=13pt]
X \arrow[hook']{d}[swap]{\alpha} \arrow[r]
& E\arrow[two heads]{d}{\beta}
\\
Y\arrow[r] \arrow[ru,dotted]
& B
\end{tikzcd}
\]
Then a morphism, depicted by the dotted arrow, making this diagram commutative, exists under either of the following two assumptions: (i) $\alpha$ is a cofibration and a weak equivalence and $\beta$ is a fibration, or
(ii) $\alpha$ is a cofibration and $\beta$ is a fibration and a weak equivalence.
\item[MC5] Every morphism in $\mathcal{M}$ can be factored in two ways:
(i) $ \beta\alpha$, where $\alpha$ is a cofibration and $\beta$ is a fibration and a weak equivalence, and
(ii) $\beta\alpha$, where $\alpha$ is a cofibration and a weak equivalence and $\beta$ is a fibration.
\end{enumerate}
In particular, the axiom MC1 guarantees the existence of the initial object $\text{colim}_{\emptyset}$ (the colimit of the empty diagram), denoted by $e$, and of the terminal object $\text{lim}_{\emptyset}$ (the limit of the empty diagram), denoted by $\ast$.
An object $X$ in $\mathcal M$ is called {\bf cofibrant} if the morphism $e \to X$ is a cofibration.
If the morphism $X\to \ast$ is a fibration, then $X$ is called {\bf fibrant}.
Assume $I$ is a finite poset. Then the following choices of weak equivalences, fibrations, and cofibrations in
$\text{Fun}(I,\mathcal{M})$ form a model structure (see for example~\cite{reedy,MR1361887}).
A natural transformation $\varphi:F \to G$ in $\text{Fun}(I,\mathcal{M})$ is:
\begin{itemize}
\item a weak equivalence (resp.\@ a fibration) if, for all $a$ in $I$, the morphism $\varphi_a: F(a) \to G(a)$ is a weak equivalence (resp.\@ a fibration) in
$\mathcal{M}$;
\item a cofibration if, for all $a$ in $I$,
the morphism
\[\colim\left(\colim_{I<a}G \leftarrow \colim_{I<a} F \rightarrow F(a)\right)\to G(a),\]
induced by the following commutative diagram, is a cofibration in $\mathcal{M}$:
\[\begin{tikzcd}
\colim_{I<a}F \ar{r} \ar{d}[swap]{\colim_{I<a} \varphi} & F(a) \ar{d}{\varphi_a}\\
\colim_{I<a}G \ar{r} & G(a)
\end{tikzcd}\]
\end{itemize}
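For instance (a sketch of how these conditions read in the simplest non-trivial case $I=[1]=\{0<1\}$): for $a=0$ the poset $(I<0)$ is empty and its colimit is the initial object $e$, so the corresponding condition is simply that $\varphi_0\colon F(0)\to G(0)$ is a cofibration; for $a=1$ the condition is that the induced morphism
\[\colim\left(G(0)\leftarrow F(0)\rightarrow F(1)\right)\to G(1)\]
is a cofibration in $\mathcal{M}$, since $\colim_{I<1}F=F(0)$ and $\colim_{I<1}G=G(0)$.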
The described model structures on functors indexed by finite posets are compatible in the following sense. Let $f\colon I_0\to I_1$ be a functor between finite posets. If $\phi\colon F\to G$ is a cofibration (resp.\@ a cofibration and a weak equivalence)
in $\text{\rm Fun}(I_0,\mathcal{M})$, then so is $f^k\phi\colon f^kF\to f^kG$
in $\text{\rm Fun}(I_1,\mathcal{M})$.
This is a consequence of the universal property of left Kan extensions and the axiom (MC4).
However, left Kan extensions in general fail to preserve weak equivalences and fibrations (compare with Proposition~\ref{asdgadfsghjdhjghk}).
To verify if a natural transformation
in $\text{Fun}(I,\mathcal{M})$ is a cofibration, we need to perform colimits over
subposets $(I<a)\subset I$. In general, such subposets can be large and constructing colimits over them
may require performing many identifications. When $I$ is an upper semilattice, however,
we can be more efficient. For $a$ in a finite upper semilattice $I$, define a sublattice:
\[I_a:=\{\text{$\bigwedge$} S\ |\ S\subset \mathcal{P}(a) \text{ has an ancestor}\}\subset (I<a).\]
For all $b$ in $I<a$, the set $b\leq \mathcal{P}(a)$ is non-empty and $b\leq \bigwedge (b\leq \mathcal{P}(a))$.
The element $ \bigwedge (b\leq \mathcal{P}(a))$ is therefore the initial object in $b\leq I_a$ and
consequently $b\leq I_a$ is contractible (see~\cite{MR0365573}). As this happens for all $b$ in $I<a$, the inclusion $I_a\subset (I<a)$ is cofinal (see~\cite{MR1712872, MR0365573}), which proves:
\begin{prop}\label{dsdfgdafhrgyhj}
Let $I$ be a finite upper semilattice and $a$ its element. For every functor $F\colon (I<a)\to\mathcal{M}$, the morphism $\text{\rm colim}_{I_a}F\to \text{\rm colim}_{I<a}F$ is an isomorphism and
$\text{\rm hocolim}_{I_a}F\to \text{\rm hocolim}_{I<a}F$ is a weak equivalence.
\end{prop}
According to Proposition~\ref{dsdfgdafhrgyhj}, when $I$ is a finite upper semilattice, to verify whether a natural transformation in $\text{Fun}(I,\mathcal{M})$ is a cofibration, we only need to perform colimits over the subposets $I_a\subset I$ for $a$ in $I$. How can such colimits be calculated? One way is to consider, for $a$ in $I$, the subposet of the discrete cube
(see~\ref{afadfhfgsh}):
\[C_a:=\{S\subset \mathcal{P}(a)\ |\ \mathcal{P}(a)\setminus S\text{ has an ancestor}\}\subset 2^{\mathcal{P}(a)}\]
and the functor $\bigwedge^c\colon C_a\to I_a$ mapping $S$ to $\bigwedge (\mathcal{P}(a)\setminus S)$. Since, for every
$b$ in $I_a$, the subset $\mathcal{P}(a)\setminus(b\leq \mathcal{P}(a))\subset \mathcal{P}(a)$ is the initial object in the poset $b\leq \bigwedge^c$ (see~\ref{sdgsdgjdghkfhjk}), this poset is contractible. The functor
$\bigwedge^c$ is therefore cofinal. Thus, for every $F\colon I_a\to \mathcal{M}$, the morphism
$\text{colim}_{C_a}(F\bigwedge^c) \to \text{colim}_{I_a}F$ is an isomorphism and
$\text{hocolim}_{C_a}(F\bigwedge^c) \to \text{hocolim}_{I_a}F$ is a weak equivalence.
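As a small illustration (a sketch): let $I=[2]^2$ and $a=(2,2)$, so that $\mathcal{P}(a)=\{(1,2),(2,1)\}$. Every non-empty subset of $\mathcal{P}(a)$ has $(0,0)$ as an ancestor, and hence $I_a=\{(1,2),(2,1),(1,2)\wedge(2,1)\}=\{(1,2),(2,1),(1,1)\}$, a subposet with $3$ elements, whereas $(I<a)$ has $8$ elements. For every functor $F\colon(I<a)\to\mathcal{M}$, the colimit over $I_a$ is then the pushout $\text{colim}\left(F(1,2)\leftarrow F(1,1)\rightarrow F(2,1)\right)$.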
The assumption that the indexing poset is an upper semilattice is not only helpful in
verifying whether a natural transformation is a cofibration. It is also crucial in proving that
the following choices of weak equivalences, fibrations, and cofibrations in $\text{Tame}(J,\mathcal{M})$ satisfy the axioms of a model structure.
\begin{Def}\label{sdfsdgsfghjd}
Let $J$ be an upper semilattice and $\mathcal{M}$ a model category. A natural transformation $\phi\colon F\to G$ in $\text{Tame}(J,\mathcal{M})$ is called:
\begin{itemize}
\item a weak equivalence (resp. fibration) if, for all $a$ in $J$, the morphism $\phi_a\colon F(a)\to G(a)$ is
a weak equivalence (resp. fibration) in $\mathcal{M}$;
\item a cofibration if there is a finite subposet inclusion
$f\colon I\subset J$ discretising $F$ and $G$, and for which $\phi^f\colon Ff\to Gf$ is a cofibration in
$\text{Fun}(I,\mathcal{M})$.
\end{itemize}
\end{Def}
\begin{thm}\label{afadfhsfgh}
Let $J$ be an upper semilattice and $\mathcal{M}$ a model category. The choices described in Definition~\ref{sdfsdgsfghjd} satisfy the axioms of a model structure on $\text{\rm Tame}(J,\mathcal{M})$.
\end{thm}
Before we prove Theorem~\ref{afadfhsfgh}, we first show:
\begin{prop}\label{asdgadfsghjdhjghk}
Let $\mathcal{M}$ be a model category and $f\colon I\to J$ a homomorphism of finite type
between upper semilattices.
If a natural transformation $\phi\colon F\to G$ in $\text{\rm Fun}(I,\mathcal{M})$ is a
weak equivalence (resp.\@ fibration), then so is its left Kan extension $f^k\phi\colon
f^kF\to f^kG$ in $\text{\rm Fun}(J,\mathcal{M})$.
\end{prop}
\begin{proof}
Let $\phi\colon F\to G$ in $\text{\rm Fun}(I,\mathcal{M})$ be a weak equivalence (resp.\@ fibration).
Since $f$ is a finite-type homomorphism between upper semilattices,
the morphism $(f^k\phi)_a\colon
f^kF(a)\to f^kG(a)$ is isomorphic to either $\text{id}\colon e\to e$, if $(f\leq a)= \emptyset$,
or to $\phi_{\bigvee_{I}(f\leq a)}\colon F(\bigvee_{I}(f\leq a)) \to G(\bigvee_{I}(f\leq a)) $, if
$ (f\leq a)\not=\emptyset$ (see~\ref{sdfadfhfghdfgj}). As these morphisms are weak equivalences (resp.\@ fibrations), for all $a$, then so is the left Kan extension $f^k\phi$.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{afadfhsfgh}]
Requirement MC1 follows from Proposition~\ref{afsadagdfhbg}.
MC2 and MC3 are clear since $\mathcal{M}$ satisfies them.
\noindent
MC4:\quad
Let $\alpha\colon F\hookrightarrow G$ be a cofibration in $\text{\rm Tame}(J,\mathcal{M})$.
Choose a finite sublattice $f\colon I \subset J$ that discretises both $F$ and $G$ and for which
$\alpha^f\colon Ff\to Gf$
is a cofibration in $\text{\rm Fun}(I,\mathcal{M})$. Assume $\alpha$ is part of a commutative square in $\text{\rm Tame}(J,\mathcal{M})$
depicted in Figure~\ref{adfhsgghjs} on the left, where $\beta$ is a fibration. By applying $(-)^f$ to this square we get a commutative square in $\text{\rm Fun}(I,\mathcal{M})$, depicted by the solid-arrow square on the right in Figure~\ref{adfhsgghjs}, where, as indicated, the natural transformations are a cofibration and a fibration in $\text{\rm Fun}(I,\mathcal{M})$. If in addition either $\alpha$ or $\beta$ is a weak equivalence, then the lift, depicted by the dotted arrow on the right of Figure~\ref{adfhsgghjs}, exists
since $\text{\rm Fun}(I,\mathcal{M})$ is a model category.
By applying the left Kan extension $f^k$ to the square on the right in Figure~\ref{adfhsgghjs}
and comparing the result to the original square on the left in Figure~\ref{adfhsgghjs} we can form the commutative $3$-dimensional cube in Figure~\ref{cdfgdfagdfgdfvons} in $\text{\rm Tame}(J,\mathcal{M})$.
Since the natural transformation $f^k(Gf)\to G$ is an isomorphism, the desired lift exists
in the original square on the left of Figure~\ref{adfhsgghjs} under the additional assumption that either $\alpha$ or $\beta$ is a weak equivalence.
\noindent
MC5:\quad
Let $\phi\colon F\to G$ be a natural transformation in $\text{\rm Tame}(J,\mathcal{M})$.
Choose a finite sublattice $f\colon I\subset J$ that discretises both $F$ and $G$.
Factor the natural transformation
$\phi^f\colon Ff\to Gf$ in $\text{\rm Fun}(I,\mathcal{M})$
as $\phi^f=\beta\alpha$ where $\alpha\colon Ff\to H$ is a cofibration, $\beta\colon H\to Gf$ is a fibration, and either $\alpha$ or $\beta$ is a weak equivalence.
By applying the left Kan extension $f^k$ to these factorisations we obtain a commutative diagram
in $\text{\rm Tame}(J,\mathcal{M})$ where the vertical natural transformations are isomorphisms:
\[\begin{tikzcd}
f^k(Ff)\ar{d}\ar{r}{f^k\alpha} & f^kH\ar{r}{f^k\beta} & f^k(Gf)\ar{d}\\
F\ar{rr}{\phi} & & G
\end{tikzcd}\]
Since $(f^k\alpha)^f$ is isomorphic to $\alpha$, it is a cofibration
in $\text{\rm Fun}(I,\mathcal{M})$ and consequently $f^k\alpha$ is a cofibration in
$\text{\rm Tame}(J,\mathcal{M})$. According to Proposition~\ref{asdgadfsghjdhjghk},
$f^k\beta$ is a fibration in $\text{\rm Tame}(J,\mathcal{M})$. The same proposition assures also that
if $\alpha$ (resp. $\beta$) is a weak equivalence, then so is $f^k\alpha$
(resp. $f^k\beta$). This gives the desired factorisation of $\phi$.
\end{proof}
\section{Betti diagrams of vector space valued functors}\label{sdgadfhsgdb}
In this section we are going to discuss a standard strategy
of retrieving Betti diagrams of functors indexed by posets
with values in the category $\text{vect}_K$ of \textbf{finite dimensional} $K$-vector spaces, where $K$ is a chosen field.
If $J$ is a finite poset, then the language of model categories
can be used for phrasing homological properties of functors in $\text{Fun}(J,\text{vect}_K)$. This is because, in this case, the category $\text{ch}(\text{Fun}(J,\text{vect}_K))$ of \textbf{non-negative chain complexes}
of such functors, which can be identified with $\text{Fun}(J,\text{ch}(\text{vect}_K))$, has a natural model structure where weak equivalences are given by the homology isomorphisms. See Section~\ref{afasdfdghfdgjh} for a recollection of how such a model structure can be obtained.
It is, however, unlikely that the category $\text{Fun}(J,\text{ch}(\text{vect}_K))$ has a natural model structure
with the same weak equivalences for a general infinite $J$.
The reason is the restriction to functors with values in finite dimensional $K$-vector spaces, which might prevent the existence of the required factorisations. This restriction is nevertheless important, as our primary interest is in circumstances when Betti diagrams can be defined and calculated. For that, the finite dimensionality assumption is essential.
In Section~\ref{afasdfdghfdgjh} (see~\ref{afadfhsfgh}), it was also explained how the model structures on $\text{Fun}(I,\text{ch}(\text{vect}_K))$, for finite $I$, can be extended to $\text{Tame}(J,\text{ch}(\text{vect}_K))$
where $J$ is an arbitrary upper semilattice.
However, if $J$ is not finite and not an upper semilattice, then
$\text{Tame}(J,\text{ch}(\text{vect}_K))$ may even fail to be closed under finite limits,
preventing for example the existence of resolutions.
This means that for $J$ which is not finite, care needs to be exercised in order to be able to consider resolutions and Betti diagrams of functors indexed by $J$.
In this section we recall necessary foundations for defining Betti diagrams, and also present a strategy based on Koszul complexes to calculate them.
\begin{point}[\em Freeness]\label{dhjfgki}
Let $J$ be a poset with the poset relation denoted by $\leq$. Consider the poset $(J,=)$ with the trivial poset relation on the set $J$,
where two elements are related if and only if they are equal.
The identity function $\iota\colon (J,=)\to J$, mapping $a$ to $\iota(a)=a$,
is a functor. A functor $V\colon (J,=)\to \text{vect}_K$ is just a sequence $\{V_a\}_{a\in J}$ of finite dimensional $K$-vector spaces. The set $\{a\in J\ |\ V_a\not = 0\}$ is called the support of $V$ and is denoted by $\text{supp}(V)$.
A functor $F\colon J\to \text{vect}_K$ is called \textbf{free finitely generated } if it is isomorphic to the left Kan extension along $\iota\colon (J,=)\to J$ of some $V=\{V_a\}_{a\in J}$ whose support is finite.
Since all free functors considered in this article are finitely generated, we use the term \textbf{free}, without mentioning finite generation, to describe such functors.
The name free is justified by the universal property of the left Kan extension, which gives a linear isomorphism between $\text{Nat}_J(\iota^k V, H)$ and $\prod_{a\in J}\text{Hom}(V_a,H(a))$,
for every functor $H\colon J\to \text{vect}_K$. For example, for $b$ in $J$ and a finite dimensional vector space $U$, consider the simple functor
$U[b]\colon J\to \text{vect}_K$:
\[U[b](a):=\begin{cases}
0 &\text{ if } a\not = b\\
U &\text{ if } a = b
\end{cases}\]
Then the vector spaces $\text{Nat}_J(\iota^k V, U[b])$ and
$\text{Hom}(V_b,U)$ are isomorphic.
By varying $b$ in $J$ and taking $U$ to be $K$, we can conclude that free functors $\iota^k V$ and $\iota^k W$ are isomorphic if and only if
$V_a$ and $W_a$ are isomorphic for all $a$ in $J$. Thus, a free functor $F$ determines a unique sequence $\beta F=\{(\beta F)_a\}_{a\in J}$ of vector spaces whose support is finite, called the \textbf{Betti diagram} of $F$, for which $F$ is isomorphic to the left Kan extension $\iota^k (\beta F)$.
If $\text{supp}(\beta F)=\{a\}$, then
$F$ is called \textbf{homogeneous} and is also denoted by the symbol
$F(a)[a,-)$. The restriction of $F(a)[a,-)$
to the subposet $(a\leq J)\subset J$ is isomorphic to the constant functor with value $F(a)$.
Its restriction to $\{x\in J\ |\ a\not \leq x\}$
is isomorphic to the constant functor with value $0$.
If $F$ is free, then it is isomorphic to the direct sum
$\oplus_{a\in J}(\beta F)_a[a,-)$ and, consequently, $F$ is isomorphic to the left Kan extension of $\{(\beta F)_a\}_{a\in \text{supp}(\beta F)}$ along $(\text{supp}(\beta F),=)\hookrightarrow J$.
Thus, every free functor $F$ is tame and is discretised by the subposet inclusion $\text{supp}(\beta F)\subset J$. Moreover every collection $V=\{V_a\}_{a\in J}$ with finite support is the Betti diagram of a free functor.
If $G\colon I\to \text{vect}_K$ is free, then, for every functor
$f\colon I\to J$, the left Kan extension $f^kG\colon J\to \text{vect}_K$ is also free and $(\beta f^k G)_a=0$ if $f^{-1}(a)$ is empty, and $(\beta f^k G)_a$ is isomorphic to $\oplus_{b\in f^{-1}(a)}(\beta G)_b$, if $f^{-1}(a)$ is non-empty.
\end{point}
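To make this explicit, here is a minimal computational sketch (ours, in Python with purely illustrative names; not part of the formal development): since a free functor $F$ is isomorphic to $\oplus_{b\in J}(\beta F)_b[b,-)$, its value at $a$ has dimension $\sum_{b\leq a}\dim (\beta F)_b$, which can be read off directly from the Betti diagram.
\begin{verbatim}
def free_functor_dim(betti, leq, a):
    """Dimension at `a` of the free functor with Betti diagram `betti`.

    betti : dict mapping poset elements b to dim (beta F)_b
    leq   : leq(x, y) -> bool, the poset relation of J
    """
    return sum(d for b, d in betti.items() if leq(b, a))

# Example: the poset {0,1}^2 ordered coordinatewise.
leq = lambda x, y: x[0] <= y[0] and x[1] <= y[1]
betti = {(0, 0): 1, (1, 0): 2}   # (beta F)_{(0,0)} = K, (beta F)_{(1,0)} = K^2
assert free_functor_dim(betti, leq, (1, 1)) == 3   # K direct sum K^2
assert free_functor_dim(betti, leq, (0, 1)) == 1   # only (0,0) <= (0,1)
\end{verbatim}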
\begin{point}[\em Resolutions]
Let $J$ be a poset and $F\colon J\to \text{vect}_K$ a functor.
An exact sequence $P_{n-1}\to\cdots\to P_0\to F\to 0$ in $\text{Fun}(J,\text{vect}_K)$, with $P_i$ free for all $i$, is called an \textbf{$n$-resolution} of $F$. A $1$-resolution $P_0\to F\to 0$ is also called a \textbf{cover} of $F$. A $2$-resolution $P_1\to P_0\to F\to 0$ is also called a \textbf{presentation} of $F$. An infinite exact sequence $\cdots\to P_1\to P_0\to F\to 0$, with $P_i$ free for all $i$, is called an \textbf{$\infty$-resolution} of $F$.
An $n$-resolution of $F$ is also denoted, as a map of chain complexes, by $P\to F$, where $F$ is a chain complex concentrated in degree $0$, and either
$P=(P_{n-1}\to\cdots\to P_0)$ or
$P=(\cdots\to P_1\to P_0)$, depending on whether $n$ is finite or infinite.
A functor $F\colon J\to \text{vect}_K$ is called \textbf{$n$-resolvable} if it has an $n$-resolution. Functors which are $1$-resolvable are also called \textbf{finitely generated}.
Functors which are $2$-resolvable are also called \textbf{finitely presented}.
If the indexing poset $J$ is finite, then all functors are $\infty$-resolvable.
This is because all such functors are finitely generated, and hence by taking covers of successive kernels, an $\infty$-resolution can be constructed.
If $J$ is infinite, then not all $n$-resolvable functors have to be $(n+1)$-resolvable.
For example, let $J=[0,2)\coprod \{a,b\}\coprod (2,3]$, with $a$ and $b$ incomparable and
$x<a<y$ and $x<b<y$, for $x$ in $[0,2)$ and $y$ in $(2,3]$.
Consider the subposet $I:= \{1, a,b\}\subset J$ and a functor $G\colon I\to \text{\rm vect}_K$ where both
$G(1<a)$ and $G(1<b)$ are given by the function $K\to 0$.
Let $F\colon J\to \text{\rm vect}_K$ be the left Kan extension of $G$ along $I\subset J$.
Then $F$ is finitely presented ($2$-resolvable); however, it is not
$3$-resolvable.
\end{point}
Here is a characterisation of finitely generated and presented functors.
\begin{prop}\label{sgdgjhf}
Let $J$ be a poset and $F\colon J\to \text{\rm vect}_K$ a functor.
\begin{enumerate}
\item $F$ is finitely generated if and only if
there is a finite poset $I$ and a functor $f\colon I\to J$ for which the natural transformation
$\mu\colon f^k(Ff)\to F$, adjoint to the identity $\text{\rm id}\colon Ff\to Ff$, is surjective.
\item $F$ is finitely presented if and only if it is tame, i.e., if and only if there is a finite poset $I$ and a functor $f\colon I\to J$ for which the natural transformation
$\mu\colon f^k(Ff)\to F$, adjoint to the identity $\text{\rm id}\colon Ff\to Ff$, is an isomorphism.
\end{enumerate}
\end{prop}
\begin{proof}
\noindent
(1):\quad Let $\pi\colon P_0\to F$ be a cover. Since $P_0$ is tame, there is
a functor $f\colon I\to J$, with finite $I$,
discretising $P_0$ (for example $I=\text{supp}(\beta P_0)\subset J$). The commutativity of the following square and the fact that left Kan extensions preserve surjections imply the surjectivity of $\mu$:
\[\begin{tikzcd}
f^k(P_0 f)\ar{r}\ar{d}[swap]{f^k (\pi^f)} & P_0\ar{d}{\pi}\\
f^k(Ff)\ar{r}{\mu} & F
\end{tikzcd}\]
Conversely, let $I$ be a finite poset, $f\colon I\to J$ a functor for which $\mu\colon f^k(Ff)\to F$ is surjective, and
$\pi\colon P_0\to Ff$ a cover.
The surjectivity of $\mu$ implies the surjectivity of the
following composition, which is then a cover of $F$:
\[\begin{tikzcd}
f^k(P_0)\ar{r}{f^k\pi}& f^k(Ff)\ar{r}{\mu} & F
\end{tikzcd}\]
\noindent(2):\quad
If $P_1\to P_0\to F\to 0$ is a $2$-resolution, then $\text{colim}(0\leftarrow P_1\to P_0)$ is isomorphic to $F$. Since tameness is preserved by finite colimits (see~\ref{asdsfgadfshfgjh}.(4)), $F$ is tame.
Assume $F$ is discretised by a subposet inclusion $f\colon I\subset J$ with finite $I$. Consider a $2$-resolution
$P_1\to P_0\to Ff\to 0$, which exists since $I$ is finite.
As before, $f^k(Ff)$, and hence $F$, is isomorphic to $\text{colim}(0\leftarrow f^kP_1\to f^kP_0)$. Since $f^kP_1$ and $f^kP_0$
are free, $F$ is finitely presented.
\end{proof}
Proposition~\ref{sgdgjhf} characterises $n$-resolvable functors for $n\leq 2$.
We do not have a similar characterisation for $n>2$, only a partial result:
\begin{prop}\label{fhjghgkyik}
Let $J$ be a poset. Assume $F\colon J\to \text{\rm vect}_K$ is discretised by
a functor $f\colon I\to J$ out of a finite poset $I$ and for which
$f^k\colon \text{\rm Fun}(I,\text{\rm vect}_K)\to \text{\rm Fun}(J,\text{\rm vect}_K)$ is exact. Then $F$ is $\infty$-resolvable.
\end{prop}
\begin{proof}
Let $G\colon I\to \text{\rm vect}_K$ be a functor for which $f^kG$ is isomorphic to $F$. Choose an $\infty$-resolution $\pi\colon P\to G$. Exactness of $f^k$ means that the left Kan extension $f^k\pi \colon f^kP\to f^kG$ is an $\infty$-resolution of
$f^kG$, and hence of $F$.
\end{proof}
Here is a condition guaranteeing exactness of the left Kan extension:
\begin{lemma}\label{sjjhkiluilk}
Assume $f\colon I\to J$ is a functor of posets with finite $I$ satisfying the following property:
for every $a$ in $J$, every pair of elements in $f\leq a$ that has an ancestor also has
a descendent (such posets are called weakly directed in~\cite{beyondpersistence}).
Then $f^k\colon \text{\rm Fun}(I,\text{\rm vect}_K)\to \text{\rm Fun}(J,\text{\rm vect}_K)$ is exact.
\end{lemma}
\begin{proof}
We show $\text{colim}_{f\leq a} \colon
\text{\rm Fun}(f\leq a,\text{\rm vect}_K)\to \text{\rm vect}_K$ is exact for all $a$ in $J$.
Let $M\subset (f\leq a)$ consist of all the maximal elements.
The assumption on $f$ implies $M\subset (f\leq a)$ is cofinal and hence $\text{colim}_{f\leq a} F$ is isomorphic to
$\bigoplus_{x\in M} F(x)$. The lemma follows from the exactness of direct sums.
\end{proof}
\begin{cor}\label{asdfgadfgsdfhg}
Let $J$ be a poset.
\begin{enumerate}
\item Every functor $F\colon J\to \text{\rm vect}_K$
discretised by a homomorphism $f\colon I\to J$ from a finite upper semilattice $I$ is $\infty$-resolvable.
\item If $J$ is an upper semilattice, then
a functor $F\colon J\to \text{\rm vect}_K$ is tame if and only if it is $\infty$-resolvable.
\end{enumerate}
\end{cor}
\begin{proof}
In both cases (1) and (2) the poset $f\leq a$, for all $a$ in $J$, has a terminal object given by its coproduct in $I$. Thus the assumption of Lemma~\ref{sjjhkiluilk} is satisfied and hence the conclusion of~\ref{fhjghgkyik} holds.
\end{proof}
\begin{point}[\em Minimality and Betti diagrams]
An $n$-resolution $\pi\colon P\to F$ is called \textbf{minimal} if every chain map $\phi\colon P\to P$, for which the following triangle commutes, is an isomorphism:
\[\begin{tikzcd}
P\ar{rr}{\phi}\ar{dr} & & P\ar{dl}\\
& F
\end{tikzcd}\]
If $P\to F$ and $Q\to F$ are minimal $n$-resolutions of $F$, then $P$ and $Q$ are isomorphic.
Thus, if $P\to F$ is a minimal $n$-resolution, then the isomorphism type of $P_i$, for $i< n$, is uniquely determined by the isomorphism type of $F$, and its Betti diagram
$\beta P_i$ is called the $i$-th \textbf{Betti diagram} of $F$,
and is
denoted by
$\beta^{i} F$.
For example if $F$ is free, then $\text{id}\colon F\to F$ is a minimal $\infty$-resolution of $F$ and, hence, for every $a$ in $J$, the vector space
$(\beta^{0} F)_a$ is isomorphic to $(\beta F)_a$, and $(\beta^{i} F)_a=0$ for $i>0$.
A minimal $1$-resolution $P_0\to F$ is also called a \textbf{minimal cover} of $F$.
An $n$-resolution $P\to F$ is minimal if and only if, for every $i<n$, the following
natural transformations are minimal covers:
$P_0\to F$, $P_1\to \text{ker}(P_{0}\to F)$, \ldots,
$P_{i}\to \text{ker}(P_{i-1}\to P_{i-2})$. Thus, if
$P_0\to F$ is a minimal cover of an $n$-resolvable functor, then, for $1\leq i<n$, $\beta^iF$ is isomorphic to $\beta^{i-1}\text{ker}(P_0\to F)$.
Also, if $P\to F$ is a minimal $n$-resolution, then
$\beta^0F$ is isomorphic to $\beta P_0$,
$\beta^1F$ is isomorphic to $\beta^0\text{ker}(P_{0}\to F)$, and for $i>1$,
$\beta^iF$ is isomorphic to $\beta^0\text{ker}(P_{i-1}\to P_{i-2})$.
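As a small illustration (not one of the examples used elsewhere in this article), consider the poset $J=\{0<1\}$ and the simple functor $K[0]$. The natural transformation $K[0,-)\to K[0]$, which is the identity at $0$ and zero at $1$, is a minimal cover, and its kernel is the simple functor $K[1]$, which coincides with the free functor $K[1,-)$. We therefore obtain a minimal resolution
\[0\to K[1,-)\to K[0,-)\to K[0]\to 0,\]
so $(\beta^{0} K[0])_0$ and $(\beta^{1} K[0])_1$ are isomorphic to $K$, and all other entries of the Betti diagrams of $K[0]$ vanish.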
\end{point}
Our strategy for constructing minimal resolutions of $n$-resolvable functors
indexed by an arbitrary poset is to reduce this problem to
the case when the indexing poset is finite.
\begin{prop}\label{afsgsdfshfd}
Let $J$ be a poset and $F\colon J\to \text{\rm vect}_K $ a functor.
Assume $P\to F$ is an $n$-resolution which is discretised
by a subposet inclusion $f\colon I\subset J$ with finite $I$.
If $Q\to Ff$ is a minimal $n$-resolution of $Ff$, then its adjoint $f^kQ\to F$ is a minimal resolution of $F$ and, for $i<n$ and $a$ in $J$:
\[(\beta^i F)_a \text{ is isomorphic to }\begin{cases}
(\beta^i (Ff))_a= (\beta Q_i)_a & \text{ if } a\in I\\
0 & \text{ if } a\not \in I
\end{cases}\]
\end{prop}
\begin{proof}
By restricting the $n$-resolution $P\to F$ of $F$ along $f$, we obtain a resolution $Pf\to Ff$ of $Ff$. Let $Q\to Ff$ be a minimal $n$-resolution. Then there are chain maps
$\phi\colon Q\to Pf$ and $\psi\colon Pf\to Q$ for which the composition
$\psi\phi\colon Q\to Q$ is an isomorphism and the diagram on the left commutes:
\[\begin{tikzcd}
Q\ar{r}{\phi} \ar{dr} & Pf\ar{d}\ar{r}{\psi} & Q\ar{dl}\\
& Ff
\end{tikzcd}\ \ \ \ \ \
\begin{tikzcd}
f^kQ\ar{r}{f^k\phi} \ar{dr} & f^k(Pf)\ar{d}\ar{r}{f^k\psi} & f^kQ\ar{dl}\\
& F
\end{tikzcd}\]
By taking the adjoints of the vertical maps in the left diagram, we obtain a commutative diagram
on the right. Since $f^k(Pf)\to F$ is an $n$-resolution, then so is its retract $f^kQ\to F$. Its minimality follows from the minimality of $Q\to Ff$ and the fact that $f^k\colon \text{Nat}_I(Q_i,Q_i)\to \text{Nat}_J(f^kQ_i,f^kQ_i)$ is a
bijection for every $i$ (see~\ref{asfdfhgdhj}).
\end{proof}
According to Proposition~\ref{afsgsdfshfd},
to construct a minimal $n$-resolution of a functor $F\colon J\to \text{\rm vect}_K$,
the first step is to find a finite subposet inclusion $f\colon I\subset J$ for which there is an $n$-resolution $P\to F$ with $P$ being discretised by $f$ (the natural transformation $f^k(Pf)\to P$ is an isomorphism). For $n=1$, according to the proof of~\ref{sgdgjhf}.(1), such a subposet inclusion is given by any $f\colon I\subset J$ for which $f^k(Ff)\to F$ is a surjection. For $n=2$, according to the proof of~\ref{sgdgjhf}.(2), such a subposet inclusion is given by any $f\colon I\subset J$ that discretises $F$.
We do not have a similar statement for $n> 2$.
The second step is to construct a minimal $n$-resolution $Q\to Ff$ of the restriction $Ff$. The adjoint of this minimal resolution $f^kQ\to F$ is then the desired minimal $n$-resolution of $F$. This process reduces finding a minimal $n$-resolution of $F$ to finding a minimal $n$-resolution of $Ff$ which is a functor indexed by a finite poset. Constructing minimal resolutions of functors indexed by finite posets is standard and involves radicals (see for example~\cite{MR1476671, MR1731415}).
\begin{point}[\em Radical]
Let $I$ be a finite poset.
The \textbf{radical} of $G\colon I\to \text{\rm vect}_K$ is a subfunctor $\text{rad}(G)\subset G$ given by
$\text{rad}(G)(a)=\sum_{s\in (I<a)}\text{im}(G(s<a))$ for $a$ in $I$.
The quotient functor $G/\text{rad}(G)$ is semisimple as it is isomorphic to a direct sum $\oplus_{a\in I} U_a[a]$ of simple functors (see~\ref{dhjfgki}), where $U_a:=(G/\text{rad}(G))(a)$.
For example, for a free functor $G=\oplus _{a\in I} (\beta G)_a[a,-)$, the quotient
$G/\text{rad}(G)$ is isomorphic to $\oplus _{a\in I} (\beta G)_a[a]$.
A key property of quotienting by the radical, when the indexing poset is finite, is surjectivity detection: a natural transformation $H\to G$ is surjective if and only if its composition with the quotient $G\to G/\text{rad}(G)$
is surjective. The surjectivity detection may fail for infinite posets.
The surjectivity detection can be used to construct minimal covers.
Consider the quotient $G/\text{rad}(G)$ of $G\colon I\to \text{\rm vect}_K$. Set $P_0:=\bigoplus_{a\in I}U_a[a,-)$, where
$U_a= (G/\text{rad}(G))(a)$. Note that
there is an isomorphism $P_0/\text{rad}(P_0)\to G/\text{rad}(G)$. Let $\pi\colon P_0\to G$ be any natural transformation fitting into the following commutative square, where
the bottom horizontal arrow represents the chosen isomorphism:
\[\begin{tikzcd}
P_0\ar{r}{\pi}\ar{d} & G\ar{d}\\
P_0/\text{rad}(P_0)\ar{r} & G/\text{rad}(G)
\end{tikzcd}\]
Such $\pi$ exists since $P_0$ is free. The composition $P_0\to G/\text{rad}(G)$ is surjective, and hence so is $\pi$. The same argument
gives the surjectivity of every $\phi\colon P_0\to P_0 $ for which $\pi \phi = \pi$.
Since the values of $P_0$ are finite dimensional, every such $\phi$ is therefore an isomorphism, and hence $\pi\colon P_0\to G $ is a minimal cover. This shows:
\end{point}
\begin{prop}\label{sadgfsfasddh}
Let $I$ be a finite poset and $G\colon I\to \text{\rm vect}_K$ a functor. A natural transformation
$P_0\to G$ is a minimal cover if and only if $P_0$ is free and the induced natural transformation
$P_0/\text{\rm rad}(P_0)\to G/\text{\rm rad}(G)$ is an isomorphism. Moreover $(\beta^0G)_a$
is isomorphic to $(G/\text{\rm rad}(G))(a)$ for all $a$.
\end{prop}
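In concrete terms, Proposition~\ref{sadgfsfasddh} can be turned into a small rank computation. The following sketch (ours, in Python with \texttt{numpy}; the function name and the matrix encoding are illustrative assumptions) computes $\dim (\beta^0 G)_a = \dim G(a) - \dim \text{rad}(G)(a)$ from the matrices of the structure maps $G(s<a)$ for the parents $s$ of $a$; images along non-parents factor through parents, so these columns already span $\text{rad}(G)(a)$.
\begin{verbatim}
import numpy as np

def betti0_at(dim_Ga, parent_maps, tol=1e-10):
    """dim (beta^0 G)_a = dim G(a) - dim rad(G)(a).

    dim_Ga      : dimension of G(a)
    parent_maps : matrices of G(s < a) for the parents s of a
    """
    if not parent_maps:
        return dim_Ga                       # minimal elements: rad(G)(a) = 0
    stacked = np.hstack(parent_maps)        # columns span rad(G)(a)
    return dim_Ga - np.linalg.matrix_rank(stacked, tol=tol)

# Example on the poset {0 < 1}:
# if G(0 < 1) = 0 then (beta^0 G)_1 = 1; if G(0 < 1) = id then (beta^0 G)_1 = 0.
assert betti0_at(1, [np.zeros((1, 1))]) == 1
assert betti0_at(1, [np.eye(1)]) == 0
\end{verbatim}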
Since all the functors indexed by a finite poset $I$ are finitely generated, by taking minimal covers of successive kernels, every functor indexed by $I$ admits a minimal $\infty$-resolution.
This step-wise construction of a minimal $\infty$-resolution gives an inductive procedure for calculating the Betti diagrams. Can the Betti diagrams be retrieved
in one step, without the need for an inductive procedure? For this purpose Koszul complexes are standardly used.
\begin{point}
Let $I$ be a finite poset and $a$ its element. Choose a linear ordering $\prec$ on the set of parents $\mathcal{P}(a)$ of $a$. For every functor $G\colon I\to \text{vect}_K$, we define a non-negative chain complex denoted by $\mathcal{K}_a G$ and called the \textbf{Koszul complex} of $G$ at $a$.
Let $k$ be a natural number. Define:
\[(\mathcal{K}_a G)_k:=\begin{cases}
G(a) & \text{ if } k=0\\
\displaystyle \bigoplus _{\substack{S\subset \mathcal{P}(a),\ |S|=k\\ S\text{ has an ancestor}}} \text{colim}_{\substack{\cap_{s\in S}(I\le s)}} G
& \text{ if } k>0
\end{cases}
\]
For example $(\mathcal{K}_a G)_1=\bigoplus _{s\in \mathcal{P}(a)}G(s) $ and
$(\mathcal{K}_a G)_k=0$ if $k>\text{par-dim}_I(a)$ (see~\ref{sfgasg}).
Define $\partial\colon (\mathcal{K}_a G)_{k+1}\to (\mathcal{K}_a G)_k$ as follows:
\begin{itemize}
\item If $k=0$, then $\partial\colon (\mathcal{K}_a G)_1\to (\mathcal{K}_a G)_0=G(a)$ is the linear function which on
the summand $G(s)$ in $(\mathcal{K}_a G)_1$, indexed by $s$ in $\mathcal{P}(a)$, is given by $G(s<a)$.
\item Let $k>0$. For $k\geq j\geq 0$, let
$\partial_j\colon (\mathcal{K}_a G)_{k+1}\to (\mathcal{K}_a G)_k$
be the linear function mapping
the summand $\text{colim}_{\substack{\cap_{s\in S}(I\le s)}} G$ in $(\mathcal{K}_a G)_{k+1}$, indexed by
$S=\{s_0\prec\cdots\prec s_{k}\}\subset \mathcal{P}(a)$, to the summand
$\text{colim}_{\substack{\cap_{s\in S\setminus\{s_j\}}(I\le s)}} G$ in $(\mathcal{K}_a G)_{k}$,
indexed by $S\setminus\{s_j\}\subset \mathcal{P}(a)$, via the function
of the colimits $\text{colim}_{\substack{\cap_{s\in S}(I\le s)}} G\to \text{colim}_{\substack{\cap_{s\in S\setminus\{s_j\}}(I\le s)}} G$ induced by the poset inclusion
$\cap_{s\in S}(I\le s)\subset \cap_{s\in S\setminus\{s_j\}}(I\le s)$.
Define $\partial\colon (\mathcal{K}_a G)_{k+1}\to (\mathcal{K}_a G)_k$ to be the alternating sum
$\partial = \sum_{j=0}^k(-1)^j\partial_j$.
\end{itemize}
The linear functions $\partial$ form a chain complex, since it is standard to verify that the composition of two consecutive such functions is the $0$ function.
For a natural transformation
$\phi\colon F\to G$, define:
\[
\begin{tikzcd}
(\mathcal{K}_a F)_k\ar{r}{(\mathcal{K}_a \phi)_k} & (\mathcal{K}_a G)_k
\end{tikzcd}:=
\begin{cases}
\phi_a & \text{ if } k=0\\
\displaystyle \bigoplus _{\substack{S\subset \mathcal{P}(a),\ |S|=k\\ S\text{ has an ancestor}}} \text{colim}_{\substack{\cap_{s\in S}(I\le s)}} \phi
& \text{ if } k>0
\end{cases}
\]
These linear functions, for all $k$, form a chain map denoted by $\mathcal{K}_a \phi\colon \mathcal{K}_a F\to \mathcal{K}_a G$.
The association $\phi\mapsto \mathcal{K}_a \phi$ is a functor.
The image of the differential $\partial\colon (\mathcal{K}_aG)_1\to (\mathcal{K}_aG)_0=G(a)$
coincides with $\text{rad}(G)(a)=\sum_{s\in \mathcal{P}(a)}\text{im}(G(s< a))$, and consequently, according to~\ref{sadgfsfasddh}, the vector spaces $H_0(\mathcal{K}_aG)$, $(G/\text{rad}(G))(a)$, and
$(\beta^0 G)_a$ are isomorphic.
\end{point}
\begin{point}\label{dgdfgdfhsfgn}
Let an element $a$ in a finite poset $I$ have the following property:
every subset $S\subset \mathcal{P}(a)$ which has an ancestor has the product $\bigwedge S$ in $I$. For example, if $I$ is an upper semilattice, then all its elements satisfy this property. Under this assumption, for every subset
$S\subset \mathcal{P}(a)$ that has an ancestor, the product $\bigwedge S$ is the terminal object in the category
$\cap_{s\in S}(I\le s)$ and consequently, for $k>0$,
\[(\mathcal{K}_a G)_k= \displaystyle \bigoplus _{\substack{S\subset \mathcal{P}(a),\ |S|=k\\ S\text{ has an ancestor}}} G(\bigwedge S)\]
\end{point}
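For instance, on the poset $\{0,1\}^2$ (ordered coordinatewise) and $a=(1,1)$, the parents are $(1,0)$ and $(0,1)$ and their product is $(0,0)$, so the Koszul complex at $a$ is $G(0,0)\to G(1,0)\oplus G(0,1)\to G(1,1)$. The following sketch (ours, in Python with \texttt{numpy}; the names and the chosen ordering of the parents are illustrative assumptions) computes the dimensions of its homology from the matrices of the structure maps, using the sign convention of the differential defined above.
\begin{verbatim}
import numpy as np

def koszul_homology_square(G00, G10, G01, G11, f00_10, f00_01, f10_11, f01_11):
    """Homology dimensions of K_a G at a = (1,1) in the poset {0,1}^2.

    G.. are the dimensions of the values of G, f.. the matrices of the
    structure maps, e.g. f10_11 is the matrix of G((1,0) < (1,1)).
    The complex is  G(0,0) --d2--> G(1,0) (+) G(0,1) --d1--> G(1,1).
    """
    d1 = np.hstack([f10_11, f01_11])
    d2 = np.vstack([-f00_10, f00_01])       # signs (-1)^j, with (1,0) listed first
    r1 = np.linalg.matrix_rank(d1) if d1.size else 0
    r2 = np.linalg.matrix_rank(d2) if d2.size else 0
    return int(G11 - r1), int(G10 + G01 - r1 - r2), int(G00 - r2)

# Example: G(0,0)=0, G(1,0)=G(0,1)=G(1,1)=K, both maps into G(1,1) the identity.
zero_map = np.zeros((1, 0))                  # maps out of the zero space G(0,0)
h0, h1, h2 = koszul_homology_square(0, 1, 1, 1, zero_map, zero_map,
                                    np.eye(1), np.eye(1))
assert (h0, h1, h2) == (0, 1, 0)   # (beta^0 G)_(1,1) = 0 and (beta^1 G)_(1,1) = 1
\end{verbatim}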
\begin{point}
Let $I$ be a finite poset and $a$ its element.
Since colimits commute with direct sums, so does the functor $\mathcal{K}_a$, i.e.,
the natural transformation $\mathcal{K}_a F\oplus \mathcal{K}_a G\to \mathcal{K}_a(F\oplus G)$ is an isomorphism.
As the colimit operation is right exact, so is our Koszul complex construction: if
$0\to F\to G\to H\to 0$ is an exact sequence in $\text{Fun}(I,\text{vect}_K)$, then
$\mathcal{K}_aF\to \mathcal{K}_aG\to \mathcal{K}_aH\to 0$ is an exact sequence of chain complexes.
In general colimits do not preserve monomorphisms, and hence one does not expect the Koszul complex construction to preserve monomorphisms either. Let $n$ be an extended positive natural number (possibly equal to $\infty$). An element $a$ in $I$ is called \textbf{Koszul $n$-exact} if, for every exact sequence of functors $0\to F\to G\to H\to 0$ and $k< n$, the following sequence of vector spaces
is exact:
\[
0\to (\mathcal{K}_aF)_k\to (\mathcal{K}_aG)_k\to (\mathcal{K}_aH)_k\to 0
\]
For example all elements in $I$ turn out to be Koszul $2$-exact.
\end{point}
\begin{prop}\label{asfgdfhjdfh}
Let $I$ be a finite poset.
\begin{enumerate}
\item Every element $a$ in $I$ is Koszul $2$-exact.
\item Assume an element $a$ in $I$ has the following property: every subset $S\subset \mathcal{P}(a)$ which has an ancestor has the product $\bigwedge S$ in $I$. Then $a$ is Koszul $\infty$-exact.
\item If $I$ is an upper semilattice, then all its elements are Koszul $\infty$-exact.
\end{enumerate}
\end{prop}
\begin{proof}
\noindent
Statement (1) is a consequence of the fact that direct sums preserve exactness.
Under the assumption of statement (2), for a functor $F$ and $k>0$, the vector space
$ (\mathcal{K}_aF)_k$ can also be described as (see~\ref{dgdfgdfhsfgn}):
\[\displaystyle \bigoplus _{\substack{S\subset \mathcal{P}(a),\ |S|=k\\ S\text{ has an ancestor}}} F(\bigwedge S)\]
In this case, the statement again follows from the exactness of direct sums. Finally, statement (3) is a particular case of (2).
\end{proof}
Proposition~\ref{asfgdfhjdfh} translates into the following homology exact sequences:
\begin{cor}\label{aSDFGDFHJG}
Let $I$ be a finite poset and $0\to F\to G\to H\to 0$ be an exact sequence
in $\text{\rm Fun}(I,\text{\rm vect}_K)$.
\begin{enumerate}
\item For every element $a$ in $I$, there is an exact sequence of vector spaces:
\[\hspace{-10mm}\begin{tikzcd}
& H_2(\mathcal{K}_a G)\ar{r} & H_{2}(\mathcal{K}_a H)\ar[out=0, in=180]{dll}\\
H_1(\mathcal{K}_a F)\ar{r} & H_1(\mathcal{K}_a G)\ar{r}& H_{1}(\mathcal{K}_a H)\ar[out=0, in=180]{dll}\\
H_0(\mathcal{K}_a F)\ar{r} & H_0(\mathcal{K}_a G)\ar{r}& H_{0}(\mathcal{K}_a H)\ar{r} & 0
\end{tikzcd}\]
\item Assume an element $a$ in $I$ has the following property: every subset $S\subset \mathcal{P}(a)$ which has an ancestor has the product $\bigwedge S$ in $I$. Then
there is an exact sequence of vector spaces:
\[\hspace{-10mm}\begin{tikzcd}
& & \cdots \ar[out=0, in=180]{dll}\\
H_2(\mathcal{K}_a F)\ar{r} & H_2(\mathcal{K}_a G)\ar{r}& H_{2}(\mathcal{K}_a H)\ar[out=0, in=180]{dll}\\
H_1(\mathcal{K}_a F)\ar{r} & H_1(\mathcal{K}_a G)\ar{r}& H_{1}(\mathcal{K}_a H)\ar[out=0, in=180]{dll}\\
H_0(\mathcal{K}_a F)\ar{r} & H_0(\mathcal{K}_a G)\ar{r}& H_{0}(\mathcal{K}_a H)\ar{r} & 0
\end{tikzcd}\]
\end{enumerate}
\end{cor}
\begin{point}
Consider a homogeneous free functor
$F=V[b,-)\colon I\to \text{vect}_K$, where $I$ is a finite poset.
We claim that $\mathcal{K}_a F$ has the following homology:
\[H_i(\mathcal{K}_a F)\text{ is isomorphic to }
\begin{cases}
V & \text{ if } i= 0 \text{ and } b= a \\
0 &\text{ otherwise }
\end{cases}\]
To prove the claim, recall
$F(x)=0$ if $b\not\leq x$, and $F$ restricted to
$(b\leq I)\subset I$ is isomorphic to the constant functor with value $V$.
Thus, if $b\not\leq a$, then $\mathcal{K}_a F=0$, and the claim holds.
If $b=a$, then $(\mathcal{K}_a F)_0=F(a)=V$ and $(\mathcal{K}_a F)_i=0$ for $i>0$, and again the claim holds. Assume $b<a$. Then $(\mathcal{K}_a F)_0=F(a)$, which is isomorphic to $V$. Moreover, for a subset
$S\subset \mathcal{P}(a)$ with $|S|>0$ that has an ancestor, the colimit $\text{colim}_{\substack{\cap_{s\in S}(I\le s)}} F $ is isomorphic to $\text{colim}_{\substack{\cap_{s\in S}(b\leq I\le s)}} F $,
which is either isomorphic to $V$, in the case $S\subset (b\leq \mathcal{P}(a))$, or is $0$ otherwise. Consequently, the complex
$\mathcal{K}_a F$ is isomorphic to $L\otimes V$, where $L$
is the augmented chain complex of the standard
$|b\leq \mathcal{P}(a)|$-dimensional simplex whose homology is trivial in all degrees:
\[L:=\left(\cdots\to\bigoplus _{\substack{S\subset (b\leq \mathcal{P}(a))\\ |S|=2}}K \to\bigoplus _{\substack{S\subset (b\leq \mathcal{P}(a))\\ |S|=1}}K \to K\right)
\]
Since the Koszul complex commutes with direct sums, if $F$ is free, isomorphic to
$\oplus_{b\in I}(\beta F)_b[b,-)$, then:
\[H_i(\mathcal{K}_a F)\text{ is isomorphic to }
\begin{cases}
(\beta F)_a& \text{ if } i=0\\
0 &\text{ if } i>0
\end{cases}
\]
\end{point}
We are now ready to state the key fact connecting the homology of the Koszul complexes of a functor with its Betti diagrams.
\begin{thm}\label{asdfgdfhfgjh}
Let $I$ be a finite poset and $G\colon I\to \text{\rm vect}_K$ a functor.
\begin{enumerate}
\item For every $a$ in $I$ and $i=0,1,2$, the vector spaces $(\beta^i G)_a$ and $H_i(\mathcal{K}_a G)$ are isomorphic.
\item Assume an element $a$ in $I$ has the following property: every subset $S\subset \mathcal{P}(a)$ which has an ancestor has the product $\bigwedge S$ in $I$. Then, for every $i$, the vector spaces $(\beta^i G)_a$ and $H_i(\mathcal{K}_a G)$ are isomorphic.
\end{enumerate}
\end{thm}
\begin{proof}
The proof relies on Corollary~\ref{aSDFGDFHJG}. Since the arguments for the statements (1) and (2) are analogous, we show only (1).
The case $i=0$ follows from Proposition~\ref{sadgfsfasddh} and the fact that
$H_0(\mathcal{K}_a G)$ is isomorphic to $(G/\text{rad}(G))(a)$.
Consider an exact sequence
$0\to S_1\to P_0\xrightarrow{\pi} G\to 0$ where $\pi$ is a minimal cover.
It leads to an exact sequence of homologies (see~\ref{aSDFGDFHJG}):
\[\begin{tikzcd}
& H_2(\mathcal{K}_a P_0)\ar{r} & H_{2}(\mathcal{K}_a G)\ar[out=0, in=180]{dll}[description]{\alpha_2}\\
H_1(\mathcal{K}_a S_1)\ar{r} & H_1(\mathcal{K}_a P_0)\ar{r}& H_{1}(\mathcal{K}_a G)\ar[out=0, in=180]{dll}[description]{\alpha_1}\\
H_0(\mathcal{K}_a S_1)\ar{r} & H_0(\mathcal{K}_a P_0)\ar{r}{H_0(\mathcal{K}_a\pi)}& H_{0}(\mathcal{K}_a G)\ar{r} & 0
\end{tikzcd}\]
Minimality of $\pi$ is equivalent to $H_0(\mathcal{K}_a\pi)$ being an isomorphism. Since $P_0$ is free, $H_1(\mathcal{K}_a P_0)=H_2(\mathcal{K}_a P_0)=0$. These two observations imply $H_{1}(\mathcal{K}_a G)$ is isomorphic to $H_0(\mathcal{K}_a S_1)$, and $H_{2}(\mathcal{K}_a G)$ is isomorphic to $H_1(\mathcal{K}_a S_1)$.
By the already proven case $i=0$, $H_0(\mathcal{K}_a S_1)$ is isomorphic to
$(\beta^0S_1)_a$ which is isomorphic to $(\beta^1G)_a$. This gives the case $i=1$. Applying this case to $S_1$, we get that $H_1(\mathcal{K}_aS_1)$ is
isomorphic to $(\beta ^1 S_1)_a$, which is isomorphic to $(\beta ^2 G)_a$, and the case $i=2$ also holds.
\end{proof}
Here are some consequences of the presented statements,
which are proved by the same strategy: first discretise and then use the Koszul complex construction.
\begin{cor}\label{fjgdkvfkdcmv}
Let $J$ be a poset and $F\colon J\to \text{\rm vect}_K$ a functor.
\begin{enumerate}
\item Assume $F$ is $n$-resolvable and $f\colon I\subset J$ is a
subposet inclusion with finite $I$ discretising an $n$-resolution of $F$.
Then $\text{\rm supp}(\beta^iF)\subset I$ for all $0\leq i< n$. Moreover, for $0\leq i< \text{\rm min}(3,n)$ and $a$ in $I$, $(\beta^i F)_{a}$ is isomorphic to
$H_i(\mathcal{K}_a(Ff))$.
\item Assume $F$ is discretised by a subposet inclusion $f\colon I\subset J$
which is a homomorphism out of a finite upper semilattice $I$. Then $F$
is $\infty$-resolvable and
$\text{\rm supp}(\beta^iF)\subset I$ for all $i\geq 0$. Moreover,
for $i\geq 0$ and $a$ in $I$, $(\beta^i F)_{a}$ is isomorphic to $H_i(\mathcal{K}_a(Ff))$.
\item Assume $J$ is an upper semilattice and $F$ a tame functor.
Then $F$ is $\infty$-resolvable and, for $a$ in $J$ with $\text{\rm par-dim}_J(a)<i$, $(\beta^i F)_{a}=0$.
\end{enumerate}
\end{cor}
\begin{proof}
Statement 1 is a consequence of Proposition~\ref{afsgsdfshfd} and Theorem~\ref{asdfgdfhfgjh}. Statement 2 is a consequence of Corollary~\ref{asdfgadfgsdfhg}.(1), and again Proposition~\ref{afsgsdfshfd} and Theorem~\ref{asdfgdfhfgjh}. Statement 3 follows
from statement 2 and Proposition~\ref{aDFDFHFHJ} since
tame functors indexed by an upper semilattice can be discretised by a finite sublattice (see~\ref{sdgfgjhdgh}.(1)).
\end{proof}
We finish this long article with our key theorem describing how to determine Betti diagrams of functors indexed by realisations of finite type posets that admit discretisable resolutions. Our main result is that for such functors the Koszul complex can also be used for this purpose, similarly to functors indexed by upper semilattices (see~\ref{fjgdkvfkdcmv}). The key reason for this is the following fact:
functors indexed by realisations have natural grid-like discretisations that can be refined in a way that every set of parents of an element having an ancestor
also has a product in the refinement.
\begin{thm}\label{sdrtyhgf}
Let $I$ be a finite type poset and $F\colon \mathcal{R}(I)\to \text{\rm vect}_K$ an $n$-resolvable functor. Assume $d$ is an element in $I$ and $V$ is a finite subset of $(-1,0)$ for which
$\text{\rm supp}(\beta^jF)\subset \mathcal{R}_{I\le d}(I,V)\stackrel{\alpha}{\hookrightarrow} \mathcal{R}(I)$ for all $j\leq i$.
\begin{enumerate}
\item
Let $0\leq i<\text{\rm min}(3,n)$ and $(a,f)$ be in $\text{\rm supp}(\beta^iF)$.
Then $(\beta^i F)_{(a,f)}$ is isomorphic to $H_i(\mathcal{K}_{(a,f)}(F\alpha))$.
\item Let $i<n$ and $(a,f)$ be in $\text{\rm supp}(\beta^iF)$ for which there is
$\varepsilon$ in $V$ such that $f(x)>\varepsilon$ for all $x$ in $\mathcal{P}(a)$.
Then $(\beta^i F)_{(a,f)}$ is isomorphic to $H_i(\mathcal{K}_{(a,f)}(F\alpha))$.
\item If $\text{\rm par-dim}_{\mathcal{R}(I)}(a,f)< i< n $, then
$(\beta^i F)_{(a,f)}=0$.
\end{enumerate}
\end{thm}
\begin{proof}
\noindent
1:\quad The assumption implies that a minimal $\text{min}(3,n)$-resolution of $F$ is discretised by
$\mathcal{R}_{I\le d}(I,V)\subset \mathcal{R}(I)$.
This statement is then a particular case of
Corollary~\ref{fjgdkvfkdcmv}.(1).
\noindent
2:\quad The assumption implies that a minimal $(i+1)$-resolution of $F$ is discretised by
$\mathcal{R}_{I\le d}(I,V)\subset \mathcal{R}(I)$. Moreover the product of every set of parents of $(a,f)$ in $\mathcal{R}_{I\le d}(I,V)$ exists.
This statement is then a particular case of
Theorem~\ref{asdfgdfhfgjh}.(2).
\noindent
3:\quad
Since $\text{\rm par-dim}_{\mathcal{R}_{I\le d}(I,V)}(a,f)\leq \text{\rm par-dim}_{\mathcal{R}(I)}(a,f)$
for $(a,f)$ in $\mathcal{R}_{I\le d}(I,V)$,
this statement follows from 2.
\end{proof}
\paragraph{\em Acknowledgments.} A.\ Jin and F.\ Tombari
were supported by the Wallenberg AI, Autonomous System and Software Program (WASP) funded by Knut and Alice Wallenberg Foundation. W. Chach\'olski was partially supported by VR, the Wallenberg AI, Autonomous System and Software Program (WASP) funded by Knut and Alice Wallenberg Foundation, and MultipleMS funded by the European Union under the Horizon 2020 program, grant agreement 733161, and dBRAIN collaborative project at digital futures at KTH.
\begin{comment}
Recall that if $I$ is a consistent upper semilattice of finite type, its realisation $\mathcal{R}(I)$ is also an upper semilattice (see~\ref{sdfgfds}). Thus, a tame functor $F$ indexed by $\mathcal{R}(I)$ is $\infty$-resolvable and, for $(a,f)$ in $\mathcal{R}(I)$ with $\text{par-dim}_{\mathcal{R}(I)}(a,f)<i$, $(\beta ^i F)_{(a,f)}=0$, by Corollary~\ref{fjgdkvfkdcmv}(3). In this case we may even say more about these Betti diagrams. Proposition~\ref{asfhgfgkkl} states that $\alpha \colon\mathcal{R}(I,V)\hookrightarrow \mathcal{R}(I)$ is a homomorphism out of an upper semilattice of finite type. Moreover, $F$ can be discretised by a function of this form~\ref{}. Let $(a,f)$ be an element in $\mathcal{R}(I)$, then we can consider $V'=V\cup \{f(x)\}$. In this way $\alpha \colon\mathcal{R}(I,V')\hookrightarrow \mathcal{R}(I)$ is still a discretisation and a homomorphism, and $(a,f)$ is an element in $\mathcal{R}(I,V')$. Since $\mathcal{R}(I,V')$ is an upper semilattice of finite type, we can consider its set of parents, which will have a product, for how $V'$ is considered. As a consequence, the Koszul complex $\mathcal{K}_{(a,f)}(F\alpha)$ is well defined and its $i$-th homology is isomorphic to $(\beta ^i F)_{(a,f)}$, for $i\ge 0$, by Corollary~\ref{fjgdkvfkdcmv}(2).
\begin{itemize}
\item a tame functor indexed by a realisation of the type above is $\infty$-resolvable and, for $(a,f)$ in $\mathcal{R}(I)$ with $\text{par-dim}_{\mathcal{R}(I)}(a,f)$, $(\beta ^i F)_{(a,f)}=0$;
\item $F$ is discretised by $\alpha \colon\mathcal{R}_D(I,V)\hookrightarrow \mathcal{R}(I)$, which is a homomorphism out of an upper semilattice of finite type (see~\ref{asfhgfgkkl}), thus we can apply~\ref{fjgdkvfkdcmv}(2) to know the Betti digrams in relation to the homology of the Koszul complex at $a$ of $F\alpha$
\end{itemize}
\end{comment}
\printbibliography
\end{document}
\begin{document}
\title{Anti-Symmetric DGN: a stable architecture for Deep Graph Networks}
\begin{abstract}
Deep Graph Networks (DGNs) currently dominate the research landscape of learning from graphs, due to their efficiency and ability to implement an adaptive message-passing scheme between the nodes. However, DGNs are typically limited in their ability to propagate and preserve long-term dependencies between nodes, i.e., they suffer from the over-squashing phenomenon. This reduces their effectiveness, since predictive problems may require capturing interactions at different, and possibly large, radii in order to be effectively solved.
In this work, we present Anti-Symmetric Deep Graph Networks (A-DGNs), a framework for stable and non-dissipative DGN design, conceived through the lens of ordinary differential equations. We give a theoretical proof that our method is stable and non-dissipative, leading to two key results: long-range information between nodes is preserved, and no gradient vanishing or explosion occurs in training. We empirically validate the proposed approach on several graph benchmarks, showing that A-DGN leads to improved performance and enables effective learning even when dozens of layers are used.
\end{abstract}
\section{Introduction}
Representation learning for graphs has become one of the most prominent fields in machine learning. Such popularity derives from the ubiquitousness of graphs. Indeed, graphs are an extremely powerful tool to represent systems of relations and interactions and are extensively employed in many domains \citep{battaglia2016interaction, MPNN, bioinformatics, social_network, google_maps}. For example, they can model social networks, molecular structures, protein-protein interaction networks, recommender systems, and traffic networks.
The primary challenge in this field is how to capture and encode structural information in the learning model. Common methods used in representation learning for graphs usually employ \textit{Deep Graph Networks} (DGNs)~\citep{BACCIU2020203, GNNsurvey}. DGNs are a family of learning models that learn a mapping function compressing the complex relational information encoded in a graph into an information-rich feature vector that reflects both the topological and the label information in the original graph. As is common with neural networks, DGNs also consist of multiple layers. Each of them updates the node representations by aggregating previous node states and their neighbors, following a message passing paradigm. However, in some problems, the exploitation of local interactions between nodes is not enough to learn representative embeddings. In this scenario, it is often the case that the DGN needs to capture information concerning interactions between nodes that are far away in the graph, i.e., by stacking multiple layers. A specific predictive problem typically needs to consider a specific range of node interactions in order to be effectively solved, hence requiring a specific (possibly large) number of DGN layers.
Despite the progress made in recent years in the field, many of the proposed methods suffer from the \textit{over-squashing} problem~\citep{bottleneck} when the number of layers increases.
Specifically, when increasing the number of layers to cater for longer-range interactions, one observes an excessive amplification or an annihilation of the information being routed to the node by the message passing process to update its fixed-length encoding.
As such, over-squashing prevents DGNs from learning long-range information.
In this work, we present \textit{Anti-Symmetric Deep Graph Network} (A-DGN), a framework for effective long-term propagation of information in DGN architectures
designed through the lens of ordinary differential equations (ODEs).
Leveraging the connections between ODEs and deep neural architectures, we provide theoretical conditions for realizing a \textit{stable} and \textit{non-dissipative} ODE system on graphs through the use of anti-symmetric weight matrices.
The formulation of the A-DGN layer then results from the forward Euler discretization of the achieved graph ODE.
Thanks to the properties enforced on the ODE, our framework preserves the long-term dependencies between nodes and prevents gradient explosion or vanishing.
Interestingly, our analysis also paves the way for rethinking the formulation of standard DGNs as discrete versions of non-dissipative and stable ODEs on graphs.
The key contributions of this work can be summarized as follows:
\begin{itemize}
\item We introduce A-DGN, a novel design scheme for deep graph networks stemming from an ODE formulation. Stability
and non-dissipation are the main properties that characterize our method, allowing the preservation of long-term dependencies in the information flow.
\item We theoretically prove that the employed ODE on graphs has
stable and non-dissipative behavior. Such a result leads to the absence of exploding and vanishing gradient problems during training, which are typical of unstable and lossy systems.
\item We conduct extensive experiments to demonstrate the benefits of our method.
A-DGN can outperform classical DGNs over several datasets even when dozens of layers are used.
\end{itemize}
The rest of this paper is organized as follows. We introduce the A-DGN framework in Section 2 by theoretically proving its properties. In Section 3, we give an overview of the related work in the field of representation learning for graphs and continuous dynamic models. Afterwards, we provide the experimental assessment of our method in Section 4. Finally, Section 5 concludes the paper.
\section{Anti-Symmetric Deep Graph Network}
Recent advancements in the field of representation learning propose to treat neural network architectures as an ensemble of continuous (rather than discrete) layers, thereby drawing connections between deep neural networks and ordinary differential equations (ODEs)~\citep{StableArchitecture, NeuralODE}.
This connection can be pushed up to neural processing of graphs as introduced in \citep{GDE}, by making a suitable ODE define the
computation on a graph structure.
We focus on static graphs, i.e., on structures described by
$\mathcal{G}=(\mathcal{V}, \mathcal{E})$, with $\mathcal{V}$ and $\mathcal{E}$
respectively denoting the fixed sets of nodes and edges.
For each node $u \in \mathcal{V}$ we consider a state $\mathbf{x}_u(t) \in \mathbb{R}^{d}$, which provides a
representation
of the node $u$ at time $t$. We can then define a Cauchy problem on graphs in terms of the following node-wise defined ODE:
\begin{equation}\label{eq:ode}
\frac{\partial \mathbf{x}_u(t)}{\partial t} = f_{\mathcal{G}}(\mathbf{x}_u(t)),
\end{equation}
for time $t\in[0,T]$, and subject to the initial condition $\mathbf{x}_u(0)= \mathbf{x}_u^0\in\mathbb{R}^{d}$.
The dynamics of the node representations is described by the function
$f_{\mathcal{G}}:\mathbb{R}^{d} \rightarrow \mathbb{R}^{d}$, while
the initial condition $\mathbf{x}_u(0)$ can be interpreted as the initial configuration of the node's information, hence as the input for our computational model.
As a consequence, the ODE defined in Equation~\ref{eq:ode} can be seen as a continuous information processing system over the graph, which, starting from the input configuration $\mathbf{x}_u(0)$, computes the final node representation (i.e., embedding) $\mathbf{x}_u(T)$. Notice that this process shares similarities with standard DGNs, in that it computes node states that can be used as an embedded representation of the graph and then used to feed a readout layer in a downstream task on graphs. The top of Figure~\ref{fig:framework} visually summarizes this concept, showing how nodes evolve following a specific graph ODE in the time span between $0$ and a terminal time $T>0$.
\begin{figure}
\caption{A high-level overview
of our proposed framework, summarizing the involved concepts of an ODE over a graph, its discretization as layers of a DGN, and the resulting node update of the Anti-Symmetric DGN.
At the top, the continuous processing of node states in the time span between $0$ and $T>0$ is illustrated as a Cauchy problem on graphs, governed by the node-wise ODE $f_{\mathcal{G}}$.}
\label{fig:framework}
\end{figure}
Since for most ODEs it is impractical to compute an analytical solution,
a common approach relies on finding an approximate
one
through a numerical discretization procedure (such as the forward Euler method).
In this way,
the time variable is discretized
and the ODE solution is
computed by the successive application of an iterated map that operates on the discrete set of points between $0$ and $T$, with a step size $\epsilon>0$.
Crucially, as already observed for feed-forward and recurrent neural models \citep{StableArchitecture,AntisymmetricRNN},
each step of the ODE discretization process can be equated to one layer of a DGN network. The whole neural architecture contains as many layers as the integration steps in the numerical method (i.e., $L = T/\epsilon$),
and each layer $\ell=1, ..., L$ computes node states $\mathbf{x}_u^\ell$ that approximate
$\mathbf{x}_u(\epsilon \, \ell)$.
This process is summarized visually in the middle of Figure~\ref{fig:framework}.
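To make the correspondence concrete, the following is a minimal sketch (ours, in Python; the names are illustrative) of how the forward Euler discretization of Equation~\ref{eq:ode} unrolls into $L=T/\epsilon$ layer-like updates.
\begin{verbatim}
def euler_unroll(f_G, X0, T=1.0, eps=0.1):
    """Forward Euler discretization of dX/dt = f_G(X): one step per 'layer'.

    f_G : callable mapping node states (n_nodes, d) -> (n_nodes, d)
    X0  : initial node states, i.e. the input node features
    """
    L = int(T / eps)              # number of layers
    X = X0
    for _ in range(L):
        X = X + eps * f_G(X)      # x^l = x^{l-1} + eps * f_G(x^{l-1})
    return X                      # approximates X(T)
\end{verbatim}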
Leveraging the concept of graph neural ODEs \citep{GDE},
in this paper
we perform a further step by reformulating
a DGN as a solution to a \emph{stable and non-dissipative}
Cauchy problem over a graph.
The main goal of our work is therefore achieving preservation of long-range information between nodes, while laying down the conditions that prevent gradient vanishing or explosion.
Inspired by the works on stable deep architectures that discretize ODE solutions \citep{StableArchitecture,AntisymmetricRNN},
we do so by first deriving conditions under which the graph ODE is constrained to the desired stable and non-dissipative behavior.
Since we are dealing with static graphs, we
instantiate
Equation~\ref{eq:ode} for a node $u$ as follows:
\begin{equation}\label{eq:simple_ode_dgn}
\frac{\partial \mathbf{x}_u(t)}{\partial t} = \sigma \left( \mathbf{W}_t \mathbf{x}_u(t) + \Phi(\mathbf{X}(t), \mathcal{N}_u) + \mathbf{b}_t\right),
\end{equation}
where $\sigma$ is a monotonically non-decreasing activation function,
$\mathbf{W}_t\in\mathbb{R}^{d\times d}$
and $\mathbf{b}_t\in\mathbb{R}^d$ are, respectively, a weight matrix and a bias vector that contain the trainable parameters of the system.
We denote by $\Phi(\mathbf{X}(t), \mathcal{N}_u)$ the aggregation function for the states
of the nodes in the neighborhood of $u$. We refer to $\mathbf{X}(t) \in \mathbb{R}^{|\mathcal{V}|\times d}$ as the node feature matrix of the whole graph, with $d$ the number of available features.
For simplicity, in the following we keep $\mathbf{W}_t$ and $\mathbf{b}_t$ constant over time, hence dropping the $t$ subscript in the notation.
Well-posedness and stability are essential concepts when designing DGNs as solutions to Cauchy problems, both relying on the continuous dependence of the solution from initial conditions. An ill-posed unstable system, even if potentially yielding a low training error, is likely to lead to a poor generalization error on perturbed data.
On the other hand, the solution of a Cauchy problem is stable if the long-term behavior of the system does not depend significantly on the initial conditions \citep{stabilityODE}.
In our case, where the ODE defines a message passing diffusion over a graph, our intuition is that a stable encoding system will be robust to perturbations in the input nodes information. Hence, the state representations will change smoothly with the input, resulting in a
non-exploding forward propagation and
better generalization. This intuition is formalized by
Definition~\ref{def:stable} (see Appendix~\ref{definitions}), whose idea is that a small perturbation of size $\delta$ of the initial state (i.e., the node input features) results in a perturbation on the subsequent states that is at most $\omega$.
As known from the stability theory of autonomous systems \citep{stabilityODE},
this condition is met when the maximum real part of the eigenvalues of the Jacobian of $f_\mathcal{G}$ is smaller than or equal to $0$, i.e., $\max_{i=1,...,d} Re(\lambda_i(\mathbf{J}(t))) \leq 0$, $\forall t\geq0$.
Although stability is a necessary condition for successful learning, it alone is not sufficient to capture long-term dependencies.
As discussed
in \cite{StableArchitecture}, if $\max_{i=1,...,d} Re(\lambda_i(\mathbf{J}(t))) \ll 0$ the result is a lossy system subject to catastrophic forgetting during propagation.
Thus, in
the graph domain, this means that only local neighborhood information is preserved by the system, while long-range dependencies among nodes are forgotten.
If no long-range information is preserved, then it is likely that the DGN will underperform, since it will not be able to reach the minimum radius of inter-nodes interactions needed to effectively solve the task.
Therefore, we can design an ODE for graphs which is stable and non-dissipative (see Definition~\ref{def:dissipative} in Appendix~\ref{definitions}) and leads to well-posed learning, when the criterion that guarantees stability
is met and the Jacobian's eigenvalues of $f_\mathcal{G}$ are nearly zero.
Under this condition, the forward propagation produces at most moderate amplification or shrinking of the input, which enables the preservation of long-term dependencies in the node states. During training, the backward propagation needed to compute the gradient of the loss $\partial \mathcal{L} / \partial \mathbf{x}_u(t)$ will have the same properties as the forward propagation. As such, no gradient vanishing or explosion is expected to occur. More formally:
\begin{proposition}\label{prep:stableCondition}
Assuming that $\mathbf{J}(t)$ does not change significantly over time, the forward and backward propagations of the ODE in Equation~\ref{eq:simple_ode_dgn} are stable and non-dissipative if \begin{equation}\label{eq:eigenvalue_condition}
Re(\lambda_i(\mathbf{J}(t)))
= 0, \quad \forall i=1, ..., d.
\end{equation}
\end{proposition}
See the proof in Appendix~\ref{proof_gradient}.
A simple way to impose the condition in Equation~\ref{eq:eigenvalue_condition} is to use an anti-symmetric\footnote{A matrix $\mathbf{A}\in\mathbb{R}^{d\times d}$ is anti-symmetric (i.e., skew-symmetric) if $\mathbf{A}^T=-\mathbf{A}$.} weight matrix
in Equation~\ref{eq:simple_ode_dgn}.
Under this assumption, we can rewrite Equation~\ref{eq:simple_ode_dgn} as follows:
\begin{equation}\label{eq:stableode}
\frac{\partial \mathbf{x}_u(t)}{\partial t} = \sigma \left( (\mathbf{W}-\mathbf{W}^T) \mathbf{x}_u(t) + \Phi(\mathbf{X}(t), \mathcal{N}_u) + \mathbf{b}\right)
\end{equation}
where $(\mathbf{W}-\mathbf{W}^T)\in\mathbb{R}^{d\times d}$ is the anti-symmetric weight matrix.
The next Proposition~\ref{prep:jacobian} ensures that when the aggregation function $\Phi(\mathbf{X}(t), \mathcal{N}_u)$ is independent of $\mathbf{x}_u(t)$ (see for example Equation~\ref{eq:simple_aggregation}), the Jacobian of the resulting ODE has imaginary eigenvalues, hence it is stable and non-dissipative according to Proposition~\ref{prep:stableCondition}.
As discussed in Appendix~\ref{Proposition2_variant}, whenever $\Phi(\mathbf{X}(t), \mathcal{N}_u)$ includes $\mathbf{x}_u(t)$ in its definition (see for example Equation~\ref{eq:gcn_aggregation}), the eigenvalues of the resulting Jacobian are still bounded in a small neighborhood around the imaginary axis.
\begin{proposition}\label{prep:jacobian}
Provided that $\Phi(\mathbf{X}(t), \mathcal{N}_u)$ is independent of $\mathbf{x}_u(t)$, the Jacobian matrix of the ODE in Equation~\ref{eq:stableode}
has purely imaginary eigenvalues, i.e.,
$$Re(\lambda_i(\mathbf{J}(t))) = 0, \forall i=1, ..., d.$$
Therefore the ODE in Equation~\ref{eq:stableode} is stable and non-dissipative.
\end{proposition}
See the proof in Appendix~\ref{proof_jacobian}.
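As a quick numerical illustration of Proposition~\ref{prep:jacobian} (ours, not part of the paper's released code), one can verify with \texttt{numpy} that the spectrum of an anti-symmetric matrix $\mathbf{W}-\mathbf{W}^T$ lies on the imaginary axis:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
W = rng.standard_normal((8, 8))
A = W - W.T                             # anti-symmetric: A.T == -A
eigvals = np.linalg.eigvals(A)
assert np.allclose(eigvals.real, 0.0)   # Re(lambda_i) = 0 for all i
\end{verbatim}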
We now proceed to discretize the ODE in Equation~\ref{eq:stableode} by means of the \textit{forward Euler method}. To preserve the stability of the discretized system\footnote{The interested reader is referred to \citep{stableEuler2} for an in-depth analysis of the stability of the forward Euler method.}, we add a diffusion term to Equation~\ref{eq:stableode}, yielding the following node state update equation:
\begin{equation}\label{eq:AGC_euler}
\mathbf{x}^{\ell}_u = \mathbf{x}^{\ell-1}_u + \epsilon \, \sigma \left( (\mathbf{W}-\mathbf{W}^T-\gamma \mathbf{I}) \mathbf{x}_u^{\ell-1} + \Phi(\mathbf{X}^{\ell-1}, \mathcal{N}_u) + \mathbf{b}\right)
\end{equation}
where $\mathbf{I}$ is the identity matrix, $\gamma$ is a hyper-parameter that regulates the strength of the diffusion, and $\epsilon$ is the discretization step. By building on the relationship between the discretization and the DGN layers, we have introduced $\mathbf{x}_u^\ell$ as the state of node $u$ at layer $\ell$, i.e., the discretization of the state at time $t=\epsilon\, \ell$.
Now, both the ODE and its Euler discretization are stable and non-dissipative. We refer to the framework defined by Equation~\ref{eq:AGC_euler} as \textit{Anti-Symmetric Deep Graph Network} (A-DGN), whose
state update process is schematically illustrated in the bottom of Figure~\ref{fig:framework}.
Notice that having assumed the parameters of the ODE constant in time, A-DGN can also be interpreted as a recursive DGN with weight sharing between layers.
We recall that $\Phi(\mathbf{X}^{\ell-1}, \mathcal{N}_u)$ can be any function that aggregates nodes (and edges) information.
Therefore, the general formulation of $\Phi(\mathbf{X}^{\ell-1}, \mathcal{N}_u)$ in A-DGN allows casting all standard DGNs into their non-dissipative, stable, and well-posed versions. As a result, A-DGN can be implemented leveraging the aggregation function that is most adequate for the specific task, while preserving long-range relationships in the graph. As a demonstration of this, in Section~\ref{sec:experiments} we explore two neighborhood aggregation functions, namely
\begin{equation}\label{eq:simple_aggregation}
\Phi(\mathbf{X}^{\ell-1}, \mathcal{N}_u) = \sum_{j\in\mathcal{N}_u} \mathbf{V} \mathbf{x}^{\ell-1}_j,
\end{equation}
(which is also employed in \cite{graphconv}) and the classical GCN aggregation
\begin{equation}\label{eq:gcn_aggregation}
\Phi(\mathbf{X}^{\ell-1}, \mathcal{N}_u) = \mathbf{V} \sum_{j \in \mathcal{N}_u \cup \{ u \}} \frac{1}{\sqrt{\hat{d}_j\hat{d}_u}} \mathbf{x}^{\ell-1}_j,
\end{equation}
where $\mathbf{V}$ is the weight matrix, $\hat{d}_j$ and $\hat{d}_u$ are, respectively, the degrees of nodes $j$ and $u$.
Finally, although we designed A-DGN with weight sharing in mind (for ease of presentation),
a more general version of the framework, with
layer-dependent weights
$\mathbf{W}^\ell-(\mathbf{W}^\ell)^T$, is possible\footnote{
The dynamical properties discussed in this section are
in fact still true even in the case of a time-varying $\mathbf{W}_t$ in Equation~\ref{eq:simple_ode_dgn},
provided that $\max_{i=1,...,d} Re(\lambda_i(\mathbf{J}(t))) \leq 0$ and $\mathbf{J}(t)$ changes sufficiently slow over time (see \citep{stabilityODE, StableArchitecture}).}.
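For concreteness, the following is a minimal PyTorch-style sketch (ours, with illustrative names; not the authors' released implementation) of the A-DGN update in Equation~\ref{eq:AGC_euler}, instantiated with the simple aggregation of Equation~\ref{eq:simple_aggregation} and with $\sigma=\tanh$ as one possible monotonically non-decreasing activation.
\begin{verbatim}
import torch
import torch.nn as nn

class ADGNLayer(nn.Module):
    """One A-DGN step with the simple neighborhood aggregation."""

    def __init__(self, d, eps=0.1, gamma=0.1):
        super().__init__()
        self.W = nn.Parameter(torch.empty(d, d))
        self.V = nn.Parameter(torch.empty(d, d))   # aggregation weights
        self.b = nn.Parameter(torch.zeros(d))
        nn.init.xavier_uniform_(self.W)
        nn.init.xavier_uniform_(self.V)
        self.eps, self.gamma = eps, gamma

    def forward(self, X, adj):
        # X: (n_nodes, d) node states; adj: (n_nodes, n_nodes) adjacency matrix
        A = self.W - self.W.T - self.gamma * torch.eye(X.size(1), device=X.device)
        agg = adj @ (X @ self.V.T)                 # sum_{j in N(u)} V x_j
        return X + self.eps * torch.tanh(X @ A.T + agg + self.b)

# Stacking L such layers (with shared or layer-dependent weights) corresponds
# to integrating the graph ODE up to time T = L * eps with forward Euler.
\end{verbatim}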
\section{Related work}
\paragraph{Deep Graph Network}
Nowadays, most DGNs rely on the concepts introduced by the Message Passing Neural Network (MPNN)~\citep{MPNN}, which is a general framework based on the message passing paradigm. The MPNN updates the representation for a node $u$ at layer $\ell$ as
\begin{equation}
\label{eq:MPNN}
\mathbf{x}_u^\ell = \phi_U(\mathbf{x}_u^{\ell-1}, \sum_{j\in \mathcal{N}_u} \phi_M(\mathbf{x}_u^{\ell-1}, \mathbf{x}_j^{\ell-1}, \mathbf{e}_{uj}))
\end{equation}
where $\phi_U$ and $\phi_M$ are, respectively, the \textit{update} and \textit{message} functions. Hence, the role of the message function is to compute the message for each node, and then dispatch it among the neighbors. On the other hand, the update function has the role of collecting the incoming messages and updating the node state. A typical implementation of the MPNN model is $\mathbf{x}_u^\ell = \mathbf{W}\mathbf{x}_u^{\ell-1} + \sum_{j\in \mathcal{N}_u} \mathrm{MLP}(\mathbf{e}_{uj})\mathbf{x}_j^{\ell-1} + \mathbf{b}$, where $\mathbf{e}_{uj}\in \mathbb{R}^{d_e}$ is the edge feature vector between nodes $u$ and $j$. Thus, by relaxing the concepts of stability and non-dissipation
from our framework, MPNN becomes a specific discretization instance of A-DGN.
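To illustrate this typical implementation concretely, here is a minimal PyTorch-style sketch of ours (names are illustrative and not tied to any specific library), assuming the edge list stores one (target, source) pair per edge:
\begin{verbatim}
import torch
import torch.nn as nn

class EdgeMPNNLayer(nn.Module):
    """x_u <- W x_u + sum_{j in N(u)} MLP(e_uj) x_j + b."""

    def __init__(self, d, d_e):
        super().__init__()
        self.W = nn.Linear(d, d)                       # includes the bias b
        self.edge_net = nn.Sequential(nn.Linear(d_e, d * d), nn.ReLU(),
                                      nn.Linear(d * d, d * d))

    def forward(self, X, edge_index, edge_attr):
        # edge_index: (2, n_edges) long tensor of (target u, source j) pairs
        # edge_attr : (n_edges, d_e) edge feature vectors e_uj
        u, j = edge_index
        d = X.size(1)
        M = self.edge_net(edge_attr).view(-1, d, d)    # one d x d matrix per edge
        msgs = torch.bmm(M, X[j].unsqueeze(-1)).squeeze(-1)
        return self.W(X).index_add(0, u, msgs)         # aggregate over neighbors
\end{verbatim}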
Depending on the definition of the update and message functions, it is possible to derive a variety of DGNs that mainly differ in the neighbor aggregation scheme \citep{GCN,GAT,SAGE,GIN, chebnet, gine}. However, all these methods focus on presenting new and effective functions without questioning the stability and non-dissipative behavior
of the final network. As a result, most of these DGNs are usually not able to capture long-term interactions. Indeed, only a few layers can be employed without falling into the over-squashing phenomenon, as discussed by \citet{bottleneck}.
Since the previous methods are all specific cases of MPNN, they are all instances of the discretized and unconstrained version of A-DGN. Moreover, a proper design of $\Phi(\mathbf{X}^{\ell-1}, \mathcal{N}_u)$ in A-DGN allows rethinking the discussed DGNs through the lens of non-dissipative and stable ODEs.
\cite{gcnii}, \cite{egnn}, and \cite{pathGCN} proposed three methods to alleviate over-smoothing, which is a phenomenon where all node features become almost indistinguishable after a few embedding updates. Similarly to the forward Euler discretization, the first method employs an identity mapping. It also exploits initial residual connections to ensure that the final representation of each node retains at least a fraction of the input. The second method proposes a DGN that constrains the Dirichlet energy at each layer and leverages initial residual connections, while the latter tackles over-smoothing by aggregating random paths
over the graph nodes. Thus, the novelty of our method is still preserved since A-DGN defines a map between DGNs and stable and non-dissipative graph ODEs to preserve long-range dependencies between nodes.
\paragraph{Continuous Dynamic Models}
\citet{NeuralODE} introduce NeuralODE, a new family of neural network models that parametrizes the continuous dynamics of recurrent neural networks using ordinary differential equations. Similarly,
\cite{AntisymmetricRNN} and \cite{EulerSN} draw a connection between ODEs and, respectively, RNNs and Reservoir Computing architectures. Both methods focus on the stability of the solution and the employed numerical method.
Inspired by the NeuralODE approach, \citet{GDE} develops a DGN defined as a continuum of layers. In such a work, the authors focus on building the connection between ODEs and DGNs. We extend their work to include stability and non-dissipation, which are fundamental properties to preserve long-term dependencies between nodes and prevent gradient explosion or vanishing during training. Thus, by relaxing these two properties from our framework, the work by \citet{GDE} becomes a specific instance of A-DGN. \citet{grand} propose GRAND, an architecture to learn graph diffusion as a partial differential equation (PDE). Differently from GRAND, our framework designs an architecture that is theoretically non-dissipative and free from gradient vanishing or explosion.
DGC~\citep{DGC} and SGC~\citep{SGC} propose linear models that propagate node information as the discretization of the graph heat equation, $\partial \mathbf{X}(t)/\partial t = -\mathbf{L}\mathbf{X}(t)$, without learning. Specifically, DGC mainly focuses on exploring the influence of the step size $\epsilon$ in the Euler discretization method.
\citet{pde-gcn} and \cite{graphcon} present two methods to preserve the energy of the system, i.e., they mitigate over-smoothing, instead of preserving long-range information between nodes. Differently from our method, which employs a first-order ODE, the former leverages the conservative mapping defined by hyperbolic PDEs, while the latter is defined as second-order ODEs that preserve the Dirichlet energy. In general, this testifies that non-dissipation in graph ODEs is an important property to pursue, not only when preserving long-range dependencies. However, to the best of our knowledge, we are the first to propose a non-dissipative graph ODE to effectively propagate the information on the graph structure.
\section{Experiments}
\label{sec:experiments}
In this section, we discuss the empirical assessment of our method. Specifically, we show its efficacy in preserving long-range information between nodes and mitigating over-squashing by evaluating our framework on graph property prediction tasks, where we predict the single source shortest path, node eccentricity, and graph diameter (see Section 4.1). With the same purpose, we report the experiments on the Tree-NeighborsMatch problem~\citep{bottleneck} in Appendix~\ref{exp_treeneighmatch}.
Moreover, we assess the performance of the proposed A-DGN approach against DGN variants from the literature on classical homophilic (see Section 4.2) and heterophilic (see Appendix~\ref{exp_hetero}) graph benchmarks.
We carried out the experiments on a Dell server with 4 Nvidia A100 GPUs. We openly release the code implementing our methodology and reproducing our empirical analysis at \url{https://github.com/gravins/Anti-SymmetricDGN}.
\subsection{Graph property prediction}
\paragraph{Setup}
For the graph property prediction task, we considered three datasets extracted from the work of \citet{PNA}. The analysis consists of classical graph theory tasks on undirected, unweighted, randomly generated graphs sampled from a wide variety of distributions. Specifically, we considered two node-level tasks and one graph-level task, namely single source shortest path (SSSP), node eccentricity, and graph diameter. Such tasks can only be solved by capturing long-range dependencies, and therefore by mitigating the over-squashing phenomenon. Indeed, in the SSSP task we compute the shortest paths between a given node $u$ and all other nodes in the graph. Thus, it is fundamental to propagate not only the information of the direct neighborhood of $u$, but also that of nodes that are extremely far from it. The same holds for the diameter and eccentricity tasks.
We employed the same seed and generator as \citet{PNA} to generate the datasets, but we considered graphs with 25 to 35 nodes, instead of 15 to 25 nodes as in the original work, to increase the task complexity and lengthen the long-range dependencies required to solve the task. As in the original work, we used 5120 graphs as the training set, 640 as the validation set, and 1280 as the test set.
We explored the performance of two versions of A-DGN, i.e., weight sharing and layer-dependent weights. Moreover, we employed two instances of our method leveraging the two aggregation functions in Equations~\ref{eq:simple_aggregation} and~\ref{eq:gcn_aggregation}. We will refer to the former as simple aggregation and to the latter as GCN-based aggregation. We compared our method with GCNII, two NeuralODE-based models, i.e., GRAND and DGC, and the four most popular DGNs, i.e., GCN, GAT, GraphSAGE, and GIN.
We designed each model as a combination of three main components. The first is the encoder, which maps the node input features into a latent hidden space; the second is the graph convolution (i.e., A-DGN or the DGN baseline); and the third is a readout that maps the output of the convolution into the output space. The encoder and the readout are Multi-Layer Perceptrons that share the same architecture among all models in the experiments.
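For illustration purposes only, the following simplified PyTorch-style sketch shows one possible realization of this three-component design with a weight-sharing A-DGN convolution, assuming the simple sum aggregation, a $\tanh$ activation, and single-layer encoder and readout; names and details are illustrative and do not correspond to the released implementation.
\begin{verbatim}
import torch
import torch.nn as nn

class ADGNConvSketch(nn.Module):
    # One forward Euler step with an anti-symmetric weight matrix.
    def __init__(self, dim, epsilon=0.1, gamma=0.1):
        super().__init__()
        self.W = nn.Parameter(torch.empty(dim, dim))
        nn.init.xavier_uniform_(self.W)
        self.bias = nn.Parameter(torch.zeros(dim))
        self.epsilon, self.gamma = epsilon, gamma

    def forward(self, x, adj):
        eye = torch.eye(x.size(1), device=x.device)
        w_anti = self.W - self.W.t() - self.gamma * eye
        agg = adj @ x                       # simple sum aggregation
        return x + self.epsilon * torch.tanh(x @ w_anti.t() + agg + self.bias)

class ModelSketch(nn.Module):
    # Encoder -> L shared A-DGN updates -> readout.
    def __init__(self, in_dim, hid_dim, out_dim, num_layers=20):
        super().__init__()
        self.encoder = nn.Linear(in_dim, hid_dim)
        self.conv = ADGNConvSketch(hid_dim)
        self.readout = nn.Linear(hid_dim, out_dim)
        self.num_layers = num_layers

    def forward(self, x, adj):
        h = self.encoder(x)
        for _ in range(self.num_layers):    # weight-sharing variant
            h = self.conv(h, adj)
        return self.readout(h)
\end{verbatim}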
We performed hyper-parameter tuning via grid search, optimizing the Mean Square Error (MSE). We trained the models using the Adam optimizer for a maximum of 1500 epochs, with early stopping with a patience of 100 epochs on the validation error. For each model configuration, we performed 4 training runs with different weight initializations and report the average of the results.
We report in Appendix~\ref{grids} the grid of hyper-parameters explored for this experiment. We observe that, even though we do not directly include in the hyper-parameter space the terminal time $T$ at which the node evolution produces the best embeddings, $T$ is explored indirectly by fixing the values of the step size $\epsilon$ and the maximum number of layers $L$, since $T=L\epsilon$.
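As a concrete illustration of this indirect exploration, the short Python snippet below enumerates hypothetical grid values (the actual ones are listed in Appendix~\ref{grids}) together with the terminal time they induce.
\begin{verbatim}
from itertools import product

# Hypothetical grid values, for illustration only.
for eps, L in product([1e-3, 1e-2, 1e-1, 1.0], [1, 5, 10, 20]):
    print(f"epsilon={eps:g}  layers={L}  T = L*epsilon = {L * eps:g}")
\end{verbatim}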
\paragraph{Results}
We present the results on the graph property prediction tasks in Table~\ref{tab:results_GraphProp}. Specifically, we report $\log_{10}(\mathrm{MSE})$ as the evaluation metric. We observe that our method, A-DGN, outperforms all the DGNs employed in this experiment. Indeed, by employing GCN-based aggregation, we achieve an error score that is on average 0.80 points better than the selected baselines. Notably, when we leverage the simple aggregation scheme, A-DGN further widens the performance gap, showing a decisive improvement over the baselines. Specifically, it achieves a performance that is 200\% to 300\% better than the best baseline in each task. Moreover, it is on average $3.6\times$ faster than the baselines (see Table~\ref{tab:results_GraphProp_time} in Appendix~\ref{complete_results}).
We observe that the main challenge when predicting diameter, eccentricity, or SSSP is to leverage not only local information but also global graph information. Such knowledge can only be learned by exploring long-range dependencies. Indeed, the three tasks are strongly correlated: all of them require computing shortest paths in the graph. Thus, as for standard algorithmic solutions (e.g., Bellman--Ford~\citep{Bellman}, Dijkstra's algorithm~\citep{Dijkstra}), more messages between nodes need to be exchanged in order to achieve accurate solutions. This suggests that A-DGN can better capture and exploit such information. Moreover, this also indicates that the simple aggregator is more effective than the GCN-based one, because the tasks are mainly based on counting distances. Thus, exploiting the information derived from the Laplacian operator is not helpful for solving this kind of algorithmic task.
\input{tables/results_GraphProp}
\subsection{Graph benchmarks}\label{graph_benchmark}
\paragraph{Setup}
In the graph benchmark setting we consider five well-known graph datasets for node classification, i.e., PubMed~\citep{pubmed}, the co-authorship graphs Coauthor CS and Coauthor Physics, and the Amazon co-purchasing graphs Computers and Photo from \citet{pitfalls}. Also for this class of experiments, we considered the same baselines and architectural choices as for the graph property prediction task. However, in this experiment we study only the version of A-DGN with weight sharing, since it achieves good performance with low training costs.
With the aim of accurately assessing the generalization performance of the models, we randomly split the datasets into multiple train/validation/test sets. Similarly to \citet{pitfalls}, we use 20 labeled nodes per class as the training set, 30 nodes per class as the validation set, and the rest as the test set. We generate 5 random splits per dataset and 5 random weight initializations for each configuration in each split.
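The splitting protocol can be summarized by the following sketch, which assumes integer class labels and is not the exact code used in our experiments.
\begin{verbatim}
import numpy as np

def per_class_split(labels, n_train=20, n_val=30, seed=0):
    rng = np.random.default_rng(seed)
    train, val = [], []
    for c in np.unique(labels):
        idx = rng.permutation(np.where(labels == c)[0])
        train.extend(idx[:n_train])
        val.extend(idx[n_train:n_train + n_val])
    test = np.setdiff1d(np.arange(len(labels)), np.array(train + val))
    return np.array(train), np.array(val), test
\end{verbatim}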
We perform hyper-parameter tuning via grid search, optimizing the accuracy score. We train for a maximum of 10000 epochs to minimize the Cross-Entropy loss. We use an early stopping criterion that stops the training if the validation score does not improve for 50 epochs. We report in Appendix~\ref{grids} the grid of hyper-parameters explored for this experiment.
\paragraph{Results}
We present the results on the graph benchmarks in Table~\ref{tab:results}. Specifically, we report the accuracy as the evaluation metric. Even in this scenario, A-DGN outperforms the selected baselines, except on PubMed and Amazon Computers, where GCNII is slightly better than our method.
In this benchmark, the results show that the GCN-based aggregation produces higher scores than the simple aggregation. Thus, additional local neighborhood features extracted from the graph Laplacian seem to strengthen the final predictions.
It also appears that there is less benefit from including global information than in the graph property prediction scenario. As a result, exploiting extremely long-range dependencies does not strongly improve the performance as the number of layers increases.
To demonstrate that our approach performs well with many layers, we show in Figure~\ref{fig:all_comparison} how the number of layers affects the accuracy score. Our model maintains or improves the performance as the number of layers increases. On the other hand, all the baselines obtain good scores only with one to two layers, and most of them exhibit a strong performance degradation as the number of layers increases.
Indeed, on the Coauthor CS dataset, GraphSAGE, GAT, GCN, and GIN lose between 24.5\% and 78.2\% of accuracy. We observe that DGC does not degrade its performance since its convolution does not contain trainable parameters.
Although extremely long-range dependencies do not produce the same boost as in the graph property prediction scenario, including more than 5-hop neighborhoods is fundamental to improving on state-of-the-art performance. As is clear from Figure~\ref{fig:all_comparison}, this is not practical when standard DGNs are employed. On the other hand, A-DGN demonstrates that it can capture and exploit such information without any performance drop.
\input{tables/results}
\begin{figure}
\caption{The test accuracy with respect to the number of layers on all the graph benchmark datasets. From the top left to the bottom, we show: PubMed, Coauthor CS, Coauthor Physics, Amazon Computers, and Amazon Photo. The accuracy is averaged over 5 random train/validation/test splits and 5 random weight initializations of the best configuration per split.}
\label{fig:all_comparison}
\end{figure}
\section{Conclusion}
In this paper, we have presented the \textit{Anti-Symmetric Deep Graph Network} (A-DGN), a new framework for DGNs derived from the study of an ordinary differential equation (ODE) representing a continuous process of information diffusion on graphs.
Unlike previous approaches, by imposing stability and conservative constraints on the ODE through the use of anti-symmetric weight matrices, the proposed framework is able to learn and preserve long-range dependencies between nodes.
The A-DGN layer formulation is the result of the forward Euler discretization of the corresponding Cauchy problem on graphs.
We theoretically prove that the differential equation corresponding to A-DGN is stable as well as non-dissipative.
Consequently, typical problems of systems with unstable and lossy dynamics, e.g., gradient explosion or vanishing, do not occur. Thanks to its formulation, A-DGN can be used to reinterpret and extend any classical DGN as a non-dissipative and stable ODE.
Our experimental analysis, on the one hand, shows that when capturing long-range dependencies is important for the task, our framework largely outperforms standard DGNs. On the other hand, it indicates the general competitiveness of A-DGN on several graph benchmarks.
Overall, A-DGN shows the ability to effectively explore long-range dependencies and leverage dozens of layers without any noticeable drop in performance.
For such reasons, we believe it can be a step towards the mitigation of the over-squashing problem in DGNs.
The results of our experimental analysis and the comparison with state-of-the-art methods suggest that the presented approach can be a starting point to also mitigate over-smoothing. Thus, we plan to thoroughly explore the effects of our approach on over-smoothing as a future research direction.
Looking ahead to other future developments, we plan to extend the analysis presented in this paper to study DGN architectures resulting from alternative discretization methods of the underlying graph ODE, e.g., using adaptive multi-step schemes \citep{stableEuler2}.
Other future lines of investigation include extending the framework to dynamical graph structures, and evaluating its impact in the area of Reservoir Computing \citep{tanaka2019recent}.
\subsubsection*{Acknowledgments}
This research was partially supported by EMERGE, a project funded by EU Horizon research and innovation programme under GA No 101070918.
\begin{thebibliography}{43}
\providecommand{\natexlab}[1]{#1}
\providecommand{\url}[1]{\texttt{#1}}
\expandafter\ifx\csname urlstyle\endcsname\relax
\providecommand{\doi}[1]{doi: #1}\else
\providecommand{\doi}{doi: \begingroup \urlstyle{rm}\Url}\fi
\bibitem[Alon \& Yahav(2021)Alon and Yahav]{bottleneck}
Uri Alon and Eran Yahav.
\newblock On the bottleneck of graph neural networks and its practical implications.
\newblock In \emph{International Conference on Learning Representations}, 2021.
\newblock URL \url{https://openreview.net/forum?id=i80OPhOCVH2}.
\bibitem[Ascher et~al.(1995)Ascher, Mattheij, and Russell]{stabilityODE}
U.M. Ascher, R.M.M. Mattheij, and R.D. Russell.
\newblock \emph{Numerical solution of boundary value problems for ordinary differential equations}.
\newblock Classics in applied mathematics. Society for Industrial and Applied Mathematics (SIAM), United States, unabridged, corrected republication edition, 1995.
\newblock ISBN 0-89871-354-4.
\bibitem[Ascher \& Petzold(1998)Ascher and Petzold]{stableEuler2}
Uri~M. Ascher and Linda~R. Petzold.
\newblock \emph{Computer Methods for Ordinary Differential Equations and Differential-Algebraic Equations}.
\newblock Society for Industrial and Applied Mathematics, USA, 1st edition, 1998.
\newblock ISBN 0898714125.
\bibitem[Bacciu et~al.(2020)Bacciu, Errica, Micheli, and Podda]{BACCIU2020203}
Davide Bacciu, Federico Errica, Alessio Micheli, and Marco Podda.
\newblock A gentle introduction to deep learning for graphs.
\newblock \emph{Neural Networks}, 129:\penalty0 203--221, 2020.
\newblock ISSN 0893-6080.
\newblock \doi{https://doi.org/10.1016/j.neunet.2020.06.006}.
\newblock URL \url{https://www.sciencedirect.com/science/article/pii/S0893608020302197}.
\bibitem[Battaglia et~al.(2016)Battaglia, Pascanu, Lai, Jimenez~Rezende, et~al.]{battaglia2016interaction}
Peter Battaglia, Razvan Pascanu, Matthew Lai, Danilo Jimenez~Rezende, et~al.
\newblock Interaction networks for learning about objects, relations and physics.
\newblock \emph{Advances in neural information processing systems}, 29, 2016.
\bibitem[Bauer \& Fike(1960)Bauer and Fike]{bauer1960norms}
Friedrich~L Bauer and Charles~T Fike.
\newblock Norms and exclusion theorems.
\newblock \emph{Numerische Mathematik}, 2\penalty0 (1):\penalty0 137--141, 1960.
\bibitem[Bellman(1958)]{Bellman}
Richard Bellman.
\newblock On a routing problem.
\newblock \emph{Quarterly of applied mathematics}, 16\penalty0 (1):\penalty0 87--90, 1958.
\bibitem[Chamberlain et~al.(2021)Chamberlain, Rowbottom, Gorinova, Bronstein, Webb, and Rossi]{grand}
Ben Chamberlain, James Rowbottom, Maria~I Gorinova, Michael Bronstein, Stefan Webb, and Emanuele Rossi.
\newblock Grand: Graph neural diffusion.
\newblock In \emph{International Conference on Machine Learning}, pp.\ 1407--1418. PMLR, 2021.
\bibitem[Chang et~al.(2018)Chang, Meng, Haber, Ruthotto, Begert, and Holtham]{chang2018reversible}
Bo~Chang, Lili Meng, Eldad Haber, Lars Ruthotto, David Begert, and Elliot Holtham.
\newblock Reversible architectures for arbitrarily deep residual neural networks.
\newblock In \emph{Proceedings of the AAAI conference on artificial intelligence}, volume~32, 2018.
\bibitem[Chang et~al.(2019)Chang, Chen, Haber, and Chi]{AntisymmetricRNN}
Bo~Chang, Minmin Chen, Eldad Haber, and Ed~H. Chi.
\newblock Antisymmetric{RNN}: A dynamical system view on recurrent neural networks.
\newblock In \emph{International Conference on Learning Representations}, 2019.
\newblock URL \url{https://openreview.net/forum?id=ryxepo0cFX}.
\bibitem[Chen et~al.(2018)Chen, Rubanova, Bettencourt, and Duvenaud]{NeuralODE}
Ricky T.~Q. Chen, Yulia Rubanova, Jesse Bettencourt, and David~K Duvenaud.
\newblock Neural ordinary differential equations.
\newblock In S.~Bengio, H.~Wallach, H.~Larochelle, K.~Grauman, N.~Cesa-Bianchi, and R.~Garnett (eds.), \emph{Advances in Neural Information Processing Systems}, volume~31. Curran Associates, Inc., 2018.
\newblock URL \url{https://proceedings.neurips.cc/paper/2018/file/69386f6bb1dfed68692a24c8686939b9-Paper.pdf}.
\bibitem[Corso et~al.(2020)Corso, Cavalleri, Beaini, Li\`{o}, and Veli\v{c}kovi\'{c}]{PNA}
Gabriele Corso, Luca Cavalleri, Dominique Beaini, Pietro Li\`{o}, and Petar Veli\v{c}kovi\'{c}.
\newblock Principal neighbourhood aggregation for graph nets.
\newblock In \emph{Advances in Neural Information Processing Systems}, 2020.
\bibitem[Defferrard et~al.(2016)Defferrard, Bresson, and Vandergheynst]{chebnet}
Micha\"{e}l Defferrard, Xavier Bresson, and Pierre Vandergheynst.
\newblock Convolutional neural networks on graphs with fast localized spectral filtering.
\newblock NIPS'16, pp.\ 3844--3852, Red Hook, NY, USA, 2016. Curran Associates Inc.
\newblock ISBN 9781510838819.
\bibitem[Derrow-Pinion et~al.(2021)Derrow-Pinion, She, Wong, Lange, Hester, Perez, Nunkesser, Lee, Guo, Wiltshire, Battaglia, Gupta, Li, Xu, Sanchez-Gonzalez, Li, and Velickovic]{google_maps}
Austin Derrow-Pinion, Jennifer She, David Wong, Oliver Lange, Todd Hester, Luis Perez, Marc Nunkesser, Seongjae Lee, Xueying Guo, Brett Wiltshire, Peter~W. Battaglia, Vishal Gupta, Ang Li, Zhongwen Xu, Alvaro Sanchez-Gonzalez, Yujia Li, and Petar Velickovic.
\newblock Eta prediction with graph neural networks in google maps.
\newblock In \emph{Proceedings of the 30th ACM International Conference on Information \& Knowledge Management}, CIKM '21, pp.\ 3767--3776, New York, NY, USA, 2021. Association for Computing Machinery.
\newblock ISBN 9781450384469.
\newblock \doi{10.1145/3459637.3481916}.
\newblock URL \url{https://doi.org/10.1145/3459637.3481916}.
\bibitem[Dijkstra(1959)]{Dijkstra}
Edsger~W. Dijkstra.
\newblock A note on two problems in connexion with graphs.
\newblock \emph{Numerische Mathematik}, 1:\penalty0 269--271, 1959.
\bibitem[Eliasof et~al.(2021)Eliasof, Haber, and Treister]{pde-gcn}
Moshe Eliasof, Eldad Haber, and Eran Treister.
\newblock {PDE}-{GCN}: Novel architectures for graph neural networks motivated by partial differential equations.
\newblock In A.~Beygelzimer, Y.~Dauphin, P.~Liang, and J.~Wortman Vaughan (eds.), \emph{Advances in Neural Information Processing Systems}, 2021.
\newblock URL \url{https://openreview.net/forum?id=wWtk6GxJB2x}.
\bibitem[Eliasof et~al.(2022)Eliasof, Haber, and Treister]{pathGCN}
Moshe Eliasof, Eldad Haber, and Eran Treister.
\newblock path{GCN}: Learning general graph spatial operators from paths.
\newblock In \emph{ICML}, 2022.
\bibitem[Gallicchio(2022)]{EulerSN}
Claudio Gallicchio.
\newblock Euler state networks.
\newblock \emph{arXiv preprint arXiv:2203.09382}, 2022.
\bibitem[Gilmer et~al.(2017)Gilmer, Schoenholz, Riley, Vinyals, and Dahl]{MPNN}
Justin Gilmer, Samuel~S. Schoenholz, Patrick~F. Riley, Oriol Vinyals, and George~E. Dahl.
\newblock Neural message passing for quantum chemistry.
\newblock In \emph{Proceedings of the 34th International Conference on Machine Learning - Volume 70}, ICML'17, pp.\ 1263--1272. JMLR.org, 2017.
\bibitem[Glendinning(1994)]{glendinning_1994}
Paul Glendinning.
\newblock \emph{Stability, Instability and Chaos: An Introduction to the Theory of Nonlinear Differential Equations}.
\newblock Cambridge Texts in Applied Mathematics. Cambridge University Press, 1994.
\newblock \doi{10.1017/CBO9780511626296}.
\bibitem[Haber \& Ruthotto(2017)Haber and Ruthotto]{StableArchitecture}
Eldad Haber and Lars Ruthotto.
\newblock Stable architectures for deep neural networks.
\newblock \emph{CoRR}, abs/1705.03341, 2017.
\newblock URL \url{http://arxiv.org/abs/1705.03341}.
\bibitem[Hamilton et~al.(2017)Hamilton, Ying, and Leskovec]{SAGE}
William~L. Hamilton, Rex Ying, and Jure Leskovec.
\newblock Inductive representation learning on large graphs.
\newblock In \emph{NIPS}, 2017.
\bibitem[Hu et~al.(2020)Hu, Liu, Gomes, Zitnik, Liang, Pande, and Leskovec]{gine}
Weihua Hu, Bowen Liu, Joseph Gomes, Marinka Zitnik, Percy Liang, Vijay Pande, and Jure Leskovec.
\newblock Strategies for pre-training graph neural networks.
\newblock In \emph{International Conference on Learning Representations}, 2020.
\newblock URL \url{https://openreview.net/forum?id=HJlWWJSFDH}.
\bibitem[Humphries \& Stuart(1994)Humphries and Stuart]{dissipative}
A.~R. Humphries and A.~M. Stuart.
\newblock Runge--Kutta methods for dissipative and gradient dynamical systems.
\newblock \emph{SIAM J. Numer. Anal.}, 31\penalty0 (5):\penalty0 1452--1485, oct 1994.
\newblock ISSN 0036-1429.
\newblock \doi{10.1137/0731075}.
\newblock URL \url{https://doi.org/10.1137/0731075}.
\bibitem[Kipf \& Welling(2017)Kipf and Welling]{GCN}
Thomas~N. Kipf and Max Welling.
\newblock Semi-supervised classification with graph convolutional networks.
\newblock In \emph{International Conference on Learning Representations (ICLR)}, 2017.
\bibitem[Chen et~al.(2020)Chen, Wei, Huang, Ding, and Li]{gcnii}
Ming Chen, Zhewei Wei, Zengfeng Huang, Bolin Ding, and Yaliang Li.
\newblock Simple and deep graph convolutional networks.
\newblock In \emph{Proceedings of the 37th International Conference on Machine Learning}. PMLR, 2020.
\bibitem[Monti et~al.(2019)Monti, Frasca, Eynard, Mannion, and Bronstein]{social_network}
Federico Monti, Fabrizio Frasca, Davide Eynard, Damon Mannion, and Michael~M. Bronstein.
\newblock Fake news detection on social media using geometric deep learning.
\newblock \emph{CoRR}, abs/1902.06673, 2019.
\newblock URL \url{http://arxiv.org/abs/1902.06673}.
\bibitem[Morris et~al.(2019)Morris, Ritzert, Fey, Hamilton, Lenssen, Rattan, and Grohe]{graphconv}
Christopher Morris, Martin Ritzert, Matthias Fey, William~L. Hamilton, Jan~Eric Lenssen, Gaurav Rattan, and Martin Grohe.
\newblock Weisfeiler and leman go neural: Higher-order graph neural networks.
\newblock In \emph{Proceedings of the Thirty-Third AAAI Conference on Artificial Intelligence and Thirty-First Innovative Applications of Artificial Intelligence Conference and Ninth AAAI Symposium on Educational Advances in Artificial Intelligence}, AAAI'19/IAAI'19/EAAI'19. AAAI Press, 2019.
\newblock ISBN 978-1-57735-809-1.
\newblock \doi{10.1609/aaai.v33i01.33014602}.
\newblock URL \url{https://doi.org/10.1609/aaai.v33i01.33014602}.
\bibitem[Namata et~al.(2012)Namata, London, Getoor, and Huang]{pubmed}
Galileo~Mark Namata, Ben London, Lise Getoor, and Bert Huang.
\newblock Query-driven active surveying for collective classification.
\newblock In \emph{Workshop on Mining and Learning with Graphs}, 2012.
\newblock URL \url{http://linqs.cs.umd.edu/basilic/web/Publications/2012/namata:mlg12-wkshp/namata-mlg12.pdf}.
\bibitem[Pei et~al.(2020)Pei, Wei, Chang, Lei, and Yang]{geom-gcn}
Hongbin Pei, Bingzhe Wei, Kevin Chen-Chuan Chang, Yu~Lei, and Bo~Yang.
\newblock Geom-gcn: Geometric graph convolutional networks.
\newblock In \emph{International Conference on Learning Representations}, 2020.
\newblock URL \url{https://openreview.net/forum?id=S1e2agrFvS}.
\bibitem[Poli et~al.(2019)Poli, Massaroli, Park, Yamashita, Asama, and Park]{GDE}
Michael Poli, Stefano Massaroli, Junyoung Park, Atsushi Yamashita, Hajime Asama, and Jinkyoo Park.
\newblock Graph neural ordinary differential equations.
\newblock \emph{arXiv preprint arXiv:1911.07532}, 2019.
\bibitem[Rusch et~al.(2022)Rusch, Chamberlain, Rowbottom, Mishra, and Bronstein]{graphcon}
T~Konstantin Rusch, Benjamin~P Chamberlain, James Rowbottom, Siddhartha Mishra, and Michael~M Bronstein.
\newblock Graph-coupled oscillator networks.
\newblock \emph{arXiv preprint arXiv:2202.02296}, 2022.
\bibitem[Shchur et~al.(2018)Shchur, Mumme, Bojchevski, and G{\"{u}}nnemann]{pitfalls}
Oleksandr Shchur, Maximilian Mumme, Aleksandar Bojchevski, and Stephan G{\"{u}}nnemann.
\newblock Pitfalls of graph neural network evaluation.
\newblock \emph{CoRR}, abs/1811.05868, 2018.
\newblock URL \url{http://arxiv.org/abs/1811.05868}.
\bibitem[Tanaka et~al.(2019)Tanaka, Yamane, H{\'e}roux, Nakane, Kanazawa, Takeda, Numata, Nakano, and Hirose]{tanaka2019recent}
Gouhei Tanaka, Toshiyuki Yamane, Jean~Benoit H{\'e}roux, Ryosho Nakane, Naoki Kanazawa, Seiji Takeda, Hidetoshi Numata, Daiju Nakano, and Akira Hirose.
\newblock Recent advances in physical reservoir computing: A review.
\newblock \emph{Neural Networks}, 115:\penalty0 100--123, 2019.
\bibitem[Topping et~al.(2022)Topping, Giovanni, Chamberlain, Dong, and Bronstein]{graph-rewiring}
Jake Topping, Francesco~Di Giovanni, Benjamin~Paul Chamberlain, Xiaowen Dong, and Michael~M. Bronstein.
\newblock Understanding over-squashing and bottlenecks on graphs via curvature.
\newblock In \emph{International Conference on Learning Representations}, 2022.
\newblock URL \url{https://openreview.net/forum?id=7UmjRGzp-A}.
\bibitem[Veli{\v{c}}kovi{\'{c}} et~al.(2018)Veli{\v{c}}kovi{\'{c}}, Cucurull, Casanova, Romero, Li{\`{o}}, and Bengio]{GAT}
Petar Veli{\v{c}}kovi{\'{c}}, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Li{\`{o}}, and Yoshua Bengio.
\newblock {Graph Attention Networks}.
\newblock \emph{International Conference on Learning Representations}, 2018.
\newblock URL \url{https://openreview.net/forum?id=rJXMpikCZ}.
\newblock Accepted as poster.
\bibitem[Wang et~al.(2021)Wang, Wang, Yang, and Lin]{DGC}
Yifei Wang, Yisen Wang, Jiansheng Yang, and Zhouchen Lin.
\newblock Dissecting the diffusion process in linear graph convolutional networks.
\newblock \emph{CoRR}, abs/2102.10739, 2021.
\newblock URL \url{https://arxiv.org/abs/2102.10739}.
\bibitem[Wu et~al.(2019)Wu, Souza, Zhang, Fifty, Yu, and Weinberger]{SGC}
Felix Wu, Amauri Souza, Tianyi Zhang, Christopher Fifty, Tao Yu, and Kilian Weinberger.
\newblock Simplifying graph convolutional networks.
\newblock In Kamalika Chaudhuri and Ruslan Salakhutdinov (eds.), \emph{Proceedings of the 36th International Conference on Machine Learning}, volume~97 of \emph{Proceedings of Machine Learning Research}, pp.\ 6861--6871. PMLR, 09--15 Jun 2019.
\newblock URL \url{https://proceedings.mlr.press/v97/wu19e.html}.
\bibitem[Wu et~al.(2021)Wu, Pan, Chen, Long, Zhang, and Yu]{GNNsurvey}
Zonghan Wu, Shirui Pan, Fengwen Chen, Guodong Long, Chengqi Zhang, and Philip~S. Yu.
\newblock A comprehensive survey on graph neural networks.
\newblock \emph{IEEE Transactions on Neural Networks and Learning Systems}, 32\penalty0 (1):\penalty0 4--24, 2021.
\newblock \doi{10.1109/TNNLS.2020.2978386}.
\bibitem[Xu et~al.(2019)Xu, Hu, Leskovec, and Jegelka]{GIN}
Keyulu Xu, Weihua Hu, Jure Leskovec, and Stefanie Jegelka.
\newblock How powerful are graph neural networks?
\newblock In \emph{7th International Conference on Learning Representations, {ICLR} 2019, New Orleans, LA, USA, May 6-9, 2019}. OpenReview.net, 2019.
\newblock URL \url{https://openreview.net/forum?id=ryGs6iA5Km}.
\bibitem[Yan et~al.(2021)Yan, Hashemi, Swersky, Yang, and Koutra]{heterophily_results2}
Yujun Yan, Milad Hashemi, Kevin Swersky, Yaoqing Yang, and Danai Koutra.
\newblock Two sides of the same coin: Heterophily and oversmoothing in graph convolutional neural networks.
\newblock \emph{arXiv preprint arXiv:2102.06462}, 2021.
\bibitem[Zhou et~al.(2021)Zhou, Huang, Zha, Chen, Li, Choi, and Hu]{egnn}
Kaixiong Zhou, Xiao Huang, Daochen Zha, Rui Chen, Li~Li, Soo-Hyun Choi, and Xia Hu.
\newblock Dirichlet energy constrained learning for deep graph neural networks.
\newblock \emph{Advances in neural information processing systems}, 2021.
\bibitem[Zitnik et~al.(2018)Zitnik, Agrawal, and Leskovec]{bioinformatics}
Marinka Zitnik, Monica Agrawal, and Jure Leskovec.
\newblock {Modeling polypharmacy side effects with graph convolutional networks}.
\newblock \emph{Bioinformatics}, 34\penalty0 (13):\penalty0 i457--i466, 06 2018.
\newblock ISSN 1367-4803.
\newblock \doi{10.1093/bioinformatics/bty294}.
\newblock URL \url{https://doi.org/10.1093/bioinformatics/bty294}.
\end{thebibliography}
\appendix
\section{Stability and dissipativity definitions}\label{definitions}
In the following, we define the condition under which the solution of the Cauchy problem in Equation~\ref{eq:simple_ode_dgn} is stable.
\begin{definition}\label{def:stable}
A solution $\mathbf{x}_u(t)$ of the ODE in Equation~\ref{eq:simple_ode_dgn}, with initial condition $\mathbf{x}_u(0)$, is stable if for any $\omega > 0$, there exists a $\delta > 0$ such that any other solution $\mathbf{\tilde{x}}_u(t)$ of the ODE with initial condition $\mathbf{\tilde{x}}_u(0)$ satisfying $|\mathbf{x}_u(0) -\mathbf{\tilde{x}}_u(0)| \leq \delta$ also satisfies $|\mathbf{x}_u(t) -\mathbf{\tilde{x}}_u(t)| \leq \omega$, for all $t\ge0$.
\end{definition}
We now provide a definition of a dissipative system
based on the one provided in \cite{dissipative}.
\begin{definition}\label{def:dissipative}
Let $E\subseteq \mathbb{R}^{d}$ be a bounded set that contains the initial condition $\mathbf{x}_u(0)$ of the ODE in Equation~\ref{eq:simple_ode_dgn}. The system defined by the ODE in Equation~\ref{eq:simple_ode_dgn} is dissipative if there is a bounded set $B$ such that, for any such $E$, there exists $t^*\geq 0$ with $\left\{\mathbf{x}_u(t) \mid \mathbf{x}_u(0) \in E\right\} \subseteq B$ for all $t> t^*$.
\end{definition}
\section{Proof of Proposition~{\textnormal{e}}f{prep:stableCondition}}\label{proof_gradient}
Let us consider the ODE defined in Equation~\ref{eq:simple_ode_dgn} and analyze the sensitivity of its solution to the initial conditions.
Following \citep{AntisymmetricRNN}, we differentiate both sides of Equation~\ref{eq:simple_ode_dgn} with respect to $\mathbf{x}_u(0)$, obtaining:
\begin{equation}
\label{eq:appendix1}
\frac{d}{dt}\left(\frac{\partial \mathbf{x}_u(t)}{\partial \mathbf{x}_u(0)}{\textnormal{i}}ght) = \mathbf{J}(t) \frac{\partial \mathbf{x}_u(t)}{\partial\mathbf{x}_u(0)}.
\end{equation}
Assuming the Jacobian does not change significantly over time,
we can apply results from autonomous differential equations \citep{glendinning_1994} and solve Equation~\ref{eq:appendix1} analytically as follows:
\begin{equation}
\frac{\partial \mathbf{x}_u(t)}{\partial \mathbf{x}_u(0)} = e^{t \mathbf{J}} = \mathbf{T} e^{t \mathbf{\Lambda}}\mathbf{T}^{-1}
= \mathbf{T}
\big(\sum_{k=0}^\infty \frac{(t \mathbf{\Lambda})^k}{k!}\big)
\mathbf{T}^{-1},
\end{equation}
where $\mathbf{\Lambda}$ is the diagonal matrix whose non-zero entries contain the eigenvalues of $\mathbf{J}$, and $\mathbf{T}$ has the eigenvectors of $\mathbf{J}$ as columns.
The qualitative behavior of $\partial \mathbf{x}_u(t)/\partial \mathbf{x}_u(0)$ is then determined by the real parts of the eigenvalues of $\mathbf{J}$.
When $\max_{i=1,...,d} Re(\lambda_i(\mathbf{J}(t))) > 0$, a small perturbation of the initial condition (i.e., a perturbation of the input graph) would cause an exponentially exploding difference in the nodes' representations, and the system would be unstable.
On the contrary, for
$\max_{i=1,...,d} Re(\lambda_i(\mathbf{J}(t))) < 0$,
the term $\partial \mathbf{x}_u(t)/\partial \mathbf{x}_u(0)$ would vanish exponentially fast over time, thereby making the nodes' representation insensitive to differences in the input graph. Accordingly, the system states $\mathbf{x}_u(t)$ would asymptotically approach the same embeddings for all the possible initial conditions $\mathbf{x}_u(0)$, and the system would be dissipative.
Notice that the effects of explosion and dissipation are progressively more evident for larger absolute values of $\max_{i=1,...,d} Re(\lambda_i(\mathbf{J}(t)))$.
If $Re(\lambda_i(\mathbf{J}(t))) = 0$ for $i=1,...,d$, then the magnitude of $\partial \mathbf{x}_u(t)/\partial \mathbf{x}_u(0)$ is constant over time, and the input graph information is effectively propagated through the successive transformations into the final nodes' representations.
In this last case, the system is hence both stable and non-dissipative.
Let us now consider a loss function $\mathcal{L}$, and observe that its sensitivity to the initial condition (i.e., the input graph) $\partial \mathcal{L}/\partial \mathbf{x}_u(0)$ is proportional to $\partial \mathbf{x}_u(t)/\partial \mathbf{x}_u(0)$.
Hence, in light of the previous considerations, if $Re(\lambda_i(\mathbf{J}(t)))
=0$ for $i=1,...,d$, then the magnitude of $\partial \mathcal{L}/\partial \mathbf{x}_u(0)$, which is the
longest gradient chain
that we can obtain during
back-propagation, stays constant over time. The backward propagation is then stable and non-dissipative, and no gradient vanishing or explosion can occur during training.
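A small numerical illustration of this argument (not part of the proof): for a constant anti-symmetric Jacobian, $e^{t\mathbf{J}}$ is an orthogonal matrix, so its spectral norm equals $1$ for every $t$ and the sensitivity neither explodes nor vanishes.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

rng = np.random.default_rng(0)
W = rng.standard_normal((6, 6))
J = W - W.T                         # anti-symmetric Jacobian

for t in (1.0, 10.0, 100.0):
    S = expm(t * J)                 # d x_u(t) / d x_u(0) for constant J
    print(t, np.linalg.norm(S, 2))  # spectral norm stays 1 up to round-off
\end{verbatim}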
\section{Proof of Proposition~{\textnormal{e}}f{prep:jacobian}}\label{proof_jacobian}
Under the assumption that the aggregation function $\Phi(\mathbf{X}(t), \mathcal{N}_u)$ does not include a term that depends on $\mathbf{x}_u(t)$ itself (see Equation~\ref{eq:simple_aggregation} for an example), the Jacobian matrix of Equation~\ref{eq:stableode} is given by:
\begin{equation}
\label{eq:jacobian_appendix}
\mathbf{J}(t) = \mathrm{diag}\left[\sigma' \left((\mathbf{W}-\mathbf{W}^T) \mathbf{x}_u(t) +\Phi(\mathbf{X}(t), \mathcal{N}_u) + \mathbf{b}\right)\right](\mathbf{W}-\mathbf{W}^T).
\end{equation}
Following \citep{AntisymmetricRNN, chang2018reversible},
we can see the right-hand side of Equation~\ref{eq:jacobian_appendix} as the result of a matrix multiplication between an invertible diagonal matrix and an anti-symmetric matrix.
Specifically,
defining $\mathbf{A} = \mathrm{diag}\left[\sigma' \left((\mathbf{W}-\mathbf{W}^T) \mathbf{x}_u(t) +\Phi(\mathbf{X}(t), \mathcal{N}_u) + \mathbf{b}\right)\right]$ and $\mathbf{B} = \mathbf{W}-\mathbf{W}^T$,
we have
$\mathbf{J}(t) = \mathbf{A}\mathbf{B}$.
Let us now consider an eigenpair of $\mathbf{A} \mathbf{B}$, where the eigenvector is denoted by $\mathbf{v}$ and the eigenvalue by $\lambda$. Then:
\begin{align}
\label{eq:eigen}
\mathbf{A}\mathbf{B}\mathbf{v} &= \lambda \mathbf{v},\notag \\
\mathbf{B}\mathbf{v} &= \lambda \mathbf{A}^{-1}\mathbf{v},\notag \\
\mathbf{v}^*\mathbf{B}\mathbf{v} &= \lambda (\mathbf{v}^*\mathbf{A}^{-1}\mathbf{v})
\end{align}
where $*$ represents the conjugate transpose.
On the right-hand side of Equation~\ref{eq:eigen}, the term $\mathbf{v}^*\mathbf{A}^{-1}\mathbf{v}$ is a real number.
Recalling that $\mathbf{B}^* = \mathbf{B}^T=-\mathbf{B}$ for a real anti-symmetric matrix, we have
$(\mathbf{v}^*\mathbf{B}\mathbf{v})^* = \mathbf{v}^*\mathbf{B}^*\mathbf{v} = -\mathbf{v}^*\mathbf{B}\mathbf{v}$, so the term $\mathbf{v}^*\mathbf{B}\mathbf{v}$ on the left-hand side of Equation~\ref{eq:eigen} is purely imaginary.
Therefore, $\lambda$ must be purely imaginary, and, as a result, all eigenvalues of $\mathbf{J}(t)$ are purely imaginary.
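The statement is easy to check numerically; in the sketch below the real parts of the eigenvalues of $\mathbf{A}\mathbf{B}$, with $\mathbf{A}$ a positive diagonal matrix and $\mathbf{B}$ anti-symmetric, vanish up to round-off error.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
d = 5
A = np.diag(rng.uniform(0.1, 1.0, size=d))  # plays the role of diag(sigma')
W = rng.standard_normal((d, d))
B = W - W.T                                 # anti-symmetric

lam = np.linalg.eigvals(A @ B)
print(np.abs(lam.real).max())               # of the order of machine precision
\end{verbatim}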
\section{Bounded eigenvalues of $\mathbf{J}(t)$}
\label{Proposition2_variant}
Let us consider here the case in which $\Phi(\mathbf{X}(t), \mathcal{N}_u)$ is defined such that it depends on $\mathbf{x}_u(t)$ (see for example Equation~\ref{eq:gcn_aggregation}).
In this case, the Jacobian matrix of Equation~\ref{eq:stableode} can be written (in a more general form than Equation~\ref{eq:jacobian_appendix}) as follows:
\begin{equation}
\label{eq:jacobian2_appendix}
\mathbf{J}(t) = \mathrm{diag}\left[\sigma' \left((\mathbf{W}-\mathbf{W}^T) \mathbf{x}_u(t) +\Phi(\mathbf{X}(t), \mathcal{N}_u) + \mathbf{b}\right)\right]
\left(
(\mathbf{W}-\mathbf{W}^T)+\mathbf{C}
\right),
\end{equation}
where the term $\mathbf{C}$ represents the derivative of $\Phi(\mathbf{X}(t), \mathcal{N}_u)$ with respect to $\mathbf{x}_u(t)$.
For example, for the aggregation function in Equation~\ref{eq:gcn_aggregation}, we have $\mathbf{C} = \mathbf{V} / \hat{d}_u$ (instead, under the assumption of Proposition~\ref{prep:jacobian}, $\mathbf{C}$ is a zero matrix).
Similarly to Appendix~\ref{proof_jacobian}, we can see the right-hand side of Equation~\ref{eq:jacobian2_appendix} as $\mathbf{J}(t) = \mathbf{A}(\mathbf{B} + \mathbf{C}) = \mathbf{A} \mathbf{B} + \mathbf{A} \mathbf{C}$.
Thereby, we can bound the eigenvalues of $\mathbf{J}(t)$ around those of $\mathbf{A} \mathbf{B}$ by applying the Bauer--Fike theorem \citep{bauer1960norms}. Recalling that the eigenvalues of $\mathbf{A} \mathbf{B}$ are all imaginary (as proved in Appendix~\ref{proof_jacobian}), we can conclude that the eigenvalues of $\mathbf{J}(t)$ are contained in a neighborhood of the imaginary axis with radius $r = \|\mathbf{A} \mathbf{C}\| \leq \|\mathbf{C}\|$.
For example, using the definition of the aggregation function $\Phi(\mathbf{X}(t), \mathcal{N}_u)$ given in Equation~\ref{eq:gcn_aggregation}, we have $r \leq \|\mathbf{V}\|/\hat{d}_u$.
Although this result does not guarantee that the eigenvalues of the Jacobian are purely imaginary, in practice it crucially confines them to a neighborhood of the imaginary axis, so that the dynamics of the system on the graph exhibits at most a moderate amplification or loss of signals over the structure.
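The following numerical sketch illustrates the effect of the perturbation: the real parts of the eigenvalues of $\mathbf{A}(\mathbf{B}+\mathbf{C})$ remain small, of a size comparable to $\|\mathbf{A}\mathbf{C}\|$ (the precise Bauer--Fike bound also involves the conditioning of the eigenvector matrix of $\mathbf{A}\mathbf{B}$).
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)
d = 5
A = np.diag(rng.uniform(0.1, 1.0, size=d))
W = rng.standard_normal((d, d))
B = W - W.T
C = 0.05 * rng.standard_normal((d, d))   # small term, e.g. V / d_u

lam = np.linalg.eigvals(A @ (B + C))
print("max |Re(lambda)|:", np.abs(lam.real).max())
print("||A C||_2       :", np.linalg.norm(A @ C, 2))
\end{verbatim}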
\section{Datasets description and statistics}
In the graph property prediction (GPP) experiments, we employed the same generation procedure as in \citet{PNA}. Graphs are randomly sampled from several graph distributions, such as Erd\H{o}s--R\'enyi, Barab\'asi--Albert, and grid. Each node has a random identifier as input feature. Target values represent the single source shortest path, node eccentricity, and graph diameter.
PubMed is a citation network where each node represents a paper and each edge indicates that one paper cites another one. Each publication in the dataset is described by a 0/1-valued word vector indicating the absence/presence of the corresponding word from the dictionary. The class labels represent the papers' categories.
Amazon Computers and Amazon Photo are portions of the Amazon co-purchase graph, where nodes represent goods and edges indicate that two goods are frequently bought together. Node features are bag-of-words encoded product reviews, and class labels are given by the product category.
Coauthor CS and Coauthor Physics are co-authorship graphs extracted from the Microsoft Academic Graph\footnote{https://www.kdd.org/kdd-cup/view/kdd-cup-2016/Data}, where nodes are authors connected by an edge if they co-authored a paper. Node features represent the keywords of each author's papers, and class labels indicate the most active fields of study for each author.
Table~\ref{tab:data_stats} contains the statistics of the employed datasets, sorted by graph density. The density of a graph is computed as the ratio between the number of edges and the number of possible edges, i.e., $d =\frac{|\mathcal{E}|}{|\mathcal{V}|(|\mathcal{V}|-1)}$.
\input{tables/data_stats}
\section{Explored hyper-parameter space}\label{grids}
In Table~\ref{tab:configs_GraphProp} and Table~\ref{tab:configs} we report the grids of hyper-parameters employed by each method in our experiments. We recall that the hyper-parameters $\epsilon$ and $\gamma$ refer only to our method.
\input{tables/config_GraphProp}
\input{tables/config}
\section{Complete results}\label{complete_results}
In Table~\ref{tab:results_GraphProp_complete} we report the complete results for the graph property prediction benchmark, including all the versions and configurations of our A-DGN. We report the average time per epoch (measured in seconds) for each model in the graph property prediction task in Table~\ref{tab:results_GraphProp_time}. Each model in the evaluation has 20 layers and an embedding dimension equal to 30.
\input{tables/results_GraphProp_complete}
\input{tables/GraphProp_time}
\section{Additional experiments}\label{additional_exp}
\subsection{Graph heterophilic benchmarks}\label{exp_hetero}
In the graph heterophilic benchmarks we consider six well-known graph datasets for node classification, i.e., Chameleon, Squirrel, Actor, Cornell, Texas, and Wisconsin. We employ the same experimental setting as \cite{geom-gcn}. As in the graph benchmarks in Section~\ref{graph_benchmark}, we study only the version of A-DGN with weight sharing. We perform hyper-parameter tuning via grid search, optimizing the accuracy score. We report in
Table~\ref{tab:configs_hetero} the grid of hyper-parameters explored for this experiment.
\input{tables/conig_hetero}
We present the results on the graph heterophilic benchmarks in Table~\ref{tab:results_hetero}, reporting the achieved accuracy.
We observe that our method obtains results comparable to state-of-the-art methods on four out of six datasets (i.e., Actor, Cornell, Texas, Wisconsin). As stated in \cite{heterophily_results2}, the main cause of performance degradation in heterophilic benchmarks is strongly related to over-smoothing. Therefore, since A-DGN is not designed to tackle the over-smoothing problem, the achieved level of accuracy on these datasets is remarkable. In fact, our method outperforms most of the DGNs specifically designed to mitigate this phenomenon, ranking third and fourth among all the models when considering the average rank of each model across all benchmarks.
Similarly to the graph benchmarks in Section~\ref{graph_benchmark}, our approach maintains or improves the performance as the number of layers increases, as Figure~\ref{fig:comparison_hetero} shows. Moreover, in this experiment, we show that A-DGN has outstanding performance even with 64 layers. Thus, A-DGN is able to effectively propagate long-range information between nodes even in the scenario of graphs with high levels of heterophily. Such a result suggests that the presented approach can be a starting point to mitigate the over-smoothing problem as well.
\input{tables/results_hetero}
\begin{figure}
\caption{The
test accuracy of A-DGN with respect to the number of layers on all the graph heterophilic datasets. From the top left to the bottom, we show: Chameleon, Squirrel, Actor, Cornell, Texas, and Wisconsin. The accuracy is averaged over 10 train/validation/test splits.}
\label{fig:comparison_hetero}
\end{figure}
\subsection{Tree-NeighborsMatch}\label{exp_treeneighmatch}
In the Tree-NeighborsMatch task we consider the same experimental setting as \cite{bottleneck}. For simplicity, we focus on the version of A-DGN with weight sharing and the simple aggregation scheme (Equation~\ref{eq:simple_aggregation}). We report in Table~\ref{tab:configs_treeneighmatch} the grid of hyper-parameters explored for this experiment.
\input{tables/config_treeneighmatch}
We present the results on the Tree-NeighborsMatch benchmark in Table~\ref{tab:results_neighmatch}.
In this case, to avoid having an overly unbalanced setup in terms of the number of trainable parameters, we have increased the number of hidden units in our method (we use weight sharing, while the baselines have up to 8 trainable layers). Moreover, we have normalized the achieved accuracy, reporting the ratio between the accuracy score in percentage points and the total number of trainable hidden units, i.e., $\mathrm{acc} \times 100 / N_{tot}$. As can be seen, A-DGN generally outperforms the baselines (with the only exception of radius = 5). Moreover, even considering the un-normalized (i.e., original) accuracy, we observe that, even with fewer trainable parameters, A-DGN achieves on-par or better performance compared to GAT, GCN, and GIN.
\input{tables/TreeNeighMatch}
\end{document}
\begin{document}
\title{Algebraic polytopes in Normaliz}
\author{Winfried Bruns\orcidID{0000-0002-7081-2261}}
\authorrunning{W. Bruns}
\institute{Institut f\"ur Mathematik, Universit\"at Osnabr\"uck, 49069 Osnabr\"uck, Germany
\email{[email protected]}\\
\url{http://www.home.uni-osnabrueck.de/wbruns/} }
\maketitle
\begin{abstract}
We describe the implementation of algebraic polyhedra in Normaliz. In addition to convex hull computation/vertex enumeration, Normaliz computes triangulations, volumes, lattice points, face lattices and automorphism groups. The arithmetic is based on the package \textit{e-antic}\ by V.~Delecroix.
\keywords{polyhedron \and real algebraic number field \and computation.}
\end{abstract}
Algebraic polytopes lacking a rational realization are among the first geometric objects encountered in high school geometry: at least one vertex of an equilateral triangle in the plane has non-rational coordinates. Three of the five Platonic solids, namely the tetrahedron, the icosahedron and the dodecahedron are non-rational, and, among the $4$-dimensional regular polytopes, the $120$-cell and the $600$-cell live outside the rational world.
But algebraic polytopes do not only appear in connection with Coxeter groups. Other contexts include enumerative combinatorics \cite{rote}, Dirichlet domains of hyperbolic group actions \cite{DelPage}, $\operatorname{SL}(2,{\mathbb R})$-orbit closures in the moduli space of translation surfaces, and parameter spaces and perturbation polyhedra of cut-generating functions in integer programming.
\section{Real embedded algebraic number fields}
The notion of convexity is defined over any ordered field, not only over the rationals ${\mathbb Q}$ or the reals ${\mathbb R}$. \emph{Real embedded algebraic number fields} are subfields of the real numbers (and therefore ordered) that have finite dimension as a ${\mathbb Q}$-vector space. It is well known that such a field ${\mathbb A}$ has a primitive element, i.e., an element $a$ such that no proper subfield of ${\mathbb A}$ contains $a$. The minimal polynomial of $a$ is the least degree monic polynomial $\mu$ with coefficients in ${\mathbb Q}$ such that $\mu(a)=0$. It is an irreducible polynomial, and $\dim_{\mathbb Q}{\mathbb A}=\deg \mu$. In particular, every element $b$ of ${\mathbb A}$ has a unique representation $b= \alpha_{n-1}a^{n-1}+\dots+\alpha_1a +\alpha_0$ with $\alpha_{n-1},\dots,\alpha_0\in{\mathbb Q}$, $n=\deg \mu$. The arithmetic in ${\mathbb A}$ is completely determined by $\mu$: addition is the addition of polynomials and multiplication is that of polynomials followed by reduction modulo $\mu$. The multiplicative inverse can be computed by the extended Euclidean algorithm. The unique determination of the coefficients $\alpha_i$ allows one to decide whether $b=0$. Every element of ${\mathbb A}$ can be written as the quotient of a polynomial expression $\alpha_{n-1}a^{n-1}+\dots+\alpha_1a +\alpha_0$ with $\alpha_i\in{\mathbb Z}$ for all $i$ and an integer denominator; this representation is used in the implementation.
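As a toy illustration of this arithmetic, independent of the actual implementation in \textit{e-antic}, take ${\mathbb A}={\mathbb Q}[a]$ with $\mu=x^2-5$: elements are pairs of rational coefficients with respect to the basis $1,a$, multiplication reduces $a^2$ to $5$, and the inverse, which in general comes from the extended Euclidean algorithm, can be written down directly in degree $2$.
{\small
\begin{verbatim}
from fractions import Fraction

def add(b, c):        # coefficient-wise addition
    return (b[0] + c[0], b[1] + c[1])

def mul(b, c):        # multiply polynomials and reduce a^2 = 5
    return (b[0]*c[0] + 5*b[1]*c[1], b[0]*c[1] + b[1]*c[0])

def inv(b):           # inverse via the norm b0^2 - 5*b1^2
    n = b[0]*b[0] - 5*b[1]*b[1]
    return (b[0]/n, -b[1]/n)

golden = (Fraction(1, 2), Fraction(1, 2))  # (1 + a)/2
print(mul(golden, golden))                 # (3/2, 1/2), i.e. golden + 1
print(mul(golden, inv(golden)))            # (1, 0)
\end{verbatim}
}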
However, the algebraic structure alone does not define an ordering of ${\mathbb A}$. For example, $\sqrt 2$ and $\sqrt{-2}$ cannot be distinguished algebraically: there exists an automorphism of ${\mathbb Q}[\sqrt 2]$ that exchanges them. For the ordering we must fix a real number $a$ whose minimal polynomial is $\mu$. (Note that not every algebraic number field has an embedding into ${\mathbb R}$.) In order to decide whether $b>0$ for some $b\in{\mathbb A}$ we need a floating point approximation to $b$ of controlled precision.
Normaliz \cite{Nmz} uses the package \textit{e-antic}\ of V. Delecroix \cite{e-antic} for the arithmetic and ordering in real algebraic number fields. The algebraic operations are realized by functions taken from the package \textit{antic}\ of W. Hart and F. Johansson \cite{antic} (imported to \textit{e-antic}) while the controlled floating point arithmetic is delivered by the package \textit{arb}\ of F. Johansson \cite{arb}. Both packages are based on W. Hart's \textit{Flint}\ \cite{Flint}.
In order to specify an algebraic number field, one chooses the minimal polynomial $\mu$ of $a$ and an interval $I$ in ${\mathbb R}$ such that $\mu$ has a unique zero in $I$, namely $a$. An initial approximation to $a$ is computed at the start. Whenever the current precision of $b$ does not allow one to decide whether $b>0$, first the approximation of $b$ is improved, and if the precision of $a$ is not sufficient, it is replaced by one with twice the number of correct digits.
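A much simplified sketch of this strategy, using plain interval bisection instead of the ball arithmetic of \textit{arb}: the enclosing interval of $a$ is refined until the sign of a given element, here $b=2a-4$, is determined by its values at the endpoints.
{\small
\begin{verbatim}
from fractions import Fraction

mu = lambda x: x * x - 5              # minimal polynomial of a
lo, hi = Fraction(2), Fraction(3)     # initial interval containing a

for _ in range(30):                   # refine the enclosure by bisection
    mid = (lo + hi) / 2
    if mu(lo) * mu(mid) <= 0:
        hi = mid
    else:
        lo = mid

b_lo, b_hi = 2 * lo - 4, 2 * hi - 4   # enclosure of b = 2a - 4
print(b_lo > 0 and b_hi > 0)          # True: the sign of b is decided
\end{verbatim}
}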
\section{Polyhedra}
A subset $P\subset {\mathbb R}^d$ is a \emph{polyhedron} if it is the intersection of finitely many affine halfspaces:
$$
P=\bigcap_{i=1}^s H_i^+,\qquad H_i^+=\{x:\lambda_i(x) \ge \beta_i \}, \qquad i=1,\dots,s,
$$
where $\lambda_i$ is a linear form and $\beta_i\in{\mathbb R}$. It is a \emph{cone} if one can choose $\beta_i=0$ for all $i$, and it is a \emph{polytope} if it is bounded.
By the theorem of Minkowski-Weyl-Motzkin \cite[1.C]{BG} one can equivalently describe polyhedra by ``generators'': there exist $c_1,\dots,c_t\in{\mathbb R}^d$ and $v_1,\dots,v_u\in {\mathbb R}^d$ such that
$$
P=C+Q
$$
where $C=\bigl\{\gamma_1c_1+\dots+\gamma_tc_t:\gamma_i\in{\mathbb R},\gamma_i\ge 0 \bigr\}$ is the \emph{recession cone} and $Q=\bigl\{\kappa_1v_1+\dots+\kappa_uv_u: \kappa_i\in{\mathbb R},\kappa_i\ge 0,\sum \kappa_i=1 \bigr\}$ is a polytope. These two descriptions are often called \emph{H-representation} and \emph{V-representation}. The conversion from H to V is \emph{vertex enumeration} and the opposite conversion is \emph{convex hull computation}.
For theoretical and computational reasons it is advisable to present a polyhedron $P$ as the intersection of a cone and a hyperplane. Let $C(P)$ be the \emph{cone over} $P$, i.e., the smallest cone containing $P\times\{1\} $, and $D=\{x: x_{d+1}=1\}$ the \emph{dehomogenizing hyperplane}. Then $P$ can be identified with $C(P)\cap D$. After this step, convex hull computation and vertex enumeration are two sides of the same coin, namely the dualization of cones.
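As a simple illustrative example, the segment $P=[1,2]\subset{\mathbb R}^1$ corresponds to the two-dimensional cone
$$
C(P)=\bigl\{\gamma_1(1,1)+\gamma_2(2,1):\gamma_1,\gamma_2\ge 0\bigr\},
$$
and intersecting $C(P)$ with the dehomogenizing hyperplane $D=\{x:x_2=1\}$ recovers $P\times\{1\}$.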
In the definition of polyhedra and all statements following it, the field ${\mathbb R}$ can be replaced by an arbitrary subfield (and even by an arbitrary ordered field), for example a real algebraic number field ${\mathbb A}$. The smallest choice for ${\mathbb A}$ is ${\mathbb Q}$: for it we obtain the class of \emph{rational polyhedra}. For general ${\mathbb A}$ we get \emph{algebraic polyhedra}.
For the terminology related to polyhedra and further details we refer the reader to \cite{BG}.
\section{Normaliz}
Normaliz tackles many computational problems for rational and algebraic polyhedra:
\begin{itemize}
\item dual cones: convex hulls and vertex enumeration
\item projections of cones and polyhedra
\item triangulations, disjoint decompositions and Stanley decompositions
\item Hilbert bases of rational, not necessarily pointed cones
\item normalizations of affine monoids (hence the name)
\item lattice points of polytopes and (unbounded) polyhedra
\item automorphisms (euclidean, integral, rational/algebraic, combinatorial)
\item face lattices and f-vectors
\item Euclidean and lattice normalized volumes of polytopes
\item Hilbert (or Ehrhart) series and (quasi) polynomials under ${\mathbb Z}$-gradings
\item generalized (or weighted) Ehrhart series and Lebesgue integrals of polynomials over rational polytopes
\end{itemize}
Of course, not all of these computation goals make sense for algebraic polyhedra. The main difference between the rational and the non-rational case can be described as follows: the monoid of lattice points in a full dimensional cone is finitely generated if and only if the cone is rational.
Normaliz is based on a templated C++ library. The template allows one to choose the arithmetic, and so it would be possible to extend Normaliz to more general ordered fields. The main condition is that the arithmetic of the field has been coded in a C++ class library. There is no restriction on the real algebraic number fields that Normaliz can use.
Normaliz has a library as well as a file interface. It can be reached from CoCoA, GAP \cite{GAP-NmzInterface}, Macaulay2, Singular, Python \cite{PyNormaliz} and SageMath. The full functionality is reached on Linux and Mac OS platforms, but the basic functionality for rational polyhedra is also available on MS Windows systems.
Its history goes back to the mid-1990s. For recent developments see \cite{BI2} and \cite{BSS}. The extension to algebraic polytopes was done in several steps since 2016. We are grateful to Matthias K\"oppe for suggesting it.
The work on algebraic polytopes has been done in cooperation with Vincent Delecroix (\textit{e-antic}), Sebastian Gutsche (PyNormaliz), Matthias K\"oppe and Jean-Philippe Labb\'e (integration into SageMath). A comprehensive article with these coauthors is in preparation.
\section{The icosahedron}
Let us specify the icosahedron, a Platonic solid, by its vertices:
\noindent\begin{minipage}[b]{0.5\textwidth}
\small
\begin{verbatim}
amb_space 3
number_field min_poly (a^2 - 5) embedding [2 +/- 1]
vertices 12
0 2 (a + 1) 4
0 -2 (a + 1) 4
2 (a + 1) 0 4
...
(-a - 1) 0 -2 4
Volume
LatticePoints
FVector
EuclideanAutomorphisms
\end{verbatim}
\end{minipage}
\hspace{1.5cm}
\begin{minipage}[t]{0.4\textwidth}
\vspace*{-4.0cm}
\begin{tikzpicture}
[x={(0.700041cm, -0.429565cm)},
y={(0.714101cm, 0.419519cm)},
z={(0.001418cm, 0.799673cm)},
scale=2.00000,
back/.style={dotted, thin},
edge/.style={color=black!95!black, thick},
facet/.style={fill=yellow,fill opacity=0.600000},
vertex/.style={inner sep=0pt,circle,draw=black!25!black,fill=black!75!black,thick}]
\coordinate (0.80902, 0.00000, 0.50000) at (0.80902, 0.00000, 0.50000);
\coordinate (0.80902, 0.00000, -0.50000) at (0.80902, 0.00000, -0.50000);
\coordinate (0.00000, 0.50000, 0.80902) at (0.00000, 0.50000, 0.80902);
\coordinate (0.00000, 0.50000, -0.80902) at (0.00000, 0.50000, -0.80902);
\coordinate (0.50000, 0.80902, 0.00000) at (0.50000, 0.80902, 0.00000);
\coordinate (-0.50000, 0.80902, 0.00000) at (-0.50000, 0.80902, 0.00000);
\coordinate (0.00000, -0.50000, 0.80902) at (0.00000, -0.50000, 0.80902);
\coordinate (0.00000, -0.50000, -0.80902) at (0.00000, -0.50000, -0.80902);
\coordinate (0.50000, -0.80902, 0.00000) at (0.50000, -0.80902, 0.00000);
\coordinate (-0.80902, 0.00000, 0.50000) at (-0.80902, 0.00000, 0.50000);
\coordinate (-0.80902, 0.00000, -0.50000) at (-0.80902, 0.00000, -0.50000);
\coordinate (-0.50000, -0.80902, 0.00000) at (-0.50000, -0.80902, 0.00000);
\fill[facet] (0.00000, -0.50000, 0.80902) -- (0.80902, 0.00000, 0.50000) -- (0.00000, 0.50000, 0.80902) -- cycle {};
\fill[facet] (-0.80902, 0.00000, 0.50000) -- (0.00000, 0.50000, 0.80902) -- (0.00000, -0.50000, 0.80902) -- cycle {};
\fill[facet] (0.50000, 0.80902, 0.00000) -- (0.80902, 0.00000, 0.50000) -- (0.00000, 0.50000, 0.80902) -- cycle {};
\fill[facet] (0.50000, 0.80902, 0.00000) -- (0.80902, 0.00000, 0.50000) -- (0.80902, 0.00000, -0.50000) -- cycle {};
\fill[facet] (0.50000, -0.80902, 0.00000) -- (0.80902, 0.00000, 0.50000) -- (0.00000, -0.50000, 0.80902) -- cycle {};
\fill[facet] (0.50000, -0.80902, 0.00000) -- (0.80902, 0.00000, -0.50000) -- (0.00000, -0.50000, -0.80902) -- cycle {};
\fill[facet] (0.50000, -0.80902, 0.00000) -- (0.80902, 0.00000, 0.50000) -- (0.80902, 0.00000, -0.50000) -- cycle {};
\fill[facet] (-0.50000, -0.80902, 0.00000) -- (0.00000, -0.50000, 0.80902) -- (-0.80902, 0.00000, 0.50000) -- cycle {};
\fill[facet] (-0.50000, -0.80902, 0.00000) -- (0.00000, -0.50000, 0.80902) -- (0.50000, -0.80902, 0.00000) -- cycle {};
\fill[facet] (-0.50000, -0.80902, 0.00000) -- (0.00000, -0.50000, -0.80902) -- (0.50000, -0.80902, 0.00000) -- cycle {};
\draw[edge,back] (0.80902, 0.00000, -0.50000) -- (0.00000, 0.50000, -0.80902);
\draw[edge,back] (0.00000, 0.50000, 0.80902) -- (-0.50000, 0.80902, 0.00000);
\draw[edge,back] (0.00000, 0.50000, -0.80902) -- (0.50000, 0.80902, 0.00000);
\draw[edge,back] (0.00000, 0.50000, -0.80902) -- (-0.50000, 0.80902, 0.00000);
\draw[edge,back] (0.00000, 0.50000, -0.80902) -- (0.00000, -0.50000, -0.80902);
\draw[edge,back] (0.00000, 0.50000, -0.80902) -- (-0.80902, 0.00000, -0.50000);
\draw[edge,back] (0.50000, 0.80902, 0.00000) -- (-0.50000, 0.80902, 0.00000);
\draw[edge,back] (-0.50000, 0.80902, 0.00000) -- (-0.80902, 0.00000, 0.50000);
\draw[edge,back] (-0.50000, 0.80902, 0.00000) -- (-0.80902, 0.00000, -0.50000);
\draw[edge,back] (0.00000, -0.50000, -0.80902) -- (-0.80902, 0.00000, -0.50000);
\draw[edge,back] (-0.80902, 0.00000, 0.50000) -- (-0.80902, 0.00000, -0.50000);
\draw[edge,back] (-0.80902, 0.00000, -0.50000) -- (-0.50000, -0.80902, 0.00000);
\draw[edge] (0.80902, 0.00000, 0.50000) -- (0.80902, 0.00000, -0.50000);
\draw[edge] (0.80902, 0.00000, 0.50000) -- (0.00000, 0.50000, 0.80902);
\draw[edge] (0.80902, 0.00000, 0.50000) -- (0.50000, 0.80902, 0.00000);
\draw[edge] (0.80902, 0.00000, 0.50000) -- (0.00000, -0.50000, 0.80902);
\draw[edge] (0.80902, 0.00000, 0.50000) -- (0.50000, -0.80902, 0.00000);
\draw[edge] (0.80902, 0.00000, -0.50000) -- (0.50000, 0.80902, 0.00000);
\draw[edge] (0.80902, 0.00000, -0.50000) -- (0.00000, -0.50000, -0.80902);
\draw[edge] (0.80902, 0.00000, -0.50000) -- (0.50000, -0.80902, 0.00000);
\draw[edge] (0.00000, 0.50000, 0.80902) -- (0.50000, 0.80902, 0.00000);
\draw[edge] (0.00000, 0.50000, 0.80902) -- (0.00000, -0.50000, 0.80902);
\draw[edge] (0.00000, 0.50000, 0.80902) -- (-0.80902, 0.00000, 0.50000);
\draw[edge] (0.00000, -0.50000, 0.80902) -- (0.50000, -0.80902, 0.00000);
\draw[edge] (0.00000, -0.50000, 0.80902) -- (-0.80902, 0.00000, 0.50000);
\draw[edge] (0.00000, -0.50000, 0.80902) -- (-0.50000, -0.80902, 0.00000);
\draw[edge] (0.00000, -0.50000, -0.80902) -- (0.50000, -0.80902, 0.00000);
\draw[edge] (0.00000, -0.50000, -0.80902) -- (-0.50000, -0.80902, 0.00000);
\draw[edge] (0.50000, -0.80902, 0.00000) -- (-0.50000, -0.80902, 0.00000);
\draw[edge] (-0.80902, 0.00000, 0.50000) -- (-0.50000, -0.80902, 0.00000);
\end{tikzpicture}
\end{minipage}
The first line specifies the dimension of the affine space. The second defines the unique positive square root of $5$ as the generator of the number field. It is followed by the $12$ vertices. Each of them is given as a vector with $4$ components for which the fourth component acts as a common denominator of the first three. Expressions involving $a$ are enclosed in round brackets. The last lines list the computation goals for Normaliz. (Picture by J.-P. Labb\'e)
Normaliz has a wide variety of input data types. For example, it would be equally possible to define the icosahedron by inequalities. Now we have a look into the output file. (We indicate omitted lines by \dots)
{\small
\begin{verbatim}
Real embedded number field:
min_poly (a^2 - 5) embedding [2.23606797...835961152572 +/- 5.14e-54]
1 lattice points in polytope
12 vertices of polyhedron
0 extreme rays of recession cone
20 support hyperplanes of polyhedron (homogenized)
f-vector:
1 12 30 20 1
embedding dimension = 4
affine dimension of the polyhedron = 3 (maximal)
rank of recession cone = 0 (polyhedron is polytope)
...
volume (lattice normalized) = (5/2*a+15/2 ~ 13.090170)
volume (Euclidean) = 2.18169499062
Euclidean automorphism group has order 120
***********************************************************************
1 lattice points in polytope:
0 0 0 1
12 vertices of polyhedron:
...
0 extreme rays of recession cone:
20 support hyperplanes of polyhedron (homogenized):
(-a+1 ~ -1.236068) (-2*a+4 ~ -0.472136) 0 1
...
(a-1 ~ 1.236068) (2*a-4 ~ 0.472136) 0 1
\end{verbatim}
}
The output (in homogenized coordinates) is self-explanatory. Note that non-integral numbers in the output are printed as polynomials in $a$ together with a rational approximation. At the top we can see to what precision $\sqrt 5$ had to be computed. The automorphism group is described in another output file:
{
\small
\begin{verbatim}
Euclidean automorphism group of order 120
************************************************************************
3 permutations of 12 vertices of polyhedron
Perm 1: 1 2 4 3 7 8 5 6 10 9 11 12
Perm 2: 1 3 2 5 4 6 7 9 8 11 10 12
Perm 3: 2 1 3 4 6 5 8 7 9 10 12 11
Cycle decompositions
Perm 1: (3 4) (5 7) (6 8) (9 10) --
Perm 2: (2 3) (4 5) (8 9) (10 11) --
Perm 3: (1 2) (5 6) (7 8) (11 12) --
1 orbits of vertices of polyhedron
Orbit 1 , length 12: 1 2 3 4 5 6 7 8 9 10 11 12
************************************************************************
3 permutations of 20 support hyperplanes
Perm 1: 2 1 5 6 3 4 7 8 11 12 9 10 13 14 17 18 15 16 20 19
...
Cycle decompositions
Perm 1: (1 2) (3 5) (4 6) (9 11) (10 12) (15 17) (16 18) (19 20) --
...
1 orbits of support hyperplanes
Orbit 1 , length 20: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
\end{verbatim}
}
\section{Computation goals for algebraic polyhedra}
The basic computation in linear convex geometry is the dualization of cones. We start from a cone $C\subset {\mathbb R}^d$, given by generators $x_1,\dots,x_n$. The first (easy) step is to find a coordinate transformation that replaces ${\mathbb R}^d$ by the vector subspace generated by $x_1,\dots,x_n$. In other words, we can assume $\dim C=d$.
The goal is to find a minimal generating set $\sigma_1,\dots,\sigma_s\in({\mathbb R}^d)^*$ of the dual cone $C^*=\bigl\{\lambda: \lambda(x_i)\ge 0, \ i=1,\dots,n\bigr\}$. Because of $\dim C=d$, the linear forms $\sigma_1,\dots,\sigma_s$ are uniquely determined up to positive scalars: they are the extreme rays of $C^*$. By a slight abuse of terminology we call the hyperplanes $S_i=\{x: \sigma_i(x)=0\}$ the \emph{support hyperplanes} of $C$.
Let $C_k$ be the cone generated by $x_1,\dots,x_k$. Normaliz proceeds as follows:
\begin{enumerate}
\item It finds a basis of ${\mathbb R}^d$ among the generators $x_1,\dots,x_n$, say $x_1,\dots,x_d$. Computing $C_d^*$ amounts to a matrix inversion.
\item Iteratively it extends the cone $C_k$ to $C_{k+1}$, and shrinks $C_k^*$ to $C_{k+1}^*$, $k=d,\dots,n-1$.
\end{enumerate}
Step 2 is done by \emph{Fourier-Motzkin elimination}: if $\sigma_1,\dots,\sigma_t$ generate $C_k^*$, then $C_{k+1}^*$ is generated by
$$
\bigl\{\sigma_i: \sigma_i(x_{k+1})\ge0 \bigr\}\cup \bigl\{\sigma_i(x_{k+1})\sigma_j-\sigma_j(x_{k+1})\sigma_i: \sigma_i(x_{k+1}) >0,\ \sigma_j(x_{k+1}) < 0 \bigr\}.
$$
From this generating set of $C^*_{k+1}$ the extreme rays of $C^*_{k+1}$ must be selected.
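As an illustration of this update, here is a minimal sketch in Python with exact rational arithmetic (only a sketch of the Fourier-Motzkin step above, not the Normaliz implementation; the function name and data layout are ours):
{\small
\begin{verbatim}
from fractions import Fraction

def fourier_motzkin_step(sigmas, x):
    """One Fourier-Motzkin step: from generators `sigmas` of C_k^* and the next
    cone generator `x`, build a generating set of C_{k+1}^*.  Linear forms and
    points are lists of Fractions; a form is evaluated by the dot product."""
    def val(s):
        return sum(si * xi for si, xi in zip(s, x))
    nonneg = [s for s in sigmas if val(s) >= 0]
    combos = [
        [val(si) * sjc - val(sj) * sic for sic, sjc in zip(si, sj)]
        for si in sigmas if val(si) > 0
        for sj in sigmas if val(sj) < 0
    ]
    # The extreme rays of C_{k+1}^* still have to be selected from this set;
    # that selection is the computationally critical part in Normaliz.
    return nonneg + combos
\end{verbatim}
}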
This step is of critical complexity. Normaliz has a sophisticated implementation in which \emph{pyramid decomposition} is a crucial tool; see \cite{BIS}. It competes very well with dedicated packages (see \cite{Koeppe}). The implementation is independent of the field of coefficients. As said above, ${\mathbb R}$ can be replaced by an algebraic number field ${\mathbb A}$. In this case Normaliz uses the arithmetic over the field ${\mathbb A}$ realized by \textit{e-antic}, whereas arithmetic over ${\mathbb Q}$ is avoided in favor of arithmetic over ${\mathbb Z}$.
In addition to the critical complexity caused by the combinatorics of cones, one must tame the coordinates of the linear combination $\lambda=\sigma_i(x_{k+1})\sigma_j-\sigma_j(x_{k+1})\sigma_i$. For example, if, over ${\mathbb Z}$, both $\sigma_i$ and $\sigma_j$ are divisible by $2$, then $\lambda$ is divisible by $4$. If this observation is ignored, a doubly exponential explosion of coefficients will happen. One therefore extracts the gcd of the coordinates. But there is usually no well-defined gcd of algebraic integers, and even if one has unique decomposition into prime elements, there is in general no Euclidean algorithm. Normaliz therefore applies two steps:
\begin{enumerate}
\item $\lambda$ is divided by the absolute value of the last nonzero component (or by another ``norm'').
\item All integral denominators are cleared by multiplication with their lcm.
\end{enumerate}
Computational experience has shown that these two steps together are a very good choice.
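As an illustration of these two steps, here is a small Python sketch for a vector over ${\mathbb Q}[\sqrt 5]$, with a field element $p+q\sqrt 5$ stored as a pair of rationals (this only illustrates the idea and is not the Normaliz/e-antic arithmetic):
{\small
\begin{verbatim}
from fractions import Fraction
from math import lcm, sqrt

def approx(e):                      # numerical value, used only to fix the sign
    return float(e[0]) + float(e[1]) * sqrt(5)

def divide(e, f):                   # (a+b*sqrt5)/(c+d*sqrt5) via the conjugate
    a, b = map(Fraction, e)
    c, d = map(Fraction, f)
    nrm = c * c - 5 * d * d         # field norm of the divisor, a rational
    return ((a * c - 5 * b * d) / nrm, (b * c - a * d) / nrm)

def normalize(v):
    """Step 1: divide v by the absolute value of its last nonzero component.
       Step 2: clear all integral denominators by multiplying with their lcm."""
    last = next(e for e in reversed(v) if e != (0, 0))
    if approx(last) < 0:
        last = (-last[0], -last[1])
    v = [divide(e, last) for e in v]
    m = lcm(*[x.denominator for e in v for x in e])
    return [(e[0] * m, e[1] * m) for e in v]
\end{verbatim}
}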
Normaliz tries to measure the complexity of the arithmetic in ${\mathbb A}$ and to control the algorithmic alternatives of the dualization by the measurements. There are several ``screws'' that can be turned, and it is difficult to find the optimal tuning beforehand.
Normaliz computes lexicographic triangulations of algebraic cones in the same way as triangulations of rational cones. Their construction is interleaved with the extension from $C_k$ to $C_{k+1}$: the already computed triangulation of $C_k$ is extended by the simplicial cones generated by $x_{k+1}$ and those subcones in the triangulation of $C_k$ that are ``visible'' from $x_{k+1}$.
An algebraic polytope $P$ contains only finitely many integral points. They are computed by Normaliz' project-and-lift algorithm. The truncated Hilbert basis approaches, which Normaliz can also use for rational polytopes, are not applicable in the algebraic case. Once the lattice points are known, one can compute their convex hull, called the \emph{integer hull} of $P$.
At present Normaliz computes volumes only for full-dimensional algebraic polytopes. The volume is the sum of the volumes of the simplices in a triangulation, and these are simply (absolute values of) determinants. We do not see any reasonable definition of ``algebraic volume'' for lower dimensional polytopes that could replace the lattice normalized volume. The latter is defined for all rational polytopes and is a rational number that can be computed precisely.
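As a small illustration of the determinant formulas just mentioned (exact rational arithmetic; the helper names are ours, and we assume the simplex is given by its $d+1$ vertices):
{\small
\begin{verbatim}
from fractions import Fraction
from math import factorial

def det(rows):
    """Determinant by Gaussian elimination over exact Fractions."""
    m = [[Fraction(x) for x in row] for row in rows]
    n, d = len(m), Fraction(1)
    for i in range(n):
        p = next((r for r in range(i, n) if m[r][i] != 0), None)
        if p is None:
            return Fraction(0)
        if p != i:
            m[i], m[p] = m[p], m[i]
            d = -d
        d *= m[i][i]
        for r in range(i + 1, n):
            f = m[r][i] / m[i][i]
            m[r] = [a - f * b for a, b in zip(m[r], m[i])]
    return d

def simplex_volumes(vertices):
    """Lattice normalized and Euclidean volume of the simplex on d+1 vertices."""
    v0 = vertices[0]
    edges = [[x - y for x, y in zip(v, v0)] for v in vertices[1:]]
    nv = abs(det(edges))
    return nv, nv / factorial(len(edges))

# e.g. the unit simplex in R^3: lattice normalized volume 1, Euclidean volume 1/6
print(simplex_volumes([(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)]))
\end{verbatim}
}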
It would certainly be possible to extend the computation of the approximate Euclidean volume to all algebraic polytopes, and this extension may be included in future Normaliz versions. Note that the Euclidean volume does in general not belong to ${\mathbb A}$ if $P$ is lower dimensional. Its precise computation would require an extension of ${\mathbb A}$ by square roots.
The computation of automorphism groups follows the suggestions in \cite{Bremner}. First one transforms the defining data into a graph, and then computes the automorphism group of this graph by \emph{nauty} \cite{nauty}. For algebraic polytopes the Euclidean and the algebraic automorphism groups can be computed, and the combinatorial automorphism group is accessible for all polyhedra.
The Euclidean automorphism group is the group of rigid motions of the ambient space that map the polytope to itself, and the algebraic automorphism group is the group of affine transformations over ${\mathbb A}$ stabilizing the polytope. Both groups are finite, as well as the combinatorial automorphism group, the automorphism group of the face lattice, which can be computed from the facet-vertex incidence vectors, just as in the rational case.
We do not try to define the algebraic (or Euclidean) automorphism group for unbounded polyhedra. First of all, the algebraic automorphism group is infinite in general. Second, it would have to be realized as the permutation group of a vector configuration, and there seems to be no reasonable way to norm the involved vectors. But for polytopes we can and must use the vertices.
\section{Scaled convex hull computations}
We illustrate the influence of the algebraic number field on the computation time by some examples. For each of them we start from a cone (over a polyhedron) that is originally defined over the integers. Then we scale some coordinates by elements of the field ${\mathbb A}$. This transformation preserves the combinatorial structure throughout. It helps to isolate the complexity of the arithmetic operations. The types of arithmetic that we compare are
\begin{quote}
int: original input, computation with machine integers,\\
mpz: same input as int, but computation with GMP mpz\_class integers,\\
rat: same input as int, but computation in ${\mathbb Q}[\sqrt 5]$,\\
sc2: scaled input in ${\mathbb Q}[\sqrt 5]$,\\
sc8: scaled input in ${\mathbb Q}[\sqrt[8] 5]$,\\
p12: scaled input in ${\mathbb Q}[a]$, $a^{12} + a^6+a^5+a^2- 5=0$, $a>1$.
\end{quote}
The test candidates are A553 (from the Ohsugi-Hibi classification of contingency tables \cite{OH}), the cone q27f1 from \cite{Koeppe}, the linear order polytope for $S_6$, and the cyclic polytope of dimension $15$ with $30$ vertices. The last two are classical polytopes. While the other three cones are given by their extreme rays, q27f1 is defined by $406$ equations and inequalities.
\begin{table}[hbt]
\caption{Combinatorial data of the test candidates}\label{comb}
\centering
\small
\tabcolsep1ex
\begin{tabular}{| r | r | r | r | r |}
\hline
\vphantom{\Large(} & amb\_space & dim& ext rays & supp hyps\\
\hline
\vphantom{\Large(} A553& $55$ & $43$ & $75$ & $306,955$\\
\hline
\vphantom{\Large(} q27f1& $30$ & $13$ & $68,216$ & $92$ \\
\hline
\vphantom{\Large(} lo6& $16$& $16$& $720$ & $910$ \\
\hline
\vphantom{\Large(} cyc15-30 &$16$ & $16$ & $30$ & $341,088$\\
\hline
\end{tabular}
\end{table}
The Normaliz version is 3.8.4, compiled into a static binary with gcc 5.4 under Ubuntu 16.04. The computations use $8$ parallel threads (the default choice of Normaliz). They were run on the author's PC with an AMD Ryzen 7 1700X at 3.2 GHz. Table \ref{perf} lists wall times in seconds. As a rule of thumb, for a single thread the times must be multiplied by $6$.
\begin{table}[hbt]
\caption{Wall times of scaled convex hull computations in seconds}\label{perf}
\centering
\small
\tabcolsep1ex
\begin{tabular}{|l|r|r|r|r|}
\hline
\vphantom{\Large(} coeff & A553 & q27f1 & lo6 & cyc15-30 \\
\hline
\vphantom{\Large(} int &57 &16 & 5 & --\\
\hline
\vphantom{\Large(} mpz &299 &58 & 5 & 7\\
\hline
\vphantom{\Large(} rat &277 &40 & 5 & 7 \\
\hline
\vphantom{\Large(} sc2 &783 &166 & 4 & 14 \\
\hline
\vphantom{\Large(} sc8 &1272 &475 &15 & 28\\
\hline
\vphantom{\Large(} p12 &2908 &905 &31 & 42\\
\hline
\end{tabular}
\end{table}
The cyclic polytope and all intermediate polytopes coming up in its computation are simplicial. Therefore it profits from Normaliz' special treatment of simplicial facets---almost everything can be done by set theoretic operations. Also lo6 is combinatorially not complicated. That lo6 is fastest with sc2 is caused by the fine tuning of the pyramid decomposition, which is not always optimal.
Surprisingly, rat is faster than mpz for A553 and q27f1. This can be explained by the fact that linear algebra over ${\mathbb Z}$ must use the Euclidean algorithm, and therefore needs more steps than the true rational arithmetic of rat.
\end{document}
|
\begin{document}
\title{A short note on the operator norm upper bound for sub-Gaussian tailed random matrices}
\author{Eric Benhamou \thanks{A.I. Square Connect} \thanks{Lamsade, Paris Dauphine} \thanks{Email:
\texttt{[email protected], [email protected]}} \and
Jamal Atif \footnotemark[2] \thanks{Email: \texttt{[email protected]}} \and
Rida Laraki \footnotemark[2] \thanks{Email: \texttt{[email protected]}}}
\maketitle
\begin{abstract}
This paper investigates an upper bound of the operator norm for sub-Gaussian tailed random matrices. A lot of attention has been paid to uniformly bounded sub-Gaussian tailed random matrices with independent coefficients. However, little has been done for sub-Gaussian tailed random matrices whose coefficients have unequal variances or are not independent. This is precisely the subject of this paper. After proving that random matrices with uniformly sub-Gaussian tailed independent coefficients satisfy the Tracy-Widom bound, that is, their operator norm remains bounded by $O(\sqrt n )$ with overwhelming probability, we prove that a less stringent condition is that the matrix rows are independent and uniformly sub-Gaussian. This does not require that all matrix coefficients be independent, but only the rows, which is a weaker condition.
\end{abstract}
\section{Introduction}
Random matrices and their spectra have been under intensive study in many fields. This is the case in Statistics since
the work of \cite{Wishart_1928} on sample covariance matrices, in Numerical Analysis since their introduction by \cite{VonNeumann_1947} in the 1940s, in Physics as a consequence of the work of \cite{Wigner_1955, Wigner_1958} since the 1950s, and in Banach Space Theory and Differential Geometric Analysis with the work of \cite{Grothendieck_1956} in a similar period.
More recently, in machine learning, the Netflix prize (see \cite{wiki:Netflix_prize}) has attracted a lot of attention, with a large part of the community investigating recommender systems (see \cite{wiki:recommender_system}) and collaborative filtering methods, which ultimately also rely on random matrices and their eigenvalue and singular value spectra.
In particular, an interesting and important problem in matrix completion has been to investigate where the operator norm concentrates, in order to be able to make some reasonable assumptions about missing entries. Other important contributions have been the Tracy-Widom law, which says that for a Wigner matrix the operator norm is concentrated in the range $\left[ 2 \sqrt{n} - O(n^{-1/6}), \right.$ $ \left. 2 \sqrt{n} + O(n^{-1/6}) \right]$
(see \cite{Tracy_1994}), and the Marchenko–Pastur distribution that describes the asymptotic behavior of singular values of large rectangular random matrices (see \cite{Marcenko_1967}).
However, most of these results have been derived under the assumption of independent and identically distributed coefficients. It is natural to ask similar questions about general random matrices whose entries' distributions may differ. In particular, to make the question more concrete, we are interested in finding an upper bound on the operator norm of a random matrix whose coefficients are sub-Gaussian, and in the implied consequences for the matrix coefficients. The paper is organized as follows. In section \ref{intro}, we recall various definitions. In section \ref{main}, we first prove that independent and uniformly sub-Gaussian tailed random square matrices satisfy the Tracy-Widom bound, that is, the matrix operator norm for the $L_a, L_b$ norms remains bounded by $O(\sqrt n )$. We then see that a less stringent sufficient condition is that the $L_{a}$ norms of the matrix rows are uniformly sub-Gaussian and the rows are independent. This implies in particular that a matrix whose coefficients are not necessarily independent can still satisfy an upper bound of $O(\sqrt n )$ on its operator norm with overwhelming probability.
The condition of independence of rows has already been mentioned in \cite{vershynin_2018}, with a similar setting and proof, and appeared as early as 2017. Additionally, \cite{Benaych_2018} provided a similar proof in the Hermitian case and kindly pointed the authors to the last two references, of which the authors were not aware at the time of writing. This article has at least the merit of being self-contained and of focusing only on sub-Gaussian random matrices, making the presentation shorter and self-consistent. For more details, we advise the reader to refer to the last two references, which cover a much wider scope and are respectively 300 and 80 pages long.
\section{Some definitions}\label{intro}
Suppose $\| \cdot \|_a$ and $\| \cdot \|_b$ are norms on $\mathbb{R}^m$ and $\mathbb{R}^n$, respectively. We can of course generalize easily the concept to norms operating on $\mathbb{C}^m$ and $\mathbb{C}^n$ if we look at matrices with complex number coefficients.
\begin{definition}\label{operatornorm_def}
We define the operator norm of $ \mathbf{X} \in \mathbb{R}^{m \times n}$, induced by the norms $\| \cdot \|_a$ and $\| \cdot \|_b$, as
\begin{equation}\label{operatornorm}
\| \mathbf{X} \|_{a,b} = \operatorname{sup} \{ \| \mathbf{X}u \|_{a} \,\, | \,\, \| u \|_{b} \leq 1\} .
\end{equation}
We will denote this norm as $\| \cdot \|_{op}$, dropping the $a,b$ indices whenever there is no risk of confusion, and we have the following definition
\begin{equation}\label{operatornorm2}
\| \mathbf{X} \|_{op} = \operatorname{sup} \{ \| \mathbf{X}u \| \,\, | \,\, \| u \| \leq 1\} .
\end{equation}
\end{definition}
When $\| \cdot \|_a$ and $\| \cdot \|_b$ are both Euclidean norms, the operator norm of $\mathbf{X}$ is its
\textit{maximum singular value}, and is denoted $\| \cdot \|_2$:
\begin{equation}
\| \mathbf{X} \|_{2} = \sigma_{\text{max}}( \mathbf{X} ) = ( \lambda_{\text{max}} (\mathbf{X}^T \mathbf{X} ))^{1/2}.
\end{equation}
where $ \sigma_{\text{max}}( \mathbf{X} )$ is the maximum singular value of the matrix $\mathbf{X}$ and where $\lambda_{\text{max}} (\mathbf{X}^T \mathbf{X} )$ is the maximum eigenvalue of the matrix $\mathbf{X}^T \mathbf{X}$, also defined as $\sup \{ u^T \mathbf{X}^T \mathbf{X} u \,\, | \,\, \| u \|_2 = 1\}$.
In the rest of the paper, to simplify notation, we will assume that $a = b = 2$, but all results remain the same for any $a, b \geq 1$.
\begin{remark}
The $n \times n$ matrix consisting entirely of ones has an operator norm of exactly $n$.
This can be seen easily by taking the vector $u = ( 1/ \sqrt n, \ldots, 1/ \sqrt n)^T$, which gives $ \| \mathbf{X}u \|_{ 2} = n$ and proves that the operator norm is at least $n$, while the Cauchy-Schwarz inequality proves that it cannot be more than $n$. This vector is the right one to choose for the $L_2$ norm, but since all norms are equivalent in finite dimension (and the matrix space has finite dimension $n^2$), the order of magnitude is not specific to the $L_2$ norm and holds for any norm.
Furthermore, the same application of Cauchy-Schwarz proves that any matrix whose coefficients are uniformly bounded by a constant $K$ has an operator norm bounded by $Kn$. In other words, using the Landau notation, any matrix whose entries are all uniformly $O(1)$ has an operator norm of $O(n)$. However, this upper bound does not take into account any possible cancellations in the matrix $M$. Indeed, intuitively, using the concentration inequalities of Hoeffding and Markov, we should expect with overwhelming probability (a notion that we will define shortly) that the operator norm is bounded by a constant times $\sqrt{n}$ rather than $n$ in most cases where the matrix coefficients are symmetrically distributed and have tails that decrease fast enough, a concept that we will also make more precise shortly with the notion of sub-Gaussian tails.
Since for Euclidean norms the operator norm boils down to the maximum singular value, and for symmetric matrices to the maximum eigenvalue, such a bound gives fruitful information about these two quantities.
\end{remark}
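To make the contrast between the orders $n$ and $\sqrt n$ concrete, here is a small numerical illustration (numpy; the sizes, seed and constants are arbitrary and purely illustrative):
{\small
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
for n in (100, 400, 1600):
    ones = np.ones((n, n))                          # no cancellations at all
    signs = rng.choice([-1.0, 1.0], size=(n, n))    # bounded, hence sub-Gaussian entries
    print(n,
          np.linalg.norm(ones, 2),                  # operator norm = n exactly
          np.linalg.norm(signs, 2),                 # typically close to 2*sqrt(n)
          2 * np.sqrt(n))
\end{verbatim}
}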
\begin{definition}
A random variable $\xi$ is called sub-Gaussian if there are positive constants $B, b > 0$ such that for every $t > 0$,
\begin{equation}\label{sub-Gaussian}
\mathbb{P}(| \xi |>t) \leq B \exp(- b t^2).
\end{equation}
where $\mathbb{P}$ is the probability measure defined on a usual probability space $ ( \Omega, \mathcal{B}, \mathbb{P})$, where $ \Omega$ is the ambient sample space, associated with a $\sigma$-algebra $\mathcal{B}$ of subsets of
$\Omega$.
\end{definition}
\begin{remark}
Sub-Gaussian can be defined in multiple ways. We have used the traditional definition that states that the tails of the variable $\xi$ are dominated by, meaning they decay at least as fast as, the tails of a Gaussian. A more probabilistic way of defining the sub-Gaussian is to state that a random variable $\xi$ is called sub-Gaussian with variance proxy $\sigma$ if
\begin{equation}\label{sub-Gaussian2}
\mathbb{P}(| \xi - \mathbb{E}[\xi] |>t) \leq 2 \exp(- \frac{ t^2}{2 \sigma^2}).
\end{equation}
The Chernoff bound allows one to translate a bound on the moment generating function into a tail bound and vice versa. So we should expect equivalent definitions in terms of the moment generating function, the Laplace transform and many more criteria. Indeed, there are many equivalent definitions (which can be found for instance in \cite{Buldygin_1980} or \cite{Ledoux_1991}):
\begin{itemize}
\item A random variable $\xi$ is sub-Gaussian.
\item A random variable $\xi$ satisfies the $\psi_2$ -condition, that is, there exist two non negative real constants $B, b>0$ such that $ \mathbb{E}[e^{b \xi^2}] \leq B$.
\item A random variable $\xi$ satisfies the Laplace transform condition, that is there exist two non negative real constants $B, b>0$ such that $\forall \lambda \in \mathbb{R}$, $\ \ \mathbb{E}[e^{\lambda (\xi-\operatorname{E}[\xi])} ] \leq Be^{\lambda^2 b / 2}$. This condition is also referred to as the moment generating-condition, that is there exist two non negative real constants $B, b>0$ such that $ \mathbb{E}[ e^{t \xi} ] \leq B e^{t^2 b^2 /2 }$. The parameter $b$ is directly related to the variance proxy $\sigma$.
\item A random variable $\xi$ satisfies the Moment condition, that is there exists a non negative real constant $K>0$ such that $\ \forall p \geq 1 \ \left ( \mathbb{E}[ |\xi|^p \right ])^{1/p} \leq K \sqrt{p}$. It is easy to see with for instance Gaussian variables that $K$ can be expressed with respect to the variance proxy $\sigma$ as follows: $K= \sigma e^{1 / e}$ for $k \geq 2$ and $K = \sigma \sqrt{ 2 \pi}$.
\item A random variable $\xi$ satisfies the Union bound condition, that is there exists a non negative real constant $c>0$ such that $ \forall n \ge c \ \mathbb{E}[\max\{|\xi_1 - \operatorname{E}[\xi]|,\ldots,|\xi_n - \operatorname{E}[\xi]|\}] \leq c \sqrt{\log n}$ where $\xi_1, \ldots, \xi_n$ are independent and identically distributed random variables, copies of $\xi$.
\item The tail is less than the one of a Gaussian of variance proxy $\sigma$, there exist $b > 0$ and $Z \sim \mathcal{N}(0, \sigma^2)$ such that
$\mathbb{P}(| \xi |>t) \leq b \mathbb{P}(| Z | \geq t )$. The latter definition explains the term sub-Gaussian constants quite well.
\end{itemize}
Obviously, the various non negative real constants $B, b>0$ appearing above are not necessarily the same.
\end{remark}
\begin{definition}
Referring to \cite{Tao_2013}, we say that an event $E$ holds with overwhelming probability if, for every fixed real constant
$k > 0$, we have
\begin{equation}\label{overwhelming}
\mathbb{P}(E) \geq 1 - C_k / n^k
\end{equation}
\noindent for some constant $C_k$ independent of $n$ or equivalently
$\mathbb{P}(E^{c}) \leq C_k e^{ -k \ln n } $, where $E^{c}$ denotes the complement of $E$.
\end{definition}
\begin{remark}
Of course, the concept of overwhelming probability can be extended to a family of events $E_{\alpha}$ depending on some parameter $\alpha$ with the condition that each event in the family holds with overwhelming probability uniformly in $\alpha$ if the constant $C_k$ in the definition of overwhelming probability is independent of $\alpha$.
\end{remark}
\begin{remark}
Using Boole's inequality (also referred to as the union bound in the English mathematical literature) that states that the probability measure is $\sigma$-sub additive, we trivially see that if a family of events $E_{\alpha}$ of polynomial cardinality holds with overwhelming probability, then the intersection over $\alpha$ of this family $\bigcap \limits_{\alpha} E_{\alpha}$ still holds with overwhelming probability.
\end{remark}
\begin{remark}
The previous remark on Boole's inequality emphasizes that although the concept of overwhelming probability is not the same as that of almost surely, it is still something with very high probability. In the rest of the paper, we will even get a tighter bound and prove
\begin{equation}\label{overwhelming2}
\mathbb{P}(E^{c}) \leq C_k e^{ -k n }
\end{equation}
which implies that the event $E$ holds with overwhelming probability.
\end{remark}
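To check that the bound (\ref{overwhelming2}) indeed implies overwhelming probability, note that for every fixed $k>0$ the function $n \mapsto n^k e^{-kn}$ is bounded by some constant $C'_k$, so that
\begin{equation*}
\mathbb{P}(E^{c}) \ \leq \ C_k e^{-kn} \ = \ \frac{C_k \, n^k e^{-kn}}{n^k} \ \leq \ \frac{C_k C'_k}{n^k},
\end{equation*}
which is exactly a bound of the form required in the definition of overwhelming probability.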
\section{Upper bound for operator norm for sub-Gaussian tailed matrices}\label{main}
Equipped with these definitions, we shall prove the following statement.
\begin{proposition}\label{prop1}
Let $M$ be a square matrix with independent, zero-mean coefficients $\xi_{i,j}$ that are uniformly sub-Gaussian. Then there exist non negative real constants $C,c > 0$ such that
\begin{equation}
\mathbb{P} ( \| M \|_{op} > A \sqrt n )\leq C \exp( -c A n) \label{eq1}
\end{equation}
\noindent for all $ A \geq C$. In particular, we have $\| M \|_{op} = O(\sqrt n )$
with overwhelming probability.
\end{proposition}
\begin{proof}
See \ref{proof1}.
\end{proof}
\begin{remark}
This result is quite natural as the matrix coefficients $\xi_{i,j}$ are uniformly sub-Gaussian. Indeed, in the proof we use the fact that the $L_{\infty}$ norm of the matrix coefficients $\xi_{i,j}$ is sub-Gaussian, hence each matrix row is sub-Gaussian for the $L_a$ norm. But can we go further and find a less stringent sufficient condition for inequality (\ref{eq1}) to hold? The answer is yes and is provided by the condition stated in proposition \ref{prop2}.
\end{remark}
\begin{proposition}\label{prop2}
Let $M$ be a square matrix whose rows are independent and uniformly sub-Gaussian for the $L_a$ norm. Then there exist non negative real constants $C,c > 0$ such that
\begin{equation}
\mathbb{P} ( \| M \|_{a,b} > A \sqrt n )\leq C \exp( -c A n) \label{eq2}
\end{equation}
\noindent for all $ A \geq C$. In particular, we have $\| M \|_{a,b} = O(\sqrt n )$
with overwhelming probability.
\end{proposition}
\begin{proof}
See \ref{proof2}.
\end{proof}
\begin{remark}
If a random matrix has rows that are uniformly sub-Gaussian, then necessarily each of its coefficients is also uniformly sub-Gaussian. This is trivially seen as follows: for a given coefficient $\xi_{ij}$, the corresponding row $R_i$ is sub-Gaussian, hence there are positive constants $B, b$ that do not depend on $i$ such that for every $t > 0$,
\begin{equation}
\mathbb {P} (\| R_i \| >t) \leq B \exp( {-b t^{2}} )
\end{equation}
Hence, since $\| R_i \| \geq | \xi_{ij} |$, we have as well
\begin{equation}
\mathbb{P} ( | \xi_{ij} | > t )\leq B \exp({ -b t^{2} } )
\end{equation}
which proves the uniform sub-Gaussian character of each matrix coefficient.
The independence of the matrix rows, however, does not imply that the matrix coefficients are independent, making the condition of Proposition \ref{prop2} less stringent than that of Proposition \ref{prop1}.
\end{remark}
\section{Conclusion}
This paper investigated an upper bound of the operator norm for sub-Gaussian tailed random matrices. We proved here that random matrices with independent rows that are uniformly sub-Gaussian satisfy the Tracy-Widom bound, that is, the matrix operator norm remains bounded by $O(\sqrt n )$. An interesting extension would be to see how we can generalize our result to the $(\ell_p,\ell_r)$-Grothendieck problem, which seeks to maximize the bilinear form $y^T A x$ for an input matrix $A \in {\mathbb R}^{m \times n}$ over vectors $x,y$ with $\|x\|_p=\|y\|_r=1$. We know this problem is equivalent to computing the $p \to r^\ast$ operator norm of $A$, where $\ell_{r^*}$ is the dual norm to $\ell_r$.
\ifnum1=1
\appendix
\section{Proofs}
\subsection{Proof of proposition \ref{prop1}}\label{proof1}
We will do the proof thanks to three simple lemmas below that take advantage of the uniform sub-Gaussian tail bounds and the remarkable Lipschitz character of the map $x \rightarrow \| M x \|$, combined with the compactness of the unit sphere.
Let us define the unit sphere $\mathcal{S} := \{ u \in \mathbb{R}^n | \| u \| = 1 \}$ of the vector space $\mathbb{R}^n$. The result is similar for matrices with complex coefficients, in which case the unit sphere becomes $\mathcal{S} := \{ u \in \mathbb{C}^n | \| u \| = 1 \}$ in $\mathbb{C}^n$.
We will first prove the following lemma
\begin{lemma}\label{lemma1}
If the coefficients $\xi_{i,j}$ of $M$ are independent and have uniformly sub-Gaussian tails, then there exist absolute constants $C, c > 0$ such that for any $u \in \mathcal{S}$, we have
\begin{equation}\label{lemma1_eq}
\mathbb{P} ( \| M u \| > A \sqrt n )\leq C \exp( -c A n)
\end{equation}
for all $ A \geq C$.
\end{lemma}
\begin{proof}
Let $R_1, \ldots, R_n$ be the $n$ rows of the matrix $\mathbf{M}$, then the column vector $\mathbf{M} u$ has coefficients
$R_i u $ for $i = 1, \ldots, n$.
The matrix coefficients $\xi_{i,j}$ are all uniformly sub Gaussian, hence there are positive constants $B, b> 0$ independent of $i,j$ such that for every $t > 0$,
\begin{equation}
\mathbb{P}( | \xi_{i,j} | \geq t ) \leq B \exp(-b t^2).
\end{equation}
This implies in particular that $R_i$ also has sub-Gaussian tails, but with different constants. This is because we have
\begin{equation}
\mathbb{P}( \| R_i \| \geq t ) \leq \mathbb{P}( \sqrt n \max_{j}| \xi_{i,j} | \geq t ) \leq B \exp(-\frac{b}{n} t^2).
\end{equation}
Hence taking $b'= \frac{b}{n}$, we have
\begin{equation}
\mathbb{P}( \| R_i \| \geq t ) \leq B \exp(-b' t^2).
\end{equation}
The Cauchy-Schwarz inequality gives us that for $u \in \mathcal{S}$, we have $| R_i u | \leq \| R_i \| \| u \| = \| R_i \| $ as $\| u \| =1$; hence,
\begin{equation}
\mathbb{P}( | R_i u | \geq t ) \leq \mathbb{P}( \| R_i \| \geq t ) \leq B \exp(-b' t^2).
\end{equation}
which states that $R_i u$ is uniformly sub-Gaussian or equivalently, that it satisfies the $\psi_2$ condition, that there exist two non negative constants $b, B >0$ (that are different constants from previously) such that:
\begin{equation}
\mathbb{E}[ e^{b | R_i u | ^2} ] \leq B.
\end{equation}
Because the matrix coefficients are assumed independent, the variables $R_i u$, $i=1,\ldots,n$, are also independent, and the vector $M u$ satisfies the $\psi_2$ condition as well:
\begin{equation}
\mathbb{E}[ e^{b \| M u \| ^2} ] = \mathbb{E}[ \prod_{i=1}^n e^{b | R_i u | ^2} ] = \prod_{i=1}^n \mathbb{E}[ e^{b | R_i u | ^2} ] \leq B^n.
\end{equation}
Let us take $C= B^n$ and take $A \geq C$ and $n \geq 1$. Markov's inequality gives us
\begin{eqnarray}
\mathbb{P}( \| M u \| \geq A \sqrt{n} ) = \mathbb{P}( e^{ b \| M u \|^2 } \geq e^{ b \, A^2 n } ) \leq \frac{ \mathbb{E}[ e^{b \| M u \| ^2} ] } { e^{ b \, A^2 n } } \leq C e^{ -b \, A^2 n } \leq C e^{ -b \, C A n }
\end{eqnarray}
Taking $c = b \,C$, we get the required inequality:
$$
\mathbb{P} ( \| M u \| > A \sqrt n )\leq C \exp( -c A n)
$$ which concludes the proof.
\end{proof}
\begin{remark}
Expressing lemma \ref{lemma1} in terms of probability, we have proved that for any individual unit vector $u$, the norm of the product of $M$ with $u$, denoted by $\| M u \|$, grows at most like $\sqrt n$, or equivalently $\| M u \| = O(\sqrt n )$, with overwhelming probability.
\end{remark}
\begin{remark}
At this stage, we could imagine that, equipped with lemma \ref{lemma1}, we could finalize the proof of proposition \ref{prop1}. The slight difference between lemma \ref{lemma1} and proposition \ref{prop1} is the set over which the bound holds. Lemma \ref{lemma1} states that for any individual unit vector $u$, the norm $\| M u \|$ grows at most like $\sqrt n$. Proposition \ref{prop1} states that the supremum of $\| M u \|$ over all unit vectors $u$ of the unit sphere grows at most like $\sqrt n$.
We could imagine going from lemma \ref{lemma1} to proposition \ref{prop1} using a simple union bound over all points of the unit sphere for the operator norm as follows:
\begin{equation}
\mathbb{P}( \| \mathbf{M} \|_{op} > \lambda ) \leq \mathbb{P}( \bigcup \limits_{u \in \mathcal{S} } \| \mathbf{M} u \| > \lambda )
\end{equation}
However, we would be stuck, as the unit sphere $\mathcal{S}$ is an uncountable set of points.
To solve this issue, we shall change the set in the union bound and use the usual trick of a maximal $\varepsilon$-net of the unit sphere $\mathcal{S}$, denoted by $\Sigma(\varepsilon)$. This leads to lemma \ref{lemma2}. As we will see shortly, the maximal $\varepsilon$-net of the sphere $\mathcal{S}$ is finite, by standard packing arguments. On this particular set, we can exploit the fact that the map $x \rightarrow \| M x \|$ is Lipschitz with Lipschitz constant given by $\| M \|_{op}$. The induced continuity allows us to control the upper bound of the norm $\| M v \|$ for $v \in \Sigma(\varepsilon)$.
\end{remark}
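As a purely numerical illustration of this net argument, here is a small sketch that builds an approximate $\varepsilon$-separated net greedily from random samples (our own construction; a greedy net built from finitely many samples only approximates a truly maximal net):
{\small
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)

def greedy_net(n, eps, samples=20000):
    """Keep sampled unit vectors that are at distance >= eps from all kept ones.
    The result is eps-separated; with many samples it approximates a maximal net."""
    net = []
    for _ in range(samples):
        u = rng.normal(size=n)
        u /= np.linalg.norm(u)
        if all(np.linalg.norm(u - v) >= eps for v in net):
            net.append(u)
    return net

n, eps = 6, 0.5
net = greedy_net(n, eps)
M = rng.normal(size=(n, n))
net_bound = max(np.linalg.norm(M @ v) for v in net) / (1 - eps)
print(len(net), np.linalg.norm(M, 2), net_bound)   # true operator norm vs. net bound
\end{verbatim}
}
For a genuinely maximal net the net-based quantity printed above dominates the operator norm, which is the content of lemma \ref{lemma2}.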
\begin{lemma}\label{lemma2}
Let $0 < \varepsilon < 1$ and $\Sigma(\varepsilon)$ be the maximal $\varepsilon$-net of the sphere $\mathcal{S}$, that is the set of points in $\mathcal{S}$ separated from each other by a distance of at least $\varepsilon$ and which is maximal with respect to set inclusion. Then for any $n \times n$ matrix $M$ and any $\lambda > 0$, we
have
\begin{equation}\label{lemma2_eq}
\mathbb{P}( \| M \|_{op} > \lambda) \leq \mathbb{P}( \bigcup \limits_{v \in \Sigma(\varepsilon) } \| \mathbf{M} v \| > \lambda (1-\varepsilon) )
\end{equation}
\end{lemma}
\begin{proof}
From the definition of the operator norm (Definition \ref{operatornorm_def}) as a supremum, using the fact that the map $x \rightarrow \| M x \|$ is Lipschitz, hence continuous, and that the unit sphere $\mathcal{S}$ is compact as we are in finite dimension, we can find $x \in \mathcal{S}$ that attains the supremum (recall that a continuous function attains its supremum on a compact set):
\begin{equation}
\| M x \| = \| M \|_{op}
\end{equation}
We can eliminate the trivial case of $x$ belonging to $\Sigma(\varepsilon)$
as the inequality \ref{lemma2_eq} is easily verified in this scenario.
In the other case,
where $x$ does not belong to $\Sigma(\varepsilon)$, there must exist a point $y$ in $\Sigma(\varepsilon)$
whose distance to $x$ is less than $\varepsilon$ (otherwise we would have a contradiction of the maximality of $\Sigma(\varepsilon)$
by including $x$ to $\Sigma(\varepsilon)$).
We are now going to use the Lipschitz property of the map $x \rightarrow \| M x \|$, whose Lipschitz constant is given by $\| M \|_{op}$.
Since $\| x- y \| \leq \varepsilon$,
the Lipschitz property gives us
\begin{equation}
\| M (x-y) \| \leq \| M \|_{op}\| x- y \|\leq \| M \|_{op} \varepsilon
\end{equation}
The triangle inequality gives us
\begin{equation}
\| M \|_{op} = \| M x \| \leq \| M (x-y) \| + \| M y \| \leq \ \| M \|_{op} \varepsilon + \| M y \|
\end{equation}
Hence,
\begin{equation}
\| M y \| \geq \| M \|_{op} (1-\varepsilon)
\end{equation}
In particular, if $\| M \|_{op} > \lambda$, then $\| M y \| > \lambda (1-\varepsilon)$, which concludes the proof.
\end{proof}
\begin{remark}
Lemma \ref{lemma2} is very intuitive. The continuity of the map $x \rightarrow \| M x \|$ implies that it attains its maximum on the compact unit sphere. By a packing argument, there is necessarily a point of the maximal set $\Sigma(\varepsilon)$ at distance less than $\varepsilon$ from this optimum. As the map $x \rightarrow \| M x \|$ is Lipschitz, the decrease of $x \rightarrow \| M x \|$ between the optimum and this point is at most $ \| M \|_{op} \varepsilon $, since $ \| M \|_{op} $ is the Lipschitz constant.
\end{remark}
We recall, last but not least, that the cardinality of the maximal $\varepsilon$-net $\Sigma(\varepsilon)$ of the sphere $\mathcal{S}$ is of order at most $\varepsilon^{-(n-1)}$, where $n-1$ is the dimension of the sphere, as stated in the following lemma.
\begin{lemma}\label{lemma3}
Let $0 < \varepsilon < 1$, and let $\Sigma(\varepsilon)$ be a maximal $\varepsilon$-net of the unit sphere $\mathcal{S}$. Then $\Sigma(\varepsilon)$ has cardinality at most
$\frac{C }{\varepsilon^{n-1}} $ for some non negative constant $C > 0$ and at least $\frac{c}{\varepsilon^{n-1}}$ for some constant $c >0$.
\end{lemma}
\begin{proof}
The proof is quite intuitive and simple. It relies on a volume packing argument. The balls of radius $\varepsilon / 2$ centered around each point of $\Sigma(\varepsilon)$ are disjoint, and there are as many of them as the cardinality of $\Sigma(\varepsilon)$. By the triangle inequality, and using the fact that $\varepsilon / 2 <1$, all these balls are contained in the region between the large ball of radius $(1+\varepsilon / 2)$ centered at the origin and the smaller ball of radius $(1-\varepsilon / 2)$ with the same center. Hence (using the fact that the volume of a ball is a constant times the radius to the power the dimension of the space), we can pack at most
$$
\frac{ (1+\varepsilon / 2 )^{n} - (1-\varepsilon /2)^{n} }{ (\varepsilon /2)^n}
$$
of these balls, which proves that the cardinality is at most $(C / \varepsilon) ^{n-1} $ for some non negative constant $C > 0$, since for small $\varepsilon$ the ratio above is of order $n \, 2^n / \varepsilon ^{n-1}$.
Conversely, for $\varepsilon < 2$, the set $\Sigma(\varepsilon)$ is not empty. If we sequentially pack the space between the same large and small balls of radius $(1+\varepsilon / 2)$ and $(1-\varepsilon / 2)$ respectively, both centered at the origin, with balls that do not intersect, have radius $\varepsilon / 2$ and have centers on the unit sphere, we can take the set of the centers of these balls. As the balls of radius $\varepsilon / 2$ do not intersect, by the triangle inequality their centers are at distance at least $\varepsilon$ from each other. Because $\Sigma(\varepsilon)$ is a maximal set, its cardinality is at least equal to the number of centers created in this way. We can pack
$$
\frac{ (1+\varepsilon / 2 )^{n} - (1-\varepsilon /2)^{n} }{ (\varepsilon /2)^n}
$$
of these centers, which proves that the cardinality is at least $(c / \varepsilon) ^{n-1} $ for some non negative constant $c > 0$.
\end{proof}
\begin{proof}
We can now prove proposition \ref{prop1} as follows.
Using lemma \ref{lemma2} and the union bound, we have
\begin{equation}
\mathbb{P}( \| M \|_{op} > A \sqrt{n}) \leq \mathbb{P}( \bigcup \limits_{v \in \Sigma(\varepsilon) } \| \mathbf{M} v \| > A \sqrt{n} (1-\varepsilon) ) \leq \sum_{v \in \Sigma(\varepsilon)} \mathbb{P}( \| \mathbf{M} v \| > A \sqrt{n} (1-\varepsilon) )
\end{equation}
Lemma \ref{lemma1} states that for $v \in \mathcal{S}$, there exist absolute constants $C,c > 0$ such that
\begin{equation}
\mathbb{P}( \| \mathbf{M} v \| > A \sqrt{n} (1-\varepsilon) ) \leq C \exp( -c A (1-\varepsilon) n )
\end{equation}
Since the cardinality of $\Sigma(\varepsilon) $ is bounded by $\frac{K }{\varepsilon^{n-1}}$, we can upper bound $\mathbb{P}( \| M \|_{op} > A \sqrt{n})$ by
\begin{equation}
\mathbb{P}( \| M \|_{op} > A \sqrt{n}) \leq \frac{K }{\varepsilon^{n-1}} C \exp( -c A (1-\varepsilon) n )
\end{equation}
Fixing $\varepsilon = 1 / 2$, setting $C ' = K C $ and taking $c'$ such that $c'A = cA / 2 - \ln 2$, we have
\begin{equation}
\mathbb{P}( \| M \|_{op} > A \sqrt{n}) \leq C' \exp( -c' A n )
\end{equation}
which concludes the proof.
\end{proof}
\subsection{Proof of proposition \ref{prop2}}\label{proof2}
\begin{proof}
This is exactly the same reasoning as for proposition \ref{prop1}, but starting from the fact that each row of the matrix $M$ is uniformly sub-Gaussian for the $L_a$ norm. This means that there exist absolute constants $B,b>0$ such that for any $i=1, \ldots,n$
\begin{equation}
\mathbb{P}( \| R_i \|_a \geq t ) \leq B \exp(-b t^2).
\end{equation}
for the norm $L_a$. The independence of the rows allows us to prove the following lemma (similar to lemma \ref{lemma1}): under the condition of proposition \ref{prop2}, there exist absolute constants $C, c > 0$ such that for any $u \in \mathcal{S}$, we have
\begin{equation}\label{lemma1_eq2}
\mathbb{P} ( \| M u \| > A \sqrt n )\leq C \exp( -c A n)
\end{equation}
for all $ A \geq C$. Lemmas \ref{lemma2} and \ref{lemma3} remain unchanged, allowing us to conclude.
\end{proof}
\end{document}
|
\begin{document}
\currannalsline{164}{2006}
\title{A Mass Transference Principle\\ and the
Duffin-Schaeffer conjecture\\ for Hausdorff measures}
\acknowledgements{Research supported by EPSRC
GR/R90727/01.
\hglue7pt$^{\textstyle\ast\ast}$Royal Society
University Research
Fellow}
\twoauthors{Victor Beresnevich$^{\textstyle\ast}$}{Sanju Velani\raise2.75pt\hbox{${\textstyle\ast\ast}$}}
\institution{Institute of Mathematics,
Academy of Sciences of Belarus, Minsk, Belarus
\\\current{University of York, York, England}\\
\email{[email protected]}
\\
\vglue-9pt
University of York, York, England
\\
\email{[email protected]}}
\shorttitle{A mass transference principle}
\shortname{Victor Beresnevich and Sanju Velani}
\centerline{{\it Dedicated to Tatiana Beresnevich}}
\vglue15pt
\centerline{\bf Abstract}
\vglue12pt A Hausdorff measure version of the
Duffin-Schaeffer conjecture in metric number theory is introduced
and discussed. The general conjecture is established modulo the
original conjecture. The key result is a Mass Transference
Principle which allows us to transfer Lebesgue measure theoretic
statements for $\limsup$ subsets of ${\Bbb R}^k$ to Hausdorff measure
theoretic statements. In view of this, the Lebesgue theory of
$\limsup $ sets is shown to underpin the general Hausdorff
theory. This is rather surprising since the latter theory is
viewed to be a subtle refinement of the former.
\section{Introduction}
Throughout $\psi:{\Bbb R}^+\to{\Bbb R}^+$ will denote a real, positive
function
and will be referred to as an
\emph{approximating function}. Given an approximating function
$\psi$, a point ${\bf y}=(y_1,\dots,y_k)\in{\Bbb R}^k$ is called {\it
simultaneously $\psi$-approximable} if there are infinitely many
$q\in{\Bbb N}$ and ${\bf p}= (p_1, \ldots,p_k) \in {\Bbb Z}^k$ such that
\begin{equation}
\left|y_i - \frac{p_i}{q}\right|\ <\
\frac{\psi(q)}{q} \hspace{9mm} (p_i,q) = 1 \, , \hspace{5mm} 1
\leqslant i \leqslant k \ . \label{1}
\end{equation}
The set of simultaneously
$\psi$-approximable points in ${\Bbb I}^k:=[0,1]^k$ will be denoted by
${\cal S}_k(\psi)$. For convenience, we work within the unit cube
${\Bbb I}^k$ rather than ${\Bbb R}^k$; it makes full measure results easier
to state and avoids ambiguity. In fact, this is not at all
restrictive as the set of simultaneously $\psi$-approximable
points is invariant under translations by integer vectors.
The pairwise co-primeness condition
imposed in the above definition clearly ensures that the rational
points $(p_1/q, \ldots,p_k/q)$ are distinct. To some extent the
approximation of points in ${\Bbb I}^k$ by {\em distinct} rational
points should be the main feature when defining $ {\cal S}_k(\psi)$ in
which case pairwise co-primeness in (\ref{1}) should be replaced
by the condition that $(p_1, \ldots,p_k,q) =1$. Clearly, both
conditions coincide in the case $k=1$. We shall return to this
discussion in Section \ref{DSR}.
\Subsec{The Duffin-Schaeffer conjecture\label{tds}}
On making use of the fact that ${\cal S}_k(\psi)$ is a $\limsup$ set, a
simple consequence of the Borel-Cantelli lemma from probability
theory is that
\begin{equation*}
m({\cal S}_k(\psi)) = 0 \ \ \ \ \ {\rm if \ } \ \ \ \ \
\sum_{n=1}^{\infty} \left(\phi(n) \, \psi(n)/n \right)^k \ < \
\infty \ ,
\end{equation*} where $m$ is $k$-dimensional Lebesgue
measure and $\phi$ is the Euler function. In view of this, it is
natural to ask: what happens if the above sum diverges? It is
conjectured that ${\cal S}_k(\psi)$ is of full measure.
\demo{\scshape Conjecture 1}
\begin{equation}
m({\cal S}_k(\psi)) = 1 \ \ \ \ \ {\rm if \ } \ \ \ \ \
\sum_{n=1}^{\infty} \left(\phi(n) \, \psi(n)/n \right)^k \ = \
\infty \ . \label{3}
\end{equation}
When $k=1$, this is the famous Duffin-Schaeffer conjecture in
metric number theory \cite{DS}. Although various partial results
are known, it remains a major open problem and has attracted much
attention (see \cite{Harman} and references within). For $k \geqslant
2$, the conjecture was formally stated by Sprind\v{z}uk \cite{Sp}
and settled by Pollington and Vaughan \cite{PV}.
\begin{thschmidt}
For $k\geqslant 2${\rm ,} Conjecture {\rm 1} is true.
\end{thschmidt}
If we assume that the approximating function $\psi$ is monotonic,
then we are in good shape thanks to Khintchine's fundamental
result.
\begin{thkh}
If $\psi$ is monotonic{\rm ,} then Conjecture $1$ is true.
\end{thkh}
Indeed, the whole point of Conjecture 1 is to remove the
monotonicity condition on $\psi$ from Khintchine's theorem. Note
that in the case that $\psi$ is monotonic, the
convergence/divergence behavior of the sum in (\ref{3}) is
equivalent to that of $\sum \psi(n)^k$; i.e. the co-primeness
condition imposed in (\ref{1}) is irrelevant.
\Subsec{The Duffin-Schaeffer conjecture for Hausdorff
measures} In this paper, we consider a generalization of
Conjecture 1 which in our view is the `real' problem and the truth
of which yields a complete metric theory. Throughout, $f$ is a
dimension function and ${\cal H}^f$ denotes the Hausdorff $f$-measure;
see Section \ref{HM}. {\em Also, we assume that $r^{-k} f(r)$ is
monotonic}; this is a natural condition which is not
particularly restrictive. A straightforward covering argument
making use of the $\limsup$ nature of ${\cal S}_k(\psi)$ implies that
\begin{equation}
{\cal H}^f({\cal S}_k(\psi)) = 0 \ \ \ \ \ {\rm if \ } \ \ \ \ \
\sum_{n=1}^{\infty} f( \psi(n)/n ) \; \phi(n)^k \ < \ \infty \ .
\label{4}
\end{equation}
\noindent In view of this, the following is a `natural'
generalization of Conjecture 1 and can be viewed as the
Duffin-Schaeffer conjecture for Hausdorff measures.
\demo{\scshape Conjecture 2}
$\displaystyle{{\cal H}^f({\cal S}_k(\psi)) = {\cal H}^f({\Bbb I}^k)}$ {\it if}
$\displaystyle{\sum_{n=1}^{\infty}
f( \psi(n)/n ) \; \phi(n)^k \ = \ \infty}$.
\Enddemo
Again, in the case that $\psi$ is monotonic we are in good shape.
This time, thanks to Jarn\'{\i}k's fundamental result.
\begin{thjar}
If $\psi$ is monotonic{\rm ,} then Conjecture {\rm 2} is true.
\end{thjar}
To be precise, the above theorem follows on combining
Khintchine's theorem together with Jarn\'{\i}k's theorem as
stated in \cite[\S8.1]{BDV}; the co-primeness condition imposed
on the set ${\cal S}_k(\psi)$
is irrelevant
since $\psi$ is monotonic. The point is that in Jarn\'{\i}k's
original statement, various additional hypotheses on $f$ and
$\psi$ were assumed and they would prevent us from stating the
above clear cut version. Note that Jarn\'{\i}k's theorem together
with (\ref{4}), imply precise Hausdorff dimension results for the
sets ${\cal S}_k(\psi)$; see \cite[\S1.2]{BDV}.
\Subsec{Statement of results\label{sor}}
Regarding Conjecture 2, nothing seems to be known outside of
Jarn\'{\i}k's theorem which relies on $\psi$ being monotonic. Of
course, the whole point of Conjecture 2 is to remove the
monotonicity condition from Jarn\'{\i}k's theorem. Clearly, on
taking ${\cal H}^f = m $ we have that
$$ Conjecture \ {\rm 2} \hspace{4mm}
\Longrightarrow \hspace{4mm} Conjecture \ {\rm 1} \ . $$
We shall prove the converse of this statement which turns out to
have obvious but nevertheless rather unexpected consequences.
\begin{theorem}
Conjecture {\rm 1} $ \hspace{4mm} \Longrightarrow \hspace{4mm} $
Conjecture {\rm 2}. \label{main}
\end{theorem}
Theorem \ref{main} together with Theorem PV gives:
\begin{corollary}
For $k\geqslant 2${\rm ,} Conjecture {\rm 2} is true. \label{cor1}
\end{corollary}
Theorem \ref{main} gives:
\begin{corollary} Khintchine\/{\rm '}\/s theorem $ \hspace{4mm}
\Longrightarrow \hspace{4mm} $ Jarn\'{\i}k\/{\rm '}\/s theorem.
\label{cor2}
\end{corollary}
It is remarkable that Conjecture 1, which is only concerned with
the metric theory of ${\cal S}_k(\psi)$ with respect to the ambient
measure $m$, underpins the whole general metric theory. In
particular, as a consequence of Corollary \ref{cor2}, if $\psi$ is
monotonic then Hausdorff dimension results for ${\cal S}_k(\psi)$ (i.e.\
the general form of the Jarn\'{\i}k-Besicovitch theorem) can in
fact be obtained via Khintchine's Theorem. At first, this seems
rather counterintuitive. In fact, the dimension results for
monotonic $\psi$ are a trivial consequence of Dirichlet's theorem
(see \S\ref{secJB}).
The key to establishing Theorem \ref{main} is the Mass
Transference Principle of Section \ref{secMTP}. In short, this allows us
to transfer $m$-measure theoretic statements for $\limsup$ subsets
of ${\Bbb R}^k$ to ${\cal H}^f$-measure theoretic statements. In
Section \ref{secmtpg}, we state a general Mass Transference Principle
which allows us to obtain the analogue of Theorem \ref{main} for
$\limsup$ subsets of locally compact metric spaces.
\section{Preliminaries}
Throughout $(X,d)$ is a metric space such that for every $\rho>0$
the space $X$ can be covered by a countable collection of balls
with diameters $<\rho$. A ball $B=B(x,r):=\{y\in X:d(x,y)\leqslant r\}$
is defined by a fixed centre and radius, although these in general
are not uniquely determined by $B$ as a set. By definition, $B$ is
a subset of $X$. For any $\lambda>0$, we denote by $\lambda
B$ the ball $B$ scaled by a factor $\lambda$; i.e. $\lambda B(x,
r):= B(x, \lambda r)$.
\Subsec{Hausdorff measures\label{HM}}
In this section we give a brief account of Hausdorff measures. A
{\em dimension function} $f \, : \, {\Bbb R}^+ \to {\Bbb R}^+ $ is a continuous,
nondecreasing function such that $f(r)\to 0$ as $r\to 0 \, $. Given
a ball $B=B(x,r)$, the quantity
\begin{equation}\label{e:004}
V^f(B)\,:=\,f(r)
\end{equation}
will be referred to as the {\em $f$-volume of $B$}. If $B$ is a
ball in ${\Bbb R}^k$, $m$ is $k$-dimensional Lebesgue measure and
$f(x)=m(B(0,1))x^k$, then $V^f$ is simply the volume of $B$ in the
usual geometric sense; i.e. $V^f(B)=m(B)$. In the case when
$f(x)=x^s$ for some $s\geqslant0$, we write $V^s$ for $V^f$.
The Hausdorff $f$-measure with respect to the dimension function
$f$ will be denoted throughout by ${\cal H}^{f}$ and is defined as
follows. Suppose $F$ is a subset of $(X,d)$. For $\rho
> 0$, a countable collection $ \left\{B_{i} \right\} $ of balls in
$X$ with $r(B_i) \leqslant \rho $ for each $i$ such that $F \subset
\bigcup_{i} B_{i} $ is called a {\em $ \rho $-cover for $F$}.
Clearly such a cover exists for every $\rho > 0$. For a dimension
function $f$ define $$
{\cal H}^{f}_{\rho} (F) \, = \, \inf \ \sum_{i} V^f(B_i),
$$
where the infimum is taken over all $\rho$-covers of $F$. The {\it
Hausdorff $f$-measure} $ {\cal H}^{f} (F)$ of $F$ with respect to
the dimension function $f$ is defined by $$ {\cal H}^{f} (F) :=
\lim_{ \rho \rightarrow 0} {\cal H}^{f}_{\rho} (F) \; = \;
\sup_{\rho > 0 } {\cal H}^{f}_{\rho} (F) \; . $$
A simple consequence of the definition of $ {\cal H}^f $ is the
following useful fact.
\begin{lemma}
If $ \, f$ and $g$ are two dimension functions such that the ratio
$f(r)/g(r) \to 0 $ as $ r \to 0 ${\rm ,} then ${\cal H}^{f} (F) =0 $
whenever ${\cal H}^{g} (F) < \infty $. \label{dimfunlemma}
\end{lemma}
In the case that $f(r) = r^s$ ($s \geqslant 0$), the measure $ {\cal H}^f $
is the usual $s$-dimensional Hausdorff measure ${\cal H}^s $ and the
Hausdorff dimension $\dim F$ of a set $F$ is defined by $$ \dim \,
F \, := \, \inf \left\{ s : {\cal H}^{s} (F) =0 \right\} = \sup
\left\{ s : {\cal H}^{s} (F) = \infty \right\} . $$ In particular
when $s$ is an integer and $X = {\Bbb R}^s$, ${\cal H}^s$ is comparable to
the $s$-dimensional Lebesgue measure. Actually, ${\cal H}^s$ is a constant
multiple of the $s$-dimensional Lebesgue measure but we shall not
need this stronger statement.
For further details see \cite{falc, mat}. A general and classical
method for obtaining a lower bound for the Hausdorff $f$-measure
of an arbitrary set $F$ is the following mass distribution
principle.
\demo{\scshape Lemma (Mass Distribution Principle)} {\em
Let $ \mu $ be a probability measure supported on a subset $F$ of $
(X,d) $.
Suppose there are positive constants $c$ and $r_o$ such that} $$
\mu ( B ) \leqslant \, c \; V^f(B) \;
$$ {\em for any ball $B$ with
radius $r \leqslant r_o \, $. If $E$ is a subset of $F$ with $\mu(E) =
\lambda > 0$ then $ {\cal H}^{f} (E) \geqslant \lambda/c \, $. }
\Proof If $ \left\{B_{i} \right\} $ is a
$\rho$-cover of $E$ with $\rho \leqslant r_o$ then $$ \lambda = \mu(E)
= \mu \left( \cup_{i} B_i \right) \leqslant \sum_i \mu \left( B_i
\right) \leqslant c \sum_i V^f(B_i) \; . $$ It follows that $
{\cal H}^{f}_{\rho } (E) \geqslant \lambda/c $ for any $\rho \leqslant r_o$.
On letting $\rho \to 0 \, $, the quantity ${\cal H}^{f}_{ \rho }
(E)$ increases and so we obtain the required result.
\Endproof\vskip4pt
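As a simple illustration of how this principle is used, take $X={\Bbb R}$, $F=[0,1]$, $f(r)=r$ and let $\mu$ be Lebesgue measure restricted to $[0,1]$. Then $\mu(B(x,r)) \leqslant 2r = 2\, V^f(B(x,r))$ for every ball, so the principle with $c=2$ and $E=F$ gives ${\cal H}^{f}([0,1]) \geqslant 1/2$. Lower bounds for Hausdorff measures are obtained in exactly this manner throughout the paper; the main difficulty in applications is of course the construction of a suitable measure $\mu$.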
The following basic covering lemma will be required at various
stages \cite{jh},~\cite{mat}.
\begin{lemma}[The $5r$ covering lemma]\label{5r}
Every family ${\cal F}$ of balls of uniformly bounded diameter in
a metric space $(X,d)$ contains a disjoint subfamily ${\cal G} $
such that $$ \bigcup_{B \in {\cal F} } B \ \subset \ \bigcup_{B
\in {\cal G} } 5B \ \ \ . $$
\end{lemma}
\Subsec{Positive and full measure sets}
Let $\mu$ be a finite measure supported on
$(X,d)$. The measure $\mu$ is said to be {\em doubling} if there
exists a constant $\lambda
> 1 $ such that for all $x \in X$ and $r>0$ $$\mu(B(x,2r)) \, \leqslant \, \lambda\,
\mu(B(x,r)) \ . $$
Clearly, the measure ${\cal H}^k$ is a doubling measure on ${\Bbb R}^k$. In
this section we state two measure theoretic results which will be
required during the course of the paper.
\begin{lemma}\label{lem1a}
Let $(X,d)$ be a metric space and let $\mu$ be a finite doubling
measure on $X$ such that any open set is $\mu$ measurable. Let $E$
be a Borel subset of\/ $X$. Assume that there are constants
$r_0,c>0$ such that for any ball $B$ with $r(B)<r_0$ and center
in $X${\rm ,} we have that $\mu(E\cap B)\geqslant c\ \mu(B)$. Then{\rm ,} for any
ball $B$
$$\mu(E\cap B) \, = \, \mu(B) \ . $$
\end{lemma}
\begin{lemma}\label{lem3a}
Let $(X,d)$ be a metric space and $\mu$ be a finite measure on $X$.
Let $B$ be a ball in $X$ and $E_n $ a sequence of
$\mu$-measurable sets. Suppose there exists a constant $c > 0$
such that $\limsup_{n \to \infty} \mu(B \, \cap \, E_n) \geqslant c \;
\mu(B)$. Then $$ \mu (B \, \cap \, \limsup_{n\to\infty}E_n ) \ \geqslant
\ c^2 \, \mu(B) \ \ . $$
\end{lemma}
For the details regarding these two lemmas see \cite[\S8]{BDV}.
\section{A mass transference principle \label{secMTP}}
Given a dimension function $f$ and a ball $B=B(x,r)$ in ${\Bbb R}^k$,
we define another ball
\begin{equation}\label{e:006}
\textstyle B^f:=B(x,f(r)^{1/k}) \ .
\end{equation}
When $f(x)=x^s$ for some $s>0$ we also adopt the notation $B^s$,
i.e.\ $ B^s:=B^{(x\mapsto x^s)}. $ It is readily verified that
\begin{equation}\label{e:008}
B^k=B.
\end{equation}
Next, given a collection $K$ of balls in ${\Bbb R}^k$, denote by $K^f$
the collection of balls obtained from $K$ under the
transformation (\ref{e:006}); i.e. $K^f := \{ B^f : B \in K \} $.
The following property immediately follows from (\ref{e:004}),
(\ref{e:006}) and (\ref{e:008}):
\begin{equation}\label{e:009}
V^k(B^f)=V^f(B^k)\qquad\text{for any ball $B$.}
\end{equation}
Note that (\ref{e:009}) could have been taken to be a definition
in which case (\ref{e:006}) would follow.
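Spelled out, for $B=B(x,r)$ we have $r(B^f)=f(r)^{1/k}$, and therefore
$$
V^k(B^f)\,=\,\big(f(r)^{1/k}\big)^{k}\,=\,f(r)\,=\,V^f(B)\,=\,V^f(B^k)\,,
$$
the last equality being (\ref{e:008}).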
Recall that ${\cal H}^k$ is comparable to the $k$-dimensional Lebesgue
measure $m$. Trivially, for any ball $B$ we have that $V^k(B)$ is
comparable to $m(B)$. Thus there are constants
$0<c_1<1<c_2<\infty$ such that for any ball $B$
\begin{equation}\label{e:010}
c_1\ V^k(B)\leqslant {\cal H}^k(B)\leqslant c_2\ V^k(B).
\end{equation}
\noindent In fact, we have the stronger statement that $ {\cal H}^k(B)
$ is a constant multiple of $V^k(B) $. However, the analogue of
this stronger statement is not necessarily true in the general
framework considered in Section \ref{secmtpg} whereas (\ref{e:010}) is.
Therefore, we have opted to work with (\ref{e:010}) even in our
current setup. Given a sequence of balls $B_i$, $i=1,2,3,\ldots$,
as usual its limsup set is
$$
\limsup_{i\to\infty}B_i:=\bigcap_{j=1}^\infty\ \bigcup_{i\geqslant j}B_i
\ .
$$
\noindent The following theorem is without doubt the main result
of this paper. It is the key to establishing the
Duffin-Schaeffer conjecture for Hausdorff measures.
\begin{theorem}[Mass Transference Principle]\label{thm3}
Let $\{B_i\}_{i\in{\Bbb N}}$ be a sequence of balls in ${\Bbb R}^k$ with
$r(B_i)\to 0$ as $i\to\infty$. Let $f$ be a dimension function
such that $x^{-k}f(x)$ is monotonic and suppose that for any ball
$B$ in ${\Bbb R}^k$
\begin{equation}\label{e:011}
{\cal H}^k\big(\/B\cap\limsup_{i\to\infty}B^f_i{}\,\big)={\cal H}^k(B) \ .
\end{equation}
Then{\rm ,} for any ball $B$ in ${\Bbb R}^k$
\begin{equation*}
{\cal H}^f\big(\/B\cap\limsup_{i\to\infty}B^k_i\,\big)={\cal H}^f(B) \ .
\end{equation*}
\end{theorem}
{\em Remark}\/ 1. ${\cal H}^k$ is comparable to the Lebesgue
measure $m$ in ${\Bbb R}^k$. Thus (\ref{e:011}) simply states that the
set $\limsup B_i^f$ is of full $m$ measure in ${\Bbb R}^k$, i.e.\ its
complement in ${\Bbb R}^k$ is of $m$ measure zero.
\demo{Remark $2$} In the statement of Theorem~\ref{thm3}
the condition $r(B_i)\to0$ as $i\to\infty$ is redundant.
However,
it is included to avoid unnecessary further discussion.
\demo{Remark $3$} If $x^{-k}f(x) \to l $ as $x \to 0 $
and $l$ is finite then the above statement is relatively
straightforward to establish. The main substance of the Mass
Transference Principle is when $x^{-k}f(x)\to\infty$ as $x\to0$.
In this case, it trivially follows via Lemma \ref{dimfunlemma}
that ${\cal H}^f(B)=\infty$.
\Subsec{Proof of Theorem {\rm \ref{main}}\label{thmmain}}
First of all let us dispose of the case that $\psi(r)/r
\nrightarrow 0 $ as $r \to \infty$. Then trivially, ${\cal S}_k(\psi)=
{\Bbb I}^k$ and the result is obvious. Without loss of generality,
assume that $\psi(r)/r \to 0 $ as $r \to \infty$. We are given
that $ \sum f( \psi(n)/n ) \; \phi(n)^k = \infty $. Let $
\theta(r) := r \, f( \psi(r)/r )^{1/k}$. Then $\theta$ is an
approximating function and $ \sum (\phi(n) \, \theta(n)/n )^k =
\infty $. Thus, on using the supremum norm, Conjecture 1 implies
that $ {\cal H}^k(B \cap {\cal S}_k(\theta)) = {\cal H}^k(B \cap {\Bbb I}^k)$ for any
ball $B$ in ${\Bbb R}^k$. It now follows via the Mass Transference
Principle that $ {\cal H}^f({\cal S}_k(\psi)) = {\cal H}^f({\Bbb I}^k) $ and this
completes the proof of Theorem \ref{main}.
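Note that, in applying the Mass Transference Principle here, the set
${\cal S}_k(\psi)$ is the limsup set of the balls (with respect to the
supremum norm) $B\big({\bf p}/q,\psi(q)/q\big)$ centred at the relevant
rational points, and for such a ball
$$
B\big({\bf p}/q,\psi(q)/q\big)^{f} \ = \ B\big({\bf p}/q, f(\psi(q)/q)^{1/k}\big)
\ = \ B\big({\bf p}/q,\theta(q)/q\big) \ ;
$$
thus the full measure statement for ${\cal S}_k(\theta)$ obtained above is
precisely the hypothesis (\ref{e:011}) for these balls.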
\Subsec{The Jarn\'{\i}k-Besicovitch theorem}\label{secJB}
In the case $k=1$ and $\psi(x) := x^{-\tau}\!$, let us write
${\cal S}(\tau)$ for ${\cal S}_k(\psi)$. The Jarn\'{\i}k-Besicovitch
theorem states that $\dim {\cal S}(\tau) = d:= 2/(1+\tau) $ for $\tau
> 1$. This fundamental result is easily deduced on combining
Dirichlet's theorem with the Mass Transference Principle.
Dirichlet's theorem states that for any irrational $y \in {\Bbb R}$,
there exist infinitely many reduced rationals $p/q$ ($q>0$) such
that $|y - p/q| \leqslant q^{-2}$. With $f(x) := x^{d}$, (\ref{e:011})
is trivially satisfied and the Mass Transference Principle
implies that ${\cal H}^{d} ({\cal S}(\tau) ) = \infty $. Hence $\dim
{\cal S}(\tau) \geqslant d $. The upper bound is trivial. Note
that we have actually proved a lot more than simply the
Jarn\'{\i}k-Besicovitch theorem. We have proved that the
$s$-dimensional Hausdorff measure ${\cal H}^{s}$ of ${\cal S}(\tau)$ at
the critical exponent $s=d$ is infinite.
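Explicitly, here $k=1$, $\psi(q)/q=q^{-1-\tau}$ and $f(x)=x^{d}$ with
$d=2/(1+\tau)$, so for a ball $B=B(p/q,q^{-1-\tau})$ we have
$$
B^{f} \ = \ B\big(p/q,(q^{-1-\tau})^{d}\big) \ = \ B\big(p/q,q^{-2}\big) \ ,
$$
and Dirichlet's theorem states precisely that every irrational $y$ lies in
infinitely many such balls; hence $\limsup_{i\to\infty}B^{f}_i$ has full
Lebesgue measure and (\ref{e:011}) holds.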
\section{The $K_{G,B} $ covering lemma}
Before establishing the Mass Transference Principle we state and
prove the following covering lemma, which provides an equivalent
description of the full measure property (\ref{e:011}).
\begin{lemma}[The $K_{G,B} $ lemma] \label{lem1}
Let $\{B_i\}_{i\in{\Bbb N}}$ be a sequence of balls in ${\Bbb R}^k$ with
$r(B_i)\to 0$ as $i\to\infty$. Let $f$ be a dimension function
and for any ball $B$
in ${\Bbb R}^k$ suppose that {\rm (\ref{e:011})} is satisfied. Then for any
$B$ and any $G>1$ there is a finite
sub-collection $K_{G,B}\subset\{B_i\,:\,i\geqslant G\}$ such that the
corresponding balls in $K^f_{G,B}$ are disjoint{\rm ,} lie inside $B$ and
\begin{equation}\label{e:014}
{\cal H}^k\Big(\bigcup^\circ_{L\in K^f_{G,B}}L\Big) \ \geqslant \ \kappa
\ {\cal H}^k(B) \,
\hspace{12mm}\text{with}\quad\kappa := \mbox{\footnotesize$
\frac{1}{2} $}
(\mbox{\footnotesize$ \frac{c_1}{c_2} $} )^2 10^{-k} \ \ .
\end{equation}
\end{lemma}
{\it Proof of Lemma}~\ref{lem1}. Let ${\cal F}:=\{B^f_i\,:\,
B^f_i\ \cap \mbox{\footnotesize{$\frac{1}{2}$}}B \neq \emptyset \,
,\ i\geqslant G\}$. Since $f(x)\to0$ as $x\to0$ and $r(B_i)\to0$ as
$i\to\infty$, every ball $B^f_i$ in $ {\cal F}$ is contained
in $ B $ for $i$ sufficiently large. In view of the $5r$ covering
lemma (Lemma \ref{5r}), there exists a disjoint sub-family ${\cal
G} $ such that $$ \bigcup_{B^f_i\in {\cal F} } B^f_i \ \subset \
\bigcup_{B^f_i \in {\cal G} } 5B^f_i \ \ \ . $$ It follows that
\begin{eqnarray*}
{\cal H}^k \left(\bigcup_{B^f_i \in {\cal G} } 5B^f_i\right) \
\geqslant \ {\cal H}^k \big( \mbox{\footnotesize{$\frac{1}{2}$}} B \cap
\limsup_{i\to\infty}B^f_i \big) \stackrel{(\ref{e:011})}{\ = \ }
{\cal H}^k\big(\mbox{\footnotesize{$\frac{1}{2}$}} B \big)
\stackrel{(\ref{e:010})}{\ \geqslant \ }
\frac{c_1}{c_2} \ 2^{-k} \; {\cal H}^k(B) \ .
\end{eqnarray*}
\noindent However, since ${\cal G} $ is a disjoint collection of
balls we have that
\begin{eqnarray*}
{\cal H}^k \left(\bigcup_{B^f_i \in {\cal G} } 5B^f_i\right)
\stackrel{(\ref{e:010})}{\ \leqslant \ } \frac{c_2}{c_1} \ 5^{k} \ \
{\cal H}^k \left(\bigcup^\circ_{B^f_i \in {\cal G} } B^f_i\right)
\ .
\end{eqnarray*}
Thus, \begin{equation}
{\cal H}^k \left(\bigcup^\circ_{B^f_i \in
{\cal G} } B^f_i\right) \ \geqslant \ \left(\frac{c_1}{c_2}\right)^2 \
10^{-k} \; \ {\cal H}^k(B) \ . \label{kgb001} \end{equation} The balls
$B^f_i \in {\cal G}$ are disjoint, and since $r(B^f_i)\to0$ as
$i\to\infty$ we have that $$ {\cal H}^k \left(\bigcup^\circ_{B^f_i
\in {\cal G}\, : \, i \geqslant j } B^f_i\right) \ \to \ 0
\hspace{8mm} {\rm as } \hspace{4mm} j \to \infty \ \ . $$ Thus,
there exists some $j_0 > G $ for which
\begin{equation} {\cal H}^k \left(\bigcup^\circ_{B^f_i \in {\cal G} \,
: \, i
\geqslant j_0 } B^f_i\right) \ < \ \frac12 \;
\left(\frac{c_1}{c_2}\right)^2 \ 10^{-k} \; \ {\cal H}^k(B) \ .
\label{kgb002} \end{equation} Now let $K_{G,B} := \{B_i : B^f_i \in
{\cal G} , i < j_0 \} $. Clearly, this is a finite sub-collection
of $\{B_i\,:\,i\geqslant G\}$. Moreover, in view of (\ref{kgb001}) and
(\ref{kgb002}) the collection $K^f_{G,B}$
satisfies the desired properties.
\Endproof\vskip4pt
Lemma~\ref{lem1} shows that the full measure property
(\ref{e:011}) of the Mass Transference Principle implies the
existence of the collection $K^f_{G,B}$ satisfying (\ref{e:014}) of
the $K_{G,B}$ Lemma. For completeness, we prove that the converse is also
true.
\begin{lemma}\label{lem1-}
Let $\{B_i\}_{i\in{\Bbb N}}$ be a sequence of balls in ${\Bbb R}^k$ with
$r(B_i)\to 0$ as $i\to\infty$. Let $f$ be a dimension function
and for any ball $B$ and any $G>1${\rm ,} assume that there is a
collection $K^f_{G,B}$ of balls satisfying {\rm (\ref{e:014})} of
Lemma~{\rm \ref{lem1}.}
Then{\rm ,} for any ball $B$ the full measure property {\rm (\ref{e:011})} of the
Mass Transference Principle is
satisfied.
\end{lemma}
{\it Proof of Lemma~{\rm \ref{lem1-}}}. For any ball $B$ and
any $G\in{\Bbb N}$, the collection $K^f_{G,B}$ is contained in $B$ and is a
finite sub-collection of $\{B^f_i\}$ with $i\geqslant G$. We define
$$E_G \, := \, \bigcup_{L\in K^f_{G,B}}L \ .$$ Since $K^f_{G,B}$ is finite, we
have that \begin{equation*} \limsup_{G\to\infty}E_G \ \subset \
B\cap\limsup_{i\to\infty}B^f_i \ . \label{-inc} \end{equation*} It
follows from (\ref{e:014}) that $ {\cal H}^k(E_G) \geqslant \kappa \,
{\cal H}^k(B) $ which together with Lemma~\ref{lem3a} implies that
${\cal H}^k(\limsup_{G\to\infty} E_G)\geqslant
\kappa^2\,{\cal H}^k(B)$.
Hence, ${\cal H}^k(B\cap\limsup_{i\to\infty} B^f_i)\geqslant
\kappa^2\,{\cal H}^k(B)$. The measure ${\cal H}^k$ is doubling and so the
statement of the lemma follows on applying Lemma~\ref{lem1a}.
\Endproof
In short, Lemmas \ref{lem1} and \ref{lem1-} establish the
equivalence: $ \mbox{ (\ref{e:011})} \iff \mbox{(\ref{e:014})} $.
\vglue-22pt
\phantom{up}
\section{Proof of Theorem \ref{thm3} (Mass Transference Principle)}
\vglue-8pt
We start by considering the case that $x^{-k}f(x) \to l $ as $x
\to 0 $ and $l$ is finite. If $l=0$, then Lemma \ref{dimfunlemma}
implies that ${\cal H}^f(B)=0$ and since $ B\cap\limsup B^k_i \subset
B$ the result follows. If $l \neq 0 $ and is finite then ${\cal H}^f$
is comparable to ${\cal H}^k$ (in fact, ${\cal H}^f = l \, {\cal H}^k$).
Therefore the required statement follows on showing that
${\cal H}^k\big(\/B\cap\limsup_{i\to\infty}B^k_i\,\big)={\cal H}^k(B)$. This
can be established by first noting that the ratio of the radii of
the balls $B^k_i$ and $B^f_i$ is uniformly bounded between
positive constants and then
adapting the proof of Lemma~\ref{lem1-} in the obvious manner.
In view of the above discussion, we can assume without loss of
generality that $$x^{-k}f(x) \ \to \ \infty \hspace{6mm} {\rm as }
\hspace{6mm} x\to0 \ \ . $$ Note that in this case, it trivially
follows via Lemma \ref{dimfunlemma} that ${\cal H}^f(B)=\infty$. Fix
some arbitrary bounded ball $B_0$ of ${\Bbb R}^k$. The statement of the
Mass Transference Principle will therefore follow on showing that
$${\cal H}^f (B_0 \cap \limsup B_i) = \infty \ . $$
\noindent To achieve this we proceed as follows. For any constant
$\eta>1$, our aim is to construct a Cantor subset ${\Bbb K}_\eta$ of
$B_0 \cap \limsup B_i$ and a probability measure $\mu$ supported
on ${\Bbb K}_{\eta}$ satisfying the condition that for an arbitrary ball
$A$ of sufficiently small radius $r(A)
$
\begin{equation}
\mu(A) \; \ll \; \frac{V^f(A)}{\eta} \; , \label{task}
\end{equation}
where the implied constant in the Vinogradov symbol ($\ll$) is
absolute. By the Mass Distribution Principle, the above inequality
implies that $$ {\cal H}^f({\Bbb K}_{\eta}) \; \gg \; \eta \;. $$ Since
$ {\Bbb K}_{\eta}\subset B_0 \cap \limsup B_i$, we obtain that ${\cal
H}^f \left( B_0 \cap \limsup B_i \right)\gg \eta$. However, $\eta
$ can be made arbitrarily large whence ${\cal H}^f \left( B_0 \cap
\limsup B_i \right)=\infty$ and this proves Theorem \ref{thm3}.
In view of the above outline, the whole strategy of our proof is
centred around the construction of a `right type' of Cantor set
${\Bbb K}_{\eta}$ which supports a measure $\mu$ with the desired
property.
\Subsec{The desired properties of ${\Bbb K}_{\eta}$}
In this section we summarize the desired properties of the
Cantor set ${\Bbb K}_\eta$. The existence of ${\Bbb K}_\eta$ will be
established in the next section. Let $$
{\Bbb K}_\eta:=\bigcap_{n=1}^\infty {\Bbb K}(n) \ , $$ where each {\em level}
${\Bbb K}(n)$ is a finite union of disjoint balls such that
\begin{equation*}\label{e:016}
{\Bbb K}(1)\supset {\Bbb K}(2) \supset {\Bbb K}(3) \supset \ldots \ \ .
\end{equation*}
Thus, the levels are nested. Moreover, if $K(n)$ denotes the
collection of balls which constitute level $n$, then $K(n)
\subset \{B_i : i \in {\Bbb N} \} $ for each $n \geqslant 2$. We will define
$K(1) := B_0$. It is then clear that ${\Bbb K}_\eta$ is a subset of
$B_0 \cap \limsup B_i$. It will be convenient to also refer to the
collection $K(n)$ as the $n$-th level. Strictly speaking, ${\Bbb K}(n)=
\bigcup_{ B \in K(n)} B $ is the $n$-th level. However, from the
context it will be clear what we mean and no ambiguity should
arise.
The construction is inductive and the general idea is as
follows. Suppose the $(n-1)$-th level ${\Bbb K}(n-1)$ has been
constructed. The next level is constructed by `looking' locally at
each ball from the previous level. More precisely, for every
ball $B\in K(n-1)$ we construct the $(n,B)$-{\em local level}
denoted by $K(n,B)$ consisting of balls contained in $B$. Thus
\begin{equation*}\label{e:017}
K(n)\ := \ \bigcup_{B\in K(n-1)}K(n,B)\qquad\text{and}\qquad
{\Bbb K}(n)\ := \ \bigcup_{B\in K(n-1)}{\Bbb K}(n,B) \ \ ,
\end{equation*}
where $${\Bbb K}(n,B)\ := \ \bigcup_{L\in K(n,B)} \!\!\!\! L \ = \ B\cap
{\Bbb K}(n) \ \ . $$
\noindent As mentioned above, the balls in each level will be
disjoint. Moreover, we ensure that balls in each level scaled by
a factor of three are disjoint. This is property (P1) below. This
alone is not sufficient to obtain the required lower bound for
${\cal H}^f({\Bbb K}_\eta)$. For this purpose, every local level will be
defined as a union of {\em local sub-levels}. The $(n,B)$-local
level will take on the following form
\begin{equation*}\label{e:018}
K(n,B):=\bigcup_{i=1}^{l_B}K(n,B,i) \ ,
\end{equation*}
where $l_B$ is the number of local sub-levels (see property (P5)
below) and $K(n,B,i)$ is the $i$-th local sub-level. Within each
local sub-level $K(n,B,i)$, the separation of balls is much more
demanding than simply property (P1) and is given by property (P2)
below.
To achieve our main objective, the lower bound for
${\cal H}^f({\Bbb K}_\eta)$, we will require a controlled build up of
`mass' on the balls in every sub-level. The mass is related to the
$f$-volume $V^f$ of the balls in the construction and the overall
number of sub-levels. These are governed by properties (P3) and
(P5) below.
Finally, we will require that the $f$-volume of balls from one
sub-level to the next decreases sufficiently fast. This is
property (P4) below. However, the total $f$-volume within any one
sub-level remains about the same. This is a consequence of
property (P3) below.
We now formally state the properties (P1)--(P5) discussed above
together with a trivial property (P0).
\smallbreak {\it The properties of levels and sub-levels of ${\Bbb K}_\eta$}
\begin{enumerate}
\item[{\bf(P0)}] $K(1)$ consists of one ball, namely $B_0$.
\item[{\bf(P1)}] For any $n\geqslant 2$ and any $B\in K(n-1)$ the balls
$$
\{3L\ :\ L\in K(n,B)\}
$$
are disjoint and contained in $B$ and $3L\subset L^f$.
\item[{\bf(P2)}] For any $n\geqslant 2$, $B\in K(n-1)$ and any
$i\in\{1\,\ldots, l_B\}$ the
balls
$$
\{L^f\ :\ L\in K(n,B,i)\}
$$
are disjoint and contained in $B$.
\item[{\bf(P3)}]
For any $n\geqslant 2$, $B\in K(n-1)$ and $i\in\{1\,\ldots,l_B\}$ $$
\sum_{L\in K(n,B,i)}V^k(L^f) \ \geqslant\ c_3\ V^k(B), $$ where $c_3
:= \frac{\kappa\,c_1^2}{2\,c_2^2\,10^k} > 0 $ is an absolute
constant.
\item[{\bf(P4)}] For any $n\geqslant 2$, $B\in K(n-1)$, any
$i\in\{1\,\ldots,l_B-1\}$ and any $L\in K(n,B,i)$ and $M\in
K(n,B,i+1)$
$$
V^f(M)\leqslant \frac{1}{2}\ V^f(L).
$$
\item[{\bf(P5)}] The number of local sub-levels is defined by
$$
l_B:=\left\{
\begin{array}{lcl}
\displaystyle\left[\frac{c_2\,\eta}{c_3\,{\cal H}^k(B)}\right]+1 & , &
\mbox{ if
$B=B_0 := {\Bbb K}(1)$,}\\[5ex]
\displaystyle\left[\frac{V^f(B)}{c_3\,V^k(B)}\right]+1 & , & \mbox{ if
$B\in K(n)$ with $n\geqslant 2$}
\end{array}
\right.
$$
and satisfies $l_B\geqslant 2$ for $B\in K(n)$ with $n\geqslant2$.
\end{enumerate}
\Subsec{The existence of ${\Bbb K}_{\eta}$} \label{kantor1}
In this section we show that it is indeed possible to construct a
Cantor set ${\Bbb K}_{\eta}$ with the desired properties as discussed
in the previous section. We will use the notation $$ K_l(n,B) \
:= \ \bigcup_{i=1}^{l}K(n,B,i) \ . $$ Thus, $K(n,B)$ is simply
$K_{l_B}(n,B)$.
\demo{Level $1$} This is defined by
taking the arbitrary ball $B_0$. Thus, ${\Bbb K}(1) := B_0$ and property
(P0) is trivially satisfied.
\vskip9pt
We proceed by induction. Assume that the first $(n-1)$ levels
${\Bbb K}(1), {\Bbb K}(2),\break \ldots , {\Bbb K}(n-1)$ have been constructed. We now
construct the $n$-th level ${\Bbb K}(n)$.
\demo{Level $n$} To construct
this level we construct local levels $K(n,B)$ for each $B\in
K(n-1)$. Recall, that each local level $K(n,B)$ will consist of
sub-levels $K(n,B,i)$ where $1\leqslant i \leqslant l_B$ and $l_B $ is given
by property (P5). Therefore, fix some ball $B\in K(n-1)$ and a
sufficiently small constant $\varepsilon=\varepsilon(B)>0$ which will be
determined later. Let $G$ be sufficiently large so that
\begin{equation}\label{e:019}
r(3B_i)<r(B_i^f)\qquad\text{ whenever}\qquad i\geqslant G
\end{equation}
\begin{equation}\label{e:020} \frac{V^k(B_i)}{V^f(B_i)}< \varepsilon \
\frac{V^k(B)}{V^f(B)}\qquad\text{ whenever}\qquad i\geqslant G
\end{equation}
and
\begin{equation}\label{card}
\left[\frac{V^f(B_i)}{c_3\,V^k(B_i)}\right] \ \geqslant \ 1
\qquad\text{ whenever}\qquad i\geqslant G \ ,
\end{equation}
where $c_3$ is the constant appearing in property (P3) above. This
is possible since $x^k/f(x)\to0$ as $x\to0$. Now let ${\cal C}_G
:= \{B_i : i \geqslant G\}$. The local level $ K(n,B)$ will be
constructed to be a finite, disjoint sub-collection of ${\cal
C}_G$. Thus, (\ref{e:019})--(\ref{card}) are satisfied for any
ball $B_i$ in $ K(n,B)$. In particular, (\ref{card}) implies that
$l_{B_i} \geqslant 2 $ and so property (P5) will automatically be
satisfied for balls in $ K(n,B)$.
\demo{Sub-level $1$} With $B$ and $G$ as above, let $K_{G,B}$
denote the collection of balls arising from Lemma~\ref{lem1}.
Note, that in view of (\ref{e:019}) the collection $K_{G,B}$ is a
disjoint collection of balls. Define the first sub-level of $
K(n,B)$ to be $K_{G,B}$; that is $$ K(n,B,1) \ := \ K_{G,B} \ . $$
\noindent By Lemma~\ref{lem1}, it is clear that (P2) and (P3) are
fulfilled for $i=1$. By (\ref{e:019}) and the fact that the balls
in $K_{G,B}f$ are disjoint, we also have that (P1) is satisfied
within this first sub-level. Clearly, $ K(n,B,1) \subset {\cal
C}_G$.
\demo{Higher sub-levels} To construct higher
sub-levels we argue by induction. For $l<l_B$, assume that we
have constructed the sub-levels $K(n,B,1), \dots\break \dots, K(n,B,l)$
satisfying properties (P1)--(P4) with $l_B$ replaced by $l$ and
such that $ K_l(n,B) \subset {\cal C}_G$. In view of the latter,
(\ref{e:019})--(\ref{card}) are satisfied for any ball $L$ in $
K_l(n,B)$. In particular, in view of (\ref{card}), for any ball
$L$ in $ K_l(n,B)$ property (P5) is trivially satisfied; i.e. $l_L
\geqslant 2$. We now construct the next sub-level $K(n,B,l+1)$.
As every sub-level of the construction has to be well separated
from the previous ones, we first verify that there is enough
`space' left over in $B$ once we have removed the sub-levels
$K(n,B,1), \dots, K(n,B,l)$ from $B$. More precisely, let
$$A^{(l)} \ := \ \mbox{\small{$\frac{1}{2}$}}B \ \setminus
\bigcup_{L\in K_l(n,B)} \!\!\! 4L \ . $$ We show that
\begin{equation}\label{e:021}
{\cal H}^k\big(A^{(l)} \big)\geqslant \frac12\
{\cal H}^k(\mbox{\footnotesize{$\frac{1}{2}$}}B) \ .
\end{equation}
\noindent By construction and the fact that $l < l_B$,
\begin{eqnarray}&&\label{but}\\ {\cal H}^k(\bigcup_{L\in K_l(n,B)}4L) \!\!\!\!\! & \leqslant
& \sum_{L\in K_l(n,B)}{\cal H}^k(4L) \nonumber
\\ & \stackrel{(\ref{e:010})}{\,\leqslant\,} & 4^kc_2\sum_{L\in
K_l(n,B)}V^k(L) \ = \ 4^k c_2\sum_{L\in
K_l(n,B)}V^f(L)\,\frac{V^k(L)}{V^f(L)} \nonumber \\ &
\stackrel{(\ref{e:020})}{\,\leqslant\,} & 4^kc_2 \sum_{L\in
K_l(n,B)}V^f(L)\ \varepsilon \ \frac{V^k(B)}{V^f(B)} \nonumber \\ &
\stackrel{(\ref{e:009})}{\,=\,} & 4^kc_2 \ \varepsilon \
\frac{V^k(B)}{V^f(B)}\ \sum_{i=1}^{l}\sum_{L\in K(n,B,i)}V^k(L^f)
\nonumber \\ & \stackrel{(\ref{e:010})}{\,\leqslant\,} & \frac{4^kc_2\
\varepsilon}{c_1}\ \frac{V^k(B)}{V^f(B)}\ \sum_{i=1}^{l}\sum_{L\in
K(n,B,i)}{\cal H}^k(L^f) \nonumber \\ &
\stackrel{\,\,(P2) \,}{\,\leqslant\,} &
\frac{4^kc_2\ \varepsilon}{c_1}\
\frac{V^k(B)}{V^f(B)}\ ( l_B -1 ) \ {\cal H}^k(B) \ . \nonumber
\end{eqnarray}
\noindent Now, if $B= B_0$ let $$ \varepsilon = \varepsilon(B_0) \ := \
\frac{1}{2} \left(\frac{c_1}{c_2} \right)^2 \frac{c_3}{2^k \, 4^k}
\ \frac{V^f(B_0)}{\eta} \ . $$ If $B \neq B_0$, so that $ B \in
K(n) $ for some $n \geqslant 2$, let $ \varepsilon := \varepsilon(B_0) \times
(\eta / V^f(B_0))$ -- a constant independent of $B$, $B_0$ and
$\eta$.
It then follows from (\ref{but}), (P5) and (\ref{e:010}) that $$
{\cal H}^k(\bigcup_{L\in K_l(n,B)}4L) \ \leqslant \ \frac{1}{2} \
{\cal H}^k(\mbox{\footnotesize{$\frac{1}{2}$}}B) \ , $$ and this
clearly establishes (\ref{e:021}).
By construction, $K_l(n,B)$ is a finite collection of balls and so
$d_{\min} := \min \{r(L): L \in K_l(n,B) \} $ is well defined.
Let $B^{(l)}$ denote a generic ball of diameter $d_{\min}$. At
each point of $A^{(l)} $ place a ball $B^{(l)}$ and denote this
collection by ${\cal A}^{(l)}$. By the $5r$-covering lemma (Lemma
\ref{5r}), there exists a disjoint sub-collection ${\cal G}^{(l)}$
such that $$ A^{(l)} \ \subset \ \bigcup_{B^{(l)} \in {\cal
A}^{(l)} } B^{(l)} \ \subset \ \bigcup_{B^{(l)} \in {\cal G}^{(l)}
} 5 B^{(l)} \ \ \ . $$ The collection ${\cal G}^{(l)}$ is clearly
contained within $B$ and it is finite; the balls are disjoint
and all of the same size. Moreover, by construction
\begin{equation}
B^{(l)} \cap \bigcup_{L \in K_l(n,B)} 3 L =
\emptyset \hspace{7mm} {\rm for\ any \ } B^{(l)} \in {\cal
G}^{(l)} \ ; \label{blcoll} \end{equation} i.e.\ the balls in
${\cal G}^{(l)} $ do not intersect any of the $3L$ balls from the
previous sub-levels. It follows that $$ {\cal H}^k (
\bigcup_{B^{(l)} \in {\cal G}^{(l)} } 5 B^{(l)} ) \ \geqslantq \ {\cal
H}^k (A^{(l)} ) \ \stackrel{(\ref{e:021})}{\ \geqslantq \ } \mbox{
\small $\frac12$} \ {\cal H}^k(\mbox{\footnotesize{$\frac{1}{2}$}}B) \
. $$ On the other hand, since ${\cal G}^{(l)}$ is a disjoint
collection of balls we have that $${\cal H}^k ( \bigcup_{B^{(l)}
\in {\cal G}^{(l)} } 5 B^{(l)} ) \stackrel{(\ref{e:010})}{\ \leqslantq \
} \frac{c_2}{c_1} \ 5^{k} \ \ {\cal H}^k (\bigcup^\circ_{B^{(l)}
\in {\cal G}^{(l)} } B^{(l)} ) \ , $$ and so
\begin{equation}\label{e:025}
{\cal H}^k (\bigcup^\circ_{B^{(l)} \in {\cal G}^{(l)} } B^{(l)}
)\,\geqslant\,\frac{c_1}{2 c_2 5^k} \ \
{\cal H}^k(\mbox{\footnotesize{$\frac{1}{2}$}}B)\,.
\end{equation}
We are now in the position to construct the $(l+1)$-th sub-level
$K(n,B,\break l+1)$. To this end, let $G' \geqslant G $ be sufficiently
large so that
for every $i\geqslant G'$
\begin{equation}\label{e:026}
V^f(B_i) \ \leqslant \ \frac{1}{2}\ \min_{L\in K_l(n,B)}V^f(L)\,.
\end{equation}
We recall that $\{B_i\}$ is the original sequence of balls in
Theorem~\ref{thm3}. The number on the right of (\ref{e:026}) is
well defined and positive as there are only finitely many balls in
$K_l(n,B)$. Furthermore, (\ref{e:026}) is possible since
$\lim_{i\to\infty} r(B_i)=0$ and $\lim_{x \to 0}f(x)=0$. Now
to each ball $B^{(l)} \in {\cal G}^{(l)}$ we apply
Lemma~\ref{lem1} to obtain a collection $K_{G',B^{(l)}}$ and define $$
K(n,B,l+1):=\bigcup_{B^{(l)} \in {\cal G}^{(l)}} K_{G',B^{(l)}} \ . $$ Note
that since $G' \geqslant G$, (\ref{e:019})--(\ref{card}) remain valid
and $K(n,B,l+1)\subset {\cal C}_G$. We now verify properties
(P1)--(P5) for this sub-level.
In view of Lemma~\ref{lem1}, for any $B^{(l)} $ in $ {\cal
G}^{(l)}$ the collection $K^f_{G',B^{(l)}}$ is disjoint and
contained within $B^{(l)}$. This together
with (\ref{e:019}) establishes property (P1) for balls $L$ in
$K_{G',B^{(l)}}$. Since the balls $B^{(l)}$ in $ {\cal G}^{(l)}$ are
disjoint and contained within $B$, we have that (P1) is satisfied
for balls $L$ in $K(n,B,l+1)$. In turn, this together with
(\ref{blcoll}) implies property (P1) for balls $L$ in
$K_{l+1}(n,B)$. Clearly, the above argument also verifies property
(P2) for balls $L$ in $K(n,B,l+1)$. The following establishes
property (P3) for $i=l+1$: \begin{eqnarray*}
\sum_{L\in
K(n,B,l+1)} \!\!\! V^k(L^f) \,& = & \, \sum_{B^{(l)} \in {\cal
G}^{(l)} } \ \ \sum_{L \in K_{G',B^{(l)}}} \!\! V^k(L^f) \\
&\stackrel{(\ref{e:010})}{\,\geqslant\,} & \frac{1}{c_2}\sum_{B^{(l)}
\in {\cal G}^{(l)} } \ \ \sum_{L \in K_{G',B^{(l)}}}{\cal H}^k(L^f)\, \\ & & \\
& \stackrel{(\ref{e:014})}{\,\geqslant\,} & \frac{\kappa}{c_2}
\sum_{B^{(l)} \in {\cal G}^{(l)} } {\cal H}^k(B^{(l)}) \
\stackrel{(\ref{e:025})}{\,\geqslant\,} \ \frac{\kappa}{c_2}
\frac{c_1}{2 c_2 5^k} \ \
{\cal H}^k(\mbox{\footnotesize{$\frac{1}{2}$}}B)
\\ & & \\ \ & \stackrel{(\ref{e:010})}{\,\geqslant\,} & \frac{\kappa
c_1^2}{2 c_2^2
10^k} \ V^k(B) \ := \ c_3 \ V^k(B) \ .
\end{eqnarray*}
Property
(P4) is trivially satisfied as we have imposed condition
(\ref{e:026}). Finally, in view of (\ref{card}), for any ball $L$
in $K(n,B,l+1)$ property (P5) is satisfied; i.e. $l_{L} \geqslant 2 $.
The upshot is that (P1)--(P5) are satisfied up to the
local sub-level\break $K(n,B,l+1)$, which completes the inductive step.
This establishes the existence of the local level $K(n,B):=
K_{l_B}(n,B)$ for each $B \in K(n-1)$ and thereby the existence of
the $n$-th level $K(n)$.
\Subsec{The measure $\mu$ on ${\Bbb K}_\eta$}
In this section, we define a probability measure $\mu$ supported
on ${\Bbb K}_\eta$. We will eventually show that the measure satisfies
(\ref{task}). For any ball $L \in K(n)$, we attach a weight
$\mu(L)$ defined recursively as follows.
For $n =1$, we have that $L= B_0 := {\Bbb K}(1)$ and we set
$\mu(L):=1$.
For $n \geqslantq 2$, let $L$ be a ball in $K(n)$. By construction,
there is a unique ball $B \in K(n-1)$ such that $L \subset B$. We
set $$ \mu(L)\ := \
\frac{V^f(L)}{\rule{0ex}{2.5ex}\sum\limits_{M\in K(n,B)} \!\!\!
V^f(M)}\ \times \ \mu(B)\ . $$
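Note that, by construction, $\sum_{L\in K(n,B)}\mu(L)=\mu(B)$ for every
$B\in K(n-1)$, and hence $\sum_{L\in K(n)}\mu(L)=\mu(B_0)=1$ for every $n$;
thus the total mass is preserved from one level of the construction to the
next.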
This procedure thus defines inductively a mass on any
ball appearing in the construction of ${\Bbb K}_{\eta}$. In fact a lot
more is true; $\mu$ can be further extended to all Borel
subsets $F$ of ${\Bbb R}^k$ to determine $\mu(F)$ so that $\mu$
constructed as above actually defines a measure supported on
${\Bbb K}_{\eta}$; see Proposition 1.7 \cite{falc}. We state this
formally as a
\demo{Fact} The probability measure $\mu$ constructed
above is supported on ${\Bbb K}_{\eta}$ and for any Borel subset $F$ of
${\Bbb R}^k$
$$
\mu(F):= \mu(F \cap {\Bbb K}_{\eta}) \; = \; \inf\;\sum_{L\in{\cal
C}(F)}\mu(L) \ ,
$$
where the infimum is taken over all coverings ${\cal C}(F)$ of $F
\cap {\Bbb K}_{\eta}$ by balls $L \in \bigcup_{n\in{\Bbb N}}K(n)$.
\Subsec{The measure of a ball in the Cantor construction}
With $n \geqslant 2$, the aim of this section is to show that for any
ball $L$ in $ K(n)$ we have that
\begin{equation}\label{e:027}
\mu(L) \ \leqslant \ \frac{V^f(L)}{\eta} \ ;
\end{equation}
i.e.\ (\ref{task}) is satisfied for balls in the Cantor
construction. We start with level $n=2$ and fix a ball $L\in
K(2)=K(2,B_0)$; recall that $B_0 = {\Bbb K}(1)$. Also, recall that
$B=B^k$ for any ball $B$; see (\ref{e:008}). By definition, $$
\mu(L) \ := \ \frac{V^f(L)}{\rule{0ex}{2.5ex}\sum\limits_{M\in
K(2,B_0)} \!\!\! V^f(M)}\ \times \ \mu(B_0) \ =\
\frac{V^f(L^k)}{\rule{0ex}{4ex}\sum\limits_{i=1}^{l_{B_0}}\
\sum\limits_{M\in K(2,B_0,i)} \!\!\! V^f(M^k)} \ . $$ However, $$
\sum\limits_{M\in K(2,B_0,i)} V^f(M^k)\
\stackrel{(\ref{e:009})}{=} \ \sum\limits_{M\in K(2,B_0,i)}
V^k(M^f) \ \stackrel{(P3)}{\geqslant} \ c_3\ V^k(B_0)\
\stackrel{(\ref{e:010})}{\geqslant}\ \frac{c_3}{c_2}\ {\cal H}^k(B_0)\,. $$
It now follows from the definition of $l_{B_0}$; see (P5), that
$$ \mu(L) \ \leqslant \ \frac{c_2\,V^f(L)}{c_3\,{\cal H}^k(B_0)\,l_{B_0}} \
\leqslant \ \frac{V^f(L)}{\eta} \ . $$
\noindent To establish (\ref{e:027}) for general $n$, we proceed
by induction. For $n > 2$, assume that (\ref{e:027}) holds for
balls in $K(n-1)$. Consider an arbitrary ball $L$ in $K(n)$.
Then, $L \in K(n,B)$ for some $B\in K(n-1)$. By definition and
our induction hypothesis, $$ \mu(L) \ := \
\frac{V^f(L)}{\rule{0ex}{2.5ex}\sum\limits_{M\in K(n,B)} \!\!\!
V^f(M)}\ \times\ \mu(B)\ \leqslant \
\frac{V^f(L)}{\rule{0ex}{2.5ex}\sum\limits_{M\in K(n,B)} \!\!\!
V^f(M)}\times \frac{V^f(B)}{\eta}\ . $$ Thus, (\ref{e:027})
follows on showing that $$ \sum\limits_{M\in K(n,B)}
V^f(M)\,\,=\,\sum\limits_{M\in K(n,B)} V^f(M^k)\,\geqslant\, V^f(B) \ .
$$ Well, \begin{eqnarray*}
\sum\limits_{M\in K(n,B)}
V^f(M^k)\, & = & \,\sum_{i=1}^{l_B}\ \sum\limits_{M\in K(n,B,i)}
V^f(M^k)\ \stackrel{(\ref{e:009})}{=} \ \sum_{i=1}^{l_B}\
\sum\limits_{M\in K(n,B,i)} V^k(M^f) \\ & & \\
&\stackrel{(P3)}{\geqslant} & \, c_3 \sum_{i=1}^{l_B}\ V^k(B)\
\stackrel{(P5)}{\geqslant}\, c_3\ V^k(B)\,\frac{V^f(B)}{c_3\,V^k(B)}\ =\
V^f(B) \ \end{eqnarray*} and so we are done. This completes the
inductive step and thereby establishes (\ref{e:027}) for any $L$
in $K(n)$ with $n \geqslant 2$.
\Subsec{The measure of an arbitrary ball}
Set $r_o:=\min\{r(B):B\in K(2)\}$. Take an arbitrary ball $A$ in
${\Bbb R}^k$ with $r(A)<r_o$. The aim of this section is to establish
(\ref{task}) for $A$; that is
\begin{equation*}\label{e:028}
\mu(A) \ \ll \ \frac{V^f(A)}{\eta} \ ,
\end{equation*}
where the implied constant is independent of both $A$ and $\eta$.
This will then complete the proof of the Mass Transference
Principle.
We begin by establishing the following geometric lemma.
\begin{lemma}\label{lem2}
Let $A=B(x_A,r_A)$ and $M=B(x_M,r_M)$ be arbitrary balls such that
$A\cap M\not=\emptyset$ and $A\setminus(cM)\not=\emptyset$ for
some $c\geqslant3$. Then $r_M\,\leqslant \,r_A$ and $cM\subset 5A$.
\end{lemma}
{\it Proof.} Let $z\in A\cap M$. Then $d(x_A,x_M)\leqslant
d(x_A,z)+d(z,x_M)\leqslant r_A+r_M$. Here $d(.,.)$ is the standard
Euclidean metric in ${\Bbb R}^k$. Now take $z\in A\setminus(cM)$. Then
$$ c\,r_M \ \leqslant \ d(x_M,z) \ \leqslant \ d(x_M,x_A)+d(x_A,z) \ < \
r_A+r_M+r_A \ . $$ Hence, $r_M \leqslant \frac{2}{c-1}\ r_A$ and since
$c \geqslant 3$ we have that $r_M \leqslant r_A$. Now for any $z\in cM$, we
have that \begin{eqnarray*}
d(x_A,z) \ & \leqslant & \ d(x_A,x_M)+d(x_M,z) \
\leqslant \ r_A+r_M+ c\,r_M \ = \ r_A+(1+c)r_M \\ & & \\ & \leqslant & \
r_A+\frac{2(1+c)}{c-1}r_A \ = \ \Big(3+\frac{4}{c-1}\Big)\ r_A\
\leqslant\ 5\ r_A\ . \end{eqnarray*}
\vglue-20pt\Endproof\vskip12pt
The measure $\mu$ is supported on ${\Bbb K}_\eta$. Thus, without loss of
generality we can assume that $ A \cap {\Bbb K}_\eta \neq \emptyset $;
otherwise $\mu(A) = 0$ and there is nothing to prove.
We can also assume that for every $n$ large enough $A$ intersects at
least two balls in $ K(n)$; since if $B$ is the only ball in $ K(n)$
which has nonempty intersection with $A$, then
$$
\mu(A) \ \leqslant \ \mu(B) \ \stackrel{(\ref{e:027})}{\leqslant} \
\frac{V^f(B)}{\eta} \ \to \ 0
\hspace{8mm} {\rm as } \hspace{5mm} n \to \infty \
$$
($r(B) \to 0$ as $n \to \infty$) and again there is nothing to
prove. Thus we may assume that there exists a unique integer $n$
such that:
\begin{equation}\label{e:029}
\text{$A$ intersects at least 2 balls from $K(n)$}
\end{equation}
and
$$
\text{$A$ intersects only one ball $B$ from $K(n-1)$}.
$$
In view of our choice of $r_o$ and the fact that $r(A) < r_o$, we
have that $n>2$. Note that since $B$ is the only ball from
$K(n-1)$ which has nonempty intersection with $A$, we trivially
have that $\mu(A)\leqslant\mu(B)$. It follows that we can also assume
that
\begin{equation}
r(A) \ < \ r(B) \ .
\label{A<B}
\end{equation}
Otherwise, since $f$ is increasing
$$
\mu(A) \ \leqslant \ \mu(B) \ \stackrel{(\ref{e:027})}{\leqslant} \
V^f(B)/\eta \ := \ f(r(B))/\eta \ \leqslant \ f(r(A))/\eta
\ := \ V^f(A)/\eta $$
and we are done. Since $K(n,B)$ is a cover for $A\cap{\Bbb K}_\eta$, we
have that
\begin{equation}\label{e:031}
\mu(A) \ \leqslant \ \sum_{i=1}^{l_B}\ \sum_{L\in K(n,B,i),\ L\cap
A\not=\emptyset} \!\!\!\!\! \mu(L) \ \stackrel{(\ref{e:027})}{\leqslant}
\ \ \sum_{i=1}^{l_B}\ \sum_{L\in K(n,B,i),\ L\cap A\not=\emptyset}
\!\!\!\!\! V^f(L)/\eta\ .
\end{equation}
In order to estimate the right-hand side of (\ref{e:031}), we
consider two
cases:
\medbreak
\underline{{\em Case}\/} (i):~ Sub-levels $K(n,B,i)$ for
which $$\#\{\,L\in K(n,B,i)\,:\ L\cap A\not=\emptyset\,\} \,=\, 1.$$
\medbreak
\underline{{\em Case}\/} (ii):~ Sub-levels $K(n,B,i)$
for which $$\#\{\,L\in K(n,B,i)\,:\ L\cap A\not=\emptyset\,\}
\,\geqslant\, 2.$$
\Enddemo
Formally, there is a third case corresponding to those
sub-levels $K(n,B,i)$ for which $\#\{\,L\in K(n,B,i)\,:\ L\cap
A\not=\emptyset\,\} \,=\, 0$. However, this case is irrelevant
since the contribution to the right-hand side of (\ref{e:031}) from
such
sub-levels is zero.
\demo{Dealing with Case {\rm (i)}} Pick a ball $L\in
K(n,B,i)$ such that $L\cap A\not=\emptyset$. By (\ref{e:029}),
there is another ball $M\in K(n,B)$ such that $A\cap
M\not=\emptyset$. By property (P1), $3L$ and $3M$ are disjoint. It
follows that $A\setminus3L\not=\emptyset$. Therefore, by
Lemma~\ref{lem2}, $r(L)\leqslant r(A)$ and thus
\begin{equation}\label{c}
V^f(L)\ \leqslant \ V^f(A) \ .
\end{equation}
Now, let $K(n,B,i^*)$ denote the first sub-level which has
nonempty intersection with $A$. Thus, $L \cap A = \emptyset$ for
any $L \in K(n,B,i)$ with $i < i^*$ and there exists a unique ball
$L^*$ in $K(n,B,i^*)$ such that $L^*\cap A \neq \emptyset$. Since
we are in case (i), the internal sum of (\ref{e:031}) consists of
just one summand. It follows, via property (P4) and (\ref{c}),
that
\begin{eqnarray} \sum_{i\,\in\,\text{Case(i)}} \ \sum_{L\in
K(n,B,i),\ L\cap A\not=\emptyset} \!\!\!\!\! V^f(L)/\eta\ & \leqslant &
\ \sum_{i\,\in\,\text{Case(i)}}\ \frac{1}{2^{i-i^*}}\
\frac{V^f(L^*)}{\eta}\, \label{c1} \\ &\leqslant & \, 2 \;
\frac{V^f(L^*)}{\eta}\,\leqslant\, 2\; \frac{V^f(A)}{\eta}\ .\nonumber
\end{eqnarray}
\demo{Dealing with Case {\rm (ii)}} Again pick a ball $L\in
K(n,B,i)$ such that $L\cap A\not=\emptyset$. Since we are in case
(ii), there is another ball $M\in K(n,B,i)$ such that $A\cap
M\not=\emptyset$. By property (P2), the balls $L^f$ and $M^f$ are
disjoint. It follows that $A\setminus L^f\not=\emptyset$. Hence,
by Lemma~\ref{lem2} and property (P1) we have that
\begin{equation}\label{cc}
L^f\subset 5 A.
\end{equation}
It follows that
\begin{eqnarray} &\!\!\!\!& \label{c2} \\
\sum_{i\,\in\text{ Case(ii)}} \ \sum_{L\in K(n,B,i),\ L\cap
A\not=\emptyset} \!\!\!\! \frac{V^f(L)}{\eta} &\!\!
\stackrel{(\ref{e:009})}{=}\!\! & \sum_{i\,\in\text{ Case(ii)}}\
\sum_{L\in K(n,B,i),\ L\cap A\not=\emptyset} \frac{V^k(L^f)}{\eta}
\nonumber \\
& \!\!\!\!& \nonumber \\
&\!\!\stackrel{(\ref{e:010})}{\leqslant}\!\!& \frac{1}{c_1\,
\eta}\sum_{i\,\in\text{ Case (ii)}}\ \sum_{L\in K(n,B,i),\ L\cap
A\not=\emptyset} \!\!\!\!\!\!\! {\cal H}^k(L^f) \nonumber \\
& \!\! \!\!& \nonumber \\ & \!\!\stackrel{(P2) \& (\ref{cc})}{\leqslant}\!\! &
\frac{1}{c_1\,\eta}\sum_{i\,\in\text{ Case(ii)}} \!\!\!\!\!\!
{\cal H}^k(5A) \ \stackrel{(\ref{e:010})}{\leqslant} \ \frac{5^k c_2
\,V^k(A)\,l_B}{c_1\,\eta}
\nonumber \\
&\!\!\!\! & \nonumber \\
&\!\! \stackrel{(P5)}{\leqslant} \!\!& \frac{5^k c_2\,V^k(A)}{c_1\,\eta} \times
\frac{2\,V^f(B)}{c_3\,V^k(B)}\
\nonumber \\
& \!\! \!\!& \nonumber \\
& \!\! \leqslant\!\! & \frac{2 \; 5^k c_2}{c_1\,c_3} \times
\frac{V^f(A)}{\eta}\ . \nonumber
\end{eqnarray}
The last inequality follows
from (\ref{A<B}) and the fact that the function $x^{-k} f(x)$ is
decreasing.
On combining (\ref{e:031}), (\ref{c1}) and (\ref{c2}) we attain
our goal; i.e.\ $ \mu(A) \ll V^f(A)/\eta $.
\vglue-22pt
\phantom{up}
\section{Final comments\label{last}}
\vglue-17pt
\Subsec{A general Mass Transference Principle \label{secmtpg}}
We say that a function $f$ is {\em doubling} if there exists a
constant $\lambda
> 1 $ such that for all $x >0$ $$f(2x) \, \leqslant \, \lambda f(x) \ . $$
Let $(X,d)$ be a locally compact metric space. Let $g$ be a
doubling, dimension function and suppose there exist constants
$0<c_1<1<c_2<\infty$ and $r_0 > 0$ such that
\begin{equation*}\label{g}
c_1\ g(r(B)) \leqslant {\cal H}^g(B)\leqslant c_2\ g(r(B)) \ ,
\end{equation*}
for any ball $B=B(x,r)$ with $x\in X$ and $r\leqslant r_0$. Since $g$ is
doubling, the measure ${\cal H}^g$ is doubling on $X$. Recall that
$V^g(B):=g(r(B))$. Thus, the above condition corresponds to
(\ref{e:010}) in the ${\Bbb R}^k$ setup. Next, given a dimension
function $f$ and a ball $B=B(x,r)$ we define
$$
B^f:=B(x,g^{-1}f(r))\,.
$$
By definition, $B^g(x,r)=B(x,r)$ and
\begin{equation*} \label{e:009n}
V^f(B^g)\,=\,V^g(B^f) \qquad\text{for any ball $B$.}
\end{equation*}
This is an analogue of (\ref{e:009}). In the case $g(x)=x^k$, the
current setup precisely coincides with that of Section \ref{secMTP} in
which $X = {\Bbb R}^k$. The following result is a natural
generalization of Theorem \ref{thm3} --- the Mass Transference
Principle.
\vglue-18pt
\phantom{up}
\begin{theorem}[A general Mass Transference Principle ]\label{thm3b}
Let $(X,d)$ and $g$ be as above and let $\{B_i\}_{i\in{\Bbb N}}$ be a
sequence of balls in $X$ with $r(B_i)\to 0$ as $i\to\infty$.
Let $f$ be a dimension function such that $f(x)/g(x)$ is monotonic
and suppose that for any ball $B$ in $X$
\begin{equation*}\label{e:011b}
{\cal H}^g\big(\/B\cap\limsup_{i\to\infty}B^f_i{}\,\big)={\cal H}^g(B) \ .
\end{equation*}
Then{\rm ,} for any ball $B$ in $X$
\begin{equation*}\label{e:012b}
{\cal H}^f\big(\/B\cap\limsup_{i\to\infty}B^g_i\,\big)={\cal H}^f(B) \ .
\end{equation*}
\end{theorem}
The proof of the general Mass Transference Principle follows on
adapting the proof of Theorem \ref{thm3} in the obvious manner.
The property that ${\cal H}^k$ is doubling is used repeatedly in the
proof of Theorem \ref{thm3}. In establishing Theorem~\ref{thm3b},
this property is replaced by the assumption that ${\cal H}^g$ is
doubling.
In short, the general Mass Transference Principle allows
us to transfer ${\cal H}^g$-measure theoretic statements for $\limsup$
subsets of $X$ to general ${\cal H}^f$-measure theoretic statements.
Thus, whenever we have a Duffin-Schaeffer type statement with
respect to a measure $\mu$ comparable to ${\cal H}^g$, we obtain a
general Hausdorff measure theory for free. For numerous examples
of $\limsup$ sets and associated Khintchine type theorems (the
approximating function $\psi$ is assumed to be monotonic) within
the framework of this section, the reader is referred to
\cite{BDV}.
\Subsec{The Duffin-Schaeffer conjecture revisited} \label{DSR}
Let ${\cal S}_k^* (\psi)$ denote the set of points ${\bf
y}=(y_1,\dots,y_k)\in{\Bbb I}^k$ for which there exist infinitely many
$q\in{\Bbb N}$ and ${\bf p}= (p_1, \dots,p_k) \in {\Bbb Z}^k$ with $(p_1,
\dots,p_k,q) =1$, such that
\begin{equation*}
\left|y_i - \frac{p_i}{q}\right|\ <\
\frac{\psi(q)}{q} \hspace{9mm} 1 \leqslant i \leqslant k \ . \label{1P}
\end{equation*}
\noindent Here, we simply ask
that points in ${\Bbb I}^k$ are approximated by distinct rationals
whereas in the definition of ${\cal S}_k(\psi)$ a pairwise co-primeness
condition on the rationals is imposed. For $k=1$, the two sets
coincide. For $k \geqslant 2$, it is easy to verify that $
m({\cal S}_k^*(\psi)) = 0 $ if $ \sum \psi(n)^k < \infty $. The
complementary divergent result is due to Gallagher \cite{gall}.
\vskip6pt{\scshape Theorem G} {\em For $k \geqslant 2${\rm ,} $ \hspace{5mm}
m({\cal S}_k^*(\psi)) = 1 \ \ \ \ \ {\it if \ } \ \ \ \ \
\sum_{n=1}^{\infty} \ \psi(n)^k \ = \ \infty \ . $}
\vskip6pt
Notice that the Euler function $\phi$ plays no role in
determining the measure of ${\cal S}_k^*(\psi)$ when $k \geqslant 2$. This
is unlike the situation when considering the measure of the set
${\cal S}_k(\psi) $; see Theorem PV (\S\ref{tds}) and Corollary
\ref{cor1}. It is worth mentioning that Gallagher actually obtains
a quantitative version of Theorem G.
The Mass Transference Principle, together with Theorem G, implies
the following general statement.
\demo{\scshape Theorem 3} {\it For} $k \geqslant 2$, $$
{\cal H}^f({\cal S}_k^*(\psi)) = {\cal H}^f({\Bbb I}^k) \ {\it if} \
\sum_{n=1}^{\infty} \ f(\psi(n)/n) n^k\break = \infty.$$
\Enddemo
It would be highly desirable to establish a version of
the Mass Transference Principle which allows us to deduce a
quantitative Hausdorff measure statement from a quantitative Lebesgue
measure statement. We hope to investigate this sometime in the
near future.
\demo{Acknowledgments} SV would like to thank Ayesha
and Iona for making him appreciate once again all those
wonderfully simple things around us: ants, pussycats, sticks,
leaves and of course the many imaginary worlds that are often
neglected in adulthood, especially the world of hobgoblins. VB
would like to thank Tatiana for her help and patience during the
difficult but nevertheless exciting time over the past nine
months.
\references {999}
\bibitem[1]{BDV} \name{V. Beresnevich, H. Dickinson}, and \name{S.\ L. Velani},
\emph{Measure Theoretic Laws for Limsup Sets}, {\it Memoirs Amer.\ Math.
Soc\/}.\ {\bf 179} (2006), 1--91; preprint:
arXiv:math.NT/0401118.
\bibitem[2]{DS}
\name{R.\ J. Duffin} and \name{A.\ C. Schaeffer}, Khintchine's problem in metric
Diophantine approximation, {\em Duke Math.\ J\/}.\ {\bf 8} (1941),
243--255.
\bibitem[3]{falc}
\name{K. Falconer}, {\em Fractal Geometry}: {\em Mathematical Foundations and
Applications}, John Wiley \& Sons, New York (1990).
\bibitem[4]{gall}
\name{P.\ X. Gallagher}, Metric simultaneous diophantine approximation.\ II,
{\em Mathematika} {\bf 12} (1962), 123--127.
\bibitem[5]{Harman}
\name{G. Harman}, {\em Metric Number Theory}, {\it London Math.\ Series
Monographs\/} {\bf 18},
Clarendon Press, Oxford (1998).
\bibitem[6]{jh}
\name{J. Heinonen}, {\em Lectures on Analysis on Metric Spaces},
{\it Universitext\/}, Springer-Verlag, New York (2001).
\bibitem[7]{mat}
\name{P. Mattila}, {\em Geometry of Sets and Measures in Euclidean
Spaces}, {\it Cambridge Studies Adv.\ Math\/}.\ {\bf 44},
Cambridge Univ. Press, Cambridge
(1995).
\bibitem[8]{PV} \name{A.\ D. Pollington} and \name{R.\ C. Vaughan}, The $k$-dimensional
Duffin and Schaeffer conjecture, {\it Mathematika} {\bf 37} (1990),
190--200.
\bibitem[9]{Sp}
\name{V.\ G. Sprind\v{z}uk}, {\em Metric Theory of Diophantine
Approximation} (translated by R.\ A.\ Silverman), V.\ H.\ Winston \&
Sons, Washington D.C. (1979).
\Endrefs
\end{document}
\begin{document}
\author{Jan Roksvold}
\title{Betti numbers of skeletons}
\address{Department of Education, UiT The Arctic University of Norway \\ N-9037 Troms\o, Norway}
\email{[email protected] (Corresponding author)}
\author{Hugues Verdure}
\address{Department of Mathematics, UiT The Arctic University of Norway \\ N-9037 Troms\o, Norway}
\email{[email protected]}
\begin{abstract}
\noindent We demonstrate that the Betti numbers associated to an $\mathbb{N}_{0}$-graded minimal free resolution of the Stanley-Reisner ring $\srring{\iskel{\Delta}{d-1}}$ of the $(d-1)$-skeleton of a simplicial complex $\Delta$ of dimension $d$ can be expressed as a $\mathbb{Z}$-linear combination of the corresponding Betti numbers of $\Delta$. An immediate implication of our main result is that the projective dimension of $\srring{\iskel{\Delta}{d-1}}$ is at most one greater than the projective dimension of $\srring{\Delta}$, and it thus provides a new and direct proof of this. Our result extends immediately to matroids and their truncations. A similar result for matroid elongations cannot be hoped for, but we do obtain a weaker result for these. The result does not apply to generalized skeleton ideals.
\end{abstract}
\maketitle
\section{Introduction}
\noindent In this paper we investigate certain aspects of the relationship between an $\mathbb{N}_{0}$-graded minimal free resolution of the Stanley-Reisner ring of a simplicial complex and those associated to its skeletons. Our main result is Theorem \ref{mainThe}, which says that each of the Betti numbers associated to an $\mathbb{N}_{0}$-graded minimal free resolution of $\srring{\iskel{\Delta}{d-1}}$, where $\srid{\iskel{\Delta}{d-1}}$ is the ideal generated by monomials corresponding to nonfaces of the $(d-1)$-skeleton of a finite simplicial complex $\Delta$, can be expressed as a $\mathbb{Z}$-linear combination of the Betti numbers associated to $\srring{\Delta}$.
Previous results on the Stanley-Reisner rings of skeletons include the classic \cite[Corollary 2.6]{Hib} which states that \begin{equation}\label{Hibis}\depth\srring{\Delta}=\max\{j:\iskel{\Delta}{j-1} \text{ is Cohen-Macaulay}\}.\end{equation} This result was later generalized to arbitrary monomial ideals in \cite[Corollary 2.5]{HJZ} (we shall return to this in our final section, where we give a counterexample showing that our main result can not be generalized in the same way). By the Auslander-Buchsbaum identity, it follows from (\ref{Hibis}) that \[\pd{\Delta}\leq\ppd{\srring{\iskel{\Delta}{d-1}}}\leq 1+ \ppd{\srring{\Delta}}.\] From the latter of these inequalities it is easily demonstrated, again by using the Auslander-Buchsbaum identity, that every skeleton of a Cohen-Macaulay simplicial complex is Cohen-Macaulay - a fact which was proved in \cite[Corollary 2.5]{Hib} as well.
That $\ppd{\srring{\iskel{\Delta}{d-1}}}\leq 1+\ppd{\srring{\Delta}}$ can also be seen as an immediate consequence of our main result, and Theorem \ref{main} thus provides a new and direct proof of this and therefore also of the fact that the Cohen-Macaulay property is inherited by skeletons.
The projective dimension of Stanley-Reisner rings has seen recent research interest. Most notably, it was demonstrated in \cite[Corollary 3.33]{MV} that \[\ppd{\srring{\Delta}}\geq \max\{|C|:C \text{ is a circuit of the Alexander dual } \Delta^*\text{ of }\Delta\},\] with equality if $\srring{\Delta}$ is sequentially Cohen-Macaulay.
Our main result extends immediately to a matroid $M$ and its truncations. Such matroid truncations have themselves seen recent research interest. Examples of this are \cite{JP}, which contains the strengthening of a result by Brylawski \cite[Proposition 7.4.10]{Bry} concerning the representability of truncations, and \cite[Proposition 15]{Bri}, where it is demonstrated that the Tutte polynomial of $M$ determines that of its truncation $\tru{M}{1}$.
Corresponding to our main result applied to matroid truncations, we give a considerably weaker result concerning matroid elongations. It says that the Betti table associated to the elongation of $M$ to rank $r(M)+1$ is equal to the Betti table obtained by removing the second column from the Betti table of $\srring{M}$ - but only in terms of zeros and nonzeros.
\subsection{Structure of this paper}
\begin{itemize}
\item In Section \ref{Secprelim} we provide definitions and results used later on.
\item In Section \ref{SecSimp} we demonstrate that the Betti numbers associated to an $\mathbb{N}_{0}$-graded minimal free resolution of the Stanley-Reisner ring of a skeleton can be expressed as a $\mathbb{Z}$-linear combination of the corresponding Betti numbers of the original complex. This leads immediately to a new and direct proof that the property of being Cohen-Macaulay is inherited from the original complex.
\item In Section \ref{SecMat} we see how our main result applies to truncations of matroids. We also explore whether a similar result can be obtained for matroid elongations.
\item In Section \ref{SecCounter} we give a counterexample demonstrating that our main result does not hold for the generalized skeleton ideals constructed in \cite{HVZ} and \cite{HJZ}.
\end{itemize}
\section{Preliminaries}\label{Secprelim}
\subsection{Simplicial complexes}
\begin{definition}A \emph{simplicial complex} $\Delta$ on $E=\{1,\ldots,n\}$ is a collection of subsets of $E$ that is closed under inclusion.
\end{definition}
We refer to the elements of $\Delta$ as the \emph{faces} of $\Delta$. A \emph{facet} of $\Delta$ is a face that is not properly contained in another face, while a \emph{nonface} is a subset of $E$ that is not a face.
\begin{definition}
If $X\subseteq E$, then $\Delta_{|X}=\{\sigma\subseteq X:\sigma \in \Delta\}$ is itself a simplicial complex. We refer to $\Delta_{|X}$ as the \emph{restriction of $\Delta$ to $X$}.
\end{definition}
\begin{definition}
Let $m$ be the cardinality of a largest face of $\Delta$ contained in $X\subseteq E$. The \emph{dimension} of $X$ is $\dim(X)=m-1$.
\end{definition}
In particular, the dimension of a face $\sigma$ is equal to $|\sigma|-1$. We define $\dim(\Delta)=\dim(E),$ and refer to this as the dimension of $\Delta$.
\begin{definition}[The $i$-skeleton of $\Delta$]For $0\leq i\leq \dim(\Delta)$, let the $i$-skeleton $\iskel{\Delta}{i}$ be the simplicial complex \[\iskel{\Delta}{i}=\{\sigma\in\Delta:\dim(\sigma)\leq i\}.\]
\end{definition}
In particular, we have $\iskel{\Delta}{d}=\Delta$. The $1$-skeleton $\iskel{\Delta}{1}$ is often referred to as the underlying graph of $\Delta$.
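For example, if $\Delta$ consists of all subsets of $E=\{1,2,3\}$, so that $\dim(\Delta)=2$, then $\iskel{\Delta}{1}$ consists of all subsets of $E$ of cardinality at most $2$; that is, the $1$-skeleton is obtained from $\Delta$ simply by discarding the facet $\{1,2,3\}$.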
\begin{remark}\label{remarkSigm}Whenever $\sigma \in \mathbb{N}_{0}^{n}$ the expression $|\sigma|$ shall signify the sum of the coordinates of $\sigma$. When, on the other hand, $\sigma\subseteq\{1\ldots n\}$, the expression $|\sigma|$ denotes the cardinality of $\sigma$.
\end{remark}
\subsection{Matroids}There are numerous equivalent ways of defining a matroid. It is most convenient here to give the definition in terms of independent sets. For an introduction to matroid theory in general, we recommend e.g.~\cite{Oxl}.
\begin{definition}A \emph{matroid} $M$ consists of a finite set $E$ and a non-empty set $I(M)$ of subsets of $E$ such that:
\begin{itemize} \item $I(M)$ is a simplicial complex. \item If $I_{1},I_{2}\in \ind{M}$ and $|I_{1}|>|I_{2}|$, then there is an $x\in I_{1}\smallsetminus I_{2}$ such that $I_{2}\cup x\in \ind{M}$.\end{itemize}
\end{definition}
The elements of $\ind{M}$ are referred to as the \emph{independent sets} (of $M$). The \emph{bases} of $M$ are the independent sets that are not contained in any other independent set: in other words, the facets of $\ind{M}$. Conversely, given the bases of a matroid, we find the independent sets to be those sets that are contained in a basis. We denote the bases of $M$ by $\bas{M}$. It is a fundamental result that all bases of a matroid have the same cardinality, which implies that $I(M)$ is a \emph{pure} simplicial complex.
The dual matroid $\dual{M}$ is the matroid on $E$ whose bases are the complements of the bases of $M$. Thus \[\bas{\dual{M}}=\{E\smallsetminus B:B\in \bas{M}\}.\]
\begin{definition}For $X\subseteq E$, the rank function $r_{M}$ of $M$ is defined by \[r_{M}(X)=\max\{|I|:I \in \ind{M}, I\subseteq X\}.\]\end{definition}
Whenever the matroid $M$ is clear from the context, we omit the subscript and write simply $r(X)$. The rank $r(M)$ of $M$ itself is defined as $r(M)=r_{M}(E)$. Whenever $I(M)$ is considered as a simplicial complex we thus have $r(X)=\dim(X)+1$ for all $X\subseteq E$, and $r(M)=\dim(\ind{M})+1$.
\begin{definition}
If $X\subseteq E$, then $\{I\subseteq X:I \in \ind{M}\}$ forms the set of independent sets of a matroid $M_{|X}$ on $X$. We refer to $M_{|X}$ as the \emph{restriction of $M$ to $X$}.
\end{definition}
In \cite{Lar} the $i$th generalized Hamming weight of a linear code is generalized to matroids as follows.
\begin{definition}
For $1\leq i \leq n-r(M)$, the $i$th higher weight of $M$ is \[d_i(M)=\min\{|X|:X\subseteq E\text{ and }|X|-r(X)=i\}.\]
\end{definition}
We refer to $\{d_i(M)\}$ as the \emph{higher weights} of $M$.
\begin{definition}[Truncation]
The $i$th truncation $\tru{M}{i}$ of $M$ is the matroid on $E$ whose independent sets consist of the independent sets of $M$ that have rank less than or equal to $r(M)-i$. In other words \[\ind{\tru{M}{i}}=\{X \subseteq E:r(X)=|X|, r(X)\leq r(M)-i\}.\]
\end{definition}
Observe that $\tru{M}{i}=\iskel{\ind{M}}{r(M)-i-1}$, whenever $I(M)$ is considered as a simplicial complex. That is, the $i$th truncation corresponds to the $(d-i)$-skeleton.
\begin{definition}[Elongation]
For $0\leq i\leq n-r(M)$, let $\elo{M}{i}$ be the matroid on $E$ whose independent sets are $I(\elo{M}{i})=\{\sigma\subseteq E: |\sigma|-r_{M}(\sigma)\leq i\}$.
\end{definition}
Since $r(\elo{M}{i})=r(M)+i$, the matroid $\elo{M}{i}$ is commonly referred to as the \emph{elongation} of $M$ to rank $r(M)+i$.
It is straightforward to verify that for $i \in \{0,\ldots,n-r(M)\}$ we have $\elo{\overline{M}}{i}=\overline{\tru{M}{i}}$.
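For example, let $M=U_{2,4}$ be the uniform matroid on a four-element set whose independent sets are the subsets of cardinality at most $2$. Then $\tru{M}{1}=U_{1,4}$ and $\elo{M}{1}=U_{3,4}$, and since $U_{2,4}$ is self-dual the identity above reads $\elo{\overline{M}}{1}=U_{3,4}=\overline{U_{1,4}}=\overline{\tru{M}{1}}$.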
\subsection{The Stanley-Reisner ideal, Betti numbers, and the reduced chain complex}
Let $\Delta$ be an abstract simplicial complex on $E=\{1,\ldots,n\}$. Let $\field{k}$ be a field, and let $S=\polr{k}{x}{n}$. By employing the standard abbreviated notation \[x_1^{\feit{a}(1)}x_2^{\feit{a}(2)}\cdots x_n^{\feit{a}(n)}=\mon{a}\] for monomials, we establish a $1-1$ connection between monomials of $S$ and vectors in $\mathbb{N}_{0}^{n}$. Furthermore, identifying a subset of $E$ with its indicator vector in $\mathbb{N}_{0}^{n}$ (as is done in Definition \ref{sridDef} below) thus provides a $1-1$ connection between squarefree monomials of $S$ and subsets of $E$.
\begin{definition}\label{sridDef}
Let $I_{\Delta}$ be the ideal in $S$ generated by monomials corresponding to nonfaces of $\Delta$. That is, let \[I_{\Delta}=\langle\mon{\sigma}:\sigma\notin \Delta\rangle.\] We refer to $I_{\Delta}$ and $\srring{\Delta}$, respectively, as the \emph{Stanley-Reisner ideal} and \emph{Stanley-Reisner ring} of $\Delta$.
\end{definition}
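For example, if $\Delta$ is the hollow triangle on $E=\{1,2,3\}$, whose faces are the proper subsets of $E$, then the only nonface is $\{1,2,3\}$ itself, so that $I_{\Delta}=\langle x_{1}x_{2}x_{3}\rangle$; if instead $\Delta$ consists of all subsets of $E$, then $\Delta$ has no nonfaces, $I_{\Delta}=(0)$ and $\srring{\Delta}=S$.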
Being a (squarefree) monomial ideal, the Stanley-Reisner ideal, and thus also the Stanley-Reisner ring, permits both the standard $\mathbb{N}_{0}$-grading and the standard $\mathbb{N}_{0}^{n}$-grading. For $\feit{b}\in\mathbb{N}_{0}^{n}$ let $S_{\feit{b}}$ be the $1$-dimensional $\field{k}$-vector space generated by $\feit{x}^{\feit{b}}$, and let $S(\feit{a})$, $S$ shifted by $\feit{a}$, be defined by $S(\feit{a})_{\feit{b}}=S_{\feit{a}+\feit{b}}$. Analogously, for $i\in\mathbb{N}_{0}$ let $S_{i}$ be the $\field{k}$-vector space generated by monomials of degree $i$, and for $j\in\mathbb{N}_{0}$ let $S(j)$ be defined by $S(j)_{i}=S_{i+j}$. For the remainder of this section let $N$ be an $\mathbb{N}_{0}^{n}$-graded $S$-module.
\begin{definition}\label{minfree}An \emph{($\mathbb{N}_{0}^{n}$- or $\mathbb{N}_{0}$-)graded minimal free resolution} of $N$ is a left complex
\[\begin{CD}
0@<<<F_{0}@<\phi_1<<F_{1}@<\phi_2<<F_{2}@<<<\cdots@<\phi_l<<F_{l}@<<<0
\end{CD}\]
with the following properties: \begin{itemize}
\item $F_i=\begin{cases}\bigoplus_{\mathbf{a}\in\mathbb{N}_{0}^{n}}S(-\mathbf{a})^{\Bb{i}{\feit{a}}}, \mathbb{N}_{0}^{n}\text{-graded resolution}\\
\bigoplus_{j\in\mathbb{N}_{0}}S(-j)^{\Bb{i}{j}}, \mathbb{N}_{0}\text{-graded resolution}\\ \end{cases}$
\item $\im\phi_i=\ker\phi_{i-1}$ for all $i\geq2$, and $F_0/\im\phi_1\cong N$ (Exact)
\item $\im\phi_{i}\subseteq\feit{m}F_{i-1}$ (Minimal)
\item \begin{flalign*}\phi_{i}\big((F_i)_{\feit{a}}\big)\subseteq&(F_{i-1})_{\feit{a}}\text{ (Degree preserving, $\mathbb{N}_{0}^{n}$-graded case)}\\
\phi_{i}\big((F_i)_{j}\big)\subseteq&(F_{i-1})_{j} \text{ (Degree preserving, $\mathbb{N}_{0}$-graded case)}.\end{flalign*}\end{itemize}
\end{definition}
It follows from \cite[Theorem A.2.2]{HH} that the Betti numbers associated to an ($\mathbb{N}_{0}$- or $\mathbb{N}_{0}^{n}$-graded) minimal free resolution are unique, in that any other minimal free resolution must have the same Betti numbers. We may therefore without ambiguity refer to $\{\BbModulk{i}{\feit{a}}{N}\}$ and $\{\BbModulk{i}{j}{N}\}$, respectively, as the $\mathbb{N}_{0}^{n}$-graded and $\mathbb{N}_{0}$-graded Betti numbers of $N$ (over $\field{k}$). Observe that \[\BbModulk{i}{j}{N}=\sum_{|\feit{a}|=j}\BbModulk{i}{\feit{a}}{N}\] where $|\feit{a}|=\feit{a}(1)+\feit{a}(2)+\cdots+\feit{a}(n)$ (see Remark \ref{remarkSigm}, above). Note also that for an $\mathbb{N}_{0}^{n}$-graded (that is, monomial) ideal $I\subseteq S$, we have $\BbModulk{i}{\sigma}{S/I}=\BbModulk{i-1}{\sigma}{I}$ for all $i\geq 1$, and $\BbModulk{0}{\sigma}{S/I}=\begin{cases}1, & \sigma=\emptyset\\ 0, & \sigma\neq\emptyset\end{cases}$.
The $\mathbb{N}_{0}$-graded Betti numbers of $N$ may be compactly presented in a so-called \emph{Betti table}:
\[\beta[N;\field{k}]=\begin{array}{r|cccc}
& 0 & 1& \cdots & l\\
\hline
j & \BbModulk{0}{j}{N} & \BbModulk{1}{j+1}{N}&\cdots & \BbModulk{l}{j+l}{N}\\
j+1 & \BbModulk{0}{j+1}{N} & \BbModulk{1}{j+2}{N}&\cdots & \BbModulk{l}{j+l+1}{N}\\
\vdots& \vdots & \vdots & \cdots & \vdots\\
k & \BbModulk{0}{k}{N} & \BbModulk{1}{k+1}{N}&\cdots & \BbModulk{l}{k+l}{N}\\
\end{array}\]
By the (graded) \emph{Hilbert Syzygy Theorem} we have $F_i=0$ for all $i> n$. If $F_l\neq 0$ but $F_i=0$ for all $i>l$, we refer to $l$ as the \emph{length} of the minimal free resolution. It can be seen from e.g.~\cite[Corollary 1.8]{Eis} that the length of a minimal free resolution of $N$ equals its projective dimension ($\ppd N$).
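For example, for $N=S/\langle x_{1},x_{2}\rangle\cong\field{k}$ with $n=2$, the Koszul complex \[\begin{CD} 0@<<<S@<<<S(-1)^{2}@<<<S(-2)@<<<0\end{CD}\] is a graded minimal free resolution of $N$, so $\BbModulk{0}{0}{N}=1$, $\BbModulk{1}{1}{N}=2$ and $\BbModulk{2}{2}{N}=1$; the Betti table consists of the single row $0$ with entries $1,2,1$, and $\ppd N=2=n$.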
A sequence $f_1,\ldots,f_r\in \langle x_1,x_2,\ldots,x_n\rangle$ is said to be a \emph{regular $N$-sequence} if $f_{i+1}$ is not a zero-divisor on $N/(f_1N+\cdots+f_iN)$ for all $0\leq i\leq r-1$.
\begin{definition}
The \emph{depth} of $N$ is the common length of all maximal regular $N$-sequences. Whenever $N$ is $\mathbb{N}_{0}$-graded the polynomials may be assumed to be homogeneous.
\end{definition}
In general we have $\depth N\leq \Kdim N$, where $\Kdim N$ denotes the Krull dimension of $N$. The following is a particular case of the famous \emph{Auslander-Buchsbaum Theorem}.
\begin{theorem}[Auslander-Buchsbaum]
\[\ppd N+\depth N=n.\]
\end{theorem}
\begin{proof}
See e.g.~\cite[Corollary A.4.3]{HH}.
\end{proof}
Note that the Krull dimension $\Kdim\srring{\Delta}$ of $\srring{\Delta}$ is one more than the dimension of $\Delta$ (see \cite[Corollary 6.2.2]{HH}). The simplicial complex $\Delta$ is said to be \emph{Cohen-Macaulay} if $\depth\srring{\Delta} =\Kdim \srring{\Delta}$. That is, if $\srring{\Delta}$ is Cohen-Macaulay as an $S$-module.
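For example, if $\Delta=\{\emptyset,\{1\},\{2\}\}$ on two vertices, then $\srring{\Delta}=\field{k}[x_{1},x_{2}]/\langle x_{1}x_{2}\rangle$ has the minimal free resolution $0\leftarrow S\leftarrow S(-2)\leftarrow 0$, so $\ppd\srring{\Delta}=1$ and, by the Auslander-Buchsbaum Theorem, $\depth\srring{\Delta}=2-1=1=\dim(\Delta)+1=\Kdim\srring{\Delta}$; hence $\Delta$ is Cohen-Macaulay.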
\begin{definition}
Let $\ino{\Delta}{i}$ denote the set of $i$-dimensional faces of $\Delta$. That is, \[\ino{\Delta}{i}=\{\sigma \in \Delta:|\sigma|=i+1\}.\] Let $\field{k}^{\ino{\Delta}{i}}$ be the free $\field{k}$-vector space on $\ino{\Delta}{i}$. The \emph{(reduced) chain complex} of $\Delta$ over $\field{k}$ is the complex \[\minCDarrowwidth13pt\begin{CD}0
@<<<\field{k}^{\ino{\Delta}{-1}}@<\delta_{0}<<\cdots@<<<\field{k}^{\ino{\Delta}{i-1}}@<\delta_{i}<<\field{k}^{\ino{\Delta}{i}}@<<<\cdots@<\delta_{\dim(\Delta)}<<\field{k}^{\ino{\Delta}{\dim(\Delta)}}@<<<0\end{CD},\] where the boundary maps $\delta_{i}$ are defined as follows: With the natural ordering on $E$, set $\sign(j,\sigma)=(-1)^{r-1}$ if $j$ is the $r$th element of $\sigma\subseteq E$, and let \[\delta_{i}(\sigma)=\sum_{j\in \sigma}\sign(j,\sigma)\;\sigma\smallsetminus j.\] Extending $\delta_{i}$ $\field{k}$-linearly, we obtain a $\field{k}$-linear map from $\field{k}^{\ino{\Delta}{i}}$ to $\field{k}^{\ino{\Delta}{i-1}}$.
\end{definition}
\begin{definition}
The $i$th \emph{reduced homology} of $\Delta$ over $\field{k}$ is the vector space \[\tilde{H}_{i}(\Delta;\field{k})=\ker(\delta_{i})/\im(\delta_{i+1}).\]
\end{definition}
The following is one of the most celebrated results in the intersection between algebra and combinatorics.
\begin{theorem}[Hochster's formula]\label{Hochster}\[\Bringk{i}{\sigma}{\Delta}=\Bnumk{i-1}{\sigma}{\Delta}=\dim_{\field{k}}\tilde{H}_{|\sigma|-i-1}(\Delta_{|\sigma};\field{k}).\]
\end{theorem}
\begin{proof}
See \cite[Corollary 5.12]{MS} and \cite[p.~81]{HH}.
\end{proof}
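To illustrate Hochster's formula, take $\Delta=\{\emptyset,\{1\},\{2\}\}$ on $E=\{1,2\}$ again. For $\sigma=\{1,2\}$ the restriction $\Delta_{|\sigma}=\Delta$ consists of two disjoint vertices, so $\dim_{\field{k}}\tilde{H}_{0}(\Delta_{|\sigma};\field{k})=1$, and Theorem \ref{Hochster} gives $\Bringk{1}{\sigma}{\Delta}=1$. This matches the single generator $x_{1}x_{2}$ of $\srid{\Delta}$, which contributes $\Bringk{1}{2}{\Delta}=1$.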
\section{Betti numbers of $i$-skeletons}\label{SecSimp}
\noindent Let $\Delta$ be a $d$-dimensional simplicial complex on $\{1,\ldots,n\}$, and let $\field{k}$ be a field. In this section we shall demonstrate how each of the Betti numbers of $\srring{\iskel{\Delta}{d-1}}$ can be expressed as a $\mathbb{Z}$-linear combination of the Betti numbers of $\srring{\Delta}$.
\subsection{The first rows of the Betti table}
\begin{lemma}\label{ansikter}\[\tilde{H}_{i}(\Delta_{|\sigma};\field{k})=\tilde{H}_{i}(\trures{\Delta}{d-1}{\sigma};\field{k})\] for all $0\leq i\leq d-2$.
\end{lemma}
\begin{proof}
By the definition of a skeleton we have $\ino{\Delta_{|\sigma}}{i}=\ino{\trures{\Delta}{d-1}{\sigma}}{i}$ and thus also $\field{k}^{\ino{\Delta_{|\sigma}}{i}}=\field{k}^{\ino{\trures{\Delta}{d-1}{\sigma}}{i}}$, for all $-1\leq i\leq d-1$. In other words, the reduced chain complexes of $\Delta_{|\sigma}$ and ${\iskel{\Delta}{d-1}}_{|\sigma}$ are identical except in homological degree $d$. The result follows.
\end{proof}
\begin{proposition}\label{likebortsettfrasiste}
For all $i$ and $j \leq d+i-1$ we have \[\Bringk{i}{j}{\Delta}=\Bringk{i}{j}{\iskel{\Delta}{d-1}}.\]
\end{proposition}
\begin{proof}
If $j \leq d+i-1$ then $j-i-1\leq d-2$. By Theorem \ref{Hochster} and Lemma \ref{ansikter} then, we have \begin{flalign*}\Bringk{i}{j}{\Delta}&=\sum_{|\sigma|=j}\Bringk{i}{\sigma}{\Delta}\\
&=\sum_{|\sigma|=j}\dim_{\field{k}}\tilde{H}_{|\sigma|-i-1}(\Delta_{|\sigma};\field{k})\\
&=\sum_{|\sigma|=j}\dim_{\field{k}}\tilde{H}_{|\sigma|-i-1}(\trures{\Delta}{d-1}{\sigma};\field{k})\\
&=\sum_{|\sigma|=j}\Bringk{i}{\sigma}{\iskel{\Delta}{d-1}}\\&=\Bringk{i}{j}{\iskel{\Delta}{d-1}}.\end{flalign*}
\end{proof}
\subsection{The final row of the Betti table}
The Hilbert series of $S/\srid{\Delta}$ over $\field{k}$ is $\hilb{S/\srid{\Delta}}=\sum_{i\in\mathbb{Z}}\dim_{\field{k}}(S/\srid{\Delta})_{i}\;t^{i}$. Let $\ff{\Delta}{i}=|\ino{\Delta}{i}|$. By \cite[Section 6.1.3, Equation (6.3)]{HH} we have \[\hilb{S/\srid{\Delta}}=\frac{\sum_{i=0}^{n}(-1)^i\sum_{j}\BbModulk{i}{j}{S/\srid{\Delta}}}{(1-t)^n}.\]
On the other hand, we see from \cite[Proposition 6.2.1]{HH} that \[\hilb{S/\srid{\Delta}}=\frac{\sum_{i=0}^{d+1}\ff{\Delta}{i-1}t^i(1-t)^{d+1-i}}{(1-t)^{d+1}}.\] Combined, these two equations imply
\begin{equation}\label{A}
\sum_{i=0}^{d+1}\ff{\Delta}{i-1}t^{i}(1-t)^{n-i}=\sum_{i=0}^{n}(-1)^{i}\sum_{j}\Bringk{i}{j}{\Delta}t^{j},
\end{equation}
and
\begin{equation}\label{B}
\sum_{i=0}^{d}\ff{\iskel{\Delta}{d-1}}{i-1}t^{i}(1-t)^{n-i}=\sum_{i=0}^{n}(-1)^{i}\sum_{j}\Bringk{i}{j}{\iskel{\Delta}{d-1}}t^{j}.
\end{equation}
\begin{remark}From here on we shall employ the convention that $i!=0$ for $i<0$, and that $\binom{j}{k}=0$ if one or both of $j$ and $k$ is negative.\end{remark}
Differentiating both sides of equation (\ref{A}) $n-d-1$ times, we get \begin{flalign*}&\sum_{i=0}^{d+1}\ff{\Delta}{i-1}\sum_{l=0}^{n-d-1}(-1)^{l}\binom{n-d-1}{l}\frac{i!(n-i)!}{(i-n+d+1+l)!(n-i-l)!}t^{i-n+d+1+l}(1-t)^{n-i-l}\\&=\sum_{i=0}^{n}(-1)^{i}\sum_{j}\Bringk{i}{j}{\Delta}\frac{j!}{(j-(n-d-1))!}t^{j-n+d+1}. \end{flalign*} When evaluated at $t=1$, the left side of the above equation is $0$ except when $i=d+1$ and $l=n-d-1$. Thus, we have \[(-1)^{n-d-1}(n-d-1)!\ff{\Delta}{d}=\sum_{i=0}^{n}(-1)^{i}\sum_{j\geq n-d-1}\Bringk{i}{j}{\Delta}\frac{j!}{(j-(n-d-1))!},\] and \[\ff{\Delta}{d}=\sum_{i=0}^{n}(-1)^{n+d+i+1}\sum_{j\geq n-d-1}\binom{j}{n-d-1}\Bringk{i}{j}{\Delta}.\]
\begin{lemma}\label{nedenfor}
For all $i$ and $j\geq d+i+2$ we have \[\Bringk{i}{j}{\Delta}=0.\]
\end{lemma}
\begin{proof}
If $|\sigma|\geq d+i+2$, then $|\sigma|-i-1\geq \dim(\Delta)+1$, which implies \[\dim_{\field{k}}\tilde{H}_{|\sigma|-i-1}(\Delta_{|\sigma};\field{k})=0.\] So by Hochster's formula we have that if $j\geq d+i+2$ then
\[\Bringk{i}{j}{\Delta}=\sum_{|\sigma|=j}\Bringk{i}{\sigma}{\Delta}=\sum_{|\sigma|=j}\dim_{\field{k}}\tilde{H}_{|\sigma|-i-1}(\Delta_{|\sigma};\field{k})=0.\]
\end{proof}
According to Proposition \ref{likebortsettfrasiste} and Lemma \ref{nedenfor}, and because $\ff{\Delta}{i}=\ff{\iskel{\Delta}{d-1}}{i}$ for all $i\neq d$, subtracting equation (\ref{B}) from equation (\ref{A}) yields \begin{flalign*}\label{pent}\ff{\Delta}{d}t^{d+1}(1-t)^{n-d-1}=&\sum_{i=0}^{n}(-1)^{i}\big(\Bringk{i}{d+i}{\Delta}-\Bringk{i}{d+i}{\iskel{\Delta}{d-1}}\big)t^{d+i}\\&+\sum_{i=0}^{n}(-1)^{i}\Bringk{i}{d+i+1}{\Delta}t^{d+i+1}.\end{flalign*} Let $1\leq u\leq n$. Differentiating both sides of the above equation $d+u$ times yields \begin{flalign*}&\ff{\Delta}{d}\sum_{l=0}^{d+u}(-1)^{l}\binom{d+u}{l}\frac{(d+1)!(n-d-1)!}{(l-u+1)!(n-d-1-l)!}t^{l-u+1}(1-t)^{n-d-1-l}\\=&\sum_{i=u}^{n}(-1)^{i}\big(\Bringk{i}{d+i}{\Delta}-\Bringk{i}{d+i}{\iskel{\Delta}{d-1}}\big)\frac{(d+i)!}{(i-u)!} t^{i-u}\\&+\sum_{i=u-1}^{n}(-1)^{i}\Bringk{i}{d+i+1}{\Delta}\frac{(d+i+1)!}{(i-u+1)!}t^{i-u+1}.\end{flalign*} Evaluating at $t=0$, we get \begin{flalign*}&\delta^{\prime}\cdot\left((-1)^{u-1}\ff{\Delta}{d}\frac{(d+u)!(n-d-1)!}{(u-1)!(n-d-u)!}\right)\\=&(-1)^{u}\big(\Bringk{u}{d+u}{\Delta}-\Bringk{u}{d+u}{\iskel{\Delta}{d-1}}\big)(d+u)!\\&+(-1)^{u-1}\Bringk{u-1}{d+u}{\Delta}(d+u)!,\end{flalign*} where \[\delta^{\prime}=\begin{cases}
1,& 1\leq u\leq n-d\\
0,& u> n-d
\end{cases}
.\]
Summarizing the above:
\begin{proposition}\label{main}
For $1\leq u\leq n$, we have \[\Bringk{u}{d+u}{\iskel{\Delta}{d-1}}=\Bringk{u}{d+u}{\Delta}-\Bringk{u-1}{d+u}{\Delta}+\binom{n-d-1}{u-1}\delta,\] where \[\delta=\begin{cases}\ff{\Delta}{d}=\sum_{i=0}^{n}(-1)^{n+d+i+1}\sum_{j\geq n-d-1}\binom{j}{n-d-1}\Bringk{i}{j}{\Delta},& 1\leq u\leq n-d\\0,& u> n-d.\end{cases}\]
\end{proposition}
Bringing together Propositions \ref{likebortsettfrasiste} and \ref{main}, we get
\begin{theorem}\label{mainThe}
For all $i\geq1$, we have
\[\Bringk{i}{j}{\iskel{\Delta}{d-1}}=\begin{cases}
\Bringk{i}{j}{\Delta}, & \text{$j \leq d+i-1$,} \\
\Bringk{i}{d+i}{\Delta}-\Bringk{i-1}{d+i}{\Delta}+\binom{n-d-1}{i-1}\delta, & \text{$j=d+i$,} \\
0, & \text{$j \geq d+i+1$}
\end{cases}\] where \[\delta=\begin{cases}\ff{\Delta}{d}=\sum_{k=0}^{n}(-1)^{n+d+k+1}\sum_{l\geq n-d-1}\binom{l}{n-d-1}\Bringk{k}{l}{\Delta},& 1\leq i\leq n-d\\0,& i> n-d.\end{cases}\]
\end{theorem}
\begin{example}\label{exTriang}
Let $T$ be one of the two irreducible triangulations of the real projective plane (see \cite{Bar}), namely the one corresponding to an embedding of the complete graph on $6$ vertices. Clearly then, we have $n=6$ and $d=2$. The Betti table of $\srring{T}$ over $\mathbb{F}_{3}$ is \[\beta[\srring{T};\mathbb{F}_3]=\begin{array}{r|cccc}
 & 0 & 1 & 2& 3 \\
\hline
0 & 1 & 0 & 0 & 0\\
1 & 0 & 0 & 0 & 0\\
2 & 0 & 10 & 15 & 6\\
\end{array}.\]
In this case \[\ff{T}{d}=\binom{3}{3}\BringKropp{1}{3}{T}{\mathbb{F}_3}-\binom{4}{3}\BringKropp{2}{4}{T}{\mathbb{F}_3}+\binom{5}{3}\BringKropp{3}{5}{T}{\mathbb{F}_3}=10-60+60=10.\] By Theorem \ref{mainThe}, the Betti numbers of $\srring{\iskel{T}{1}}$ are \begin{flalign*}
\BringKropp{1}{3}{\iskel{T}{1}}{\mathbb{F}_3}=&\BringKropp{1}{3}{T}{\mathbb{F}_3}+\binom{3}{0}\delta=10+10.\\
\BringKropp{2}{4}{\iskel{T}{1}}{\mathbb{F}_3}=&\BringKropp{2}{4}{T}{\mathbb{F}_3}-\BringKropp{1}{4}{T}{\mathbb{F}_3}+\binom{3}{1}\delta=15+30.\\
\BringKropp{3}{5}{\iskel{T}{1}}{\mathbb{F}_3}=&\BringKropp{3}{5}{T}{\mathbb{F}_3}-\BringKropp{2}{5}{T}{\mathbb{F}_3}+\binom{3}{2}\delta=6-0+30.\\
\BringKropp{4}{6}{\iskel{T}{1}}{\mathbb{F}_3}=&\BringKropp{4}{6}{T}{\mathbb{F}_3}-\BringKropp{3}{6}{T}{\mathbb{F}_3}+\binom{3}{3}\delta=0-0+10.\\
\end{flalign*}
\[\beta[\srring{\iskel{T}{1}};\mathbb{F}_3]=\begin{array}{r|ccccc}
 & 0 & 1 & 2 & 3 & 4 \\
\hline
0 & 1 & 0 & 0 & 0 & 0 \\
1 & 0 & 0 & 0 & 0 & 0 \\
2 & 0 & 20 & 45 & 36 & 10\\
\end{array}.\]
\end{example}
\begin{remark}
Observe that as \[\beta[\srring{T};\mathbb{F}_2]=\begin{array}{r|ccccc}
 & 0 & 1 & 2 & 3 & 4\\
\hline
0 & 1 & 0 & 0 & 0 & 0\\
1 & 0 & 0 & 0 & 0 & 0\\
2 & 0 & 10 & 15 & 6 & 1\\
3 & 0 & 0 & 0 & 1 & 0\\
\end{array},\] the simplicial complex $T$ of Example \ref{exTriang} is an example of a pure simplicial complex whose Betti numbers depend on the field $\field{k}$, in contrast to what is the case for matroids.
\end{remark}
\subsection{The projective dimension of skeletons}
Let $\ppd\srring{\Delta}$ denote the projective dimension of $\srring{\Delta}$. By the Auslander-Buchsbaum Theorem we have \begin{flalign*}\ppd{\srring{\Delta}}=&n-\depth \srring{\Delta}\\
\geq&n-\Kdim \srring{\Delta}\\=& n-(d+1), \end{flalign*} so $n-d-1\leq \ppd{\srring{\Delta}}\leq n$.
As for the skeletons, we have
\begin{corollary}\label{pdleq}
\[\ppd{\srring{\iskel{\Delta}{d-1}}}\leq 1+\ppd{\srring{\Delta}}.\]
\end{corollary}
\begin{proof}
Let $p=\ppd{\srring{\Delta}}$. By Proposition \ref{likebortsettfrasiste} the Betti numbers of $\srring{\iskel{\Delta}{d-1}}$ agree with those of $\srring{\Delta}$ for $j\leq d+i-1$, and by Lemma \ref{nedenfor} (applied to the $(d-1)$-dimensional complex $\iskel{\Delta}{d-1}$) they vanish for $j\geq d+i+1$; since the vanishing of $F_{p+2}$ in a minimal free resolution forces $F_{i}=0$ for all $i\geq p+2$, it therefore suffices to show that \[\Bringk{p+2}{d+p+2}{\iskel{\Delta}{d-1}}=0.\] But by Proposition \ref{main}, we have \begin{flalign*}\Bringk{p+2}{d+p+2}{\iskel{\Delta}{d-1}}=&\Bringk{p+2}{d+p+2}{\Delta}-\Bringk{p+1}{d+p+2}{\Delta}+\binom{n-d-1}{p+1}\delta\\=&0-0+0=0,\end{flalign*}
where the last equality holds because $p+1$ and $p+2$ exceed $\ppd{\srring{\Delta}}$, and $\delta=0$ since $p+2> n-d$.
\end{proof}
\begin{corollary}\label{CM}
If $\Delta$ is Cohen-Macaulay, then so is $\iskel{\Delta}{d-1}$.
\end{corollary}
\begin{proof}
Let $\Delta$ be a simplicial complex with $\dim(\Delta)=d$ and $\depth\srring{\Delta}=\Kdim\srring{\Delta}$. As $\Kdim\srring{\iskel{\Delta}{d-1}}=d$, we only need to prove that $\depth\srring{\iskel{\Delta}{d-1}}=d$ as well.
Since $\depth \srring{\iskel{\Delta}{d-1}}\leq \Kdim \srring{\iskel{\Delta}{d-1}}=d,$ we have by the Auslander-Buchsbaum Theorem that $\ppd \srring{\iskel{\Delta}{d-1}}\geq n-d.$ On the other hand, since \begin{flalign*}\ppd \srring{\Delta}=&n-\depth \srring{\Delta}\\=&n-\Kdim\srring{\Delta}\\=&n-(d+1),\end{flalign*} we see from Corollary \ref{pdleq} that $\ppd\srring{\iskel{\Delta}{d-1}}\leq n-d$. We conclude that \[\ppd\srring{\iskel{\Delta}{d-1}}= n-d\] and, by Auslander-Buchsbaum again, that $\depth \srring{\iskel{\Delta}{d-1}}=d.$
\end{proof}
\section{Betti numbers of truncations and elongations of matroids}\label{SecMat}
\noindent Let $M$ be a matroid on $\{1,\ldots,n\}$, with $r(M)=k$. As was established in \cite{Bjoe}, the dimension of $\tilde{H}_{i}(M;\field{k})$ is in fact independent of the field $\field{k}$. Thus \emph{for matroids, the ($\mathbb{N}_{0}$- or $\mathbb{N}_{0}^{n}$-graded) Betti numbers are not only unique, but independent of the choice of field}. We shall therefore omit referring to or specifying a particular field $\field{k}$ throughout this section. By a slight abuse of notation we shall denote the Stanley-Reisner ideal associated to the set of independent sets $I(M)$ of $M$ simply by $\srid{M}$.
\subsection{Truncations}
Note that the $i$th truncation of $M$ corresponds to the $(k-i-1)$-skeleton of $I(M)$, a fact which enables us to invoke Theorem \ref{mainThe}. In addition, it follows from \cite[Corollary 3(b)]{JV} that the minimal free resolutions of $\srring{M}$ have length $n-k$. We thus have
\begin{proposition}\label{hovedForMatroider}
For all $i$, we have
\[\Bring{i}{j}{\tru{M}{1}}=\begin{cases}
\Bring{i}{j}{M}, & \text{$j \leq k+i-2$.} \\
\Bring{i}{k+i-1}{M}-\Bring{i-1}{k+i-1}{M}\\+\binom{n-k}{i-1}\Big(\sum_{u=0}^{n-k}(-1)^{n+k+u}\sum_{v\geq n-k}\binom{v}{n-k}\Bring{u}{v}{M}\Big), & \text{$j=k+i-1$.} \\
0, & \text{$j\geq k+i$.}
\end{cases}
\]
\end{proposition}
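For example, let $M=U_{2,4}$, so that $n=4$ and $k=2$. Hochster's formula gives $\Bring{0}{0}{M}=1$, $\Bring{1}{3}{M}=4$ and $\Bring{2}{4}{M}=3$, and the parenthesized sum in the proposition equals $-\binom{3}{2}\cdot 4+\binom{4}{2}\cdot 3=6$, which is indeed the number of $2$-element independent sets of $M$. Proposition \ref{hovedForMatroider} then yields \[\Bring{1}{2}{\tru{M}{1}}=0-0+\binom{2}{0}\cdot 6=6,\quad \Bring{2}{3}{\tru{M}{1}}=0-4+\binom{2}{1}\cdot 6=8,\quad \Bring{3}{4}{\tru{M}{1}}=0-3+\binom{2}{2}\cdot 6=3,\] in agreement with a direct computation for $\tru{M}{1}=U_{1,4}$.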
\hide{In \cite{JRV} we demonstrate that the generalized Hamming weights of the first elongation $\elo{M}{1}$ of $M$ is determined by those of $M$ very directly by $d_i(\elo{M}{1})=d_{i+1}(M)$ for all $1\leq i\leq n-k-1$.
From Proposition \ref{hovedForMatroider}, we get a similar result for truncations.}
\begin{corollary}
For all $1\leq i\leq n-k+1$, we have \[d_i(\tru{M}{1})=\min\{d_i(M),k+i-1\}.\]
\end{corollary}
\begin{proof}
By \cite[Theorem 4]{JV} we have $d_i(\tru{M}{1})=\min\{j:\Bring{i}{j}{\tru{M}{1}}\neq0\}.$ The result now follows immediately from Proposition \ref{hovedForMatroider}.
\end{proof}
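For instance, for $M=U_{2,4}$ we have $d_{1}(M)=3$ and $d_{2}(M)=4$, so the corollary gives $d_{1}(\tru{M}{1})=\min\{3,2\}=2$ and $d_{2}(\tru{M}{1})=\min\{4,3\}=3$, in agreement with a direct computation for $\tru{M}{1}=U_{1,4}$, whose higher weights are $2$, $3$ and $4$.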
\subsection{Elongations}\label{SecElong}
When it comes to elongations, the Betti numbers of $M$ provide far less information about the Betti numbers of $\elo{M}{1}$ than what was the case with truncations. We do however have the following.
\begin{proposition}\label{bettitables}
For $i\geq1$, \[\Bnum{i}{j}{\elo{M}{l}}\neq0 \iff \Bnum{i-1}{j}{\elo{M}{l+1}}\neq0.\]
\end{proposition}
\begin{proof}
According to \cite[Theorem 1]{JV}, we have that \[\Bnum{i}{\sigma}{M}\neq 0 \iff \sigma \text{ is minimal with the property that } n_{M}(\sigma)=i+1.\] Since $\beta_{i,j}=\sum_{|\sigma|=j}\beta_{i,\sigma},$ we see that
\begin{flalign*}
\Bnum{i}{j}{\elo{M}{l}}&\neq 0\\ &\iff\\
\text{There is a } \sigma \text{ such that } |\sigma|=j \text{ and }& \sigma \text{ is minimal with the property that } n_{\elo{M}{l}}(\sigma)=i+1\\ &\iff\\
\text{There is a } \sigma \text{ such that } |\sigma|=j \text{ and }& \sigma \text{ is minimal with the property that } n_{\elo{M}{l+1}}(\sigma)=i\\ &\iff\\
\Bnum{i-1}{j}{\elo{M}{l+1}}&\neq 0.
\end{flalign*}
\end{proof}
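For example, for $M=U_{2,4}$ the sets that are minimal with nullity $1$ are the four $3$-element subsets, and the unique set that is minimal with nullity $2$ is $E$ itself, so $\Bnum{0}{3}{M}\neq0$ and $\Bnum{1}{4}{M}\neq0$. The elongation $\elo{M}{1}=U_{3,4}$ has $E$ as its unique set that is minimal with nullity $1$, so $\Bnum{0}{4}{\elo{M}{1}}\neq0$, as Proposition \ref{bettitables} predicts (take $l=0$, $i=1$ and $j=4$).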
In terms of Betti tables, this implies that, as far as the positions of zero and nonzero entries are concerned, the Betti table of $\srid{\elo{M}{i+1}}$ is equal to the table obtained by deleting the first column from the table of $\srid{\elo{M}{i}}$. As the following counterexample (computed using MAGMA \cite{MAGMA}) demonstrates, there can be no result for elongations analogous to Theorem \ref{mainThe}.
Let $M$ and $N$ be the matroids on $\{1,\ldots, 8\}$ with bases
\begin{flalign*}B(M)=\big\{
&\{ 1, 3, 4, 6, 7 \},
\{ 1, 2, 3, 6, 8 \},
\{ 1, 2, 3, 4, 8 \},
\{ 1, 2, 3, 5, 8 \},
\{ 1, 2, 5, 6, 8 \}, \\
&\{ 1, 2, 3, 4, 7 \},
\{ 1, 2, 3, 5, 7 \},
\{ 1, 2, 5, 6, 7 \},
\{ 1, 3, 4, 5, 7 \},
\{ 1, 3, 4, 6, 8 \},
\\
&\{ 1, 2, 4, 6, 8 \},
\{ 1, 2, 4, 6, 7 \},
\{ 1, 3, 4, 5, 8 \},
\{ 1, 2, 4, 5, 7 \},
\{ 1, 4, 5, 6, 7 \},
\\
&\{ 1, 2, 3, 6, 7 \},
\{ 1, 3, 5, 6, 7 \},
\{ 1, 4, 5, 6, 8 \},
\{ 1, 3, 5, 6, 8 \},
\{ 1, 2, 4, 5, 8 \}\big\}
\end{flalign*} and
\begin{flalign*}B(N)=\big\{
&\{ 1, 3, 4, 6, 7 \},
\{ 1, 2, 3, 4, 8 \},
\{ 1, 2, 3, 5, 8 \},
\{ 1, 2, 5, 6, 8 \},
\{ 1, 2, 3, 4, 7 \},
\\
&\{ 1, 2, 3, 5, 7 \},
\{ 1, 2, 5, 6, 7 \},
\{ 1, 3, 4, 5, 7 \},
\{ 1, 3, 4, 6, 8 \},
\{ 1, 2, 4, 6, 8 \},
\\
& \{ 1, 2, 4, 6, 7 \},
\{ 1, 3, 4, 5, 8 \},
\{ 1, 2, 4, 5, 7 \},
\{ 1, 3, 4, 5, 6 \},
\{ 1, 2, 4, 5, 6 \},
\\
&\{ 1, 3, 5, 6, 7 \},
\{ 1, 2, 3, 5, 6 \},
\{ 1, 2, 3, 4, 6 \},
\{ 1, 3, 5, 6, 8 \},
\{ 1, 2, 4, 5, 8 \}
\big\}.
\end{flalign*} Both $\srid{M}$ and $\srid{N}$ have Betti table
\[\begin{array}{r|ccc}
& 0 & 1& 2 \\
\hline
2 & 1 & 0&0\\
3 & 0 & 0&0\\
4 & 1 & 4&0\\
5 & 0 &5&4\\
\end{array},\]
but while $\srid{\elo{M}{1}}$ has Betti table
\[\begin{array}{r|cc}
& 1& 2 \\
\hline
5 & 1&0\\
6 &5&5\\
\end{array}\] the ideal $\srid{\elo{N}{1}}$ has Betti table \[\begin{array}{r|cc}
& 1& 2 \\
\hline
5 & 2&0\\
6 &3&4\\
\end{array}.\] This shows that the Betti numbers associated to a matroid do not determine those associated to its elongation.
\section{The $i$th skeleton ideal}\label{SecCounter}
\noindent As mentioned in the introduction, the Stanley-Reisner ideal of a skeleton is generalized in \cite{HVZ} and \cite{HJZ} to arbitrary monomial ideals. We shall briefly describe the construction as it is found in the above papers, and present a counterexample showing that our main result does not extend to these ideals.
For $\feit{a}, \feit{b}\in\mathbb{N}_{0}^{n}$ we say that $\feit{a}\leq\feit{b}$ if $\feit{a}(i)\leq \feit{b}(i)$ for $1\leq i \leq n$. Clearly, this constitutes a partial order on $\mathbb{N}_{0}^{n}$. Let $I,J\subseteq S$ be monomial ideals with (unique) minimal generating sets $\{\feit{x}^{\feit{a}_1},\ldots,\feit{x}^{\feit{a}_r}\}$ and $\{\feit{x}^{\feit{b}_1},\ldots,\feit{x}^{\feit{b}_s}\}$, respectively, and let $\feit{g}\in\mathbb{N}_{0}^{n}$ be such that $\feit{a}_i\leq \feit{g}$ and $\feit{b}_j\leq \feit{g}$ for all $1\leq i\leq r$, $1\leq j\leq s$. Define the \emph{characteristic poset} $P^{\feit{g}}_{J/I}$ of $J/I$ with respect to $\feit{g}$ to be \[P^{\feit{g}}_{J/I}=\{\feit{b}\in\mathbb{N}_{0}^{n}:\feit{b}\leq \feit{g},\feit{b}\geq \feit{b}_j \text{ for some }j,\feit{b}\not\geq \feit{a}_i \text{ for all }i\}.\] For $\feit{b}\in\mathbb{N}_{0}^{n}$, let $\rho(\feit{b})=|\{i:\feit{b}(i)=\feit{g}(i)\}|$. It is demonstrated in \cite[Corollary 2.6]{HVZ} that $\Kdim J/I=\max\{\rho(\feit{b}):\feit{b}\in P^{\feit{g}}_{J/I}\}$.
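For example, take $n=2$, $I=\langle x_{1}x_{2}\rangle$, $J=S$ and $\feit{g}=(1,1)$. Then $P^{\feit{g}}_{J/I}=\{(0,0),(1,0),(0,1)\}$, with $\rho((0,0))=0$ and $\rho((1,0))=\rho((0,1))=1$, so $\max\{\rho(\feit{b}):\feit{b}\in P^{\feit{g}}_{J/I}\}=1=\Kdim S/\langle x_{1}x_{2}\rangle$, as predicted by \cite[Corollary 2.6]{HVZ}.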
The $j$th \emph{generalized skeleton ideal} $\srid{j}$ is the ideal generated by $\{\feit{x}^{\feit{a}_1},\ldots,\feit{x}^{\feit{a}_r}\}\cup\{\feit{x}^{\feit{b}}:\feit{b}\in\mathbb{N}_{0}^{n}, \rho(\feit{b})>j\}.$ By \cite[Corollary 2.5]{HJZ} these ideals form a chain $I=\srid{d}\subseteq \srid{d-1}\subseteq\cdots\subseteq \srid 0\subseteq S$ with the property that $\srring{j}$ is Cohen-Macaulay for all $j\leq \depth\srring{}$, and $\depth\srring{}=\max\{j:\srring{j} \text{ is Cohen-Macaulay}\}$. In other words, these ideals successfully generalize \eqref{Hibis} from the introduction. Furthermore, in the special case $J=S$, $I=\srid{\Delta}$, and $\feit{g}=(1,1,\ldots,1)$, we have $\srid{j}=\srid{\iskel{\Delta}{j}}$.
Now, let $M$, $N$ be the matroids on $\{1,\ldots,6\}$ with \begin{flalign*}B(M)=\big\{&\{ 1, 3, 6 \},\{ 1, 3, 5 \},\{ 4, 5, 6 \},\{ 1, 3, 4 \},\{ 2, 3, 6 \},\{ 1, 2, 5 \},\\
&\{ 2, 4, 6 \},\{ 1, 4, 6 \},\{ 3, 5, 6 \},\{ 2, 3, 4 \},\{ 1, 2, 3 \},\{ 1, 5, 6 \},\\
&\{ 3, 4, 5 \}, \{ 1, 4, 5 \}, \{ 1, 2, 4 \}, \{ 2, 5, 6 \},\{ 2, 3, 5 \},\{ 3, 4, 6 \} \big\}
\end{flalign*} and \begin{flalign*}B(N)=\big\{&\{ 1, 3, 6 \}, \{ 1, 3, 5 \},\{ 4, 5, 6 \},\{ 1, 3, 4 \}, \{ 1, 2, 6 \},\{ 2, 3, 6 \},\\
&\{ 1, 2, 5 \},\{ 2, 4, 6 \},\{ 3, 5, 6 \},\{ 2, 3, 4 \},\{ 1, 2, 3 \},\{ 1, 5, 6 \},\\
&\{ 3, 4, 5 \},\{ 1, 4, 5 \},\{ 1, 2, 4 \},\{ 2, 5, 6 \},\{ 2, 4, 5 \},\{ 3, 4, 6 \} \big\}.
\end{flalign*}
Then \[\beta[\srring{M}]=\begin{array}{r|cccc}
& 0 & 1 & 2 & 3 \\
\hline
0 & 1 & 0 & 0 & 0 \\
1 & 0 & 0 & 0 & 0 \\
2 & 0 & 2 & 0 & 0 \\
3 & 0 & 9 & 18 & 8 \\
\end{array}=\beta[\srring{N}],\] for all base fields $\field{k}$. However, if we take $\feit{g}=(1,2,1,1,1,1)$ and $J=S=\polr{Q}{x}{n}$ in the above construction, we get
\[\beta[S/(\srid{M})_{(1)};\mathbb{Q}]=\begin{array}{r|ccccc}
& 0 & 1 & 2 & 3 & 4 \\
\hline
0 & 1 & 0 & 0 & 0 & 0 \\
1 & 0 & 0 & 0 & 0 & 0 \\
2 & 0 & 12 & 30 & 12 & 2 \\
3 & 0 & 17 & 24 & 24 & 8\\
\end{array}\] while \[\beta[S/(\srid{N})_{(1)};\mathbb{Q}]=\begin{array}{r|ccccc}
& 0 & 1 & 2 & 3 & 4 \\
\hline
0 & 1 & 0 & 0 & 0 & 0 \\
1 & 0 & 0 & 0 & 0 & 0 \\
2 & 0 & 11 & 27 & 9 & 1 \\
3 & 0 & 18 & 27 & 27 & 9\\
\end{array}.\]
We conclude that the statement of Theorem \ref{mainThe} does not necessarily hold if one replaces the Stanley-Reisner ideals of skeletons with generalized skeleton ideals.
\end{document}
\begin{document}
\title[2-adic Galois images of CM isogeny-torsion graphs]{2-adic Galois images of isogeny-torsion graphs over $\mathbb Q$ with CM}
\author{Garen Chiloyan}
\email{[email protected]}
\urladdr{https://sites.google.com/view/garenmath/home}
\subjclass{Primary: 11F80, Secondary: 11G05, 11G15, 14H52.}
\maketitle
\begin{abstract}
Let $\mathcal{E}$ be a $\mathbb Q$-isogeny class of elliptic curves defined over $\mathbb Q$. The isogeny graph associated to $\mathcal{E}$ is a graph which has a vertex for each elliptic curve over $\mathbb Q$ of $\mathcal{E}$ and an edge for each $\mathbb Q$-isogeny of prime degree that maps one elliptic curve in $\mathcal{E}$ to another elliptic curve in $\mathcal{E}$, with the degree of the isogeny recorded as a label of the edge. The isogeny-torsion graph associated to $\mathcal{E}$ is the isogeny graph associated to $\mathcal{E}$ where, in addition, we label each vertex with the abstract group structure of the torsion subgroup over $\mathbb Q$ of the corresponding elliptic curve. The main result of the article is a classification of the $2$-adic Galois image at each vertex of the isogeny-torsion graphs whose associated $\mathbb Q$-isogeny class consists of elliptic curves over $\mathbb Q$ with complex multiplication.
\end{abstract}
\section{Introduction}
Let $E/\mathbb Q$ be an elliptic curve. It is well known that $E$ has the structure of an abelian group with group identity which we will denote $\mathcal{O}$. By the Mordell--Weil theorem, the set of points on $E$ defined over $\mathbb Q$, denoted $E(\mathbb Q)$, has the structure of a finitely generated abelian group. Thus, the set of points on $E$ defined over $\mathbb Q$ of finite order, denoted $E(\mathbb Q)_{\text{tors}}$, is a finite abelian group. By Mazur's theorem, $E(\mathbb Q)_{\text{tors}}$ is isomorphic to one of fifteen groups (see Theorem \ref{thm-mazur}), and each of these fifteen groups occurs infinitely often. Let $E'/\mathbb Q$ be an elliptic curve. An isogeny mapping $E$ to $E'$ is a rational morphism $\phi \colon E \to E'$ such that $\phi$ maps the identity of $E$ to the identity of $E'$. If there is a non-constant isogeny defined over $\mathbb Q$ mapping $E$ to $E'$, we say that $E$ is $\mathbb Q$-isogenous to $E'$. This relation is an equivalence relation, and the set of elliptic curves defined over $\mathbb Q$ that are $\mathbb Q$-isogenous to $E$ is called the $\mathbb Q$-isogeny class of $E$.
An isogeny is a group homomorphism and the kernel of a non-constant isogeny is finite. We are particularly interested in non-constant isogenies with cyclic kernels. The isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is a visual description of the $\mathbb Q$-isogeny class of $E$. Denote the $\mathbb Q$-isogeny class of $E$ by $\mathcal{E}$. The isogeny graph associated to $\mathcal{E}$ is a graph which has a vertex for each elliptic curve in $\mathcal{E}$ and an edge for each $\mathbb Q$-isogeny of prime degree that maps one elliptic curve in $\mathcal{E}$ to another elliptic curve in $\mathcal{E}$, with the degree recorded as a label of the edge. The isogeny-torsion graph associated to $\mathcal{E}$ is the isogeny graph associated to $\mathcal{E}$ where, in addition, we label each vertex with the abstract group structure of the torsion subgroup over $\mathbb{Q}$ of the corresponding elliptic curve.
\begin{example}\label{T4 example}
There are four elliptic curves in the $\mathbb Q$-isogeny class with LMFDB label \texttt{27.a}, which we will denote $E_{1}$, $E_{2}$, $E_{3}$, and $E_{4}$. The isogeny graph associated to \texttt{27.a} is the first diagram below and the isogeny-torsion graph associated to \texttt{27.a} is the second.
\begin{center} \begin{tikzcd}
E_{1} \arrow[r, "3", no head] & E_{2} \arrow[r, "3", no head] & E_{3} \arrow[r, "3", no head] & E_{4}
\end{tikzcd} \end{center}
\begin{center} \begin{tikzcd}
\mathbb{Z} / 3 \mathbb{Z} \arrow[r, no head, "3"] & \mathbb{Z} / 3 \mathbb{Z} \arrow[r, no head, "3"] & \mathbb{Z} / 3 \mathbb{Z} \arrow[r, no head, "3"] & \mathcal{O}
\end{tikzcd}
\end{center}
\end{example}
A classification of the isogeny graphs associated to $\mathbb Q$-isogeny classes of elliptic curves over $\mathbb Q$ is proven in Section 6 of \cite{gcal-r}.
\begin{thm}
There are $26$ isomorphism types of isogeny graphs that are associated to $\mathbb Q$-isogeny classes of elliptic curves defined over $\mathbb Q$. More precisely, there are $16$ types of (linear) $L_k$ graphs of $k = 1$, $2$, $3$, or $4$ vertices, $3$ types of (nonlinear two-primary torsion) $T_k$ graphs of $k = 4$, $6$, or $8$ vertices, $6$ types of (rectangular) $R_k$ graphs of $k = 4$ or $6$ vertices, and $1$ (special) $S$ graph (see Tables 1--4 in \cite{gcal-r}).
\end{thm}
The isogeny class degree of $\mathcal{E}$ is the least common multiple of the degrees of all cyclic, $\mathbb Q$-rational isogenies mapping elliptic curves over $\mathbb Q$ in $\mathcal{E}$ to elliptic curves over $\mathbb Q$ in $\mathcal{E}$. In other words, the isogeny class degree of $\mathcal{E}$ is equal to the greatest degree of a cyclic, $\mathbb Q$-rational isogeny that maps an elliptic curve in $\mathcal{E}$ to an elliptic curve in $\mathcal{E}$. For example, if the isogeny graph associated to $\mathcal{E}$ is of $L_{4}$ type, then the isogeny class degree of $\mathcal{E}$ is equal to $27$.
In the case of an isogeny graph of $L_{2}$, $L_{3}$, or $R_{4}$ type, the isogeny class degree of the $\mathbb Q$-isogeny class is written in parentheses to distinguish it from other isogeny-torsion graphs of the same size and shape, but with different isogeny class degree. For example, there are $L_{2}(2)$ graphs, that is, graphs of $L_{2}$ type generated by an isogeny of degree $2$, and there are $L_{2}(3)$ graphs, that is, isogeny graphs of $L_{2}$ type generated by an isogeny of degree $3$. Relying only on the size and shape of isogeny graphs of $L_{2}$ type is not enough to distinguish isogeny graphs of $L_{2}(2)$ type from isogeny graphs of $L_{2}(3)$ type. On the other hand, the isogeny graph of $L_{4}$ type is the only linear isogeny graph with four vertices, so it is not necessary to designate the isogeny class degree in parentheses to distinguish it from other isogeny-torsion graphs. The main theorem in \cite{gcal-r} was the classification of isogeny-torsion graphs associated to $\mathbb Q$-isogeny classes of elliptic curves over $\mathbb Q$.
\begin{thm}[Chiloyan, Lozano-Robledo, \cite{gcal-r}]
There are $52$ isomorphism types of isogeny-torsion graphs that are associated to $\mathbb{Q}$-isogeny classes of elliptic curves defined over $\mathbb{Q}$. In particular, there are $23$ isogeny-torsion graphs of $L_k$ type, $13$ isogeny-torsion graphs of $T_k$ type, $12$ isogeny-torsion graphs of $R_k$ type, and $4$ isogeny-torsion graphs of $S$ type.
\end{thm}
We denote the cyclic group of order $a$ by $[a]$ and, for $b = 1$, $2$, $3$, $4$, we denote the group $\mathbb Z / 2 \mathbb Z \times \mathbb Z / 2b \mathbb Z$ by $[2,2b]$. We record the torsion configuration of an isogeny-torsion graph as a tuple of such groups, ordered according to the enumeration of the elliptic curves in the isogeny-torsion graph. For example, reconsider the $\mathbb Q$-isogeny class with LMFDB label \texttt{27.a} and its associated isogeny graph and isogeny-torsion graph $\mathcal{G}$.
\begin{center} \begin{tikzcd}
E_{1} \arrow[r, "3", no head] & E_{2} \arrow[r, "3", no head] & E_{3} \arrow[r, "3", no head] & E_{4}
\end{tikzcd} \end{center}
\begin{center} \begin{tikzcd}
\mathbb{Z} / 3 \mathbb{Z} \arrow[r, no head, "3"] & \mathbb{Z} / 3 \mathbb{Z} \arrow[r, no head, "3"] & \mathbb{Z} / 3 \mathbb{Z} \arrow[r, no head, "3"] & \mathcal{O}
\end{tikzcd}
\end{center}
Then we will denote the torsion configuration of $\mathcal{G}$ as $([3],[3],[3],[1])$. For another case, consider the $\mathbb Q$-isogeny class $\mathcal{E}$ with LMFDB label \texttt{17.a}. Then the isogeny graph of $\mathcal{E}$ is below on the left and the isogeny-torsion graph of $\mathcal{E}$ is below on the right:
\begin{center}
\begin{tikzcd}
& E_{2} & \\
& E_{1} \arrow[u, no head, "2"] \arrow[ld, no head, "2"'] \arrow[rd, no head, "2"] & \\
E_{3} & & E_{4}
\end{tikzcd}, \begin{tikzcd}
& \mathbb{Z} / 4 \mathbb{Z} & \\
& \mathbb{Z} / 2 \mathbb{Z} \times \mathbb{Z} / 2 \mathbb{Z} \arrow[u, no head, "2"] \arrow[ld, no head, "2"'] \arrow[rd, no head, "2"] & \\
\mathbb{Z} / 4 \mathbb{Z} & & \mathbb{Z} / 2 \mathbb{Z}
\end{tikzcd}
\end{center}
We denote the torsion configuration of this isogeny-torsion graph by $([2,2],[4],[4],[2])$.
Let $E/\mathbb Q$ be an elliptic curve with complex multiplication (CM) and denote the $\mathbb Q$-isogeny class of $E$ by $\mathcal{E}$ and the isogeny-torsion graph associated to $\mathcal{E}$ by $\mathcal{G}$. Then all elliptic curves over $\mathbb Q$ in $\mathcal{E}$ have CM. Thus, it is natural to say that $\mathcal{E}$ and $\mathcal{G}$ are defined over $\mathbb Q$ and have CM. Moreover, it is natural to regard the classification of the $2$-adic Galois image at each vertex of $\mathcal{G}$ as a classification of the $2$-adic Galois image of $\mathcal{G}$ itself. The isogeny-torsion graph $\mathcal{G}$ is of $L_{2}(p)$, $L_{4}$, $T_{4}$, $R_{4}(6)$, or $R_{4}(14)$ type, where $p \in \{2, 3, 11, 19, 43, 67, 163\}$. Table \ref{tab-CMgraphs} classifies the isogeny-torsion graphs with CM; the same table appears as Table 5 in Section 4 of \cite{gcal-r}.
\begin{table}[h!]
\renewcommand{\arraystretch}{1.2}
\begin{tabular}{|c|c|c|c|c|c|}
\hline
$d_K$ & \multicolumn{2}{c|}{$j$} & Type & Torsion config. & LMFDB\\
\hline
\hline
\multirow{10}{*}{$-3$} & \multirow{6}{*}{$0$} & $y^2=x^3+t^3, t=-3,1$ & $R_4(6)$ & $([6],[6],[2],[2])$ & \texttt{36.a4}\\
& &$y^2=x^3+t^3, t\neq -3,1$ & $R_4(6)$ & $([2],[2],[2],[2])$ & \texttt{144.a3}\\
& & $y^2=x^3+16t^3, t=-3,1$ & $L_4$ & $([3],[3],[3],[1])$ & \texttt{27.a3}\\
& & $y^2=x^3+16t^3, t\neq -3,1$ & $L_4$ & $([1],[1],[1],[1])$ & \texttt{432.e3}\\
& & $\!y^2=x^3+s^2,\,s^2\neq t^3,16t^3\!\!$ & $L_2(3)$ & $([3],[1])$ & \texttt{108.a2}\\
& & $\!y^2=x^3+s,\,s\neq t^3,16t^3\!\!$ & $L_2(3)$ & $([1],[1])$ & \texttt{225.c1}\\
\cline{2-6}
& \multirow{2}{*}{$54000$} & $y^2=x^3-15t^2x + 22t^3, t=1,3$ & $R_4(6)$ & $([6],[6],[2],[2])$ & \texttt{36.a1}\\
& & $y^2=x^3-15t^2x + 22t^3, t\neq 1,3$ & $R_4(6)$ & $([2],[2],[2],[2])$ & \texttt{144.a1}\\
\cline{2-6}
& \multirow{2}{*}{$-12288000$} & $E^t, t=-3,1$ & $L_4$ & $([3],[3],[3],[1])$ & \texttt{27.a2}\\
& & $E^t, t\neq -3,1$ & $L_4$ & $([1],[1],[1],[1])$ & \texttt{432.e1}\\
\hline
\multirow{7}{*}{$-4$} & \multirow{4}{*}{$1728$} & $y^2=x^3+tx, t=-1,4$ & $T_4$ & $([2,2],[4],[4],[2])$ & \texttt{32.a3}\\
& & $y^2=x^3+tx, t=-4,1$ & $T_4$ & $([2,2],[4],[2],[2])$ & \texttt{64.a3}\\
& & $y^2=x^3\pm t^2x, t\neq 1,2$ & $T_4$ & $([2,2],[2],[2],[2])$ & \texttt{288.d3}\\
& & $y^2=x^3+sx$, $s\neq \pm t^2$ & $L_2(2)$ & $([2],[2])$ & \texttt{256.b1}\\
\cline{2-6}
& \multirow{3}{*}{$287496$} & $y^2=x^3-11t^2x+14t^3, t=\pm 1$ & \multirow{3}{*}{$T_4$} & $([2,2],[4],[4],[2])$ & \texttt{32.a2}\\
& & $y^2=x^3-11t^2x+14t^3, t=\pm 2$ & & $([2,2],[4],[2],[2])$ & \texttt{64.a1}\\
& & $y^2=x^3-11t^2x+14t^3, t\neq \pm 1, \pm 2$ & & $([2,2],[2],[2],[2])$ & \texttt{288.d1}\\
\hline
\multirow{2}{*}{$-7$} & \multicolumn{2}{c|}{$-3375$} & \multirow{2}*{$R_4(14)$} & $([2],[2],[2],[2])$ & \texttt{49.a2}\\
& \multicolumn{2}{c|}{$16581375$} & & $([2],[2],[2],[2])$ & \texttt{49.a1}\\
\hline
$-8$ & \multicolumn{2}{c|}{$8000$} & $L_2(2)$ & $([2],[2])$ & \texttt{256.a1}\\
\hline
$-11$ & \multicolumn{2}{c|}{$-32768$} & $L_2(11)$ & $([1],[1])$ & \texttt{121.b1}\\
\hline
$-19$ & \multicolumn{2}{c|}{$-884736$} & $L_2(19)$ & $([1],[1])$ & \texttt{361.a1} \\
\hline
$-43$ & \multicolumn{2}{c|}{$-884736000$} & $L_2(43)$ & $([1],[1])$ & \texttt{1849.b1}\\
\hline
$-67$ & \multicolumn{2}{c|}{$-147197952000$} & $L_2(67)$ & $([1],[1])$ & \texttt{4489.b1} \\
\hline
$-163$ & \multicolumn{2}{c|}{$-262537412640768000$} & $L_2(163)$ & $([1],[1])$ & \texttt{26569.a1} \\
\hline
\end{tabular}
\caption{The list of rational $j$-invariants with CM and the possible isogeny-torsion graphs that occur, where $E^t$ denotes the curve $y^2=x^3-38880t^2x+2950992t^3$.}
\label{tab-CMgraphs}
\end{table}
\begin{example}
Let $E/\mathbb Q$ be an elliptic curve such that the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $L_{4}$ type. Then $E$ is represented by one of the elliptic curves $E_{1}$, $E_{2}$, $E_{3}$, or $E_{4}$ in the isogeny graph below.
\begin{center} \begin{tikzcd}
E_{1} \longrightarrow[r, "3", no head] & E_{2} \longrightarrow[r, "3", no head] & E_{3} \longrightarrow[r, "3", no head] & E_{4}
\end{tikzcd} \end{center}
No matter the torsion configuration of the isogeny-torsion graph, $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to the group $\mathcal{N}_{-1,1}(2^{\infty}) \subseteq \operatorname{GL}(2, \mathbb Z_{2})$, in the notation introduced below (see Table \ref{TableA}).
\end{example}
The reason why the $2$-adic Galois images of all elliptic curves in an $L_{4}$ graph are conjugate is that $3$ is odd (see Corollary \ref{coprime isogeny-degree}). The situation will not always be as seamless as this: when there are non-trivial cyclic isogenies of $2$-power degree in the isogeny graph, the $2$-adic Galois images at the vertices will typically differ.
Let $\delta$, $\phi$, and $N$ be integers such that $N \geq 0$. Denote the subgroup of $\operatorname{GL}\left(2, \mathbb Z / 2^{N} \mathbb Z\right)$ of matrices of the form $\begin{bmatrix}
a + b \cdot \phi & b \\ \delta \cdot b & a
\end{bmatrix}$ by $\mathcal{C}_{\delta, \phi}\left(2^{N}\right)$ and let $\mathcal{N}_{\delta, \phi}\left(2^{N}\right) = \left\langle \mathcal{C}_{\delta, \phi}(2^{N}), \begin{bmatrix} -1 & 0 \\ \phi & 1 \end{bmatrix} \right\rangle$. Finally, let $\mathcal{N}_{\delta,\phi}\left(2^{\infty}\right) = \varprojlim_{N} \mathcal{N}_{\delta,\phi}\left(2^{N}\right)$.
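A convenient way to read the parameters $\delta$ and $\phi$: the matrix $\begin{bmatrix} a + b \cdot \phi & b \\ \delta \cdot b & a \end{bmatrix}$ is the matrix of multiplication by $a+b\theta$ on $(\mathbb Z / 2^{N} \mathbb Z)[\theta]$ with respect to the basis $\{\theta, 1\}$, where $\theta^{2}=\phi\theta+\delta$, so $\mathcal{C}_{\delta, \phi}\left(2^{N}\right)$ is the unit group of $(\mathbb Z / 2^{N} \mathbb Z)[\theta]$, with $\mathbb Z[\theta]$ the quadratic order of discriminant $\phi^{2}+4\delta$. For instance, $(\delta,\phi)=(-1,1)$ corresponds to discriminant $-3$, $(-1,0)$ to $-4$, $(-2,1)$ to $-7$, and $(-7,0)$ to $-28$.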
\begin{example}
Let $E/\mathbb Q$ be an elliptic curve such that the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $R_{4}(14)$ type (see below).
\begin{center}
\begin{tikzcd}
{E_{1}} \arrow[dd, no head, "7"'] \arrow[rr, no head, "2"] & & {E_{2}} \arrow[dd, no head, "7"] \\
& & \\
{E_{3}} \arrow[rr, no head, "2"'] & & {E_{4}}
\end{tikzcd}
\end{center}
If $E$ is represented by $E_{1}$ or $E_{3}$, then $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-7,0}(2^{\infty})$, and if $E$ is represented by $E_{2}$ or $E_{4}$, then $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-2,1}(2^{\infty})$.
\iffalse
Then $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-7,0}(2^{\infty}) = \left\langle C_{-7,0}(2^{\infty}), \begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix} \right\rangle$ or $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-7,1}(2^{\infty}) = \left\langle C_{-7,0}(2^{\infty}), \begin{bmatrix} -1 & 0 \\ 1 & 1 \end{bmatrix} \right\rangle$ where $C_{-7,0}(2^{\infty})$ is the set of all $2 \times 2$, invertible matrices over $\mathbb Z_{2}$ of the form $\begin{bmatrix} a & b \\ -7b & a \end{bmatrix}$.
\fi
\end{example}
Section \ref{sec-background} covers background material and some lemmas. Section \ref{sec-Alvaros work} reviews work of Lozano-Robledo classifying the $2$-adic Galois images of elliptic curves defined over $\mathbb Q$ with complex multiplication. Section \ref{proofs} contains the proof of Proposition \ref{propmain}, which fully classifies the $2$-adic Galois images attached to isogeny-torsion graphs defined over $\mathbb Q$ with complex multiplication. The proof is broken up into many steps, organized by isogeny graph type and \textit{j}-invariant.
\begin{proposition}\label{propmain}
Let $\mathcal{G}$ be a CM isogeny-torsion graph defined over $\mathbb Q$. Then $\mathcal{G}$ fits into Table \ref{TableA} or Table \ref{TableB} with the given classification of the corresponding $2$-adic Galois image of its vertices. Examples of the possible CM isogeny-torsion graphs with the given $2$-adic Galois image classification are provided in the final column of each table.
\begin{center}
\begin{table}[h!]
\renewcommand{\arraystretch}{1.3}
\scalebox{0.49}{
\begin{tabular}{|c|c|c|c|c|c|c|}
\hline
Isogeny Graph & Torsion & $\rho_{E_{1},2^{\infty}}(G_{\mathbb Q})$ & $\rho_{E_{2},2^{\infty}}(G_{\mathbb Q})$ & $\rho_{E_{3},2^{\infty}}(G_{\mathbb Q})$ & $\rho_{E_{4},2^{\infty}}(G_{\mathbb Q})$ & Example\\
\hline
\multirow{2}*{\includegraphics[width=40mm]{L4_isogeny_graph.png}} & $([3],[3],[3],[1])$ & \multirow{2}*{$\mathcal{N}_{-1,1}(2^{\infty})$} & \multirow{2}*{$\mathcal{N}_{-1,1}(2^{\infty})$} & \multirow{2}*{$\mathcal{N}_{-1,1}(2^{\infty})$} & \multirow{2}*{$\mathcal{N}_{-1,1}(2^{\infty})$} & \texttt{27.a} \\
\cline{2-2}
\cline{7-7}
& $([1],[1],[1],[1])$ & & & & & \texttt{432.e} \\
\cline{1-7}
\multirow{4}*{\includegraphics[width=20mm]{R46_isogeny_graph.png}} & \multirow{2}*{$([6],[6],[2],[2])$} & \multirow{4}*{$\left\langle \operatorname{-Id}, \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}, \begin{bmatrix} 7 & 4 \\ -4 & 3 \end{bmatrix}, \begin{bmatrix} 3 & 6 \\ -6 & -3 \end{bmatrix} \right\rangle$} & \multirow{4}*{$\mathcal{N}_{-3,0}(2^{\infty})$} & \multirow{4}*{$\left\langle \operatorname{-Id}, \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}, \begin{bmatrix} 7 & 4 \\ -4 & 3 \end{bmatrix}, \begin{bmatrix} 3 & 6 \\ -6 & -3 \end{bmatrix} \right\rangle$} & \multirow{4}*{$\mathcal{N}_{-3,0}(2^{\infty})$} & \multirow{2}*{\texttt{36.a}} \\
& & & & & & \\
\cline{2-2}
\cline{7-7}
& \multirow{2}*{$([2],[2],[2],[2])$} & & & & & \multirow{2}*{\texttt{144.a}} \\
& & & & & & \\
\cline{1-7}
\multirow{4}*{\includegraphics[width=20mm]{R414_isogeny_graph.png}} & \multirow{4}*{$([2],[2],[2],[2])$} & \multirow{4}*{$\mathcal{N}_{-7,0}(2^{\infty})$} & \multirow{4}*{$\mathcal{N}_{-2,1}(2^{\infty})$} & \multirow{4}*{$\mathcal{N}_{-7,0}(2^{\infty})$} & \multirow{4}*{$\mathcal{N}_{-2,1}(2^{\infty})$} & \multirow{4}*{\texttt{49.a}} \\
& & & & & & \\
& & & & & & \\
& & & & & & \\
\hline
\multirow{6}*{\includegraphics[width = 25mm]{T4_isogeny_graph.png}} & \multirow{2}*{$([2,2],[4],[4],[2])$} & \multirow{2}*{$\left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} -1 & -2 \\ 2 & -1 \end{bmatrix}, \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} \right\rangle$} & \multirow{2}*{$\left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} -1 & -2 \\ 2 & -1 \end{bmatrix}, \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix} \right\rangle$} & \multirow{2}*{$\left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}, \begin{bmatrix} -1 & -1 \\ 4 & -1 \end{bmatrix} \right\rangle$} & \multirow{2}*{$\left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix}, \begin{bmatrix} -1 & -1 \\ 4 & -1 \end{bmatrix} \right\rangle$} & \multirow{2}*{\texttt{32.a}} \\
& & & & & & \\
\cline{2-7}
& \multirow{2}*{$([2,2],[2],[4],[2])$} & \multirow{2}*{$\left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix}, \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} \right\rangle$} & \multirow{2}*{$\left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix}, \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix} \right\rangle$} & \multirow{2}*{$\left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ -4 & 1 \end{bmatrix} \right\rangle$} & \multirow{2}*{$\left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ -4 & 1 \end{bmatrix} \right\rangle$} & \multirow{2}*{\texttt{64.a}} \\
& & & & & & \\
\cline{2-7}
& \multirow{2}*{$([2,2],[2],[2],[2])$} & \multirow{2}*{$\left\langle \operatorname{-Id}, 3 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix}, \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} \right\rangle$} & \multirow{2}*{$\left\langle \operatorname{-Id}, 3 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix}, \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix} \right\rangle$} & \multirow{2}*{$\mathcal{N}_{-4,0}(2^{\infty})$} & \multirow{2}*{$\mathcal{N}_{-4,0}(2^{\infty})$} & \multirow{2}*{288.d} \\
& & & & & & \\
\hline
\end{tabular}}
\caption{}
\label{TableA}
\end{table}
\end{center}
\begin{center}
\begin{table}[h!]
\renewcommand{\arraystretch}{1.3}
\scalebox{0.79}{
\begin{tabular}{|c|c|c|c|c|c|}
\hline
Isogeny Graph & $p$ & Torsion & $\rho_{E_{1},2^{\infty}}(G_{\mathbb Q})$ & $\rho_{E_{2},2^{\infty}}(G_{\mathbb Q})$ & Example \\
\hline
\multirow{26}*{\includegraphics[width = 40mm]{L2p_isogeny_graph.png}} & \multirow{12}*{$2$} & \multirow{12}*{$([2],[2])$} & \multirow{2}*{$\left\langle 3 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ -2 & 1 \end{bmatrix} \right\rangle$} & \multirow{2}*{$\left\langle 3 \cdot \operatorname{Id}, \begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ -2 & 1 \end{bmatrix} \right\rangle$} & \multirow{2}*{\texttt{256.a}} \\
& & & & & \\
\cline{4-6}
& & & \multirow{2}*{$\mathcal{N}_{-2,0}(2^{\infty})$} & \multirow{2}*{$\mathcal{N}_{-2,0}(2^{\infty})$} & \multirow{2}*{\texttt{2304.h}} \\
& & & & & \\
\cline{4-6}
& & & \multirow{2}*{$\left\langle -\operatorname{Id}, 3 \cdot \operatorname{Id}, \begin{bmatrix} 2 & 1 \\ -1 & 2 \end{bmatrix}, \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} \right\rangle$} & \multirow{2}*{$\left\langle -\operatorname{Id}, 3 \cdot \operatorname{Id}, \begin{bmatrix} 2 & 1 \\ -1 & 2 \end{bmatrix}, \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix} \right\rangle$} & \multirow{2}*{\texttt{2304.a}} \\
& & & & & \\
\cline{4-6}
& & & \multirow{2}*{$\left\langle 3 \cdot \operatorname{Id}, \begin{bmatrix} 2 & -1 \\ 1 & 2 \end{bmatrix}, \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} \right\rangle$} & \multirow{2}*{$\left\langle 3 \cdot \operatorname{Id}, \begin{bmatrix} 2 & -1 \\ 1 & 2 \end{bmatrix}, \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix} \right\rangle$} & \multirow{2}*{\texttt{256.c}} \\
& & & & & \\
\cline{4-6}
& & & \multirow{2}*{$\left\langle 3 \cdot \operatorname{Id}, \begin{bmatrix} -2 & 1 \\ -1 & -2 \end{bmatrix}, \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} \right\rangle$} & \multirow{2}*{$\left\langle 3 \cdot \operatorname{Id}, \begin{bmatrix} -2 & 1 \\ -1 & -2 \end{bmatrix}, \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix} \right\rangle$} & \multirow{2}*{\texttt{256.b}} \\
& & & & & \\
\cline{4-6}
& & & \multirow{2}*{$\mathcal{N}_{-1,0}(2^{\infty})$} & \multirow{2}*{$\mathcal{N}_{-1,0}(2^{\infty})$} & \multirow{2}*{\texttt{288.a}} \\
& & & & & \\
\cline{2-6}
& \multirow{4}*{$3$} & \multirow{2}*{$([3],[1])$} & \multirow{4}*{$\mathcal{N}_{-1,1}(2^{\infty})$} & \multirow{4}*{$\mathcal{N}_{-1,1}(2^{\infty})$} & \multirow{2}*{\texttt{108.a}} \\
& & & & & \\
\cline{3-3}
\cline{6-6}
& & \multirow{2}*{$([1],[1])$} & & & \multirow{2}*{\texttt{225.c}} \\
& & & & & \\
\cline{2-6}
& \multirow{2}*{$11$} & \multirow{10}*{$([1],[1])$} & \multirow{2}*{$\mathcal{N}_{-3,1}(2^{\infty})$} & \multirow{2}*{$\mathcal{N}_{-3,1}(2^{\infty})$} & \multirow{2}*{\texttt{121.b}} \\
& & & & & \\
\cline{2-2}
\cline{4-6}
& \multirow{2}*{$19$} & & \multirow{2}*{$\mathcal{N}_{-5,1}(2^{\infty})$} & \multirow{2}*{$\mathcal{N}_{-5,1}(2^{\infty})$} & \multirow{2}*{\texttt{361.a}} \\
& & & & & \\
\cline{2-2}
\cline{4-6}
& \multirow{2}*{$43$} & & \multirow{2}*{$\mathcal{N}_{-11,1}(2^{\infty})$} & \multirow{2}*{$\mathcal{N}_{-11,1}(2^{\infty})$} & \multirow{2}*{\texttt{1849.b}} \\
& & & & & \\
\cline{2-2}
\cline{4-6}
& \multirow{2}*{$67$} & & \multirow{2}*{$\mathcal{N}_{-17,1}(2^{\infty})$} & \multirow{2}*{$\mathcal{N}_{-17,1}(2^{\infty})$} & \multirow{2}*{\texttt{4489.b}} \\
& & & & & \\
\cline{2-2}
\cline{4-6}
& \multirow{2}*{$163$} & & \multirow{2}*{$\mathcal{N}_{-41,1}(2^{\infty})$} & \multirow{2}*{$\mathcal{N}_{-41,1}(2^{\infty})$} & \multirow{2}*{\texttt{26569.a}} \\
& & & & & \\
\hline
\end{tabular}}
\caption{}
\label{TableB}
\end{table}
\end{center}
\end{proposition}
\iffalse
Section \ref{sec-background} will be devoted to going over background and some lemmas, Section \ref{sec-Alvaros work} will be devoted to going over work by Lozano-Robledo in classifying the $2$-adic Galois image of elliptic curves defined over $\mathbb Q$ with complex multiplication and Section \ref{proofs} will have the proofs of the following propositions and will fully classify the $2$-adic Galois image attached to isogeny-torsion graphs defined over $\mathbb Q$ with complex multiplication.
\begin{proposition}
Let $E/\mathbb Q$ be an elliptic curve with complex multiplication such that the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $L_{4}$ type or of $L_{2}(3)$ type. Then $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-1,1}(2^{\infty})$.
\end{proposition}
\begin{proposition}
Let $E/\mathbb Q$ be an elliptic curve with CM by a number field $K$ with discriminant $\Delta_{K}$. Suppose that the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $L_{2}(p)$ type with $p \in \{11, 19, 43, 67, 163\}$. Then $\rho_{E,2^{\infty}}(G_{\mathbb{Q}})$ is conjugate to $\mathcal{N}_{\frac{\Delta_{K}-1}{4},1}(2^{\infty})$.
\end{proposition}
\begin{proposition}
Let $E/\mathbb Q$ be an elliptic curve such that the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $R_{4}(14)$ type. Then, $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-7,0}(2^{\infty})$ or $\mathcal{N}_{-2,1}(2^{\infty})$.
\end{proposition}
\begin{proposition}
Let $E/\mathbb Q$ be an elliptic curve such that $E$ has CM. Suppose that the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $R_{4}(6)$ type. Then $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-3,0}(2^{\infty})$ or
$\left\langle \operatorname{-Id}, \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}, \begin{bmatrix} 7 & 4 \\ -4 & 3 \end{bmatrix}, \begin{bmatrix} 3 & 6 \\ -6 & -3 \end{bmatrix} \right\rangle$.
\end{proposition}
\begin{proposition}
Let $E/\mathbb Q$ be an elliptic curve with $\textit{j}_{E} = 8000$. Then $E$ is $2$-isogenous to an elliptic curve $E'/\mathbb Q$ with $\textit{j}_{E'} = 8000$. The isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of type $L_{2}(2)$. Denote
\begin{center} $H_{1,3} = \left\langle \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}, \begin{bmatrix} 3 & 0 \\ 0 & 3 \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ -2 & 1 \end{bmatrix} \right\rangle$ and $H_{-1,3} = \left\langle \begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix}, \begin{bmatrix} 3 & 0 \\ 0 & 3 \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ -2 & 1 \end{bmatrix} \right\rangle$. \end{center}
Then $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ fits in the following table
\begin{center} \begin{table}[h!]
\renewcommand{\longrightarrowaystretch}{1.6}
\begin{tabular} { |c|c|c| }
\hline
Isogeny graph & $\rho_{E_{1},2^{\infty}}(G_{\mathbb Q})$ & $\rho_{E_{2},2^{\infty}}(G_{\mathbb Q})$ \\
\hline
\multirow{2}*{\includegraphics[scale=0.05]{L22_graph.png}} & $H_{1, 3}$ & $H_{-1, 3}$ \\
\cline{2-3}
& $\mathcal{N}_{-2,0}(2^{\infty})$ & $\mathcal{N}_{-2,0}(2^{\infty})$ \\
\hline
\end{tabular}
\end{table} \end{center}
\end{proposition}
\begin{proposition}
Define the following subgroups of $\operatorname{GL}(2, \mathbb Z_{2})$
\begin{itemize}
\item $G_{1} = \left\{ \begin{bmatrix} a & b \\ -b & a \end{bmatrix} : a^{2}+b^{2} \not \equiv 0 \mod 2 \right\}$
\item $G_{2,a} = \left\langle \operatorname{-Id}, 3 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix} \right\rangle$
\item $G_{2,b} = \left\langle \operatorname{-Id}, 3 \cdot \operatorname{Id}, \begin{bmatrix} 2 & 1 \\ -1 & 2 \end{bmatrix} \right\rangle$
\item $G_{4,a} = \left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix} \right\rangle$
\item $G_{4,b} = \left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} -1 & -2 \\ 2 & -1 \end{bmatrix} \right\rangle$
\item $G_{4,c} = \left\langle 3 \cdot \operatorname{Id}, \begin{bmatrix} 2 & -1 \\ 1 & 2 \end{bmatrix} \right\rangle$
\item $G_{4,d} = \left\langle 3 \cdot \operatorname{Id}, \begin{bmatrix} -2 & 1 \\ -1 & -2 \end{bmatrix} \right\rangle$
\end{itemize}
and let $\Gamma = \left\{c_{1} = \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}, c_{-1} = \begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix}, c_{1}' = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}, c_{-1}' = \begin{bmatrix} 0 & -1 \\ -1 & 0 \end{bmatrix} \right\}$. Let $E/\mathbb Q$ be an elliptic curve such that $\textit{j}_{E} = 1728$. Then $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle H, \gamma \right\rangle$ where $H$ is one of the seven groups above and $\gamma \in \Gamma$.
For $\epsilon \in \left\{\pm 1 \right\}$, denote
\begin{center} $H_{\epsilon} = \left\langle \begin{bmatrix} \epsilon & 0 \\ 0 & -\epsilon \end{bmatrix}, \begin{bmatrix} 5 & 0 \\ 0 & 5 \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ -4 & 1 \end{bmatrix} \right\rangle$ and $H_{\epsilon}' = \left\langle \begin{bmatrix} \epsilon & 0 \\ 0 & -\epsilon \end{bmatrix}, \begin{bmatrix} 5 & 0 \\ 0 & 5 \end{bmatrix}, \begin{bmatrix} -1 & -1 \\ 4 & -1 \end{bmatrix} \right\rangle$. \end{center}
If the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $T_{4}$ type, then the $2$-adic Galois images fit into Table \ref{first 1728}.
\begin{table}[h!]
\renewcommand{\longrightarrowaystretch}{2}
\scalebox{0.8}{
\begin{tabular}{|c|c|c|c|c|c|}
\hline
Isogeny graph & Torsion Configuration & $\rho_{\mathcal{E}_{1},2^{\infty}}(G_{\mathbb Q})$ & $\rho_{\mathcal{E}_{2},2^{\infty}}(G_{\mathbb Q})$ & $\rho_{\mathcal{E}_{3},2^{\infty}}(G_{\mathbb Q})$ & $\rho_{\mathcal{E}_{4},2^{\infty}}(G_{\mathbb Q})$ \\
\hline
\multirow{3}*{\includegraphics[scale=0.09]{T4_isogeny_graph.png}} & $([2,2],[2],[2],[2])$ & $\left\langle G_{2,a}, c_{1} \right\rangle$ & $\left\langle G_{2,a}, c_{1}' \right\rangle$ & $\mathcal{N}_{-4,0}(2^{\infty})$ & $\mathcal{N}_{-4,0}(2^{\infty})$ \\
\cline{2-6}
& $([2,2],[2],[4],[2])$ & $\left\langle G_{4,a}, c_{1} \right\rangle$ & $\left\langle G_{4,a}, c_{1}' \right\rangle$ & $H_{1}$ & $H_{-1}$ \\
\cline{2-6}
& $([2,2],[4],[4],[2])$ & $\left\langle G_{4,b}, c_{1} \right\rangle$ & $\left\langle G_{4,b}, c_{1}' \right\rangle$ & $H_{1}'$ & $H_{-1}'$ \\
\hline
\end{tabular}}
\end{table}
\label{first 1728}
If the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $L_{2}(2)$ type, then the $2$-adic Galois images fit into Table \ref{second 1728}.
\begin{table}[h!]
\renewcommand{\longrightarrowaystretch}{1.6}
\scalebox{0.7}{
\begin{tabular}{|c|c|c|c|}
\hline
Isogeny graph & Torsion configuration & $\rho_{\mathcal{E}_{1},2^{\infty}}(G_{\mathbb Q})$ & $\rho_{\mathcal{E}_{2},2^{\infty}}(G_{\mathbb Q})$ \\
\hline
\multirow{4}*{\includegraphics[scale=0.09]{L22_graph_2.png}} & \multirow{4}*{$([2],[2])$} & $\left\langle G_{2,b}, c_{1} \right\rangle$ & $\left\langle G_{2,b}, c_{1}' \right\rangle$ \\
\cline{3-4}
& & $\left\langle G_{4,c}, c_{1} \right\rangle$ & $\left\langle G_{4,c}, c_{1}' \right\rangle$ \\
\cline{3-4}
& & $\left\langle G_{4,d}, c_{1} \right\rangle$ & $\left\langle G_{4,d}, c_{1}' \right\rangle$ \\
\cline{3-4}
& & $\mathcal{N}_{-1,0}(2^{\infty})$ & $\mathcal{N}_{-1,0}(2^{\infty})$ \\
\hline
\end{tabular}}
\end{table}
\label{second 1728}
\end{proposition}
\fi
\section{Background and some lemmas}\label{sec-background}
\subsection{Elliptic curves, isogeny graphs, and isogeny-torsion graphs}
Let $E/\mathbb Q$ be an elliptic curve. Then $E$ has the structure of an abelian group. Let $N$ be a positive integer. The set of points on $E$ of order dividing $N$ with coordinates in $\overline{\mathbb Q}$ is a group, denoted $E[N]$, and is isomorphic to $\mathbb Z / N \mathbb Z \times \mathbb Z / N \mathbb Z$. An element of $E[N]$ is called an $N$-torsion point. Let $E/\mathbb Q$ and $E'/\mathbb Q$ be elliptic curves. An isogeny mapping $E$ to $E'$ is a non-constant morphism $\phi \colon E \to E'$ that maps the identity of $E$ to the identity of $E'$. An isogeny is a group homomorphism with a kernel of finite order. The degree of an isogeny agrees with the order of its kernel.
Let $M$ be an integer and let $[M] \colon E \to E$ be the map such that
\begin{center}
$\begin{cases}
[M](P) = \underbrace{P+ \ldots +P}_{M} & M \geq 1 \\
[M](P) = \underbrace{(-P) + \ldots +(-P)}_{-M} & M \leq -1 \\
[M](P) = \mathcal{O} & M = 0
\end{cases}$
\end{center}
We call the map $[M]$ the multiplication-by-$M$ map. The endomorphism ring of $E$, denoted $\operatorname{End}(E)$, is the set of all isogenies mapping $E$ to $E$ together with the zero map $[0]$. All of the multiplication-by-$M$ maps are elements of $\operatorname{End}(E)$. If $\operatorname{End}(E)$ consists solely of the multiplication-by-$M$ maps, then $\operatorname{End}(E)$ is ring-isomorphic to $\mathbb Z$ and $E$ is said to not have complex multiplication (CM). Otherwise, $E$ has CM and $\operatorname{End}(E)$ is isomorphic as a ring to an order in an imaginary quadratic field.
\begin{example}
Let $E$ be the elliptic curve with LMFDB label \texttt{11.a1}. Then $E$ does not have CM. In other words, $\operatorname{End}(E) \cong \mathbb Z$.
\end{example}
\begin{example}
Let $E$ be the elliptic curve $y^{2} = x^{3} - x$. Consider the isogeny $[i] \colon E \to E$ that maps $\mathcal{O}$ to $\mathcal{O}$ and maps a point $(a,b)$ on $E$ to the point $(-a,ib)$. The kernel of $[i]$ is trivial, so the degree of $[i]$ is equal to $1$. The only multiplication-by-$M$ maps of degree $1$ are $[1]$ and $[-1]$, and $[i]$ is equal to neither, so $[i]$ is an endomorphism of $E$ that is not a multiplication-by-$M$ map. Hence, $E$ has CM and $\operatorname{End}(E) = \mathbb Z + [i] \cdot \mathbb Z \cong \mathbb Z[i]$. Note that the $i$ in $\mathbb Z + [i] \cdot \mathbb Z$ designates the map $[i]$ and the $i$ in $\mathbb Z[i]$ designates a root of $x^{2}+1$.
\end{example}
Let $E/\mathbb Q$ be an elliptic curve, written in homogeneous (projective) coordinates. The group $G_{\mathbb Q}:= \operatorname{Gal}(\overline{\mathbb Q}/\mathbb Q)$ has a natural action on $E[N]$ for all positive integers $N$; for each point $[a:b:c] \in E$ and each $\sigma \in G_{\mathbb Q}$, we have
$$\sigma \cdot [a:b:c] = [\sigma(a):\sigma(b):\sigma(c)].$$
From this action, we have the mod-$N$ Galois representation attached to $E$:
$$ \overline{\rho}_{E,N} \colon G_{\mathbb Q} \to \operatorname{Aut}(E[N]).$$
After identifying $E[N] \cong \mathbb Z / N \mathbb Z \times \mathbb Z / N \mathbb Z$ and fixing a set of (two) generators of $E[N]$, we may consider the mod-$N$ Galois representation attached to $E$ as
$$\overline{\rho}_{E,N} \colon G_{\mathbb Q} \to \operatorname{GL}(2,\mathbb Z / N \mathbb Z).$$
Let $\ell$ be a prime and denote $\rho_{E,\ell^{\infty}}(G_{\mathbb Q}) = \varprojlim \overline{\rho}_{E,\ell^{N}}(G_{\mathbb Q})$. In particular, the group $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is the main focus of this paper. Let $u$ be an element of $\left(\mathbb Z / N \mathbb Z \right)^{\times}$. By the properties of the Weil pairing, there exists an element of $\overline{\rho}_{E,N}(G_{\mathbb Q})$ whose determinant is equal to $u$. Moreover, $\overline{\rho}_{E,N}(G_{\mathbb Q})$ has an element that behaves like complex conjugation.
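For example, take $N = 2$. Fixing a basis of $E[2]$ identifies $\operatorname{Aut}(E[2])$ with $\operatorname{GL}(2,\mathbb Z / 2 \mathbb Z) \cong S_{3}$, and $\overline{\rho}_{E,2}$ simply records how $G_{\mathbb Q}$ permutes the three points of order $2$ on $E$, equivalently, how it permutes the three roots of the $2$-division polynomial of $E$.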
\begin{definition}
Let $E/\mathbb Q$ be a (homogenized) elliptic curve. A point $P$ on $E$ is said to be defined over $\mathbb Q$ if $P = [a:b:c]$ for some $a,b,c \in \mathbb Q$.
\end{definition}
The set of all points on $E$ defined over $\mathbb Q$ is denoted $E(\mathbb Q)$. By the Mordell--Weil theorem, $E(\mathbb Q)$ has the structure of a finitely-generated abelian group. Let $E(\mathbb Q)_{\text{tors}}$ denote the set of points on $E$ defined over $\mathbb Q$ of finite order.
\begin{thm}[Mazur \cite{mazur1}]\label{thm-mazur}
Let $E/\mathbb{Q}$ be an elliptic curve. Then
\[
E(\mathbb{Q})_\mathrm{tors}\simeq
\begin{cases}
\mathbb Z/M\mathbb Z &\text{with}\ 1\leq M\leq 10\ \text{or}\ M=12,\ \text{or}\\
\mathbb Z/2\mathbb Z \times \mathbb Z/2N\mathbb Z &\text{with}\ 1\leq N \leq 4.
\end{cases}
\]
\end{thm}
Moreover, each of the fifteen torsion subgroups occurs for infinitely many \textit{j}-invariants. We now move on to the possible isogenies with finite, cyclic kernel.
\begin{defn}
Let $E/\mathbb Q$ be an elliptic curve. A subgroup $H$ of $E$ of finite order is said to be $\mathbb Q$-rational if $\sigma(H) = H$ for all $\sigma \in G_{\mathbb Q}$.
\end{defn}
\begin{remark}
Note that for an elliptic curve $E / \mathbb Q$, a group generated by a point $P$ on $E$ defined over $\mathbb Q$ of finite order is certainly a $\mathbb Q$-rational group but in general, the elements of a $\mathbb Q$-rational subgroup of $E$ need not be \textit{fixed} by $G_{\mathbb Q}$. For example, $E[3]$ is a $\mathbb Q$-rational subgroup of $E$ of order $9$ and $G_{\mathbb Q}$ fixes one or three of the nine elements of $E[3]$ by Theorem \ref{thm-mazur}.
\end{remark}
\begin{lemma}[III.4.12, \cite{Silverman}]\label{Q-rational}
Let $E/\mathbb Q$ be an elliptic curve. Then for each finite, cyclic, $\mathbb Q$-rational subgroup $H$ of $E$, there is a unique elliptic curve defined over $\mathbb Q$ up to isomorphism denoted $E / H$, and an isogeny $\phi_{H} \colon E \to E / H$ with kernel $H$.
\end{lemma}
\begin{remark}
Note that it is only the elliptic curve $E/H$ that is unique (up to isomorphism); the isogeny $\phi_{H}$ is not necessarily unique. For any isogeny $\phi$, the isogeny $-\phi$ has the same domain, codomain, and kernel as $\phi$. Moreover, for any positive integer $N$, $\phi$ and $[N] \circ \phi$ have the same domain and the same codomain, but the kernel of $[N] \circ \phi$ is larger and in general no longer cyclic. This is why the correspondence in Lemma \ref{Q-rational} is stated for \textit{cyclic} $\mathbb Q$-rational subgroups instead of for all $\mathbb Q$-rational subgroups.
\end{remark}
The $\mathbb Q$-rational points on the modular curves $\operatorname{X}_{0}(N)$ have been described completely in the literature, for all $N\geq 1$. One of the most important milestones in the classification was \cite{mazur1}, where Mazur dealt with the case when $N$ is prime. The complete classification of $\mathbb{Q}$-rational points on $\operatorname{X}_{0}(N)$, for any $N$, was completed due to work by Fricke, Kenku, Klein, Kubert, Ligozat, Mazur and Ogg, among others (see the summary tables in \cite{lozano0}).
\begin{thm}\label{thm-ratnoncusps} Let $N\geq 2$ be a number such that $\operatorname{X}_{0}(N)$ has a non-cuspidal $\mathbb Q$-rational point. Then:
\begin{enumerate}
\item $N\leq 10$, or $N= 12,13, 16,18$ or $25$. In this case $\operatorname{X}_{0}(N)$ is a curve of genus $0$ and its $\mathbb{Q}$-rational points form an infinite $1$-parameter family, or
\item $N=11,14,15,17,19,21$, or $27$. In this case $\operatorname{X}_{0}(N)$ is a curve of genus $1$, i.e.,~$\operatorname{X}_{0}(N)$ is an elliptic curve over $\mathbb{Q}$, but in all cases the Mordell-Weil group $\operatorname{X}_{0}(N)(\mathbb{Q})$ is finite, or
\item $N=37,43,67$ or $163$. In this case $\operatorname{X}_{0}(N)$ is a curve of genus $\geq 2$ and (by Faltings' theorem) there are only finitely many $\mathbb{Q}$-rational points, which are known explicitly.
\end{enumerate}
\end{thm}
\begin{defn}
Let $E/\mathbb{Q}$ be an elliptic curve. We define $C(E)$ as the number of finite, cyclic, $\mathbb Q$-rational subgroups of $E$ (including the trivial subgroup), and we define $C_p(E)$ similarly to $C(E)$ but only counting cyclic, $\mathbb Q$-rational subgroups of order a power of $p$ (like in the definition of $C(E)$, this includes the trivial subgroup), for each prime $p$.
\end{defn}
Notice that it follows from the definition that $C(E)=\prod_p C_p(E)$: a finite cyclic subgroup $H$ of $E$ is the direct product of its $p$-primary parts, each of which is cyclic and, being characteristic in $H$, is $\mathbb Q$-rational whenever $H$ is; conversely, any choice of one finite, cyclic, $\mathbb Q$-rational $p$-subgroup for each prime $p$ (almost all of them trivial) generates a finite, cyclic, $\mathbb Q$-rational subgroup of $E$.
\begin{thm}[Kenku, \cite{kenku}]\label{thm-kenku} There are at most eight $\mathbb{Q}$-isomorphism classes of elliptic curves in each $\mathbb{Q}$-isogeny class. More concretely, let $E / \mathbb{Q}$ be an elliptic curve, then $C(E)=\prod_p C_p(E)\leq 8$. Moreover, each factor $C_p(E)$ is bounded as follows:
\begin{center}
\begin{tabular}{c|ccccccccccccc}
$p$ & $2$ & $3$ & $5$ & $7$ & $11$ & $13$ & $17$ & $19$ & $37$ & $43$ & $67$ & $163$ & \text{else}\\
\hline
$C_p\leq $ & $8$ & $4$ & $3$ & $2$ & $2$ & $2$ & $2$ & $2$ & $2$ & $2$ & $2$ & $2$ & $1$.
\end{tabular}
\end{center}
Moreover:
\begin{enumerate}
\item If $C_{p}(E) = 2$ for a prime $p$ greater than $7$, then $C_{q}(E) = 1$ for all other primes $q$.
\item Suppose $C_{7}(E) = 2$, then $C(E) \leq 4$. Moreover, we have $C_{3}(E) = 2$, or $C_{2}(E) = 2$, or $C(E) = 2$.
\item $C_{5}(E) \leq 3$ and if $C_{5}(E) = 3$, then $C(E) = 3$.
\item If $C_{5}(E) = 2$, then $C(E) \leq 4$. Moreover, either $C_{3}(E) = 2$, or $C_{2}(E) = 2$, or $C(E) = 2$.
\item $C_{3}(E) \leq 4$ and if $C_{3}(E) = 4$, then $C(E) = 4$.
\item If $C_{3}(E) = 3,$ then $C(E) \leq 6$. Moreover, $C_{2}(E) = 2$ or $C(E) = 3$.
\item If $C_{3}(E) = 2$, then $C_{2}(E) \leq 4$.
\end{enumerate}
\end{thm}
Instead of viewing each elliptic curve over $\mathbb Q$ in a $\mathbb Q$-isogeny class individually, we can view them all together. The best way to visualize the $\mathbb Q$-isogeny class is to use its associated isogeny graph.
\begin{thm}\label{thm-mainisogenygraphs}
There are $26$ isomorphism types of isogeny graphs that are associated to $\mathbb Q$-isogeny classes of elliptic curves defined over $\mathbb Q$. More precisely, there are $16$ types of (linear) $L_{k}$ graphs, $3$ types of (nonlinear two-primary torsion) $T_{k}$ graphs, $6$ types of (rectangular) $R_{k}$ graphs, and $1$ type of (special) $S$ graph. Moreover, there are $11$ isomorphism types of isogeny graphs that are associated to $\mathbb Q$-isogeny classes of elliptic curves over $\mathbb Q$ with complex multiplication, namely the types $L_{2}(p)$ for $p=2,3,11,19,43,67,163$, $L_{4}$, $T_{4}$, $R_{4}(6)$, and $R_{4}(14)$. Finally, the isogeny graphs of type $L_{4}$, $R_{4}(14)$, and $L_{2}(p)$ for $p \in \{19, 43, 67, 163\}$ occur exclusively for elliptic curves with CM.
\end{thm}
The main theorem in \cite{gcal-r} was the classification of isogeny-torsion graphs that occur over $\mathbb Q$.
\begin{thm}\label{thm-main2} There are $52$ isomorphism types of isogeny-torsion graphs that are associated to $\mathbb Q$-isogeny classes of elliptic curves defined over $\mathbb Q$. In particular, there are $23$ types of $L_{k}$ graphs, $13$ types of $T_{k}$ graphs, $12$ types of $R_{k}$ graphs, and $4$ types of $S$ graphs. Moreover, there are $16$ isomorphism types of isogeny-torsion graphs that are associated to $\mathbb Q$-isogeny classes of elliptic curves over $\mathbb Q$ with complex multiplication (and examples are given in Table \ref{tab-CMgraphs}).
\end{thm}
The $16$ isomorphism types of isogeny-torsion graphs that occur over $\mathbb Q$ with CM are the two isogeny-torsion graphs of type $L_{4}$
\begin{center}
\begin{tikzcd}
\mathbb{Z} / M \mathbb{Z} \arrow[r, no head, "3"] & \mathbb{Z} / M \mathbb{Z} \arrow[r, no head, "3"] & \mathbb{Z} / M \mathbb{Z} \arrow[r, no head, "3"] & \mathcal{O}
\end{tikzcd}
\end{center}
where $M = 1$ or $3$, the eight isogeny-torsion graphs of type $L_{2}$
\begin{center}
\begin{tikzcd}
\mathbb{Z} / 2 \mathbb{Z} \arrow[r, no head, "2"] & \mathbb{Z} / 2 \mathbb{Z}
\end{tikzcd}, \begin{tikzcd}
\mathbb{Z} / M \mathbb{Z} \arrow[r, no head, "3"] & \mathcal{O}
\end{tikzcd}, \begin{tikzcd}
\mathcal{O} \arrow[r, no head, "p"] & \mathcal{O}
\end{tikzcd}
\end{center}
where $M = 3$ or $1$, and $p = 11$, $19$, $43$, $67$, or $163$, the three isogeny-torsion graphs of $R_{4}$ type
\begin{center}
\begin{tikzcd}
\mathbb{Z} / 2 \mathbb{Z} \arrow[rr, no head, "2"] \arrow[d, no head, "7"'] & & \mathbb{Z} / 2 \mathbb{Z} \arrow[d, no head, "7"] \\
\mathbb{Z} / 2 \mathbb{Z} \arrow[rr, no head, "2"'] & & \mathbb{Z} / 2 \mathbb{Z}
\end{tikzcd}, \begin{tikzcd}
\mathbb{Z} / M \mathbb{Z} \arrow[rr, no head, "2"] \arrow[d, no head, "3"'] & & \mathbb{Z} / M \mathbb{Z} \arrow[d, no head, "3"] \\
\mathbb{Z} / 2 \mathbb{Z} \arrow[rr, no head, "2"'] & & \mathbb{Z} / 2 \mathbb{Z}
\end{tikzcd}
\end{center}
where $M = 2$ or $6$, and the three isogeny-torsion graphs of $T_{4}$ type.
\begin{center}
\begin{tikzcd}
& \mathbb{Z} / 2 \mathbb{Z} & \\
& \mathbb{Z} / 2 \mathbb{Z} \times \mathbb{Z} / 2 \mathbb{Z} \arrow[u, no head, "2"] \arrow[ld, no head, "2"'] \arrow[rd, no head, "2"] & \\
\mathbb{Z} / 2 \mathbb{Z} & & \mathbb{Z} / 2 \mathbb{Z}
\end{tikzcd} \begin{tikzcd}
& \mathbb{Z} / 2 \mathbb{Z} & \\
& \mathbb{Z} / 2 \mathbb{Z} \times \mathbb{Z} / 2 \mathbb{Z} \arrow[u, no head, "2"] \arrow[ld, no head, "2"'] \arrow[rd, no head, "2"] & \\
\mathbb{Z} / 4 \mathbb{Z} & & \mathbb{Z} / 2 \mathbb{Z}
\end{tikzcd} \begin{tikzcd}
& \mathbb{Z} / 2 \mathbb{Z} & \\
& \mathbb{Z} / 2 \mathbb{Z} \times \mathbb{Z} / 2 \mathbb{Z} \arrow[u, no head, "2"] \arrow[ld, no head, "2"'] \arrow[rd, no head, "2"] & \\
\mathbb{Z} / 4 \mathbb{Z} & & \mathbb{Z} / 4 \mathbb{Z}
\end{tikzcd}
\end{center}
We continue with some lemmas and more background that will be used to classify the $2$-adic Galois images attached to isogeny-torsion graphs with CM.
\subsection{Quadratic Twists}
\begin{lemma}\label{group quadratic twists}
Let $N$ be a positive integer such that $\operatorname{GL}(2,\mathbb Z / N \mathbb Z)$ has a subgroup $H$ that does not contain $\operatorname{-Id}$. Let $H' = \left\langle \operatorname{-Id}, H \right\rangle$. Then $H' \cong \left\langle \operatorname{-Id} \right\rangle \times H$.
\end{lemma}
\begin{proof}
Since $\operatorname{-Id} \notin H$ and $\operatorname{-Id}$ is a central element of order $2$, we have $H' = H \cup (\operatorname{-Id})H$, so $H$ is a subgroup of $H'$ of index $2$. Hence, the order of $H'$ is equal to the order of $\left\langle \operatorname{-Id} \right\rangle \times H$. Define
$$\psi \colon \left\langle \operatorname{-Id} \right\rangle \times H \to H'$$
by $\psi(x,h) = xh$. Then $\psi$ is a group homomorphism as $\operatorname{-Id}$ is in the center of $\operatorname{GL}(2,\mathbb Z/N\mathbb Z)$. We are done if we prove that $\psi$ is injective. Let $(x,h) \in \left\langle \operatorname{-Id} \right\rangle \times H$ such that $\psi(x,h) = xh = \operatorname{Id}$. Then $h = x^{-1} = x$ as the order of $x$ is equal to $1$ or $2$. As $\operatorname{-Id} \notin H$, $h$ cannot be $\operatorname{-Id}$. Hence, $h = x = \operatorname{Id}$ and so, $\psi$ is injective. \end{proof}
\begin{definition}
Let $G$ and $H$ be subgroups of $\operatorname{GL}(2, \mathbb Z_{2})$. Then we will say that $G$ and $H$ are quadratic twists (of each other) if $G$ is the same as $H$, up to multiplication of some elements of $H$ by $\operatorname{-Id}$. Note that if $\left\langle G, \operatorname{-Id} \right\rangle = \left\langle H, \operatorname{-Id} \right\rangle$, then $H$ and $G$ are quadratic twists.
\end{definition}
\begin{lemma}\label{index of quadratic twists}
Let $N$ be a positive integer, let $H$ be a subgroup of $\operatorname{GL}(2,\mathbb Z/N\mathbb Z)$, and let $H' = \left\langle H, \operatorname{-Id} \right\rangle$. Let $\chi$ be a quadratic character of $H$ and let $\chi(H)$ denote the twisted group $\{\chi(h)h : h \in H\}$. Then $\chi(H) = H'$ or $\chi(H)$ is a subgroup of $H'$ of index $2$.
\end{lemma}
\begin{proof}
The character $\chi$ multiplies some elements of $H$ by $\operatorname{-Id}$. If $\operatorname{-Id} \in \chi(H)$, then we can multiply all of the elements of $H$ that $\chi$ multiplied by $\operatorname{-Id}$ by $\operatorname{-Id}$ again, and recover all of the elements of $H$. Thus, $\chi(H)$ is a subgroup of $H'$ that contains both $\operatorname{-Id}$ and $H$, and so $\chi(H) = H'$.
On the other hand, suppose that $\operatorname{-Id} \notin \chi(H)$. Let $\chi(H)' = \left\langle \chi(H), \operatorname{-Id} \right\rangle$. By the same argument as before, we can multiply all of the elements of $H$ that $\chi$ multiplied by $\operatorname{-Id}$ by $\operatorname{-Id}$ again, and recover all of the elements of $H$ inside $\chi(H)'$. In other words, $\chi(H)' = \left\langle \operatorname{-Id}, \chi(H) \right\rangle = \left\langle \operatorname{-Id}, H \right\rangle = H'$. By Lemma \ref{group quadratic twists}, $H' = \left\langle \chi(H), \operatorname{-Id} \right\rangle \cong \left\langle \operatorname{-Id} \right\rangle \times \chi(H)$, and $\chi(H)$ is a subgroup of $H'$ of index $2$.
\end{proof}
Let $E : y^{2} = x^{3} + Ax + B$ be an elliptic curve and let $d$ be a non-zero integer. Then the quadratic twist of $E$ by $d$ is the elliptic curve $E^{(d)} : y^{2} = x^{3} + d^{2}Ax + d^{3}B$. Equivalently, $E^{(d)}$ is isomorphic to the elliptic curve defined by $dy^{2} = x^{3} + Ax + B$. Moreover, $E$ is isomorphic to $E^{(d)}$ over $\mathbb Q(\sqrt{d})$ by the map $\phi \colon E \to E^{(d)}$
defined by fixing $\mathcal{O}$ and mapping any non-zero point $(a,b)$ on $E$ to $\left(a,\frac{b}{\sqrt{d}}\right)$.
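As a quick check that this is well defined, note that if $(a,b)$ satisfies $b^{2} = a^{3} + Aa + B$, then
$$d\left(\frac{b}{\sqrt{d}}\right)^{2} = b^{2} = a^{3} + Aa + B,$$
so $\left(a, \frac{b}{\sqrt{d}}\right)$ indeed lies on the model $dy^{2} = x^{3} + Ax + B$ of $E^{(d)}$, and $\phi$ has the obvious inverse over $\mathbb Q(\sqrt{d})$.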
\begin{corollary}\label{subgroups of index 2 contain -Id}
Let $N$ be a positive integer such that $\operatorname{GL}(2, \mathbb Z/N \mathbb Z)$ contains a subgroup $H$ such that all subgroups of $H$ of index $2$ contain $\operatorname{-Id}$. Suppose that there exists an elliptic curve $E/\mathbb Q$ such that $\overline{\rho}_{E,N}(G_{\mathbb Q})$ is conjugate to $H$. Let $E^{\chi}$ be a quadratic twist of $E$. Then $\overline{\rho}_{E^{\chi},N}(G_{\mathbb{Q}})$ is conjugate to $H$.
\end{corollary}
\begin{proof}
The group $H$ contains $\operatorname{-Id}$. By Lemma \ref{index of quadratic twists}, $\overline{\rho}_{E^{\chi},N}(G_{\mathbb{Q}})$ is conjugate to $H$ or is conjugate to a subgroup of $H$ of index $2$. Moreover, $\overline{\rho}_{E^{\chi},N}(G_{\mathbb{Q}})$ is the same as $\overline{\rho}_{E,N}(G_{\mathbb{Q}}) = H$, up to multiplication of some elements of $H$ by $\operatorname{-Id}$. As all subgroups of $H$ of index $2$ contain $\operatorname{-Id}$, we can just multiply the elements of $\overline{\rho}_{E^{\chi},N}(G_{\mathbb{Q}})$ that $\chi$ multiplied by $\operatorname{-Id}$ again by $\operatorname{-Id}$ to recoup all elements of $H$. Hence, $\overline{\rho}_{E^{\chi},N}(G_{\mathbb Q})$ is conjugate to $H$.
\end{proof}
\begin{remark}
Let $N$ be a positive integer such that $\operatorname{GL}(2, \mathbb Z/N\mathbb Z)$ contains a subgroup $H$ that does not contain $\operatorname{-Id}$. Suppose there is an elliptic curve $E/\mathbb Q$ such that $\overline{\rho}_{E,N}(G_{\mathbb Q})$ is conjugate to $H' = \left\langle H, \operatorname{-Id} \right\rangle$. Then there is a quadratic twist $E^{\chi}$ of $E$ such that $\overline{\rho}_{E^{\chi},N}(G_{\mathbb Q})$ is conjugate to $H$ (see Remark 1.1.3 and Section 10 in \cite{Rouse2021elladicIO}). Conversely, if $\overline{\rho}_{E,N}(G_{\mathbb Q})$ is conjugate to $H$, then there is a non-zero integer $d$ and a quadratic twist $E^{(d)}$ of $E$ such that $\overline{\rho}_{E^{(d)},N}(G_{\mathbb Q})$ is conjugate to $H'$; here $d$ is chosen so that $\mathbb Q(E[N])$ does not contain $\mathbb Q(\sqrt{d})$.
\end{remark}
\subsection{Galois representations}
\begin{lemma}\label{ell-adic Galois images}
Let $E$ and $E'$ be elliptic curves defined over $\mathbb Q$ that are $\mathbb Q$-isogenous by an isogeny $\phi$ whose kernel is finite, cyclic, and $\mathbb Q$-rational. Let $\ell$ be a prime and let $r$ be the non-negative integer such that $\ell^{r}$ is the greatest power of $\ell$ that divides the order of $\operatorname{Ker}(\phi)$. Let $m$ be a non-negative integer. Then there is a basis $\{P_{\ell^{m+r}}, Q_{\ell^{m+r}}\}$ of $E[\ell^{m+r}]$ such that
\begin{enumerate}
\item if $\sigma$ is a Galois automorphism of $\mathbb Q$, then there are integers $A$, $B$, $C$, and $D$ (depending on $\sigma$) such that $\overline{\rho}_{E,\ell^{m+r}}(\sigma) = \begin{bmatrix} A & C \\ B & D \end{bmatrix}$ and $\ell^{r}$ divides $C$,
\item $\{\phi([\ell^{r}]P_{\ell^{m+r}}), \phi(Q_{\ell^{m+r}})\}$ is a basis of $E'[\ell^{m}]$,
\item $\overline{\rho}_{E',\ell^{m}}(\sigma) = \begin{bmatrix} A & \frac{C}{\ell^{r}} \\ \ell^{r} \cdot B & D \end{bmatrix}$.
\end{enumerate}
\end{lemma}
\begin{proof}
We break up the proof into steps.
\begin{enumerate}
\item Let $Q_{\ell^{r}}$ be an element of $\operatorname{Ker}(\phi)$ of order $\ell^{r}$ and let $Q_{\ell^{m+r}}$ be a point on $E$ such that $[\ell^{m}]Q_{\ell^{m+r}} = Q_{\ell^{r}}$. Let $P_{\ell^{m+r}}$ be a point on $E$ such that $E[\ell^{m+r}] = \left\langle P_{\ell^{m+r}}, Q_{\ell^{m+r}} \right\rangle$. Let $\sigma$ be a Galois automorphism of $\mathbb Q$. Then there are integers $A$ and $B$ such that $\sigma(P_{\ell^{m+r}}) = [A]P_{\ell^{m+r}} + [B]Q_{\ell^{m+r}}$ and there are integers $C$ and $D$ such that $\sigma(Q_{\ell^{m+r}}) = [C]P_{\ell^{m+r}} + [D]Q_{\ell^{m+r}}$. Then
$$\sigma(Q_{\ell^{r}}) = \sigma([\ell^{m}]Q_{\ell^{m+r}}) = [\ell^{m}]\sigma(Q_{\ell^{m+r}}) = [\ell^{m}]([C]P_{\ell^{m+r}}+[D]Q_{\ell^{m+r}}) = [\ell^{m}C]P_{\ell^{m+r}} + [D]Q_{\ell^{r}}.$$
As $Q_{\ell^{r}}$ generates a $\mathbb Q$-rational group, $\sigma(Q_{\ell^{r}}) = [\ell^{m}C]P_{\ell^{m+r}} + [D]Q_{\ell^{r}} \in \left\langle Q_{\ell^{r}} \right\rangle \subseteq \left\langle Q_{\ell^{m+r}} \right\rangle$. Thus, $[\ell^{m} C]P_{\ell^{m+r}} \in \left\langle Q_{\ell^{m+r}} \right\rangle$. As $\left\langle P_{\ell^{m+r}} \right\rangle \bigcap \left\langle Q_{\ell^{m+r}} \right\rangle = \{\mathcal{O}\}$, we have that $[\ell^{m}C]P_{\ell^{m+r}} = \mathcal{O}$. Thus, $\ell^{m+r}$ divides $\ell^{m}C$ and hence, $\ell^{r}$ divides $C$.
\item We claim that $E'[\ell^{m}] = \left\langle \phi([\ell^{r}]P_{\ell^{m+r}}), \phi(Q_{\ell^{m+r}}) \right\rangle$. First, we show that the order of $\phi([\ell^{r}]P_{\ell^{m+r}})$ and the order of $\phi(Q_{\ell^{m+r}})$ are both equal to $\ell^{m}$. Note that $[\ell^{m}]\phi([\ell^{r}]P_{\ell^{m+r}}) = \phi([\ell^{m+r}]P_{\ell^{m+r}}) = \mathcal{O}$. Next, $[\ell^{m}]\phi(Q_{\ell^{m+r}}) = \phi([\ell^{m}]Q_{\ell^{m+r}}) = \phi(Q_{\ell^{r}}) = \mathcal{O}$. If $m = 0$, then we can move on. If $m$ is positive, then $m-1$ is a non-negative integer and
$$[\ell^{m-1}] \cdot \phi([\ell^{r}]P_{\ell^{m+r}}) = \phi([\ell^{m+r-1}]P_{\ell^{m+r}}).$$
Suppose that $\phi([\ell^{m+r-1}]P_{\ell^{m+r}}) = \mathcal{O}$. Then $[\ell^{m+r-1}]P_{\ell^{m+r}} \in \left\langle Q_{\ell^{r}} \right\rangle \subseteq \left\langle Q_{\ell^{m+r}} \right\rangle$. But the point $[\ell^{m+r-1}]P_{\ell^{m+r}}$ generates the subgroup of $\left\langle P_{\ell^{m+r}} \right\rangle$ of order $\ell$ and so cannot be contained in $\left\langle Q_{\ell^{m+r}} \right\rangle$, and we arrive at a contradiction. Hence, the order of $\phi([\ell^{r}]P_{\ell^{m+r}})$ is exactly $\ell^{m}$. Next,
$$[\ell^{m-1}] \cdot \phi(Q_{\ell^{m+r}}) = \phi([\ell^{m-1}]Q_{\ell^{m+r}}).$$
Suppose that $\phi([\ell^{m-1}]Q_{\ell^{m+r}}) = \mathcal{O}$. Then $[\ell^{m-1}]Q_{\ell^{m+r}} \in \left\langle Q_{\ell^{r}} \right\rangle$, but this is a contradiction as the order of $[\ell^{m-1}]Q_{\ell^{m+r}}$ is equal to $\ell^{r+1}$ and the order of $Q_{\ell^{r}}$ is equal to $\ell^{r}$. Hence, the order of $\phi(Q_{\ell^{m+r}})$ is exactly $\ell^{m}$.
Now we will prove that $\left\langle \phi([\ell^{r}]P_{\ell^{m+r}}) \right\rangle \bigcap \left\langle \phi(Q_{\ell^{m+r}}) \right\rangle = \left\{\mathcal{O} \right\}$. Suppose that there are integers $\alpha$ and $\beta$ such that $[\alpha]\phi([\ell^{r}]P_{\ell^{m+r}}) = [\beta]\phi(Q_{\ell^{m+r}})$. Then $\phi([\alpha\ell^{r}]P_{\ell^{m+r}}) = \phi([\beta]Q_{\ell^{m+r}})$. Hence, $[\alpha \ell^{r}]P_{\ell^{m+r}} - [\beta]Q_{\ell^{m+r}} \in \left\langle Q_{\ell^{r}} \right\rangle \subseteq \left\langle Q_{\ell^{m+r}} \right\rangle$ and hence, $[\alpha \ell^{r}]P_{\ell^{m+r}} \in \left\langle Q_{\ell^{m+r}} \right\rangle$. Thus, $[\alpha \ell^{r}]P_{\ell^{m+r}} = \mathcal{O}$ and hence, $[\alpha]\phi([\ell^{r}]P_{\ell^{m+r}}) = \phi([\alpha \ell^{r}]P_{\ell^{m+r}}) = \mathcal{O}$. This means that $\left\langle \phi([\ell^{r}]P_{\ell^{m+r}}) \right\rangle \bigcap \left\langle \phi(Q_{\ell^{m+r}}) \right\rangle = \{\mathcal{O}\}$. As the two generators have order $\ell^{m}$ and intersect trivially, they generate all of $E'[\ell^{m}]$, proving the claim.
\item Next, we see that
$$\sigma(\phi([\ell^{r}]P_{\ell^{m+r}})) = \phi([\ell^{r}]\sigma(P_{\ell^{m+r}})) = \phi([\ell^{r}]([A]P_{\ell^{m+r}}+[B]Q_{\ell^{m+r}})) = [A]\phi([\ell^{r}]P_{\ell^{m+r}})+[\ell^{r} \cdot B]\phi(Q_{\ell^{m+r}}).$$
Finally, we see that
$$\sigma(\phi(Q_{\ell^{m+r}})) = \phi(\sigma(Q_{\ell^{m+r}})) = \phi([C]P_{\ell^{m+r}}+[D]Q_{\ell^{m+r}})$$
$$= \phi\left(\left[\frac{C}{\ell^{r}}\right][\ell^{r}]P_{\ell^{m+r}}+[D]Q_{\ell^{m+r}}\right) = \left[\frac{C}{\ell^{r}}\right]\phi([\ell^{r}]P_{\ell^{m+r}})+[D]\phi(Q_{\ell^{m+r}}).$$
\end{enumerate}
\end{proof}
\begin{remark}
Let $E$ and $E'$ be elliptic curves defined over $\mathbb Q$. Let $\phi \colon E \to E'$ be a $\mathbb Q$-isogeny with a finite, cyclic, $\mathbb Q$-rational kernel. Let $\ell^{r}$ be the greatest power of $\ell$ that divides the order of $\operatorname{Ker}(\phi)$. Let $m$ be a non-negative integer. Given $\overline{\rho}_{E,\ell^{m+r}}(G_{\mathbb Q})$, we may use Lemma \ref{ell-adic Galois images} to compute $\overline{\rho}_{E',\ell^{m}}(G_{\mathbb Q})$. Therefore, $\rho_{E',\ell^{\infty}}(G_{\mathbb Q})$ is determined by $\rho_{E,\ell^{\infty}}(G_{\mathbb Q})$ (and vice versa).
\end{remark}
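In matrix terms, the recipe in Lemma \ref{ell-adic Galois images} is just a change of basis: lifting the entries to $\mathbb Z_{\ell}$ and viewing the matrices inside $\operatorname{GL}(2, \mathbb Q_{\ell})$, whenever $\ell^{r}$ divides $C$ we have
$$\begin{bmatrix} \ell^{r} & 0 \\ 0 & 1 \end{bmatrix}^{-1} \begin{bmatrix} A & C \\ B & D \end{bmatrix} \begin{bmatrix} \ell^{r} & 0 \\ 0 & 1 \end{bmatrix} = \begin{bmatrix} A & \frac{C}{\ell^{r}} \\ \ell^{r} \cdot B & D \end{bmatrix},$$
so $\overline{\rho}_{E',\ell^{m}}(\sigma)$ is obtained from $\overline{\rho}_{E,\ell^{m+r}}(\sigma)$ by conjugating by $\operatorname{diag}(\ell^{r},1)$ and reducing modulo $\ell^{m}$.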
\iffalse
\begin{remark}
Let us say that $\rho_{E,\ell^{\infty}}(G_{\mathbb Q})$ is a group of level $\ell^{n}$ for some non-negative integer $n$. Then for all non-negative integers $k$, $\overline{\rho}_{E,\ell^{n+k}}(G_{\mathbb Q})$ is simply the full lift of $\overline{\rho}_{E,\ell^{n}}(G_{\mathbb Q})$ to level $k$. In other words, if we are given that the level of $\rho_{E,\ell^{\infty}}(G_{\mathbb Q})$ is equal to $n$ and if we are given $\overline{\rho}_{E,\ell^{n}}(G_{\mathbb Q})$, then we can compute $\overline{\rho}_{E',\ell^{m}}(G_{\mathbb Q})$ for all elliptic curves $E'$ that are $\mathbb Q$-rational to $E$ and all non-negative integers $m$, using Lemma \ref{ell-adic Galois images}.
\end{remark}
\fi
\begin{corollary}\label{coprime isogeny-degree}
Let $E$ and $E'$ be elliptic curves defined over $\mathbb Q$ and let $\ell$ be a prime number. Suppose that $E$ is $\mathbb Q$-isogenous to $E'$ by an isogeny that is defined over $\mathbb Q$ with a finite, cyclic kernel of degree not divisible by $\ell$. Then $\rho_{E,\ell^{\infty}}(G_{\mathbb Q})$ is conjugate to $\rho_{E',\ell^{\infty}}(G_{\mathbb Q})$.
\end{corollary}
\begin{proof}
Use Lemma \ref{ell-adic Galois images} with $r = 0$.
\end{proof}
\begin{corollary}\label{contains scalars}
Let $\ell$ be a prime and let $E$ and $E'$ be elliptic curves defined over $\mathbb Q$. Suppose that $E$ is $\mathbb Q$-isogenous to $E'$ by an isogeny $\phi$ with a finite, cyclic, $\mathbb Q$-rational kernel. Let $\alpha$ be an integer that is not divisible by $\ell$. Then $\rho_{E,\ell^{\infty}}(G_{\mathbb Q})$ contains $s_{\alpha} = \begin{bmatrix} \alpha & 0 \\ 0 & \alpha \end{bmatrix}$ if and only if $\rho_{E',\ell^{\infty}}(G_{\mathbb Q})$ contains $s_{\alpha}$.
\end{corollary}
\begin{proof}
Suppose that $\rho_{E,\ell^{\infty}}(G_{\mathbb Q})$ contains $s_{\alpha}$. Let $r$ be the non-negative integer such that $\ell^{r}$ is the greatest power of $\ell$ that divides $\operatorname{Ker}(\phi)$ and let $m$ be a non-negative integer. Then $\overline{\rho}_{E,\ell^{m+r}}(G_{\mathbb Q})$ contains $s_{\alpha}$. As $s_{\alpha}$ is in the center of $\operatorname{GL}(2, \mathbb Z / \ell^{m+r} \mathbb Z)$, it does not matter what basis we use for $E[\ell^{m+r}]$. By Lemma \ref{ell-adic Galois images}, $s_{\alpha}$ is an element of $\overline{\rho}_{E',\ell^{m}}(G_{\mathbb Q})$. The converse is proved simply by switching the roles of $E$ and $E'$ and using the dual of $\phi$.
\end{proof}
\begin{corollary}\label{contains -Id}
Let $\ell$ be a prime and let $E$ and $E'$ be elliptic curves defined over $\mathbb Q$. Suppose that $E$ is $\mathbb Q$-isogenous to $E'$ by an isogeny $\phi$ with a finite, cyclic, $\mathbb Q$-rational kernel. Then $\rho_{E,\ell^{\infty}}(G_{\mathbb Q})$ contains $\operatorname{-Id}$ if and only if $\rho_{E',\ell^{\infty}}(G_{\mathbb Q})$ contains $\operatorname{-Id}$.
\end{corollary}
\begin{proof}
Use Corollary \ref{contains scalars} with $\alpha = -1$.
\end{proof}
\iffalse
\begin{lemma}\label{2-adic Galois image corollary 1}
Let $N$ be a positive integer. Let $E/\mathbb Q$ be an elliptic curve with a point $Q_{2}$ of order $2$ defined over $\mathbb Q$. Then there is a basis $\{P_{2^{N+1}}, Q_{2^{N+1}}\}$ of $E[2^{N+1}]$ such that $\overline{\rho}_{E,2^{N+1}}(G_{\mathbb Q}) = \left\langle \begin{bmatrix} A_{1} & C_{1} \\ B_{1} & D_{1} \end{bmatrix}, \ldots, \begin{bmatrix}
A_{s} & C_{s} \\ B_{s} & D_{s}
\end{bmatrix} \right\rangle$ where $2$ divides $C_{1}$, \ldots, $C_{s}$. Moreover, $\overline{\rho}_{E / \left\langle Q_{2} \right\rangle,2^{N}}(G_{\mathbb Q})$ is conjugate to $\left\langle \begin{bmatrix} A_{1} & \frac{C_{1}}{2} \\ 2 \cdot B_{1} & D_{1} \end{bmatrix}, \ldots, \begin{bmatrix}
A_{s} & \frac{C_{s}}{2} \\ 2 \cdot B_{s} & D_{s}
\end{bmatrix} \right\rangle$.
\end{lemma}
\begin{proof}
This follows from the first and third parts of Lemma \ref{ell-adic Galois images} with $\ell = 2$ and $r = 1$.
\end{proof}
\begin{lemma}\label{2-adic Galois image corollary 2}
Let $N$ be a positive integer. Let $E/\mathbb Q$ be an elliptic curve with full two-torsion defined over $\mathbb Q$. Then there is a basis $\{P_{2^{N+1}}, Q_{2^{N+1}}\}$ of $E[2^{N+1}]$ such that $\overline{\rho}_{E,2^{N+1}} = \left\langle \begin{bmatrix}
A_{1} & C_{1} \\ B_{1} & D_{1}
\end{bmatrix}, \ldots, \begin{bmatrix} A_{s} & C_{s} \\ B_{s} & D_{s} \end{bmatrix} \right\rangle$ where $A_{1}$, \ldots, $A_{s}$, $D_{1}$, \ldots, $D_{s}$ are odd and $B_{1}$, \ldots, $B_{s}$, $C_{1}$, \ldots, $C_{s}$ are even. Moreover,
\begin{enumerate}
\item $\overline{\rho}_{E / \left\langle Q_{2} \right\rangle,2^{N}}(G_{\mathbb Q})$ is conjugate to $\left\langle \begin{bmatrix} A_{1} & \frac{C_{1}}{2} \\ 2 \cdot B_{1} & D_{1} \end{bmatrix}, \ldots, \begin{bmatrix}
A_{s} & \frac{C_{s}}{2} \\ 2 \cdot B_{s} & D_{s}
\end{bmatrix} \right\rangle$.
\item $\overline{\rho}_{E / \left\langle P_{2} \right\rangle,2^{N}}(G_{\mathbb Q})$ is conjugate to $\left\langle \begin{bmatrix} D_{1} & \frac{B_{1}}{2} \\ 2 \cdot C_{1} & A_{1} \end{bmatrix}, \ldots, \begin{bmatrix}
D_{s} & \frac{B_{s}}{2} \\ 2 \cdot C_{s} & A_{s}
\end{bmatrix} \right\rangle$.
\item $\overline{\rho}_{E / \left\langle P_{2} + Q_{2} \right\rangle,2^{N}}(G_{\mathbb Q})$ is conjugate to $\left\langle \begin{bmatrix} A_{1}-B_{1} & \frac{A_{1}+C_{1}-B_{1}-D_{1}}{2} \\ 2 \cdot B_{1} & B_{1}+D_{1} \end{bmatrix}, \ldots, \begin{bmatrix} A_{s}-B_{s} & \frac{A_{s}+C_{s}-B_{s}-D_{s}}{2} \\ 2 \cdot B_{s} & B_{s}+D_{s} \end{bmatrix} \right\rangle$.
\end{enumerate}
\end{lemma}
\begin{proof}
Let $P_{2^{N+1}}$ be a point on $E$ such that $[2^{N}]P_{2^{N+1}} = P_{2}$ and let $Q_{2^{N+1}}$ be a point on $E$ such that $[2^{N}]Q_{2^{N+1}} = Q_{2}$. Let $\sigma$ be a Galois automorphism. Then there are integers $A$ and $B$ such that $\sigma(P_{2^{N+1}}) = [A]P_{2^{N+1}}+[B]Q_{2^{N+1}}$. Note that as $P_{2}$ is defined over $\mathbb Q$,
$$P_{2} = \sigma(P_{2}) = \sigma(\left[2^{N}\right]P_{2^{N+1}}) = \left[2^{N}\right]\sigma(P_{2^{N+1}}) = \left[2^{N}\right]([A]P_{2^{N+1}}+[B]Q_{2^{N+1}}] = [A]P_{2}+[B]Q_{2}.$$
Then $A$ must be odd and $B$ must be even. A very similar computation shows that there is an even integer $C$ and an odd integer $D$ such that $\sigma(Q_{2^{N+1}}) = [C]P_{2^{N+1}}+[D]Q_{2^{N+1}}$. We break up the rest of the proof into steps.
\begin{enumerate}
\item The first part follows from the first and third parts of Lemma \ref{ell-adic Galois images} with $\ell = 2$ and $r = 1$.
\item If we rewrite $E[2^{N+1}] = \left\langle Q_{2^{N+1}}, P_{2^{N+1}} \right\rangle$, then $\overline{\rho}_{E,2^{N+1}}(\sigma) = \begin{bmatrix} D & B \\ C & A \end{bmatrix}$. The rest of the part of the theorem follows from the first and third parts of Lemma \ref{ell-adic Galois images} with $\ell = 2$ and $r = 1$.
\item Note that $\sigma(P_{2^{N+1}}) = [A]P_{2^{N+1}}+[B]Q_{2^{N+1}} = [A-B]P_{2^{N+1}}+[B](P_{2^{N+1}}+Q_{2^{N+1}})$ and
$$\sigma(P_{2^{N+1}}+Q_{2^{N+1}}) = [A+C]P_{2^{N+1}}+[B+D]Q_{2^{N+1}} = [A+C-B-D]P_{2^{N+1}}+[B+D](P_{2^{N+1}}+Q_{2^{N+1}}).$$
As $A$ and $D$ are odd and $B$ and $C$ are even, $A+C-B-D$ is even. If we rewrite $E[2^{N+1}] = \left\langle P_{2^{N+1}}, P_{2^{N+1}}+Q_{2^{N+1}} \right\rangle$, then $\overline{\rho}_{E,2^{N+1}}(\sigma) = \begin{bmatrix} A-B & A+C-B-D \\ B & B+D \end{bmatrix}$. By Lemma \ref{ell-adic Galois images} with $\ell = 2$, $\overline{\rho}_{E / \left\langle P_{2} + Q_{2} \right\rangle,2^{N}}(G_{\mathbb Q})$ is conjugate to $\left\langle \begin{bmatrix} A_{1}-B_{1} & \frac{A_{1}+C_{1}-B_{1}-D_{1}}{2} \\ 2 \cdot B_{1} & B_{1}+D_{1} \end{bmatrix}, \ldots, \begin{bmatrix} A_{s}-B_{s} & \frac{A_{s}+C_{s}-B_{s}-D_{s}}{2} \\ 2 \cdot B_{s} & B_{s}+D_{s} \end{bmatrix} \right\rangle$ and $r = 1$.
\end{enumerate}
\end{proof}
\begin{lemma}\label{Álvaro's 2-adic Galois image corollary}
Let $E/\mathbb Q$ be an elliptic curve such that $\overline{\rho}_{E,2}(G_{\mathbb Q}) = \left\langle \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix} \right\rangle$. Then $E$ is $2$-isogenous to an elliptic curve $E'/\mathbb Q$. Suppose that $\rho_{E,2^{\infty}}(G_{\mathbb Q}) = \left\langle \begin{bmatrix} A_{1} & C_{1} \\ B_{1} & D_{1} \end{bmatrix}, \ldots, \begin{bmatrix} A_{s} & C_{s} \\ B_{s} & D_{s} \end{bmatrix} \right\rangle$. Then for each $i \in \{1, \ldots, s\}$, $A_{i}+C_{i}-B_{i}-D_{i}$ is even and $\rho_{E',2^{\infty}}(G_{\mathbb Q}) = \left\langle \begin{bmatrix} A_{1} & \frac{C_{1}}{2} \\ 2 \cdot B_{1} & D_{1} \end{bmatrix}, \ldots, \begin{bmatrix} A_{s} & \frac{C_{s}}{2} \\ 2 \cdot B_{s} & D_{s} \end{bmatrix} \right\rangle$.
\end{lemma}
\begin{proof}
Let $E[2] = \left\langle P_{2}, Q_{2} \right\rangle$ such that $\overline{\rho}_{E,2}(G_{\mathbb Q}) = \left\{\operatorname{Id}, \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}\right\}$. In other words, if $\sigma$ is a Galois automorphism of $\mathbb Q$, then $\sigma$ fixes $P_{2}$ and $Q_{2}$ or $\sigma$ interchanges $P_{2}$ and $Q_{2}$. Then $\sigma$ fixes $P_{2}+Q_{2}$ and so, $P_{2}+Q_{2}$ generates a $\mathbb Q$-rational subgroup of $E$ of order $2$. Moreover, $\{P_{2}, P_{2}+Q_{2}\}$ is a basis of $E[2]$. The rest of the proof is very similar to the proof of the third part of Lemma \ref{2-adic Galois image corollary 2}.
\end{proof}
\fi
\begin{lemma}[Generalized Hensel's Lemma]\label{Hensel}
Let $p$ be a prime and let $f(x)$ be a polynomial with integer coefficients. Suppose that $f(a) \equiv 0 \pmod{p^{j}}$, that $p^{\tau} \parallel f'(a)$ (that is, $p^{\tau}$ divides $f'(a)$ exactly), and that $j \geq 2\tau + 1$. Then there is a unique $t$ modulo $p$ such that $f(a+tp^{j-\tau}) \equiv 0 \pmod{p^{j+1}}$.
\end{lemma}
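As a quick illustration of how Lemma \ref{Hensel} will be used below, take $p = 2$ and $f(x) = x^{2} - 17$. With $a = 1$ and $j = 4$ we have $f(1) = -16 \equiv 0 \pmod{2^{4}}$ and $2^{1} \parallel f'(1) = 2$, so $\tau = 1$ and $j \geq 2\tau+1$; the lemma then produces $t$ with $f(1 + t \cdot 2^{3}) \equiv 0 \pmod{2^{5}}$, and iterating shows that $17$ is a square in $\mathbb Z_{2}$.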
\section{Classification of $2$-adic Galois images attached to elliptic curves defined over $\mathbb Q$ with CM}\label{sec-Alvaros work}
In \cite{al-rCMGRs}, Lozano-Robledo classified the image of $\ell$-adic Galois representations attached to elliptic curves defined over $\mathbb Q$ with CM for all primes $\ell$. In this section, we will briefly go over the results from \cite{al-rCMGRs} that are important in this paper.
For the rest of the paper, let $K = \mathbb Q(\sqrt{d})$ be an imaginary quadratic field, where $d$ is a squarefree negative integer, let $\mathcal{O}_{K}$ be the ring of integers of $K$, and let $\Delta_{K}$ be the discriminant of $K$. Then $\Delta_{K} = d$ if $d$ is congruent to $1$ modulo $4$ and $\Delta_{K} = 4d$ otherwise. Let $f$ be a positive integer and let $\mathcal{O}_{K,f}$ be the order of $K$ of conductor $f$.
\begin{theorem}[Theorem 1.1, \cite{al-rCMGRs}]\label{Theorem 1.1}
Let $E/\mathbb Q$ be an elliptic curve with CM by $\mathcal{O}_{K,f}$, let $N$ be an even integer greater than or equal to $4$, and let $\overline{\rho}_{E,N} \colon G_{\mathbb Q} \to \operatorname{GL}(2, \mathbb Z / N \mathbb Z)$ be the associated mod-$N$ Galois representation.
\begin{itemize}
\item If $\Delta_{K} \cdot f^{2} \equiv 0 \mod 4$, then set $\delta = \frac{\Delta_{K} \cdot f^{2}}{4}$ and $\phi = 0$.
\item If $\Delta_{K} \cdot f^{2} \equiv 1 \mod 4$, then set $\delta = \frac{(\Delta_{K}-1)}{4} \cdot f^{2}$ and $\phi = f$.
\end{itemize}
Define the group $\mathcal{C}_{\delta,\phi}(N)$ to be the subgroup of $\operatorname{GL}(2, \mathbb Z / N \mathbb Z)$ consisting of all invertible matrices of the form $\begin{bmatrix} a+b \phi & b \\ \delta b & a \end{bmatrix}$ with $a, b \in \mathbb Z / N \mathbb Z$, and define $\mathcal{N}_{\delta, \phi}(N)$ to be the group $\mathcal{N}_{\delta, \phi}(N) = \left\langle \mathcal{C}_{\delta, \phi}(N), \begin{bmatrix} -1 & 0 \\ \phi & 1 \end{bmatrix} \right\rangle$. Then
\begin{enumerate}
\item there is a $\mathbb Z / N \mathbb Z$-basis of $E[N]$ such that $\overline{\rho}_{E,N}(G_{\mathbb Q})$ is contained in $\mathcal{N}_{\delta, \phi}(N)$
\item and $\mathcal{C}_{\delta, \phi}(N) \cong \left(\mathcal{O}_{K,f} / N \mathcal{O}_{K,f} \right)^{\times}$ is a subgroup of $\mathcal{N}_{\delta, \phi}(N)$ of index $2$.
\end{enumerate}
\end{theorem}
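For instance, when $\phi = 0$ the extra generator normalizes the Cartan subgroup, acting on it by $b \mapsto -b$ (which corresponds to complex conjugation under the identification in part (2)):
$$\begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix} \begin{bmatrix} a & b \\ \delta b & a \end{bmatrix} \begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix}^{-1} = \begin{bmatrix} a & -b \\ -\delta b & a \end{bmatrix},$$
so $\mathcal{N}_{\delta,0}(N)$ is contained in the normalizer of $\mathcal{C}_{\delta,0}(N)$ in $\operatorname{GL}(2, \mathbb Z / N \mathbb Z)$.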
\begin{theorem}[Theorem 1.2, \cite{al-rCMGRs}]\label{Theorem 1.2}
Let $E/\mathbb Q$ be an elliptic curve with CM by $\mathcal{O}_{K,f}$.
\begin{itemize}
\item If $\Delta_{K} \cdot f^{2} \equiv 0 \mod 4$, then set $\delta = \frac{\Delta_{K} \cdot f^{2}}{4}$ and $\phi = 0$.
\item If $\Delta_{K} \cdot f^{2} \equiv 1 \mod 4$, then set $\delta = \frac{(\Delta_{K}-1)}{4} \cdot f^{2}$ and $\phi = f$.
\end{itemize}
Let $\rho_{E}$ be the Galois representation $\rho_{E} \colon \operatorname{Gal}(\overline{\mathbb Q}/\mathbb Q) \to \varprojlim \operatorname{Aut}(E[N]) \cong \operatorname{GL}(2, \widehat{\mathbb Z})$ and let $\mathcal{N}_{\delta,\phi} = \varprojlim \mathcal{N}_{\delta,\phi}(N)$. Then there is a compatible system of bases of $E[N]$ such that the image of $\rho_{E}$ is contained in $\mathcal{N}_{\delta,\phi}$, and the index of the image of $\rho_{E}$ in $\mathcal{N}_{\delta,\phi}$ is a divisor of the order of $\mathcal{O}_{K,f}^{\times}$. In particular, the index is a divisor of $4$ or $6$.
\end{theorem}
From now on, let $H_{f} = K(\textit{j}_{K,f})$, where $\textit{j}_{K,f}$ denotes the $j$-invariant of an elliptic curve with CM by $\mathcal{O}_{K,f}$.
\begin{theorem}[Theorem 1.6, \cite{al-rCMGRs}]\label{Theorem 1.6}
Let $E/\mathbb Q(\textit{j}_{K,f})$ be an elliptic curve with CM by an order $\mathcal{O}_{K,f}$ of an imaginary quadratic field $K$ with $\textit{j}_{E} \neq 0, 1728$. Then, for every $m \geq 1$, we have $\operatorname{Gal}(H_{f}(E[2^{m}])/H_{f}) \subseteq \left( \mathcal{O}_{K,f} / 2^{m} \mathcal{O}_{K,f}\right)^{\times}$. Suppose that $\operatorname{Gal}(H_{f}(E[2^{n}]) / H_{f}) \subsetneq \left(\mathcal{O}_{K,f} / 2^{n}\mathcal{O}_{K,f}\right)^{\times}$ for some positive integer $n$ and assume that $n$ is the smallest such integer. Then $n \leq 3$ and for all $m \geq 3$, we have
$$\operatorname{Gal}(H_{f}(E[2^{m}]) / H_{f}) \cong \left(\mathcal{O}_{K,f} / 2^{m}\mathcal{O}_{K,f}\right)^{\times} / \{\pm 1\}.$$
Further, there are two possibilities:
\begin{enumerate}
\item If $n \leq 2$, then $\operatorname{Gal}(H_{f}(E[4])/H_{f}) \cong \left(\mathcal{O}_{K,f} / 4 \mathcal{O}_{K,f}\right)^{\times} / \{\pm 1 \}$ and:
\begin{enumerate}
\item $\operatorname{disc}(\mathcal{O}_{K,f}) = \Delta_{K} \cdot f^{2} \equiv 0 \mod 16$. In particular, we have either
\begin{itemize}
\item $\Delta_{K} \equiv 1 \mod 4$ and $f \equiv 0 \mod 4$, or
\item $\Delta_{K} \equiv 0 \mod 4$ and $f \equiv 0 \mod 2$.
\end{itemize}
\item $\mathbb Q(i) \subseteq H_{f}$.
\item For each $m \geq 2$, there is a $\mathbb Z / 2^{m} \mathbb Z$-basis of $E[2^{m}]$ such that the image of the Galois representation $\rho_{E,2^{m}} \colon \operatorname{Gal}(\overline{H}_{f} / H_{f}) \to \operatorname{GL}(2, \mathbb Z / 2^{m} \mathbb Z)$ is one of the groups
\begin{center}
$J_{1} = \left\langle \begin{bmatrix} 5 & 0 \\ 0 & 5 \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ \delta & 1 \end{bmatrix} \right\rangle$ or $J_{2} = \left\langle \begin{bmatrix} 5 & 0 \\ 0 & 5 \end{bmatrix}, \begin{bmatrix} -1 & -1 \\ -\delta & -1 \end{bmatrix} \right\rangle \subseteq \mathcal{C}_{\delta,0}(2^{m})$.
\end{center}
\end{enumerate}
\item If $n = 3$, then $\operatorname{Gal}(H_{f}(E[4]) / H_{f}) \cong \left(\mathcal{O}_{K,f} / 4 \mathcal{O}_{K,f}\right)^{\times}$ and:
\begin{enumerate}
\item $\Delta_{K} \equiv 0 \mod 8$.
\item For each $m \geq 3$, there is a $\mathbb Z / 2^{m} \mathbb Z$-basis of $E[2^{m}]$ such that the image of the Galois representation $\rho_{E,2^{m}} \colon \operatorname{Gal}(\overline{H}_{f} / H_{f}) \to \operatorname{GL}(2, \mathbb Z / 2^{m} \mathbb Z)$ is one of the groups
\begin{center}
$J_{1} = \left\langle \begin{bmatrix} 3 & 0 \\ 0 & 3 \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ \delta & 1 \end{bmatrix} \right\rangle$ or $J_{2} = \left\langle \begin{bmatrix} 3 & 0 \\ 0 & 3 \end{bmatrix}, \begin{bmatrix} -1 & -1 \\ -\delta & -1 \end{bmatrix} \right\rangle \subseteq \mathcal{C}_{\delta,0}(2^{m})$.
\end{center}
\end{enumerate}
\end{enumerate}
Finally, there is some $\epsilon \in \{\pm 1 \}$ and $\alpha \in \{3, 5\}$ such that the image of $\rho_{E,2^{\infty}}$ is a conjugate of
\begin{center}
$\left\langle \begin{bmatrix} \epsilon & 0 \\ 0 & -\epsilon \end{bmatrix}, \begin{bmatrix} \alpha & 0 \\ 0 & \alpha \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ \delta & 1 \end{bmatrix} \right\rangle$ or $\left\langle \begin{bmatrix} \epsilon & 0 \\ 0 & -\epsilon \end{bmatrix}, \begin{bmatrix} \alpha & 0 \\ 0 & \alpha \end{bmatrix}, \begin{bmatrix} -1 & -1 \\ -\delta & -1 \end{bmatrix} \right\rangle \subseteq \operatorname{GL}(2, \mathbb Z_{2})$.
\end{center}
\end{theorem}
\begin{corollary}\label{Theorem 1.6 corollary 1}
Let $E/\mathbb Q$ be an elliptic curve with CM by an order $\mathcal{O}_{K,f}$ in the number field $K$ with discriminant $\Delta_{K}$ and conductor $f$ and $\textit{j}_{E} \neq 0, 1728$. Let $m$ be a non-negative integer. Then $\overline{\rho}_{E,2^{m+3}}(G_{\mathbb Q})$ is conjugate to the full lift of $\overline{\rho}_{E,8}(G_{\mathbb Q})$ inside the group $\mathcal{N}_{\delta,\phi}\left(2^{3+m}\right)$.
\end{corollary}
\begin{corollary}\label{Theorem 1.6 corollary 2}
Let $E/\mathbb Q$ be an elliptic curve with CM by an order $\mathcal{O}_{K,f}$ in the number field $K$ with discriminant $\Delta_{K}$ and conductor $f$ and $\textit{j}_{E} \neq 0, 1728$. If $\Delta_{K} \cdot f^{2}$ is not divisible by $8$, then $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{\delta, \phi}(2^{\infty})$.
\end{corollary}
\begin{theorem}[Theorem 1.7, \cite{al-rCMGRs}]\label{Theorem 1.7}
Let $E/\mathbb Q$ be an elliptic curve with $\textit{j}_{E} = 1728$, let $c \in G_{\mathbb Q}$ represent complex conjugation, and let $\gamma = \rho_{E,2^{\infty}}(c)$. Let $G_{E,2^{\infty}}$ be the image of $\rho_{E,2^{\infty}}$ and let $G_{E,K,2^{\infty}} = \rho_{E,2^{\infty}}(G_{\mathbb Q(i)})$. Then, there is a $\mathbb Z_{2}$-basis of $T_{2}(E) = \varprojlim E[2^{n}]$ such that $G_{E,K,2^{\infty}}$ is one of the following groups:
\begin{itemize}
\item If $[\mathcal{C}_{-1,0}(2^{\infty}) : G_{E,K,2^{\infty}}] = 1$, then $G_{E,K,2^{\infty}}$ is all of $\mathcal{C}_{-1,0}(2^{\infty})$, i.e.,
$$G_{1} = \left\{\begin{bmatrix} a & b \\ -b & a \end{bmatrix} \in \operatorname{GL}(2, \mathbb Z_{2}) : a^{2}+b^{2} \not \equiv 0 \mod 2 \right\}.$$
\item If $[\mathcal{C}_{-1,0}(2^{\infty}) : G_{E,K,2^{\infty}}] = 2$, then $G_{E,K,2^{\infty}}$ is one of the following groups:
\begin{center}
$G_{2,a} = \left\langle \operatorname{-Id}, 3 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix} \right\rangle$ or $G_{2,b} = \left\langle \operatorname{-Id}, 3 \cdot \operatorname{Id}, \begin{bmatrix} 2 & 1 \\ -1 & 2 \end{bmatrix} \right\rangle$.
\end{center}
\item If $[\mathcal{C}_{-1,0}(2^{\infty}) : G_{E,K,2^{\infty}}] = 4$, then $G_{E,K,2^{\infty}}$ is one of the following groups
\begin{center} $G_{4,a} = \left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix} \right\rangle$, or $G_{4,b} = \left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} -1 & -2 \\ 2 & -1 \end{bmatrix} \right\rangle$ or \end{center}
\begin{center} $G_{4,c} = \left\langle -3 \cdot \operatorname{Id}, \begin{bmatrix} 2 & -1 \\ 1 & 2 \end{bmatrix} \right\rangle$, or $G_{4,d} = \left\langle -3 \cdot \operatorname{Id}, \begin{bmatrix} -2 & 1 \\ -1 & -2 \end{bmatrix} \right\rangle$. \end{center}
\end{itemize}
Moreover, $G_{E,2^{\infty}} = \left\langle \gamma, G_{E,K,2^{\infty}} \right\rangle = \left\langle \gamma', G_{E,K,2^{\infty}} \right\rangle$ is generated by one of the groups above together with an element
$$\gamma' \in \left\{ c_{1} = \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}, c_{-1} = \begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix}, c_{1}' = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}, c_{-1}' = \begin{bmatrix} 0 & -1 \\ -1 & 0 \end{bmatrix} \right\},$$
such that $\gamma \equiv \gamma' \mod 4$.
\end{theorem}
\begin{theorem}[Theorem 1.8, \cite{al-rCMGRs}]\label{Theorem 1.8}
Let $E/\mathbb Q$ be an elliptic curve with $\textit{j}_{E} = 0$, and let $c \in G_{\mathbb Q}$ represent complex conjugation. Let $G_{E,2^{\infty}}$ be the image of $\rho_{E,2^{\infty}}$ and let $G_{E,K,2^{\infty}} = \rho_{E,2^{\infty}}(G_{\mathbb Q\left(\sqrt{-3}\right)})$. Then there is a $\mathbb Z_{2}$-basis of $T_{2}(E)$ such that the image $G_{E,2^{\infty}}$ of $\rho_{E,2^{\infty}}$ is one of the following subgroups of $\operatorname{GL}(2, \mathbb Z_{2})$, with $\gamma = \rho_{E,2^{\infty}}(c)$.
\begin{itemize}
\item Either, $[\mathcal{C}_{-1,1}(2^{\infty}) : G_{E,K,2^{\infty}}] = 3$, and
$$G_{E,2^{\infty}} = \left\langle \gamma', \operatorname{-Id}, \begin{bmatrix} 7 & 4 \\ -4 & 3 \end{bmatrix}, \begin{bmatrix} 3 & 6 \\ -6 & -3 \end{bmatrix} \right\rangle$$
$$= \left\langle \gamma', \left\{ \begin{bmatrix} a + b & b \\ -b & a \end{bmatrix} \in \operatorname{GL}(2, \mathbb Z_{2}) : a \not \equiv 0 \mod 2, b \equiv 0 \mod 2 \right\} \right\rangle,$$
and $\left\{ \begin{bmatrix} a + b & b \\ -b & a \end{bmatrix} \in \operatorname{GL}(2, \mathbb Z_{2}) : a \not \equiv 0 \mod 2, b \equiv 0 \mod 2 \right\}$ is precisely the set of matrices that correspond to the subgroup of cubes of Cartan elements $\mathcal{C}_{-1,1}(2^{\infty})^{3}$ which is the unique group of index $3$ in $\mathcal{C}_{-1,1}(2^{\infty})$.
\item Or, $[\mathcal{C}_{-1,1}(2^{\infty}) : G_{E,K,2^{\infty}}] = 1$, and
$$G_{E,2^{\infty}} = \mathcal{N}_{-1,1}(2^{\infty}) = \left\langle \gamma', \operatorname{-Id}, \begin{bmatrix} 7 & 4 \\ -4 & 3 \end{bmatrix}, \begin{bmatrix} 2 & 1 \\ -1 & 1 \end{bmatrix} \right\rangle$$
$$= \left\langle \gamma', \left\{\begin{bmatrix} a + b & b \\ -b & a \end{bmatrix} \in \operatorname{GL}(2, \mathbb Z_{2}) : a \not \equiv 0 \mod 2 \text{ or } b \not \equiv 0 \mod 2 \right\} \right\rangle$$
where $\gamma' \in \left\{ \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}, \begin{bmatrix} 0 & -1 \\ -1 & 0 \end{bmatrix} \right\}$, and $\gamma \equiv \gamma' \mod 4$.
\end{itemize}
\end{theorem}
\begin{corollary}\label{points of order 2 with j=0}
Let $E/\mathbb Q$ be an elliptic curve with $\textit{j}_{E} = 0$. Then $E$ has a point of order $2$ defined over $\mathbb Q$ if and only if $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle \operatorname{-Id}, \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}, \begin{bmatrix} 7 & 4 \\ -4 & 3 \end{bmatrix}, \begin{bmatrix} 3 & 6 \\ -6 & -3 \end{bmatrix} \right\rangle$ and $E$ does not have a point of order $2$ defined over $\mathbb Q$ if and only if $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-1,1}(2^{\infty})$.
\end{corollary}
\begin{proof}
Since $\textit{j}_{E} = 0$, Theorem \ref{Theorem 1.8} shows that $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to one of two groups. The reduction of the group $\left\langle \operatorname{-Id}, \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}, \begin{bmatrix} 7 & 4 \\ -4 & 3 \end{bmatrix}, \begin{bmatrix} 3 & 6 \\ -6 & -3 \end{bmatrix} \right\rangle$ modulo $2$ is the group of order $2$ generated by $\begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}$, which fixes a non-trivial vector of $E[2]$, whereas the reduction of $\mathcal{N}_{-1,1}(2^{\infty})$ modulo $2$ is a group of order $6$, namely all of $\operatorname{GL}(2, \mathbb Z / 2 \mathbb Z)$, which fixes no non-trivial vector. Hence, if $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to the former, then $E$ has a point of order $2$ defined over $\mathbb Q$, and if $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to the latter, then $E$ does not have a point of order $2$ defined over $\mathbb Q$.
\end{proof}
\section{$2$-adic Galois images attached to isogeny-torsion graphs with CM}\label{proofs}
Here we classify the $2$-adic Galois images attached to the isogeny-torsion graphs defined over $\mathbb Q$ with CM. We organize the proofs first by isogeny-torsion graph and then by \textit{j}-invariant.
\begin{proposition}
Let $E/\mathbb Q$ be an elliptic curve with complex multiplication such that the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $L_{4}$ type or of $L_{2}(3)$ type. Then $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-1,1}(2^{\infty})$.
\end{proposition}
\begin{proof}
If the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $L_{4}$ type, then it looks like the one below with the \textit{j}-invariant of the corresponding elliptic curves listed:
\begin{center}
\begin{tikzcd}
{E_{1}, \textit{j}_{E_{1}} = -12288000} \arrow[rr, no head, "3"] & & {E_{2}, \textit{j}_{E_{2}} = 0} \arrow[rr, no head, "3"] & & {E_{3}, \textit{j}_{E_{3}} = 0} \arrow[rr, no head, "3"] & & {E_{4}, \textit{j}_{E_{4}} = -12288000}
\end{tikzcd}
\end{center}
and if the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $L_{2}(3)$ type, then it looks like the one below with the \textit{j}-invariant of the corresponding elliptic curves listed:
\begin{center}
\begin{tikzcd}
{E_{1}, \textit{j}_{E_{1}} = 0} \arrow[r, no head, "3"] & {E_{2}, \textit{j}_{E_{2}} = 0}
\end{tikzcd}
\end{center}
Let $E'/\mathbb Q$ be an elliptic curve that is $3$-isogenous to $E$ with $\textit{j}_{E'} = 0$ (in both graphs above, every vertex is joined by a $3$-isogeny to a vertex with \textit{j}-invariant $0$). By Corollary \ref{coprime isogeny-degree}, $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\rho_{E',2^{\infty}}(G_{\mathbb Q})$. As neither isogeny graph contains a $2$-isogeny, no curve in the $\mathbb Q$-isogeny class has a $\mathbb Q$-rational subgroup of order $2$; in particular, $E'$ does not have a point of order $2$ defined over $\mathbb Q$. By Corollary \ref{points of order 2 with j=0}, $\rho_{E',2^{\infty}}(G_{\mathbb Q})$, and hence $\rho_{E,2^{\infty}}(G_{\mathbb Q})$, is conjugate to $\mathcal{N}_{-1,1}(2^{\infty})$.
\end{proof}
\begin{proposition}
Let $E/\mathbb Q$ be an elliptic curve with CM by an order in an imaginary quadratic field $K$ with discriminant $\Delta_{K}$. Suppose that the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $L_{2}(p)$ type with $p \in \{11, 19, 43, 67, 163\}$. Then $\rho_{E,2^{\infty}}(G_{\mathbb{Q}})$ is conjugate to $\mathcal{N}_{\frac{\Delta_{K}-1}{4},1}(2^{\infty})$.
\end{proposition}
\begin{proof}
The elliptic curve $E$ is $p$-isogenous to an elliptic curve $E'/\mathbb Q$. Moreover, $\textit{j}_{E} = \textit{j}_{E'}$ and $\textit{j}_{E} \neq 0, 1728$, meaning that $E$ is a quadratic twist of $E'$. The isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $L_{2}(p)$ type (see below).
\begin{center}
\begin{tikzcd}
E \arrow[r, "p", no head] & E'
\end{tikzcd}
\end{center}
By Corollary \ref{coprime isogeny-degree}, $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\rho_{E',2^{\infty}}(G_{\mathbb Q})$. We prove that $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is unaffected by quadratic twisting by showing that $\operatorname{-Id}$ is an element of every subgroup of $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ of index $2$.
By Table \ref{tab-CMgraphs}, $\textit{j}_{E} \in \left\{-32768, -884736, -884736000, -147197952000, -262537412640768000 \right\}$. Each such elliptic curve has CM by a quadratic imaginary field $K$ of discriminant $\Delta_{K}$ ($ = -11$, $-19$, $-43$, $-67$, and $-163$, respectively). We take an example of each such elliptic curve $E'/\mathbb Q$ with \textit{j}-invariant equal to one of the five above, namely, the elliptic curves with LMFDB labels \texttt{121.b1}, \texttt{361.a1}, \texttt{1849.b1}, \texttt{4489.b1}, and \texttt{26569.a1}, respectively. By the fact that $\textit{j}_{E} = \textit{j}_{E'}$ and $\textit{j}_{E} \neq 0, 1728$, $E$ is a quadratic twist of $E'$. Running code provided by Lozano-Robledo, we see that the conductor of each of the elliptic curves $E'$ is equal to $f = 1$. Hence, $\Delta_{K} \cdot f^{2} \equiv 1 \mod 4$ and so, $\delta = \frac{\Delta_{K}-1}{4}$ and $\phi = 1$.
By the fact that $\Delta_{K} \cdot f^{2}$ is not divisible by $8$, Corollary \ref{Theorem 1.6 corollary 2} shows that $\rho_{E',2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{\delta,1}(2^{\infty}) = \left\langle \mathcal{C}_{\delta,1}(2^{\infty}), \begin{bmatrix} -1 & 0 \\ 1 & 1 \end{bmatrix} \right\rangle$, where $\mathcal{C}_{\delta,1}(2^{\infty}) = \left\{ \begin{bmatrix} a+b & b \\ \delta b & a \end{bmatrix} : a,b \in \mathbb Z_{2} \text{ not both even} \right\}$. Note that setting $a = -1$ and $b = 0$ shows that $\operatorname{-Id} \in \rho_{E',2^{\infty}}(G_{\mathbb{Q}})$ for each of the five elliptic curves. Let $H$ be a subgroup of $\rho_{E',2^{\infty}}(G_{\mathbb{Q}})$ of index $2$. Then $H$ is normal and hence, the squares of all elements of $\rho_{E',2^{\infty}}(G_{\mathbb{Q}})$ are contained in $H$. Let $a$ be an integer and let $b = 1$. Then
$$\left(\begin{bmatrix} -1 & 0 \\ 1 & 1 \end{bmatrix} \cdot \begin{bmatrix} a+1 & 1 \\ \delta & a \end{bmatrix}\right)^{2} = \begin{bmatrix} a^{2}+a-\delta & 0 \\ 0 & a^{2}+a - \delta \end{bmatrix}.$$
It suffices to find $a \in \mathbb Z_{2}$ such that $a^{2}+a-\delta = -1$. Let $p(x) = x^{2} + x - \delta + 1$. Then $p(0) \equiv 0 \mod 2$ because $\delta$ is odd, and $p'(0) = 1 \not\equiv 0 \mod 2$. By Hensel's lemma, there is a solution $\alpha \in \mathbb Z_{2}$ to the equation $x^{2} + x - \delta = -1$; the computation above, applied with $a = \alpha$, then exhibits $\operatorname{-Id}$ as a square in $\rho_{E',2^{\infty}}(G_{\mathbb{Q}})$, and so $H$ contains $\operatorname{-Id}$. By Corollary \ref{subgroups of index 2 contain -Id} and Corollary \ref{coprime isogeny-degree}, $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\rho_{E',2^{\infty}}(G_{\mathbb Q})$. Hence, $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{\delta,1}(2^{\infty}) = \mathcal{N}_{\frac{\Delta_{K}-1}{4},1}(2^{\infty})$.
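For instance, in the case $\Delta_{K} = -11$ we have $\delta = -3$ and $p(x) = x^{2}+x+4$; lifting the root $0$ of $p(x)$ modulo $2$ step by step gives $\alpha \equiv 0 \mod 4$ and $\alpha \equiv 4 \mod 8$, and indeed $\alpha^{2}+\alpha-\delta \equiv 4^{2}+4+3 \equiv -1 \mod 8$.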
\end{proof}
\begin{proposition}
Let $E/\mathbb Q$ be an elliptic curve such that the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $R_{4}(14)$ type. Then $\textit{j}_{E} = 16581375$ or $\textit{j}_{E} = -3375$. In the former case, $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-7,0}(2^{\infty})$ and in the latter case, $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-2,1}(2^{\infty})$.
\end{proposition}
\begin{proof}
Let $E/\mathbb Q$ be an elliptic curve that has a cyclic, $\mathbb Q$-rational subgroup of order $14$. Then the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is below:
\begin{center}
\begin{tikzcd}
{E_{1}, \textit{j}_{E_{1}} = 16581375} \arrow[dd, no head, "7"'] \arrow[rr, no head, "2"] & & {E_{2}, \textit{j}_{E_{2}} = -3375} \arrow[dd, no head, "7"] \\
& & \\
{E_{3}, \textit{j}_{E_{3}} = 16581375} \arrow[rr, no head, "2"'] & & {E_{4}, \textit{j}_{E_{4}} = -3375}
\end{tikzcd}
\end{center}
Let $E/\mathbb Q$ be the elliptic curve with LMFDB label \texttt{49.a1}. Then $\textit{j}_{E} = 16581375$. Running code provided by Lozano-Robledo, we see that $E$ has complex multiplication by an order of $K = \mathbb Q(\sqrt{-7})$ with discriminant $\Delta_{K} = -7$ and conductor $f = 2$. Then $\delta = \frac{-7 \cdot 2^{2}}{4} = -7$. By Corollary \ref{Theorem 1.6 corollary 2}, $\rho_{E,2^{\infty}}(G_{\mathbb{Q}})$ is conjugate to $\mathcal{N}_{-7,0}(2^{\infty}) = \left\langle \mathcal{C}_{-7,0}(2^{\infty}), \begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix} \right\rangle$, where $\mathcal{C}_{-7,0}(2^{\infty}) = \left\{ \begin{bmatrix} a & b \\ -7b & a \end{bmatrix} : a, b \in \mathbb Z_{2},\ 2 \nmid a^{2}+b^{2} \right\}$.
We will prove that $\operatorname{-Id}$ is contained in all subgroups of $\mathcal{N}_{-7,0}(2^{\infty})$ of index $2$. Let $a = 0$ and let $b$ be an integer. Then
$$\left(\begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix} \cdot \begin{bmatrix} 0 & b \\ -7b & 0 \end{bmatrix}\right)^{2} = \begin{bmatrix} 7b^{2} & 0 \\ 0 & 7b^{2} \end{bmatrix}.$$
It suffices to prove that there is $b \in \mathbb Z_{2}$ such that $7b^{2} = -1$, or equivalently, that $7x^{2}+1 \equiv 0 \mod 2^{N}$ has a solution for each positive integer $N$.
Let $p = 2$ and consider the polynomial $f(x) = 7x^{2}+1$. We will use Lemma \ref{Hensel}. Let $a = 1$. Then $f(a) = 8 \equiv 0 \mod 2^{3}$. Next we have $f'(x) = 14x$ and $f'(a) = 14$. Letting $\tau = 1$, we have $2^{1} \parallel f'(a)$. Setting $j = 3$, we have that $j \geq 2\tau + 1$. So there is a unique integer $t$ modulo $2$ such that $f(1+t \cdot 2^{2}) \equiv 0 \mod 2^{4}$.
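For instance, $f(1+2^{2}) = f(5) = 176 \equiv 0 \mod 2^{4}$, so $t = 1$ in this first step; the next step below then gives $f(5+2^{3}) = f(13) = 1184 \equiv 0 \mod 2^{5}$, and the successive approximations $1, 5, 13, \ldots$ converge in $\mathbb Z_{2}$ to a square root of $-1/7$.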
Now let $a_{1} = 1 + t \cdot 2^{2}$ and let $j = 4$. Then $f(a_{1}) \equiv 0 \mod 2^{4}$. Next, because $a_{1}$ is odd and $f'(x) = 14x$, we have $2^{1} \parallel f'(a_{1})$. Thus, $j \geq 2 \tau + 1$ and by Lemma \ref{Hensel}, there is a unique integer $t$ modulo $2$ such that $f(a_{1}+t \cdot 2^{3}) \equiv 0 \mod 2^{5}$. Continuing to apply Lemma \ref{Hensel} inductively, the successive approximations $a, a_{1}, a_{2}, \ldots$ converge to a $2$-adic integer $A$ such that $f(A) \equiv 0 \mod 2^{N}$ for all positive integers $N$, that is, $f(A) = 0$ in $\mathbb Z_{2}$. Thus, $\operatorname{-Id}$ is a square in $\mathcal{N}_{-7,0}(2^{\infty})$ and hence an element of all subgroups of $\mathcal{N}_{-7,0}(2^{\infty})$ of index $2$. Thus, quadratic twisting does not affect $\rho_{E,2^{\infty}}(G_{\mathbb{Q}})$. By Corollary \ref{subgroups of index 2 contain -Id}, if $\textit{j}_{E} = 16581375$, then $\rho_{E,2^{\infty}}(G_{\mathbb{Q}})$ is conjugate to $\mathcal{N}_{-7,0}(2^{\infty})$.
Let $E'/\mathbb Q$ be an elliptic curve with $\textit{j}_{E'} = -3375$. Then $E'$ is $2$-isogenous to an elliptic curve with \textit{j}-invariant equal to $16581375$. By Corollary \ref{contains -Id}, $\rho_{E',2^{\infty}}(G_{\mathbb{Q}})$ contains $\operatorname{-Id}$ and hence, $\rho_{E',2^{\infty}}(G_{\mathbb{Q}})$ is not affected by quadratic twisting. Using code provided by Lozano-Robledo, we see that $E'$ has complex multiplication by an order of $K = \mathbb Q(\sqrt{-7})$ with discriminant $\Delta_{K} = -7$ and conductor $f = 1$. By the fact that $\Delta_{K} \cdot f^{2} \equiv 1 \mod 4$, we let $\delta = \frac{\Delta_{K}-1}{4} \cdot f^{2} = -2$. Again, by the fact that $\Delta_{K} \cdot f^{2}$ is not divisible by $8$, Corollary \ref{Theorem 1.6 corollary 2} says that $\rho_{E',2^{\infty}}(G_{\mathbb{Q}})$ is conjugate to $\mathcal{N}_{-2,1}(2^{\infty})$.
\end{proof}
\begin{proposition}
Let $E/\mathbb Q$ be an elliptic curve such that $E$ has CM. Suppose that the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $R_{4}(6)$ type. Then $\textit{j}_{E} = 0$ or $\textit{j}_{E} = 54000$. In the former case, $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to
$\left\langle \operatorname{-Id}, \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}, \begin{bmatrix} 7 & 4 \\ -4 & 3 \end{bmatrix}, \begin{bmatrix} 3 & 6 \\ -6 & -3 \end{bmatrix} \right\rangle$ and in the latter case, $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-3,0}(2^{\infty})$.
\end{proposition}
\begin{proof}
The isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is shown below:
\begin{center}
\begin{tikzcd}
{E_{1}, \textit{j}_{E_{1}} = 0} \arrow[dd, no head, "3"'] \arrow[rr, no head, "2"] & & {E_{2}, \textit{j}_{E_{2}} = 54000} \arrow[dd, no head, "3"] \\
& & \\
{E_{3}, \textit{j}_{E_{3}} = 0} \arrow[rr, no head, "2"'] & & {E_{4}, \textit{j}_{E_{4}} = 54000}
\end{tikzcd}
\end{center}
The elliptic curve $E$ has a point of order $2$ defined over $\mathbb Q$. If $\textit{j}_{E} = 0$, then by Corollary \ref{points of order 2 with j=0}, $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle \operatorname{-Id}, \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}, \begin{bmatrix} 7 & 4 \\ -4 & 3 \end{bmatrix}, \begin{bmatrix} 3 & 6 \\ -6 & -3 \end{bmatrix} \right\rangle$. If $\textit{j}_{E} = 54000$, then $E$ is $2$-isogenous to an elliptic curve $E'/\mathbb Q$ such that $\textit{j}_{E'} = 0$. Note that $\operatorname{-Id} \in \rho_{E',2^{\infty}}(G_{\mathbb Q})$ and by Corollary \ref{contains -Id}, $\operatorname{-Id} \in \rho_{E,2^{\infty}}(G_{\mathbb Q})$. Thus, quadratic twisting does not affect $\rho_{E,2^{\infty}}(G_{\mathbb Q})$.
Let $\widetilde{E}$ be the elliptic curve with LMFDB label \texttt{36.a1}. Then $\textit{j}_{\widetilde{E}} = 54000$. Running code provided by Lozano-Robledo, we see that $\widetilde{E}$ has CM by an order of $K = \mathbb Q(\sqrt{-3})$ with discriminant $\Delta_{K} = -3$ and conductor $f = 2$. As $\Delta_{K} \cdot f^{2} \equiv 0 \mod 4$, we have $\delta = \frac{\Delta_{K} \cdot f^{2}}{4} = -3$ and $\phi = 0$. Note that $\Delta_{K} \cdot f^{2} = -12$ is not divisible by $8$. By Corollary \ref{Theorem 1.6 corollary 2}, $\rho_{\widetilde{E},2^{\infty}}(G_{\mathbb{Q}})$ is conjugate to $\mathcal{N}_{-3,0}(2^{\infty})$. As $E$ is a quadratic twist of $\widetilde{E}$, $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is also conjugate to $\mathcal{N}_{-3,0}(2^{\infty})$.
\end{proof}
\begin{proposition}
Let $E/\mathbb Q$ be an elliptic curve with $\textit{j}_{E} = 8000$. Then $E$ is $2$-isogenous to an elliptic curve $E'/\mathbb Q$ with $\textit{j}_{E'} = 8000$. The isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of type $L_{2}(2)$. Denote
\begin{center} $H_{1,3} = \left\langle \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}, \begin{bmatrix} 3 & 0 \\ 0 & 3 \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ -2 & 1 \end{bmatrix} \right\rangle$ and $H_{-1,3} = \left\langle \begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix}, \begin{bmatrix} 3 & 0 \\ 0 & 3 \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ -2 & 1 \end{bmatrix} \right\rangle$. \end{center}
Then, writing $E_{1}$ and $E_{2}$ for the two elliptic curves in the $\mathbb Q$-isogeny class of $E$, the images $\rho_{E_{i},2^{\infty}}(G_{\mathbb Q})$ fit in the following table:
\begin{center} \begin{table}[h!]
\renewcommand{\arraystretch}{1.6}
\begin{tabular} { |c|c|c| }
\hline
Isogeny graph & $\rho_{E_{1},2^{\infty}}(G_{\mathbb Q})$ & $\rho_{E_{2},2^{\infty}}(G_{\mathbb Q})$ \\
\hline
\multirow{2}*{\includegraphics[scale=0.05]{L22_graph.png}} & $H_{1, 3}$ & $H_{-1, 3}$ \\
\cline{2-3}
& $\mathcal{N}_{-2,0}(2^{\infty})$ & $\mathcal{N}_{-2,0}(2^{\infty})$ \\
\hline
\end{tabular}
\end{table} \end{center}
\end{proposition}
\begin{proof}
Let $E/\mathbb Q$ be an elliptic curve such that $\textit{j}_{E} = 8000$. Then the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $L_{2}(2)$ type shown below:
\begin{center}
\begin{tikzcd}
E_{1} \arrow[rr, no head, "2"] & & E_{2}
\end{tikzcd}
\end{center}
If $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is not conjugate to $\mathcal{N}_{\delta,\phi}(2^{\infty})$, then by Theorem \ref{Theorem 1.6}, there are $\epsilon \in \{1, -1\}$ and $\alpha \in \{3, 5\}$, such that $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to
\begin{center} $H_{\epsilon,\alpha} = \left\langle \begin{bmatrix} \epsilon & 0 \\ 0 & -\epsilon \end{bmatrix}, \begin{bmatrix} \alpha & 0 \\ 0 & \alpha \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ \delta & 1 \end{bmatrix} \right\rangle$ or $H_{\epsilon, \alpha}' = \left\langle \begin{bmatrix} \epsilon & 0 \\ 0 & -\epsilon \end{bmatrix}, \begin{bmatrix} \alpha & 0 \\ 0 & \alpha \end{bmatrix}, \begin{bmatrix} -1 & -1 \\ -\delta & -1 \end{bmatrix} \right\rangle$. \end{center}
Moreover, as $\textit{j}_{E} \neq 0, 1728$, all elliptic curves $E'/\mathbb Q$ such that $\textit{j}_{E'} = 8000$ are quadratic twists of $E$. Let $E/\mathbb Q$ be the elliptic curve with LMFDB label \texttt{256.a1}. Then $\textit{j}_{E} = 8000$. Using code provided by Lozano-Robledo, we see that $E$ has complex multiplication by an order of $K = \mathbb Q(\sqrt{-2})$ with $\Delta_{K} = -8$ and conductor $f = 1$. We compute that $\delta = \frac{-8 \cdot 1^{2}}{4} = -2$ and thus, $\phi = 0$.
A quick computation reveals that $H_{1,5}$, $H_{-1,5}$, $H_{1,5}'$, and $H_{-1,5}'$ are all equal to $\mathcal{N}_{-2,0}(2^{\infty})$ modulo $8$. By Corollary \ref{Theorem 1.6 corollary 1}, if $\overline{\rho}_{E,8}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-2,0}(2^{\infty})$ modulo $8$, then $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-2,0}(2^{\infty})$. Another quick computation reveals that $H_{1,3}$ is conjugate to $H_{1,3}'$ and $H_{-1,3}$ is conjugate to $H_{-1,3}'$ modulo $8$. Neither $H_{1,3}$ nor $H_{-1,3}$ contains $\operatorname{-Id}$, and $H_{1,3}$ is not conjugate to $H_{-1,3}$. Moreover, $H_{1,3}$, $H_{-1,3}$, and $H_{1,5}$ are quadratic twists modulo $8$ with $H_{1,5} = \left\langle H_{1,3}, \operatorname{-Id} \right\rangle = \left\langle H_{-1,3}, \operatorname{-Id} \right\rangle$ modulo $8$. In other words, up to conjugation, there are three groups to work with: $H_{1,3}$, $H_{-1,3}$, and $H_{1,5} = \mathcal{N}_{-2,0}(2^{\infty})$.
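One possible way to reproduce these quick computations, offered here only as an illustration and not as the computation actually used, is the following Python sketch: it builds the reductions modulo $8$ of the groups $H_{\epsilon,\alpha}$ and $H_{\epsilon,\alpha}'$ (with $\delta = -2$) and of $\mathcal{N}_{-2,0}(2^{\infty})$ from the generators written above, and compares them inside $\operatorname{GL}(2, \mathbb Z / 8 \mathbb Z)$. All function names are ours.
\begin{verbatim}
from itertools import product

M = 8  # compare subgroups of GL(2, Z/8Z)

def red(A):
    return tuple(tuple(x % M for x in row) for row in A)

def mul(A, B):
    return red([[sum(A[i][k] * B[k][j] for k in range(2)) for j in range(2)]
                for i in range(2)])

def closure(gens):
    # subgroup generated by gens (finite, so products of generators suffice)
    gens = [red(g) for g in gens]
    group, frontier = set(gens), list(gens)
    while frontier:
        new = []
        for A in frontier:
            for B in gens:
                C = mul(A, B)
                if C not in group:
                    group.add(C)
                    new.append(C)
        frontier = new
    return group

def H(eps, alpha, primed=False, delta=-2):
    t = [[-1, -1], [-delta, -1]] if primed else [[1, 1], [delta, 1]]
    return closure([[[eps, 0], [0, -eps]], [[alpha, 0], [0, alpha]], t])

def normalizer(delta=-2):
    # Cartan matrices [[a, b], [delta*b, a]] with odd determinant, plus [[-1,0],[0,1]]
    cartan = [[[a, b], [delta * b, a]] for a, b in product(range(M), repeat=2)
              if (a * a - delta * b * b) % 2 == 1]
    return closure(cartan + [[[-1, 0], [0, 1]]])

N = normalizer()
minus_id = red([[-1, 0], [0, -1]])
for eps, alpha, primed in product((1, -1), (3, 5), (False, True)):
    G = H(eps, alpha, primed)
    print(eps, alpha, "primed" if primed else "unprimed",
          "equals N_{-2,0} mod 8:", G == N, "contains -Id:", minus_id in G)
\end{verbatim}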
The elliptic curve $E_{1} : y^{2} = x^{3} - 17280x - 774144$ has LMFDB label \texttt{256.a1} and the elliptic curve $E_{2} : y^{2} = x^{3} - 4320x + 96768$ has LMFDB label \texttt{256.a2}. Moreover, $\textit{j}_{E_{1}} = \textit{j}_{E_{2}} = 8000$ and $E_{1}$ is $2$-isogenous to $E_{2}$. By part 2 of Example 9.4 in \cite{al-rCMGRs}, $\rho_{E_{1},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $H_{-1,3}$ and $\rho_{E_{2},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $H_{1,3}$.
Finally, let $E/\mathbb Q$ be the elliptic curve with LMFDB label \texttt{2304.h1}. Then $\textit{j}_{E} = 8000$. Hence, the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $L_{2}(2)$ type and $E$ is $\mathbb Q$-isogenous to one other elliptic curve $E'$ defined over $\mathbb Q$. Using code provided by Lozano-Robledo, we see that $\overline{\rho}_{E,8}(G_{\mathbb Q})$ contains $\operatorname{-Id}$ and by Lemma \ref{contains -Id}, so does $\overline{\rho}_{E',8}(G_{\mathbb Q})$. By the fact that the \textit{j}-invariants of both $E$ and $E'$ equal $8000$ and $\overline{\rho}_{E,8}(G_{\mathbb Q})$ and $\overline{\rho}_{E',8}(G_{\mathbb Q})$ both contain $\operatorname{-Id}$, and since $\mathcal{N}_{-2,0}(2^{\infty})$ is the only group in the list above that contains $\operatorname{-Id}$, we have that $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ and $\rho_{E',2^{\infty}}(G_{\mathbb Q})$ are both conjugate to $\mathcal{N}_{-2,0}(2^{\infty})$.
\end{proof}
\begin{proposition}
Define the following subgroups of $\operatorname{GL}(2, \mathbb Z_{2})$
\begin{itemize}
\item $G_{1} = \left\{ \begin{bmatrix} a & b \\ -b & a \end{bmatrix} : a^{2}+b^{2} \not \equiv 0 \mod 2 \right\}$
\item $G_{2,a} = \left\langle \operatorname{-Id}, 3 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix} \right\rangle$
\item $G_{2,b} = \left\langle \operatorname{-Id}, 3 \cdot \operatorname{Id}, \begin{bmatrix} 2 & 1 \\ -1 & 2 \end{bmatrix} \right\rangle$
\item $G_{4,a} = \left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix} \right\rangle$
\item $G_{4,b} = \left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} -1 & -2 \\ 2 & -1 \end{bmatrix} \right\rangle$
\item $G_{4,c} = \left\langle 3 \cdot \operatorname{Id}, \begin{bmatrix} 2 & -1 \\ 1 & 2 \end{bmatrix} \right\rangle$
\item $G_{4,d} = \left\langle 3 \cdot \operatorname{Id}, \begin{bmatrix} -2 & 1 \\ -1 & -2 \end{bmatrix} \right\rangle$
\end{itemize}
and let $\Gamma = \left\{c_{1} = \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}, c_{-1} = \begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix}, c_{1}' = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}, c_{-1}' = \begin{bmatrix} 0 & -1 \\ -1 & 0 \end{bmatrix} \right\}$. Let $E/\mathbb Q$ be an elliptic curve such that $\textit{j}_{E} = 1728$. Then $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle H, \gamma \right\rangle$ where $H$ is one of the seven groups above and $\gamma \in \Gamma$. Either the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of type $T_{4}$ or the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of type $L_{2}(2)$. For $\epsilon \in \left\{\pm 1 \right\}$, denote
\begin{center} $H_{\epsilon} = \left\langle \begin{bmatrix} \epsilon & 0 \\ 0 & -\epsilon \end{bmatrix}, \begin{bmatrix} 5 & 0 \\ 0 & 5 \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ -4 & 1 \end{bmatrix} \right\rangle$ and $H_{\epsilon}' = \left\langle \begin{bmatrix} \epsilon & 0 \\ 0 & -\epsilon \end{bmatrix}, \begin{bmatrix} 5 & 0 \\ 0 & 5 \end{bmatrix}, \begin{bmatrix} -1 & -1 \\ 4 & -1 \end{bmatrix} \right\rangle$. \end{center}
If the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $T_{4}$ type, then the $\mathbb Q$-isogeny class of $E$ consists of four elliptic curves over $\mathbb Q$, $E_{1}$, $E_{2}$, $E_{3}$, and $E_{4}$, such that $\textit{j}_{E_{1}} = \textit{j}_{E_{2}} = 1728$ and $\textit{j}_{E_{3}} = \textit{j}_{E_{4}} = 287496$ with the following algebraic data:
\begin{center} \begin{table}[h!]
\renewcommand{\arraystretch}{1.6}
\scalebox{1}{
\begin{tabular}{|c|c|c|c|c|c|}
\hline
Isogeny graph & Torsion Configuration & $\rho_{E_{1},2^{\infty}}(G_{\mathbb Q})$ & $\rho_{E_{2},2^{\infty}}(G_{\mathbb Q})$ & $\rho_{E_{3},2^{\infty}}(G_{\mathbb Q})$ & $\rho_{E_{4},2^{\infty}}(G_{\mathbb Q})$ \\
\hline
\multirow{3}*{\includegraphics[scale=0.06]{T4_isogeny_graph.png}} & $([2,2],[2],[2],[2])$ & $\left\langle G_{2,a}, c_{1} \right\rangle$ & $\left\langle G_{2,a}, c_{1}' \right\rangle$ & $\mathcal{N}_{-4,0}(2^{\infty})$ & $\mathcal{N}_{-4,0}(2^{\infty})$ \\
\cline{2-6}
& $([2,2],[2],[4],[2])$ & $\left\langle G_{4,a}, c_{1} \right\rangle$ & $\left\langle G_{4,a}, c_{1}' \right\rangle$ & $H_{1}$ & $H_{-1}$ \\
\cline{2-6}
& $([2,2],[4],[4],[2])$ & $\left\langle G_{4,b}, c_{1} \right\rangle$ & $\left\langle G_{4,b}, c_{1}' \right\rangle$ & $H_{1}'$ & $H_{-1}'$ \\
\hline
\end{tabular}}
\end{table} \end{center}
If the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $L_{2}(2)$ type, then the $\mathbb Q$-isogeny class of $E$ consists of two elliptic curves over $\mathbb Q$, $E_{1}$ and $E_{2}$, such that $\textit{j}_{E_{1}} = \textit{j}_{E_{2}} = 1728$ with the following algebraic data.
\begin{center} \begin{table}[h!]
\renewcommand{\arraystretch}{1.6}
\scalebox{1}{
\begin{tabular}{|c|c|c|c|}
\hline
Isogeny graph & Torsion configuration & $\rho_{E_{1},2^{\infty}}(G_{\mathbb Q})$ & $\rho_{E_{2},2^{\infty}}(G_{\mathbb Q})$ \\
\hline
\multirow{4}*{\includegraphics[scale=0.09]{L22_graph.png}} & \multirow{4}*{$([2],[2])$} & $\left\langle G_{2,b}, c_{1} \right\rangle$ & $\left\langle G_{2,b}, c_{1}' \right\rangle$ \\
\cline{3-4}
& & $\left\langle G_{4,c}, c_{1} \right\rangle$ & $\left\langle G_{4,c}, c_{1}' \right\rangle$ \\
\cline{3-4}
& & $\left\langle G_{4,d}, c_{1} \right\rangle$ & $\left\langle G_{4,d}, c_{1}' \right\rangle$ \\
\cline{3-4}
& & $\mathcal{N}_{-1,0}(2^{\infty})$ & $\mathcal{N}_{-1,0}(2^{\infty})$ \\
\hline
\end{tabular}}
\end{table} \end{center}
\end{proposition}
\begin{proof}
Let $E/\mathbb Q$ be an elliptic curve with $\textit{j}_{E} = 1728$. Then $E$ has CM by an order of $K = \mathbb Q(i)$ with discriminant $\Delta_{K} = -4$ and conductor $f = 1$. Then $\delta = \frac{\Delta_{K} \cdot f^{2}}{4} = -1$. By Theorem \ref{Theorem 1.1}, $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to a subgroup of $\mathcal{N}_{-1,0}(2^{\infty}) = \left\langle G_{1}, \begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix} \right\rangle$. More precisely, by Theorem \ref{Theorem 1.7}, $\rho_{E,2^{\infty}}(G_{\mathbb{Q}})$ is generated by one of $G_{1}$, $G_{2,a}$, $G_{2,b}$, $G_{4,a}$, $G_{4,b}$, $G_{4,c}$, $G_{4,d}$, and one element of $\Gamma$.
First note that $$\begin{bmatrix} 0 & 1 \\ -1 & 0 \end{bmatrix} \cdot \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} = \begin{bmatrix} 0 & -1 \\ -1 & 0 \end{bmatrix}.$$
Let $\gamma, \gamma' \in \Gamma$. By the fact that $G_{1}$ contains $\operatorname{-Id}$ and $\begin{bmatrix} 0 & 1 \\ -1 & 0 \end{bmatrix}$, we have that $\left\langle G_{1}, \gamma \right\rangle = \left\langle G_{1}, \gamma' \right\rangle = \mathcal{N}_{-1,0}(2^{\infty})$. By the fact that $G_{2,a}$ and $G_{2,b}$ contain $\operatorname{-Id}$, $\left\langle G_{2,a}, c_{1} \right\rangle = \left\langle G_{2,a}, c_{-1} \right\rangle$ and $\left\langle G_{2,b}, c_{1} \right\rangle = \left\langle G_{2,b}, c_{-1} \right\rangle$. Similarly, $\left\langle G_{2,a}, c_{1}' \right\rangle = \left\langle G_{2,a}, c_{-1}' \right\rangle$ and $\left\langle G_{2,b}, c_{1}' \right\rangle = \left\langle G_{2,b}, c_{-1}' \right\rangle$. Note that
$$\left\langle G_{4,a}, c_{1} \right\rangle = \left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix}, c_{1} \right\rangle = \left\langle 5 \cdot \operatorname{Id}, c_{1} \cdot \begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix} \cdot c_{1}^{-1}, c_{1} \right\rangle = \left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} 1 & -2 \\ 2 & 1 \end{bmatrix}, c_{1} \right\rangle.$$ Conjugation by $\begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}$ (which interchanges the two standard basis vectors) fixes $5 \cdot \operatorname{Id}$, sends $\begin{bmatrix} 1 & -2 \\ 2 & 1 \end{bmatrix}$ back to $\begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix}$, and sends $c_{1}$ to $c_{-1}$, so this last group is conjugate to the group $\left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix}, c_{-1} \right\rangle = \left\langle G_{4,a}, c_{-1} \right\rangle$. Hence, $\left\langle G_{4,a}, c_{1} \right\rangle$ is conjugate to $\left\langle G_{4,a}, c_{-1} \right\rangle$.
Next, note that $\left\langle G_{4,a}, c_{1}' \right\rangle = \left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 2 \\ -2 & 1
\end{bmatrix}, \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix} \right\rangle$ is conjugate to the group
$$\left\langle 5 \cdot \operatorname{Id}, c_{1} \cdot \begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix} \cdot c_{1}^{-1}, c_{1} \cdot \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix} \cdot c_{1}^{-1} \right\rangle = \left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} 1 & -2 \\ 2 & 1 \end{bmatrix}, \begin{bmatrix} 0 & -1 \\ -1 & 0 \end{bmatrix} \right\rangle.$$
Conjugating once more by $\begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}$, which fixes $5 \cdot \operatorname{Id}$ and $\begin{bmatrix} 0 & -1 \\ -1 & 0 \end{bmatrix}$ and sends $\begin{bmatrix} 1 & -2 \\ 2 & 1 \end{bmatrix}$ to $\begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix}$, we see that this last group is conjugate to $\left\langle 5 \cdot \operatorname{Id}, \begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix}, \begin{bmatrix} 0 & -1 \\ -1 & 0 \end{bmatrix} \right\rangle = \left\langle G_{4,a}, c_{-1}' \right\rangle$. Hence, $\left\langle G_{4,a}, c_{1}' \right\rangle$ is conjugate to $\left\langle G_{4,a}, c_{-1}' \right\rangle$.
Making similar computations, we see that $\left\langle G_{4,x}, c_{1} \right\rangle$ is conjugate to $\left\langle G_{4,x}, c_{-1} \right\rangle$ and $\left\langle G_{4,x}, c_{1}' \right\rangle$ is conjugate to $\left\langle G_{4,x}, c_{-1}' \right\rangle$ for $x = b$, $c$, and $d$. Hence, we work with the case that $\rho_{E,2^{\infty}}(G_{\mathbb{Q}})$ is conjugate to $\mathcal{N}_{-1,0}(2^{\infty})$, $\left\langle G_{2,a}, c_{1} \right\rangle$, $\left\langle G_{2,a}, c_{1}' \right\rangle$, $\left\langle G_{2,b}, c_{1} \right\rangle$, $\left\langle G_{2,b}, c_{1}' \right\rangle$, $\left\langle G_{4,a}, c_{1} \right\rangle$, $\left\langle G_{4,a}, c_{1}' \right\rangle$, $\left\langle G_{4,b}, c_{1} \right\rangle$, $\left\langle G_{4,b}, c_{1}' \right\rangle$, $\left\langle G_{4,c}, c_{1} \right\rangle$, $\left\langle G_{4,c}, c_{1}' \right\rangle$, $\left\langle G_{4,d}, c_{1} \right\rangle$, or $\left\langle G_{4,d}, c_{1}' \right\rangle$.
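The matrix identities used in the conjugations above can be verified directly; the following few lines of Python (ours) print the relevant products, namely that conjugation by $\begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}$ fixes the scalar matrices, sends $\begin{bmatrix} 1 & -2 \\ 2 & 1 \end{bmatrix}$ to $\begin{bmatrix} 1 & 2 \\ -2 & 1 \end{bmatrix}$, sends $c_{1}$ to $c_{-1}$, and fixes $c_{-1}'$.
\begin{verbatim}
def mul(A, B):
    return [[sum(A[i][k] * B[k][j] for k in range(2)) for j in range(2)]
            for i in range(2)]

def conj(w, A):
    return mul(mul(w, A), w)  # w = [[0,1],[1,0]] is its own inverse

w = [[0, 1], [1, 0]]
c1, cm1 = [[1, 0], [0, -1]], [[-1, 0], [0, 1]]
cm1p = [[0, -1], [-1, 0]]

print(conj(w, [[5, 0], [0, 5]]))   # scalar matrices are fixed
print(conj(w, [[1, -2], [2, 1]]))  # becomes [[1, 2], [-2, 1]]
print(conj(w, c1) == cm1)          # c_1 is sent to c_{-1}
print(conj(w, cm1p) == cm1p)       # c_{-1}' is fixed
\end{verbatim}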
If $\rho_{E,2^{\infty}}(G_{\mathbb{Q}})$ is conjugate to $\left\langle G_{2,a}, c_{1} \right\rangle$, $\left\langle G_{4,a}, c_{1} \right\rangle$, or $\left\langle G_{4,b}, c_{1} \right\rangle$, then $\rho_{E,2^{\infty}}(G_{\mathbb{Q}})$ reduces modulo $2$ to the trivial group and hence, $E$ has full two-torsion defined over $\mathbb Q$. Moreover, $\left\langle G_{2,a}, c_{1} \right\rangle$ is the only group of those three which contains $\operatorname{-Id}$ and $\left\langle G_{2,a}, c_{1} \right\rangle = \left\langle G_{4,a}, c_{1}, \operatorname{-Id} \right\rangle = \left\langle G_{4,b}, c_{1}, \operatorname{-Id} \right\rangle$. In other words, $\left\langle G_{2,a}, c_{1} \right\rangle$, $\left\langle G_{4,a}, c_{1} \right\rangle$, and $\left\langle G_{4,b}, c_{1} \right\rangle$ are quadratic twists.
Next, if $\rho_{E,2^{\infty}}(G_{\mathbb{Q}})$ is conjugate to one of $\left\langle G_{2,a}, c_{1}' \right\rangle$, $\left\langle G_{4,a}, c_{1}' \right\rangle$, or $\left\langle G_{4,b}, c_{1}' \right\rangle$, then using magma \cite{magma}, we see that $\rho_{E,2^{\infty}}(G_{\mathbb{Q}})$ is conjugate to a subgroup of $\left\{ \begin{bmatrix} \ast & \ast \\ 0 & \ast \end{bmatrix} \right\} \subseteq \operatorname{GL}(2, \mathbb Z / 4 \mathbb Z)$. Thus, $E$ has a cyclic, $\mathbb Q$-rational subgroup of order $4$ that generates a $\mathbb Q$-rational $4$-isogeny with cyclic kernel. Moreover, $\left\langle G_{2,a}, c_{1}' \right\rangle$ is the only group of those three which contains $\operatorname{-Id}$ and $\left\langle G_{2,a}, c_{1}' \right\rangle = \left\langle G_{4,a}, c_{1}', \operatorname{-Id} \right\rangle = \left\langle G_{4,b}, c_{1}', \operatorname{-Id} \right\rangle$. In other words, $\left\langle G_{2,a}, c_{1}' \right\rangle$, $\left\langle G_{4,a}, c_{1}' \right\rangle$, and $\left\langle G_{4,b}, c_{1}' \right\rangle$ are quadratic twists.
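The Magma computation referred to above can be re-checked in any computer algebra system. As an illustration (ours, not the computation cited in the text), the following Python sketch reduces the three groups modulo $4$ and tests whether each one stabilizes a cyclic subgroup of order $4$ of $(\mathbb Z / 4 \mathbb Z)^{2}$ generated by a vector which is part of a basis; such a stable subgroup exists if and only if the group is conjugate to a subgroup of the upper-triangular matrices in $\operatorname{GL}(2, \mathbb Z / 4 \mathbb Z)$.
\begin{verbatim}
from itertools import product

M = 4  # reduce everything modulo 4

def red(A):
    return tuple(tuple(x % M for x in row) for row in A)

def mul(A, B):
    return red([[sum(A[i][k] * B[k][j] for k in range(2)) for j in range(2)]
                for i in range(2)])

def closure(gens):
    gens = [red(g) for g in gens]
    group, frontier = set(gens), list(gens)
    while frontier:
        new = []
        for A in frontier:
            for B in gens:
                C = mul(A, B)
                if C not in group:
                    group.add(C)
                    new.append(C)
        frontier = new
    return group

def has_invariant_line(group):
    # look for a stable cyclic subgroup <v> of order 4 with v part of a basis
    for v in product(range(M), repeat=2):
        if v[0] % 2 == 1 or v[1] % 2 == 1:
            line = {tuple((k * x) % M for x in v) for k in range(M)}
            if all(tuple((h[i][0] * v[0] + h[i][1] * v[1]) % M
                         for i in range(2)) in line for h in group):
                return True
    return False

c1p = [[0, 1], [1, 0]]
groups = {
    "<G_{2,a}, c_1'>": [[[-1, 0], [0, -1]], [[3, 0], [0, 3]], [[1, 2], [-2, 1]], c1p],
    "<G_{4,a}, c_1'>": [[[5, 0], [0, 5]], [[1, 2], [-2, 1]], c1p],
    "<G_{4,b}, c_1'>": [[[5, 0], [0, 5]], [[-1, -2], [2, -1]], c1p],
}
for name, gens in groups.items():
    print(name, "stabilizes a cyclic subgroup of order 4 mod 4:",
          has_invariant_line(closure(gens)))
\end{verbatim}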
On the other hand, if $\rho_{E,2^{\infty}}(G_{\mathbb{Q}})$ is conjugate to any one of the remaining seven groups, $\mathcal{N}_{-1,0}(2^{\infty})$, $\left\langle G_{2,b}, c_{1} \right\rangle$, $\left\langle G_{2,b}, c_{1}' \right\rangle$, $\left\langle G_{4,c}, c_{1} \right\rangle$, $\left\langle G_{4,c}, c_{1}' \right\rangle$, $\left\langle G_{4,d}, c_{1} \right\rangle$, or $\left\langle G_{4,d}, c_{1}' \right\rangle$ then $E(\mathbb Q)_{\texttt{tors}} \cong \mathbb Z / 2 \mathbb Z$ and $E$ does not have a cyclic, $\mathbb Q$-rational subgroup of order $4$. Of those seven groups $\mathcal{N}_{-1,0}(2^{\infty})$, $\left\langle G_{2,b}, c_{1} \right\rangle$, and $\left\langle G_{2,b}, c_{1}' \right\rangle$ contain $\operatorname{-Id}$. Moreover, $\left\langle G_{2,b}, c_{1} \right\rangle = \left\langle G_{4,c}, c_{1}, \operatorname{-Id} \right\rangle = \left\langle G_{4,d}, c_{1}, \operatorname{-Id} \right\rangle$ and $\left\langle G_{2,b}, c_{1}' \right\rangle = \left\langle G_{4,c}, c_{1}', \operatorname{-Id} \right\rangle = \left\langle G_{4,d}, c_{1}', \operatorname{-Id} \right\rangle$. In other words, $\left\langle G_{2,b}, c_{1} \right\rangle$, $\left\langle G_{4,c}, c_{1} \right\rangle$, and $\left\langle G_{4,d}, c_{1} \right\rangle$ are quadratic twists and $\left\langle G_{2,b}, c_{1}' \right\rangle$, $\left\langle G_{4,c}, c_{1}' \right\rangle$, and $\left\langle G_{4,d}, c_{1}' \right\rangle$ are quadratic twists.
First, we will find examples of elliptic curves over $\mathbb Q$ whose $2$-adic Galois image is conjugate to $\left\langle G_{2,a}, c_{1}' \right\rangle$, $\left\langle G_{4,a}, c_{1}' \right\rangle$, and $\left\langle G_{4,b}, c_{1}' \right\rangle$ (the groups that can serve as the $2$-adic Galois image attached to an elliptic curve over $\mathbb Q$ with \textit{j}-invariant equal to $1728$ having a cyclic, $\mathbb Q$-rational subgroup of order $4$), and then classify the $2$-adic Galois images of the elliptic curves in their $\mathbb Q$-isogeny classes. In this case, the isogeny graph associated to the $\mathbb Q$-isogeny class is of $T_{4}$ type and $E$ is represented by the elliptic curve labeled $E_{2}$ (see below):
\begin{center}
\begin{tikzcd}
& E_{2} & \\
& E_{1} \arrow[u, no head, "2"] \arrow[ld, no head, "2"'] \arrow[rd, no head, "2"] & \\
E_{3} & & E_{4}
\end{tikzcd}
\end{center}
\begin{itemize}
\item $\left\langle G_{2,a}, c_{1}' \right\rangle$
Example 9.8 in \cite{al-rCMGRs} says that for $E = E_{2} : y^{2} = x^{3}+9x$ we have $\textit{j}_{E} = 1728$ and $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle G_{2,a}, c_{1}' \right\rangle$. The $\mathbb Q$-isogeny class of $E$ has LMFDB label \texttt{576.c}. The isogeny-torsion graph associated to \texttt{576.c} is of type $T_{4}$ with torsion configuration $([2,2],[2],[2],[2])$. All elliptic curves in the $\mathbb Q$-isogeny class of $E$ have CM by an order of $K = \mathbb Q(i)$ with discriminant $\Delta_{K} = -4$.
\begin{enumerate}
\item The elliptic curve $E_{1}/\mathbb Q$ with LMFDB label \texttt{576.c3} is $2$-isogenous to $E = E_{2}$. Moreover, $\textit{j}_{E_{1}} = 1728$ and $E_{1}(\mathbb Q)_{\text{tors}} \cong \mathbb Z / 2 \mathbb Z \times \mathbb Z / 2 \mathbb Z$. By Lemma \ref{contains -Id}, $\rho_{E_{1},2^{\infty}}(G_{\mathbb Q})$ contains $\operatorname{-Id}$ and hence, $\rho_{E_{1},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle G_{2,a}, c_{1} \right\rangle$.
\item The elliptic curve $E_{3} / \mathbb Q$ with LMFDB label \texttt{576.c1} is $4$-isogenous to $E = E_{2}$ and $\textit{j}_{E_{3}} = 287496$. Using code provided by Lozano-Robledo, we see that $E_{3}$ has CM by an order of $K = \mathbb Q(i)$ with discriminant $\Delta_{K} = -4$ and conductor $f = 2$. Hence, $\delta = \frac{\Delta_{K} \cdot f^{2}}{4} = -4$ and $\phi = 0$. By Theorem \ref{Theorem 1.2}, $\rho_{E_{3},2^{\infty}}(G_{\mathbb Q})$ is contained in $\mathcal{N}_{-4,0}(2^{\infty})$. Moreover, $\rho_{E_{3},2^{\infty}}(G_{\mathbb Q})$ is a group of level $4$ and $\overline{\rho}_{E_{3},4}(G_{\mathbb Q})$ is a group of order $16$ in $\operatorname{GL}(2, \mathbb Z / 4 \mathbb Z)$. The reduction of $\mathcal{N}_{-4,0}(2^{\infty})$ modulo $4$ is a group of order $16$. In other words, $\rho_{E_{3},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-4,0}(2^{\infty})$.
\item The elliptic curve $E_{4} / \mathbb Q$ with LMFDB label \texttt{576.c2} is $4$-isogenous to $E = E_{2}$. Moreover, $\textit{j}_{E_{4}} = 287496$. Thus, it is a quadratic twist of $E_{3}$. By Corollary \ref{contains -Id}, $\rho_{E_{4},2^{\infty}}(G_{\mathbb Q})$ contains $\operatorname{-Id}$. Thus, $\rho_{E_{4},2^{\infty}}(G_{\mathbb Q})$ is also conjugate to $\mathcal{N}_{-4,0}(2^{\infty})$ as the only quadratic twist of $\mathcal{N}_{-4,0}(2^{\infty})$ that contains $\operatorname{-Id}$ is $\mathcal{N}_{-4,0}(2^{\infty})$ itself.
\end{enumerate}
\item $\left\langle G_{4,a}, c_{1}' \right\rangle$
Example 9.8 in \cite{al-rCMGRs} says that for $E = E_{2} : y^{2} = x^{3} + x$ we have $\textit{j}_{E} = 1728$ and $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle G_{4,a}, c_{1}' \right\rangle$. The $\mathbb Q$-isogeny class of $E$ has LMFDB label \texttt{64.a}. The isogeny-torsion graph associated to \texttt{64.a} is of type $T_{4}$ with torsion configuration $([2,2],[2],[4],[2])$ (note that in this case, one of the elliptic curves with \textit{j}-invariant equal to $287496$ has a point of order $4$ defined over $\mathbb Q$). Note that $E(\mathbb Q)_{\text{tors}} \cong \mathbb Z / 2 \mathbb Z$. All elliptic curves in the $\mathbb Q$-isogeny class of $E$ have CM by an order of $K = \mathbb Q(i)$ with discriminant $\Delta_{K} = -4$.
\begin{enumerate}
\item The elliptic curve $E_{1}/\mathbb Q$ with LMFDB label \texttt{64.a3} is $2$-isogenous to $E = E_{2}$. Moreover, $E_{1}$ is isomorphic to the elliptic curve $y^{2} = x^{3} - 4x$, $\textit{j}_{E_{1}} = 1728$, and $E_{1}(\mathbb Q)_{\text{tors}} \cong \mathbb Z / 2 \mathbb Z \times \mathbb Z / 2 \mathbb Z$. By Example 9.8 in \cite{al-rCMGRs}, $\rho_{E_{1},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle G_{4,a}, c_{1} \right\rangle$.
\item The elliptic curve $E_{3} / \mathbb Q$ with LMFDB label \texttt{64.a2} is $4$-isogenous to $E = E_{2}$. Moreover, $E_{3}$ is isomorphic to the elliptic curve $y^{2}=x^{3}-44x+112$, $\textit{j}_{E_{3}} = 287496$, and $E_{3}(\mathbb Q)_{\text{tors}} \cong \mathbb Z / 4 \mathbb Z$. The elliptic curve $E_{3}$ has CM by an order of $K = \mathbb Q(i)$ with $\Delta_{K} = -4$ and conductor $f = 2$. Thus, $\delta = \frac{\Delta_{K} \cdot f^{2}}{4} = -4$. By Example 9.4 in \cite{al-rCMGRs}, $\rho_{E_{3},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}, \begin{bmatrix} 5 & 0 \\ 0 & 5 \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ -4 & 1 \end{bmatrix} \right\rangle \subseteq \mathcal{N}_{-4,0}(2^{\infty}) \subseteq \operatorname{GL}(2, \mathbb Z_{2})$.
\item The elliptic curve $E_{4} / \mathbb Q$ with LMFDB label \texttt{64.a1} is $4$-isogenous to $E = E_{2}$. Moreover, $E_{4}$ is isomorphic to the elliptic curve $y^{2}=x^{3}-44x-112$, $\textit{j}_{E_{4}} = 287496$, and $E_{4}(\mathbb Q)_{\text{tors}} \cong \mathbb Z / 2 \mathbb Z$. The elliptic curve $E_{4}$ has CM by an order of $K = \mathbb Q(i)$ with $\Delta_{K} = -4$ and conductor $f = 2$. Thus, $\delta = \frac{\Delta_{K} \cdot f^{2}}{4} = -4$. By Example 9.4 in \cite{al-rCMGRs}, $\rho_{E_{4},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle \begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix}, \begin{bmatrix} 5 & 0 \\ 0 & 5 \end{bmatrix}, \begin{bmatrix} 1 & 1 \\ -4 & 1 \end{bmatrix} \right\rangle \subseteq \mathcal{N}_{-4,0}(2^{\infty}) \subseteq \operatorname{GL}(2, \mathbb Z_{2})$.
\end{enumerate}
\item $\left\langle G_{4,b}, c_{1}' \right\rangle$
Example 9.8 in \cite{al-rCMGRs} says that for $E = E_{2} : y^{2} = x^{3} + 4x$ we have $\textit{j}_{E} = 1728$ and $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle G_{4,b}, c_{1}' \right\rangle$. The $\mathbb Q$-isogeny class of $E$ has LMFDB label \texttt{32.a}. The isogeny-torsion graph associated to \texttt{32.a} is of type $T_{4}$ with torsion configuration $([2,2],[4],[4],[2])$. Note that $E(\mathbb Q)_{\text{tors}} \cong \mathbb Z / 4 \mathbb Z$. All elliptic curves in the $\mathbb Q$-isogeny class of $E$ have CM by an order of $K = \mathbb Q(i)$ with discriminant $\Delta_{K} = -4$.
\begin{enumerate}
\item The elliptic curve $E_{1}/\mathbb Q$ with LMFDB label \texttt{32.a3} is $2$-isogenous to $E = E_{2}$. Moreover, $E_{1}$ is isomorphic to the elliptic curve $y^{2} = x^{3} - x$, $\textit{j}_{E_{1}} = 1728$, and $E_{1}(\mathbb Q)_{\text{tors}} \cong \mathbb Z / 2 \mathbb Z \times \mathbb Z / 2 \mathbb Z$. By Example 9.8 in \cite{al-rCMGRs}, $\rho_{E_{1},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle G_{4,b}, c_{1} \right\rangle$.
\item The elliptic curve $E_{3} / \mathbb Q$ with LMFDB label \texttt{32.a2} is $4$-isogenous to $E = E_{2}$. Moreover, $E_{3}$ is isomorphic to the elliptic curve $y^{2}=x^{3}-11x+14$, $\textit{j}_{E_{3}} = 287496$, and $E_{3}(\mathbb Q)_{\text{tors}} \cong \mathbb Z / 4 \mathbb Z$. Moreover, $E_{3}$ has CM by an order of $K = \mathbb Q(i)$ with $\Delta_{K} = -4$ and conductor $f = 2$. Thus, $\delta = \frac{\Delta_{K} \cdot f^{2}}{4} = -4$. By Example 9.4 in \cite{al-rCMGRs}, $\rho_{E_{3},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}, \begin{bmatrix} 5 & 0 \\ 0 & 5 \end{bmatrix}, \begin{bmatrix} -1 & -1 \\ 4 & -1 \end{bmatrix} \right\rangle \subseteq \mathcal{N}_{-4,0}(2^{\infty}) \subseteq \operatorname{GL}(2, \mathbb Z_{2})$.
\item The elliptic curve $E_{4} / \mathbb Q$ with LMFDB label \texttt{32.a1} is $4$-isogenous to $E = E_{2}$. Moreover, $E_{4}$ is isomorphic to the elliptic curve $y^{2}=x^{3}-11x-14$, $\textit{j}_{E_{4}} = 287496$, and $E_{4}(\mathbb Q)_{\text{tors}} \cong \mathbb Z / 2 \mathbb Z$. The elliptic curve $E_{4}$ has CM by an order of $K = \mathbb Q(i)$ with $\Delta_{K} = -4$ and conductor $f = 2$. Thus, $\delta = \frac{\Delta_{K} \cdot f^{2}}{4} = -4$. By Example 9.4 in \cite{al-rCMGRs}, $\rho_{E_{4},2^{\infty}}(G_{\mathbb Q})$ is conjugate to the group $\left\langle \begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix}, \begin{bmatrix} 5 & 0 \\ 0 & 5 \end{bmatrix}, \begin{bmatrix} -1 & -1 \\ 4 & -1 \end{bmatrix} \right\rangle \subseteq \mathcal{N}_{-4,0}(2^{\infty}) \subseteq \operatorname{GL}(2, \mathbb Z_{2})$.
\end{enumerate}
Now we move on to classify the $2$-adic Galois images of the elliptic curves in isogeny-torsion graphs of $L_{2}(2)$ type with CM whose elliptic curves have \textit{j}-invariant equal to $1728$. Up to conjugation, there are seven possible $2$-adic Galois images: $\mathcal{N}_{-1,0}(2^{\infty})$, $\left\langle G_{2,b}, c_{1} \right\rangle$, $\left\langle G_{2,b}, c_{1}' \right\rangle$, $\left\langle G_{4,c}, c_{1} \right\rangle$, $\left\langle G_{4,c}, c_{1}' \right\rangle$, $\left\langle G_{4,d}, c_{1} \right\rangle$, and $\left\langle G_{4,d}, c_{1}' \right\rangle$. We will prove that there are four distinct arrangements. In this case, the $\mathbb Q$-isogeny class of $E$ has two curves, both elliptic curves have \textit{j}-invariant equal to $1728$, and the isogeny graph associated to the $\mathbb Q$-isogeny class of $E$ is of $L_{2}(2)$ type (see below):
\begin{center}
\begin{tikzcd}
E_{1} \arrow[r, "2", no head] & E_{2}
\end{tikzcd}
\end{center}
\begin{enumerate}
\item $\left\langle G_{2,b}, c_{-1} \right\rangle$ and $\left\langle G_{2,b}, c_{-1}' \right\rangle$
Let $E_{1} : y^{2} = x^{3} + 18x$, let $E_{2} : y^{2} = x^{3} - 72x$, let $E_{1}' : y^{2} = x^{3} - 18x$, and let $E_{2}' : y^{2} = x^{3}+72x$. Then $E_{1}$ is $2$-isogenous to $E_{2}$ and $E_{1}'$ is $2$-isogenous to $E_{2}'$. By Example 9.8 in \cite{al-rCMGRs}, $\rho_{E_{1},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle G_{2,b}, c_{-1}' \right\rangle$ and $\rho_{E_{1}',2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle G_{2,b}, c_{-1} \right\rangle$.
We claim that $\rho_{E_{2},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle G_{2,b}, c_{-1} \right\rangle$ and $\rho_{E_{2}',2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle G_{2,b}, c_{-1}' \right\rangle$. Note that $E_{1}$ is a quadratic twist of $E_{2}'$ (by $2$) and $E_{2}$ is a quadratic twist of $E_{1}'$ (by $2$). By Corollary \ref{contains -Id}, $\rho_{E_{2},2^{\infty}}(G_{\mathbb Q})$ and $\rho_{E_{2}',2^{\infty}}(G_{\mathbb Q})$ contain $\operatorname{-Id}$. The only quadratic twist of $\left\langle G_{2,b}, c_{-1} \right\rangle$ that contains $\operatorname{-Id}$ is $\left\langle G_{2,b}, c_{-1} \right\rangle$ itself and the only quadratic twist of $\left\langle G_{2,b}, c_{-1}' \right\rangle$ that contains $\operatorname{-Id}$ is $\left\langle G_{2,b}, c_{-1}' \right\rangle$ itself.
\item $\left\langle G_{4,c}, c_{1} \right\rangle$ and $\left\langle G_{4,c}, c_{1}' \right\rangle$
Let $E_{1} : y^{2} = x^{3} + 2x$ and let $E_{2} : y^{2} = x^{3} - 8x$. Then $E_{1}$ is $2$-isogenous to $E_{2}$. By Example 9.8 in \cite{al-rCMGRs}, $\rho_{E_{1},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle G_{4,c}, c_{1}' \right\rangle$ and $\rho_{E_{2},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle G_{4,c}, c_{1} \right\rangle$.
\item $\left\langle G_{4,d}, c_{1} \right\rangle$ and $\left\langle G_{4,d}, c_{1}' \right\rangle$
Let $E_{1} : y^{2} = x^{3} - 2x$ and let $E_{2} : y^{2} = x^{3} + 8x$. Then $E_{1}$ is $2$-isogenous to $E_{2}$. By Example 9.8 in \cite{al-rCMGRs}, $\rho_{E_{1},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle G_{4,d}, c_{1} \right\rangle$ and $\rho_{E_{2},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\left\langle G_{4,d}, c_{1}' \right\rangle$.
\item $\mathcal{N}_{-1,0}(2^{\infty})$
Let $E_{1}/\mathbb Q$ be an elliptic curve with $\textit{j}_{E_{1}} = 1728$ such that $\rho_{E_{1},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-1,0}(2^{\infty})$. Then the isogeny graph associated to the $\mathbb Q$-isogeny class of $E_{1}$ is of $L_{2}(2)$ type and $E_{1}$ is $2$-isogenous to an elliptic curve $E_{2}/\mathbb Q$ with $\textit{j}_{E_{2}} = 1728$. A priori, $\rho_{E_{2},2^{\infty}}(G_{\mathbb Q})$ is conjugate to one of the seven groups, $\mathcal{N}_{-1,0}(2^{\infty})$, $\left\langle G_{2,b}, c_{1} \right\rangle$, $\left\langle G_{2,b}, c_{1}' \right\rangle$, $\left\langle G_{4,c}, c_{1} \right\rangle$, $\left\langle G_{4,c}, c_{1}' \right\rangle$, $\left\langle G_{4,d}, c_{1} \right\rangle$, or $\left\langle G_{4,d}, c_{1}' \right\rangle$. We have eliminated six out of the seven possibilities. Hence, $\rho_{E_{2},2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-1,0}(2^{\infty})$. Let $E$ be the elliptic curve $y^{2} = x^{3} + 3x$. Then by Example 9.8 in \cite{al-rCMGRs}, $\rho_{E,2^{\infty}}(G_{\mathbb Q})$ is conjugate to $\mathcal{N}_{-1,0}(2^{\infty})$.
\end{enumerate}
\end{itemize}
\end{proof}
\end{document}
\begin{document}
\title{A refinement of the Ramsey hierarchy via indescribability}
\author[Brent Cody]{Brent Cody}
\address[Brent Cody]{
Virginia Commonwealth University,
Department of Mathematics and Applied Mathematics,
1015 Floyd Avenue, PO Box 842014, Richmond, Virginia 23284, United States
}
\email[B. ~Cody]{[email protected]}
\urladdr{http://www.people.vcu.edu/~bmcody/}
\begin{abstract}
A subset $S$ of a cardinal $\kappa$ is Ramsey if for every function $f:[S]^{<\omega}\to \kappa$ with $f(a)<\min a$ for all $a\in[S]^{<\omega}$, there is a set $H\subseteq S$ of cardinality $\kappa$ which is \emph{homogeneous} for $f$, meaning that $f\upharpoonright[H]^n$ is constant for each $n<\omega$. Baumgartner proved \cite{MR0384553} that if $\kappa$ is a Ramsey cardinal, then the collection of non-Ramsey subsets of $\kappa$ is a normal ideal on $\kappa$. Sharpe and Welch \cite{MR2817562}, and independently Bagaria \cite{MR3894041}, extended the notion of $\Pi^1_n$-indescribability where $n<\omega$ to that of $\Pi^1_\xi$-indescribability where $\xi\geq\omega$. We study large cardinal properties and ideals which result from Ramseyness properties in which homogeneous sets are demanded to be $\Pi^1_\xi$-indescribable. By iterating Feng's Ramsey operator \cite{MR1077260} on the various $\Pi^1_\xi$-indescribability ideals, we obtain new large cardinal hierarchies and corresponding nonlinear increasing hierarchies of normal ideals. We provide a complete account of the containment relationships between the resulting ideals and show that the corresponding large cardinal properties yield a strict linear refinement of Feng's original Ramsey hierarchy. We also show that, given any ordinals $\beta_0,\beta_1<\kappa$, the increasing chains of ideals obtained by iterating the Ramsey operator on the $\Pi^1_{\beta_0}$-indescribability ideal and the $\Pi^1_{\beta_1}$-indescribability ideal, respectively, are eventually equal; moreover, we identify the least degree of Ramseyness at which this equality occurs. As an application of our results we show that one can characterize our new large cardinal notions and the corresponding ideals in terms of generic elementary embeddings; as a special case this yields generic embedding characterizations of $\Pi^1_\xi$-indescribability and Ramseyness.
\end{abstract}
\subjclass[2010]{Primary 03E55; Secondary 03E02, 03E05}
\keywords{}
\maketitle
\section{Introduction}\label{section_introduction}
In his work on decidability problems, Ramsey \cite{MR1576401} proved his famous combinatorial theorem which states that if $m,n<\omega$ and $f:[\omega]^m\to n$ is a function then $f$ has an infinite \emph{homogeneous set} $H\subseteq\omega$, meaning that $f\upharpoonright[H]^m$ is constant. The investigation of analogues of Ramsey's theorem for uncountable sets begun by Erd\H{o}s, Hajnal, Tarski, Rado and others (see \cite{MR0008249}, \cite{MR0065615}, \cite{MR81864} and \cite{MR95124}), quickly led to the definition of many large cardinal notions including weak compactness, Ramseyness, measurability and strong compactness (see \cite[Section 7]{MR1994835} for an account of the emergence of certain large cardinal axioms from the theory of partition relations). We say that $\kappa>\omega$ is a \emph{Ramsey cardinal} if for every function $f:[\kappa]^{<\omega}\to 2$ there is a set $H\subseteq\kappa$ of size $\kappa$ which is homogeneous for $f$, meaning that $f\upharpoonright[H]^n$ is constant for all $n<\omega$.\footnote{See \cite{MR2830415} for additional motivation and an explanation of how Ramsey cardinals fit into the large cardinal hierarchy.} The study of Ramsey-like properties of uncountable cardinals has been a central concern of set theorists working on large cardinals and infinitary combinatorics, with renewed interest in recent years (see \cite{MR0540770}, \cite{MR534574}, \cite{MR1077260}, \cite{MR2817562}, \cite{MR2830415}, \cite{MR2830435}, \cite{MR3348040}, \cite{MR3800756}, \cite{MR3922802}, \cite{carmody_gitman_habic} and \cite{Holy-Lucke}). In this article, we study Ramsey-like properties of uncountable cardinals in which homogeneous sets are demanded to have degrees of indescribability: for example, a cardinal $\kappa$ is \emph{$1$-$\Pi^1_n$-Ramsey} where $n<\omega$ if and only if every function $f:[\kappa]^{<\omega}\to 2$ has a $\Pi^1_n$-indescribable homogeneous set $H\subseteq\kappa$. Among other things, we show that hypotheses of this kind and their generalizations lead to a strict refinement of Feng's \cite{MR1077260} original Ramsey hierarchy: we isolate large cardinal hypotheses which provide strictly increasing hierarchies between Feng's $\Pi_\alpha$-Ramsey and $\Pi_{\alpha+1}$-Ramsey cardinals for all $\alpha<\kappa$.
Baumgartner showed (see \cite{MR0384553} and \cite{MR0540770}) that in many cases large cardinal properties can be viewed as properties of subsets of cardinals and not just of the cardinals themselves. Recall that for $S\subseteq\kappa$ where $\kappa$ is a cardinal, a function $f:[S]^{<\omega}\to\kappa$ is \emph{regressive} if $f(a)<\min a$ for all $a\in[S]^{<\omega}$. It is well-known (see \cite[Section 4]{MR0540770} or \cite[Lemma 2.42]{MR2710923}) that $\kappa$ is a Ramsey cardinal if and only if for every regressive function $f:[\kappa]^{<\omega}\to \kappa$ there is a set $H\subseteq\kappa$ of size $\kappa$ which is homogeneous for $f$. A set $S\subseteq\kappa$ is \emph{Ramsey} if every regressive function $f:[S]^{<\omega}\to \kappa$ has a homogeneous set $H\subseteq S$ of size $\kappa$.\footnote{Let us point out here that several authors, including Baumgartner \cite{MR0540770} and Feng \cite{MR1077260}, use a different definition of Ramsey set which is equivalent to ours (see Proposition \ref{proposition_4_5_6} below): in \cite{MR0540770}, a set $S\subseteq\kappa$ is Ramsey if for every club $C\subseteq\kappa$ and every regressive function $f:[S]^{<\omega}\to\kappa$ there is a set $H\subseteq S\cap C$ of size $\kappa$ which is homogeneous for $f$.} This leads naturally to the consideration of large cardinal ideals: for example, Baumgartner showed that if $\kappa$ is a Ramsey cardinal then the collection of non-Ramsey subsets of $\kappa$ is a nontrivial normal ideal on $\kappa$ called the \emph{Ramsey ideal}. Similarly, a set $S\subseteq\kappa$ is \emph{$\Pi^1_n$-indescribable} if for all $A\subseteq V_\kappa$ and all $\Pi^1_n$ sentences $\varphi$, if $(V_\kappa,\in,A)\models\varphi$ then there is an $\alpha\in S$ such that $(V_\alpha,\in,A\cap V_\alpha)\models\varphi$, and the collection $\Pi^1_n(\kappa)$ of non--$\Pi^1_n$-indescribable subsets of $\kappa$ is a normal ideal on $\kappa$ when $\kappa$ is $\Pi^1_n$-indescribable. Baumgartner proved that a cardinal $\kappa$ is Ramsey if and only if $\kappa$ is \emph{pre-Ramsey},\footnote{Pre-Ramseyness is defined below in Section \ref{section_feng}.} $\kappa$ is $\Pi^1_1$-indescribable and the union of the $\Pi^1_1$-indescribability ideal on $\kappa$ and the pre-Ramsey ideal on $\kappa$ generates a nontrivial ideal\footnote{An ideal on $\kappa$ is nontrivial if it is not equal to the entire powerset of $\kappa$.} which equals the Ramsey ideal; furthermore, reference to these ideals cannot be removed from this characterization because the least cardinal which is both pre-Ramsey and $\Pi^1_1$-indescribable is not Ramsey. Thus, Baumgartner's work shows that consideration of large cardinal ideals is, in a sense, necessary for certain results.
Generalizing the definition of Ramseyness, Feng \cite{MR1077260} defined the \emph{Ramsey operator} ${\mathscr R}$ as follows. Given an ideal $I\supseteq[\kappa]^{<\kappa}$ on $\kappa$, we define an ideal ${\mathscr R}(I)$ on $\kappa$ by letting $S\notin{\mathscr R}(I)$ if and only if for every regressive function $f:[S]^{<\omega}\to \kappa$ there is a set $H\in I^+$ homogeneous for $f$. It is easy to see that $I\subseteq{\mathscr R}(I)$ and that $I\subseteq J$ implies ${\mathscr R}(I)\subseteq {\mathscr R}(J)$ for all ideals $I,J\supseteq[\kappa]^{<\kappa}$ on $\kappa$. Feng proved that if $\kappa$ is a regular cardinal and $I\supseteq[\kappa]^{<\kappa}$ is an ideal on $\kappa$, then ${\mathscr R}(I)$ is a normal ideal on $\kappa$.\footnote{Feng used a different definition of ${\mathscr R}(I)$ which is equivalent to ours when either $I\supseteq{\mathop{\rm NS}}_\kappa$ or $I=[\kappa]^{<\kappa}$ (see Theorem \ref{theorem_ramsey_equiv} below).} Notice that $\kappa$ is a Ramsey cardinal if and only if $\kappa\notin{\mathscr R}([\kappa]^{<\kappa})$, and in this case ${\mathscr R}([\kappa]^{<\kappa})$ is the Ramsey ideal on $\kappa$. Building on Baumgartner's work \cite{MR0540770} on the ineffability hierarchy below a completely ineffable cardinal, Feng showed that one can iterate the Ramsey operator to obtain an increasing chain of ideals on $\kappa$ corresponding to a strict hierarchy of large cardinals as follows. Define $I^\kappa_{-2}=[\kappa]^{<\kappa}$ and $I^\kappa_{-1}={\mathop{\rm NS}}_\kappa$. For $n<\omega$ let $I^\kappa_n={\mathscr R}(I^\kappa_{n-2})$. Let $I^\kappa_{\alpha+1}={\mathscr R}(I^\kappa_\alpha)$. If $\alpha$ is a limit ordinal let $I^\kappa_\alpha=\bigcup_{\xi<\alpha}I^\kappa_\xi$. It may at first appear strange that Feng's definition of $I^\kappa_n$ refers to ${\mathop{\rm NS}}_\kappa$ for odd $n<\omega$. We will return to this issue below in Remark \ref{remark_fengs_defintion} after introducing some notation which clarifies this issue and which will be important for the rest of the paper. In Feng's terminology,\footnote{We tend to avoid Feng's terminology because his ``$\Pi_\alpha$-Ramsey'' notation may create confusion with notation we employ for Ramsey properties defined using the $\Pi^1_\xi$-indescribability ideals.} a cardinal $\kappa$ is \emph{$\Pi_\alpha$-Ramsey} if and only if $\kappa\notin I^\kappa_\alpha$ and $\kappa$ is \emph{completely Ramsey} if and only if $\kappa\notin I^\kappa_\alpha$ for all $\alpha$. Generalizing a result of Baumgartner, Feng proved that $I^\kappa_m\supseteq\Pi^1_{m+1}(\kappa)$ for $1\leq m <\omega$, and as a consequence the axioms ``$\exists\kappa$($\kappa$ is $\Pi_n$-Ramsey)'' form a strictly increasing hierarchy. Using canonical functions, which were introduced by Baumgartner \cite{MR0540770} in his study of the ineffability hierarchy, Feng proved that this hierarchy of large cardinal axioms can be extended to obtain a strictly increasing hierarchy of axioms of the form ``$\exists\kappa$($\kappa$ is $\Pi_\alpha$-Ramsey)''. Moreover, Feng gave characterizations of the $\Pi_n$-Ramsey cardinals for $n<\omega$ in terms of indescribability ideals, which are similar to Baumgartner's above mentioned characterization of Ramseyness in that they use generalizations of pre-Ramseyness and the reference to ideals in the characterizations cannot be removed.
We introduce some notation that differs slightly from Feng's and which simplifies the presentation of our results. For an ideal $I\supseteq[\kappa]^{<\kappa}$ we define ${\mathscr R}^\alpha(I)$ for all ordinals $\alpha$ as follows. Let ${\mathscr R}^0(I)=I$. Assuming ${\mathscr R}^\alpha(I)$ has been defined let ${\mathscr R}^{\alpha+1}(I)={\mathscr R}({\mathscr R}^\alpha(I))$. If $\alpha$ is a limit ordinal, let ${\mathscr R}^\alpha(I)=\bigcup_{\xi<\alpha}{\mathscr R}^\xi(I)$. Feng's increasing chain of ideals can then be written as
\[[\kappa]^{<\kappa}\subseteq{\mathop{\rm NS}}_\kappa\subseteq{\mathscr R}([\kappa]^{<\kappa})\subseteq{\mathscr R}({\mathop{\rm NS}}_\kappa)\subseteq{\mathscr R}^2([\kappa]^{<\kappa})\subseteq{\mathscr R}^2({\mathop{\rm NS}}_\kappa)\subseteq\cdots.\tag{F}\label{feng}\]
\begin{remark}\label{remark_fengs_defintion}
Notice that ${\mathscr R}^\omega([\kappa]^{<\kappa})={\mathscr R}^\omega({\mathop{\rm NS}}_\kappa)$, and thus ${\mathscr R}^\alpha([\kappa]^{<\kappa})={\mathscr R}^\alpha({\mathop{\rm NS}}_\kappa)$ for $\alpha\geq\omega$.
\end{remark}
Sharpe and Welch \cite[Definition 3.21]{MR2817562} extended the notion of $\Pi^1_n$-indescribability of a cardinal $\kappa$ where $n<\omega$ to that of $\Pi^1_\xi$-indescribability where $\xi<\kappa^+$ by demanding that the existence of a winning strategy for a particular player in a certain finite game played at $\kappa$ implies that the same player has a winning strategy in the analogous game played at some cardinal less than $\kappa$. Later, Bagaria \cite[Definition 4.2]{MR3894041} gave an alternative definition of the $\Pi^1_\xi$-indescribability of a cardinal $\kappa$ for $\xi<\kappa$ using the indescribability of rank-initial segments of the set-theoretic universe by certain sentences in an infinitary logic. In what follows we will use Bagaria's definition since it seems easier to work with in this context. Bagaria extended the definitions of the classes of $\Pi^1_n$ and $\Sigma^1_n$ formulas to define the natural classes of $\Pi^1_\xi$ and $\Sigma^1_\xi$ formulas for all ordinals $\xi$. For example, a formula is $\Pi^1_\omega$ if it is of the form $\bigwedge_{n<\omega}\varphi_n$ where each $\varphi_n$ is $\Pi^1_n$ and it contains finitely-many free second order variables.\footnote{See Section \ref{section_bagaria} below or \cite{MR3894041} for details.} A set $S\subseteq\kappa$ is said to be \emph{$\Pi^1_\xi$-indescribable} if for all $A\subseteq V_\kappa$ and all $\Pi^1_\xi$ sentences $\varphi$, if $(V_\kappa,\in,A)\models\varphi$ then there is some $\alpha\in S$ such that $(V_\alpha,\in,A\cap V_\alpha)\models\varphi$. Furthermore, Bagaria showed that when $\xi>0$, if $\kappa$ is $\Pi^1_\xi$-indescribable then the collection
\[\Pi^1_\xi(\kappa)=\{X\subseteq\kappa\mid\text{$X$ is not $\Pi^1_\xi$-indescribable}\}\]
is a nontrivial normal ideal on $\kappa$. As a matter of notational convenience we let $\Pi^1_{-1}(\kappa)=[\kappa]^{<\kappa}$.
In this article we study ideals of the form ${\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$ for ordinals $\alpha,\beta<\kappa$ and the corresponding hierarchy of large cardinals, which provides a strict refinement of Feng's original hierarchy.\footnote{We restrict the values of $\alpha$ and $\beta$ for which we consider ${\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$ to be less than $\kappa$ because, as explained in Section \ref{section_bagaria}, for Bagaria's version of indescribability, if the $\Pi^1_\beta$-indescribability ideal $\Pi^1_\beta(\kappa)$ is nontrivial then $\beta<\kappa$, and if $\alpha\geq\kappa$ and $\beta<\kappa$ then ${\mathscr R}^\alpha(\Pi^1_{\beta}(\kappa))={\mathscr R}^\alpha([\kappa]^{<\kappa})$ by Theorem \ref{theorem_culmination} and Corollary \ref{corollary_main_redundnacy}. Thus, apparently, consideration of the ideals ${\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$ for $\alpha\geq\kappa$ and $\beta<\kappa$ is redundant given Feng's work on ${\mathscr R}^\alpha([\kappa]^{<\kappa})$.} For $\alpha,\beta<\kappa$ we say that $\kappa$ is \emph{$\alpha$-$\Pi^1_\beta$-Ramsey} if $\kappa\notin{\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$. We show that, even though $\kappa$ being $\alpha$-$\Pi^1_\beta$-Ramsey may be equivalent to $\kappa$ being $\alpha$-$\Pi^1_{\beta'}$-Ramsey for some $\beta<\beta'<\kappa$ (see Theorem \ref{theorem_culmination} below), by choosing $\beta$'s appropriately, hypotheses of the form ``$\exists\kappa$ ($\kappa$ is $\alpha$-$\Pi^1_\beta$-Ramsey)'' for $\beta<\kappa$ yield a strict hierarchy of hypotheses between ``$\exists\kappa$ ($\kappa$ is $\Pi_\alpha$-Ramsey)'' and ``$\exists\kappa$ ($\kappa$ is $\Pi_{\alpha+1}$-Ramsey)''. In order to prove this hierarchy result, it seems that a careful analysis of the corresponding ideals is required (see Remark \ref{remark_hierarchy} and \textsc{Figure}\ \ref{figure_a_refinement_of_the_ramsey_hierarchy} below). This seems to be a natural sequel to Feng's work, given that he included ${\mathscr R}^n({\mathop{\rm NS}}_\kappa)$ in his hierarchy and when $\kappa$ is inaccessible the $\Pi^1_0$-indescribability ideal $\Pi^1_0(\kappa)$ equals ${\mathop{\rm NS}}_\kappa$.
As a first observation, it is not hard to see that the ideals ${\mathscr R}^n(\Pi^1_1(\kappa))$ for $n<\omega$ fit into Feng's increasing chain (\ref{feng}) as expected:
\begin{align*}
[\kappa]^{<\kappa}\subseteq{\mathop{\rm NS}}_\kappa\subseteq\Pi^1_1(\kappa)\subseteq{\mathscr R}([\kappa]^{<\kappa})&\subseteq{\mathscr R}({\mathop{\rm NS}}_\kappa)\subseteq{\mathscr R}(\Pi^1_1(\kappa))\subseteq\\
&{\mathscr R}^2([\kappa]^{<\kappa})\subseteq{\mathscr R}^2({\mathop{\rm NS}}_\kappa)\subseteq{\mathscr R}^2(\Pi^1_1(\kappa))\subseteq\cdots.
\end{align*}
However, since the Ramseyness of a cardinal $\kappa$ can be expressed by a $\Pi^1_2$ sentence over $V_\kappa$, it follows that the least Ramsey cardinal is not $\Pi^1_2$-indescribable and hence it is not true in general that $\Pi^1_2(\kappa)\subseteq{\mathscr R}([\kappa]^{<\kappa})$. We give a complete account of the nonlinear structure consisting of ideals ${\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$ for $\alpha,\beta<\kappa$ under the containment relations $\subseteq$ and $\subsetneq$.
After reviewing the relevant results of Baumgartner, Feng and Bagaria in Section \ref{section_preliminaries} and after establishing some basic properties of the ideals ${\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$ in Section \ref{section_basic_properties}, we prove our first reflection result in Section \ref{section_a_first_reflection_result} concerning the ideals ${\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$ for $\alpha,\beta<\kappa$. It follows from a result of Baumgartner \cite[Theorem 4.1]{MR0384553} that if $\kappa$ is a Ramsey cardinal then the set of cardinals less than $\kappa$ which are $\Pi^1_n$-indescribable for all $n$ is in the Ramsey filter on $\kappa$. We generalize this result by proving that for all $\alpha<\kappa$, if $\kappa\notin{\mathscr R}^{\alpha+1}([\kappa]^{<\kappa})$ (i.e. $\kappa$ is $\Pi_{\alpha+1}$-Ramsey in Feng's terminology) then the set of $\xi<\kappa$ such that $\xi\notin{\mathscr R}^\alpha(\Pi^1_\beta(\xi))$ \emph{for all} $\beta<\xi$ is in the filter dual to ${\mathscr R}^{\alpha+1}([\kappa]^{<\kappa})$. Hence ``$\exists\kappa$($\kappa\notin{\mathscr R}^{\alpha+1}([\kappa]^{<\kappa})$)'' is strictly stronger than ``$\exists\kappa$ $\forall \beta<\kappa$ ($\kappa\notin{\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$)''.
In Section \ref{section_describing_ramseyness}, we prove a technical lemma which is fundamental for the rest of the paper and which establishes an ordinal $\gamma(\alpha,\mathfrak{b}eta)$ which suffices to express the fact that a set $S{\mathop{\rm sub}}seteq\kappa$ is in ${\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))^+$ using a ${\mathbb P}i^1_{\gamma(\alpha,\mathfrak{b}eta)}$ sentence over $V_\kappa$. This lemma provides a generalization of a result of Sharpe and Welch \cite[Remark 3.17]{MR2817562} which states that ``$S\in{\mathscr R}^\alpha([\kappa]^{<\kappa})^+$'' is a ${\mathbb P}i^1_{2\cdot(1+\alpha)}$ property.
In Section \ref{section_indescribability_in_finite_ramseyness}, we give a full account of the nonlinear containment structure of the ideals ${\mathscr R}^m({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$ for $m\leq\omega$ and $\mathfrak{b}eta<\kappa$ (see \textsc{Figure}\ \ref{figure_finite_ideal_diagram} below). We derive several corollaries from this result. For example, we provide characterizations of the large cardinal property $\kappa\notin{\mathscr R}^m({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$ which are analogous to Baumgartner's characterization of Ramseyness discussed above. As a consequence, $\kappa\notin{\mathscr R}^m({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$ implies $\kappa$ is ${\mathbb P}i^1_{\mathfrak{b}eta+2m}$-indescribable, and moreover the ideal ${\mathscr R}^m({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$ equals the ideal generated by the ${\mathbb P}i^1_{\mathfrak{b}eta+2m}$-indescribability ideal and a generalization of the pre-Ramsey ideal (see Corollary \ref{corollary_indescribability_in_finite_ramseyness} below). Furthermore, we prove that ``$\exists\kappa$($\kappa\notin{\mathscr R}^m({\mathbb P}i^1_{\mathfrak{b}eta+1}(\kappa))$)'' is strictly stronger than ``$\exists\kappa$($\kappa\notin{\mathscr R}^m({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$)'' and that the large cardinal axioms associated to the ideals ${\mathscr R}^m({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$ fit into a linear strict hierarchy when the ideals are nontrivial. Moreover, in analogy with the fact quoted in Remark \ref{remark_fengs_defintion} above, we show that if $\kappa\notin{\mathscr R}^\omega({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$ then for all $n<\omega$,
\[{\mathscr R}^\omega({\mathbb P}i^1_\mathfrak{b}eta(\kappa))={\mathscr R}^\omega({\mathbb P}i^1_{\mathfrak{b}eta+n}(\kappa)).\]
Let us point out that the proof of this result is substantially different from the observations made in Remark \ref{remark_fengs_defintion} since the relevant ideals
\[\{{\mathscr R}^m({\mathbb P}i^1_{\mathfrak{b}eta+n}(\kappa))\mid \text{$m,n<\omega$ and $\mathfrak{b}eta<\kappa$}\}\] do not form an increasing chain. Another way of phrasing this result is that at the $\omega$-th level of the Ramsey hierarchy, the ideal chains $\langle{\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))\mid\alpha<\kappa\rangle$ and $\langle{\mathscr R}^\alpha({\mathbb P}i^1_{\mathfrak{b}eta+n}(\kappa))\mid\alpha<\kappa\rangle$ become equal.
In Section \ref{section_indescribability_in_infinite_ramseyness}, we extend these results to the ideals ${\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$ for $\omega<\alpha<\kappa$ and $\mathfrak{b}eta\in\{-1\}\cup\kappa$. That is, we provide a complete account of the containment relationships between ideals of the form ${\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$. As a culmination of these results, given $\mathfrak{b}eta_0<\mathfrak{b}eta_1$ in $\{-1\}\cup\kappa$ we isolate the precise location in the Ramsey hierarchy at which the ideal chains $\langle{\mathscr R}^\alpha({\mathbb P}i^1_{\mathfrak{b}eta_0}(\kappa))\mid\alpha<\kappa\rangle$ and $\langle{\mathscr R}^\alpha({\mathbb P}i^1_{\mathfrak{b}eta_1}(\kappa))\mid\alpha<\kappa\rangle$ become equal by proving the following theorem (see \textsc{Figure}\ \ref{figure_culmination} below for an illustration of this result). In what follows, ${\mathbb P}i^1_{-1}(\kappa)=[\kappa]^{<\kappa}$ and ${\mathbb P}i^1_0(\kappa)={\mathop{\rm NS}}_\kappa$.
\mathfrak{b}egin{theorem}\langlebel{theorem_culmination}
Suppose $\mathfrak{b}eta_0<\mathfrak{b}eta_1$ are in $\{-1\}\cup\kappa$ and let $\sigma=\mathop{\rm ot}\nolimits(\mathfrak{b}eta_1\setminus\mathfrak{b}eta_0)$. Define $\alpha=\sigma\cdot\omega$. Suppose $\kappa\in{\mathscr R}^\alpha({\mathbb P}i^1_{\mathfrak{b}eta_1}(\kappa))^+$ so that the ideals under consideration are nontrivial. Then $\alpha$ is the least ordinal such that ${\mathscr R}^\alpha({\mathbb P}i^1_{\mathfrak{b}eta_0}(\kappa))={\mathscr R}^\alpha({\mathbb P}i^1_{\mathfrak{b}eta_1}(\kappa))$.
\end{theorem}
\noindent Furthermore, we prove that the large cardinal hypotheses of the form ``$\exists \kappa$ $\kappa\notin{\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$'' provide a strict linear refinement of Feng's original hierarchy up to ${\mathbb P}i_\kappa$-Ramseyness (see Theorem \ref{theorem_hierarchy_result_for_infinite_alpha} and \textsc{Figure}\ \ref{figure_a_refinement_of_the_ramsey_hierarchy} below).
Finally, in Section \ref{section_generic_embeddings}, as an application of our results we provide characterizations of the ideals ${\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$ for $\alpha,\mathfrak{b}eta<\kappa$ in terms of generic elementary embeddings. As a special case, this also yields generic embedding characterizations of ${\mathbb P}i^1_\xi$-indescribability and Ramseyness.
\section{Preliminaries}\langlebel{section_preliminaries}
Here we describe some notation that will be used throughout the paper and some results from the literature. We cover some results of Baumgartner (from \cite{MR0384553} and \cite{MR0540770}) and Feng \cite{MR1077260} which serve as motivation for our results. Then we give a brief account of Bagaria's extension \cite{MR3894041} of ${\mathbb P}i^1_n$-indescribability to ${\mathbb P}i^1_\xi$-indescribability where $\xi$ can be any ordinal.
{\mathop{\rm sub}}section{Definitions and Notation}
Given an ideal $I$ on a cardinal $\kappa$ we let
\[I^+=\{X{\mathop{\rm sub}}seteq\kappa\mid X\notin I\}\]
be the corresponding collection of positive sets and we let
\[I^*=\{X{\mathop{\rm sub}}seteq\kappa\mid \kappa\setminus X\in I\}\]
be the filter dual to $I$. For notational convenience, and in order to avoid double negations, in what follows we will often write $X\in I^+$ instead of $X\notin I$. If $\mathcal{A}{\mathop{\rm sub}}seteq P(\kappa)$ is a collection of subsets of $\kappa$ then we write $\overline{\mathcal{A}}$ to denote the ideal on $\kappa$ generated by $\mathcal{A}$:
\[\overline{\mathcal{A}}=\{X{\mathop{\rm sub}}seteq\kappa\mid (\exists \mathcal{B}\in[\mathcal{A}]^{<\omega}) X{\mathop{\rm sub}}seteq \mathfrak{b}igcup\mathcal{B}\}.\]
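For instance (a simple illustration of this notation, not needed later), if $\kappa$ is regular and $\mathcal{A}$ is the collection of all bounded subsets of $\kappa$, then
\[\overline{\mathcal{A}}=[\kappa]^{<\kappa},\]
since a finite union of bounded subsets of $\kappa$ is bounded and, by regularity, a subset of $\kappa$ is bounded if and only if it has size less than $\kappa$.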
An ideal $I$ on $\kappa$ is called \emph{nontrivial} if $I\neq P(\kappa)$.
We will be concerned with showing that certain large cardinal ideals are obtained by taking the ideal generated by a union of some other large cardinal ideals. We will make repeated use of the following simple remark, which was used implicitly by Baumgartner (see the proof of Theorem 4.4 in \cite{MR0540770}).
\mathfrak{b}egin{remark}\langlebel{remark_ideal_generated}
Suppose $I_0$, $I_1$ and $J$ are ideals on $\kappa$. In order to prove that $J=\overline{I_0\cup I_1}$, part of what we must show is that $J\supseteq\overline{I_0\cup I_1}$, or in other words $J^+{\mathop{\rm sub}}seteq\overline{I_0\cup I_1}^+$. Notice that we may obtain a chain of equivalences directly from the definitions involved:
\mathfrak{b}egin{align*}
J^+{\mathop{\rm sub}}seteq\overline{I_0\cup I_1}^+ &\iff \overline{I_0\cup I_1}{\mathop{\rm sub}}seteq J \\
&\iff I_0\cup I_1{\mathop{\rm sub}}seteq J\\
&\iff J^+{\mathop{\rm sub}}seteq I_0^+\cap I_1^+.
\end{align*}
In what follows, in order to prove that the property $J^+{\mathop{\rm sub}}seteq\overline{I_0\cup I_1}^+$ (or equivalently the property $J\supseteq\overline{I_0\cup I_1}$) holds for various ideals, we will prove $J^+{\mathop{\rm sub}}seteq I_0^+\cap I_1^+$ and include a reference to this remark.
\end{remark}
{\mathop{\rm sub}}section{Baumgartner's ineffability hierarchy}\langlebel{section_baumgartners_ineffability_hierarchy}
Let us review a few results due to Baumgartner using slightly different notation than \cite{MR0384553} and \cite{MR0540770}. Suppose $\kappa>\omega$ is a cardinal and $S{\mathop{\rm sub}}seteq\kappa$. We say that $\vec{S}=\langleS_\alpha\mid\alpha\in S\rangle$ is a \emph{$(1,S)$-sequence}\footnote{Such sequences are sometimes called \emph{$S$-lists} (see \cite{MR2959668} or \cite{MR3913154}). However, we prefer Baumgartner's terminology because we will need to distinguish $(1,S)$-sequences from Feng's $(\omega,S)$-sequences (see Section \ref{section_feng}).} if for each $\alpha\in S$ we have $S_\alpha{\mathop{\rm sub}}seteq\alpha$. Given a $(1,S)$-sequence $\vec{S}=\langleS_\alpha\mid\alpha\in S\rangle$, a set $H{\mathop{\rm sub}}seteq S$ is \emph{homogeneous} for $\vec{S}$ if for all $\alpha,\mathfrak{b}eta\in H$ with $\alpha<\mathfrak{b}eta$ we have $S_\alpha=S_\mathfrak{b}eta\cap\alpha$.
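For example, any fixed $A{\mathop{\rm sub}}seteq\kappa$ gives rise to the $(1,S)$-sequence $\langleA\cap\alpha\mid\alpha\in S\rangle$, and every $H{\mathop{\rm sub}}seteq S$ is homogeneous for it, since $A\cap\alpha=(A\cap\mathfrak{b}eta)\cap\alpha$ whenever $\alpha<\mathfrak{b}eta$. Homogeneity is a nontrivial requirement only for $(1,S)$-sequences whose entries do not already cohere in this way.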
Given an ideal $I\supseteq[\kappa]^{<\kappa}$ on $\kappa$ we define another ideal ${\mathscr I}(I)$ by letting $S\in{\mathscr I}(I)^+$ if and only if for every $(1,S)$-sequence $\vec{S}$ there is a set $H{\mathop{\rm sub}}seteq S$ in $I^+$ such that $H$ is homogeneous for $\vec{S}$. A set $S{\mathop{\rm sub}}seteq\kappa$ is \emph{ineffable} if $S\in{\mathscr I}({\mathop{\rm NS}}_\kappa)^+$. Baumgartner showed that when $\kappa$ is an ineffable cardinal the collection ${\mathscr I}({\mathop{\rm NS}}_\kappa)$ of non-ineffable subsets of $\kappa$ is a normal ideal on $\kappa$, which we call the \emph{ineffability ideal} on $\kappa$. Notice that ${\mathscr I}$ can be viewed as a function mapping ideals to ideals, which we call the \emph{ineffability operator}.
Baumgartner \cite[Theorem 5.3]{MR0384553} gave several characterizations of ineffability in terms of partition properties. Given a set $S{\mathop{\rm sub}}seteq\kappa$, a function $f:[S]^2\to\kappa$ is said to be \emph{regressive} if $f(a)<\min a$ for all $a\in[S]^2$. A set $H{\mathop{\rm sub}}seteq S$ is \emph{homogeneous} for a function $f:[S]^2\to\kappa$ if $f\upharpoonright [H]^2$ is constant.
\mathfrak{b}egin{theorem}[Baumgartner]\langlebel{theorem_baumgartner_ineff_char}
Let $\kappa$ be a cardinal and $S{\mathop{\rm sub}}seteq\kappa$. The following are equivalent.
\mathfrak{b}egin{enumerate}
\item $S$ is ineffable.
\item For every regressive function $f:[S]^2\to\kappa$ there is a set $H{\mathop{\rm sub}}seteq S$ stationary in $\kappa$ which is homogeneous for $f$.
\item $\kappa$ is regular and for every function $f:[S]^2\to 2$ there is a set $H{\mathop{\rm sub}}seteq S$ stationary in $\kappa$ which is homogeneous for $f$.
\end{enumerate}
\end{theorem}
Now suppose that $\vec{I}=\langleI_\alpha\mid\text{$\alpha\leq\kappa$ is an uncountable cardinal}\rangle$ is a sequence such that each $I_\alpha\supseteq[\alpha]^{<\alpha}$ is an ideal on $\alpha$ and $I_\alpha=P(\alpha)$ when $\alpha$ is a singular cardinal. We define an ideal ${\mathscr I}_0(\vec{I})$ on $\kappa$ by letting $S\in{\mathscr I}_0(\vec{I})^+$ if and only if for every $(1,S)$-sequence $\vec{S}$ and every club $C{\mathop{\rm sub}}seteq\kappa$ there is an $\alpha\in S\cap C$ for which there is a set $H{\mathop{\rm sub}}seteq S\cap C\cap\alpha$ in $I_\alpha^+$ homogeneous for $\vec{S}$. When no confusion will arise, as in the case where the nontrivial ideals $I_\alpha$ have a uniform definition, we write ${\mathscr I}_0(I_\kappa)$ instead of ${\mathscr I}_0(\vec{I})$.
For example, let $\vec{J}=\langleJ_\alpha\mid\text{$\alpha\leq\kappa$ is a cardinal}\rangle$ be defined by letting $J_\alpha={\mathop{\rm NS}}_\alpha$ when $\alpha$ is regular and $J_\alpha=P(\alpha)$ when $\alpha$ is singular. A set $S{\mathop{\rm sub}}seteq\kappa$ is \emph{subtle} if $S\in{\mathscr I}_0({\mathop{\rm NS}}_\kappa)^+={\mathscr I}_0(\vec{J})^+$.\footnote{Baumgartner showed that $S\in{\mathscr I}_0({\mathop{\rm NS}}_\kappa)^+$ is equivalent to the more often used definition of subtlety of a set $S$ given in Theorem \ref{theorem_subtle} (4). We use the stated definition of subtlety of $S$ for ease of presentation.} Furthermore, Baumgartner proved that if $\kappa$ is a subtle cardinal then ${\mathscr I}_0({\mathop{\rm NS}}_\kappa)$ is a normal ideal on $\kappa$, which we call the \emph{subtle ideal} on $\kappa$. We refer to ${\mathscr I}_0$ as the \emph{subtle operator}. Recall that every ineffable set is subtle, the least subtle cardinal is not ${\mathbb P}i^1_1$-indescribable and, as shown by Baumgartner \cite[Theorem 4.1]{MR0384553}, the existence of a subtle cardinal is strictly stronger than the existence of a cardinal which is ${\mathbb P}i^1_n$-indescribable for all $n<\omega$.
As another example, let $\vec{J}=\langleJ_\alpha\mid\text{$\alpha\leq\kappa$ is a cardinal}\rangle$ be a sequence of ideals defined by letting $J_\alpha={\mathbb P}i^1_1(\alpha)$ when $\alpha$ is ${\mathbb P}i^1_1$-indescribable and $J_\alpha=P(\alpha)$ otherwise. Then ${\mathscr I}_0({\mathbb P}i^1_1(\kappa))={\mathscr I}_0(\vec{J})$ and a set $S$ is in ${\mathscr I}_0({\mathbb P}i^1_1(\kappa))^+$ if and only if for every $(1,S)$-sequence $\vec{S}$ and every club $C{\mathop{\rm sub}}seteq\kappa$ there is an $\alpha\in S\cap C$ for which there is a set $H{\mathop{\rm sub}}seteq S\cap C\cap \alpha$ in ${\mathbb P}i^1_1(\alpha)^+$ which is homogeneous for $\vec{S}$.\footnote{Note that the set $H$ being in ${\mathbb P}i^1_1(\alpha)^+$ implies $\alpha$ is ${\mathbb P}i^1_1$-indescribable.}
Baumgartner showed \cite[Theorem 5.1]{MR0384553} that subtlety can be characterized using partition properties.
\mathfrak{b}egin{theorem}[Baumgartner]\langlebel{theorem_subtle}
Let $\kappa$ be a cardinal and $S{\mathop{\rm sub}}seteq\kappa$. The following are equivalent.
\mathfrak{b}egin{enumerate}
\item $S$ is subtle, that is, $S\in{\mathscr I}_0({\mathop{\rm NS}}_\kappa)^+$.
\item For every regressive function $f:[S]^2\to\kappa$ and every club $C{\mathop{\rm sub}}seteq\kappa$ there is a regular cardinal $\alpha\leq\kappa$ and a set $H{\mathop{\rm sub}}seteq S\cap C\cap\alpha$ stationary in $\alpha$ which is homogeneous for $f$.
\item For every function $f:[S]^2\to 2$ and every club $C{\mathop{\rm sub}}seteq\kappa$ there is a regular cardinal $\alpha\leq\kappa$ and a set $H{\mathop{\rm sub}}seteq S\cap C\cap \alpha$ stationary in $\alpha$ which is homogeneous for $f$.
\item For every $(1,S)$-sequence $\vec{S}$ and every club $C{\mathop{\rm sub}}seteq\kappa$ there is a set $\{\alpha_0,\alpha_1\}\in[S\cap C]^2$ which is homogeneous for $\vec{S}$.
\end{enumerate}
\end{theorem}
The following theorem, perhaps one of the most noteworthy of \cite{MR0384553}, shows that in order to have a full understanding of certain large cardinals, one \emph{must} consider large cardinal ideals. Taking $n=0$ in the following theorem, one can easily see that a cardinal $\kappa$ is ineffable if and only if it is subtle, ${\mathbb P}i^1_2$-indescribable and additionally the subtle ideal and the ${\mathbb P}i^1_2$-indescribable ideal generate a nontrivial ideal which equals the ineffability ideal; moreover, reference to these ideals cannot be removed from this characterization.
\mathfrak{b}egin{theorem}[Baumgartner]\langlebel{theorem_baumgarnter_ineffability}
Suppose $\kappa$ is a cardinal and $n<\omega$. Then $\kappa\in{\mathscr I}({\mathbb P}i^1_n(\kappa))^+$ if and only if both of the following hold.
\mathfrak{b}egin{enumerate}
\item $\kappa\in{\mathscr I}_0({\mathbb P}i^1_n(\kappa))^+$ and $\kappa\in {\mathbb P}i^1_{n+2}(\kappa)^+$.
\item The ideal generated by ${\mathscr I}_0({\mathbb P}i^1_n(\kappa))\cup{\mathbb P}i^1_{n+2}(\kappa)$ is nontrivial and equals ${\mathscr I}({\mathbb P}i^1_n(\kappa))$.
\end{enumerate}
Moreover, reference to the ideals in the above characterization cannot be removed because the least cardinal $\kappa$ such that $\kappa\in{\mathscr I}_0({\mathbb P}i^1_n(\kappa))^+$ and $\kappa\in{\mathbb P}i^1_{n+2}(\kappa)^+$ is not in ${\mathscr I}({\mathbb P}i^1_n(\kappa))^+$.
\end{theorem}
In his second article \cite{MR0540770} on ineffability properties, Baumgartner iterated the ineffability operator ${\mathscr I}$ and defined an increasing chain of ideals as follows. Define ${\mathscr I}^0({\mathop{\rm NS}}_\kappa)={\mathop{\rm NS}}_\kappa$ and ${\mathscr I}^{\alpha+1}({\mathop{\rm NS}}_\kappa)={\mathscr I}({\mathscr I}^\alpha({\mathop{\rm NS}}_\kappa))$. If $\alpha$ is a limit ordinal let ${\mathscr I}^\alpha({\mathop{\rm NS}}_\kappa)=\mathfrak{b}igcup_{\xi<\alpha}{\mathscr I}^\xi({\mathop{\rm NS}}_\kappa)$. Since the ideals ${\mathscr I}^\alpha({\mathop{\rm NS}}_\kappa)$ form an increasing chain and there are only $2^\kappa$ subsets of $\kappa$, there must be an $\alpha<(2^\kappa)^+$ such that ${\mathscr I}^\alpha({\mathop{\rm NS}}_\kappa)={\mathscr I}^{\alpha+1}({\mathop{\rm NS}}_\kappa)$. A cardinal $\kappa$ is \emph{completely ineffable} if the ideal ${\mathscr I}^\alpha({\mathop{\rm NS}}_\kappa)$ is nontrivial, where $\alpha$ is the least ordinal such that ${\mathscr I}^\alpha({\mathop{\rm NS}}_\kappa)={\mathscr I}^{\alpha+1}({\mathop{\rm NS}}_\kappa)$. Baumgartner introduced \emph{canonical functions} in order to prove \cite[Theorem 3.7]{MR0540770} that if $\mathfrak{b}eta<\kappa^+$ and $\kappa\in{\mathscr I}^\mathfrak{b}eta({\mathop{\rm NS}}_\kappa)^+$ (i.e. ${\mathscr I}^\mathfrak{b}eta({\mathop{\rm NS}}_\kappa)$ is a nontrivial ideal) then ${\mathscr I}^\alpha({\mathop{\rm NS}}_\kappa){\mathop{\rm sub}}setneq{\mathscr I}^{\alpha+1}({\mathop{\rm NS}}_\kappa)$ for all $\alpha<\mathfrak{b}eta$.
\mathfrak{b}egin{remark}
Although Baumgartner briefly mentions the ideals ${\mathscr I}^m({\mathbb P}i^1_n(\kappa))$ in \cite{MR0540770} (see the discussion following Corollary 3.5), they, as well as ideals of the form ${\mathscr I}^\alpha({\mathbb P}i^1_n(\kappa))$ for ordinals $\alpha>1$, seem to be otherwise absent from both \cite{MR0384553} and \cite{MR0540770}.
\end{remark}
{\mathop{\rm sub}}section{Baumgartner's result on the Ramsey ideal and Feng's Ramsey hierarchy}\langlebel{section_feng}
Recall from Section \ref{section_introduction} that Feng \cite{MR1077260} defined the Ramsey operator ${\mathscr R}$, which is analogous to the ineffability operator ${\mathscr I}$, and iterated ${\mathscr R}$ in order to define completely Ramsey cardinals.
As noted by Feng \cite[Definition 2.1]{MR1077260}, it is easy to see that if $I{\mathop{\rm sub}}seteq J$ are ideals on $\kappa$ then ${\mathscr R}(I){\mathop{\rm sub}}seteq{\mathscr R}(J)$. Although it will not be used in what follows, let us show that under reasonable assumptions one obtains proper containment ${\mathscr R}(I){\mathop{\rm sub}}setneq{\mathscr R}(J)$.
\mathfrak{b}egin{proposition}
If $[\kappa]^{<\kappa}{\mathop{\rm sub}}seteq I{\mathop{\rm sub}}setneq J$ are nontrivial ideals on $\kappa$ such that ${\mathscr R}(I)^+\cap J\neq\varnothing$ and for all $X\in I^+$ there exist $X_0,X_1\in I^+$ such that $X=X_0\sqcup X_1$, then ${\mathscr R}(I){\mathop{\rm sub}}setneq{\mathscr R}(J)$.
\end{proposition}
\mathfrak{b}egin{proof}
Suppose $I{\mathop{\rm sub}}setneq J$ are as in the statement of the proposition. Let us show that ${\mathscr R}(I)\neq{\mathscr R}(J)$. Choose $X\in {\mathscr R}(I)^+\cap J$. Since $X\in I^+$, there exist $X_0,X_1\in I^+$ such that $X=X_0\sqcup X_1$. Notice that $X_0,X_1\in J$ since $X_0,X_1{\mathop{\rm sub}}seteq X\in J$. Define a function $f:[X]^{<\omega}\to 2$ by
\[
f(\vec{\alpha})=
\mathfrak{b}egin{cases}
1 & \textrm{if $\vec{\alpha}\in X_0^{<\omega}\cup X_1^{<\omega}$}\\
0 & \textrm{otherwise}
\end{cases}
\]
Since $X\in{\mathscr R}(I)^+$ there is a homogeneous set for $f$ in $I^+$. However, it is straightforward to show that if $H{\mathop{\rm sub}}seteq X$ is homogeneous for $f$, then $H$ is either a subset of $X_0$ or a subset of $X_1$, and is therefore in $J$. Thus $X\in {\mathscr R}(J)$.
\end{proof}
Let us define another operator ${\mathscr R}_0$ which is analogous to ${\mathscr I}_0$. Suppose that $\vec{I}=\langleI_\alpha\mid\text{$\alpha\leq\kappa$ is a cardinal}\rangle$ is a sequence such that each $I_\alpha\supseteq[\alpha]^{<\alpha}$ is an ideal on $\alpha$. Recall that for a set $S{\mathop{\rm sub}}seteq\kappa$ a function $f:[S]^{<\omega}\to\kappa$ is \emph{regressive} if $f(a)<\min(a)$ for all $a\in[S]^{<\omega}$. We define an ideal ${\mathscr R}_0(\vec{I})$ on $\kappa$ by letting $S\in{\mathscr R}_0(\vec{I})^+$ if and only if for every regressive function $f:[S]^{<\omega}\to\kappa$ and every club $C{\mathop{\rm sub}}seteq\kappa$ there is an $\alpha\in S\cap C$ for which there is a set $H{\mathop{\rm sub}}seteq S\cap C\cap\alpha$ in $I_\alpha^+$ which is homogeneous for $f$, meaning that $f\upharpoonright[H]^n$ is constant for each $n<\omega$. As before (see the discussion after Theorem \ref{theorem_baumgartner_ineff_char}), many of the ideals $I_\alpha$ will be understood to be trivial, and when no confusion will arise, as in the case where the ideals $I_\alpha$ have a uniform definition, we write ${\mathscr R}_0(I_\kappa)$ instead of ${\mathscr R}_0(\vec{I})$. Baumgartner defined a set $S{\mathop{\rm sub}}seteq\kappa$ to be \emph{pre-Ramsey} if and only if $S\in{\mathscr R}_0([\kappa]^{<\kappa})^+$. Thus, pre-Ramseyness is to Ramseyness as subtlety is to ineffability.
Feng \cite[Theorem 2.3]{MR1077260} gave a characterization of Ramseyness which resembles the definition of ineffability. Suppose $S{\mathop{\rm sub}}seteq\kappa$. For each $n<\omega$ and for all increasing sequences $\alpha_1<\cdots<\alpha_n$ taken from $S$ suppose that $S_{\alpha_1,\ldots,\alpha_n}{\mathop{\rm sub}}seteq\alpha_1$. Then we say that
\[\vec{S}=\langleS_{\alpha_1,\ldots,\alpha_n}\mid n<\omega\langlend (\alpha_1,\ldots,\alpha_n)\in[S]^n\rangle\]
is an $(\omega,S)$-sequence. A set $H{\mathop{\rm sub}}seteq S$ is said to be \emph{homogeneous} for an $(\omega,S)$-sequence $\vec{S}$ if for all $0<n<\omega$ and for all increasing sequences $\alpha_1<\cdots<\alpha_n$ and $\mathfrak{b}eta_1<\cdots<\mathfrak{b}eta_n$ taken from $H$ with $\alpha_1\leq\mathfrak{b}eta_1$ we have $S_{\alpha_1\cdots\alpha_n}=S_{\mathfrak{b}eta_1\cdots\mathfrak{b}eta_n}\cap\alpha_1$.
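To illustrate the relationship with the $(1,S)$-sequences of Section \ref{section_baumgartners_ineffability_hierarchy}: any $(1,S)$-sequence $\vec{S}=\langleS_\alpha\mid\alpha\in S\rangle$ extends to an $(\omega,S)$-sequence $\vec{S}_*$ by setting $S^*_{\alpha_1,\ldots,\alpha_n}=S_{\alpha_1}$, and any $H{\mathop{\rm sub}}seteq S$ which is homogeneous for $\vec{S}_*$ is homogeneous for $\vec{S}$, since the case $n=1$ of the above requirement is exactly the homogeneity condition for $\vec{S}$. An extension of this kind is invoked in the proof of Corollary \ref{corollary_one_S} below.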
The following theorem is essentially due to Feng (see \cite[Theorem 2.2 and Theorem 2.3]{MR1077260}), with the exception of clause (4) which appears in \cite[Theorem 3.2]{MR2817562}.
\mathfrak{b}egin{theorem}[Feng]\langlebel{theorem_omega_S}
Let $\kappa$ be a regular cardinal and suppose $I$ is an ideal on $\kappa$ such that $I\supseteq {\mathop{\rm NS}}_\kappa$. For $S{\mathop{\rm sub}}seteq \kappa$ the following are equivalent.
\mathfrak{b}egin{enumerate}
\item Every function $f:[S]^{<\omega}\to 2$ has a homogeneous set $H\in P(S)\cap I^+$.
\item For all $\gamma<\kappa$, every function $f:[S]^{<\omega}\to\gamma$ has a homogeneous set $H\in P(S)\cap I^+$.
\item Every structure $\mathcal{A}$ in a language of size less than $\kappa$ with $\kappa{\mathop{\rm sub}}seteq\mathcal{A}$ has a set of indiscernibles $H\in P(S)\cap I^+$.
\item $S\in{\mathscr R}(I)^+$, that is, for every regressive function $f:[S]^{<\omega}\to\kappa$ there is a set $H\in P(S)\cap I^+$ which is homogeneous for $f$.
\item For every club $C{\mathop{\rm sub}}seteq\kappa$, every regressive function $f:[S]^{<\omega}\to\kappa$ has a homogeneous set $H\in P(S\cap C)\cap I^+$.
\item For all $(\omega,S)$-sequences $\vec{S}$ there is a set $H\in P(S)\cap I^+$ which is homogeneous for $\vec{S}$.
\end{enumerate}
\end{theorem}
\mathfrak{b}egin{proposition}\langlebel{proposition_4_5_6}
Suppose $I\supseteq[\kappa]^{<\kappa}$ is an ideal on a regular cardinal $\kappa$. Then clauses (4), (5) and (6) of Theorem \ref{theorem_omega_S} are equivalent.
\end{proposition}
\mathfrak{b}egin{proof}
The equivalence of (5) and (6) is due to Feng \cite[Theorem 2.3]{MR1077260}. It is easy to see that (5) implies (4). Let us show that (4) implies (5).\footnote{The author would like to thank the anonymous referee for this argument as well as Victoria Gitman for an earlier version.} Suppose (4) holds and fix a regressive function $f:[S]^{<\omega}\to\kappa$ and a club $C{\mathop{\rm sub}}seteq\kappa$. First, let us argue that $|S\cap C|=\kappa$. Suppose $|S\cap C|<\kappa$ and let $\alpha=\sup(S\cap C)$. Define $g:S\setminus(\alpha+1)\to\kappa$ by letting $g(\xi)$ be the greatest element of $C$ which is less than $\xi$. Notice that $g$ is regressive. Now let $G:[S]^{<\omega}\to\kappa$ be any regressive function with $G(\{\xi\})=g(\xi)$ for $\xi\in S\setminus(\alpha+1)$. By (4), there is a homogeneous set $H\in P(S)\cap I^+$ for $G$, but then since $G\upharpoonright[H]^1$ is constant, it follows that $g$ is constant on the unbounded set $H\setminus(\alpha+1)$, but this contradicts the fact that $C$ is unbounded. Thus $|S\cap C|=\kappa$. To prove (5) we must show that there is a homogeneous set $H\in P(S\cap C)\cap I^+$ for $f$. Let us define another function $h:[S]^{<\omega}\to \kappa$ by letting
\[h(\{\xi\})=\mathfrak{b}egin{cases}
f(\{\xi\}) & \text{if $\xi\in S\cap C$}\\
\sup(C\cap\xi) & \text{if $\xi\in S\setminus C$}
\end{cases}
\]
and for $n>1$ let $h\upharpoonright[S]^n=f\upharpoonright[S]^n$. Then $h$ is regressive, and using (4), there is a homogeneous set $H\in P(S)\cap I^+$ for $h$. Using an argument similar to the above argument for $|S\cap C|=\kappa$, we see that $|H\cap (S\setminus C)|<\kappa$ and hence $H\cap C\in I^+$. Since $h\upharpoonright[S\cap C]^{<\omega}=f\upharpoonright[S\cap C]^{<\omega}$, it follows that $H\cap C$ is homogeneous for $f$, yielding (5).
\end{proof}
We will need the next easy consequence of Proposition \ref{proposition_4_5_6}.
\mathfrak{b}egin{corollary}\langlebel{corollary_one_S}
Suppose $\kappa$ is a cardinal, $I\supseteq{\mathop{\rm NS}}_\kappa$ is an ideal on $\kappa$ and $S\in{\mathscr R}(I)^+$. Then every $(1,S)$-sequence $\vec{S}=\langleS_\alpha\mid\alpha\in S\rangle$ has a homogeneous set $H{\mathop{\rm sub}}seteq S$ in $I^+$.
\end{corollary}
\mathfrak{b}egin{proof}
Let $\vec{S}_*$ be any $(\omega,S)$-sequence extending $\vec{S}$. Since $S\in{\mathscr R}(I)^+$ there is a set $H{\mathop{\rm sub}}seteq S$ in $I^+$ which is homogeneous for $\vec{S}_*$. Clearly $H$ is also homogeneous for $\vec{S}$.
\end{proof}
In order to prove a certain reflection result (see Theorem \ref{theorem_ramsey_reflection} below), we will use a characterization of Ramsey sets which is given in terms of elementary embeddings; the analogue of this characterization for Ramsey cardinals is essentially due to Mitchell \cite{MR534574} and was further explored by Gitman \cite{MR2830415}. Recall that a transitive set $M\models {\rm ZFC}^-$ of size $\kappa$ with $\kappa\in M$ is called a \emph{weak $\kappa$-model}. If $M$ is a weak $\kappa$-model we say that an $M$-ultrafilter $U$ on $\kappa$ is \emph{weakly amenable} if for every sequence $\langleX_\alpha\mid\alpha<\kappa\rangle$ in $M$ of subsets of $\kappa$, the set $\{\alpha<\kappa\mid X_\alpha\in U\}$ is an element of $M$. An $M$-ultrafilter $U$ on $\kappa$ is \emph{countably complete} if whenever $\langleA_n\mid n<\omega\rangle$ is a sequence of elements of $U$, possibly external to $M$, it follows that $\mathfrak{b}igcap_{n<\omega}A_n\neq\varnothing$. Taking $S=\kappa$ in the following theorem one obtains Mitchell's characterization of Ramsey cardinals \cite[Proposition 2.8(3)]{MR2830415}.
\mathfrak{b}egin{theorem}[Mitchell]\langlebel{theorem_ramsey_equiv}
Suppose $\kappa$ is a regular cardinal and $S{\mathop{\rm sub}}seteq\kappa$. Then $S\in{\mathscr R}([\kappa]^{<\kappa})^+$ (i.e. $S$ is a Ramsey set) if and only if for every $A{\mathop{\rm sub}}seteq\kappa$ there is a weak $\kappa$-model $M$ with $A,S\in M$ for which there exists a weakly amenable countably complete $M$-ultrafilter $U$ on $\kappa$ with $S\in U$.
\end{theorem}
\mathfrak{b}egin{proof} Since the proof of this theorem is a straightforward modification of arguments appearing in \cite{MR2710923} and \cite{MR2830415}, we provide a brief sketch together with citations to more detailed arguments.\footnote{The author would like to thank Victoria Gitman for a helpful conversation regarding this argument.}
Suppose $S$ is Ramsey. Fix $A{\mathop{\rm sub}}seteq\kappa$. We follow the argument in \cite[Section 4]{MR2830415}, which is also given in more detail in \cite[Chapter 2]{MR2710923}. First we argue that there is a set $H\in [\kappa]^\kappa$ of good indiscernibles for $(L_\kappa[A,S],A,S)$ (recall that a set of indiscernibles $H{\mathop{\rm sub}}seteq\kappa$ for $(L_\kappa[A,S],A,S)$ is \emph{good} if for all $\gamma\in H$, $\gamma$ is a cardinal, $(L_\gamma[A,S],A,S)\prec(L_\kappa[A,S],A,S)$ and $H\setminus\gamma$ is a set of indiscernibles for $(L_\kappa[A,S],A,S,\xi)_{\xi\in \gamma}$). Using the argument for \cite[Lemma 2.43]{MR2710923}, it follows that there is a club $C{\mathop{\rm sub}}seteq\kappa$ and a regressive function $h:[C]^{<\omega}\to\kappa$ such that any homogeneous set for $h$ is a set of good indiscernibles for $(L_\kappa[A,S],A,S)$. Since $S$ is Ramsey, it follows that $S\cap C$ is Ramsey, and thus the regressive function $h:[S\cap C]^{<\omega}\to \kappa$ has a homogeneous set $H\in P(S\cap C)\cap[\kappa]^\kappa$. Using the fact that $H$ is a good set of indiscernibles for $(L_\kappa[A,S],A,S)$, and by following the argument for \cite[Theorem 2.35, pp. 104--114]{MR2710923}, one can construct a weak $\kappa$-model $M$ with $A,S\in M$ for which there is a weakly amenable countably complete $M$-ultrafilter $U$ on $\kappa$ such that a set $X\in P(\kappa)^M$ is in $U$ if and only if there exists $\alpha<\kappa$ with $H\setminus \alpha{\mathop{\rm sub}}seteq X$ (see \cite[Lemma 2.44.12]{MR2710923} for this characterization of $U$). Since $H{\mathop{\rm sub}}seteq S\cap C$, it follows that $S\in U$.
Conversely, suppose that for every $A{\mathop{\rm sub}}seteq\kappa$ there is a weak $\kappa$-model $M$ with $A,S\in M$ for which there is a weakly amenable countably complete $M$-ultrafilter $U$ on $\kappa$ with $S\in U$. To see that $S$ is Ramsey, fix a regressive function $f:[S]^{<\omega}\to\kappa$. We follow the proof of \cite[Theorem 3.10]{MR2830415}. Let $M$ be a weak $\kappa$-model with $f\in M$ and let $U$ be a weakly amenable countably complete $M$-ultrafilter with $S\in U$. Using the weak amenability of $U$ we can define the product ultrafilters $U^n$ on $P(\kappa^n)^M$ for all $n<\omega$ as follows. Let $U^0=U$. Given $U^n$ let $U^{n+1}$ be the ultrafilter on $\kappa^n\times\kappa$ defined by $X\in U^{n+1}$ if and only if $X\in P(\kappa^n\times\kappa)^M$ and $\{\vec{\alpha}\in \kappa^n\mid \{\xi<\kappa\mid \vec{\alpha}\mathbin{{}^\smallfrown} \xi\in X\}\in U\}\in U^n$. Since $S\in U$, it follows by induction that $S^n\in U^n$ for all $n<\omega$. For each $n<\omega$, let $j_{U^n}:M\to N_n$ be the ultrapower of $M$ by $U^n$ and let $f_n=f\upharpoonright[S]^n$. Recall that for each $n<\omega$, the critical point of $j_{U^n}$ is $\kappa$ and $X\in U^n$ if and only if $(\kappa,j_U(\kappa),\ldots,j_{U^{n-1}}(\kappa))\in j_{U^n}(X)$ (see \cite[Lemma 2.33]{MR2710923}). Fix $n<\omega$. By elementarity $j_{U^n}(f_n)$ is regressive and hence $j_{U^n}(f_n)(\kappa,j_U(\kappa),\ldots,j_{U^{n-1}}(\kappa))=\eta<\kappa$. Thus $H_n'=\{\vec{\alpha}\in S^n\mid f_n(\vec{\alpha})=\eta\}\in U^n$. By \cite[Proposition 2.5]{MR2830415}, there is a set $H_n\in U$ such that for all increasing sequences $\alpha_1<\cdots<\alpha_n$ from $H_n$ we have $(\alpha_1,\ldots,\alpha_n)\in H_n'$. Thus, $H_n$ is homogeneous for $f_n$. Notice that $H=\mathfrak{b}igcap_{n<\omega} H_n$ must have size $\kappa$ because if $H$ were bounded, say $H{\mathop{\rm sub}}seteq\alpha<\kappa$, then the set $(\kappa\setminus\alpha)\cap \mathfrak{b}igcap_{n<\omega}H_n$ would be empty, which is impossible by countable completeness since $\kappa\setminus\alpha\in U$. Since $H$ is homogeneous for $f$ we have established $S\in{\mathscr R}([\kappa]^{<\kappa})^+$.
\end{proof}
Theorem \ref{theorem_ramsey_equiv} naturally leads to the following characterization of ${\mathscr R}([\kappa]^{<\kappa})$ due to Mitchell in terms of elementary embeddings (see \cite{MR2830415} for more information).
\mathfrak{b}egin{theorem}[Mitchell]\langlebel{theorem_gitman}
A set $S{\mathop{\rm sub}}seteq\kappa$ is Ramsey, or, in other words, $S\in{\mathscr R}([\kappa]^{<\kappa})^+$, if and only if for every $A{\mathop{\rm sub}}seteq\kappa$ there is a weak $\kappa$-model $M$ with $A,S\in M$ and there is an elementary embedding $j:M\to N$ such that
\mathfrak{b}egin{enumerate}
\item The critical point of $j$ is $\kappa$.
\item $N$ is transitive.
\item $P(\kappa)^M=P(\kappa)^N$.
\item Whenever $\langleA_n\mid n<\omega\rangle$ is a sequence of elements of $P(\kappa)^M$ which is possibly external to $M$ and $\kappa\in j(A_n)$ for all $n<\omega$, then $\mathfrak{b}igcap_{n<\omega}A_n\neq\varnothing$.
\item $\kappa\in j(S)$.
\end{enumerate}
\end{theorem}
Baumgartner \cite[Theorem 4.4]{MR0540770} gave a characterization of Ramsey cardinals which is similar to his characterization of ineffable cardinals (Theorem \ref{theorem_baumgarnter_ineffability} above): $\kappa$ is Ramsey if and only if it is pre-Ramsey, ${\mathbb P}i^1_1$-indescribable and additionally the pre-Ramsey ideal and the ${\mathbb P}i^1_1$-indescribability ideal generate a nontrivial ideal which equals the Ramsey ideal; moreover, reference to these ideals cannot be removed from this characterization. Feng \cite[Theorem 4.8]{MR1077260} generalized Baumgartner's characterization of Ramseyness. Taking $m=1$ and $n=0$ in the following theorem yields Baumgartner's result.
\mathfrak{b}egin{theorem}[Feng]
Suppose $\kappa$ is a cardinal and let $I_{-1}=[\kappa]^{<\kappa}$ and $I_0={\mathop{\rm NS}}_\kappa$. Let $1\leq m<\omega$ and $n\in\{-1,0\}$. Then $\kappa\in{\mathscr R}^m(I_n)^+$ if and only if both of the following hold.
\mathfrak{b}egin{enumerate}
\item $\kappa\in{\mathscr R}_0({\mathscr R}^{m-1}(I_n))^+$ and $\kappa\in {\mathbb P}i^1_{n+2m}(\kappa)^+$.
\item The ideal generated by ${\mathscr R}_0({\mathscr R}^{m-1}(I_n))\cup{\mathbb P}i^1_{n+2m}(\kappa)$ is nontrivial and equals ${\mathscr R}^m(I_n)$.
\end{enumerate}
Moreover, reference to the ideals in the above characterization cannot be removed because the least cardinal $\kappa$ such that $\kappa\in{\mathscr R}_0({\mathscr R}^{m-1}(I_n))^+$ and $\kappa\in{\mathbb P}i^1_{n+2m}(\kappa)^+$ is not in ${\mathscr R}^m(I_n)^+$.
\end{theorem}
Feng \cite[Theorem 5.2]{MR1077260} also proved that the ${\mathbb P}i_\alpha$-Ramsey cardinals form a hierarchy which is strictly increasing in consistency strength.
\mathfrak{b}egin{theorem}[Feng]
Let $\langlef_\alpha\mid\alpha<\kappa^+\rangle$ be a sequence of canonical functions on a regular uncountable cardinal $\kappa$.\footnote{See \cite{MR0540770} and \cite{MR1077260} for the definition and relevant facts concerning canonical sequences of functions.} If $\kappa$ is ${\mathbb P}i_{\alpha+1}$-Ramsey and $\alpha<\kappa^+$, then $\{\gamma<\kappa\mid\text{$\gamma$ is ${\mathbb P}i_{f_\alpha(\gamma)}$-Ramsey}\}$ is in the ${\mathbb P}i_{\alpha+1}$-Ramsey filter on $\kappa$.
\end{theorem}
{\mathop{\rm sub}}section{Transfinite indescribability}\langlebel{section_bagaria}
Sharpe and Welch \cite[Definition 3.21]{MR2817562} introduced a version of ${\mathbb P}i^1_\xi$-indescribability for $\xi\geq\omega$ which is defined in terms of the existence of winning strategies in certain finite games. Independently, Bagaria \cite[Section 4]{MR3894041} defined a natural notion of ${\mathbb P}i^1_\xi$-formula for $\xi\geq\omega$ using infinitary logic, and then gave a definition of ${\mathbb P}i^1_\xi$-indescribability in terms of rank initial segments of the set theoretic universe. As mentioned in Section \ref{section_introduction}, it is not difficult to see that the Sharpe-Welch notion of the ${\mathbb P}i^1_\xi$-indescribability of a cardinal $\kappa$ is equivalent to Bagaria's notion for $\xi<\kappa$. For the reader's convenience, we summarize Bagaria's definition, which we will use throughout the paper.
\mathfrak{b}egin{definition}[Bagaria]\langlebel{definition_bagaria}
In what follows all quantifiers which are explicitly displayed are of second order. For any ordinal $\xi$, we say that a formula is $\mathbb{S}igma^1_{\xi+1}$ if it is of the form
\[\exists X_0,\ldots,X_k\varphi(X_0,\ldots,X_k)\]
where $\varphi(X_0,\ldots,X_k)$ is ${\mathbb P}i^1_\xi$. And a formula is ${\mathbb P}i^1_{\xi+1}$ if it is of the form
\[\forall X_0,\ldots,X_k\varphi(X_0,\ldots,X_k)\]
where $\varphi(X_0,\ldots,X_k)$ is $\mathbb{S}igma^1_\xi$.
If $\xi$ is a limit ordinal, we say that a formula is ${\mathbb P}i^1_\xi$ if it is of the form
\[\mathfrak{b}igwedge_{\zeta<\xi}\varphi_\zeta\]
where $\varphi_\zeta$ is ${\mathbb P}i^1_\zeta$ for all $\zeta<\xi$ and the infinite conjunction has only finitely many free second-order variables. And we say that a formula is $\mathbb{S}igma^1_\xi$ if it is of the form
\[\mathfrak{b}igvee_{\zeta<\xi}\varphi_\zeta\]
where $\varphi_\zeta$ is $\mathbb{S}igma^1_\zeta$ for all $\zeta<\xi$ and the infinite disjunction has only finitely many free second-order variables.
\end{definition}
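For example, unraveling this definition at the first limit stage: a ${\mathbb P}i^1_\omega$ formula is an infinite conjunction $\mathfrak{b}igwedge_{n<\omega}\varphi_n$ in which each $\varphi_n$ is ${\mathbb P}i^1_n$ and only finitely many free second-order variables occur in total, while a ${\mathbb P}i^1_{\omega+1}$ formula has the form
\[\forall X_0,\ldots,X_k\,\mathfrak{b}igvee_{n<\omega}\psi_n,\]
where each $\psi_n$ is $\mathbb{S}igma^1_n$.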
\mathfrak{b}egin{definition}[Bagaria]
Suppose $\kappa$ is a cardinal. A set $S{\mathop{\rm sub}}seteq\kappa$ is \emph{${\mathbb P}i^1_\xi$-indescribable} if for all subsets $A{\mathop{\rm sub}}seteq V_\kappa$ and every ${\mathbb P}i^1_\xi$ sentence $\varphi$, if $(V_\kappa,\in,A)\models\varphi$ then there is some $\alpha\in S$ such that $(V_\alpha,\in,A\cap V_\alpha)\models\varphi$.
\end{definition}
\mathfrak{b}egin{remark}
As pointed out by Bagaria, it is clear from the definition that if $\kappa$ is ${\mathbb P}i^1_\xi$-indescribable then $\xi<\kappa$. When we write $\kappa\in{\mathbb P}i^1_\xi(\kappa)^+$, this indicates that $\kappa$ is ${\mathbb P}i^1_\xi$-indescribable, and hence it should be understood that $\xi<\kappa$.
\end{remark}
There is a natural normal ideal associated to the ${\mathbb P}i^1_\xi$-indescribability of a cardinal. The following result is due to Bagaria \cite{MR3894041}, and independently Brickhill and Welch (see \cite[Lemma 3.21]{BrickhillWelch} and \cite[Lemma 4.3.7]{Brickhill:Thesis}).
\mathfrak{b}egin{proposition}[Bagaria; Brickhill-Welch]
If $\kappa$ is a ${\mathbb P}i^1_\xi$-indescribable cardinal where $\xi<\kappa$ then the collection
\[{\mathbb P}i^1_\xi(\kappa)=\{X{\mathop{\rm sub}}seteq\kappa\mid\text{$X$ is not ${\mathbb P}i^1_\xi$-indescribable}\}\]
is a nontrivial normal ideal on $\kappa$.
\end{proposition}
In some cases, it will be convenient to work with a weak version of ${\mathbb P}i^1_\xi$-indescribability.
\mathfrak{b}egin{definition} A set $S{\mathop{\rm sub}}seteq\kappa$ is \emph{weakly ${\mathbb P}i^1_\xi$-indescribable} if for all $A{\mathop{\rm sub}}seteq\kappa$ and all ${\mathbb P}i^1_\xi$ sentences $\varphi$, if $(\kappa,\in,A)\models\varphi$ then there is an $\alpha\in S$ such that $(\alpha,\in,A\cap\alpha)\models\varphi$.
\end{definition}
\mathfrak{b}egin{remark} It is easy to check that if $\kappa$ is inaccessible then a set $S{\mathop{\rm sub}}seteq\kappa$ is weakly ${\mathbb P}i^1_\xi$-indescribable if and only if it is ${\mathbb P}i^1_\xi$-indescribable, and hence
\[{\mathbb P}i^1_\xi(\kappa)=\{X{\mathop{\rm sub}}seteq\kappa\mid\text{$X$ is weakly ${\mathbb P}i^1_\xi$-indescribable}\}.\]
\end{remark}
Next, we show that one of Baumgartner's fundamental technical lemmas from \cite{MR0384553} can be extended from ${\mathbb P}i^1_n$-indescribability to Bagaria's notion of ${\mathbb P}i^1_\xi$-indesc\-ribability. The following lemma also extends a result of Brickhill and Welch \cite[Theorem 5.3]{BrickhillWelch} concerning their notion of \emph{$\gamma$-ineffability}, where the $\gamma$-ineffability of $\kappa$ is equivalent under the assumption $V=L$ to $\kappa\in{\mathscr I}({\mathbb P}i^1_\gamma(\kappa))^+$ for $\gamma<\kappa$.
\mathfrak{b}egin{lemma}\langlebel{lemma_baumgartner_bagaria}
Suppose $S{\mathop{\rm sub}}seteq\kappa$ and for every $(1,S)$-sequence $\vec{S}=\langleS_\alpha\mid\alpha\in S\rangle$ there is a $B\in Q$ such that $B$ is homogeneous for $\vec{S}$. If $Q{\mathop{\rm sub}}seteq \mathfrak{b}igcap_{\xi\in\{-1\}\cup\mathfrak{b}eta}{\mathbb P}i^1_\xi(\kappa)^+$ where $\mathfrak{b}eta<\kappa$, then $S$ is a ${\mathbb P}i^1_{\mathfrak{b}eta+1}$-indescribable subset of $\kappa$. (Notice that if $\mathfrak{b}eta=\eta+1$ is a successor ordinal, the result states that if $Q{\mathop{\rm sub}}seteq{\mathbb P}i^1_\eta(\kappa)^+$ where $\eta<\kappa$, then $S$ is a ${\mathbb P}i^1_{\eta+2}$-indescribable subset of $\kappa$.)
\end{lemma}
\mathfrak{b}egin{proof} The case in which $\mathfrak{b}eta<\omega$ is due to Baumgartner (see \cite[Lemma 7.1]{MR0384553} for the case in which $1\leq\mathfrak{b}eta<\omega$ and the discussion after Theorem 7.2 in \cite{MR0384553} for the case in which $\mathfrak{b}eta=0$). Notice that the assumption that every $(1,S)$-sequence $\vec{S}=\langleS_\alpha\mid\alpha\in S\rangle$ has a homogeneous set $H\in\mathfrak{b}igcap_{\xi\in\{-1\}\cup\mathfrak{b}eta}{\mathbb P}i^1_\xi(\kappa)^+$ implies that $\kappa$ is inaccessible, and hence, in order to show that $S$ is a ${\mathbb P}i^1_{\mathfrak{b}eta+1}$-indescribable subset of $\kappa$, it suffices to show that $S$ is weakly ${\mathbb P}i^1_{\mathfrak{b}eta+1}$-indescribable.
First, let us consider the case in which $\mathfrak{b}eta$ is a limit ordinal. Suppose $(\kappa,\in,A)\models\forall X\left(\mathfrak{b}igvee_{\xi<\mathfrak{b}eta}\varphi_\xi\right)$ where $\varphi_\xi$ is $\mathbb{S}igma^1_\xi$ for $\xi<\mathfrak{b}eta$. Suppose, toward a contradiction, that for all $\alpha\in S$ there is an $S_\alpha{\mathop{\rm sub}}seteq \alpha$ such that $(\alpha,\in,A\cap \alpha)\models\mathfrak{b}igwedge_{\xi<\mathfrak{b}eta}\lnot\varphi_\xi[S_\alpha]$. By assumption there is a $B\in Q$ which is homogeneous for $\vec{S}=\langleS_\alpha\mid\alpha\in S\rangle$. Let $X=\mathfrak{b}igcup_{\alpha\in B}S_\alpha$. Then for some $\zeta<\mathfrak{b}eta$ we have $(\kappa,\in,A)\models\varphi_\zeta[X]$. Since $B\in \mathfrak{b}igcap_{\xi\in\{-1\}\cup\mathfrak{b}eta}{\mathbb P}i^1_\xi(\kappa)^+$, there is an $\alpha\in B$ such that $(\alpha,\in,A\cap \alpha)\models\varphi_\zeta[X\cap \alpha]$. Since $B$ is homogeneous for $\vec{S}$ we see that $X\cap \alpha= S_\alpha$ and thus $(\alpha,\in,A\cap \alpha)\models\varphi_\zeta[S_\alpha]$, a contradiction.
When $\mathfrak{b}eta=\eta+1$ is a successor ordinal the argument is very similar to Baumgartner's argument for \cite[Lemma 7.1]{MR0384553}. We must show that if $Q{\mathop{\rm sub}}seteq{\mathbb P}i^1_\eta(\kappa)^+$ then $S$ is a ${\mathbb P}i^1_{\eta+2}$-indescribable subset of $\kappa$. Suppose $(\kappa,\in,A)\models\forall X\exists Y\psi(X,Y)$ where $\psi(X,Y)$ is a ${\mathbb P}i^1_\eta$ formula. Further suppose that for each $\alpha\in S$ there is an $S_\alpha{\mathop{\rm sub}}seteq \alpha$ such that $(\alpha,\in,A\cap \alpha,S_\alpha)\models\forall Y\lnot\psi(S_\alpha,Y)$. This defines a $(1,S)$-sequence $\vec{S}=\langleS_\alpha\mid\alpha\in S\rangle$. By assumption there is a $B\in Q$ which is homogeneous for $\vec{S}$. Let $X=\mathfrak{b}igcup_{\alpha\in B}S_\alpha$. Then there is a $Y{\mathop{\rm sub}}seteq \kappa$ such that $(\kappa,\in,A,X,Y)\models\psi(X,Y)$. Since $B$ is ${\mathbb P}i^1_\eta$-indescribable, there is an $\alpha\in B$ such that $(\alpha,\in,A\cap \alpha,X\cap \alpha,Y\cap \alpha)\models\psi(X\cap \alpha,Y\cap \alpha)$. Since $X\cap \alpha=S_\alpha$, this is a contradiction.
\end{proof}
\section{Basic properties of the ideals ${\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$}\langlebel{section_basic_properties}
In this section we begin our study of the ideals ${\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$ obtained from iterating Feng's Ramsey operator on Bagaria's ${\mathbb P}i^1_\mathfrak{b}eta$-indescribability ideals. The following straightforward lemmas will be used in Section \ref{section_indescribability_in_finite_ramseyness} and Section \ref{section_indescribability_in_infinite_ramseyness} below to prove that a proper containment holds between two particular ideals.
Recall that if $S_\xi$ is a stationary subset of $\xi$ for all $\xi$ in some set $S{\mathop{\rm sub}}seteq\kappa$ which is stationary in $\kappa$, then $\mathfrak{b}igcup_{\xi\in S}S_\xi$ is stationary in $\kappa$. The next lemma shows that the analogous fact is true for the ideals ${\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$.
\mathfrak{b}egin{lemma}\langlebel{lemma_pos_union_of_pos_sets_is_pos}
Suppose $\alpha<\kappa$ and $\mathfrak{b}eta\in\{-1\}\cup\kappa$. Further suppose $S\in{\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))^+$ and for each $\xi\in S$ let $S_\xi\in{\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\xi))^+$. Then $\mathfrak{b}igcup_{\xi\in S} S_\xi\in{\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))^+$.
\end{lemma}
\mathfrak{b}egin{proof}
Suppose $\alpha=0$. Fix $A{\mathop{\rm sub}}seteq V_\kappa$ and let $\varphi$ be a ${\mathbb P}i^1_\mathfrak{b}eta$ sentence such that $(V_\kappa,\in,A)\models\varphi$. Since $S\in{\mathbb P}i^1_\mathfrak{b}eta(\kappa)^+$, there is a $\xi\in S$ such that $(V_\xi,\in,A\cap V_\xi)\models\varphi$. Now since $S_\xi\in{\mathbb P}i^1_\mathfrak{b}eta(\xi)^+$, there is a $\zeta\in S_\xi$ such that $(V_\zeta,\in,A\cap V_\zeta)\models \varphi$. Hence $\mathfrak{b}igcup_{\xi\in S}S_\xi\in{\mathbb P}i^1_\mathfrak{b}eta(\kappa)^+$.
If $\alpha$ is a limit and the result holds for all ordinals less than $\alpha$, it can easily be checked that the result holds for $\alpha$ using the fact that ${\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))=\mathfrak{b}igcup_{\zeta<\alpha}{\mathscr R}^\zeta({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$.
Now suppose $\alpha>0$ is a successor ordinal and the result holds for $\alpha-1$, let us show that it holds for $\alpha$. Fix a regressive function $f:\left[\mathfrak{b}igcup_{\xi\in S}S_\xi\right]^{<\omega}\to \kappa$. For each $\xi\in S$ there is a set $H_\xi{\mathop{\rm sub}}seteq S_\xi$ in ${\mathscr R}^{\alpha-1}({\mathbb P}i^1_\mathfrak{b}eta(\xi))^+$ homogeneous for $f\upharpoonright[S_\xi]^{<\omega}$. Since $S\in{\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))^+$, it follows by Corollary \ref{corollary_one_S} that the $(1,S)$-sequence $\vec{H}=\langleH_\xi\mid\xi\in S\rangle$ has a homogeneous set $H{\mathop{\rm sub}}seteq S$ in ${\mathscr R}^{\alpha-1}({\mathbb P}i^1_\mathfrak{b}eta(\kappa))^+$. By our inductive hypothesis, $\mathfrak{b}igcup_{\xi\in H} H_\xi\in{\mathscr R}^{\alpha-1}({\mathbb P}i^1_\mathfrak{b}eta(\kappa))^+$. It will suffice to show that $\mathfrak{b}igcup_{\xi\in H} H_\xi$ is homogeneous for $f$. Suppose $\vec{\alpha},\vec{\mathfrak{b}eta}\in\left[\mathfrak{b}igcup_{\xi\in H} H_\xi\right]^n$. By the homogeneity of $H$, it follows that there is a $\xi\in H$ such that $\vec{\alpha},\vec{\mathfrak{b}eta}\in [H_\xi]^n$. Since $H_\xi$ is homogeneous for $f\upharpoonright[S_\xi]^{<\omega}$, we have $f(\vec{\alpha})=f(\vec{\mathfrak{b}eta})$.
\end{proof}
Recall that if $\kappa$ is a weakly compact cardinal, then the set of non--weakly compact cardinals less than $\kappa$ is a weakly compact subset of $\kappa$. The next lemma shows that the corresponding fact is true for the ideals ${\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$.
\mathfrak{b}egin{lemma}\langlebel{lemma_set_of_nons_is_positive}
For all $\alpha<\kappa$ and all $\mathfrak{b}eta\in\{-1\}\cup\kappa$, if $\kappa\in{\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))^+$ then the set
\[S=\{\xi<\kappa\mid \xi\in{\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\xi))\}\]
is in ${\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))^+$.
\end{lemma}
\mathfrak{b}egin{proof}
Let $\kappa$ be the least counterexample. In other words, $\kappa$ is the least cardinal such that $\kappa\in{\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))^+$ and $S=\{\xi<\kappa\mid\xi\in{\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\xi))\}\in{\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))$. Then the set $\kappa\setminus S$ is in ${\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))^*$ and hence also in ${\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))^+$. For each $\zeta\in\kappa\setminus S$, by the minimality of $\kappa$, the set $S_\zeta=S\cap\zeta$ is in ${\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\zeta))^+$. Thus, by Lemma \ref{lemma_pos_union_of_pos_sets_is_pos}, the set $S=\mathfrak{b}igcup_{\zeta\in\kappa\setminus S}S_\zeta$ is in ${\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\kappa))^+$, a contradiction.
\end{proof}
\section{A first reflection result}\langlebel{section_a_first_reflection_result}
Baumgartner showed \cite[Theorem 4.1]{MR0384553} that if $\kappa$ is a subtle cardinal then the set
\[\{\xi<\kappa\mid(\forall n<\omega)\ \xi\in{\mathbb P}i^1_n(\xi)^+\}\]
is in the subtle filter. Since Ramsey cardinals are subtle, Baumgartner's result shows that the existence of a Ramsey cardinal is strictly stronger than the existence of a cardinal that is ${\mathbb P}i^1_n$-indescribable for every $n<\omega$. Our next goal will be to show that the existence of a Ramsey cardinal is strictly stronger than the existence of a cardinal $\kappa$ which is ${\mathbb P}i^1_\mathfrak{b}eta$-indescribable for all $\mathfrak{b}eta<\kappa$; the proof is implicit in \cite{MR3894041} and is obtained by combining the methods of \cite{MR3894041}, \cite{MR2830415} and \cite[Theorem 17.33 and Exercise 17.29]{MR1940513}. In order to prove this result we will use the elementary embedding characterization of Ramseyness due to Mitchell \cite{MR534574} (see Theorem \ref{theorem_gitman} above) and further explored by Gitman \cite{MR2830415} and Sharpe-Welch \cite{MR2817562}.
\mathfrak{b}egin{theorem}\langlebel{theorem_ramsey_reflection}
If $S\in{\mathscr R}([\kappa]^{<\kappa})^+$, then the set
\[T=\{\xi<\kappa\mid(\forall \mathfrak{b}eta<\xi)\ S\cap\xi\in{\mathbb P}i^1_\mathfrak{b}eta(\xi)^+\}\]
is in ${\mathscr R}([\kappa]^{<\kappa})^*$.\marginpar{\tiny Need to generalize this so that it talks about reflection points of sets. This is needed for the following result.}
\end{theorem}
\mathfrak{b}egin{proof}\footnote{The author would like to thank Victoria Gitman for suggesting this proof.}
Suppose $S$ is Ramsey. To show that $T\in{\mathscr R}([\kappa]^{<\kappa})^*$ we must show that there is a set $A{\mathop{\rm sub}}seteq\kappa$ such that whenever $M$ is a weak $\kappa$-model with $A,T\in M$ and whenever $j:M\to N$ is an elementary embedding satisfying properties (1)--(4) from Theorem \ref{theorem_gitman}, then it must be the case that $\kappa\in j(T)$. Take $A=S$. Since $S$ is Ramsey, by Theorem \ref{theorem_gitman}, we may let $M$ be a weak $\kappa$-model with $S,T\in M$ and suppose $j:M\to N$ is an elementary embedding satisfying properties (1)--(4) from Theorem \ref{theorem_gitman} such that $\kappa\in j(S)$. To show that $\kappa\in j(T)$ we must show that for every $\mathfrak{b}eta<\kappa$ we have $N\models$ $S\in{\mathbb P}i^1_\mathfrak{b}eta(\kappa)^+$. Suppose not, that is, suppose that for some fixed $\mathfrak{b}eta<\kappa$, $N$ thinks $S$ is not a ${\mathbb P}i^1_\mathfrak{b}eta$-indescribable subset of $\kappa$. Since $N$ thinks $\kappa$ is strongly inaccessible, it follows that $N$ thinks $S$ is not \emph{weakly} ${\mathbb P}i^1_\mathfrak{b}eta$-indescribable.\marginpar{\tiny Check that when $\kappa$ is strongly inaccessible weak ${\mathbb P}i^1_\mathfrak{b}eta$-indescribability implies ${\mathbb P}i^1_\mathfrak{b}eta$-indescribability. We don't necessarily need this, because we could code subsets of $V_\kappa$ as subsets of $\kappa$, but this seems more convenient.} Thus, there is an $R\in P(\kappa)^N$ and a ${\mathbb P}i^1_\mathfrak{b}eta$ sentence $\varphi$ such that
\[N\models\text{``$(\kappa,\in,R)\models\varphi$''}\]
and
\[N\models \text{``$(\forall\xi\in S) (\xi,\in,R\cap\xi)\models\lnot\varphi$''}.\]
Now, for each $\xi <\kappa$ we have $R\cap\xi\in M$ because $P(\kappa)^M=P(\kappa)^N$. Furthermore, since $j$ is elementary and $\mathop{\rm crit}(j)=\kappa$, it follows that for each $\xi\in S$ we have $M\models$ ``$(\xi,\in,R\cap\xi)\models\lnot\varphi$.'' Since $S,R\in M$ we see that
\[M\models\text{``$(\forall\xi\in S) (\xi,\in,R\cap\xi)\models\lnot\varphi$''}.\]
By elementarity
\[N\models\text{``$(\forall\xi\in j(S)) (\xi,\in,j(R)\cap\xi)\models\lnot\varphi$''},\]
but this is a contradiction since $\kappa\in j(S)$.
\end{proof}
Next we show that Theorem \ref{theorem_ramsey_reflection} can, in a sense, be pushed up the Ramsey hierarchy.
\mathfrak{b}egin{theorem}\langlebel{theorem_r_reflects_pi1n}
For all ordinals $\alpha<\kappa$, if $S\in{\mathscr R}^{\alpha+1}([\kappa]^{<\kappa})^+$ then the set
\[T=\{\xi<\kappa\mid (\forall\mathfrak{b}eta<\xi)\ S\cap\xi\in{\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\xi))^+\}\]
is in ${\mathscr R}^{\alpha+1}([\kappa]^{<\kappa})^*$.\marginpar{\tiny The ${\mathbb P}i^1_\mathfrak{b}eta(\xi)$ is redundant when $\alpha=\omega$ and $\mathfrak{b}eta<\omega$ because ${\mathscr R}^\omega([\kappa]^{<\kappa})={\mathscr R}^\omega({\mathbb P}i^1_n(\kappa))$ for all $n<\omega$ by the ideal diagram.}
\end{theorem}
\mathfrak{b}egin{proof}
Let us proceed by induction on $\alpha$. If $\alpha=0$, the result follows directly from Theorem \ref{theorem_ramsey_reflection}.
Suppose $\alpha=\alpha_0+1$ is a successor ordinal and the result holds for ordinals less than $\alpha$. Let us show it holds for $\alpha$. Suppose not. Then $S\in{\mathscr R}^{\alpha_0+2}([\kappa]^{<\kappa})^+$ and the set
\[\kappa\setminus T=\{\xi<\kappa\mid(\exists\mathfrak{b}eta<\xi)\ S\cap\xi\in{\mathscr R}^{\alpha_0+1}({\mathbb P}i^1_\mathfrak{b}eta(\xi))\}\]
is in ${\mathscr R}^{\alpha_0+2}([\kappa]^{<\kappa})^+$. Since ${\mathscr R}^{\alpha_0+2}([\kappa]^{<\kappa})$ is a normal ideal on $\kappa$, there is a fixed $\mathfrak{b}eta_0<\kappa$ such that the set
\[E=\{\xi<\kappa\mid S\cap\xi\in{\mathscr R}^{\alpha_0+1}({\mathbb P}i^1_{\mathfrak{b}eta_0}(\xi))\langlend \xi>\mathfrak{b}eta_0\}{\mathop{\rm sub}}seteq\kappa\setminus T\]
is in ${\mathscr R}^{\alpha_0+2}([\kappa]^{<\kappa})^+$. We will define a $(1,E)$-sequence $\vec{E}=\langleE_\xi\mid\xi\in E\rangle$. Without loss of generality, by intersecting with a club, we can assume that every element of $E$ is closed under G\"odel pairing. For each $\xi\in E$, let $f_\xi:[S\cap\xi]^{<\omega}\to \xi$ be a regressive function such that no homogeneous set for $f_\xi$ is in ${\mathscr R}^{\alpha_0}({\mathbb P}i^1_{\mathfrak{b}eta_0}(\xi))^+$. Let $E_\xi$ code $f_\xi$ as a subset of $\xi$ in a sufficiently nice way. Since $E\in{\mathscr R}^{\alpha_0+2}([\kappa]^{<\kappa})^+$, there is a set $X\in P(E)\cap{\mathscr R}^{\alpha_0+1}([\kappa]^{<\kappa})^+$ homogeneous for $\vec{E}$. Let $F=\mathfrak{b}igcup_{\xi\in X}f_\xi$ and notice that $F:[S]^{<\omega}\to \kappa$ is a regressive function and $F\upharpoonright [S\cap\xi]^{<\omega}=f_\xi$ for all $\xi\in X$. Since $S\in{\mathscr R}^{\alpha_0+2}([\kappa]^{<\kappa})^+$ there is an $H\in P(S)\cap{\mathscr R}^{\alpha_0+1}([\kappa]^{<\kappa})^+$ homogeneous for $F$. By our inductive assumption, the set
\[C=\{\xi<\kappa\mid H\cap\xi\in{\mathscr R}^{\alpha_0}({\mathbb P}i^1_{\mathfrak{b}eta_0}(\xi))^+\}\]
is in ${\mathscr R}^{\alpha_0+1}([\kappa]^{<\kappa})^*$ and since $X\in{\mathscr R}^{\alpha_0+1}([\kappa]^{<\kappa})^+$ it follows that $X\cap C\in{\mathscr R}^{\alpha_0+1}([\kappa]^{<\kappa})^+$. Choose an ordinal $\xi\in X\cap C$. Then $H\cap\xi\in{\mathscr R}^{\alpha_0}({\mathbb P}i^1_{\mathfrak{b}eta_0}(\xi))^+$ and since $H$ is homogeneous for $F$ and $\xi\in X$ we see that $H\cap\xi$ is homogeneous for $f_\xi$. This contradicts the fact that $f_\xi:[S\cap\xi]^{<\omega}\to \xi$ has no homogeneous set in ${\mathscr R}^{\alpha_0}({\mathbb P}i^1_{\mathfrak{b}eta_0}(\xi))^+$.
Suppose $\alpha$ is a limit ordinal, the result holds for ordinals less than $\alpha$ and, for the sake of contradiction, the result is false for $\alpha$. Suppose $S\in{\mathscr R}^{\alpha+1}([\kappa]^{<\kappa})^+$ and the set
\[\kappa\setminus T=\{\xi<\kappa\mid(\exists\mathfrak{b}eta<\xi)\ S\cap\xi\in{\mathscr R}^\alpha({\mathbb P}i^1_\mathfrak{b}eta(\xi))\}\]
is in ${\mathscr R}^{\alpha+1}([\kappa]^{<\kappa})^+$. Since ${\mathscr R}^{\alpha+1}([\kappa]^{<\kappa})$ is a normal ideal and $\alpha<\kappa$ is a limit ordinal, there are fixed $\alpha_0<\alpha$ and $\mathfrak{b}eta_0<\kappa$ such that the set
\[E=\{\xi<\kappa\mid S\cap\xi\in{\mathscr R}^{\alpha_0+1}({\mathbb P}i^1_{\mathfrak{b}eta_0}(\xi))\}\]
is in ${\mathscr R}^{\alpha+1}([\kappa]^{<\kappa})^+$. The rest of the argument is essentially the same as that of the successor case.
\end{proof}
Since we will refer to it later, let us state the following corollary which asserts that ``$\exists\kappa$ $\kappa\in{\mathscr R}^{\alpha+1}([\kappa]^{<\kappa})^+$'' is strictly stronger than ``$\exists\kappa$ ($\forall\beta<\kappa$) $\kappa\in{\mathscr R}^\alpha(\Pi^1_\beta(\kappa))^+$''.
\begin{corollary}\label{corollary_ramseyness_reflects_indescribability}
For all ordinals $\alpha<\kappa$, if $\kappa\in{\mathscr R}^{\alpha+1}([\kappa]^{<\kappa})^+$ then the set
\[\{\xi<\kappa\mid(\forall\beta<\xi)\ \xi\in{\mathscr R}^\alpha(\Pi^1_\beta(\xi))^+\}\]
is in ${\mathscr R}^{\alpha+1}([\kappa]^{<\kappa})^*$.
\end{corollary}
\section{Describing degrees of Ramseyness}\label{section_describing_ramseyness}
In order to prove that certain relationships hold between ideals of the form ${\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$, we will need to know which $\xi$ suffices in order to express the fact that a set $S\subseteq\kappa$ is in ${\mathscr R}^\alpha(\Pi^1_\beta(\kappa))^+$ by a $\Pi^1_\xi$ sentence. The following lemma is a generalization of a result of Sharpe and Welch \cite[Remark 3.17]{MR2817562}, which states that ``$X\in{\mathscr R}^\alpha([\kappa]^{<\kappa})^+$'' is a $\Pi^1_{2\cdot(1+\alpha)}$ property.
\begin{lemma}[The case $\beta=-1$ is due to Sharpe and Welch]\label{lemma_complexity}
Suppose $\beta$ is an ordinal. For each ordinal $\alpha$, if $\alpha>\omega$ let $\alpha=\bar{\alpha}+m_\alpha$, where $\bar{\alpha}$ is the greatest limit ordinal which is less than or equal to $\alpha$ and $m_\alpha<\omega$. Define an ordinal $\gamma(\alpha,\beta)$ by
\[
\gamma(\alpha,\beta)=\begin{cases}
\beta+2\alpha+1 & \text{if $\alpha<\omega$}\\
\beta+\alpha & \text{if $\alpha$ is a limit}\\
\beta+\bar{\alpha}+2m_\alpha & \text{if $\alpha>\omega$ is a successor}\\
\end{cases}
\]
Then, for all ordinals $\alpha$, there is a $\Pi^1_{\gamma(\alpha,\beta)}$ sentence $\varphi$ such that for all cardinals $\delta$ with $\max(\alpha,\beta)<\delta$ and all sets $X\subseteq\delta$ we have
\[X\in{\mathscr R}^\alpha(\Pi^1_\beta(\delta))^+\iff (V_\delta,\in,X)\models\varphi.\]
\end{lemma}
\begin{proof}
First we consider the case in which $\alpha<\omega$. If $\alpha=0$ then the result holds because there is a $\Pi^1_{\beta+1}$ sentence $\varphi$ such that if $\beta<\delta$ and $X\subseteq\delta$ then $X\in{\mathscr R}^0(\Pi^1_\beta(\delta))^+=\Pi^1_\beta(\delta)^+$ if and only if $(V_\delta,\in,X)\models\varphi$ (see \cite{MR3894041}). Assume $0<\alpha<\omega$ and the result holds for the ordinal $\alpha-1<\omega$. Then there is a $\Pi^1_{\beta+2\alpha-1}$ sentence $\psi$ such that whenever $\max(\alpha,\beta)<\delta$ and $X\subseteq\delta$ we have $X\in{\mathscr R}^{\alpha-1}(\Pi^1_\beta(\delta))^+$ if and only if $(V_\delta,\in,X)\models\psi$. By definition of the Ramsey operator, for any relevant $\delta$ and $X\subseteq\delta$, we have $X\in{\mathscr R}^\alpha(\Pi^1_\beta(\delta))^+$ if and only if for every regressive function $f:[X]^{<\omega}\to \delta$ there is a set $H\in{\mathscr R}^{\alpha-1}(\Pi^1_\beta(\delta))^+$ homogeneous for $f$. Thus there is a $\Pi^1_{\beta+2\alpha+1}$ sentence $\varphi$ (namely, the sentence ``$\forall f\exists H \psi$'') such that $X\in{\mathscr R}^\alpha(\Pi^1_\beta(\delta))^+$ if and only if $(V_\delta,\in,X)\models\varphi$.
Suppose $\alpha$ is a limit ordinal and the result holds for all ordinals $\eta<\alpha$. By definition of the Ramsey hierarchy, for all relevant $\delta$ we have ${\mathscr R}^\alpha(\Pi^1_\beta(\delta))=\bigcup_{\xi<\alpha}{\mathscr R}^\xi(\Pi^1_\beta(\delta))$, and thus, for sets $X\subseteq\delta$ we have
\[X\in{\mathscr R}^\alpha(\Pi^1_\beta(\delta))^+\iff V_\delta\models\bigwedge_{\xi<\alpha}\left(X\in{\mathscr R}^\xi(\Pi^1_\beta(\delta))^+\right).\]
For each $\xi<\alpha$ there is a $\Pi^1_{\gamma(\xi,\beta)}$ sentence $\varphi_\xi$ such that $X\in{\mathscr R}^\xi(\Pi^1_\beta(\delta))^+$ if and only if $(V_\delta,\in,X)\models\varphi_\xi$. Since the sequence $\langle\gamma(\xi,\beta)\mid\xi<\alpha\rangle$ is strictly increasing and $\gamma(\xi,\beta)<\beta+\alpha$ for all $\xi<\alpha$, it follows that there is a $\Pi^1_{\beta+\alpha}$ sentence $\varphi$ such that $X\in{\mathscr R}^\alpha(\Pi^1_\beta(\delta))^+$ if and only if $(V_\delta,\in,X)\models\varphi$ for all relevant $\delta$ and $X$.
Suppose $\alpha=\bar{\alpha}+m_\alpha>\omega$ is a successor ordinal and the result holds for all ordinals less than $\alpha$. Notice that $m_\alpha\geq 1$ since $\bar{\alpha}$ is a limit ordinal. Suppose $m_\alpha=1$. By our inductive hypothesis there is a $\Pi^1_{\beta+\bar{\alpha}}$ sentence $\psi$ such that for all relevant $\delta$ and $X\subseteq\delta$ we have $X\in{\mathscr R}^{\bar{\alpha}}(\Pi^1_\beta(\delta))^+$ if and only if $(V_\delta,\in,X)\models\psi$. By definition of the Ramsey operator, for all relevant $\delta$ and $X\subseteq\delta$ we have $X\in{\mathscr R}^{\bar{\alpha}+1}(\Pi^1_\beta(\delta))^+$ if and only if for every regressive function $f:[X]^{<\omega}\to \delta$ there is a set $H\in{\mathscr R}^{\bar{\alpha}}(\Pi^1_\beta(\delta))^+$ homogeneous for $f$. This implies that there is a $\Pi^1_{\beta+\bar{\alpha}+2}$ sentence $\varphi$ such that for all relevant $\delta$ and $X\subseteq\delta$ we have $X\in{\mathscr R}^{\bar{\alpha}+1}(\Pi^1_\beta(\delta))^+$ if and only if $(V_\delta,\in,X)\models\varphi$. Now, suppose $m_\alpha>1$ and assume the result holds for the ordinal $\bar{\alpha}+m_\alpha-1$. Then for all relevant $\delta$ and $X\subseteq\delta$ there is a $\Pi^1_{\beta+\bar{\alpha}+2(m_\alpha-1)}$ sentence $\psi$ such that $X\in{\mathscr R}^{\bar{\alpha}+m_\alpha-1}(\Pi^1_\beta(\delta))^+$ if and only if $(V_\delta,\in,X)\models\psi$. For all relevant $\delta$ and $X\subseteq\delta$ we have $X\in{\mathscr R}^{\bar{\alpha}+m_\alpha}(\Pi^1_\beta(\delta))^+$ if and only if for every regressive function $f:[X]^{<\omega}\to \delta$ there is a set $H\in{\mathscr R}^{\bar{\alpha}+m_\alpha-1}(\Pi^1_\beta(\delta))^+$ homogeneous for $f$. Since $H\in{\mathscr R}^{\bar{\alpha}+m_\alpha-1}(\Pi^1_\beta(\delta))^+$ is expressible by a $\Pi^1_{\beta+\bar{\alpha}+2(m_\alpha-1)}$ sentence $\psi$ over $(V_\delta,\in,H)$, it follows that there is a $\Pi^1_{\beta+\bar{\alpha}+2m_\alpha}$ sentence $\varphi$ such that $X\in{\mathscr R}^{\bar{\alpha}+m_\alpha}(\Pi^1_\beta(\delta))^+$ if and only if $(V_\delta,\in,X)\models\varphi$.
\end{proof}
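For orientation, here are a few values of $\gamma(\alpha,\beta)$ computed directly from the definition in Lemma \ref{lemma_complexity}; they are recorded only as a sanity check and are not used elsewhere:
\[\gamma(0,\beta)=\beta+1,\qquad\gamma(3,\beta)=\beta+7,\qquad\gamma(\omega,\beta)=\beta+\omega,\qquad\gamma(\omega+2,\beta)=\beta+\omega+4.\]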
\section{Indescribability in finite degrees of Ramseyness}\label{section_indescribability_in_finite_ramseyness}
Next we prove that for $0<m<\omega$ and $\beta<\kappa$, the ideal ${\mathscr R}^m(\Pi^1_\beta(\kappa))$ is generated by the result of applying the pre-Ramsey operator to the ideal ${\mathscr R}^{m-1}(\Pi^1_\beta(\kappa))$ one level down in the Ramsey hierarchy, together with the ideal ${\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\kappa))$. This result also gives more information about the ideals ${\mathscr R}^m([\kappa]^{<\kappa})$ considered by Feng \cite[Theorem 4.8]{MR1077260}.
\begin{theorem}\label{theorem_finite_ideal_diagram}
For all $0<m<\omega$ and all $\beta\in\{-1\}\cup\kappa$, if $\kappa\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$ then
\[{\mathscr R}^m(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))\cup{\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\kappa))}.\]
\end{theorem}
\begin{proof}
We proceed by induction on $m$. For the base case of the induction in which $m=1$, we will show that for all $\beta\in\{-1\}\cup\kappa$ we have
\[{\mathscr R}(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0(\Pi^1_\beta(\kappa))\cup\Pi^1_{\beta+2}(\kappa)}.\]
Fix $\beta\in\{-1\}\cup\kappa$ and let $I=\overline{{\mathscr R}_0(\Pi^1_\beta(\kappa))\cup\Pi^1_{\beta+2}(\kappa)}$. We will show that $X\in I^+$ if and only if $X\in{\mathscr R}(\Pi^1_\beta(\kappa))^+$.
Suppose, for the sake of contradiction, that $X\in I^+$ and $X\in{\mathscr R}(\Pi^1_\beta(\kappa))$. Since $X\in{\mathscr R}(\Pi^1_\beta(\kappa))$, there is a regressive function $f:[X]^{<\omega}\to \kappa$ such that every homogeneous set for $f$ is not $\Pi^1_\beta$-indescribable. This can be expressed by a $\Pi^1_{\beta+2}$ sentence $\varphi$ over $(V_\kappa,\in,X,f)$, and thus the set
\[C=\{\xi<\kappa\mid (V_\xi,\in,X\cap V_\xi,f\cap V_\xi)\models\varphi\}\]
is in $\Pi^1_{\beta+2}(\kappa)^*$. Since $X\notin I$, $X$ is not the union of a set in ${\mathscr R}_0(\Pi^1_\beta(\kappa))$ and a set in $\Pi^1_{\beta+2}(\kappa)$, and since $X=(X\cap C)\cup(X\setminus C)$, it follows that $X\cap C\notin {\mathscr R}_0(\Pi^1_\beta(\kappa))$. Thus, by definition of ${\mathscr R}_0(\Pi^1_\beta(\kappa))$, there is a $\xi\in X\cap C$ with $\xi>\beta$ such that there is a set $H\subseteq X\cap C\cap\xi$ which is $\Pi^1_\beta$-indescribable in $\xi$ and homogeneous for $f$. This contradicts $\xi\in C$.
Now suppose $X\in{\mathscr R}(\Pi^1_\beta(\kappa))^+$. By Remark \ref{remark_ideal_generated}, it suffices to show that $X\in{\mathscr R}_0(\Pi^1_\beta(\kappa))^+$ and $X\in\Pi^1_{\beta+2}(\kappa)^+$. To see that $X\in\Pi^1_{\beta+2}(\kappa)^+$ notice that every $(1,X)$-sequence $\vec{X}=\langle X_\xi\mid\xi\in X\rangle$ has a homogeneous set in $\Pi^1_\beta(\kappa)^+$, and thus by Lemma \ref{lemma_baumgartner_bagaria}, $X$ is $\Pi^1_{\beta+2}$-indescribable. Let us show $X\in{\mathscr R}_0(\Pi^1_\beta(\kappa))^+$. Fix a regressive function $f:[X]^{<\omega}\to \kappa$ and a club $C\subseteq\kappa$. Since ${\mathscr R}(\Pi^1_\beta(\kappa))$ is a normal ideal it follows that $X\cap C\in{\mathscr R}(\Pi^1_\beta(\kappa))^+$ and thus there is a set $H\subseteq X\cap C$ which is $\Pi^1_\beta$-indescribable in $\kappa$ and homogeneous for $f$. The fact that $H$ is $\Pi^1_\beta$-indescribable can be expressed by a $\Pi^1_{\beta+1}$ sentence $\varphi$ over $(V_\kappa,\in,H)$. Since $X\cap C\in\Pi^1_{\beta+2}(\kappa)^+$, it follows that there is a $\xi\in X\cap C$ with $\xi>\beta$ such that $(V_\xi,\in,H\cap\xi)\models \varphi$ and hence $H\cap\xi\subseteq X\cap C\cap\xi$ is $\Pi^1_\beta$-indescribable in $\xi$ and homogeneous for $f$. Thus $X\in{\mathscr R}_0(\Pi^1_\beta(\kappa))^+$.
For the inductive step, suppose that for all $k<m$ and all $\beta\in\{-1\}\cup\kappa$ we have
\[{\mathscr R}^k(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^{k-1}(\Pi^1_\beta(\kappa)))\cup{\mathscr R}^{k-1}(\Pi^1_{\beta+2}(\kappa))}.\]
Fix $\beta\in\{-1\}\cup\kappa$. Let us argue that
\[{\mathscr R}^m(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))\cup{\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\kappa))}.\] Let $I=\overline{{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))\cup{\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\kappa))}$. We will show that $X\in I^+$ if and only if $X\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$.
Suppose $X\in I^+$. For the sake of contradiction suppose that $X\in{\mathscr R}^m(\Pi^1_\beta(\kappa))$. Then there is a regressive function $f:[X]^{<\omega}\to \kappa$ such that every homogeneous set for $f$ is in ${\mathscr R}^{m-1}(\Pi^1_\beta(\kappa))$. By Lemma \ref{lemma_complexity}, the fact that every homogeneous set for $f$ is in ${\mathscr R}^{m-1}(\Pi^1_\beta(\kappa))$ can be expressed by a $\Pi^1_{\beta+2m}$ sentence $\varphi$ over $(V_\kappa,\in,X,f)$. Thus the set $C=\{\alpha<\kappa\mid (V_\alpha,\in,X\cap\alpha,f\cap V_\alpha)\models\varphi\}$ is in $\Pi^1_{\beta+2m}(\kappa)^*$. Let us show that our inductive assumption implies that $\Pi^1_{\beta+2m}(\kappa)\subseteq{\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\kappa))$. From our inductive assumption, it follows that
\begin{align*}
{\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\kappa))&=\overline{{\mathscr R}_0({\mathscr R}^{m-2}(\Pi^1_{\beta+2}(\kappa)))\cup{\mathscr R}^{m-2}(\Pi^1_{\beta+4}(\kappa))}\\
&=\overline{{\mathscr R}_0({\mathscr R}^{m-2}(\Pi^1_{\beta+2}(\kappa)))\cup{\mathscr R}_0({\mathscr R}^{m-3}(\Pi^1_{\beta+4}(\kappa)))\cup{\mathscr R}^{m-3}(\Pi^1_{\beta+6}(\kappa))}\\
&\ \ \vdots\\
&=\overline{\left(\bigcup_{i=1}^{m-1} {\mathscr R}_0({\mathscr R}^{m-(i+1)}(\Pi^1_{\beta+2i}(\kappa)))\right)\cup \Pi^1_{\beta+2m}(\kappa)}.
\end{align*}
Thus $C\in\Pi^1_{\beta+2m}(\kappa)^*\subseteq{\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\kappa))^*$. Since $X\in I^+$, $X$ is not the union of a set in ${\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))$ and a set in ${\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\kappa))$, and since $X=(X\cap C)\cup (X\setminus C)$, it follows that $X\cap C\notin {\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))$. Thus, by definition of ${\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))$, there is a $\xi\in X\cap C$ with $\xi>\beta$ and an $H\subseteq X\cap C\cap\xi$ in ${\mathscr R}^{m-1}(\Pi^1_\beta(\xi))^+$ homogeneous for $f$. But this contradicts $\xi\in C$.
Suppose $X\in {\mathscr R}^m(\Pi^1_\beta(\kappa))^+$. By Remark \ref{remark_ideal_generated}, it suffices to show that $X\in{\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\kappa))^+$ and $X\in{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))^+$. Since $X\in {\mathscr R}^m(\Pi^1_\beta(\kappa))^+$, every regressive function $f:[X]^{<\omega}\to \kappa$ has a homogeneous set $H\in{\mathscr R}^{m-1}(\Pi^1_\beta(\kappa))^+$. From our inductive assumption it follows that
\[{\mathscr R}^{m-1}(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^{m-2}(\Pi^1_\beta(\kappa)))\cup{\mathscr R}^{m-2}(\Pi^1_{\beta+2}(\kappa))}\]
and thus, every regressive function $f:[X]^{<\omega}\to \kappa$ has a homogeneous set $H\in{\mathscr R}^{m-2}(\Pi^1_{\beta+2}(\kappa))^+$. In other words, $X\in{\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\kappa))^+$. It remains to show that $X\in{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))^+$. Fix a regressive function $f:[X]^{<\omega}\to \kappa$ and a club $C\subseteq\kappa$. Since ${\mathscr R}^m(\Pi^1_\beta(\kappa))$ is a normal ideal it follows that $X\cap C\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$ and thus every $(1,X\cap C)$-sequence has a homogeneous set in ${\mathscr R}^{m-1}(\Pi^1_\beta(\kappa))^+$ (by Theorem \ref{theorem_ramsey_equiv}). From our inductive assumption we see that every element of ${\mathscr R}^{m-1}(\Pi^1_\beta(\kappa))^+$ is $\Pi^1_{\beta+2m-2}$-indescribable, and thus, by Lemma \ref{lemma_baumgartner_bagaria}, $X\cap C$ is $\Pi^1_{\beta+2m}$-indescribable. Since $X\cap C\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$ there is a set $H\subseteq X\cap C$ in ${\mathscr R}^{m-1}(\Pi^1_\beta(\kappa))^+$ which is homogeneous for $f$. By Lemma \ref{lemma_complexity}, the fact that $H\in{\mathscr R}^{m-1}(\Pi^1_\beta(\kappa))^+$ can be expressed by a $\Pi^1_{\beta+2m-1}$ sentence $\varphi$ over $(V_\kappa,\in,H)$. Since $X\cap C$ is $\Pi^1_{\beta+2m}$-indescribable we see that there is a $\xi\in X\cap C$ such that $(V_\xi,\in,H\cap\xi)\models\varphi$, in other words, $H\cap\xi\subseteq X\cap C\cap\xi$ and $H\cap \xi\in{\mathscr R}^{m-1}(\Pi^1_\beta(\xi))^+$. Thus $X\in{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))^+$.
\end{proof}
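As a concrete illustration of Theorem \ref{theorem_finite_ideal_diagram} (matching the two arrows into the node ${\mathscr R}^2(\Pi^1_1(\kappa))$ in \textsc{Figure}\ \ref{figure_finite_ideal_diagram}), taking $m=2$ and $\beta=1$ the theorem states that if $\kappa\in{\mathscr R}^2(\Pi^1_1(\kappa))^+$ then
\[{\mathscr R}^2(\Pi^1_1(\kappa))=\overline{{\mathscr R}_0({\mathscr R}(\Pi^1_1(\kappa)))\cup{\mathscr R}(\Pi^1_3(\kappa))}.\]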
\begin{figure}
\centering
\begin{tikzpicture}[x = 1.67cm , y = 1.5cm]
\tiny
\node[draw,rounded corners=0.1cm] (pi1-1) at (0,0) {$[\kappa]^{<\kappa}$};
\node[draw,rounded corners=0.1cm] (pi10) at (1,0) {${\mathop{\rm NS}}_\kappa$};
\node (pi11) at (2,0) {$\Pi^1_1(\kappa)$};
\node (pi12) at (3,0) {$\Pi^1_2(\kappa)$};
\node (pi13) at (4,0) {$\Pi^1_3(\kappa)$};
\node (pi14) at (5,0) {$\Pi^1_4(\kappa)$};
\node (pi15) at (6,0) {$\Pi^1_5(\kappa)$};
\node (dots) at (7,0) {$\cdots$};
\node (dots+) at (7,0.5) {$\cdots$};
\node[draw,rounded corners=0.1cm] (rpi1-1) at (0,1) {${\mathscr R}([\kappa]^{<\kappa})$};
\node[draw,rounded corners=0.1cm] (rpi10) at (1,1) {${\mathscr R}({\mathop{\rm NS}}_\kappa)$};
\node (rpi11) at (2,1) {${\mathscr R}(\Pi^1_1(\kappa))$};
\node (rpi12) at (3,1) {${\mathscr R}(\Pi^1_2(\kappa))$};
\node (rpi13) at (4,1) {${\mathscr R}(\Pi^1_3(\kappa))$};
\node (rpi14) at (5,1) {${\mathscr R}(\Pi^1_4(\kappa))$};
\node (rpi15) at (6,1) {${\mathscr R}(\Pi^1_5(\kappa))$};
\node (rdots) at (7,1) {$\cdots$};
\node (rdots+) at (7,1.5) {$\cdots$};
\node[draw,rounded corners=0.1cm] (r2pi1-1) at (0,2) {${\mathscr R}^2([\kappa]^{<\kappa})$};
\node[draw,rounded corners=0.1cm] (r2pi10) at (1,2) {${\mathscr R}^2({\mathop{\rm NS}}_\kappa)$};
\node (r2pi11) at (2,2) {${\mathscr R}^2(\Pi^1_1(\kappa))$};
\node (r2pi12) at (3,2) {${\mathscr R}^2(\Pi^1_2(\kappa))$};
\node (r2pi13) at (4,2) {${\mathscr R}^2(\Pi^1_3(\kappa))$};
\node (r2pi14) at (5,2) {${\mathscr R}^2(\Pi^1_4(\kappa))$};
\node (r2pi15) at (6,2) {${\mathscr R}^2(\Pi^1_5(\kappa))$};
\node (r2dots) at (7,2) {$\cdots$};
\node (r2dots+) at (7,2.5) {$\cdots$};
\node[draw,rounded corners=0.1cm] (r3pi1-1) at (0,3) {${\mathscr R}^3([\kappa]^{<\kappa})$};
\node[draw,rounded corners=0.1cm] (r3pi10) at (1,3) {${\mathscr R}^3({\mathop{\rm NS}}_\kappa)$};
\node (r3pi11) at (2,3) {${\mathscr R}^3(\Pi^1_1(\kappa))$};
\node (r3pi12) at (3,3) {${\mathscr R}^3(\Pi^1_2(\kappa))$};
\node (r3pi13) at (4,3) {${\mathscr R}^3(\Pi^1_3(\kappa))$};
\node (r3pi14) at (5,3) {${\mathscr R}^3(\Pi^1_4(\kappa))$};
\node (r3pi15) at (6,3) {${\mathscr R}^3(\Pi^1_5(\kappa))$};
\node (r3dots) at (7,3) {$\cdots$};
\node (r3dots+) at (6.89,3.5) {};
\node (r4pi1-1) at (0,4.1) {$\vdots$};
\node (r4pi1-1+) at (0.06,3.9) {};
\node (r4pi1-1++) at (0.2,3.88) {};
\node (r4pi10) at (1,4.1) {$\vdots$};
\node (r4pi10+) at (1.06,3.9) {};
\node (r4pi10++) at (1.2,3.88) {};
\node (r4pi11) at (2,4.1) {$\vdots$};
\node (r4pi11+) at (2.06,3.9) {};
\node (r4pi11++) at (2.2,3.88) {};
\node (r4pi12) at (3,4.1) {$\vdots$};
\node (r4pi12+) at (3.06,3.9) {};
\node (r4pi12++) at (3.2,3.88) {};
\node (r4pi13) at (4,4.1) {$\vdots$};
\node (r4pi13+) at (4.06,3.9) {};
\node (r4pi13++) at (4.2,3.88) {};
\node (r4pi14) at (5,4.1) {$\vdots$};
\node (r4pi14+) at (5.06,3.9) {};
\node (r4pi14++) at (5.2,3.88) {};
\node (r4pi15) at (6,4.1) {$\vdots$};
\node (r4pi15+) at (6.06,3.9) {};
\node (r4pi15++) at (6.2,3.88) {};
\draw [->] (pi1-1) -- (pi10);
\draw [->] (pi10) -- (pi11);
\draw [->] (pi11) -- (pi12);
\draw [->] (pi12) -- (pi13);
\draw [->] (pi13) -- (pi14);
\draw [->] (pi14) -- (pi15);
\draw [->] (pi15) -- (dots);
\draw [->] (pi1-1) -- (rpi1-1);
\draw [->] (pi11) -- (rpi1-1);
\draw [->] (pi10) -- (rpi10);
\draw [->] (pi12) -- (rpi10);
\draw [->] (pi11) -- (rpi11);
\draw [->] (pi13) -- (rpi11);
\draw [->] (pi12) -- (rpi12);
\draw [->] (pi14) -- (rpi12);
\draw [->] (pi13) -- (rpi13);
\draw [->] (pi15) -- (rpi13);
\draw [->] (pi14) -- (rpi14);
\draw [->] (dots) -- (rpi14);
\draw [->] (pi15) -- (rpi15);
\draw [->] (dots+) -- (rpi15);
\draw [->] (rpi1-1) -- (rpi10);
\draw [->] (rpi10) -- (rpi11);
\draw [->] (rpi11) -- (rpi12);
\draw [->] (rpi12) -- (rpi13);
\draw [->] (rpi13) -- (rpi14);
\draw [->] (rpi14) -- (rpi15);
\draw [->] (rpi15) -- (rdots);
\draw [->] (rpi1-1) -- (r2pi1-1);
\draw [->] (rpi11) -- (r2pi1-1);
\draw [->] (rpi10) -- (r2pi10);
\draw [->] (rpi12) -- (r2pi10);
\draw [->] (rpi11) -- (r2pi11);
\draw [->] (rpi13) -- (r2pi11);
\draw [->] (rpi12) -- (r2pi12);
\draw [->] (rpi14) -- (r2pi12);
\draw [->] (rpi13) -- (r2pi13);
\draw [->] (rpi15) -- (r2pi13);
\draw [->] (rpi14) -- (r2pi14);
\draw [->] (rdots) -- (r2pi14);
\draw [->] (rpi15) -- (r2pi15);
\draw [->] (rdots+) -- (r2pi15);
\draw [->] (r2pi1-1) -- (r2pi10);
\draw [->] (r2pi10) -- (r2pi11);
\draw [->] (r2pi11) -- (r2pi12);
\draw [->] (r2pi12) -- (r2pi13);
\draw [->] (r2pi13) -- (r2pi14);
\draw [->] (r2pi14) -- (r2pi15);
\draw [->] (r2pi15) -- (r2dots);
\draw [->] (r2pi1-1) -- (r3pi1-1);
\draw [->] (r2pi11) -- (r3pi1-1);
\draw [->] (r2pi10) -- (r3pi10);
\draw [->] (r2pi12) -- (r3pi10);
\draw [->] (r2pi11) -- (r3pi11);
\draw [->] (r2pi13) -- (r3pi11);
\draw [->] (r2pi12) -- (r3pi12);
\draw [->] (r2pi14) -- (r3pi12);
\draw [->] (r2pi13) -- (r3pi13);
\draw [->] (r2pi15) -- (r3pi13);
\draw [->] (r2pi14) -- (r3pi14);
\draw [->] (r2dots) -- (r3pi14);
\draw [->] (r2pi15) -- (r3pi15);
\draw [->] (r2dots+) -- (r3pi15);
\draw [->] (r3pi1-1) -- (r3pi10);
\draw [->] (r3pi10) -- (r3pi11);
\draw [->] (r3pi11) -- (r3pi12);
\draw [->] (r3pi12) -- (r3pi13);
\draw [->] (r3pi13) -- (r3pi14);
\draw [->] (r3pi14) -- (r3pi15);
\draw [->] (r3pi15) -- (r3dots);
\draw [->] (r3pi1-1) -- (r4pi1-1.south);
\draw [->] (r3pi11) -- (r4pi1-1++);
\draw [->] (r3pi10) -- (r4pi10.south);
\draw [->] (r3pi12) -- (r4pi10++);
\draw [->] (r3pi11) -- (r4pi11.south);
\draw [->] (r3pi13) -- (r4pi11++);
\draw [->] (r3pi12) -- (r4pi12.south);
\draw [->] (r3pi14) -- (r4pi12++);
\draw [->] (r3pi13) -- (r4pi13.south);
\draw [->] (r3pi15) -- (r4pi13++);
\draw [->] (r3pi14) -- (r4pi14.south);
\draw [->] (r3dots) -- (r4pi14++);
\draw [->] (r3pi15) -- (r4pi15.south);
\draw [->] (r3dots+) -- (r4pi15++);
\end{tikzpicture}
\caption{\tiny Diagram of ideal containments of ${\mathscr R}^m(\Pi^1_\beta(\kappa))$ for $m<\omega$ and $\beta<\kappa$. The circled ideals are those in Feng's original hierarchy. Each arrow $\rightarrow$ indicates a containment $\subseteq$ which is proper when the ideals are nontrivial by Theorem \ref{theorem_proper_containments_in_finite_diagram}.}\label{figure_finite_ideal_diagram}
\end{figure}
The next corollary generalizes a result of Feng \cite[Theorem 4.8]{MR1077260} and indicates precisely the degree of indescribability that can be derived from a given finite degree of Ramseyness.
\begin{corollary}\label{corollary_indescribability_in_finite_ramseyness}
For all $0<m<\omega$ and all $\beta\in\{-1\}\cup\kappa$, if $\kappa\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$ then
\[{\mathscr R}^m(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))\cup\Pi^1_{\beta+2m}(\kappa)}.\]\marginpar{\tiny We really only need ${\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_n(\kappa)))$ here since the relevant ideals form a chain.}
\end{corollary}
\begin{proof}
Fix $\beta\in\{-1\}\cup\kappa$. If $m=1$ the result follows directly from Theorem \ref{theorem_finite_ideal_diagram}. Now suppose $1\leq m<\omega$ and the result holds for $m$; let us show that it holds for $m+1$. By Theorem \ref{theorem_finite_ideal_diagram}, we have
\[{\mathscr R}^{m+1}(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^m(\Pi^1_\beta(\kappa)))\cup {\mathscr R}^m(\Pi^1_{\beta+2}(\kappa))}.\]
By our inductive assumption we see that
\[{\mathscr R}^{m+1}(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^m(\Pi^1_\beta(\kappa)))\cup {\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\kappa)))\cup\Pi^1_{\beta+2m+2}(\kappa)}.\]
From Theorem \ref{theorem_finite_ideal_diagram} it follows that ${\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\kappa)))\subseteq {\mathscr R}_0({\mathscr R}^m(\Pi^1_\beta(\kappa)))$ and hence
\[{\mathscr R}^{m+1}(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^m(\Pi^1_\beta(\kappa)))\cup\Pi^1_{\beta+2(m+1)}(\kappa)}.\]
Thus the result holds for $m+1$.
\end{proof}
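To see how the ideal $\Pi^1_{\beta+2m}(\kappa)$ emerges from the above induction, consider the illustrative case $m=2$, assuming $\kappa\in{\mathscr R}^2(\Pi^1_\beta(\kappa))^+$ so that the relevant instances of Theorem \ref{theorem_finite_ideal_diagram} apply. Combining the theorem with the case $m=1$ of the corollary applied to $\beta+2$, and then using ${\mathscr R}_0(\Pi^1_{\beta+2}(\kappa))\subseteq{\mathscr R}_0({\mathscr R}(\Pi^1_\beta(\kappa)))$, we obtain
\begin{align*}
{\mathscr R}^2(\Pi^1_\beta(\kappa))&=\overline{{\mathscr R}_0({\mathscr R}(\Pi^1_\beta(\kappa)))\cup{\mathscr R}(\Pi^1_{\beta+2}(\kappa))}\\
&=\overline{{\mathscr R}_0({\mathscr R}(\Pi^1_\beta(\kappa)))\cup{\mathscr R}_0(\Pi^1_{\beta+2}(\kappa))\cup\Pi^1_{\beta+4}(\kappa)}\\
&=\overline{{\mathscr R}_0({\mathscr R}(\Pi^1_\beta(\kappa)))\cup\Pi^1_{\beta+4}(\kappa)}.
\end{align*}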
As in Baumgartner's characterization of ineffability \cite[Section 7]{MR0384553} in terms of the subtle ideal and the ${\mathbb P}i^1_2$-indescribability ideal, and as in Baumgartner's characterization of Ramseyness \cite[Theorem 4.4 and Theorem 4.5]{MR0540770} in terms of the pre-Ramsey and ${\mathbb P}i^1_1$-indescribability ideals, the next corollary demonstrates that large cardinal ideals are, in a sense, necessary for certain results.
\begin{corollary}\label{corollary_necessity}
For all $0<m<\omega$ and all $\beta\in\{-1\}\cup\kappa$ we have $\kappa\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$ if and only if \emph{both} of the following properties hold.
\begin{enumerate}
\item $\kappa\in{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))^+$ and $\kappa\in\Pi^1_{\beta+2m}(\kappa)^+$.
\item The ideal generated by ${\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))\cup\Pi^1_{\beta+2m}(\kappa)$ is nontrivial and equals ${\mathscr R}^m(\Pi^1_\beta(\kappa))$.
\end{enumerate}
Moreover, reference to the ideals in the above characterization cannot be removed because the least cardinal $\kappa$ such that $\kappa\in{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))^+$ and $\kappa\in\Pi^1_{\beta+2m}(\kappa)^+$ is not in ${\mathscr R}^m(\Pi^1_\beta(\kappa))^+$.
\end{corollary}
\begin{proof}
The characterization of $\kappa\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$ follows directly from Corollary \ref{corollary_indescribability_in_finite_ramseyness}. For the additional statement, let us show that if $\kappa\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$ then there are many cardinals $\xi<\kappa$ such that $\xi\in{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\xi)))^+$ and $\xi\in\Pi^1_{\beta+2m}(\xi)^+$. Suppose $\kappa\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$. Since
\[{\mathscr R}^m(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))\cup\Pi^1_{\beta+2m}(\kappa)},\]
it follows that $\kappa\in{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))^+$ and $\kappa\in\Pi^1_{\beta+2m}(\kappa)^+$. Now $\kappa\in{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))^+$ is $\Pi^1_1$-expressible over $V_\kappa$ and thus the set
\[C_0=\{\xi<\kappa\mid \xi\in{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\xi)))^+\}\]
is in $\Pi^1_1(\kappa)^*\subseteq\Pi^1_{\beta+2m}(\kappa)^*\subseteq{\mathscr R}^m(\Pi^1_\beta(\kappa))^*$.
Furthermore, by Corollary \ref{corollary_ramseyness_reflects_indescribability}, we see that the set
\[C_1=\{\xi<\kappa\mid\xi\in\Pi^1_{\beta+2m}(\xi)^+\}\]
is in ${\mathscr R}^1([\kappa]^{<\kappa})^*\subseteq{\mathscr R}^m(\Pi^1_\beta(\kappa))^*$. Therefore, $C_0\cap C_1\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^*$.
\end{proof}
In fact, essentially the same proof shows that the additional statement in Corollary \ref{corollary_necessity} can be improved.
\begin{corollary}
Suppose $0<m<\omega$ and $\beta\in\{-1\}\cup\kappa$. If $\kappa\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$ then the set
\[\{\xi<\kappa\mid \xi\in{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\xi)))^+\cap{\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\xi))^+\}\]
is in ${\mathscr R}^m(\Pi^1_\beta(\kappa))^*$.
\end{corollary}
The following two corollaries of Theorem \ref{theorem_finite_ideal_diagram} show that the assumption ``$\exists\kappa$($\kappa\in{\mathscr R}^m(\Pi^1_{\beta+1}(\kappa))^+$)'' is strictly stronger than ``$\exists\kappa$($\kappa\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$)''. In other words, each row of \textsc{Figure}\ \ref{figure_finite_ideal_diagram} yields a strict hierarchy of large cardinals assuming the ideals are nontrivial.
\begin{corollary}
Suppose $0<m<\omega$, $\beta\in\{-1\}\cup\kappa$ and $\kappa\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$. If $S\in{\mathscr R}^{\hat{m}}(\Pi^1_{\hat{\beta}}(\kappa))^+$ where $\hat{\beta}+2\hat{m}+1\leq \beta+2m$ then the set
\[T=\{\xi<\kappa\mid S\cap\xi\in{\mathscr R}^{\hat{m}}(\Pi^1_{\hat{\beta}}(\xi))^+\}\]
is in ${\mathscr R}^m(\Pi^1_\beta(\kappa))^*$.
\end{corollary}
\begin{proof}
By Lemma \ref{lemma_complexity}, the fact that $S\in{\mathscr R}^{\hat{m}}(\Pi^1_{\hat{\beta}}(\kappa))^+$ is expressible by a $\Pi^1_{\hat{\beta}+2\hat{m}+1}$ sentence $\varphi$ over $(V_\kappa,\in,S)$. Since $\hat{\beta}+2\hat{m}+1\leq\beta+2m$ we have $\Pi^1_{\hat{\beta}+2\hat{m}+1}(\kappa)\subseteq\Pi^1_{\beta+2m}(\kappa)$, and by Corollary \ref{corollary_indescribability_in_finite_ramseyness}, since $\kappa\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$ we have $\Pi^1_{\beta+2m}(\kappa)\subseteq{\mathscr R}^m(\Pi^1_\beta(\kappa))$, and thus the set
\[T=\{\xi<\kappa\mid (V_\xi,\in,S\cap\xi)\models\varphi\}=\{\xi<\kappa\mid S\cap\xi\in{\mathscr R}^{\hat{m}}(\Pi^1_{\hat{\beta}}(\xi))^+\}\]
is in $\Pi^1_{\hat{\beta}+2\hat{m}+1}(\kappa)^*\subseteq{\mathscr R}^m(\Pi^1_\beta(\kappa))^*$.
\end{proof}
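For a purely illustrative instance of the arithmetic constraint in the preceding corollary, suppose $\kappa\in{\mathscr R}^2(\Pi^1_1(\kappa))^+$, so that $\beta+2m=5$. Taking $\hat{m}=1$ and $\hat{\beta}=2$ we have $\hat{\beta}+2\hat{m}+1=5\leq 5$, and the corollary says that for every $S\in{\mathscr R}(\Pi^1_2(\kappa))^+$ the set
\[\{\xi<\kappa\mid S\cap\xi\in{\mathscr R}(\Pi^1_2(\xi))^+\}\]
is in ${\mathscr R}^2(\Pi^1_1(\kappa))^*$.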
\begin{corollary}\label{corollary_beta_to_beta_plus_one_hierarchy}
For all $0< m <\omega$ and all $\beta\in\{-1\}\cup\kappa$, if $\kappa\in{\mathscr R}^m(\Pi^1_{\beta+1}(\kappa))^+$ then the set
\[T=\{\xi<\kappa\mid \xi\in{\mathscr R}^m(\Pi^1_\beta(\xi))^+\}\]
is in ${\mathscr R}^m(\Pi^1_{\beta+1}(\kappa))^*$.
\end{corollary}
Now let us show that the containments of the ideals from Theorem \ref{theorem_finite_ideal_diagram} as illustrated in \textsc{Figure}\ \ref{figure_finite_ideal_diagram} are proper when the ideals involved are nontrivial.
\begin{theorem}\label{theorem_proper_containments_in_finite_diagram}
Suppose $0<m<\omega$ and $\beta<\kappa$.
\begin{enumerate}
\item If $\kappa\in{\mathscr R}^m(\Pi^1_{\beta+1}(\kappa))^+$ then ${\mathscr R}^m(\Pi^1_{\beta}(\kappa))\subsetneq{\mathscr R}^m(\Pi^1_{\beta+1}(\kappa))$.
\item If $\kappa\in{\mathscr R}^m(\Pi^1_{\beta}(\kappa))^+$ then ${\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\kappa))\subsetneq{\mathscr R}^m(\Pi^1_\beta(\kappa))$.
\end{enumerate}
\end{theorem}
\begin{proof}
The containments follow from Theorem \ref{theorem_finite_ideal_diagram}, so we only need to show the properness of the containments.
For (1), let $S=\{\xi<\kappa\mid\xi\in{\mathscr R}^m(\Pi^1_\beta(\xi))\}$. Then $S\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$ by Lemma \ref{lemma_set_of_nons_is_positive}. Furthermore, by Corollary \ref{corollary_beta_to_beta_plus_one_hierarchy}, $S\in{\mathscr R}^m(\Pi^1_{\beta+1}(\kappa))$. Thus ${\mathscr R}^m(\Pi^1_{\beta}(\kappa))\subsetneq{\mathscr R}^m(\Pi^1_{\beta+1}(\kappa))$.
For (2), let $S=\{\xi<\kappa\mid\xi\in{\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\xi))\}$. By Lemma \ref{lemma_set_of_nons_is_positive} we see that $S\in{\mathscr R}^{m-1}(\Pi^1_{\beta+2}(\kappa))^+$. From Corollary \ref{corollary_ramseyness_reflects_indescribability}, it follows that $\kappa\setminus S\in{\mathscr R}^m([\kappa]^{<\kappa})^*$ and since ${\mathscr R}^m([\kappa]^{<\kappa})\subseteq{\mathscr R}^m(\Pi^1_\beta(\kappa))$, this implies $S\in{\mathscr R}^m(\Pi^1_\beta(\kappa))$.
\end{proof}
The next corollary, which follows directly from Theorem \ref{theorem_finite_ideal_diagram}, shows that iterating the Ramsey operator on an indescribability ideal $\Pi^1_{\beta+n}(\kappa)$ infinitely many times leads to the same ideal, no matter what $n<\omega$ was initially chosen (see \textsc{Figure}\ \ref{figure_first_collapse}).
\begin{corollary}\label{corollary_collapse}
The following hold.
\begin{enumerate}
\item If $\kappa\in{\mathscr R}^\omega([\kappa]^{<\kappa})^+$, then for all $n<\omega$ we have \[{\mathscr R}^\omega([\kappa]^{<\kappa})={\mathscr R}^\omega(\Pi^1_n(\kappa)).\]
\item For all limit ordinals $\beta<\kappa$, if $\kappa\in{\mathscr R}^\omega(\Pi^1_\beta(\kappa))^+$, then for all $n<\omega$ we have \[{\mathscr R}^\omega(\Pi^1_\beta(\kappa))={\mathscr R}^\omega(\Pi^1_{\beta+n}(\kappa)).\]
\end{enumerate}
\end{corollary}
\begin{figure}
\centering
\begin{tikzpicture}[x=2.5cm,y=0.7cm]
\tiny
\node (pi1-1) at (0,0) {$\Pi^1_\beta(\kappa)$};
\node (pi10) at (1,0) {$\Pi^1_{\beta+1}(\kappa)$};
\node (pi11) at (2,0) {$\Pi^1_{\beta+2}(\kappa)$};
\node (pi13) at (2.7,0) {$\cdots$};
\node (rpi1-1) at (0,1) {${\mathscr R}(\Pi^1_\beta(\kappa))$};
\node (rpi10) at (1,1) {${\mathscr R}(\Pi^1_{\beta+1}(\kappa))$};
\node (rpi11) at (2,1) {${\mathscr R}(\Pi^1_{\beta+2}(\kappa))$};
\node (rpi13) at (2.7,1) {$\cdots$};
\node (r2pi1-1) at (0,2) {${\mathscr R}^2(\Pi^1_\beta(\kappa))$};
\node (r2pi10) at (1,2) {${\mathscr R}^2(\Pi^1_{\beta+1}(\kappa))$};
\node (r2pi11) at (2,2) {${\mathscr R}^2(\Pi^1_{\beta+2}(\kappa))$};
\node (r2pi13) at (2.7,2) {$\cdots$};
\node (r3pi1-1) at (0,3) {${\mathscr R}^3(\Pi^1_\beta(\kappa))$};
\node (r3pi10) at (1,3) {${\mathscr R}^3(\Pi^1_{\beta+1}(\kappa))$};
\node (r3pi11) at (2,3) {${\mathscr R}^3(\Pi^1_{\beta+2}(\kappa))$};
\node (r3pi13) at (2.7,3) {$\cdots$};
\node (r4pi1-1) at (0,4) {$\vdots$};
\node (r4pi10) at (1,4) {$\vdots$};
\node (r4pi11) at (2,4) {$\vdots$};
\draw[rounded corners=0.1cm] (-0.5,-0.7) rectangle (2.9,4.5);
\node (rwpi1-1) at (1,6) {${\mathscr R}^\omega(\Pi^1_\beta(\kappa))={\mathscr R}^\omega(\Pi^1_{\beta+1}(\kappa))={\mathscr R}^\omega(\Pi^1_{\beta+2}(\kappa))=\cdots$};
\draw [->] (r4pi1-1.north) -- (rwpi1-1);
\draw [->] (r4pi10.north) -- (rwpi1-1);
\draw [->] (r4pi11.north) -- (rwpi1-1);
\node (rw+1pi1-1) at (1,7.5) {${\mathscr R}^{\omega+1}(\Pi^1_\beta(\kappa))$};
\node (rw+2pi1-1) at (1,9) {${\mathscr R}^{\omega+2}(\Pi^1_\beta(\kappa))$};
\node (rw+3pi1-1) at (1,10.5) {};
\node (rw+4pi1-1) at (1,11) {$\vdots$};
\draw [->] (rwpi1-1) -- (rw+1pi1-1);
\draw [->] (rw+1pi1-1) -- (rw+2pi1-1);
\draw [->] (rw+2pi1-1) -- (rw+3pi1-1);
\end{tikzpicture}
\caption{\tiny Indescribability becomes redundant as one moves up the Ramsey hierarchy.}\label{figure_first_collapse}
\end{figure}
Note that, although Corollary \ref{corollary_collapse} is an easy consequence of Theorem \ref{theorem_finite_ideal_diagram}, its proof is substantially different from that of the observation ${\mathscr R}^\omega([\kappa]^{<\kappa})={\mathscr R}^\omega({\mathop{\rm NS}}_\kappa)$ made above in Remark \ref{remark_fengs_defintion}, because the ideals involved \emph{do not} fit into a chain.
\begin{remark}\label{remark_first_collapse}
Let us point out an easy consequence of Corollary \ref{corollary_collapse} which will serve as motivation for some of the results in Section \ref{section_indescribability_in_infinite_ramseyness} (see Remark \ref{remark_hierarchy}). The previous corollary easily implies that when $\omega\leq\alpha<\kappa$ and $\beta<\kappa$, the assertion $\kappa\in{\mathscr R}^\alpha(\Pi^1_{\beta}(\kappa))^+$ is equivalent to $\kappa\in{\mathscr R}^\alpha(\Pi^1_{\beta+n}(\kappa))^+$ for all $n<\omega$. In other words, for $\omega\leq\alpha<\kappa$, $\kappa$ being $\alpha$-$\Pi^1_\beta$-Ramsey is equivalent to $\kappa$ being $\alpha$-$\Pi^1_{\beta+n}$-Ramsey for all $n<\omega$.
\end{remark}
\section{Indescribability in infinite degrees of Ramseyness}\label{section_indescribability_in_infinite_ramseyness}
We now proceed to extend some of the results of Section \ref{section_indescribability_in_finite_ramseyness} to the ideals ${\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$ for $\alpha>\omega$.
\begin{lemma}\label{lemma_indescribability_from_ramseyness}
For all ordinals $\alpha,\beta<\kappa$ the following hold.
\begin{enumerate}
\item If $\alpha$ is a successor ordinal then $\Pi^1_{\beta+\alpha}(\kappa)\subseteq{\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$.
\item If $\alpha$ is a limit ordinal or if $\alpha=0$ then $\bigcup_{\xi<\beta+\alpha}\Pi^1_\xi(\kappa)\subseteq{\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$.
\end{enumerate}
\end{lemma}
\begin{proof}
Fix an ordinal $\beta<\kappa$. We proceed by induction on $\alpha$. Clearly the result holds for $\alpha=0$, since $\bigcup_{\xi<\beta}\Pi^1_\xi(\kappa)\subseteq\Pi^1_\beta(\kappa)$. The case in which $\alpha$ is a limit is trivial.
For the successor step of the induction, let us argue that ${\mathscr R}^{\alpha+1}(\Pi^1_\beta(\kappa))^+\subseteq\Pi^1_{\beta+\alpha+1}(\kappa)^+$, assuming the result holds for $\alpha$. Suppose $X\in{\mathscr R}^{\alpha+1}(\Pi^1_\beta(\kappa))^+$. Then every regressive function $f:[X]^{<\omega}\to \kappa$ has a homogeneous set $H\in{\mathscr R}^\alpha(\Pi^1_\beta(\kappa))^+$. By our inductive hypothesis, ${\mathscr R}^\alpha(\Pi^1_\beta(\kappa))^+\subseteq\bigcap_{\xi<\beta+\alpha}\Pi^1_\xi(\kappa)^+$. Thus, by Lemma \ref{lemma_baumgartner_bagaria}, it follows that $X\in\Pi^1_{\beta+\alpha+1}(\kappa)^+$.
\end{proof}
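For example (these particular instances are recorded only for orientation and are not used later), Lemma \ref{lemma_indescribability_from_ramseyness} yields
\[\Pi^1_{\beta+\omega+1}(\kappa)\subseteq{\mathscr R}^{\omega+1}(\Pi^1_\beta(\kappa))\qquad\text{and}\qquad\bigcup_{\xi<\beta+\omega}\Pi^1_\xi(\kappa)\subseteq{\mathscr R}^\omega(\Pi^1_\beta(\kappa)).\]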
Among other things, the next theorem shows that Lemma \ref{lemma_indescribability_from_ramseyness} (1) can be improved when $\alpha$ is a successor ordinal which is not an immediate successor of a limit ordinal.
\begin{theorem}\label{theorem_indescribability_in_infinite_ramseyness}
Suppose $\kappa$ is a cardinal, $\alpha<\kappa$ is a limit ordinal and $\beta\in\{-1\}\cup\kappa$. For all $m<\omega$, if $\kappa\in{\mathscr R}^{\alpha+m+1}(\Pi^1_\beta(\kappa))^+$ then
\[{\mathscr R}^{\alpha+m+1}(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^{\alpha+m}(\Pi^1_\beta(\kappa)))\cup\Pi^1_{\beta+\alpha+2m+1}(\kappa)}.\]
\end{theorem}
\begin{proof}
For the base case, let $m=0$. Let $I=\overline{{\mathscr R}_0({\mathscr R}^\alpha(\Pi^1_\beta(\kappa)))\cup\Pi^1_{\beta+\alpha+1}(\kappa)}$. We will show that $X\in{\mathscr R}^{\alpha+1}(\Pi^1_\beta(\kappa))^+$ if and only if $X\in I^+$.
Suppose $X\in{\mathscr R}^{\alpha+1}(\Pi^1_\beta(\kappa))^+$. By Remark \ref{remark_ideal_generated}, it suffices to show that $X\in\Pi^1_{\beta+\alpha+1}(\kappa)^+$ and $X\in{\mathscr R}_0({\mathscr R}^\alpha(\Pi^1_\beta(\kappa)))^+$. By Lemma \ref{lemma_indescribability_from_ramseyness}, we have $X\in\Pi^1_{\beta+\alpha+1}(\kappa)^+$. Let us show that $X\in{\mathscr R}_0({\mathscr R}^\alpha(\Pi^1_\beta(\kappa)))^+$. Fix a regressive function $f:[X]^{<\omega}\to \kappa$ and a club $C\subseteq\kappa$. By assumption there is a set $H\in{\mathscr R}^\alpha(\Pi^1_\beta(\kappa))^+$ homogeneous for $f$. By Lemma \ref{lemma_complexity}, the fact that $H\in{\mathscr R}^\alpha(\Pi^1_\beta(\kappa))^+$ can be expressed by a $\Pi^1_{\beta+\alpha}$ sentence $\varphi$ over $(V_\kappa,\in,H)$, and since $X\cap C\in\Pi^1_{\beta+\alpha+1}(\kappa)^+$, there is a $\xi\in X\cap C$ with $\xi>\alpha,\beta$ such that $(V_\xi,\in,H\cap\xi)\models\varphi$, and hence $H\cap\xi\in{\mathscr R}^\alpha(\Pi^1_\beta(\xi))^+$. Thus, $X\in{\mathscr R}_0({\mathscr R}^\alpha(\Pi^1_\beta(\kappa)))^+$.
Now suppose $X\in I^+$. We argue that $X\in{\mathscr R}^{\alpha+1}(\Pi^1_\beta(\kappa))^+$. Let $f:[X]^{<\omega}\to \kappa$ be a regressive function. Suppose that every homogeneous set $H$ for $f$ is in ${\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$. By Lemma \ref{lemma_complexity}, this can be expressed by a $\Pi^1_{\beta+\alpha+1}$ sentence $\varphi$ over $(V_\kappa,\in,f)$. This implies that the set $C=\{\xi<\kappa\mid(V_\xi,\in,f\cap V_\xi)\models\varphi\}$ is in $\Pi^1_{\beta+\alpha+1}(\kappa)^*$. Since $X\in I^+$, it follows that $X$ is not the union of a set in ${\mathscr R}_0({\mathscr R}^\alpha(\Pi^1_\beta(\kappa)))$ and a set in $\Pi^1_{\beta+\alpha+1}(\kappa)$. Since $X=(X\cap C)\cup(X\setminus C)$ and $X\setminus C\in\Pi^1_{\beta+\alpha+1}(\kappa)$, we see that $X\cap C\in{\mathscr R}_0({\mathscr R}^\alpha(\Pi^1_\beta(\kappa)))^+$. Hence there is a $\xi\in X\cap C$ with $\xi>\alpha,\beta$ for which there is a set $H\subseteq X\cap C\cap\xi$ in ${\mathscr R}^\alpha(\Pi^1_\beta(\xi))^+$ homogeneous for $f$. This contradicts $\xi\in C$.
For the inductive step, we suppose
\[{\mathscr R}^{\alpha+m}(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^{\alpha+m-1}(\Pi^1_\beta(\kappa)))\cup\Pi^1_{\beta+\alpha+2m-1}(\kappa)}\]
and show
\[{\mathscr R}^{\alpha+m+1}(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^{\alpha+m}(\Pi^1_\beta(\kappa)))\cup\Pi^1_{\beta+\alpha+2m+1}(\kappa)}.\]
Let $I=\overline{{\mathscr R}_0({\mathscr R}^{\alpha+m}(\Pi^1_\beta(\kappa)))\cup\Pi^1_{\beta+\alpha+2m+1}(\kappa)}$.
Suppose $X\in{\mathscr R}^{\alpha+m+1}(\Pi^1_\beta(\kappa))^+$. By Remark \ref{remark_ideal_generated}, it suffices to show that $X\in \Pi^1_{\beta+\alpha+2m+1}(\kappa)^+$ and $X\in {\mathscr R}_0({\mathscr R}^{\alpha+m}(\Pi^1_\beta(\kappa)))^+$. By our inductive hypothesis ${\mathscr R}^{\alpha+m}(\Pi^1_\beta(\kappa))^+\subseteq\Pi^1_{\beta+\alpha+2m-1}(\kappa)^+$, and thus by Lemma \ref{lemma_baumgartner_bagaria} we have $X\in\Pi^1_{\beta+\alpha+2m+1}(\kappa)^+$. Let us show that $X\in{\mathscr R}_0({\mathscr R}^{\alpha+m}(\Pi^1_\beta(\kappa)))^+$. Fix a regressive function $f:[X]^{<\omega}\to \kappa$ and a club $C\subseteq\kappa$. Since $X\in{\mathscr R}^{\alpha+m+1}(\Pi^1_\beta(\kappa))^+$ there is a set $H\in{\mathscr R}^{\alpha+m}(\Pi^1_\beta(\kappa))^+$ homogeneous for $f$. By Lemma \ref{lemma_complexity}, the fact that $H\in{\mathscr R}^{\alpha+m}(\Pi^1_\beta(\kappa))^+$ can be expressed by a $\Pi^1_{\beta+\alpha+2m}$ sentence $\varphi$ over $(V_\kappa,\in,H)$. Since $X\cap C\in\Pi^1_{\beta+\alpha+2m+1}(\kappa)^+$, there is a $\xi\in X\cap C$ with $\xi>\alpha+m,\beta$, for which $H\cap\xi\in{\mathscr R}^{\alpha+m}(\Pi^1_\beta(\xi))^+$. Thus $X\in{\mathscr R}_0({\mathscr R}^{\alpha+m}(\Pi^1_\beta(\kappa)))^+$.
Conversely, suppose $X\in I^+$. Let $f:[X]^{<\omega}\to \kappa$ be a regressive function. Suppose that every set which is homogeneous for $f$ is in ${\mathscr R}^{\alpha+m}(\Pi^1_\beta(\kappa))$. By Lemma \ref{lemma_complexity}, this can be expressed by a $\Pi^1_{\beta+\alpha+2m+1}$ sentence $\varphi$ over $(V_\kappa,\in,f)$. Thus the set $C=\{\xi<\kappa\mid (V_\xi,\in,f\cap V_\xi)\models\varphi\}$ is in $\Pi^1_{\beta+\alpha+2m+1}(\kappa)^*$. Since $X\in I^+$, it follows that $X$ is not the union of a set in ${\mathscr R}_0({\mathscr R}^{\alpha+m}(\Pi^1_\beta(\kappa)))$ and a set in $\Pi^1_{\beta+\alpha+2m+1}(\kappa)$, and since $X\setminus C\in\Pi^1_{\beta+\alpha+2m+1}(\kappa)$, we see that $X\cap C\in{\mathscr R}_0({\mathscr R}^{\alpha+m}(\Pi^1_\beta(\kappa)))^+$. Hence there is a $\xi\in X\cap C$ with $\xi>\alpha+m,\beta$ such that there is a set $H\subseteq X\cap C\cap\xi$ in ${\mathscr R}^{\alpha+m}(\Pi^1_\beta(\xi))^+$ homogeneous for $f$. This contradicts $\xi\in C$.
\end{proof}
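To compare Theorem \ref{theorem_indescribability_in_infinite_ramseyness} with Lemma \ref{lemma_indescribability_from_ramseyness}, consider the illustrative case $\alpha=\omega$ and $m=1$, so that $\alpha+m+1=\omega+2$: provided $\kappa\in{\mathscr R}^{\omega+2}(\Pi^1_\beta(\kappa))^+$, the theorem gives
\[\Pi^1_{\beta+\omega+3}(\kappa)\subseteq{\mathscr R}^{\omega+2}(\Pi^1_\beta(\kappa)),\]
whereas Lemma \ref{lemma_indescribability_from_ramseyness} (1) only gives $\Pi^1_{\beta+\omega+2}(\kappa)\subseteq{\mathscr R}^{\omega+2}(\Pi^1_\beta(\kappa))$. This instance is included only to illustrate the improvement mentioned before the theorem.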
\begin{remark}\label{remark_hierarchy}
We would like to use Theorem \ref{theorem_indescribability_in_infinite_ramseyness} to prove an analogue of Corollary \ref{corollary_beta_to_beta_plus_one_hierarchy}, which would say that the strength of the hypothesis ``$\exists\kappa$ $\kappa\in{\mathscr R}^\alpha(\Pi^1_\beta(\kappa))^+$'' increases as $\beta$ increases. However, there is an added complication, as illustrated in Corollary \ref{corollary_collapse} and Remark \ref{remark_first_collapse}, which is that even if $\beta_0<\beta_1<\kappa$, it may be that $\kappa\in{\mathscr R}^\alpha(\Pi^1_{\beta_0}(\kappa))^+$ is equivalent to $\kappa\in{\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))^+$, if $\alpha$ is large enough. Thus, in order to show that the hypotheses $\kappa\in{\mathscr R}^\alpha(\Pi^1_\beta(\kappa))^+$ form a hierarchy as $\beta$ increases, we will need to determine at which $\alpha$ the hypotheses $\kappa\in{\mathscr R}^\alpha(\Pi^1_{\beta_0}(\kappa))^+$ and $\kappa\in{\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))^+$ become equivalent.
\end{remark}
\begin{remark}
Using Theorem \ref{theorem_indescribability_in_infinite_ramseyness}, it is possible to formulate a characterization of $\kappa\in{\mathscr R}^{\alpha+m+1}(\Pi^1_\beta(\kappa))^+$ in terms of the relevant ideals along the lines of Corollary \ref{corollary_necessity} above. Moreover, one can show that reference to the ideals in such a characterization is, in fact, necessary. We leave the details to the reader.
\end{remark}
Let us prove Theorem 1.2 mentioned in Section \ref{section_introduction}. That is, we will show that for any two ordinals $\beta_0<\beta_1<\kappa$, the two increasing chains of ideals $\langle{\mathscr R}^\alpha(\Pi^1_{\beta_0}(\kappa))\mid\alpha<\kappa\rangle$ and $\langle{\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))\mid\alpha<\kappa\rangle$ are eventually equal, and we determine the precise index at which the equality begins (see \textsc{Figure}\ \ref{figure_culmination} for an illustration of this result).
\begin{theorem_intro}
Suppose $\beta_0<\beta_1$ are in $\{-1\}\cup\kappa$ and let $\sigma=\mathop{\rm ot}\nolimits(\beta_1\setminus\beta_0)$. Define $\alpha=\sigma\cdot\omega$. Suppose $\kappa\in{\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))^+$ so that the ideals under consideration are nontrivial. Then $\alpha$ is the least ordinal such that ${\mathscr R}^\alpha(\Pi^1_{\beta_0}(\kappa))={\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))$.
\end{theorem_intro}
\begin{proof}
First, let us show ${\mathscr R}^\alpha(\Pi^1_{\beta_0}(\kappa))={\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))$. Since $\beta_0<\beta_1$, it is clear that ${\mathscr R}^\alpha(\Pi^1_{\beta_0}(\kappa))\subseteq{\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))$. Let us show that ${\mathscr R}^\alpha(\Pi^1_{\beta_0}(\kappa))\supseteq{\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))$. If $\sigma=\mathop{\rm ot}\nolimits(\beta_1\setminus\beta_0)=n$ is finite then $\alpha=\omega$ and the result follows from Corollary \ref{corollary_collapse} since ${\mathscr R}^\omega(\Pi^1_{\beta_0}(\kappa))={\mathscr R}^\omega(\Pi^1_{\beta_0+n}(\kappa))$. Suppose $\sigma=\mathop{\rm ot}\nolimits(\beta_1\setminus\beta_0)\geq\omega$. Then $\alpha=\sigma\cdot\omega$ is a limit of limits. Let us show that ${\mathscr R}^\xi(\Pi^1_{\beta_1}(\kappa))\subseteq{\mathscr R}^\alpha(\Pi^1_{\beta_0}(\kappa))$ for each limit $\xi<\alpha$. Fix a limit ordinal $\xi<\alpha$. For ordinals $\zeta<\alpha$ we define $i(\zeta)$ to be the least $i<\omega$ such that $\zeta<\sigma\cdot i$. Notice that if we let $\gamma$ be the greatest limit ordinal which is less than or equal to $\sigma$ then $\beta_1=\beta_0+\sigma\leq\beta_0+\gamma+2m+1<\beta_0+\sigma\cdot 2$ for some odd natural number $2m+1<\omega$. Now, by Theorem \ref{theorem_indescribability_in_infinite_ramseyness}, we have
\begin{align}
\Pi^1_{\beta_1}(\kappa)\subseteq\Pi^1_{\beta_0+\gamma+2m+1}(\kappa)\subseteq{\mathscr R}^{\gamma+m+1}(\Pi^1_{\beta_0}(\kappa)).\label{equation_ind}
\end{align}
Applying the Ramsey operator $\xi$ times to (\ref{equation_ind}) yields
\[{\mathscr R}^\xi(\Pi^1_{\beta_1}(\kappa))\subseteq{\mathscr R}^{\gamma+m+1+\xi}(\Pi^1_{\beta_0}(\kappa)).\]
Since $\xi$ is a limit ordinal we have $\gamma+m+1+\xi=\gamma+\xi$, and since $i(\gamma)+i(\xi)<\omega$ we have $\gamma+\xi<\sigma\cdot i(\gamma)+\sigma\cdot i(\xi)<\sigma\cdot\omega=\alpha$. Thus ${\mathscr R}^\xi(\Pi^1_{\beta_1}(\kappa))\subseteq{\mathscr R}^{\alpha}(\Pi^1_{\beta_0}(\kappa))$.
Next, let us show that if $\hat{\alpha}<\alpha$ then ${\mathscr R}^{\hat{\alpha}}({\mathbb P}i^1_{\mathfrak{b}eta_0}(\kappa)){\mathop{\rm sub}}setneq{\mathscr R}^{\hat{\alpha}}({\mathbb P}i^1_{\mathfrak{b}eta_1}(\kappa))$. If $\sigma=\mathop{\rm ot}\nolimits(\mathfrak{b}eta_1\setminus\mathfrak{b}eta_0)$ is finite, in which case $\alpha=\omega$, then the result follows from Theorem \ref{theorem_proper_containments_in_finite_diagram}. On the other hand, if $\sigma$ is infinite, then $\alpha=\sigma\cdot\omega>\omega$ and $\alpha$ is a limit of limits. Let $\mathfrak{b}ar{\alpha}$ be a limit ordinal with $\hat{\alpha}<\mathfrak{b}ar{\alpha}+1<\alpha$. It suffices to show that ${\mathscr R}^{\mathfrak{b}ar{\alpha}+1}({\mathbb P}i^1_{\mathfrak{b}eta_0}(\kappa)){\mathop{\rm sub}}setneq{\mathscr R}^{\mathfrak{b}ar{\alpha}+1}({\mathbb P}i^1_{\mathfrak{b}eta_1}(\kappa))$. Let
\[S=\{\xi<\kappa\mid\xi\in{\mathscr R}^{\mathfrak{b}ar{\alpha}+1}({\mathbb P}i^1_{\mathfrak{b}eta_0}(\xi))\}.\]
Since $\kappa\in{\mathscr R}^\alpha({\mathbb P}i^1_{\mathfrak{b}eta_1}(\kappa))^+$, it follows from Lemma \ref{lemma_set_of_nons_is_positive} that $S\notin{\mathscr R}^{\mathfrak{b}ar{\alpha}+1}({\mathbb P}i^1_{\mathfrak{b}eta_0}(\kappa))$. Furthermore, by Lemma \ref{lemma_complexity}, the fact that $S\notin{\mathscr R}^{\mathfrak{b}ar{\alpha}+1}({\mathbb P}i^1_{\mathfrak{b}eta_0}(\kappa))$ is ${\mathbb P}i^1_{\mathfrak{b}eta_0+\mathfrak{b}ar{\alpha}+2}$-expressible over $V_\kappa$ and so the set $C=\kappa\setminus S$ is in ${\mathbb P}i^1_{\mathfrak{b}eta_0+\mathfrak{b}ar{\alpha}+2}(\kappa)^*$.
By Theorem \ref{theorem_indescribability_in_infinite_ramseyness}, $\Pi^1_{\beta_0+\sigma+\bar{\alpha}+1}(\kappa)\subseteq{\mathscr R}^{\bar{\alpha}+1}(\Pi^1_{\beta_1}(\kappa))$. Since $\bar{\alpha}<\alpha=\sigma\cdot\omega$, it follows that $\beta_0+\bar{\alpha}+2<\beta_0+\sigma+\bar{\alpha}+1$ and thus $\Pi^1_{\beta_0+\bar{\alpha}+2}(\kappa)\subseteq{\mathscr R}^{\bar{\alpha}+1}(\Pi^1_{\beta_1}(\kappa))$. This implies that $C\in{\mathscr R}^{\bar{\alpha}+1}(\Pi^1_{\beta_1}(\kappa))^*$ and thus $S\in {\mathscr R}^{\bar{\alpha}+1}(\Pi^1_{\beta_1}(\kappa))$.
\end{proof}
\begin{figure}
\centering
\begin{tikzpicture}[x=0.18cm,y=0.18cm]
\tiny
\foreach \i in {0,10,20,40} {
\foreach \x in {1,...,6} {
\foreach \y in {1,...,6} {
\node[circle,draw=black, fill=black, inner sep=0pt,minimum size=1pt] (a\i\x\y) at ({6*(6-6*pow(\x+1,-0.1))-1.5+\i},{6*(6-6*pow(\y+1,-0.1))-1.5}) {};
}
}
\draw[rounded corners=0.1cm] (\i,0) rectangle (5.7+\i,5.7);
\node[circle,draw=black, fill=white, inner sep=0pt,minimum size=3pt] (w\i) at ({6*(6-6*pow(3+1,-0.1))-1.5+\i-0.3},7.5) {};
\foreach \j in {1,2.65,6} {
\draw ({6*(6-6*pow(\j+1,-0.1))-1.5+\i},6) -- (w\i);
}
\foreach \y in {2,...,6} {
\node[circle,draw=black, fill=black, inner sep=0pt,minimum size=1pt] (w\i\y) at ({6*(6-6*pow(3+1,-0.1))-1.5+\i-0.3},{7.5+6*(6-6*pow(\y+1,-0.1))-2.5}) {};
}
\node[circle,draw=black, fill=white, inner sep=0pt,minimum size=3pt] (w\i7) at ({6*(6-6*pow(3+1,-0.1))-1.5+\i-0.3},{7.5+6*(6-6*pow(7+1,-0.1))-2.5}) {};
\draw (w\i)--(w\i7);
\node (ell\i) at ({6*(6-6*pow(3+1,-0.1))-1.5+\i-0.3},15) {$\vdots$};
\draw[rounded corners=0.1cm] (-1,-1) rectangle (26.7,17);
\node[circle,draw=black, fill=white, inner sep=0pt,minimum size=3pt] (lima) at ({6*(6-6*pow(3+1,-0.1))-1.5+10-0.3},22) {};
\foreach \k in {0,10,20} {
\draw ({6*(6-6*pow(3+1,-0.1))-1.5+\k-0.3},18) -- (lima);
}
\node[circle,draw=black, fill=white, inner sep=0pt,minimum size=3pt] (limb) at ({6*(6-6*pow(3+1,-0.1))-1.5+40-0.3},17) {};
\foreach \y in {2,...,6} {
\node[circle,draw=black, fill=black, inner sep=0pt,minimum size=1pt] (lim\y) at ({6*(6-6*pow(3+1,-0.1))-1.5+40-0.3},{7.5+6*(6-6*pow(\y+1,-0.1))-2.5+10}) {};
}
\node[circle,draw=black, fill=white, inner sep=0pt,minimum size=3pt] (lim7) at ({6*(6-6*pow(3+1,-0.1))-1.5+40-0.3},{7.5+6*(6-6*pow(7+1,-0.1))-2.5+10}) {};
\draw (limb) -- (lim7);
\foreach \y in {2,...,6} {
\node[circle,draw=black, fill=black, inner sep=0pt,minimum size=1pt] (up10\y) at ({6*(6-6*pow(3+1,-0.1))-1.5+10-0.3},{7.5+6*(6-6*pow(\y+1,-0.1))-2.5+10+5}) {};
}
\node[circle,draw=black, fill=white, inner sep=0pt,minimum size=3pt] (up107) at ({6*(6-6*pow(3+1,-0.1))-1.5+10-0.3},{7.5+6*(6-6*pow(7+1,-0.1))-2.5+10+5}) {};
\foreach \y in {2,...,6} {
\node[circle,draw=black, fill=black, inner sep=0pt,minimum size=1pt] (up40\y) at ({6*(6-6*pow(3+1,-0.1))-1.5+40-0.3},{7.5+6*(6-6*pow(\y+1,-0.1))-2.5+10+5}) {};
}
\node[circle,draw=black, fill=white, inner sep=0pt,minimum size=3pt] (up407) at ({6*(6-6*pow(3+1,-0.1))-1.5+40-0.3},{7.5+6*(6-6*pow(7+1,-0.1))-2.5+10+5}) {};
\draw (lim7) -- (up407);
\draw (lima) -- (up107);
\node (fdots10) at ({6*(6-6*pow(3+1,-0.1))-1.5+10-0.3},{7.5+6*(6-6*pow(7+1,-0.1))-2.5+10+5+3}) {$\vdots$};
\node (fdots40) at ({6*(6-6*pow(3+1,-0.1))-1.5+40-0.3},{7.5+6*(6-6*pow(7+1,-0.1))-2.5+10+5+3}) {$\vdots$};
\node[circle,draw=black, fill=white, inner sep=0pt,minimum size=3pt] (fnode) at (28,38) {};
\draw (fdots10.north) -- (fnode);
\draw (fdots40.north) -- (fnode);
\node[minimum size=0pt] (upperbound) at (58,38) {};
\node (middle) at (55,19) {$\alpha=\mathop{\rm ot}\nolimits(\beta_1\setminus\beta_0)\cdot\omega$};
\node (lowerbound) at (58,0) {};
\draw (53,38) -- (upperbound);
\draw (53,0) -- (lowerbound);
\draw[->] (middle) -- (55,0);
\draw[->] (middle) -- (55,38);
}
\node (beta0) at ({6*(6-6*pow(2+1,-0.1))-1.5+10},{6*(6-6*pow(1+1,-0.1))-1.5-4}) {$\beta_0$};
\draw (a1021) -- (a1026);
\draw (a4031) -- (a4036);
\node (beta1) at ({6*(6-6*pow(3+1,-0.1))-1.5+40},{6*(6-6*pow(1+1,-0.1))-1.5-4}) {$\beta_1$};
\node (poop) at (28,40) {${\mathscr R}^\alpha(\Pi^1_{\beta_0}(\kappa))={\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))$};
\end{tikzpicture}
\caption{\tiny For $\beta_0,\beta_1<\kappa$ the ideal chains $\langle{\mathscr R}^\alpha(\Pi^1_{\beta_0}(\kappa))\mid\alpha<\kappa\rangle$ and $\langle{\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))\mid\alpha<\kappa\rangle$ become equal at $\alpha=\mathop{\rm ot}\nolimits(\beta_1\setminus\beta_0)\cdot\omega$.}\label{figure_culmination}
\end{figure}
\begin{corollary}\label{corollary_main_redundnacy}
If $\kappa\in {\mathscr R}^\kappa([\kappa]^{<\kappa})^+$ then for all $\beta_0,\beta_1<\kappa$, assuming the ideals involved are nontrivial, we have
\[{\mathscr R}^\kappa(\Pi^1_{\beta_0}(\kappa))={\mathscr R}^\kappa(\Pi^1_{\beta_1}(\kappa)).\]
\end{corollary}
As a direct corollary of Theorem \ref{theorem_culmination} we derive the following, which is the analogue of Theorem \ref{theorem_proper_containments_in_finite_diagram} (1) for the ideals ${\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$ when $\alpha>\omega$.\footnote{Below we will derive the analogue of Theorem \ref{theorem_proper_containments_in_finite_diagram} (2) for $\alpha>\omega$ as a consequence of Theorem \ref{theorem_infinite_ideal_diagram}.}
\begin{corollary}
Suppose $\beta_0<\beta_1$ are in $\{-1\}\cup\kappa$. If $\alpha<\mathop{\rm ot}\nolimits(\beta_1\setminus\beta_0)\cdot\omega$ and $\kappa\in{\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))^+$, then ${\mathscr R}^\alpha(\Pi^1_{\beta_0}(\kappa))\subsetneq{\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))$.
\end{corollary}
Next, we show that for $\omega\leq\alpha<\kappa$ and $\beta_0<\beta_1<\kappa$, the hypothesis $\kappa\in{\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))^+$ implies that there are many $\xi<\kappa$ which satisfy $\xi\in{\mathscr R}^\alpha(\Pi^1_{\beta_0}(\xi))^+$, \emph{assuming $\beta_0$ and $\beta_1$ are far enough apart}. Thus, the hypotheses of the form $\kappa\notin{\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$ provide a strictly increasing refinement of Feng's original hierarchy (see \textsc{Figure}\ \ref{figure_a_refinement_of_the_ramsey_hierarchy}).
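For instance (an illustrative choice of parameters of our own, not taken from the text), if $\alpha=\omega\cdot 7+3$ one may take $\beta_0=\omega$ and $\beta_1=\omega\cdot 2$: then $\mathop{\rm ot}\nolimits(\beta_0)\cdot\omega=\mathop{\rm ot}\nolimits(\beta_1\setminus\beta_0)\cdot\omega=\omega^2>\alpha$, so both requirements displayed in \textsc{Figure}\ \ref{figure_a_refinement_of_the_ramsey_hierarchy} are satisfied.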
\begin{theorem}\label{theorem_hierarchy_result_for_infinite_alpha}
Suppose $\beta_0<\beta_1$ are in $\{-1\}\cup\kappa$ and $\alpha<\mathop{\rm ot}\nolimits(\beta_1\setminus\beta_0)\cdot\omega$. If $\kappa\in{\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))^+$ then the set
\[\{\xi<\kappa\mid\xi\in{\mathscr R}^\alpha(\Pi^1_{\beta_0}(\xi))^+\}\]
is in ${\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))^*$.
\end{theorem}
\begin{proof}
Suppose $\alpha$ is a successor. That is, $\alpha=\bar{\alpha}+m+1$ where $\bar{\alpha}$ is a limit ordinal and $m<\omega$. Since $\kappa\in{\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))^+$ and $\beta_0<\beta_1$ we have $\kappa\in{\mathscr R}^\alpha(\Pi^1_{\beta_0}(\kappa))^+$, which is expressible by a $\Pi^1_{\beta_0+\bar{\alpha}+2(m+1)}$ sentence $\varphi$ by Lemma \ref{lemma_complexity}. Since $\alpha=\bar{\alpha}+m+1<\mathop{\rm ot}\nolimits(\beta_1\setminus\beta_0)\cdot\omega$ it follows that $\beta_0+\bar{\alpha}+2(m+1)<\beta_1+\bar{\alpha}+2m+1$.\footnote{This is because $\mathop{\rm ot}\nolimits(\beta_1\setminus\beta_0)\cdot\omega$ is a limit ordinal, and thus adding any finite number of copies of $\mathop{\rm ot}\nolimits(\beta_1\setminus\beta_0)$ to $\beta_0+\bar{\alpha}+2(m+1)$ will produce an ordinal which is less than $\beta_1$.} Now by Theorem \ref{theorem_indescribability_in_infinite_ramseyness}, we see that $\Pi^1_{\beta_1+\bar{\alpha}+2m+1}(\kappa)\subseteq{\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))$, and thus, the set
\[C=\{\xi<\kappa\mid (V_\xi,\in)\models\varphi\}=\{\xi<\kappa\mid\xi\in{\mathscr R}^\alpha(\Pi^1_{\beta_0}(\xi))^+\}\]
is in ${\mathscr R}^\alpha(\Pi^1_{\beta_1}(\kappa))^*$.
The fact that the result holds for successors easily implies that it holds for limits.
\end{proof}
\begin{figure}
\centering
\begin{tikzpicture}[x=0.75cm,y=0.75cm]
\tiny
\node (elli) at (0,-0.2) {};
\node[draw, rounded corners=0.1cm] (rwpi1-1) at (0,1)
{$\Pi_\alpha$-Ramsey};
\node (rwpi1w) at (0,2)
{$\alpha$-$\Pi^1_{\beta_0}$-Ramsey};
\node[right] (rwpi1wextra) at (rwpi1w.east) {$\longleftarrow$ choose $\beta_0<\kappa$ such that $\alpha<\mathop{\rm ot}\nolimits(\beta_0)\cdot\omega$
};
\node (rwpi1w*2) at (0,3)
{$\alpha$-$\Pi^1_{\beta_1}$-Ramsey};
\node[right] (rwpi1w*2extra) at (rwpi1w*2.east) {$\longleftarrow$ choose $\beta_1<\kappa$ such that $\alpha<\mathop{\rm ot}\nolimits(\beta_1\setminus\beta_0)\cdot\omega$};
\node[draw, rounded corners=0.1cm] (rw+1pi1-1) at (0,4.5) {$\Pi_{\alpha+1}$-Ramsey};
\node (rw+1pi1w) at (0,6) {};
\draw[-...] (elli)--(rwpi1-1);
\draw (rwpi1-1)--(rwpi1w);
\draw (rwpi1w)--(rwpi1w*2);
\draw[-...] (rwpi1w*2)--(rw+1pi1-1);
\draw[-...] (rw+1pi1-1)--(rw+1pi1w);
\end{tikzpicture}
\caption{\tiny If $\omega\leq\alpha<\kappa$ or if $\alpha<\omega$ is even, by choosing $\beta$'s appropriately, the $\alpha$-$\Pi^1_\beta$-cardinals yield a hierarchy of hypotheses strictly between Feng's \cite{MR1077260} $\Pi_\alpha$-Ramsey and $\Pi_{\alpha+1}$-Ramsey cardinals (see Theorem \ref{theorem_hierarchy_result_for_infinite_alpha}).}\label{figure_a_refinement_of_the_ramsey_hierarchy}
\end{figure}
Next we use Theorem \ref{theorem_indescribability_in_infinite_ramseyness} to show that, if substantial care is taken, Theorem \ref{theorem_finite_ideal_diagram} can, in a sense, be extended to the ideals ${\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$ for $\alpha>\omega$.
\begin{theorem}\label{theorem_infinite_ideal_diagram}
Suppose $\kappa$ is a cardinal, $\omega<\alpha<\kappa$ is a successor ordinal and $\beta<\kappa$ is an ordinal such that $\kappa\in{\mathscr R}^\alpha(\Pi^1_\beta(\kappa))^+$. Let $\delta$ be the greatest ordinal such that $\omega^\delta\leq\alpha$, and let $m,n<\omega$ and $\gamma<\omega^\delta$ be the unique ordinals such that $\alpha=\omega^\delta m+\gamma+n+1$, where $\gamma$ is a limit ordinal or zero.
\begin{enumerate}
\item If $m=1$ and $\gamma=0$ then
\[{\mathscr R}^\alpha(\Pi^1_\beta(\kappa))={\mathscr R}^{\omega^\delta+n+1}(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^{\omega^\delta+n}(\Pi^1_\beta(\kappa)))\cup{\mathscr R}^n(\Pi^1_{\beta+\omega^\delta+1}(\kappa))}.\]
\item Otherwise, if $m>1$ or $\gamma>0$, then
\begin{align*}{\mathscr R}^\alpha(\Pi^1_\beta(\kappa))&={\mathscr R}^{\omega^\delta m+\gamma+n+1}(\Pi^1_\beta(\kappa))\\
&=\overline{{\mathscr R}_0({\mathscr R}^{\omega^\delta m+\gamma+n}(\Pi^1_\beta(\kappa)))\cup{\mathscr R}^{\omega^\delta (m-1)+\gamma+n+1}(\Pi^1_{\beta+\omega^\delta}(\kappa))}.
\end{align*}
\end{enumerate}
\end{theorem}
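For orientation, here is one illustrative decomposition (our own example, not part of the theorem): if $\alpha=\omega^2\cdot 3+\omega\cdot 5+4+1$, then $\delta=2$, $m=3$, $\gamma=\omega\cdot 5$ and $n=4$, so case (2) applies.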
\begin{proof}
We proceed by induction on $\alpha$. The base case is $\alpha=\omega+1$. In this case $m=1$, $\gamma=0$ and $n=0$, so it suffices to show that
\[{\mathscr R}^{\omega+1}(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^\omega(\Pi^1_\beta(\kappa)))\cup\Pi^1_{\beta+\omega+1}(\kappa)},\]
but this follows directly from Theorem \ref{theorem_indescribability_in_infinite_ramseyness}.
We show that the result holds for $\alpha$ assuming it holds for all smaller successor ordinals. Suppose $\alpha=\omega^\delta m+\gamma+n+1$ as in the statement of the theorem.
Let us show that (1) holds. Assume $m=1$ and $\gamma=0$. If $n=0$ then the result follows directly from Theorem \ref{theorem_indescribability_in_infinite_ramseyness}. Suppose $n\geq 1$. Let
\[I=\overline{{\mathscr R}_0({\mathscr R}^{\omega^\delta+n}(\Pi^1_\beta(\kappa)))\cup{\mathscr R}^n(\Pi^1_{\beta+\omega^\delta+1}(\kappa))}.\]
To prove that (1) holds we will show that $X\in{\mathscr R}^{\omega^\delta+n+1}(\Pi^1_\beta(\kappa))^+$ if and only if $X\in I^+$.
Suppose $X\in{\mathscr R}^{\omega^\delta+n+1}(\Pi^1_\beta(\kappa))^+$. By Remark \ref{remark_ideal_generated}, it will suffice to show that $X\in {\mathscr R}_0({\mathscr R}^{\omega^\delta+n}(\Pi^1_\beta(\kappa)))^+$ and $X\in {\mathscr R}^n(\Pi^1_{\beta+\omega^\delta+1}(\kappa))^+$. By assumption, every regressive function $f:[X]^{<\omega}\to \kappa$ has a homogeneous set in ${\mathscr R}^{\omega^\delta+n}(\Pi^1_\beta(\kappa))^+$. By our inductive hypothesis we have
\[{\mathscr R}^{\omega^\delta+n}(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^{\omega^\delta+n-1}(\Pi^1_\beta(\kappa)))\cup{\mathscr R}^{n-1}(\Pi^1_{\beta+\omega^\delta+1}(\kappa))}.\]
Thus every regressive function $f:[X]^{<\omega}\to \kappa$ has a homogeneous set in ${\mathscr R}^{n-1}(\Pi^1_{\beta+\omega^\delta+1}(\kappa))^+$, in other words, $X\in{\mathscr R}^n(\Pi^1_{\beta+\omega^\delta+1}(\kappa))^+$. Now let us show that $X\in{\mathscr R}_0({\mathscr R}^{\omega^\delta+n}(\Pi^1_\beta(\kappa)))^+$. Fix a regressive function $f:[X]^{<\omega}\to\kappa$ and a club $C\subseteq\kappa$. Since $X\in{\mathscr R}^{\omega^\delta+n+1}(\Pi^1_\beta(\kappa))^+$, there is a set $H\in{\mathscr R}^{\omega^\delta+n}(\Pi^1_\beta(\kappa))^+$ homogeneous for $f$. The fact that $H\in{\mathscr R}^{\omega^\delta+n}(\Pi^1_\beta(\kappa))^+$ is expressible over $(V_\kappa,\in,H)$ by a $\Pi^1_{\beta+\omega^\delta+2n}$ sentence $\varphi$. Since $X\cap C\in{\mathscr R}^{\omega^\delta+n+1}(\Pi^1_\beta(\kappa))^+$ and, by Theorem \ref{theorem_indescribability_in_infinite_ramseyness}, ${\mathscr R}^{\omega^\delta+n+1}(\Pi^1_\beta(\kappa))^+\subseteq\Pi^1_{\beta+\omega^\delta+2n+1}(\kappa)^+$, it follows that there is a $\xi\in X\cap C$ such that $H\cap \xi\in{\mathscr R}^{\omega^\delta+n}(\Pi^1_\beta(\xi))^+$. Hence $X\in{\mathscr R}_0({\mathscr R}^{\omega^\delta+n}(\Pi^1_\beta(\kappa)))^+$.
Conversely, suppose $X\in I^+$. Let $f:[X]^{<\omega}\to \kappa$ be a regressive function. For the sake of contradiction, let us assume that every homogeneous set for $f$ is in ${\mathscr R}^{\omega^\delta+n}(\Pi^1_\beta(\kappa))$. By Lemma \ref{lemma_complexity}, this is expressible over $(V_\kappa,\in,X,f)$ by a $\Pi^1_{\beta+\omega^\delta+2n+1}$ sentence $\varphi$. Hence the set
\[C=\{\xi<\kappa\mid (f\upharpoonright\xi=f\cap V_\xi) \land (V_\xi,\in,X\cap V_\xi, f\cap V_\xi) \models\varphi\}\]
is in $\Pi^1_{\beta+\omega^\delta+2n+1}(\kappa)^*$. By Corollary \ref{corollary_indescribability_in_finite_ramseyness}, we have $\Pi^1_{\beta+\omega^\delta+2n+1}(\kappa)^*\subseteq{\mathscr R}^n(\Pi^1_{\beta+\omega^\delta+1}(\kappa))^*$, and so $C\in{\mathscr R}^n(\Pi^1_{\beta+\omega^\delta+1}(\kappa))^*$. Since $X\in I^+$ it follows that $X$ is not the union of a set in ${\mathscr R}_0({\mathscr R}^{\omega^\delta+n}(\Pi^1_\beta(\kappa)))$ and a set in ${\mathscr R}^n(\Pi^1_{\beta+\omega^\delta+1}(\kappa))$. Furthermore, since $X=(X\cap C)\cup(X\setminus C)$ and $X\setminus C\in{\mathscr R}^n(\Pi^1_{\beta+\omega^\delta+1}(\kappa))$, it follows that $X\cap C\in{\mathscr R}_0({\mathscr R}^{\omega^\delta+n}(\Pi^1_\beta(\kappa)))^+$. This implies that there is a $\xi\in X\cap C$ for which there is a set $H\subseteq X\cap C\cap\xi$ in ${\mathscr R}^{\omega^\delta+n}(\Pi^1_\beta(\xi))^+$ homogeneous for $f$. This contradicts the fact that $\xi\in C$. This establishes that (1) holds.
To show that (2) holds, suppose $m>1$ or $\gamma>0$. Let
\[I=\overline{{\mathscr R}_0({\mathscr R}^{\omega^\delta m+\gamma+n}(\Pi^1_\beta(\kappa)))\cup{\mathscr R}^{\omega^\delta (m-1)+\gamma+n+1}(\Pi^1_{\beta+\omega^\delta}(\kappa))}.\]
We will prove that $X\in{\mathscr R}^{\omega^\delta m+\gamma+n+1}(\Pi^1_\beta(\kappa))^+$ if and only if $X\in I^+$.
Suppose $X\in{\mathscr R}^{\omega^\delta m+\gamma+n+1}(\Pi^1_\beta(\kappa))^+$. This implies that every regressive function $f:[X]^{<\omega}\to \kappa$ has a homogeneous set in ${\mathscr R}^{\omega^\delta m+\gamma+n}(\Pi^1_\beta(\kappa))^+$. We will show that
\begin{align}
{\mathscr R}^{\omega^\delta m+\gamma+n}(\Pi^1_\beta(\kappa))^+\subseteq{\mathscr R}^{\omega^\delta (m-1)+\gamma+n}(\Pi^1_{\beta+\omega^\delta}(\kappa))^+.\tag{$*$}\label{equation_complicated_containment}
\end{align}
If $n\geq 1$ then by applying our inductive hypothesis to the successor ordinal $\alpha'=\omega^\delta m+\gamma+n<\alpha$, we obtain
\[{\mathscr R}^{\omega^\delta m+\gamma+n}(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^{\omega^\delta m+\gamma+n-1}(\Pi^1_\beta(\kappa)))\cup {\mathscr R}^{\omega^\delta(m-1)+\gamma+n}(\Pi^1_{\beta+\omega^\delta}(\kappa))}\]
and thus (\ref{equation_complicated_containment}) holds. If $n=0$, to prove (\ref{equation_complicated_containment}) we must show that
\[{\mathscr R}^{\omega^\delta m+\gamma}(\Pi^1_\beta(\kappa))\supseteq{\mathscr R}^{\omega^\delta(m-1)+\gamma}(\Pi^1_{\beta+\omega^\delta}(\kappa)).\]
Choose $Z\in{\mathscr R}^{\omega^\delta (m-1)+\gamma}(\Pi^1_{\beta+\omega^\delta}(\kappa))$. Then there is a successor ordinal $\eta+k+1<\gamma$, where $\eta$ is a limit ordinal and $k<\omega$, such that $Z\in{\mathscr R}^{\omega^\delta(m-1)+\eta+k+1}(\Pi^1_{\beta+\omega^\delta}(\kappa))$. By our inductive hypothesis applied to the successor ordinal $\alpha'=\omega^\delta m+\eta+k+1<\alpha$, we have
\[{\mathscr R}^{\omega^\delta m+\eta+k+1}(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^{\omega^\delta m+\eta+k}(\Pi^1_\beta(\kappa)))\cup{\mathscr R}^{\omega^\delta(m-1)+\eta+k+1}(\Pi^1_{\beta+\omega^\delta}(\kappa))}\]
and thus $Z\in{\mathscr R}^{\omega^\delta m+\eta+k+1}(\Pi^1_\beta(\kappa))\subseteq{\mathscr R}^{\omega^\delta m+\gamma}(\Pi^1_\beta(\kappa))$. This establishes (\ref{equation_complicated_containment}), which implies that every regressive function $f:[X]^{<\omega}\to \kappa$ has a homogeneous set in ${\mathscr R}^{\omega^\delta(m-1)+\gamma+n}(\Pi^1_{\beta+\omega^\delta}(\kappa))^+$, and hence $X\in{\mathscr R}^{\omega^\delta(m-1)+\gamma+n+1}(\Pi^1_{\beta+\omega^\delta}(\kappa))^+$.
Next, let us show that $X\in{\mathscr R}_0({\mathscr R}^{\omega^\delta m+\gamma+n}(\Pi^1_\beta(\kappa)))^+$. Fix a regressive function $f:[X]^{<\omega}\to \kappa$ and a club $C\subseteq\kappa$. Since $X\in{\mathscr R}^{\omega^\delta m+\gamma+n+1}(\Pi^1_\beta(\kappa))^+$, there is a set $H\in{\mathscr R}^{\omega^\delta m+\gamma+n}(\Pi^1_\beta(\kappa))^+$ homogeneous for $f$. By Lemma \ref{lemma_complexity}, the fact that $H\in{\mathscr R}^{\omega^\delta m+\gamma+n}(\Pi^1_\beta(\kappa))^+$ can be expressed over $(V_\kappa,\in,X,f,H)$ by a $\Pi^1_{\beta+\omega^\delta m+\gamma+2n+1}$ sentence $\varphi$. Since $X\cap C\in{\mathscr R}^{\omega^\delta m+\gamma+n+1}(\Pi^1_\beta(\kappa))^+$ and, by Theorem \ref{theorem_indescribability_in_infinite_ramseyness},
\[{\mathscr R}^{\omega^\delta m+\gamma+n+1}(\Pi^1_\beta(\kappa))^+\subseteq\Pi^1_{\beta+\omega^\delta m +\gamma+2n+1}(\kappa)^+,\] it follows that there is a $\xi\in X\cap C$ such that $H\cap \xi\in{\mathscr R}^{\omega^\delta m +\gamma+n}(\Pi^1_\beta(\xi))^+$. This implies that $X\in{\mathscr R}_0({\mathscr R}^{\omega^\delta m+\gamma+n}(\Pi^1_\beta(\kappa)))^+$. By Remark \ref{remark_ideal_generated}, this suffices to show that $X\in I^+$.
Conversely, suppose $X\in I^+$. Fix a regressive function $f:[X]^{<\omega}\to \kappa$. For the sake of contradiction, suppose every homogeneous set for $f$ is in ${\mathscr R}^{\omega^\delta m +\gamma+n}(\Pi^1_\beta(\kappa))$. This can be expressed over $(V_\kappa,\in,X,f)$ by a $\Pi^1_{\beta+\omega^\delta m+\gamma+2n+1}$ sentence $\varphi$. Hence, the set
\[C=\{\xi<\kappa\mid (f\upharpoonright\xi=f\cap V_\xi)\land(V_\xi,\in,X\cap V_\xi,f\cap V_\xi)\models\varphi\}\]
is in $\Pi^1_{\beta+\omega^\delta m+\gamma+2n+1}(\kappa)^*$. By Theorem \ref{theorem_indescribability_in_infinite_ramseyness} we have $\Pi^1_{\beta+\omega^\delta m +\gamma+2n+1}(\kappa)\subseteq{\mathscr R}^{\omega^\delta(m-1)+\gamma+n+1}(\Pi^1_{\beta+\omega^\delta}(\kappa))$, and it follows that $C\in{\mathscr R}^{\omega^\delta(m-1)+\gamma+n+1}(\Pi^1_{\beta+\omega^\delta}(\kappa))^*$. Since $X\in I^+$, the set $X=(X\cap C)\cup(X\setminus C)$ is not the union of a set in ${\mathscr R}_0({\mathscr R}^{\omega^\delta m+\gamma+n}(\Pi^1_\beta(\kappa)))$ and a set in ${\mathscr R}^{\omega^\delta(m-1)+\gamma+n+1}(\Pi^1_{\beta+\omega^\delta}(\kappa))$; since $X\setminus C\in{\mathscr R}^{\omega^\delta(m-1)+\gamma+n+1}(\Pi^1_{\beta+\omega^\delta}(\kappa))$, it follows that $X\cap C\in{\mathscr R}_0({\mathscr R}^{\omega^\delta m+\gamma+n}(\Pi^1_\beta(\kappa)))^+$. This implies that there is a $\xi\in X\cap C$ for which there is a set $H\subseteq X\cap C\cap \xi$ in ${\mathscr R}^{\omega^\delta m +\gamma+n}(\Pi^1_\beta(\xi))^+$ homogeneous for $f$. This contradicts $\xi\in C$. This establishes (2).
\end{proof}
An argument similar to that of Theorem \ref{theorem_proper_containments_in_finite_diagram} can be used to show that the ideal containments suggested by the statement of Theorem \ref{theorem_infinite_ideal_diagram} are proper.
\begin{theorem}
Under the hypotheses of Theorem \ref{theorem_infinite_ideal_diagram}, the following hold.
\begin{enumerate}
\item If $m=1$ and $\gamma=0$ then
\[{\mathscr R}^n(\Pi^1_{\beta+\omega^\delta+1}(\kappa))\subsetneq{\mathscr R}^{\omega^\delta+n+1}(\Pi^1_\beta(\kappa)).\]
\item If $m>0$ or $\gamma>0$ then
\[{\mathscr R}^{\omega^\delta(m-1)+\gamma+n+1}(\Pi^1_{\beta+\omega^\delta}(\kappa))\subsetneq{\mathscr R}^{\omega^\delta m+\gamma+n+1}(\Pi^1_\beta(\kappa)).\]
\end{enumerate}
\end{theorem}
\begin{proof}
Since the containments follow easily from Theorem \ref{theorem_infinite_ideal_diagram}, it remains to show that they are proper.
For (1), let $S=\{\xi<\kappa\mid\xi\in {\mathscr R}^n(\Pi^1_{\beta+\omega^\delta+1}(\xi))\}$. By Lemma \ref{lemma_set_of_nons_is_positive}, $S\in{\mathscr R}^n(\Pi^1_{\beta+\omega^\delta+1}(\kappa))^+$. By Corollary \ref{corollary_ramseyness_reflects_indescribability}, it follows that $\kappa\setminus S\in {\mathscr R}^{n+1}([\kappa]^{<\kappa})^*\subseteq {\mathscr R}^{\omega^\delta+n+1}([\kappa]^{<\kappa})^*\subseteq {\mathscr R}^{\omega^\delta+n+1}(\Pi^1_\beta(\kappa))^*$. Thus $S\in{\mathscr R}^{\omega^\delta+n+1}(\Pi^1_\beta(\kappa))\setminus {\mathscr R}^n(\Pi^1_{\beta+\omega^\delta+1}(\kappa))$.
The argument for (2) is similar.
\end{proof}
\section{Generic embeddings}\label{section_generic_embeddings}
By considering properties of generic ultrapowers obtained by forcing with large cardinal ideals, we obtain characterizations of such ideals in terms of generic elementary embeddings. In what follows we obtain generic embedding characterizations of $\Pi^1_\beta$-indescribable as well as Ramsey subsets of cardinals. It should not be hard to find \emph{small embedding} \cite{MR3913154} characterizations of $\Pi^1_\beta$-indescribable sets which resemble our generic embeddings. However, it is not clear whether the characterization of Ramsey sets in Theorem \ref{theorem_generic_embedding_characterization_of_ramseyness} below can be rephrased in terms of small embeddings. Thus, the following question of \cite{MR3913154} remains open: can one characterize Ramsey cardinals using small embeddings?
Before providing a motivating example, let us recall a few basic facts about generic ultrapowers. If $\kappa$ is a regular uncountable cardinal, $I$ is an ideal on $\kappa$ and $S\in I^+$, then $I\upharpoonright S=\{X\subseteq\kappa\mid X\cap S\in I\}$ is an ideal on $\kappa$ extending $I$; notice that $S\in (I\upharpoonright S)^*$. We write $P(\kappa)/I$ to denote the usual atomless\footnote{We write $P(\kappa)/I$ when we really mean $P(\kappa)/I-\{[\varnothing]\}$.} boolean algebra obtained from $I$. If $G$ is $(V,P(\kappa)/I)$-generic then we let $U_G$ be the canonical $V$-ultrafilter obtained from $G$ extending the dual filter $I^*$. The appropriate version of {\L}o\'{s}'s Theorem can be easily verified, and thus we obtain a canonical generic elementary embedding $j:V\to V^\kappa/U_G$ in $V[G]$, where $j(x)=[\alpha\mapsto x]_{U_G}$. If $I$ is a normal ideal then the generic ultrafilter $U_G$ is $V$-normal and the critical point of the corresponding, possibly illfounded, generic ultrapower $j:V\to V^\kappa/U_G\subseteq V[G]$ is $\kappa$. When $I$ is a normal ideal, the corresponding generic ultrapower embedding $j$ is wellfounded on the ordinals up to $\kappa^+$. See \cite[Lemma 22.14]{MR1940513} or \cite[Section 2]{MR2768692} for more details.
\begin{definition}
When we say \emph{there is a generic elementary embedding $j:V\to M\subseteq V[G]$} we mean that there is some forcing poset ${\mathbb P}$ such that whenever $G$ is $(V,{\mathbb P})$-generic then, in $V[G]$, there are definable classes $M$, $E$ and $j$ such that $j:(V,\in)\to (M,E)\subseteq V[G]$ is an elementary embedding, where $(M,E)$ is possibly not wellfounded.
\end{definition}
The following proposition is an easy application of generic ultrapowers obtained by forcing with $P(\kappa)/{\mathop{\rm NS}}_\kappa$.
\begin{proposition}[Folklore]\label{proposition_stationarity_characterizations}
Suppose $\kappa>\omega$ is a regular cardinal. The following are equivalent.
\begin{enumerate}
\item $S\subseteq\kappa$ is stationary.
\item There is a generic elementary embedding $j:V\to M\subseteq V[G]$ with critical point $\kappa$ such that $\kappa \in j(S)$.
\end{enumerate}
\end{proposition}
It is natural to wonder: to what extent can Proposition \ref{proposition_stationarity_characterizations} be generalized from the nonstationary ideal to other natural ideals, such as ideals associated to certain large cardinals?
\begin{proposition}\label{proposition_generic_embedding_indescribability}
Suppose $\kappa$ is a cardinal, $\beta<\kappa$ is an ordinal and $S\subseteq\kappa$. The following are equivalent.
\begin{enumerate}
\item $S$ is $\Pi^1_\beta$-indescribable.
\item There is a generic elementary embedding $j:V\to M\subseteq V[G]$ with critical point $\kappa$ such that $\kappa\in j(S)$ and for all $A\in V_{\kappa+1}^V$ and all $\Pi^1_\beta$ sentences $\varphi$ we have
\[((V_\kappa,\in,A)\models\varphi)^V\implies ((V_\kappa,\in,A)\models\varphi)^M.\]
\end{enumerate}
\end{proposition}
\begin{proof}
Suppose $S$ is $\Pi^1_\beta$-indescribable. Let $G\subseteq P(\kappa)/(\Pi^1_\beta(\kappa)\upharpoonright S)$ be generic over $V$ and let $j:V\to M:=V^\kappa/G$ be the corresponding generic ultrapower embedding. Since $S\in G$ we have $\kappa\in j(S)$. Fix $A\in V_{\kappa+1}^V$ and fix a $\Pi^1_\beta$ sentence $\varphi$ such that $((V_\kappa,\in,A)\models\varphi)^V$. Since the set
\[C:=\{\xi<\kappa\mid(V_\xi,\in,A\cap V_\xi)\models\varphi\}\]
is in the filter $\Pi^1_\beta(\kappa)^*$, it follows that $S\cap C\in (\Pi^1_\beta(\kappa)\upharpoonright S)^*\subseteq G$, and thus $\kappa\in j(C)$. This implies $((V_\kappa,\in,A)\models\varphi)^M$.
Conversely, suppose $j:V\to M$ is a generic elementary embedding satisfying $(2)$. Let us show that $S$ is $\Pi^1_\beta$-indescribable. Fix an $A\in V_{\kappa+1}^V$ and a $\Pi^1_\beta$ sentence $\varphi$ such that $((V_\kappa,\in,A)\models\varphi)^V$. By elementarity and by (2), there is some $\xi\in S$ such that $((V_\xi,\in,A\cap V_\xi)\models\varphi)^V$, thus $S$ is $\Pi^1_\beta$-indescribable.
\end{proof}
Let us show that the ideals ${\mathscr R}^m(\Pi^1_\beta(\kappa))$ can be characterized in terms of generic elementary embeddings. Taking $m=1$ and $\beta=-1$ in the following theorem yields a characterization of the Ramsey ideal and of Ramsey cardinals.
\begin{theorem}\label{theorem_generic_embedding_characterization_of_ramseyness}
Suppose $\kappa$ is a cardinal, $1\leq m<\omega$, $\beta\in\{-1\}\cup\kappa$ and $S\subseteq\kappa$. The following are equivalent.
\begin{enumerate}
\item $S\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$
\item There is a generic elementary embedding $j:V\to M$ with critical point $\kappa$ such that $\kappa\in j(S)$ and the following properties hold.
\begin{enumerate}
\item For all $A\in V_{\kappa+1}^V$ and all $\Pi^1_{\beta+2m}$ sentences $\varphi$ we have
\[((V_\kappa,\in,A)\models\varphi)^V\implies ((V_\kappa,\in,A)\models\varphi)^M.\]
\item For all regressive functions $f:[S]^{<\omega}\to \kappa$ in $V$ we have
\[M\models(\exists H\in {\mathscr R}^{m-1}(\Pi^1_\beta(\kappa))^+)(\text{$H$ is homogeneous for $f$}).\]
\end{enumerate}
\end{enumerate}
\end{theorem}
\begin{proof}
Suppose $S\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$. Let $G\subseteq P(\kappa)/({\mathscr R}^m(\Pi^1_\beta(\kappa))\upharpoonright S)$ be generic over $V$ and let $j:V\to M:=V^\kappa/G$ be the corresponding generic ultrapower. Since $S\in G$ we have $\kappa\in j(S)$. By Corollary \ref{corollary_indescribability_in_finite_ramseyness}, we have
\[{\mathscr R}^m(\Pi^1_\beta(\kappa))=\overline{{\mathscr R}_0({\mathscr R}^{m-1}(\Pi^1_\beta(\kappa)))\cup\Pi^1_{\beta+2m}(\kappa)}.\]
Since $\Pi^1_{\beta+2m}(\kappa)^*\subseteq{\mathscr R}^m(\Pi^1_\beta(\kappa))^*\subseteq G$ it follows, by an argument similar to that in the proof of Proposition \ref{proposition_generic_embedding_indescribability}, that (a) holds. Fix a regressive function $f:[S]^{<\omega}\to \kappa$ in $V$. Since $S\in{\mathscr R}^m(\Pi^1_\beta(\kappa))^+$ there is a set $H\in P(S)\cap{\mathscr R}^{m-1}(\Pi^1_\beta(\kappa))^+\cap V$ which is homogeneous for $f$. Clearly $H=j(H)\cap \kappa$ and $f=j(f)\cap (\kappa\times\kappa)$ are in $M$, and $M$ thinks that $H$ is homogeneous for $f$. By Lemma \ref{lemma_complexity} the fact that $H\in{\mathscr R}^{m-1}(\Pi^1_\beta(\kappa))^+$ is expressible by a $\Pi^1_{\beta+2m}$ sentence over $V_\kappa$, and thus by (a) we see that $M\models$ ``$H\in{\mathscr R}^{m-1}(\Pi^1_\beta(\kappa))^+$''.
Conversely, suppose (2) holds. Fix a regressive function $f:[S]^{<\omega}\to \kappa$ in $V$. For the sake of contradiction suppose that in $V$, every subset of $S$ which is homogeneous for $f$ is in the ideal ${\mathscr R}^{m-1}(\Pi^1_\beta(\kappa))$. By Lemma \ref{lemma_complexity}, this can be expressed by a $\Pi^1_{\beta+2m}$ sentence over $V_\kappa$, thus by (2)(a), $M$ thinks that every homogeneous set for $f$ is in the ideal ${\mathscr R}^{m-1}(\Pi^1_\beta(\kappa))$. This contradicts (2)(b).
\end{proof}
Using Lemma \ref{lemma_complexity} and Theorem \ref{theorem_indescribability_in_infinite_ramseyness}, an argument similar to that of Theorem \ref{theorem_generic_embedding_characterization_of_ramseyness} gives a generic embedding characterization of certain ideals of the form ${\mathscr R}^\alpha(\Pi^1_\beta(\kappa))$ for $\alpha>\omega$.
\begin{thebibliography}{AHKZ77}
\bibitem[AHKZ77]{MR0460120}
F.~G. Abramson, L.~A. Harrington, E.~M. Kleinberg, and W.~S. Zwicker.
\newblock Flipping properties: a unifying thread in the theory of large
cardinals.
\newblock {\em Ann. Math. Logic}, 12(1):25--58, 1977.
\bibitem[Bag19]{MR3894041}
Joan Bagaria.
\newblock Derived topologies on ordinals and stationary reflection.
\newblock {\em Trans. Amer. Math. Soc.}, 371(3):1981--2002, 2019.
\bibitem[Bau75]{MR0384553}
J.~E. Baumgartner.
\newblock Ineffability properties of cardinals. {I}.
\newblock pages 109--130. Colloq. Math. Soc. J\'anos Bolyai, Vol. 10, 1975.
\bibitem[Bau77]{MR0540770}
James~E. Baumgartner.
\newblock Ineffability properties of cardinals. {II}.
\newblock In {\em Logic, foundations of mathematics and computability theory
({P}roc. {F}ifth {I}nternat. {C}ongr. {L}ogic, {M}ethodology and {P}hilos. of
{S}ci., {U}niv. {W}estern {O}ntario, {L}ondon, {O}nt., 1975), {P}art {I}},
pages 87--106. Univ. Western Ontario Ser. Philos. Sci., Vol. 9. Reidel,
Dordrecht, 1977.
\bibitem[Bri]{Brickhill:Thesis}
Hazel Brickhill.
\newblock {\em {G}eneralising the {N}otions of {C}losed {U}nbounded and
{S}tationary {S}et}.
\newblock Thesis (Ph.D.)--University of Bristol.
\bibitem[BW]{BrickhillWelch}
Hazel Brickhill and Philip~D. Welch.
\newblock Generalisations of stationarity, closed and unboundedness and of
{J}ensen's {$\Box$}.
\newblock (\emph{preprint}).
\bibitem[CG15]{MR3348040}
Brent Cody and Victoria Gitman.
\newblock Easton's theorem for {R}amsey and strongly {R}amsey cardinals.
\newblock {\em Ann. Pure Appl. Logic}, 166(9):934--952, 2015.
\bibitem[CGH]{carmody_gitman_habic}
Erin Carmody, Victoria Gitman, and Miha Habi\v{c}.
\newblock A {M}itchell-like order for {R}amsey and {R}amsey-like cardinals.
\newblock (\emph{to appear in Fundamenta Mathematicae}).
\bibitem[EH58]{MR95124}
P.~Erd{\H o}s and A.~Hajnal.
\newblock On the structure of set-mappings.
\newblock {\em Acta Math. Acad. Sci. Hungar.}, 9:111--131, 1958.
\bibitem[ER52]{MR0065615}
P.~Erd{\H o}s and R.~Rado.
\newblock Combinatorial theorems on classifications of subsets of a given set.
\newblock {\em Proc. London Math. Soc. (3)}, 2:417--439, 1952.
\bibitem[ER56]{MR81864}
P.~Erd{\H o}s and R.~Rado.
\newblock A partition calculus in set theory.
\newblock {\em Bull. Amer. Math. Soc.}, 62:427--489, 1956.
\bibitem[ET43]{MR0008249}
P.~Erd{\H o}s and A.~Tarski.
\newblock On families of mutually exclusive sets.
\newblock {\em Ann. of Math. (2)}, 44:315--329, 1943.
\bibitem[Fen90]{MR1077260}
Qi~Feng.
\newblock A hierarchy of {R}amsey cardinals.
\newblock {\em Ann. Pure Appl. Logic}, 49(3):257--277, 1990.
\bibitem[For10]{MR2768692}
Matthew Foreman.
\newblock Ideals and generic elementary embeddings.
\newblock In {\em Handbook of set theory. {V}ols. 1, 2, 3}, pages 885--1147.
Springer, Dordrecht, 2010.
\bibitem[Git07]{MR2710923}
Victoria Gitman.
\newblock {\em Applications of the proper forcing axiom to models of {P}eano
arithmetic}.
\newblock ProQuest LLC, Ann Arbor, MI, 2007.
\newblock Thesis (Ph.D.)--City University of New York.
\bibitem[Git11]{MR2830415}
Victoria Gitman.
\newblock Ramsey-like cardinals.
\newblock {\em J. Symbolic Logic}, 76(2):519--540, 2011.
\bibitem[GW11]{MR2830435}
Victoria Gitman and P.~D. Welch.
\newblock Ramsey-like cardinals {II}.
\newblock {\em J. Symbolic Logic}, 76(2):541--560, 2011.
\bibitem[HL]{Holy-Lucke}
Peter Holy and Philipp L\"ucke.
\newblock Small models, large cardinals, and induced ideals.
\newblock (\emph{preprint}).
\bibitem[HLN19]{MR3913154}
Peter Holy, Philipp L\"{u}cke, and Ana Njegomir.
\newblock Small embedding characterizations for large cardinals.
\newblock {\em Ann. Pure Appl. Logic}, 170(2):251--271, 2019.
\bibitem[HS18]{MR3800756}
Peter Holy and Philipp Schlicht.
\newblock A hierarchy of {R}amsey-like cardinals.
\newblock {\em Fund. Math.}, 242(1):49--74, 2018.
\bibitem[Jec03]{MR1940513}
Thomas Jech.
\newblock {\em Set theory}.
\newblock Springer Monographs in Mathematics. Springer-Verlag, Berlin, 2003.
\newblock The third millennium edition, revised and expanded.
\bibitem[Kan03]{MR1994835}
Akihiro Kanamori.
\newblock {\em The higher infinite}.
\newblock Springer Monographs in Mathematics. Springer-Verlag, Berlin, second
edition, 2003.
\newblock Large cardinals in set theory from their beginnings.
\bibitem[Mit79]{MR534574}
William Mitchell.
\newblock Ramsey cardinals and constructibility.
\newblock {\em J. Symbolic Logic}, 44(2):260--266, 1979.
\bibitem[Ram29]{MR1576401}
F.~P. Ramsey.
\newblock On a {P}roblem of {F}ormal {L}ogic.
\newblock {\em Proc. London Math. Soc. (2)}, 30(4):264--286, 1929.
\bibitem[SNW19]{MR3922802}
Dan Saattrup~Nielsen and Philip Welch.
\newblock Games and {R}amsey-like cardinals.
\newblock {\em J. Symb. Log.}, 84(1):408--437, 2019.
\bibitem[SW11]{MR2817562}
I.~Sharpe and P.~D. Welch.
\newblock Greatly {E}rd\"{o}s cardinals with some generalizations to the
{C}hang and {R}amsey properties.
\newblock {\em Ann. Pure Appl. Logic}, 162(11):863--902, 2011.
\bibitem[Wei12]{MR2959668}
Christoph Wei\ss.
\newblock The combinatorial essence of supercompactness.
\newblock {\em Ann. Pure Appl. Logic}, 163(11):1710--1717, 2012.
\end{thebibliography}
\end{document}
\begin{document}
\title{A construction of distance cospectral graphs}
\author{Kristin Heysse\thanks{Dept.\ of Mathematics, Iowa State University, Ames, IA 50011, USA\newline ({\tt [email protected]})}}
\maketitle
\begin{abstract}
The distance matrix of a connected graph is the symmetric matrix with columns and rows indexed by the vertices and entries that are the pairwise distances between the corresponding vertices. We give a construction for graphs which differ in their edge counts yet are cospectral with respect to the distance matrix. Further, we identify a subgraph switching behavior which constructs additional distance cospectral graphs. The proofs for both constructions rely on a perturbation of (most of) the distance eigenvectors of one graph to yield the distance eigenvectors of the other.
\end{abstract}
\section{Introduction}\label{sec:introduction}
Spectral graph theory explores the relationship between a graph and the eigenvalues (i.e., spectrum) of a matrix associated with that graph. There are a handful of common ways to associate a matrix to a graph, and the spectrum of each matrix holds a variety of information about the graph (see \cite{BH}). However, each matrix also has limitations in what information its spectrum can contain. This is seen in the existence of \emph{cospectral graphs}, or graphs that are fundamentally different yet yield the same spectrum for a particular matrix.
By exploring cospectral graphs, we further our understanding of the limitations of each type of matrix. One of the most well-known constructions of cospectral graphs for the adjacency matrix is Godsil-McKay switching. This is done by defining specific subsets of the vertices of a particular graph and constructing a cospectral mate by exchanging edges and non-edges between these subsets. Godsil and McKay \cite{GM} prove the adjacency matrices of two graphs related by this edge switching are similar, and therefore the graphs are cospectral.
In this paper, we consider cospectral graphs for the \emph{distance matrix}. The distance matrix $D^{(G)}=\left[d_{ij}^{(G)}\right]$ of a connected graph $G=(V(G),E(G))$ is a symmetric matrix such that $d_{ij}^{(G)}$ is the distance, or length of the shortest path, between vertices $i$ and $j$. Its multiset of eigenvalues is the \emph{distance spectrum} of $G$ and two graphs are considered to be distance cospectral if their distance spectra are the same. There has been extensive work done on the distance spectra of graphs (see \cite{survey} for a survey of recent results).
However, relatively little is known in regard to distance cospectral pairs. McKay \cite{MK} gives a construction for distance cospectral trees by considering any rooted tree and identifying the root with the root of one of two particular trees. Further, he proves the complement graphs of trees constructed in this fashion are also distance cospectral. Both proofs rely on manipulation of the distance characteristic polynomial. This is the only known distance cospectral graph construction in the literature, and we note that pairs constructed in this manner must contain the same number of edges. In particular, prior to this paper it was not known whether a family could be constructed where distance cospectral pairs could have differing numbers of edges.
In this paper, we give a construction for distance cospectral graphs with differing numbers of edges in Section~\ref{sec:diff}, and in Section~\ref{sec:switch} we describe a local edge switching behavior which produces more distance cospectral graphs. While these distance switching pairs do not differ in number of edges, they do account for all distance cospectral pairs on seven vertices (see Figure~\ref{sevens}). Finally, in Section~\ref{sec:conc}, we consider further questions of interest for the distance matrix. We complete the introduction with an elementary discussion of graph identification, a process which will be used in subsequent sections.
\subsection{Graph Identification}
\label{ssec:GI}
Throughout our constructions, we will frequently make use of \emph{graph identification}, therefore we define it here and state some observations about distances between vertices in graphs formed in this way. Let $G,K$ be graphs and let $u \in V(G)$, $v \in V(K)$. We construct the graph $GK(u,v)$ by identifying the vertices $u$ and $v$ into a new vertex $uv$ in the graph $G\cup K$. When the context is clear, we will denote this graph $GK$.
Consider calculating the distance between two vertices $x,y$ of $GK$. We can easily do this by considering whether $x$ and $y$ are in the $G$ portion of $GK$ or the $K$ portion of $GK$.
\begin{itemize}
\item If $x,y$ are both in the $G$ portion, $d^{(GK)}_{xy}=d^{(G)}_{xy}$.
\item If $x,y$ are both in the $K$ portion, $d^{(GK)}_{xy}=d^{(K)}_{xy}$.
\item If $x$ is in the $G$ portion and $y$ is in the $K$ portion, $d^{(GK)}_{xy}=d^{(G)}_{xu}+d^{(K)}_{vy}$.
\end{itemize}
These claims can be verified by noticing that a shortest path between vertices in the same portion will be fully contained in that portion. Further, if two vertices are not in the same portion, any path between them must include the vertex $uv$.
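The three cases above can be read as a block description of $D^{(GK)}$ in terms of $D^{(G)}$ and $D^{(K)}$. The following sketch builds $D^{(GK)}$ in exactly this way; it is our own illustration, and the function name, the use of NumPy, and the vertex-ordering convention are assumptions rather than part of the text.
\begin{verbatim}
import numpy as np

def identify_distance(D_G, D_K, u, v):
    # Distance matrix of GK(u,v), given the distance matrices D_G and D_K
    # of connected graphs G and K.  Vertices of GK are ordered as: all
    # vertices of G (with u playing the role of the identified vertex uv),
    # followed by the vertices of K other than v.
    nG, nK = D_G.shape[0], D_K.shape[0]
    rest = [j for j in range(nK) if j != v]      # K-vertices that survive
    D = np.zeros((nG + nK - 1, nG + nK - 1), dtype=int)
    D[:nG, :nG] = D_G                            # both vertices in the G portion
    D[nG:, nG:] = D_K[np.ix_(rest, rest)]        # both vertices in the K portion
    cross = D_G[:, [u]] + D_K[[v], :][:, rest]   # paths through the vertex uv
    D[:nG, nG:] = cross
    D[nG:, :nG] = cross.T
    return D
\end{verbatim}
Every distance computation in the next section (for $GK$ and $HK$) follows this block pattern.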
\section{Distance cospectral graphs with differing numbers of edges}
\label{sec:diff}
Consider the two graphs $G$ and $H$ shown below, each with vertices labeled zero through nine.
\begin{figure}
\caption{Graphs $G$ (left) and $H$ (right).}
\label{GH}
\end{figure}
We immediately note $G$ has 17 edges and $H$ has 16 edges. For future reference, we give the distance matrices of both graphs below.
\[D^{(G)} =\left(\begin{array}{rrrrrrrrrr}
0 & 1 & 1 & 1 & 2 & 1 & 2 & 2 & 2 & 2 \\
1 & 0 & 2 & 2 & 3 & 1 & 3 & 1 & 2 & 3 \\
1 & 2 & 0 & 1 & 1 & 2 & 1 & 3 & 3 & 1 \\
1 & 2 & 1 & 0 & 1 & 1 & 1 & 2 & 2 & 2 \\
2 & 3 & 1 & 1 & 0 & 2 & 2 & 3 & 3 & 1 \\
1 & 1 & 2 & 1 & 2 & 0 & 2 & 1 & 1 & 3 \\
2 & 3 & 1 & 1 & 2 & 2 & 0 & 3 & 3 & 1 \\
2 & 1 & 3 & 2 & 3 & 1 & 3 & 0 & 2 & 4 \\
2 & 2 & 3 & 2 & 3 & 1 & 3 & 2 & 0 & 4 \\
2 & 3 & 1 & 2 & 1 & 3 & 1 & 4 & 4 & 0
\end{array}\right) \quad D^{(H)}=\left(\begin{array}{rrrrrrrrrr}
0 & 1 & 1 & 1 & 2 & 1 & 2 & 2 & 2 & 2 \\
1 & 0 & 2 & 2 & 3 & 1 & 3 & 1 & 2 & 3 \\
1 & 2 & 0 & 1 & 1 & 1 & 1 & 3 & 2 & 1 \\
1 & 2 & 1 & 0 & 2 & 2 & 2 & 2 & 1 & 2 \\
2 & 3 & 1 & 2 & 0 & 2 & 2 & 4 & 3 & 1 \\
1 & 1 & 1 & 2 & 2 & 0 & 2 & 2 & 1 & 2 \\
2 & 3 & 1 & 2 & 2 & 2 & 0 & 4 & 3 & 1 \\
2 & 1 & 3 & 2 & 4 & 2 & 4 & 0 & 1 & 4 \\
2 & 2 & 2 & 1 & 3 & 1 & 3 & 1 & 0 & 3 \\
2 & 3 & 1 & 2 & 1 & 2 & 1 & 4 & 3 & 0
\end{array}\right)\]
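Taking $K$ to be a single vertex in Theorem~\ref{thrm:tens} below (so that $GK=G$ and $HK=H$) gives in particular that $G$ and $H$ themselves are distance cospectral. A quick numerical sanity check of the two matrices above (our own sketch, assuming the matrices are transcribed correctly) is:
\begin{verbatim}
import numpy as np

D_G = np.array([
    [0,1,1,1,2,1,2,2,2,2], [1,0,2,2,3,1,3,1,2,3],
    [1,2,0,1,1,2,1,3,3,1], [1,2,1,0,1,1,1,2,2,2],
    [2,3,1,1,0,2,2,3,3,1], [1,1,2,1,2,0,2,1,1,3],
    [2,3,1,1,2,2,0,3,3,1], [2,1,3,2,3,1,3,0,2,4],
    [2,2,3,2,3,1,3,2,0,4], [2,3,1,2,1,3,1,4,4,0]])
D_H = np.array([
    [0,1,1,1,2,1,2,2,2,2], [1,0,2,2,3,1,3,1,2,3],
    [1,2,0,1,1,1,1,3,2,1], [1,2,1,0,2,2,2,2,1,2],
    [2,3,1,2,0,2,2,4,3,1], [1,1,1,2,2,0,2,2,1,2],
    [2,3,1,2,2,2,0,4,3,1], [2,1,3,2,4,2,4,0,1,4],
    [2,2,2,1,3,1,3,1,0,3], [2,3,1,2,1,2,1,4,3,0]])

# Compare the sorted distance spectra; True is expected.
print(np.allclose(np.sort(np.linalg.eigvalsh(D_G)),
                  np.sort(np.linalg.eigvalsh(D_H))))
\end{verbatim}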
\begin{theorem} For any graph $K$ and any vertex $v \in V(K)$, and for $u \in \{0,1\}$, the graphs $GK(u,v)$ and $HK(u,v)$ are distance cospectral.
\label{thrm:tens}
\end{theorem}
\begin{proof}
When identifying the graph $K$ onto $G$, we label the vertices of $GK$ as follows: the vertex $v$ receives the same label as $u$, and the remaining vertices of $K$ are labeled with the set $\{10,11,12,\ldots,n\}$. We label $HK$ similarly. Let $D^{(GK)}$ be the distance matrix of $GK$ and similarly $D^{(HK)}$ for $HK$. The proof given will handle the case where $u=0$; the case where $u=1$ is handled similarly.
Let $(\lambda, x)$ be an eigenpair for $D^{(GK)}$ where $\lambda \neq -\frac{1}{2}$. We claim the vector $y:=x+\Delta$ is an eigenvector of $D^{(HK)}$ for eigenvalue $\lambda$, where
\[ \Delta_i = \left\{ \begin{array}{c l} 0& i \in \{0,1,10,11,12,\ldots, n\} \\ \alpha & i \in \{2,9\} \\ -\alpha & i \in \{4,6\} \\ \beta & i \in \{3,7\} \\ -\alpha-\beta & i=5 \\ \alpha-\beta & i = 8 \end{array} \right. \]
where
\[\alpha = \frac{-x_3-x_5-x_7-x_8}{2\lambda+1} \: \text{ and } \: \beta=\frac{\lambda+1}{2\lambda+1}\left(x_5+x_8\right)-\frac{\lambda}{2\lambda+1}\left(x_3+x_7\right).\]
To prove this, we will consider $(D^{(HK)}y)_i$ for all $i$. By inspection of the two matrices, $d_{ij}^{(H)}=d_{ij}^{(G)}$ for all $j \in \{0,1,2,\ldots, 9\}$ and $i \in \{0,1\}$. A straightforward algebraic substitution and simplification proves $(D^{(HK)}y)_i=\lambda y_i$ for $i \in \{0,1\}$.
Consider $i \in \{10,11,\ldots,n\}$. We spell out the following computation in full, as similar steps will be repeated frequently.
\[ (D^{(HK)}y)_i = \sum_{j=0}^n d_{ij}^{(HK)} y_j = \sum_{j=0}^9 d_{ij}^{(HK)} (x_j+\Delta_j) +\sum_{j=10}^n d_{ij}^{(HK)} (x_j+\Delta_j) \]
We immediately break the summation into the first ten vertices and the rest, as we will need to treat each group separately. We also substitute the definition of $y$. Next, we use the observations from Section~\ref{ssec:GI} to break the distances in $HK$ into distances in $H$ and $K$, recalling that $v$ is the vertex in the graph $K$ we identify with $0$ in $H$ to create $HK$.
\begin{align*}
&= \sum_{j=0}^9 (d_{iv}^{(K)}+d_{0j}^{(H)}) (x_j+\Delta_j) +\sum_{j=10}^n d_{ij}^{(K)} (x_j+\Delta_j) \\
&= \sum_{j=0}^9 (d_{iv}^{(K)}+d_{0j}^{(G)})(x_j+\Delta_j) +\sum_{j=10}^n d_{ij}^{(K)} (x_j+\Delta_j)
\end{align*}
We can substitute $d_{0j}^{(G)}$ for $d_{0j}^{(H)}$ by inspection of the first rows of the matrices $D^{(G)}$ and $D^{(H)}$. We continue by regrouping terms and recombining sums of distances in $G$ and $K$ into distances in $GK$, again by the observations from Section~\ref{ssec:GI}.
\begin{align*}
&=\sum_{j=0}^9 (d_{iv}^{(K)}+d_{0j}^{(G)})x_j+\sum_{j=10}^n d_{ij}^{(K)} x_j+\sum_{j=0}^9 (d_{iv}^{(K)}+d_{0j}^{(G)})\Delta_j \\
&=\sum_{j=0}^n d_{ij}^{(GK)} x_j+\sum_{j=0}^9 (d_{iv}^{(K)}+d_{0j}^{(G)})\Delta_j \\
&= \lambda x_i+\alpha(3d_{iv}^{(K)}-3d_{iv}^{(K)}+d_{02}^{(G)}-d_{04}^{(G)}-d_{05}^{(G)}-d_{06}^{(G)}+d_{08}^{(G)}+d_{09}^{(G)})
\\ & \hspace{10 pt} +\beta(2d_{iv}^{(K)}-2d_{iv}^{(K)}+d_{03}^{(G)}-d_{05}^{(G)}+d_{07}^{(G)}-d_{08}^{(G)}) \\
&=\lambda x_i = \lambda y_i.
\end{align*}
The last few steps result from the fact that $(\lambda,x)$ is an eigenpair for $D^{(GK)}$ and by direct computation and substitution. This proves $(D^{(HK)}y)_i=\lambda y_i$ for $i \in \{10,11,\ldots, n\}$.
We now consider the vertices $\{2,3,\ldots,9\}$ by showing the case where $i=2$ and considering how the work generalizes. For this case, notice that $d_{2j}^{(H)}=d_{2j}^{(G)}$ for $j\not \in \{5,8\}$ and $d_{2j}^{(H)}=d_{2j}^{(G)}-1$ for $j \in \{5,8\}$.
\begin{align*}
(D^{(HK)}y)_2 &= \sum_{j=0}^n d_{2j}^{(HK)} y_j \\
&= \sum_{j=0}^9 d_{2j}^{(HK)} (x_j+\Delta_j) +\sum_{j=10}^n d_{2j}^{(HK)} (x_j+\Delta_j) \\
&= \sum_{j=0}^9 d_{2j}^{(H)} (x_j+\Delta_j) +\sum_{j=10}^n (d_{20}^{(H)}+ d_{vj}^{(K)}) (x_j+\Delta_j) \\
&= \sum_{\substack{j=0 \\ j\neq 5,8}}^9 d_{2j}^{(G)} (x_j+\Delta_j)+(d_{25}^{(G)}-1)(x_5+\Delta_5) \\
&\hspace{20pt}+(d_{28}^{(G)}-1)(x_8+\Delta_8) +\sum_{j=10}^n (d_{20}^{(G)}+d_{vj}^{(K)}) (x_j+\Delta_j) \\
&= \sum_{j=0}^9 d_{2j}^{(G)} x_j+\sum_{j=10}^n (d_{20}^{(G)}+d_{vj}^{(K)}) x_j+\sum_{j=0}^9 d^{(G)}_{2j}\Delta_j -x_5-x_8-\Delta_5-\Delta_8 \\
&= \sum_{j=0}^n d_{2j}^{(GK)} x_j+\sum_{j=0}^9 d^{(G)}_{2j}\Delta_j -x_5-x_8-\Delta_5-\Delta_8 \\
&= \lambda x_2 +\alpha(d_{22}^{(G)}-d_{24}^{(G)}-d_{25}^{(G)}-d_{26}^{(G)}+d_{28}^{(G)}+d_{29}^{(G)})
\\ & \hspace{20 pt} +\beta(d_{23}^{(G)}-d_{25}^{(G)}+d_{27}^{(G)}-d_{28}^{(G)})-x_5-x_8-\Delta_5-\Delta_8 \\
&= \lambda x_2-\beta-x_5-x_8-(-\alpha-\beta)-(\alpha-\beta) \\
&= \lambda (x_2+\alpha)-\lambda\alpha+\beta-x_5-x_8 \\
&= \lambda y_2-\lambda\alpha+\beta-x_5-x_8 \\
\end{align*}
Let $c_2$ denote the ``remainder'' term, specifically $c_2:=-\lambda\alpha+\beta-x_5-x_8$. To finish the claim that $(D^{(HK)}y)_2=\lambda y_2$, it suffices to show $c_2=0$:
\begin{align*}-\lambda \alpha +\beta-x_5-x_8 &= -\lambda\left(\frac{-x_3-x_5-x_7-x_8}{2\lambda+1}\right)-x_5-x_8\\
&\hspace{20pt} +\left(\frac{\lambda+1}{2\lambda+1}\left(x_5+x_8\right)-\frac{\lambda}{2\lambda+1}\left(x_3+x_7\right)\right) \\
&= (x_3+x_7)\left(\frac{\lambda}{2\lambda+1}-\frac{\lambda}{2\lambda+1}\right)+(x_5+x_8)\left(\frac{\lambda}{2\lambda+1}-1+\frac{\lambda+1}{2\lambda+1}\right) \\ &=0.
\end{align*}
Therefore, by the definition of $\alpha$ and $\beta$, the claim holds for $i=2$. Repeating this process, we calculate the remainder terms $c_i$ for $i \in \{3,4,\ldots,9\}$ (meaning $(D^{(HK)}y)_i=\lambda y_i+c_i$ for all $i$) in a similar fashion. These are listed below.
\begin{align*}
c_2 &= -\lambda\alpha+\beta-x_5-x_8 \\
c_3 &= -\lambda\beta -2\alpha-\beta+x_4+x_5+x_6-x_8 \\
c_4 &= \lambda\alpha + \alpha+\beta +x_3+x_7 \\
c_5 &= \lambda\alpha+\lambda\beta +3\beta-x_2+x_3+x_7-x_9 \\
c_6 &= \lambda\alpha + \alpha+\beta +x_3+x_7 \\
c_7 &= -\lambda\beta -2\alpha-\beta+x_4+x_5+x_6-x_8 \\
c_8 &= -\lambda \alpha+\lambda\beta -2\alpha+\beta-x_2-x_3-x_7-x_9 \\
c_9 &= -\lambda\alpha+\beta-x_5-x_8
\end{align*}
Similarly to the case where $i=2$, our goal is to show that all remaining $c_i$ are equal to zero. Substitution of $\alpha$ and $\beta$ suffices for $c_4$ (and hence for $c_6$, which is identical); likewise $c_9=c_2$ and $c_7=c_3$, so it remains to handle $c_3$, $c_5$, and $c_8$.
To prove $c_3$ and $c_5$, we consider combinations of particular rows of $D^{(GK)}$. We claim the following three equations hold:
\begin{equation}
2x_3+x_4+2x_5+x_6+2x_7=\lambda\left(x_2-x_4-x_5-x_6+x_8+x_9\right),
\label{rowsum1}
\end{equation}
\begin{equation}
x_2-x_3-3x_5-x_7-3x_8+x_9 = \lambda\left(-x_2-x_3+x_4+2x_5+x_6-x_7-x_9\right),
\label{rowsum2}
\end{equation}
and
\begin{equation}
x_2+x_3+x_4-x_5+x_6+x_7-3x_8+x_9 = \lambda\left(-x_3-x_7+x_5+x_8\right).
\label{rowsum3}
\end{equation}
We will only prove~\eqref{rowsum1}, as this proof can be generalized into proofs for~\eqref{rowsum2} and \eqref{rowsum3}. Let $D_i^{(GK)}$ denote the $i$th row of the matrix $D^{(GK)}$. Further, let $e_i$ be the $i$th standard row vector. Consider the following sum and difference of rows of $D^{(GK)}$
\[m:=D^{(GK)}_2-D^{(GK)}_4-D^{(GK)}_5-D^{(GK)}_6+D^{(GK)}_8+D^{(GK)}_9.\]
By definition, the $i$th entry of $m$ is
\[m_i=d^{(GK)}_{i2}-d^{(GK)}_{i4}-d^{(GK)}_{i5}-d^{(GK)}_{i6}+d^{(GK)}_{i8}+d^{(GK)}_{i9}.\]
If $i \in \{0,1,\ldots,9\}$, $d_{ij}^{(GK)}=d_{ij}^{(G)}$ for $j \in \{0,1,\ldots,9\}$, therefore the first 10 entries of $m$ can be computed directly from $D^{(G)}$. Consider $i \in \{10,11,\ldots, n\}$. Recall $d_{ij}^{(GK)}=d_{iv}^{(K)}+d_{0j}^{(G)}$ for $j \in \{0,1,\ldots,9\}$. In this case, the $i$th entry of $m$ is
\begin{align*}
m_i &=3d_{iv}^{(K)}-3d_{iv}^{(K)}+d_{02}^{(G)}-d^{(G)}_{04}-d^{(G)}_{05}-d^{(G)}_{06}+d^{(G)}_{08}+d^{(G)}_{09} \\
&= d_{02}^{(G)}-d^{(G)}_{04}-d^{(G)}_{05}-d^{(G)}_{06}+d^{(G)}_{08}+d^{(G)}_{09} \\
&=0
\end{align*}
therefore we can write the following equation
\[D^{(GK)}_2-D^{(GK)}_4-D^{(GK)}_5-D^{(GK)}_6+D^{(GK)}_8+D^{(GK)}_9=2e_3+e_4+2e_5+e_6+2e_7.\]
Because $x$ is an eigenvector of $D^{(GK)}$, $D_i^{(GK)}x=(D^{(GK)}x)_i=\lambda x_i$ for all $i$. Multiplying both sides of the equation above on the right by $x$, we see
\begin{align*}
\left(D^{(GK)}_2-D^{(GK)}_4-D^{(GK)}_5-D^{(GK)}_6+D^{(GK)}_8+D^{(GK)}_9\right)x&=\left(2e_3+e_4+2e_5+e_6+2e_7\right)x \\
\lambda(x_2-x_4-x_5-x_6+x_8+x_9)&=2x_3+2x_5+2x_7+x_4+x_6
\end{align*}
which is~\eqref{rowsum1}. Equations~\eqref{rowsum2} and~\eqref{rowsum3} follow similarly by considering appropriate row combinations.
With these three equations, we can prove $c_3$ and $c_5$ are zero. We begin the work for $c_3$ by substituting the definitions of $\alpha$ and $\beta$:
\begin{align*}
c_3 &= -\lambda\beta -2\alpha-\beta+x_4+x_5+x_6-x_8 \\
&=(-\lambda-1)\left(\frac{\lambda+1}{2\lambda+1}\left(x_5+x_8\right)-\frac{\lambda}{2\lambda+1}\left(x_3+x_7\right)\right) \\& \hspace{20 pt} -2\left(\frac{-x_3-x_5-x_7-x_8}{2\lambda+1} \right)+x_4+x_5+x_6-x_8 \\
&= \frac{\lambda^{2} {\left(x_{3} - x_{5} + x_{7} - x_{8}\right)} + \lambda{\left(x_{3} + 2 x_{4} + 2x_{6} + x_{7} - 4x_{8}\right)}}{2 \lambda + 1} \\& \hspace{20 pt} + \frac{ 2 x_{3} + x_{4} + 2 x_{5} + x_{6} + 2 x_{7}}{2\lambda+1}
\end{align*}
Consider the last term above. The numerator is the left hand side of~\eqref{rowsum1}, and we can substitute the right hand side.
\begin{align*}
&= \frac{\lambda^{2} {\left(x_{3} - x_{5} + x_{7} - x_{8}\right)} + \lambda{\left(x_{3} + 2 x_{4} + 2x_{6} + x_{7} - 4x_{8}\right)}}{2 \lambda + 1} \\& \hspace{20 pt} + \frac{\lambda\left(x_2-x_4-x_5-x_6+x_8+x_9\right)}{2\lambda+1} \\
&= \frac{\lambda^{2} {\left(x_{3} - x_{5} + x_{7} - x_{8}\right)} + \lambda{\left(x_2+x_{3} + x_{4}-x_5 + x_{6} + x_{7} - 3x_{8}+x_9\right)}}{2 \lambda + 1}
\end{align*}
Here the linear combination of terms multiplied by $\lambda$ is the left hand side of~\eqref{rowsum3}. As before, we substitute the right hand side and cancel.
\begin{align*}
&= \frac{\lambda^{2} {\left(x_{3} - x_{5} + x_{7} - x_{8}\right)} + \lambda\left({\lambda\left(-x_3-x_7+x_5+x_8\right)}\right)}{2 \lambda + 1} =0
\end{align*}
This proves $c_3=0$. A similar substitution of~\eqref{rowsum2} and \eqref{rowsum3} yields $c_5=0$. Finally, notice that $c_8=c_5-2c_6$, and thus $c_8$ is also zero. This validates the claim that $y$ is an eigenvector of $D^{(HK)}$.
We note the mapping of eigenpairs of $D^{(GK)}$ where $\lambda\neq -\frac{1}{2}$ to those of $D^{(HK)}$ where $\lambda\neq -\frac{1}{2}$ is injective. Suppose $(\lambda,x),(\lambda,x')$ are eigenpairs of $D^{(GK)}$ such that $y=y'$, or equivalently $x+\Delta=x'+\Delta'$. We will show $\Delta=\Delta'$ by showing $\alpha=\alpha'$ and $\beta=\beta'$.
\begin{align*}
y_3+y_5+y_7+y_8 &= y'_3+y'_5+y'_7+y'_8 \\
x_3+x_5+x_7+x_8+2\alpha-2\alpha+2\beta-2\beta &= x'_3+x'_5+x'_7+x'_8+2\alpha'-2\alpha'+2\beta'-2\beta' \\
x_3+x_5+x_7+x_8 &= x'_3+x'_5+x'_7+x'_8 \\
\frac{x_3+x_5+x_7+x_8}{2\lambda+1} &= \frac{x'_3+x'_5+x'_7+x'_8}{2\lambda+1} \\
-\alpha &=-\alpha' \\
\alpha &= \alpha'
\end{align*}
To prove $\beta=\beta'$, we recall that
\[-c_2=\lambda\alpha-\beta+x_5+x_8=0\]
therefore, since $\lambda \alpha= \lambda\alpha'$,
\begin{align*}
\lambda\alpha-\beta+x_5+x_8 &=\lambda\alpha'-\beta'+x_5'+x_8' \\
-\beta+x_5+x_8 &=-\beta'+x_5'+x_8' \\
-\beta+x_5-\alpha-\beta+x_8+\alpha-\beta+2\beta &=-\beta'+x_5'-\alpha'-\beta'+x_8'+\alpha'-\beta'+2\beta' \\
y_5+y_8+\beta &=y_5'+y_8'+\beta' \\
\beta &= \beta'.
\end{align*}
Therefore the mapping is injective as claimed. Further, the mapping is also surjective, as we could have started with the graph $HK$ and performed the perturbation in reverse to get eigenpairs of $D^{(GK)}$.
What remains to be considered are eigenpairs where $\lambda=-\frac{1}{2}$, if any exist. However, since the map is bijective where defined, the dimensions of the eigenspaces for all eigenvalues not equal to $-\frac{1}{2}$ must be the same for both $D^{(GK)}$ and $D^{(HK)}$. Because the sum of the dimensions of all eigenspaces must be $n$, the multiplicity of $-\frac{1}{2}$ as an eigenvalue must be the same for both $D^{(GK)}$ and $D^{(HK)}$. Therefore the dimensions of all eigenspaces are the same, and the graphs $GK$ and $HK$ are distance cospectral as claimed.
\end{proof}
We note that the theorem yields a construction for large distance cospectral families with a variety of edge counts. Consider identifying $k$ copies of $G$ at a single vertex, namely vertex $0$ of each copy. By repeated applications of the theorem, we can exchange copies of $G$ for copies of $H$ one at a time. Doing this, we construct $k+1$ graphs which are mutually distance cospectral, with edge counts $\{16k,16k+1,\ldots,17k\}$.
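For readers who wish to experiment with this construction, the following sketch (Python, using the networkx and numpy libraries; all function names are illustrative, and the base graphs $G$ and $H$ are assumed to be supplied as networkx graphs with the identification vertex labelled $0$) performs the repeated identification and compares distance spectra numerically.
\begin{verbatim}
import networkx as nx
import numpy as np

def identify(G, u, K, v, tag):
    """Identify vertex u of G with vertex v of a freshly relabelled copy of K."""
    K = nx.relabel_nodes(K, {w: (tag, w) for w in K.nodes})
    merged = nx.union(G, K)
    return nx.contracted_nodes(merged, u, (tag, v), self_loops=False)

def distance_spectrum(G):
    """Sorted eigenvalues of the distance matrix of a connected graph."""
    nodes = list(G.nodes)
    dist = dict(nx.all_pairs_shortest_path_length(G))
    D = np.array([[dist[a][b] for b in nodes] for a in nodes], dtype=float)
    return np.sort(np.linalg.eigvalsh(D))

def distance_cospectral(G, H, tol=1e-8):
    sG, sH = distance_spectrum(G), distance_spectrum(H)
    return len(sG) == len(sH) and np.allclose(sG, sH, atol=tol)

def chain(copies, v=0):
    """Identify the graphs in `copies` one after another at vertex v of the first."""
    result = copies[0]
    for i, K in enumerate(copies[1:], start=1):
        result = identify(result, v, K, v, tag=i)
    return result

# With G and H the graphs of this section (not constructed here), any two graphs
# built from k copies chosen among {G, H} and identified at vertex 0 should
# report distance_cospectral(...) == True.
\end{verbatim}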
\section{Distance switching}
\label{sec:switch}
The proof in Section~\ref{sec:diff} relied on a perturbation of the distance eigenvectors of one graph to yield the distance eigenvectors of another. In this section, we explore a similar technique when considering pairs of distance cospectral graphs related by restricted edge switching. Suppose a graph $G$ has the following two properties. First, $G$ has one of the graphs in Figure~\ref{can} as an induced subgraph.
\begin{figure}
\caption{Subgraph switching candidates}
\label{can}
\end{figure}
Second, we can partition the vertices in $V(G) \setminus \{g_1,g_2,h_1,h_2\}$ into two sets, $A$ and $B$, such that for all $v \in A$
\[ d_{vg_1}^{(G)}+d_{vg_2}^{(G)}-d_{vh_1}^{(G)}-d_{vh_2}^{(G)}=-2,\]
and for all vertices $v \in B$
\[ d_{vg_1}^{(G)}+d_{vg_2}^{(G)}-d_{vh_1}^{(G)}-d_{vh_2}^{(G)}=0.\]
We construct a new graph $H$ as follows. Let $V(H)=V(G)$, and
\[E(H)=E(G)\setminus \{(s,g_1),(s,g_2)\} \cup \{(s,h_1),(s,h_2)\}.\]
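For concreteness, the switch and the partition condition can be sketched as follows (Python with networkx; the vertex labels $s, g_1, g_2, h_1, h_2$ are placeholders for an actual instance, and the helper names are illustrative).
\begin{verbatim}
import networkx as nx

def c_value(G, v, g1, g2, h1, h2):
    """The quantity d(v,g1)+d(v,g2)-d(v,h1)-d(v,h2) used to define A and B."""
    d = nx.single_source_shortest_path_length(G, v)
    return d[g1] + d[g2] - d[h1] - d[h2]

def partition_AB(G, g1, g2, h1, h2):
    """Return (A, B) if every remaining vertex has c-value -2 or 0, else None."""
    A, B = set(), set()
    for v in set(G.nodes) - {g1, g2, h1, h2}:
        cv = c_value(G, v, g1, g2, h1, h2)
        if cv == -2:
            A.add(v)
        elif cv == 0:
            B.add(v)
        else:
            return None  # the switching construction does not apply
    return A, B

def switch(G, s, g1, g2, h1, h2):
    """Build H by exchanging the edges (s,g1),(s,g2) for (s,h1),(s,h2)."""
    H = G.copy()
    H.remove_edges_from([(s, g1), (s, g2)])
    H.add_edges_from([(s, h1), (s, h2)])
    return H
\end{verbatim}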
We note this switching is somewhat similar to Godsil-McKay switching. Godsil and McKay's construction for local switching requires a switching set $D$ and a partition of the remaining vertices into sets $\{C_i\}$ where for every vertex $v \in D$ and every set $C_i$, $v$ is either adjacent to all, none, or exactly half of the vertices in $C_i$. The switching is done by exchanging edges for non-edges between $D$ and the sets $C_i$ where the vertices in $D$ are adjacent to half of the vertices in $C_i$. See Section 2.1 of~\cite{GM} for a full explanation of the construction, including further requirements on the sets $C_i$ not stated here. If we consider the switching set $D$ to be the singleton $\{s\}$ and one of the $C_i$ of the partition to be $\{g_1,g_2,h_1,h_2\}$, the construction of $H$ can be likened to Godsil and McKay's construction.
Because $V(H)=V(G)$ and because we will be referencing distances between vertices in both $G$ and $H$, we will frequently reference the vertex set as simply $V$.
\begin{theorem} If for all $v \in B$, $d_{vu}^{(H)}=d_{vu}^{(G)}$ for all $u \in V$ and if for all $w \in A$, $d_{wu}^{(H)} = d_{wu}^{(G)}$ for all $u \in V\setminus \{g_1,g_2,h_1,h_2\}$ and
\[d_{wg_i}^{(H)}=d_{wg_i}^{(G)}+ 1 \: \text{ and } \: d_{wh_i}^{(H)}=d_{wh_i}^{(G)}- 1 \]
for $i \in \{1,2\}$, then $G$ and $H$ are distance cospectral.
\label{thrm:switching}
\end{theorem}
\begin{proof}
We first define a function $c$ on the vertices to be
\[c(v)= d_{vg_1}^{(G)}+d_{vg_2}^{(G)}-d_{vh_1}^{(G)}-d_{vh_2}^{(G)}.\]
By our assumptions on $G$ and direct computation, we can establish that
\[c(v) =\left\{ \begin{array}{r l} -2 & v \in A \\ 0 & v \in B \\ -k & v \in \{g_1,g_2\} \\ k & v \in \{h_1,h_2\} \end{array} \right. \]
where $k=1$ for the subgraph on the left in Figure~\ref{can}, $k=2$ for the subgraph in the middle of Figure~\ref{can}, and $k=0$ for the subgraph on the right of Figure~\ref{can}.
Suppose $(\lambda,x)$ is an eigenpair for the matrix $D^{(G)}$ for $\lambda\neq -k$. We claim $y:=x+\Delta$ is an eigenvector of $D^{(H)}$ for eigenvalue $\lambda$, where
\[ \Delta_i=\left\{
\begingroup \renewcommand*{\arraystretch}{1.5}
\begin{array}{c l} 0& i \not \in \{g_1,g_2,h_1,h_2\} \\ \frac{\sum_{j \in A}x_j}{\lambda+k}& i \in \{g_1,g_2\} \\ -\frac{\sum_{j \in A}x_j}{\lambda+k}& i \in \{h_1,h_2\}. \end{array}
\endgroup \right. \]
To prove $y$ is indeed an eigenvector of $D^{(H)}$, we will show $(D^{(H)}y)_i=\lambda y_i$ for each vertex $i$. First suppose $i \in B$. We immediately note that $d_{iv}^{(G)}=d_{iv}^{(H)}$ for all $v \in V$ by the hypotheses of the theorem. Further, recall $c(i)=0$ for all $i \in B$. We therefore have
\begin{align*}
(D^{(H)} y)_i & = \sum_{j \in V} d_{ij}^{(H)} y_j \\
&= \sum_{j \in V} d_{ij}^{(G)} (x_j+\Delta_j) \\
&= \sum_{j \in V} d_{ij}^{(G)}x_j + \frac{\sum_{j \in A}x_j}{\lambda+k}\left(d_{ig_1}^{(G)}+d_{ig_2}^{(G)}-d_{ih_1}^{(G)}-d_{ih_2}^{(G)}\right) \\
&= \sum_{j \in V} d_{ij}^{(G)}x_j + \frac{\sum_{j \in A}x_j}{\lambda+k}c(i) \\
&= \sum_{j \in V} d_{ij}^{(G)}x_j \\
& =\lambda x_i =\lambda y_i.
\end{align*}
Now suppose $i \in \{g_1,g_2\}$. We know that for all vertices $v \in B$, $d_{iv}^{(H)}=d_{iv}^{(G)}$. Further, for all vertices $u \in A$, $d_{ui}^{(H)}=d_{ui}^{(G)}+ 1$ and $c(i)=-k$ for $i \in \{g_1,g_2\}$. Combining these facts, we see that
\begin{align*}
(D^{(H)} y )_i & = \sum_{j \in V} d_{ij}^{(H)} y_j \\
&= \sum_{j \in A} (d_{ij}^{(G)}+1) (x_j+\Delta_j) +\sum_{j \in B} d_{ij}^{(G)} (x_j+\Delta_j) + \sum_{j \in \{g_1,g_2,h_1,h_2\} }d_{ij}^{(G)} (x_j+\Delta_j) \\
&= \sum_{j \in V}d^{(G)}_{ij}x_j +\sum_{j \in A} x_j+\sum_{j \in \{g_1,g_2,h_1,h_2\}} d^{(G)}_{ij}\Delta_j \\
& =\sum_{j \in V} d_{ij}^{(G)}x_j +\sum_{j \in A}x_j + \frac{\sum_{j \in A}x_j}{\lambda+k}\left(d_{ig_1}^{(G)}+d_{ig_2}^{(G)}-d_{ih_1}^{(G)}-d_{ih_2}^{(G)}\right) \\
& =\sum_{j \in V} d_{ij}^{(G)}x_j + \sum_{j \in A}x_j + \frac{\sum_{j \in A}x_j}{\lambda+k}c(i) \\
&=\lambda x_i + \sum_{j \in A}x_j -k \frac{\sum_{j \in A}x_j}{\lambda+k} \\
& =\lambda x_i + \frac{\lambda \sum_{j \in A}x_j}{\lambda+k} \\
&= \lambda \left(x_i+\frac{\sum_{j \in A}x_j}{\lambda+k}\right) = \lambda y_i. \\
\end{align*}
A similar algebraic computation suffices for the case $i \in \{h_1,h_2\}$.
What remains to be checked are the vertices $i \in A$. We know $d_{iu}^{(H)} = d_{iu}^{(G)}$ for all $u \in V\setminus \{g_1,g_2,h_1,h_2\}$. Further, $d_{ig_\ell}^{(H)}=d_{ig_\ell}^{(G)}+ 1$ and $d_{ih_\ell}^{(H)}=d_{ih_\ell}^{(G)}- 1$ for $\ell \in \{1,2\}$. Finally, recall that $c(i)=-2$ for all $i \in A$. Therefore
\begin{align*}
(D^{(H)} y)_i & = \sum_{j \in V} d_{ij}^{(H)} y_j \\
&= \sum_{\substack{j \in V \\ j \not\in \{g_1,g_2,h_1,h_2\}}} d_{ij}^{(G)} (x_j+\Delta_j) +\sum_{j \in \{g_1,g_2\}}(d_{ij}^{(G)}+1)(x_j+\Delta_j)+\sum_{j \in \{h_1,h_2\}}(d_{ij}^{(G)}-1)(x_j+\Delta_j) \\
&= \sum_{j \in V} d_{ij}^{(G)} x_j + x_{g_1}+x_{g_2}-x_{h_1}-x_{h_2} \\
& \hspace{1 in}+\frac{\sum_{j \in A}x_j}{\lambda+k}\left( d_{ig_1}^{(G)}+1+d_{ig_2}^{(G)}+1-(d_{ih_1}^{(G)}-1)-(d_{ih_2}^{(G)}-1)\right) \\
&= \sum_{j \in V} d_{ij}^{(G)} x_j + x_{g_1}+x_{g_2}-x_{h_1}-x_{h_2} +\frac{\sum_{j \in A}x_j}{\lambda+k}\left( d_{ig_1}^{(G)}+d_{ig_2}^{(G)}-d_{ih_1}^{(G)}-d_{ih_2}^{(G)}+4\right) \\
&= \sum_{j \in V} d_{ij}^{(G)} x_j + x_{g_1}+x_{g_2}-x_{h_1}-x_{h_2} +\frac{\sum_{j \in A}x_j}{\lambda+k}\left( c(i)+4\right) \\
&= \sum_{j \in V} d_{ij}^{(G)} x_j + x_{g_1}+x_{g_2}-x_{h_1}-x_{h_2}+\frac{2\sum_{j \in A}x_j}{\lambda+k}.
\end{align*}
We pause here to prove the following equality:
\[ x_{g_1}+x_{g_2}-x_{h_1}-x_{h_2}+\frac{2\sum_{j \in A}x_j}{\lambda+k} =0.\]
To do this, let $D_i^{(G)}$ denote the $i$th row of the matrix $D^{(G)}$ and $e_{i}$ denote the $i$th standard row vector. We claim
\[D_{g_1}^{(G)}+D_{g_2}^{(G)}-D_{h_1}^{(G)}-D_{h_2}^{(G)} = -2\sum_{j \in A} e_{j}-ke_{g_1}-ke_{g_2}+ke_{h_1}+ke_{h_2}.\]
Suppose $m :=D_{g_1}^{(G)}+D_{g_2}^{(G)}-D_{h_1}^{(G)}-D_{h_2}^{(G)}$, and consider the $j$th entry of $m$:
\[m_j = d_{jg_1}^{(G)}+d_{jg_2}^{(G)}-d_{jh_1}^{(G)}-d_{jh_2}^{(G)}.\]
This by definition is $c(j)$, and the claim follows.
With this in mind, we multiply both sides by $x$ on the right. Because $x$ is an eigenvector, we know $D_{j}^{(G)}x=(D^{(G)}x)_j=\lambda x_{j}$ for all $j$. Therefore we have
\begin{align*}
\left(D_{g_1}^{(G)}+D_{g_2}^{(G)}-D_{h_1}^{(G)}-D_{h_2}^{(G)} \right)x &= \left(-2\sum_{j \in A}e_{j}-ke_{g_1}-ke_{g_2}+ke_{h_1}+ke_{h_2}\right)x \\
\lambda x_{g_1}+\lambda x_{g_2}-\lambda x_{h_1}-\lambda x_{h_2} &=-2 \sum_{j \in A}x_j-kx_{g_1}-kx_{g_2}+kx_{h_1}+kx_{h_2} \\
(\lambda+k)(x_{g_1}+x_{g_2}-x_{h_1}-x_{h_2})&=-2\sum_{j \in A}x_j \\
x_{g_1}+x_{g_2}-x_{h_1}-x_{h_2} &= \frac{-2\sum_{j \in A}x_j}{\lambda+k}
\end{align*}
which proves the equality. Returning to our case,
\begin{align*}
(D^{(H)} y)_i & = \sum_{j \in V} d_{ij}^{(G)} x_j + x_{g_1}+x_{g_2}-x_{h_1}-x_{h_2}+\frac{2\sum_{j \in A}x_j}{\lambda+k} \\ &=\lambda x_i = \lambda y_i
\end{align*}
which finishes the case for $i \in A$. Thus the vector $y$ is an eigenvector as claimed.
We note that this mapping of eigenpairs of $D^{(G)}$ with $\lambda\neq -k$ to eigenpairs of $D^{(H)}$ with $\lambda \neq -k$ is bijective. Suppose two eigenvectors $x$ and $x'$ of $D^{(G)}$ with the same eigenvalue $\lambda \neq -k$ map to the same eigenvector $y$ of $D^{(H)}$. Then $y_i=x_i=x_i'$ for all $i \not \in \{g_1,g_2,h_1,h_2\}$. If $i \in \{g_1,g_2\}$, then
\begin{align*}
x_i+\frac{\sum_{j \in A}x_j}{\lambda+k} = y_i &= x'_i+\frac{\sum_{j \in A}x'_j}{\lambda+k} \\
x_i+\frac{\sum_{j \in A}x_j}{\lambda+k} = & \: x'_i+\frac{\sum_{j \in A}x_j}{\lambda+k} \\
x_i = &\: x'_i
\end{align*}
and similarly if $i \in \{h_1,h_2\}$. This implies $x=x'$, and the mapping is injective. Certainly the map is also surjective because we could have perturbed instead the eigenvectors of $H$. Because of this, we notice that the dimensions of all eigenspaces are the same for $D^{(G)}$ and $D^{(H)}$ for $\lambda\neq -k$.
What remains to be considered are eigenpairs $(\lambda,x)$ for $\lambda=-k$, if any exist. However, since the mapping is bijective where defined, the dimensions of the eigenspaces for all eigenvalues not equal to $-k$ are the same for both $D^{(G)}$ and $D^{(H)}$. Because the sum of the dimensions of all eigenspaces must equal the order of $G$, the multiplicity of $-k$ must be the same for both graphs. Thus the graphs $G$ and $H$ are distance cospectral as claimed.
\end{proof}
We note that while this edge switching behavior may seem restrictive, it does explain all pairs of distance cospectral graphs on seven vertices, as verified by exhaustive search. Figure~\ref{sevens} shows these graphs, arranged according to which induced subgraph from Figure~\ref{can} they contain. We point out that the vertex $s$ is shown at the bottom of every embedding.
\begin{figure}
\caption{All distance cospectral pairs on seven vertices.}
\label{sevens}
\end{figure}
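Such an exhaustive check can be sketched as follows (Python, reusing the \texttt{distance\_spectrum} helper from the earlier sketch and networkx's graph atlas of all graphs on at most seven vertices); spectra are grouped after numerical rounding, so borderline cases would need exact arithmetic to certify.
\begin{verbatim}
import networkx as nx
import numpy as np
from collections import defaultdict

groups = defaultdict(list)
for G in nx.graph_atlas_g():                 # all graphs on at most 7 vertices
    if G.number_of_nodes() == 7 and nx.is_connected(G):
        # distance_spectrum() as defined in the earlier sketch
        key = tuple(np.round(distance_spectrum(G), 6))
        groups[key].append(G)

classes = [gs for gs in groups.values() if len(gs) > 1]
print(len(classes), "numerically distance-cospectral classes on 7 vertices")
\end{verbatim}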
Further, once we have found a pair $G,H$ that follows this switching behavior, we claim that we can construct infinitely many more pairs using graph identification.
\begin{corollary} Let $G, H$ be a distance cospectral pair of graphs given by Theorem~\ref{thrm:switching}, and let $u \in V(G)\setminus \{g_1,g_2,h_1,h_2\}$. For any graph $K$ and any vertex $v \in V(K)$, the graphs $GK(u,v), HK(u,v)$ are distance cospectral.
\end{corollary}
\begin{proof}
We first require some notation. Let $A_G$ and $B_G$ be the partition of $V(G)\setminus \{g_1,g_2,h_1,h_2\}$ given by the construction preceding Theorem~\ref{thrm:switching}.
We need only to show that this new pair of graphs satisfies the original switching construction and the hypotheses of Theorem~\ref{thrm:switching}. Certainly $GK$ contains one of the induced subgraphs in Figure~\ref{can} because $G$ does.
To construct the partition $A,B$ of $GK$, we will extend the partition $A_G,B_G$ in a predictable way. Notice first that for any vertex $x$ in the $G$ portion of $GK$ and for any vertex $w \in \{g_1,g_2,h_1,h_2\}$,
\[d_{xw}^{(GK)}=d_{xw}^{(G)}\]
by the construction of the graph. Therefore, if $x \in A_G$, it follows that $x \in A$ for $GK$, and similarly for $x \in B_G$.
We now aim to partition the vertices in the $K$ portion of $GK$. Suppose $w$ is such a vertex. For any vertex $x$ in the $G$ portion of $GK$, by Section~\ref{ssec:GI}, we know
\[d_{wx}^{(GK)} = d_{wv}^{(K)}+d_{ux}^{(G)}.\]
This implies
\begin{align*}
d_{wg_1}^{(GK)}+d_{wg_2}^{(GK)}-d_{wh_1}^{(GK)}-d_{wh_2}^{(GK)} &=d_{wv}^{(K)}+d_{ug_1}^{(G)}+d_{wv}^{(K)}+d_{ug_2}^{(G)} -d_{wv}^{(K)}-d_{uh_1}^{(G)}-d_{wv}^{(K)}-d_{uh_2}^{(G)} \\ &= d_{ug_1}^{(G)}+d_{ug_2}^{(G)}-d_{uh_1}^{(G)}-d_{uh_2}^{(G)}
\end{align*}
which is either $-2$ or $0$, depending on whether $u$ is in $A_G$ or in $B_G$. Thus the necessary partition holds for all vertices in $GK$. Specifically, if $u \in A_G$, all vertices in the $K$ portion of $GK$ are in $A$. Similarly, if $u \in B_G$, all vertices in the $K$ portion of $GK$ are in $B$.
Certainly the graph $HK$ is the graph formed by the switching construction on $GK$. We now must prove that $HK$ meets the hypotheses of the theorem. We start by showing that for any two vertices $x,y$ such that neither is in $\{g_1,g_2,h_1,h_2\}$, $d_{xy}^{(GK)}=d_{xy}^{(HK)}$. Suppose $x,y$ are two such vertices. First consider the case where both are in the $K$ portion of $HK$. Then, by the construction of the graph, \[d_{xy}^{(GK)}=d_{xy}^{(K)}=d_{xy}^{(HK)}.\]
If both are in the $H$ portion of $HK$, then because $H$ meets the hypotheses of the theorem applied to the pair $G,H$, we know $d_{xy}^{(GK)}=d_{xy}^{(HK)}$. Now consider the case $x\in H$ and $y \in K$. We again use the fact that any path between vertices in $H$ and $K$ must pass through the identified vertex, and we can write
\[d_{xy}^{(HK)}=d_{xu}^{(H)}+d_{vy}^{(K)},\] which reduces to the two previous cases, one with both vertices in $H$ and one with both in $K$.
We now need to consider the distances between $\{g_1,g_2,h_1,h_2\}$ and the remaining vertices in the graph. Suppose $w \in A$. If $w$ is in the $H$ portion of $HK$, then because $H$ meets the conditions of Theorem~\ref{thrm:switching},
\[d_{wg_i}^{(HK)}=d_{wg_i}^{(GK)}+ 1 \: \text{ and } \: d_{wh_i}^{(HK)}=d_{wh_i}^{(GK)}- 1 \]
for $i \in \{1,2\}$.
If $w$ is in the $K$ portion of $HK$, then we notice that by the extension of $A_G$ and $B_G$ into $A$ and $B$, we know $u \in A_G$.
This means
\[d_{ug_i}^{(H)}=d_{ug_i}^{(G)}+ 1 \: \text{ and } \: d_{uh_i}^{(H)}=d_{uh_i}^{(G)}- 1 \]
for $i \in \{1,2\}$.
We can therefore write
\[ d_{wg_i}^{(HK)}=d_{wv}^{(K)}+d_{ug_i}^{(H)}=d_{wv}^{(K)}+d_{ug_i}^{(G)}+ 1=d_{wg_i}^{(GK)}+1\]
and
\[ d_{wh_i}^{(HK)}=d_{wv}^{(K)}+d_{uh_i}^{(H)}=d_{wv}^{(K)}+d_{uh_i}^{(G)}- 1=d_{wh_i}^{(GK)}-1\]
for $i \in \{1,2\}$.
If $w \in B$, we follow a parallel argument and use the fact that $u$ must be in $B_G$. Therefore $GK,HK$ meet the conditions of Theorem~\ref{thrm:switching}, and $GK$ and $HK$ are distance cospectral.
\end{proof}
\section{Conclusion}
\label{sec:conc}
We have established two constructions for distance cospectral pairs (and indeed, large distance cospectral families), including one where graphs have differing numbers of edges. It is interesting to note that distance cospectral graphs with differing numbers of edges are rare. Other than the graphs shown in Section~\ref{sec:diff}, there are only two such pairs on ten vertices or fewer. These are shown in Figure~\ref{fig:others}.
\begin{figure}
\caption{Distance cospectral graph pairs with differing numbers of edges.}
\label{fig:others}
\end{figure}
This emphasis on the edge count fits into a larger question of what the spectrum of a matrix can tell about the graph's structure. For well-studied matrices, the questions of whether cospectral pairs exist with differing numbers of components, or whether pairs exist where one graph is bipartite and one is not, have been answered. Only one of these questions is relevant for the distance matrix, since the distance matrix is not defined for disconnected graphs. It would be interesting to know whether distance cospectral pairs exist where one graph is bipartite and the other is not; no such pair exists on ten vertices or fewer. We hope to see exploration of this problem and more work on distance cospectral constructions in the future.
\end{document}
\begin{document}
\def\spacingset#1{\renewcommand{\baselinestretch}
{#1}\small\normalsize} \spacingset{1}
\if11
{
\title{\bf A unified framework on defining depth for point process using function smoothing}
\author{Zishen Xu, Chenran Wang, Wei Wu \\
Department of Statistics, Florida State University\\
Tallahassee, FL 32306-4330}
\maketitle
} \fi
\if01
{
\begin{center}
{\LARGE\bf Point process depth using function smoothing}
\end{center}
} \fi
\begin{abstract}
The notion of statistical depth has been extensively studied in multivariate and functional data over the past few decades. In contrast, depth on temporal point processes is still under-explored. The problem is challenging because a point process has two types of randomness: 1) the number of events in a process, and 2) the distribution of these events. Recent studies proposed depths as a weighted product of two terms describing the above two types of randomness, respectively. In this paper, we propose to unify these two randomnesses under one framework by a smoothing procedure. Basically, we transform the point process observations into functions using conventional kernel smoothing methods, and then adopt the well-known functional $h$-depth and its modified, center-based version to describe the center-outward rank in the original data. To do so, we define a proper metric on the point processes with smoothed functions. We then propose an efficient algorithm to estimate the defined ``center''. We further explore the mathematical properties of the newly defined depths and study their asymptotics. Simulation results show that the proposed depths can properly rank the point process observations. Finally, we demonstrate the new method in a classification task using a real neuronal spike train dataset.
\end{abstract}
\noindent
{\it Keywords:} statistical depth, point process, function smoothing, $h$-depth, proper metric, spike trains
\spacingset{1.5}
\section{Introduction}
The statistical depth is a method to indicate the centrality of a data point with respect to a data cloud. It can provide a center-outward structure that can be used to understand the empirical distribution, similar to the empirical quantiles in univariate data. The concept of depth was first proposed by \cite{tukey1975mathematics} to handle multivariate data. Since then the notion of depth has been widely studied by mathematicians and statisticians. Different forms of depth were proposed and analyzed, based on different usages and criteria. The types of data that those depths were mainly applied to were multivariate data and functional data. Some well-known multivariate depths include the half-space depth \citep{tukey1975mathematics}, the convex hull peeling depth \citep{barnett1976ordering}, the Oja depth \citep{oja1983descriptive}, the simplicial depth \citep{liu1990notion}, and the Mahalanobis depth \citep{liu1993quality}. A study by \cite{zuo2000general} discussed desirable properties of multivariate depth, which include affine invariance, maximality at the center, monotonicity relative to the deepest points, and vanishing at infinity. As a generalization of multivariate data from finite dimension to infinite dimension, functional data has received a lot of attention recently, and functional depths have also been well studied. To name a few, \cite{cuevas2007robust} proposed the $h$-depth, which applied the Gaussian kernel and $\mathbb{L}_2$ norm to form the depth. \cite{cuesta2008random} extended the idea of half-space depth and defined the random Tukey depth. In 2009, the band depth and modified band depth were introduced by \cite{lopez2009concept}, which are very commonly used in functional data problems. Similar to the work by \cite{zuo2000general}, \cite{nieto2016topologically} examined the desirable properties of functional depth, including distance invariance, maximality at center, strictly decreasing with respect to the deepest point, upper semi-continuity, and receptivity to convex hull width across the domain and continuity.
As a special type of data, an observation from an orderly temporal point process is made up of an ascending sequence of event times. Such an observation contains two types of randomness: the number of events and the time locations of these events. If we treat each observation as a vector of the event times, then the dimension of this vector will be a random variable. Given this dimension, the vector itself is also random, with ascending entries.
We point out that the study on the notion of depth of point process is relatively new.
So far the only existing work in this area was done in \cite{liu2017generalized}, where the depth structure was built in two steps: for an observation $s$, 1) estimate the probability of observing the number of events, $P(|s|)$; 2) given the number of events, compute the conditional depth $D(s$ $|$ $|s|)$. The estimation of the probability was done through a normalized Poisson mass function and the conditional depth adopted the Mahalanobis depth for multivariate data. The final depth was the multiplication of these two with a weight power $r$: $D(s)=P(|s|)^rD(s$ $|$ $|s|)$. This depth structure involves both types of randomness and satisfies good mathematical properties such as invariance to time-shift and linear transformation, monotonicity on rays, and upper semi-continuity. However, the combination of the randomness in the number of events and in the event time distribution is not natural, in that the impact of the number of events on the final depth must be adjusted by the hyper-parameter $r$. The selection of the hyper-parameter can be tricky because an inappropriate value will make one of the two randomnesses dominate the depth value. In addition, the two-step procedure deals with the number of events and the event times independently. A more desirable method should be able to combine both steps in one framework, where the two types of randomness can be measured at the same time.
To deal with these issues, we propose another approach to process the point process observation: a ``transformation'' through a smoothing method \citep{wand1994kernel}. The idea is to smooth the observed point process sequence with an appropriate kernel function. Through this operation, the event time vector is replaced by a function curve. In Section \ref{sec: Prop Met}, we will see that the point process observation and the function curve are one-to-one matched if the kernel function used for smoothing satisfies certain mild conditions. According to this bijective relation, we are able to apply methods for functional data to the point process observations, such as metrics on functions and functional depths. In this way, both types of randomness are taken into account under one framework. Each individual event time determines the location of a kernel function curve. Because we take the sum of the kernel functions in the smoothing procedure, a larger number of events will enhance the smoothed curve vertically. In addition, irrespective of the number of entries in the vector, its smoothed curve will be just one function. That is, vectors with different dimensions can be naturally compared.
Therefore, this approach will not suffer the same issues in the framework proposed by \cite{liu2017generalized} and it has the advantage to utilize existing depth methods for functional data.
The rest of this manuscript is organized as follows. In section \ref{sec: Methods}, we will introduce the details of transforming a vector to a smoothed curve.
We will also propose a proper metric for the space of point process observations, followed by a discussion of the properties for this metric. With this metric, we will continue to define new depth methods for the point process observations.
In section \ref{sec: AsyTheory}, we will examine the asymptotic theory of the proposed depths. In section \ref{sec: AppResult}, we will apply our methods to simulated and experimental datasets to validate the effectiveness of the new method. Finally, in section \ref{sec: SumFW}, we summarize the paper and discuss future work. All mathematical proofs and algorithmic details are given in appendices in the supplementary material.
\section{Methods}
\label{sec: Methods}
\subsection{Equivalent Representation via Function Smoothing}
Our goal is to rank point process observations on a finite interval $[0, T]$ via a kernel smoothing method. We will at first introduce the type of smoothing kernels for this purpose.
\subsubsection{Kernel Functions}
\label{basis}
The basic idea of kernel smoothing on an orderly temporal point process is to assign a kernel to each observed point and then sum over all the assigned kernels to get a smooth function. Often the kernel is a probability density function of a given distribution such as a Gaussian kernel.
The kernel function will depend on the time interval $[0, T]$ for the point process and we denote it as $K(\cdot; T)$.
In general, we propose to use any kernel function which satisfies the following four basic requirements. These requirements are needed in order to achieve good mathematical properties for the new depth function:
\begin{enumerate}
\item Continuous and non-negative: $K(\cdot; T)$: $(-\infty, \infty) \rightarrow [0, \infty)$ is continuous; \label{c1}
\item Positive at zero: $K(0; T)>0$;
\item Linear independence with shifting: for any $n$ ($n \in \mathbb N$) different values $t_1< t_2< \cdots < t_n$, we have: $\sum_{i=1}^{n} \alpha_i K(x-t_i; T) \doteq 0$ for any $x \in [0,T]$ $\iff$ $\alpha_1= \cdots = \alpha_n = 0$; \label{c3}
\item Scale invariance: for any $x \in [0,T]$ and constant $\alpha>0$, $K(\alpha x; \alpha T) \equiv K(x;T)$; \label{c4}
\end{enumerate}
We define a kernel function to be ``proper'' if it satisfies the above four conditions. As an example of a proper kernel, we can consider the following Gaussian kernel function:
\begin{equation}
K_{G}(x;T)=c_1 e^{ -\frac{c_2}{T^2}x^2 }
\label{eq:gk}
\end{equation}
where $c_1$, $c_2$ are two positive constants. This result is given in the following lemma, where the proof is given in Appendix A.
\begin{lemma}
The Gaussian kernel in Eqn. \eqref{eq:gk} is a proper kernel.
\label{lem:gk}
\end{lemma}
\subsubsection{Proper Metric}
\label{sec: Prop Met}
Before moving on to the depth function, we look for a proper metric to measure the difference between two observed point processes. Since the two point processes, in general, may have different numbers of events, multivariate measurements cannot be applied directly due to inconsistency in dimensions. Since smoothed functions are used in this depth, we will use the distance between smoothed processes as the distance between point processes. Let the time interval of the point process be $[0, T]$. We define the set of observed point processes with given dimension $l>0$ as: $\Omega_l=\{ x=(x_1,x_2, \cdots, x_l) \ \in \mathbb R^l \quad | \quad 0 \leq x_1 \leq x_2 \leq \cdots \leq x_l \leq T \}$. For the case of $l=0$, there is no observed event in $[0,T]$, so $\Omega_0$ contains only the 0-length vector, i.e. $\Omega_0=\{\phi_0\}$ where $\phi_0$ is the event time vector of no event. Then $\Omega = \cup_{l=0}^{\infty} \Omega_l$ is the space of all point processes. Each process $x=(x_1,x_2, \cdots, x_l) \in \Omega_l$ with $l>0$ can be represented using a Dirac delta function in the form:
$x = \sum_{i=1}^l \delta(\cdot - x_i). $
Let $K(\cdot; T)$ be the smoothing kernel. Then the smoothed process is a function on $[0, T]$ in the form:
\begin{equation}
f_{x}(t) = \sum_{i=1}^{l} K(t-x_i; T).
\label{eq:smooth}
\end{equation}
In general, the space of smoothed processes for $l$ events with $l>0$ is $\mathbb{F}_l=\{ f_{x}: [0,T] \to \mathbb{R} \quad | \quad f_{x}(t)=\sum_{i=1}^{l} K(t-x_i; T) \text{ where } x=(x_1,x_2, \cdots, x_l) \in \Omega_l \}$. For $l=0$,
the smoothed process is $f_{\phi_{0}}(t) \equiv 0$ and $\mathbb{F}_0=\{ f_{\phi_{0}} \}$. The space of all smoothed processes is then $\mathbb{F} = \cup_{l=0}^{\infty} \mathbb{F}_l$.
We point out that the correspondence between a point process and its smoothed version is one-to-one. This is given in the following lemma (the proof is in Appendix B).
\begin{lemma}
The smoothing procedure is a bijective mapping from $\Omega$ to $\mathbb{F}$.
\label{lem:bijec}
\end{lemma}
Now we are ready to define a metric on the point process space. This definition is based on the classical $\mathbb L^p$ norm on the smoothed processes. This is formally given as follows:
\begin{definition}
For any two point processes $s$, $t \in \Omega$ on $[0, T]$ and the correspondent $f_{s}, f_{t} \in \mathbb{F}$ given by Eqn. \eqref{eq:smooth}, we define a distance function $d_{K,p}$ between $s$ and $t$ as:
\begin{equation}
d_{K,p}(s,t)=\| f_{s}-f_{t} \|_p
\label{eq:metric}
\end{equation}
where $\|\cdot\|_p \ (p \ge 1)$ is the classical $\mathbb{L}^p$ norm on $[0, T]$.
\label{def:metric}
\end{definition}
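For illustration, a numerical sketch of the smoothing in Eqn. \eqref{eq:smooth} and of the distance in Definition \ref{def:metric} is given below (Python with numpy, assuming the Gaussian kernel of Eqn. \eqref{eq:gk} with $c_1=c_2=1$ and a Riemann-sum approximation of the $\mathbb{L}^p$ norm on a grid; the function names are illustrative).
\begin{verbatim}
import numpy as np

def K_gauss(x, T, c1=1.0, c2=1.0):
    """Proper Gaussian kernel K(x; T) = c1 * exp(-c2 * x^2 / T^2)."""
    return c1 * np.exp(-c2 * (x / T) ** 2)

def smooth(s, grid, T):
    """Smoothed process f_s(t) = sum_i K(t - s_i; T) evaluated on `grid`."""
    s = np.asarray(s, dtype=float)
    if s.size == 0:                        # the empty process maps to f = 0
        return np.zeros_like(grid)
    return K_gauss(grid[:, None] - s[None, :], T).sum(axis=1)

def d_Kp(s, t, T, p=2, n_grid=2000):
    """Riemann-sum approximation of d_{K,p}(s, t) = || f_s - f_t ||_p."""
    grid = np.linspace(0.0, T, n_grid)
    dx = grid[1] - grid[0]
    diff = np.abs(smooth(s, grid, T) - smooth(t, grid, T)) ** p
    return (diff.sum() * dx) ** (1.0 / p)

# Example: two processes on [0, 10] with different numbers of events
T = 10.0
print(d_Kp([1.2, 3.5, 7.9], [1.3, 3.4, 6.0, 8.1], T, p=2))
\end{verbatim}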
Note that the smoothing kernel $K(\cdot; T)$ and $\mathbb{L}^p$ norm can influence the distance value. However, we can prove that the distance $d_{K,p}$ in Eqn. \eqref{eq:metric} is a proper metric on $\Omega$ for any $K$ and $p$. This is given in the following theorem (the proof is in Appendix C).
\begin{theorem}
If the smoothing kernel $K$ satisfies the four conditions to be proper, then the function $d_{K,p}$ in Definition \ref{def:metric} is a proper metric in the point process space $\Omega$. That is, it satisfies non-negativity, identity of indiscernibles, symmetry, and triangle inequality.
\label{thm:metric}
\end{theorem}
Based on Lemma \ref{lem:bijec} and Theorem \ref{thm:metric}, we know that the point process space $\Omega$ and smoothed process space $\mathbb{F}$ are one-to-one, and an $\mathbb L^p$ norm on $\mathbb F$ can be used to define a proper metric on $\Omega$. This metric helps solve the problem in $\Omega$ where a conventional vector metric cannot be directly used as different processes may have different cardinalities. Based on this result, we need to answer two natural questions:
\begin{enumerate}
\item For two point processes with the same cardinality, if the event times in the two processes are very close, will the distance $d_{K,p}$ between them be close, too?
\item Conversely, if the distance $d_{K,p}$ between two point processes is close, will they have the same cardinality and their events times are also close?
\end{enumerate}
Question 1 examines if the $d_{K,p}$ distance is continuous with respect to the event times. We claim that this is true and the conclusion is stated in Proposition \ref{prop:cont} as follows. The detailed proof is given in Appendix D.
\begin{proposition}
For any $k \in \mathbf N$,
suppose $y=(y_1,y_2, \cdots, y_k)$ is an observed point process in $\Omega_k$. Let
$x^{(n)}=(x_1^{(n)},x_2^{(n)}, \cdots, x_{k}^{(n)}), n = 1, 2, \cdots$ be a sequence of processes in $\Omega_k$. If
$\lim_{n\to \infty} x^{(n)}=y$, or equivalently, $ \lim_{n\to \infty} x_i^{(n)}=y_i $, $i=1,2, \cdots, k$, then for any observed point process $z=(z_1,z_2, \cdots, z_l) \in \Omega$,
\begin{equation*}
\lim_{n\to \infty} d_{K,p}(x^{(n)},z)=d_{K,p}(y,z), \quad \textrm{and} \quad \lim_{n\to \infty} d_{K,p}(x^{(n)},y)=0.
\end{equation*}
\label{prop:cont}
\end{proposition}
Question 2 examines the inverse continuity.
We claim that this inverse continuity is also true and the result is stated in Proposition \ref{prop:invcont} (see Appendix E for a detailed proof).
\begin{proposition}
For any $k \in \mathbf N$,
suppose $y=(y_1,y_2, \cdots, y_k)$ is an observed point process in $\Omega_k$. Let
$x^{(n)}=(x_1^{(n)},x_2^{(n)}, \cdots, x_{k_n}^{(n)})$ be a sequence of processes in $\Omega_{k_n}, n = 1, 2, \cdots$.
If $\lim_{n\to \infty} d_{K,p}(x^{(n)},y)=0$, then
$\lim_{n\to \infty} x^{(n)}=y$.
That is,
$$ 1) k_n=k \textrm{ for $n$ sufficiently large}, \quad \textrm{and} \quad 2) \lim_{n\to \infty} x_i^{(n)}=y_i, i=1,2, \cdots, k.$$
\label{prop:invcont}
\end{proposition}
\subsection{Desirable Properties for Depth on Point Process}
\label{sec:des_prop}
In general, a depth is defined to provide a measure of centrality of a given data point within a data cloud. To measure whether this goal is achieved, one often examines desirable mathematical properties corresponding to the centrality measurement. For instance, \cite{zuo2000general} proposed the desirable properties for multivariate depth. Later, \cite{nieto2016topologically} provided the desirable properties for functional depth. The commonly studied properties are (1)
linear invariance, (2) vanishing at infinity, (3) maximality at the center, and (4) monotonicity.
\noindent Motivated by previous studies on desirable properties for multivariate and functional depths, we propose the following five desirable properties for the depth on point process: Suppose $D(s;P_S)$ is the depth function for observed event time vector $s$ with respect to the probability space $(\Omega, \mathcal F, P_S)$ for a random point process $S$ on interval $[0,T]$, we expect that $D$ satisfies
\begin{enumerate}[label={\bfseries\itshape P\arabic*}]
\item Continuity: $D(s;P_S)$ is continuous with respect to $s$ in $\Omega$. \label{des_prop1}
\item Linear invariance:
For any event time vector $s=(s_1,s_2, \cdots, s_k)$ on interval $[0,T]$, we transform it to $\tilde{s}=(as_1+b,as_2+b, \cdots, as_k+b)$ on interval $[b,aT+b]$ where $a,b \in \mathbb{R}$ are two constants. Then $D(\tilde{s};P_{\tilde{S}})=D(s;P_S)$. \label{des_prop2}
\item Vanishing at infinity: When the number of events goes to infinity, the depth should go to 0: $D(s;P_S)\to0$ as $|s|\to\infty$.\label{des_prop3}
\item Unique maximum at the center: There exists $s_c \in \Omega$, such that $D(s_c;P_S)=\max_{s\in\Omega}D(s,P_S)$. Also, for any $x (\neq s_c) \in\Omega$, $D(x;P_S)<D(s_c;P_S)$. We refer to $s_c$ as the ``center''. \label{des_prop4}
\item Monotone decreasing from the center: For any $s_1,s_2\in\Omega$, let the center be $s_c$; if $d_{K,p}(s_1,s_c)<d_{K,p}(s_2,s_c)$, then we have $D(s_1;P_S)>D(s_2;P_S)$. \label{des_prop5}
\end{enumerate}
\noindent
We point out that the ``center'' in \ref{des_prop4} was originally defined as a point with symmetry in the data cloud. For example, when dealing with zero-mean multivariate normal samples, we will consider the origin as center, and the corresponding Mahalanobis depth is uniquely maximized at this point. However, for point process data in a finite domain $[0, T]$, it is difficult to define a geometrically symmetric central point in $\Omega$. Notwithstanding, we expect the proposed depth has a unique maximum point, which results in our notion of ``center''.
Here we examine if the above five properties are satisfied by the generalized Mahalanobis depth on point process \citep{liu2017generalized}: 1) If two point processes are close to each other, they should have the same number of events and close event times in the time order.
As the classical Mahalanobis depth is continuous with respect to the input vector, the generalized Mahalanobis depth on point process satisfies \ref{des_prop1}; 2) Linear transformation on the time will not change the number of events, so $P(|s|)^r$ remains the same. Because the classical Mahalanobis depth is linearly invariant, \ref{des_prop2} is also satisfied; 3) The Poisson mass function will go to 0 as the input goes to infinity, so $P(|s|) \to 0$ as $|s|\to\infty$. Thus, \ref{des_prop3} holds, too; 4) $D(s$ $|$ $|s|)$ has maximum value 1. However, this maximum in general can be achieved at multiple processes. For example, if the total intensity $\Lambda$ is an integer, both the population mean with dimension $\Lambda$ and the population mean with dimension $\Lambda-1$ will achieve the maximum depth. Thus, \ref{des_prop4} does not hold as the solution to the maximum is not unique; 5) As a result, \ref{des_prop5} does not hold either. In summary, the generalized Mahalanobis depth on point process only satisfies \ref{des_prop1} to \ref{des_prop3}.
Similar to the discussion about the Mahalanobis depth, we will refer to \ref{des_prop1} - \ref{des_prop5} to explore these properties for other depth functions in the following sections of this paper.
\subsection{$h$-depth on Point Process}
\label{sec:hd}
We have defined a proper metric in the point process space $\Omega$. This metric is based on a smoothing procedure where a point process is equivalently represented by a function in $\mathbb F$. In this section, we will exploit a commonly used functional depth, called $h$-depth, to define an ``$h$-depth'' for point processes in $\Omega$.
At first, we review the notion of $h$-depth \citep{cuevas2007robust} for functional data. Assume $X$ is a functional random variable in the probability space $(\Lambda, \mathcal{F}_\Lambda, P_\Lambda)$, where $\Lambda$ is a subset of $\mathbb L^2([0, T])$. Let $\lVert \cdot \rVert$ denote the conventional $\mathbb L^2$ norm. For any function $z \in \Lambda$, its $h$-depth is defined as:
\begin{equation}
HD(z; P_\Lambda) = \mathbb E(G_h(\lVert z-X \rVert)),
\label{eq:h_fd}
\end{equation}
where $G_h(t) = \exp (-\frac{t^2}{2h})$ is a modified Gaussian kernel with parameter $h \ (>0)$ and $X$ is a random function on the probability space $(\Lambda, \mathcal{F}_\Lambda, P_\Lambda)$.
To simplify notation, we use $HD(z)$ to represent $HD(z; P_\Lambda)$ when the measure $P_\Lambda$ is implicitly known.
Given a set of i.i.d. random functions $X_1, X_2, \cdots, X_n \in \Lambda$, the sample version of the $h$-depth of $z$ is given as
\begin{equation*}
HD_n(z) =\frac{1}{n}\sum_{i=1}^n G_h(\lVert z-X_i \rVert)=\frac{1}{n}\sum_{i=1}^n \exp (-\frac{\lVert z-X_i \rVert^2}{2h}).
\end{equation*}
One important property of the $h$-depth in Eqn. \eqref{eq:h_fd} is its continuity. This is given in the following lemma, where the proof can be found in Appendix F.
\begin{lemma}
If the functional random variable $X$ satisfies $\mathbb E\lVert X \rVert < \infty$, then $HD(\cdot)$ is a continuous function on $\Lambda$.
\label{lem:hcont}
\end{lemma}
Based on the notion of $h$-depth on functional data (in Eqn. \eqref{eq:h_fd}) and the proper metric on point process (in Eqn. \eqref{eq:metric}), we can formally introduce the $h$-depth to point process observations as follows:
\begin{definition}
Let $S$ be a random point process on $[0, T]$ in the probability space $(\Omega, \mathcal F, P_S)$. The $h$-depth of any $s \in \Omega$ is defined as:
\begin{equation}
D(s; P_S) = \mathbb E(G_h(\lVert f_s-f_S \rVert))
\label{eq:hdep}
\end{equation}
where $f_s$ and $f_S$ are the smoothed curves for $s$ and $S$ (by Eqn. \eqref{eq:smooth}) and $G_h(t) = \exp (-\frac{t^2}{2h})$.
\label{def:hdep}
\end{definition}
Definition \ref{def:hdep} provides the population version of the depth. In practice, we use the sample version of the $h$-depth based on the given observations, which is given below.
\begin{definition}
Let $\{S_i\}_{i=1}^{N}$ be a sample of event time vectors from a point process on $[0, T]$. The empirical $h$-depth of any $s \in \Omega$ is defined as:
\begin{equation}
\hat{D}(s; \{S_i\}_{i=1}^{N}) = \frac{1}{N}\sum_{i=1}^{N} G_h(\lVert f_s-f_{S_i} \rVert),
\label{eq:hdep_emp}
\end{equation}
where $ f_s$ and $f_{S_i} $ are smoothed curves for $s$ and $S_i $, $i=1,2, \cdots, N$, and $G_h(t) = \exp (-\frac{t^2}{2h})$ with parameter $h \ (>0)$.
\label{def:hdep_emp}
\end{definition}
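A direct implementation of Definition \ref{def:hdep_emp} can be sketched as follows, reusing the \texttt{d\_Kp} helper from the earlier sketch; the simulated Poisson sample is only a placeholder for real observations.
\begin{verbatim}
import numpy as np

def h_depth(s, sample, T, h, p=2):
    """Empirical h-depth of process s with respect to a sample of processes."""
    dists = np.array([d_Kp(s, Si, T, p=p) for Si in sample])
    return np.mean(np.exp(-dists ** 2 / (2.0 * h)))

# Example: rank simulated homogeneous Poisson processes on [0, T]
rng = np.random.default_rng(0)
T, rate, N = 10.0, 0.8, 50
sample = [np.sort(rng.uniform(0, T, rng.poisson(rate * T))) for _ in range(N)]
depths = [h_depth(Si, sample, T, h=T) for Si in sample]
print(sample[int(np.argmax(depths))])    # the deepest observed process
\end{verbatim}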
With the definitions for both the population and empirical versions of the $h$-depth on point process, we now examine the mathematical properties based on the discussion in section \ref{sec:des_prop}. The basic result can be summarized in the following proposition.
\begin{proposition}
Let $D(\cdot)$ denote the $h$-depth on point process in Definition \ref{def:hdep}. Then it satisfies the following properties:
\begin{itemize}
\item \ref{des_prop1}: If $\mathbb E(|S|) < \infty$, then the depth $D(s;P_S)$ is continuous with respect to $s$.
\item \ref{des_prop2}: If the parameter $h$ is proportional to the interval length $T$, i.e. $h=C T$ for some constant $C>0$, then the depth $D(s;P_S)$ is invariant with respect to a linear transformation on the time interval.
\item \ref{des_prop3}: $D(s; P_S) \rightarrow 0$ when $|s| \rightarrow \infty$.
\end{itemize}
\label{thm:hprop}
\end{proposition}
\noindent The detailed proof is given in Appendix G. Note that \ref{des_prop4} and \ref{des_prop5} are not satisfied because $h$-depth in general may not have a unique maximum point in the data cloud.
\subsection{Modified $h$-depth on Point Process}
\label{sec:mhd}
The center has been a critical notion in statistical depths. However, as we have pointed out in Sec. \ref{sec:hd}, the $h$-depth for a point process may not have a center. In this section, we propose to modify the $h$-depth by including a ``center''-based process in the definition.
\subsubsection{Definition and Properties}
Many commonly used depth functions have the ``center'' as the point with maximum depth.
Based on this idea, we propose a center-based new depth function on point process using the smoothing method. The formal definition is given as follows:
\begin{definition}
Let $s_c$ be a given ``center'' point process in $\Omega$ on $[0, T]$. For any $s \in \Omega$, its center-based $h$-depth is defined to be:
\begin{equation}
D(s;s_c) = \exp (- \frac{\| f_s - f_{s_c} \|^2}{2h}),
\label{eq:hdep_cen}
\end{equation}
where $h > 0$ is a parameter. $ f_s$ and $f_{s_c}$ are smoothed processes of $s$ and $s_c$, respectively.
\label{def:hdep_cen}
\end{definition}
\noindent {\bf Remark 1:} We point out that Definition \ref{def:hdep_cen} is a modified version of Definition \ref{def:hdep}.
In fact, the classical $h$-depth takes the form
$
\mathbb{E}[\exp(-\frac{ \| z-X \|^2}{2h})],
$
with $X$ a functional random variable. If the order of the exponential function and the expectation is switched, then we have
\begin{align*}
\exp\left[\mathbb{E}\left(-\frac{ \| z-X \|^2}{2h}\right)\right]
&=\exp\left(-\frac{ \| z-\mathbb{E}X\|^2+ \mathbb{E}\|\mathbb{E}X-X \|^2+2\, \mathbb{E}\langle z-\mathbb{E}X,\, \mathbb{E}X-X\rangle}{2h}\right) \\
&= \exp\left(-\frac{ \| z-\mathbb{E}X\|^2+\mathbb{E}\|\mathbb{E}X-X \|^2}{2h}\right) \propto
\exp\left(-\frac{\| z-\mathbb{E}X\|^2}{2h} \right),
\end{align*}
since the cross term $\mathbb{E}\langle z-\mathbb{E}X, \mathbb{E}X-X\rangle$ vanishes.
In this way we obtain the center-based $h$-depth $D(s;s_c)$.
\noindent {\bf Remark 2:} The $\mathbb{L}^2$ norm in the definition can be generalized to $\mathbb{L}^p$, with $1\le p<\infty$.
With a center process given in the definition, this modified $h$-depth for point process is expected to satisfy more desirable mathematical properties than the classical one. Indeed, all the desirable properties in section \ref{sec:des_prop} are satisfied and the details are given in the following proposition.
\begin{proposition}
Let $D(\cdot;s_c)$ denote the center-based $h$-depth on point process in Definition \ref{def:hdep_cen} and the metric in $\Omega$ is the $\mathbb{L}^2$ norm $d_{K,2}(s,t)=\| f_{s}-f_{t} \|$. Then $D(\cdot;s_c)$ satisfies the following desirable mathematical properties in section \ref{sec:des_prop}:
\begin{itemize}
\item \ref{des_prop1}: If $|s|<\infty$, then depth $D(s;s_c)$ is continuous with respect to $s$.
\item \ref{des_prop2}: If the parameter $h$ is proportional to the interval length $T$, i.e. $h=CT$ for some constant $C>0$, then the depth $D(s;s_c)$ is invariant with respect to a linear transformation on the time interval.
\item \ref{des_prop3}: $D(s;s_c) \rightarrow 0$ when $|s| \rightarrow \infty$.
\item \ref{des_prop4}: $D(s_c;s_c) = \max_{s\in \Omega} D(s;s_c)$ and $\forall t(\neq s_c) \in\Omega$, $D(t;s_c)< D(s_c;s_c)$.
\item \ref{des_prop5}: For any $s_1,s_2 \in \Omega$, if $d_{K,2}(s_1,s_c)<d_{K,2}(s_2,s_c)$, then $D(s_1;s_c)>D(s_2;s_c)$.
\end{itemize}
\label{prop:hprop_cen}
\end{proposition}
\noindent The detailed proof for each property is provided in Appendix H. Note that this is a clear advantage over the classical $h$-depth method, where only properties \ref{des_prop1}-\ref{des_prop3} are satisfied.
\subsubsection{Estimation of the Center}
\label{center_estimation}
In practice, there is no prior knowledge of the center of point process, so a proper estimation of the center will be needed. In this study, we adopt the common notion of the Karcher mean in a metric space \citep{grove1973conjugatec}. Since we have a well-defined metric between any two point processes in Definition \ref{def:metric}, the Karcher mean can be defined as:
\begin{definition}
Let $S$ be a random point process on $[0, T]$ in the probability space $(\Omega, \mathcal F, P_S)$. The Karcher mean of $S$ is defined to be
\begin{equation}
\mu_{K} = \argmin_{t \in \Omega} \mathbb E[d_{K,2}^2(t,S)],
\end{equation}
where $d_{K,2}(\cdot, \cdot)$ is the metric with kernel function $K(\cdot;T)$ and $\mathbb{L}^2$ norm in Definition \ref{def:metric}.
\label{def:karcher}
\end{definition}
\noindent Note that, in general the solution of the minimization may not be unique, so $\mu_{K}$ is a set of points. Based on the Karcher mean in Definition \ref{def:karcher}, its empirical version is given as follows:
\begin{definition}
Let $\{S_i\}_{i=1}^{N}$ be a collection of $N$ observed point processes on $[0, T]$. The empirical Karcher mean of this process, based on $\{S_i\}_{i=1}^{N}$, is defined to be
\begin{equation}
\bar{S}_K^{(N)} = \argmin_{t \in \Omega} \frac{1}{N} \sum_{i=1}^{N} d_{K,2}^2(t,S_i)
\end{equation}
\label{def:ekarcher}
\end{definition}
\noindent The empirical Karcher mean also may not be unique, and therefore $\bar{S}_K^{(N)}$ is a set of solution points, too. For simplicity, we define the following notation:
\begin{equation*}
SSD(t;S_1,S_2, \cdots, S_N)=\sum_{i=1}^{N} \| f_t - f_{S_i} \|_2^2
\end{equation*}
where $SSD$ stands for ``Sum of Squared Distance'' and $t\in\Omega$ is the event time vector. With this definition, the empirical Karcher mean is just $\bar{S}_K^{(N)}=\argmin_{t \in \Omega} SSD(t;S_1,S_2, \cdots, S_N)$. For any element in $\bar{S}_K^{(N)}$, its dimension has a finite upper bound. The result is formally given as follows, where the proof is given in Appendix I.
\begin{theorem}
Any solution in the empirical Karcher mean of a point process has an upper bound in dimension, i.e. $\exists D_K^{(N)}\in\mathbb{N}^+$ such that $\dim{(\bar{S})}\leq D_K^{(N)},$ $\forall \bar{S} \in \bar{S}_K^{(N)}$.
\label{KMB}
\end{theorem}
\noindent We will introduce a provably convergent algorithm to estimate $\bar{S}_K^{(N)}$, where this upper bound $D_K^{(N)}$ provides an effective searching range. This algorithm is a combination of simulated annealing and line search. We will first briefly review simulated annealing and line search, and then introduce an approach to combine the two methods.
\begin{itemize}[leftmargin=*]
\item RJMCMC annealing\\
Simulated annealing \citep{geman1984stochastic, van1987simulated} is a method that makes optimization feasible if we are able to generate samples. To find the global minimum of a function $f(x)$, we can build a density function $\pi_i(x) \propto \exp{[-\frac{1}{T_i}f(x)]}$ where $T_i$ is a pre-determined decreasing sequence of positive numbers called the temperature, with $\lim_{i\to\infty} T_i=0$ (e.g. $ T_i=C / \log(1+i) $ for a constant $C>0$). Under weak regularity assumptions on $f$, the density $\pi_i$ concentrates on the global minimum points of $f$ as $i\to\infty$. After a large number of iterations of computing $\pi_i$ and sampling from it, we will obtain samples close to the global minimum points.
Simulated annealing provides a method to find the minimum of a function, but to apply it to the optimization of $SSD$, a tool that can generate vectors of different dimensions is needed. In this paper, we adopt the reversible-jump Markov Chain Monte Carlo (RJMCMC) method, which was first introduced by \cite{green1995reversible}. Its main idea follows the Metropolis-Hastings method \citep{metropolis1953equation}, but it is generalized to allow candidates of different dimensions. In this way, it can be used to get samples from densities where the dimension of the random vector is not fixed. One condition to use the RJMCMC method is that the number of possible dimensions should be finite. According to Theorem \ref{KMB}, the range of dimensions is $1, \cdots, D_K^{(N)}$, so RJMCMC is feasible for the center estimation.
Combining the two methods, we are able to find the Karcher mean through sampling from $\pi_i(t) \propto \exp{[-\frac{1}{T_i} SSD(t;S_1,S_2, \cdots, S_N)]}$. Because of RJMCMC, the annealing process can search over different dimensions automatically. An example RJMCMC annealing algorithm is provided in Appendix J, where the input to the algorithm can be adjusted before application.
\item Line search \\
According to Theorem \ref{KMB}, because the set of dimensions to search, $1,2,\cdots, D_K^{(N)}$, is finite, optimizing within each dimension and comparing the output across dimensions is another feasible approach, which corresponds to the
``line search'' idea. An advantage of this method is that the gradient of the $SSD$ can be computed explicitly with the input dimension fixed, as shown in the following proposition.
\begin{proposition}
If the dimension of the input event time vector $t$ is given and the distance function is $d_{k,2}$, then the gradient of $SSD$ with respect to $t$ will be:
\begin{equation*}
\frac{\partial SSD}{\partial t}(t; S_1,S_2, \cdots, S_N)=-2\int_{0}^{T} K'(x-t)[N \vec{1}_{|t|}^T K(x-t;T) - \sum_{i=1}^{N} \vec{1}_{|S_i|}^T K(x-S_i;T) ] dx
\end{equation*}
where $t$ and $S_i$ are event time vectors, $x$ is a scalar, and $|t|$ is the dimension of the vector $t$. All operations are done element-wise.
\noindent If the modified Gaussian kernel $K_G(t;T)=c_1 e^{-\frac{c_2}{T^2}t^2}$ is used to smooth the process, then the gradient can be further simplified to:
\begin{equation*}
\frac{\partial SSD}{\partial t}(t; S_1,S_2, \cdots, S_N)
=
\frac{4c_1^2c_2}{T^2}[N g(t \vec{1}_{|t|}^T,\vec{1}_{|t|} t^T) \vec{1}_{|t|}-
\sum_{i=1}^{N} g(t \vec{1}_{|t|}^T, \vec{1}_{|S_i|} S_i^T)\vec{1}_{|S_i|} ]
\end{equation*}
with the function $g(\cdot,\cdot)$ to be
\begin{eqnarray*}
g(x,y)& = &e^{-\frac{c_2}{2T^2}(x-y)^2} \{ \frac{T^2}{4c_2}[ e^{-\frac{2c_2}{T^2}(\frac{x+y}{2})^2} - e^{-\frac{2c_2}{T^2}(T-\frac{x+y}{2})^2}] \\
& &- \sqrt{\frac{\pi}{8c_2}}T(x-y) [ \Phi(\frac{2\sqrt{c_2}}{T}(T-\frac{x+y}{2}))-\Phi(-\frac{\sqrt{c_2}(x+y)}{T}) ] \}
\end{eqnarray*}
where $\Phi$ is the cumulative distribution function for standard Normal distribution $N(0,1)$; $\vec{1}_d=[1,1, \cdots, 1]^T\in\mathbb{R}^d$. All operations are done element-wise.
\label{prop:gradient}
\end{proposition}
The proof and computational details are provided in Appendix L. Due to this property, gradient-based methods, such as stochastic gradient descent, can be applied to perform the optimization efficiently within a given dimension. Then a comparison of the outputs across dimensions returns the minimizer of the $SSD$, which is the empirical Karcher mean. An example line search algorithm based on the gradient method is provided in Appendix K.
\item Combined method\\
Both RJMCMC annealing method and line search method can be used to estimate the empirical Karcher mean, but they have some disadvantages. For the RJMCMC annealing, although it can search over dimensions automatically, it converges slowly to the optimal solution.
For line search, it can converge fast to the solution given the dimension, but it needs to search every dimension in the dimension range. As a result, the time cost for the entire line search algorithm is also very large.
Based on their characteristics, we combine the two methods: RJMCMC annealing is run first as a ``pre-train'' step to locate a range of promising dimensions, and line search is then used to optimize within the narrowed dimension range; the computational efficiency is expected to improve significantly. Moreover, the output event time vectors from the RJMCMC annealing can be used as initial values for the line search. In this way, the number of iterations in the line search is reduced, which also helps avoid local optima. The combined algorithm following this idea is shown in Algorithm \ref{alg:comb}. The application of this algorithm to simulations and real data will be shown in section \ref{sec: AppResult}.
\end{itemize}
\begin{algorithm}[!ht]
\caption{Combined method to find empirical Karcher mean}
\begin{algorithmic}
\label{alg:comb}
\STATE {\textbf{Input}: the observed event time vectors $S_1, S_2, \cdots, S_N$. \\
\textit{For RJMCMC annealing}: The maximum number of iterations $n_{max}$; Initial value $x_0$; Initial dimension $k_0$ to be the dimension of $x_0$; the upper bound of dimension $D_K^{(N)}$. \\
\textit{For line search}: The batch size $B$; the learning rate $r$; the maximum number of epochs $ep_{max}$; convergence indicator $\epsilon$. \\
\textit{For combination}: The number of dimensions to be kept in RJMCMC annealing: $d_r\in \mathbb{N}$. }
\STATE { \textbf{(1) Pre-train:} \\
Use RJMCMC annealing (such as Algorithm 2) with input $S_1, S_2, \cdots, S_N$, $n_{max}$, $x_0$, $k_0$ and $D_K^{(N)}$ to find the top $d_r$ number of events $\{k_{0,i}\}_{i=1}^{d_r}$ and the corresponding event time vectors $\{x_{0,i}\}_{i=1}^{d_r}$ that produce the smallest $SSD$ values.}
\STATE { \textbf{(2) Optimization:} \\
Use line search (such as Algorithm 3) with input $S_1, S_2, \cdots, S_N$, $B$, $r$, $ep_{max}$, $\epsilon$ as parameters, and the output from RJMCMC annealing: $\{k_{0,i}\}_{i=1}^{d_r}$ and $\{x_{0,i}\}_{i=1}^{d_r}$ as searching dimensions and initial values, to find the minimum solution of the $SSD$: $\bar{S}_K^{(N)}$ }
\STATE {\textbf{Output}: $\bar{S}_K^{(N)}$ is the empirical Karcher mean.}
\end{algorithmic}
\end{algorithm}
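To complement Algorithm \ref{alg:comb}, the following stripped-down sketch carries out only the line-search stage, reusing the helpers from the earlier sketches. For brevity it replaces the closed-form gradient of Proposition \ref{prop:gradient} with a derivative-free optimizer (scipy's Nelder-Mead) and omits the RJMCMC pre-training, so the candidate dimensions are scanned exhaustively up to an assumed bound.
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize

def SSD(t, sample, T, p=2):
    """Sum of squared distances from the candidate process t to the sample."""
    return sum(d_Kp(t, Si, T, p=p) ** 2 for Si in sample)

def line_search_center(sample, T, dims):
    """Minimize SSD within each candidate dimension; return the best process."""
    best_t, best_val = np.array([]), np.inf
    for k in dims:                       # in practice, dims = 1, ..., D_K^(N)
        x0 = np.linspace(T / (k + 1), k * T / (k + 1), k)   # spread-out start
        res = minimize(lambda t: SSD(np.clip(np.sort(t), 0, T), sample, T),
                       x0, method="Nelder-Mead")
        if res.fun < best_val:
            best_t, best_val = np.clip(np.sort(res.x), 0, T), res.fun
    return best_t

# Estimated center and the corresponding center-based h-depth D(s; s_c_hat)
s_c_hat = line_search_center(sample, T, dims=range(1, 15))
center_depth = lambda s, h: np.exp(-d_Kp(s, s_c_hat, T) ** 2 / (2 * h))
\end{verbatim}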
\section{Asymptotic Theory}
\label{sec: AsyTheory}
In this section, we will show that the empirical depth converges to the population depth as the sample size goes to infinity for $h$-depth and modified $h$-depth, defined in section \ref{sec: Methods}. Firstly, for the $h$-depth, we have the following conclusion:
\begin{theorem}
Based on Definitions \ref{def:hdep} and \ref{def:hdep_emp}, let $D(\cdot; P_S)$ represent the $h$-depth for a point process on $[0, T]$ in the probability space $(\Omega, \mathcal F, P_S)$ and $\hat{D}(\cdot; \{S_i\}_{i=1}^{N})$ is the corresponding sample version with sample size $N$. Then, for any input $s \in \Omega$,
\begin{equation}
\hat{D}(s; \{S_i\}_{i=1}^{N}) \to D(s; P_S) \text{ a.s. }
\end{equation}
\end{theorem}
\noindent This theorem establishes the convergence of the sample $h$-depth to the population $h$-depth; it can be easily proven using the Strong Law of Large Numbers.
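To illustrate why the Strong Law of Large Numbers applies, note that an empirical $h$-depth is a sample average of i.i.d. terms. The sketch below assumes, purely for illustration, a Gaussian weight of the smoothed $\mathbb{L}^2$ distance; the exact form is the one in Definition \ref{def:hdep_emp}. It reuses the illustrative helpers \texttt{smooth} and \texttt{grid} from the sketch in the previous section.
\begin{verbatim}
# Hedged sketch of an empirical h-depth: a sample average of i.i.d. terms,
# which converges almost surely by the Strong Law of Large Numbers.
# The Gaussian weight of the smoothed L2 distance is an illustrative choice,
# not necessarily the exact kernel of Definition (def:hdep_emp).
def h_depth(s, processes, h=100.0):
    fs = smooth(s)
    dists = [np.sqrt(np.trapz((fs - smooth(S)) ** 2, grid)) for S in processes]
    return float(np.mean(np.exp(-(np.array(dists) / h) ** 2)))
\end{verbatim}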
For the modified $h$-depth, the convergence of the depth value relies on the convergence of the estimated center. As defined in section \ref{center_estimation}, the Karcher mean can be treated as the solution in $\Omega$ that minimizes the average of squared distances, where the distance is $d_{K,2}$ with the $\mathbb{L}^2$ norm. In fact, we can generalize the definition of the Karcher mean by using any $d_{K,p}$ distance for $p \geq 1$, as follows:
\begin{definition}
Let $S$ be a random point process on $[0, T]$ in the probability space $(\Omega, \mathcal F, P_S)$. The generalized Karcher mean of $S$ is defined to be
\begin{equation}
\mu_{K} = \argmin_{t \in \Omega} \mathbb E[d_{K,p}^2(t,S)],
\label{eq:kmp}
\end{equation}
where $d_{K,p}(\cdot, \cdot)$ is the metric with kernel function $K(\cdot;T)$ and $\mathbb{L}^p$ norm where $p\geq 1$.
\end{definition}
\begin{definition}
Let $\{S_i\}_{i=1}^{N}$ be a collection of $N$ independent point processes on $[0, T]$. The empirical generalized Karcher mean based on $\{S_i\}_{i=1}^{N}$ is defined as
\begin{equation}
\bar{S}_K^{(N)} = \argmin_{t \in \Omega} \frac{1}{N} \sum_{i=1}^{N} d_{K,p}^2(t,S_i)
\label{eq:kmps}
\end{equation}
\end{definition}
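As a small illustration, the generalized distance $d_{K,p}$ between two event time vectors can be evaluated numerically through their smoothed curves. The sketch below again uses the stand-in kernel and grid from the earlier sketches and is meant only to make the definition concrete.
\begin{verbatim}
# Hedged sketch of the generalized distance d_{K,p} between two event time
# vectors, computed through their smoothed curves (stand-in kernel as above).
def d_Kp(x, y, p=2):
    diff = np.abs(smooth(x) - smooth(y)) ** p
    return np.trapz(diff, grid) ** (1.0 / p)
\end{verbatim}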
\noindent It is apparent that Definitions \ref{def:karcher} and \ref{def:ekarcher} are the special cases $p=2$ of the generalized definitions above. The convergence theorem is then given as follows:
\begin{theorem}
\label{asy:theory}
Let $S$ be a random point process on $[0, T]$ in the probability space $(\Omega, \mathcal F, P_S)$, where the number of events $|S|$ has a constant upper bound $D>0$, and let $\{S_i\}_{i=1}^{N}$ be a set of independent event time vectors from $S$. Then we have:
\begin{enumerate}
\item The minimum average squared distance over $\{S_i\}_{i=1}^{N}$ converges to the minimum expected squared distance in $(\Omega, \mathcal F, P_S)$ almost surely, in other words,
\begin{equation}
\min_{t \in \Omega} \frac{1}{N} \sum_{i=1}^{N} d_{K,p}^2(t,S_i)
\to
\min_{t \in \Omega} \mathbb E[d_{K,p}^2(t,S)]
\quad \text{a.s.}
\label{mineq1}
\end{equation}
\item The empirical generalized Karcher mean converges to the generalized Karcher mean almost surely, in other words,
\begin{equation}
\bar{S}_K^{(N)} \to \mu_{K} \quad \text{a.s.}
\label{argmineq1}
\end{equation}
The almost-sure convergence of the set $\bar{S}_K^{(N)}$ to the set $\mu_{K}$ means $\Limsup_{N\to\infty} \bar{S}_K^{(N)} \subset \mu_{K} \text{ a.s.}$, where $\Limsup_{N\to\infty} \bar{S}_K^{(N)}$ is the Kuratowski upper limit \citep{kuratowski2014topology}:
\begin{equation*}
\Limsup_{N\to\infty}\bar{S}_K^{(N)} = \{ x\in\Omega \quad | \quad \liminf_{N\to\infty} d_{K,p}(x,\bar{S}_K^{(N)})=0 \}
\end{equation*}
where $d_{K,p}(x,\bar{S}_K^{(N)})=\inf\{ d_{K,p}(x,\bar{S}) \quad | \quad \bar{S} \in \bar{S}_K^{(N)} \}$.
\item Let $D(\cdot; s_c)$ be the modified $h$-depth in Definition \ref{def:hdep_cen} with center $s_c$. Suppose the Karcher mean contains a unique element, $\mu_{K}=\{s_c^{(p)}\}$, and let $\hat{s}_c^{(N)}$ be any element of the empirical Karcher mean $\bar{S}_K^{(N)}$ based on $\{S_i\}_{i=1}^{N}$. Then, for any $s \in \Omega$,
\begin{equation}
D(s; \hat{s}_c^{(N)}) \to D(s; s_c^{(p)}) \text{ a.s. }
\end{equation}
\end{enumerate}
\noindent Remark 1: In Eqns. \eqref{eq:kmp}, \eqref{eq:kmps}, and \eqref{mineq1}, $\min$ is used instead of $\inf$ because the minimum value can be achieved in both the empirical version and the population version.
\noindent Remark 2: The theorem still holds when the square power of the distance in Eqn. \eqref{mineq1} and in the definitions of $\bar{S}_K^{(N)}$ and $\mu_{K}$ is generalized to any power $r \geq 1$, i.e., when $d_{K,p}^2$ is replaced by $d_{K,p}^r$ with $r \in [1,\infty)$ in the Karcher mean and the empirical Karcher mean.
\label{asyT1}
\end{theorem}
\noindent The proof of Theorem \ref{asy:theory}, together with the two remarks, is given in Appendix M. The first two parts of the theorem confirm that the minimum of the $SSD$ converges to the minimum of the population expected squared distance, irrespective of the optimal solution sets in the sample and the population; they also establish the convergence of the estimated center set to its population version. The last part shows that the modified $h$-depth with the sample Karcher mean as center converges to the one with the population Karcher mean as center.
\section{Application Results}
\label{sec: AppResult}
In this section, we will apply the proposed depth framework, $h$-depth and modified $h$-depth, on point process observations in simulations and real experimental data.
\subsection{Simulation Studies}
\label{App:sim}
\subsubsection{Homogeneous Poisson Process}
\label{sim:hpp}
We first illustrate the depth methods on observations from HPP($\lambda$) on a finite time interval, where $\lambda$ is a constant rate. 100 independent realizations from HPP(0.045) on $[0, 100]$ are generated; the raster plot of these processes is shown in Figure \ref{fig:hpp_sample}(a). The number of events in each process follows a Poisson distribution with mean 4.5 (ranging from 1 to 10 across these 100 realizations), and, given the count, the event times in each observation are uniformly distributed on $[0, 100]$. We then smooth the realizations using the kernel in Eqn. \eqref{eq:gk} with $c_1=1, c_2=10$. The smoothed processes are shown in Figure \ref{fig:hpp_sample}(b).
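A minimal sketch of this simulation (with the same stand-in Gaussian kernel as in the earlier sketches in place of the modified kernel of Eqn. \eqref{eq:gk}) is as follows.
\begin{verbatim}
# Hedged sketch of the HPP(0.045) simulation on [0, 100]: the event count is
# Poisson with mean 4.5 and, given the count, event times are uniform.
rng = np.random.default_rng(0)
hpp_data = [np.sort(rng.uniform(0.0, 100.0, size=rng.poisson(4.5)))
            for _ in range(100)]
hpp_curves = [smooth(S) for S in hpp_data]   # smoothed realizations
\end{verbatim}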
\begin{figure}
\caption{Simulation on HPP(0.045).
(a) 100 realizations from HPP(0.045) on interval $[0, 100]$. Each row represents one realization, where each dot is one event.
(b) Smoothed processes of these 100 realizations using a modified Gaussian kernel. }
\label{fig:hpp_sample}
\end{figure}
Based on the $h$-depth and modified $h$-depth in section \ref{sec: Methods}, we compute the sample depth value for each given process, where the constant $h$ is set to $T=100$. For the modified $h$-depth, the center is determined in four ways: set by prior information, or estimated by RJMCMC annealing, line search, or the combined method. As the Poisson process is homogeneous, we can intuitively set the center as $(20,40,60,80)$. The output of the center estimation is shown in Table \ref{tab:hchpp}. All the estimated centers have lower $SSD$ than the intuitive center. In particular, the combined method shows the best performance: it has the smallest $SSD$ and also the smallest time cost. We will therefore use only this method for the modified $h$-depth when comparing with other depth methods.
\begin{table}[h]
\centering
\begin{tabular}{|c|c|c|c|}
\hline
\textbf{Method} & \textbf{Estimated Center} & \textbf{SSD} & \textbf{Time Cost (s)} \\ \hline
Intuitive & $[20.00,40.00,60.00,80.00]$ & 14217 & $ - $ \\ \hline
RJMCMC Annealing & $[15.79,33.52,72.28,76.51]$ & 13964 & 256.12\\ \hline
Line Search & $[17.07,34.13,63.84,81.79]$ & 13877 & 207.14\\ \hline
Combined & $[23.02,28.29,67.59,78.88]$ & 13858 & 38.91\\ \hline
\end{tabular}
\caption{Center estimation output for the HPP simulation}
\label{tab:hchpp}
\end{table}
\begin{figure}
\caption{Top 5 (red) and bottom 5 (blue) processes ranked by the depth values in the HPP simulation.
(a) By the $h$-depth.
(b) By the modified $h$-depth with the intuitive center in Table \ref{tab:hchpp}. }
\label{fig:hpp_top5}
\end{figure}
\begin{figure}
\caption{Color-mapped smoothed processes based on depth values for the HPP simulation, where top 5 (red) and bottom 5 (blue) are marked with thick lines.
(a) By the $h$-depth.
(b) By the modified $h$-depth with the intuitive center.
(c) By the modified $h$-depth with the estimated center using the combined method. }
\label{fig:hpp_heat}
\end{figure}
The depth values for all 100 processes are computed and the observations are then ranked. For better illustration, instead of showing all the ranked processes, we display only the top 5 and bottom 5 in Figure \ref{fig:hpp_top5}. For comparison, we also adopt the generalized Mahalanobis depth for the HPP case with the power weight $r=1$ and include its ranking result. It can be seen that the outputs from the $h$-depth and modified $h$-depth are similar to the output from the Mahalanobis depth. That is, observations with more uniformly distributed events tend to have larger depth values. In addition, processes with a number of events close to the mean 4.5 have larger depth values.
We then display the corresponding smoothed processes in Figure \ref{fig:hpp_heat}. Note that the generalized Mahalanobis depth is not included, as it is not based on smoothing functions. All three plots demonstrate a center-outward decreasing depth structure: observations whose smoothed curves are closer to the middle of all the curves have larger depth values. Overall, these simulation results are reasonable and consistent with the basic notion of center-outward ranks.
\subsubsection{Inhomogeneous Poisson Process}
\label{sim:ipp}
We further apply the depth methods to observations from IPP[$\lambda(t)$] to evaluate their performance. The rate function is chosen as $\lambda(t)=3\phi(t;25,10)+2\phi(t;75,10)$ and the event time interval is still $[0,100]$, where $\phi(\cdot ;\mu,\sigma)$ is the density of the normal distribution with mean $\mu$ and standard deviation $\sigma$. As $\lambda(t)$ contains two peaks, the simulated event times are mainly distributed around 25 and 75. The kernel function is again the modified Gaussian kernel, with $c_1=1, c_2=25$. 100 samples are generated. The point process plot and the smoothed curve plot are shown in Figure \ref{fig:ipp_sample}.
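A minimal sketch of this simulation via Lewis--Shedler thinning (an assumption about the sampling mechanism, since the exact simulation procedure is not specified here) is as follows; it reuses \texttt{rng} from the HPP sketch.
\begin{verbatim}
# Hedged sketch: simulating IPP[lambda(t)] with
# lambda(t) = 3*phi(t;25,10) + 2*phi(t;75,10) by thinning a dominating HPP.
from scipy.stats import norm

lam = lambda t: 3 * norm.pdf(t, 25, 10) + 2 * norm.pdf(t, 75, 10)
lam_max = lam(np.linspace(0.0, 100.0, 1000)).max()

def ipp_sample():
    # Dominating HPP(lam_max) on [0, 100]; accept each point w.p. lam/lam_max.
    cand = np.sort(rng.uniform(0.0, 100.0, size=rng.poisson(100.0 * lam_max)))
    keep = rng.uniform(0.0, lam_max, size=cand.size) < lam(cand)
    return cand[keep]

ipp_data = [ipp_sample() for _ in range(100)]
\end{verbatim}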
\begin{figure}
\caption{An IPP simulation with a dual-peak intensity function. (a) 100 realizations from IPP[$\lambda(t)$] on interval [0,100]. Each row represents one realization, where each dot is one event. The intensity $\lambda(t)$ is shown on the top of all realizations. (b) Smoothed processes of the 100 realizations in (a) using the modified Gaussian kernel.
}
\label{fig:ipp_sample}
\end{figure}
Similar to section \ref{sim:hpp}, we let the constant $h$ equal $T=100$ in the $h$-depth and modified $h$-depth. Because there is no intuitive center in this case, only the estimated centers are used in the modified $h$-depth. We again use the RJMCMC annealing, line search and combined methods. The output is shown in Table \ref{tab:hcipp}. The three estimated centers are very similar. The line search method and the combined method both attain the lowest $SSD$, but the latter is much more efficient. The combined method thus shows the best performance, and we will therefore use only this method for the modified $h$-depth when comparing with other depth methods.
\begin{table}[h]
\centering
\begin{tabular}{|c|c|c|c|}
\hline
\textbf{Method} & \textbf{Estimated Center} & \textbf{SSD} & \textbf{Time Cost (s)} \\ \hline
RJMCMC Annealing & $[13.73,27.94,32.69,62.85,79.38]$ & 14152 & 210.61\\ \hline
Line Search (SGD) & $[18.88,24.32,37.21,65.36,82.36]$ & 14063 & 159.05\\ \hline
Combined & $[18.83,24.32,37.22,65.36,82.38]$ & 14063 & 48.77 \\ \hline
\end{tabular}
\caption{Center estimation output for the IPP simulation}
\label{tab:hcipp}
\end{table}
\begin{figure}
\caption{Top 5 (red) and bottom 5 (blue) processes ranked by the depth values for the IPP simulation.
(a) By the $h$-depth.
(b) By the modified $h$-depth with the center estimated using the combined method in Table \ref{tab:hcipp}. }
\label{fig:ipp_top5}
\end{figure}
\begin{figure}
\caption{Color-mapped smoothed processes based on depth values for the IPP simulation, where top 5 (red) and bottom 5 (blue) are marked with thick lines.
(a) By the $h$-depth.
(b) By the modified $h$-depth with center estimated using the combined method. }
\label{fig:ipp_heat}
\end{figure}
After computing the depth for all the observations, we obtain the ranking result. Figure \ref{fig:ipp_top5} shows the top 5 and bottom 5 ranked observations. Similar to the HPP case, we include the result from the generalized Mahalanobis depth with $r=1$ for comparison. The outputs from the $h$-depth and modified $h$-depth are similar -- observations with event times around 25 and 75 and a number of events around 5 have larger depth. For the Mahalanobis depth, we can also observe that processes with a number of events around 5 have larger depth; however, rather than concentrating at the two peak time locations, the top 5 processes have event times that are more uniformly distributed. This indicates that the $h$-depth and modified $h$-depth better capture the pattern in the sample. Taking further into account that in the true $\lambda(t)$ the peak at 25 is higher than the peak at 75, we should expect more event times concentrated around 25 than around 75. Comparing Figure \ref{fig:ipp_top5} (a) and (b), we see that for the modified $h$-depth all the top-5 observations have more events around 25 than around 75, whereas this is not clearly the case for the $h$-depth.
The smoothed processes ranked by the $h$-depth and modified $h$-depth are shown in Figure \ref{fig:ipp_heat}. Both exhibit a clear center-outward structure: observations whose smoothed curves peak around 25 and 75 have relatively larger depth values.
In summary, the results from the HPP and IPP simulations show that the proposed $h$-depth and modified $h$-depth both properly build a center-outward ranking of the given point process data.
\subsection{Experimental data application}
\label{App:exp}
We will examine the proposed depth framework using a real spike train recording. The data were included in the Quantitative Single-Neuron Modeling Competition 2009 \citep{naud2009quantitative} and were accessible at \url{http://dx.doi.org/10.6080/K0PN93H3}.
The detailed experiment description can be found in \citep{carandini2007thalamic,sincich2007transmission}. Briefly, the experiment was performed on rhesus monkeys: a retinal input (visual stimulus) was applied, and extracellular potentials were recorded simultaneously for the retinal (pre-synaptic) and the geniculate (post-synaptic) neurons. The stimulus lasted 10 seconds, where the first 5 seconds were the same for all trials and the last 5 seconds were unique to each trial. In total, 76 trials were performed. Since the task in the competition was to predict the post-synaptic spikes given the pre-synaptic spikes, only 38 (post-synaptic, pre-synaptic) pairs were fully provided.
We will use these spike train data to test the smoothing depth framework through a classification task.
In the training set, we have labeled observations for the pre-synaptic group (abbreviated as ``pre-group'') and the post-synaptic group (abbreviated as ``post-group'').
For each observation in the testing set, we compute its depth with respect to each of the two groups; the group yielding the larger depth value is the predicted label.
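A minimal sketch of this decision rule, reusing the illustrative \texttt{h\_depth} sketch from section \ref{sec: AsyTheory}, is the following.
\begin{verbatim}
# Hedged sketch of the depth-based classification rule: assign a test process
# to the group (pre or post) in which it attains the larger depth value.
def classify(s, pre_train, post_train, depth=h_depth):
    return "pre" if depth(s, pre_train) > depth(s, post_train) else "post"
\end{verbatim}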
\begin{figure}
\caption{The point process plot for the spike train observations. Blue: pre-group; Red: post-group. 10 example pairs were selected and displayed. }
\label{fig:exp_data1}
\end{figure}
Before the classification, an exploratory data analysis is performed. The spike times are within the 10 second range, and the point process plot is shown in Figure \ref{fig:exp_data1}, where 10 example pairs of pre-group and post-group observations are displayed. As stated in the experiment description, the stimuli were the same for the first 5 seconds and different for the last 5 seconds, which explains why the signals have very similar patterns in the first 5 seconds. Therefore, it is reasonable to separate each trial into two parts, the first 5 seconds and the last 5 seconds, and smooth them in different ways.
\begin{figure}
\caption{Smoothed observations for the pre-group and post-group in the first 5 seconds (top panel) and the last 5 seconds (bottom panel). Blue dashed lines: pre-group; Red solid lines: post-group. The thick lines denote the estimated Karcher means in the two groups, respectively.}
\label{fig:exp_data2}
\end{figure}
After separation, we apply the modified Gaussian kernel to smooth the observations, with $c_1=1$, $c_2=100$ for the first 5 seconds and $c_1=1$, $c_2=50$ for the last 5 seconds. The smoothed curves are shown in Figure \ref{fig:exp_data2}. Using the combined method in section \ref{center_estimation}, we estimate the Karcher mean for the two groups; the corresponding smoothed curves are shown as thick lines. The Karcher mean curves capture the shape patterns in the first 5 seconds, where the observations are nearly aligned, while in the last 5 seconds the Karcher mean is nearly flat since the observations are very noisy.
The classification is performed via 4-fold cross-validation.
We compute the accuracy and F1 score of the predicted labels against the true labels to measure the classification performance. In addition to the classical $h$-depth and the modified $h$-depth with the center given, we conduct the classification using the generalized Mahalanobis depth on point processes \citep{liu2017generalized}, as well as the band depth and modified band depth \citep{lopez2009concept} on the smoothed observations.
\begin{table}[ht]
\centering
\begin{tabular}{l|c|c|c|c|c|c|}
\hline
\multicolumn{1}{|c|}{\multirow{2}{*}{\textbf{Method}}} & \multicolumn{2}{c|}{\textbf{Accuracy}} & \multicolumn{2}{c|}{\textbf{F1 score (pre)}} & \multicolumn{2}{c|}{\textbf{F1 score (post)}} \\ \cline{2-7}
\multicolumn{1}{|c|}{} & first 5s & last 5s & first 5s & last 5s & first 5s & last 5s \\ \hline
\multicolumn{1}{|l|}{Classical $h$-depth} & 78.95\% & 65.79\% & 79.49\% & 61.76\% & 78.38\% & 69.05\% \\ \hline
\multicolumn{1}{|l|}{Modified $h$-depth} & \textbf{81.58\%} & \textbf{76.32\%} & 81.08\% & \textbf{75.68\%} & \textbf{82.05\%} & \textbf{76.92\%} \\ \hline
\multicolumn{1}{|l|}{Band depth} & 78.95\% & 61.84\% & 77.14\% & 53.97\% & 80.49\% & 67.42\% \\ \hline
\multicolumn{1}{|l|}{Modified band depth} & \textbf{81.58\%} & 71.05\% & \textbf{81.58\%} & 72.50\% & 81.58\% & 69.44\% \\ \hline
\multicolumn{1}{|l|}{Mahalanobis depth} & \textbf{81.58\%} & 73.68\% & \textbf{81.58\%} & 72.97\% & 81.58\% & 74.36\% \\ \hline
\end{tabular}
\caption{The classification result. F1 score (pre) and F1 score (post) are computed according to the recall and precision on pre-group classification and post-group classification. The boldface indicates best results in each column. }
\label{tab:exp_data}
\end{table}
The classification result is shown in Table \ref{tab:exp_data}. All methods provide reasonable classification power on the spike train data, and the overall results on the first 5 seconds are better than those on the last 5 seconds.
For the last 5 seconds, the modified $h$-depth has the best accuracy (76.32\%) and F1 scores (75.68\%, 76.92\%). For the first 5 seconds, the modified $h$-depth, modified band depth and Mahalanobis depth all have the best accuracy (81.58\%), while the modified $h$-depth is better on the post-group (F1 score 82.05\%) and the other two are better on the pre-group (F1 score 81.58\%). In summary, the smoothing depth framework provides relatively accurate classification on these practical point process observations.
\section{Summary and Future Work}
\label{sec: SumFW}
In this paper, we first proposed a kernel smoothing representation for point process observations and showed that the smoothing procedure builds a bijective mapping between point processes and their smoothed curves.
We then defined a proper metric distance between the smoothed curves and thereby transformed problems on point processes into problems on smooth functions.
Based on the notion of $h$-depth on functions, we proposed two methods to form the depth structure on point process observations, i.e. the classical $h$-depth and the modified $h$-depth with center given.
We then proposed a center-estimation method for the modified $h$-depth, which estimates the Karcher mean by minimizing the sum of squared distances through a combination of RJMCMC annealing and line search.
In the tests on simulated data, both the classical $h$-depth and the modified $h$-depth produced reasonable outcomes: a center-outward decreasing depth structure in which observations whose smoothed curves are close to the center have higher depth.
On the real neuronal spike train data, we tested the new depth structures with a depth-based classification task, and both methods produced accurate classifications.
Our investigation is only a starting point for the exploration of smoothed curves, and there is much to be studied further. We have proposed the modified Gaussian kernel satisfying the four basic requirements; other kernel functions that also satisfy the requirements could be explored. In the modified $h$-depth, a combination of RJMCMC annealing and line search was given as the method to estimate the Karcher mean, which serves as the ``center''; other definitions of the ``center'' and other estimation methods are also possible. These are all promising directions for further developing depth functions on point processes. Finally, functional depth methods other than the $h$-depth, such as the band depth and modified band depth, can be applied to smoothed point processes; their ranking performance requires thorough investigation.
\end{document}
\begin{document}
\title{\textbf{On a fractional Nirenberg problem, part II: existence of solutions}
}
\author{
Tianling Jin, \ \
YanYan Li, \ \
Jingang Xiong}
\date{\today}
\maketitle
\begin{abstract} This paper is a continuation of our earlier work
``[T. Jin, Y.Y. Li and J. Xiong, On a fractional Nirenberg problem, part I: blow up analysis and compactness of solutions, to appear in
J. Eur. Math. Soc.]", where compactness results were given on a fractional Nirenberg problem.
We prove two existence results stated there. We also obtain a fractional Aubin inequality.
\end{abstract}
\section{Introduction}
\noindent
Let $(\Sn,g_{\Sn})$, $n\geq 2$, be the standard sphere in $\R^{n+1}$. The fractional Nirenberg problem studied in \cite{JLX} is equivalent to solving:
\be\label{main equ}
P_\sigma(v)=c(n,\sigma)K v^{\frac{n+2\sigma}{n-2\sigma}}\quad \mbox{on } \Sn,
\ee
where $\sigma\in(0,1)$ is a constant, $K$ is a continuous function on $\Sn$,
\be\label{P sigma}
P_\sigma=\frac{\Gamma(B+\frac{1}{2}+\sigma)}{\Gamma(B+\frac{1}{2}-\sigma)},\quad B=\sqrt{-\Delta_{g_{\Sn}}+\left(\frac{n-1}{2}\right)^2},
\ee
$c(n,\sigma)=\Gamma(\frac{n}{2}+\sigma)/\Gamma(\frac{n}{2}-\sigma)$, $\Gamma$ is the Gamma function and $\Delta_{g_{\Sn}}$ is the Laplace-Beltrami operator on $(\Sn, g_{\Sn})$. See \cite{Br}.
The operator $P_{\sigma}$ can be seen more concretely on $\R^n$ using stereographic projection.
The stereographic projection from $\Sn\backslash \{N\}$ to $\R^n$ is the inverse of
\[
F: \R^n\to \Sn\setminus\{N\}, \quad y\mapsto \left(\frac{2y}{1+|y|^2}, \frac{|y|^2-1}{|y|^2+1}\right),
\]
where $N$ is the north pole of $\Sn$. Then
\[
(P_\sigma(\phi))\circ F= |J_F|^{-\frac{n+2\sigma}{2n}}(-\Delta)^\sigma(|J_F|^{\frac{n-2\sigma}{2n}}(\phi\circ F))\quad \mbox{for }\phi\in C^2(\Sn),
\]
where
\[
|J_F|=\left(\frac{2}{1+|y|^2}\right)^n,
\]
and $(-\Delta)^\sigma$ is the fractional Laplacian operator (see, e.g., page 117 of \cite{S}).
When $\sigma\in (0,1)$, Pavlov and Samko \cite{PS} showed that
\be\label{description of P sigma}
P_{\sigma}(v)(\xi)=P_{\sigma}(1)v(\xi)+c_{n,-\sigma}\int_{\Sn}\frac{v(\xi)-v(\zeta)}{|\xi-\zeta|^{n+2\sigma}}\,\ud vol_{g_{\Sn}}(\zeta)
\ee
for $v\in C^{2}(\Sn)$, where $c_{n,-\sigma}=\frac{2^{2\sigma}\sigma\Gamma(\frac{n+2\sigma}{2})}{\pi^{\frac{n}{2}}\Gamma(1-\sigma)}$ and $\int_{\Sn}$ is understood as $\lim\limits_{\va\to 0}\int_{|x-y|>\va}$.
When $K=1$,
\eqref{main equ} is the
Euler-Lagrange equation for a functional associated to the following sharp Sobolev inequality on $\Sn$ (see \cite{Be})
\be\label{pe1}
\left(\Xint-_{\mathbb{S}^n}|v|^{\frac{2n}{n-2\sigma}}\,\ud vol_{g_{\Sn}}\right)^{\frac{n-2\sigma}{n}}\leq \frac{\Gamma(\frac{n}{2}-\sigma)}{\Gamma(\frac{n}{2}+\sigma)}
\Xint-_{\mathbb{S}^n}vP_{\sigma}(v)\,\ud vol_{g_{\Sn}}\quad \mbox{for }v\in H^{\sigma}(\mathbb{S}^n),
\ee
where $\Xint-_{\mathbb{S}^n}=\frac{1}{|\mathbb{S}^n|}\int_{\mathbb{S}^n}$ and $H^{\sigma}(\mathbb{S}^n)$ is the closure of $C^{\infty}(\Sn)$ under the norm
\[
\begin{split}
\|u\|_{H^{\sigma}(\Sn)}:&=\int_{\Sn}vP_{\sigma}(v) \,\ud vol_{g_{\Sn}}\\
&=P_{\sigma}(1)\int_{\Sn}v^2\ud vol_{g_{\Sn}}+\frac{c_{n,-\sigma}}{2}\iint_{\Sn\times\Sn}\frac{(v(\xi)-v(\zeta))^2}{|\xi-\zeta|^{n+2\sigma}}\ud\xi\ud\zeta.
\end{split}
\]
The sharp Sobolev inequality on $\R^n$ is
\be\label{eq:sharp on rn}
\left(\int_{\mathbb{R}^n}|u|^{\frac{2n}{n-2\sigma}}\,\ud x\right)^{\frac{n-2\sigma}{n}}\leq \frac{\Gamma(\frac{n}{2}-\sigma)}{\omega_n^{\frac{2\sigma}{n}}\Gamma(\frac{n}{2}+\sigma)}
\|u\|^2_{\dot H^{\sigma}(\mathbb{R}^n)}\quad \mbox{for }u\in \dot H^{\sigma}(\mathbb{R}^n),
\ee
where $\omega_n$ is the volume of the unit sphere and $\dot H^{\sigma}(\mathbb{R}^n)$ is the closure of $C_c^{\infty}(\R^n)$ under the norm
\[
\begin{split}
\|u\|_{\dot H^{\sigma}(\R^n)}:&=\|(-\Delta)^{\sigma/2}u \|_{L^2(\R^n)}.
\end{split}
\]
The best constant and extremal functions of \eqref{eq:sharp on rn} were obtained in \cite{Lie83} and some classifications of solutions of \eqref{main equ} with $K\equiv 1$ can be found in \cite{CLO} and \cite{Li04}.
It is clear, by multiplying \eqref{main equ} by $v$, that a necessary condition for solving the problem is that
$K$ has to be positive somewhere. As in the classical case \cite{KW}, the following Kazdan-Warner type condition
\be\label{1.3}
\int_{\Sn}\langle \nabla_{g_{\Sn}} K, \nabla_{g_{\Sn}} \xi\rangle v^{\frac{2n}{n-2\sigma}}=0
\ee
gives another obstruction to solving \eqref{main equ}. The proof of \eqref{1.3} is given in \cite{JLX}.
Throughout the paper, we assume that $\sigma\in (0,1)$ and $n\ge 2$ unless otherwise stated.
\begin{defn}
For $d>0$, we say that $K\in C(\Sn)$ has flatness order greater than $d$ at $\xi$ if, in some
local coordinate system $\{y_1,\cdots, y_n\}$ centered at $\xi$,
there exists a neighborhood $\mathscr{O}$ of $0$ such that $K(y)=K(0)+o(|y|^{d})$ in $\mathscr{O}$.
\end{defn}
\begin{thm}\label{K-M-E-S}
Let $\sigma\in (0,1)$, and $K\in C^{1,1}(\Sn)$ be an antipodally symmetric function, i.e., $K(\xi)=K(-\xi)$ $\forall~\xi\in \Sn$, which is positive somewhere on $\Sn$.
If there exists a maximum point of $K$ at which $K$ has flatness order greater than $n-2\sigma$,
then \eqref{main equ} has at least one positive $C^{2}$ solution.
\end{thm}
For $2\leq n<2+2\sigma$, $K\in C^{1,1}(\Sn)$ has flatness order greater than $n-2\sigma$ at every maximum point.
When $\sigma=1$, the above theorem was proved by Escobar and Schoen \cite{ES} for $n\ge 3$. On $\mathbb{S}^2$, the existence of solutions of $-\Delta_{g_{\Sn}}v+1=Ke^{2v}$ for such $K$ was proved by Moser \cite{Ms}. Theorem \ref{K-M-E-S} was stated in \cite{JLX} and it is proved in Section \ref{proof of kmes}.
\begin{thm}\label{general exist}
\label{main thm A} Suppose that $K\in C^{1,1}(\Sn)$ is a positive function satisfying that for every critical point
$\xi_0$ of $K$, in some geodesic normal coordinates $\{y_1, \cdots, y_n\}$ centered at $\xi_0$, there exist
some small neighborhood $\mathscr{O}$ of $0$ and positive constants $\beta=\beta(\xi_0)\in (n-2\sigma,n)$, $\gamma\in (n-2\sigma, \beta]$
such that $K\in C^{[\gamma],\gamma-[\gamma]}(\mathscr{O})$ (where $[\gamma]$ is the integer part of $\gamma$) and
\[
K(y)=K(0)+\sum_{j=1}^{n}a_{j}|y_j|^{\beta}+R(y), \quad \mbox{in } \mathscr{O},
\]
where $a_j=a_j(\xi_0)\neq 0$, $\sum_{j=1}^n a_j\neq 0$,
$R(y)\in C^{[\beta]-1,1}(\mathscr{O})$ satisfies\\ $\sum_{s=0}^{[\beta]}|\nabla^sR(y)||y|^{-\beta+s} \to 0$ as $y\to 0$.
If
\[
\sum_{\xi\in \Sn\mbox{ such that }\nabla_{g_{\Sn}}K(\xi)=0,\ \sum_{j=1}^na_j(\xi)<0}(-1)^{i(\xi)}\neq (-1)^n,
\]
where
\[
i(\xi)=\#\{a_j(\xi): \nabla_{g_{\Sn}}K(\xi)=0,a_j(\xi)<0,1\leq j\leq n\},
\]
then \eqref{main equ} has at least one $C^2$ positive solution. Moreover, there exists a positive constant $C$ depending only on $n, \sigma$ and $K$ such that for all positive $C^2$ solutions $v$ of \eqref{main equ},
\[
1/C\leq v\leq C\quad\mbox{and}\quad\|v\|_{C^2(\Sn)}\leq C.
\]
\end{thm}
For $n=3, \sigma=1$, the existence part of the above theorem was established by Bahri and Coron \cite{BC}, and the compactness part was given in Chang, Gursky and Yang \cite{CGY} and Schoen and Zhang \cite{SZ}. For $n\geq 4, \sigma=1$, the above theorem was proved by Li \cite{Li95}. The statement of Theorem \ref{general exist} and a proof of the compactness part were given in \cite{JLX}. In Section
\ref{proof of general exist}, we prove the existence part of the theorem. The proof is based on a perturbation result, Theorem \ref{perturbation}, and an application of the Leray-Schauder degree. In Section \ref{improved inequality}, we prove a fractional Aubin inequality.
\noindent\textbf{Acknowledgements:} T. Jin was supported in part by a University and Louis Bevier Dissertation Fellowship at Rutgers University and by Rutgers University School of Art and Science Excellence Fellowship. Y.Y. Li was supported in part by NSF (grant no. DMS-1065971 and DMS-1203961) and by Program for Changjiang Scholars and Innovative Research Team in University in China. J. Xiong was supported in part by
CSC project for visiting Rutgers University as a student and the First Class Postdoctoral Science Foundation of China (No. 2012M520002). He was very grateful to the Department of Mathematics at Rutgers University for the kind hospitality.
\section{Proof of Theorem \ref{K-M-E-S}}\label{proof of kmes}
Let $H^{\sigma}_{as}$ be the set of antipodally symmetric functions in $H^{\sigma}(\mathbb{S}^n)$, and let
\[
\lambda_{as}(K)=\inf_{v\in H^{\sigma}_{as}}\left\{\int_{\mathbb{S}^n}vP_{\sigma}(v): \int_{\mathbb{S}^n}K|v|^{\frac{2n}{n-2\sigma}}=1\right\}.
\]
We also denote $\omega_n$ as the volume of $\mathbb{S}^n$.
The proof of Theorem \ref{K-M-E-S} is divided into two steps.
\begin{prop}\label{less then exist1}
Let $K\in C^{1,1}(\mathbb{S}^n)$ be antipodally symmetric and positive somewhere. If
\begin{equation}\label{eq:less1}
\lambda_{as}(K)<\frac{P_{\sigma}(1)\omega_n^{\frac{2\sigma}{n}}2^{\frac{2\sigma}{n}}}{(\max_{\Sn}{K})^{\frac{n-2\sigma}{n}}},
\end{equation}
then there exists a positive and antipodally symmetric $C^{2}(\mathbb{S}^n)$ solution of \eqref{main equ}.
\end{prop}
\begin{prop}\label{less by test function1}
Let $K\in C^{1,1}(\mathbb{S}^n)$ be antipodally symmetric and positive somewhere. If there exists a maximum point of $K$ at which $K$ has flatness order greater than $n-2\sigma$, then
\begin{equation}\label{less than1}
\lambda_{as}(K)<\frac{P_{\sigma}(1)\omega_n^{\frac{2\sigma}{n}}2^{\frac{2\sigma}{n}}}{(\max_{\Sn}{K})^{\frac{n-2\sigma}{n}}}.
\end{equation}
\end{prop}
\begin{proof}[Proof of Theorem \ref{K-M-E-S}]
It follows from Proposition \ref{less then exist1} and Proposition \ref{less by test function1}.
\end{proof}
The proof of Proposition \ref{less then exist1} uses subcritical approximations. For $1<p<\frac{n+2\sigma}{n-2\sigma}$, we define
\[
\lambda_{as,p}(K)=\inf_{v\in H^{\sigma}_{as}}\left\{\int_{\mathbb{S}^n}vP_{\sigma}(v): \int_{\mathbb{S}^n}K|v|^{p+1}=1\right\}.
\]
We begin with a lemma.
\begin{lem}\label{lem of existence subcritical}
Let $K\in C^{1,1}(\mathbb{S}^n)$ be antipodally symmetric and positive somewhere. Then $\lambda_{as,p}(K)$ is achieved by
a positive and antipodally symmetric $C^{2}(\Sn)$ function $v_{p}$, which satisfies
\begin{equation}\label{eq:subcritical1}
P_{\sigma}(v_p)=\lambda_{as,p}(K) K v_p^p\ \ \ \text{and}\ \ \ \int_{\mathbb{S}^n}Kv_p^{p+1}=1.
\end{equation}
\end{lem}
\begin{proof}
The existence of a nonnegative solution $v_p$ of \eqref{eq:subcritical1} follows from the standard variational method and the inequality $\int_{\Sn}|v|P_{\sigma}(|v|)\leq \int_{\Sn} vP_{\sigma}(v)$ for all $v\in H^{\sigma}(\mathbb{S}^n)$.
The regularity and positivity of $v_p$ follows from Proposition 2.4 and Theorem 2.1 in \cite{JLX}.
\end{proof}
\begin{proof}[Proof of Proposition \ref{less then exist1}]
First of all, we see that
\[
\limsup_{p\to\frac{n+2\sigma}{n-2\sigma}}\lambda_{as,p}(K)\leq \lda_{as}(K).
\]
Indeed, for any $\va>0$, there exists a nonnegative function $v\in H^{\sigma}_{as}$ such that
\[
\int_{\Sn}vP_{\sigma}(v)<\lda_{as}(K)+\va \ \mbox{ and }\ \int_{\Sn}Kv^{\frac{2n}{n-2\sigma}}=1.
\]
Let $V_p:=\int_{\Sn}Kv^{p+1}$. Since $\lim_{p\to\frac{n+2\sigma}{n-2\sigma}}V_p=\int_{\Sn}Kv^{\frac{2n}{n-2\sigma}}=1$, we have, for $p$ close to $\frac{n+2\sigma}{n-2\sigma}$,
\[
\lda_{as, p}(K)\leq\int_{\Sn}{\frac{v}{V_p^{1/(p+1)}}P_{\sigma}\left(\frac{v}{V_p^{1/(p+1)}}\right)}\leq \lda_{as}(K)+2\va.
\]
Hence, we may assume that there exists a sequence $\{p_i\}\to\frac{n+2\sigma}{n-2\sigma}$ such
that $\lambda_{as,p_i}(K)\to \lambda$ for some $\lambda\leq\lambda_{as}(K)$. Since $\{v_{i}\}$, which is a sequence of minimizers in Lemma \ref{lem of existence subcritical} for $p=p_i$, is bounded in
$H^{\sigma}(\mathbb{S}^n)$, there exists $v\in H^{\sigma}(\Sn)$ such that $v_{i}\rightharpoonup v$ weakly in $H^{\sigma}(\mathbb{S}^n)$ and $v$ is nonnegative. If $v\not\equiv 0$, it follows from \eqref{description of P sigma} that $v>0$ on $\Sn$, and we are done. Now we suppose that $v\equiv 0$. If $\{\|v_{i}\|_{L^{\infty}(\mathbb{S}^n)}\}$ is bounded, then by the local estimates established in \cite{JLX} the sequence $\{\|v_{i}\|_{C^{2}(\mathbb{S}^n)}\}$ is also bounded. Therefore, $v_{i}\to 0$ in $C^1(\Sn)$, which leads to $1=\int_{\mathbb{S}^n}K|v_{i}|^{p_i+1}\to 0$, a contradiction. Thus we may assume that $v_{i}(x_{i}):=\max_{\mathbb{S}^n}v_{i}\to\infty.$ Since $\Sn$ is compact, there exists a
subsequence of $\{x_{i}\}$, which will be still denoted as $\{x_{i}\}$, and $\bar x$ such that $x_{i}\to\bar x$.
Without loss of generality we assume that $\bar x$ is the south pole. Via the stereographic projection $F^{-1}$, \eqref{eq:subcritical1} becomes
\begin{equation}\label{eq:subcritical in plane1}
(-\Delta)^{\sigma}u_{i}(y)=\lambda_{as,p_i}(K)K\circ F(y)\left(\frac{2}{1+|y|^2}\right)^{\va_i}u_{i}^{p_i}(y),\ \ y\in\mathbb{R}^n
\end{equation}
where $v_{i}\circ F(y)=(\frac{1+|y|^2}{2})^{\frac{n-2\sigma}{2}}u_{i}(y)$ and $\va_i=\frac{n+2\sigma-p_i(n-2\sigma)}{2}$.
Thus for any $y\in\mathbb{R}^n$, $u_{i}(y)\leq 2^{\frac{n-2\sigma}{2}}u_{i}(y_{i})$
where $y_{i}:=F^{-1}(x_{i})\to 0.$ For simplicity, we denote $m_{i}:=u_{i}(y_{i})$. By our assumption on $v_{i}$ we have $m_{i}\to\infty.$
Define
\[
\tilde u_{i}(y)=(m_{i})^{-1}u_{i}\big((m_{i})^{\frac{1-p_i}{2\sigma}}y+y_{i}\big).
\]
From \eqref{eq:subcritical in plane1} we see that $\tilde u_{i}(y)$ satisfies, for any $y\in\mathbb{R}^n$,
\begin{equation}\label{eq:scaled subcritical in plane}
\begin{split}
(-\Delta)^{\sigma}\tilde u_{i}(y)=&\lambda_{as,p_i}(K)K\circ F(m_{i}^{\frac{1-p_i}{2\sigma}}y+y_{i})\\
&\quad\cdot\left(\frac{2}{1+|(m_{i})^{\frac{1-p_i}{2\sigma}}y+y_{i}|^2}\right)^{\va_i}\tilde u_{i}^{p_i}(y).
\end{split}
\end{equation}
Since $0<\tilde u_{i}\leq 2^{\frac{n-2\sigma}{2}}$, by the local estimates in \cite{JLX} $\{\tilde u_{i}\}$ is
bounded in $C^{2}_{loc}(\mathbb{R}^n)$.
Note that since $\{v_{i}\}$ is bounded in
$H^{\sigma}(\mathbb{S}^n)$, $\{\tilde u_{i}\}$ is bounded in $\dot H^{\sigma}(\R^n)$. Then there exists $u\in C^{2}(\mathbb{R}^n)\cap \dot H^{\sigma}(\R^n)$ such that, by passing to a subsequence, $\tilde u_{i}\to u$ in
$C^{2}_{loc}(\mathbb{R}^n)$, $u(0)=1$, $\tilde u_{i}\rightharpoonup u$ weakly in $\dot H^{\sigma}(\R^n)$ and $u$ weakly satisfies
\begin{equation}\label{eq:critical in plane1}
(-\Delta)^{\sigma} u=\lambda K(\bar x)u^{\frac{n+2\sigma}{n-2\sigma}}.
\end{equation}
Hence $\lambda>0$, $K(\bar x)>0$, and the solutions of \eqref{eq:critical in plane1} are classified in \cite{CLO} and \cite{Li04} (see also Theorem 1.5 in \cite{JLX}).
For $x\in\mathbb{S}^n$ and $r>0$, we denote by $\mathcal{B}(x,r)$ the geodesic ball centered at $x$ with
radius $r$ on $\mathbb{S}^n$, and for $y\in\mathbb{R}^n$ and $R>0$, we denote by $B(y,R)$ the
Euclidean ball in $\mathbb{R}^n$ centered at $y$ with radius $R$. For any $R>0$, let $\Omega_i:=F(B(y_{i},m_{i}^{\frac{1-p_i}{2\sigma}}R))$; then we have
\begin{equation*}
\begin{split}
&\int_{\Omega_i}Kv_{i}^{p_i+1}=\int_{B(y_{i},m_{i}^{\frac{1-p_i}{2\sigma}}R)}K\circ F(y)\left(\frac{2}{1+|y|^2}\right)^{\va_i}u_{i}^{p_i+1}\\
&=\int_{B(0,R)}m_{i}^{\frac{\va_i}{2}}K\circ F((m_{i})^{\frac{1-p_i}{2\sigma}}y+y_{i})\left(\frac{2}{1+|(m_{i})^{\frac{1-p_i}{2\sigma}}y+y_{i}|^2}\right)^{\va_i}\tilde u_{i}^{p_i}(y)\\
&\ge K(\bar x)\int_{B(0,R)}u^{\frac{2n}{n-2\sigma}}+o(1)
\end{split}
\end{equation*}
as $\ p_i\to\frac{n+2\sigma}{n-2\sigma}$, where we used that $K$ is positive near $\bar x$, $\va_i\to 0$ and $\tilde u_{i}\to u$ in $C^{2}_{loc}(\mathbb{R}^n)$.
Since $K$ and $v_{i}$ are antipodally symmetric,
we have, by taking $\delta$ small and $R$ sufficiently large,
\begin{equation}\label{norm less1}
\begin{split}
1=\int_{\mathbb{S}^n}Kv_{i}^{p_i+1}&\ge 2\int_{\mathcal{B}(\bar x,\delta)}Kv_{i}^{p_i+1}+\int_{\{K<0\}}Kv_{i}^{p_i+1}\\
&\ge 2K(\bar x)\int_{\mathbb{R}^n}u^{\frac{2n}{n-2\sigma}}+\int_{\{K<0\}}Kv_{i}^{p_i+1}+o(1).
\end{split}
\end{equation}
We claim that
\[
\int_{\{K<0\}}Kv_{i}^{p_i+1}\to 0\quad\mbox{as}\quad p_i\to\frac{n+2\sigma}{n-2\sigma}.
\]
Indeed, for any $\va>0$, it is not difficult to show, by blow up analysis, that $\|v_{i}\|_{L^{\infty}(\Omega_{\va/4})}\leq C(\va)$ where $\Omega_{\va}:=\{x\in \Sn: K(x)<-\va\}$ and $C(\va)$ is independent of $p_i$. By the local estimates established in \cite{JLX}, we have $\|v_{i}\|_{C^{2}(\Omega_{\va/2})}\leq C(\va)$ and hence $v_{i}\to 0$ in $C^1(\overline\Omega_{\va})$ (recall that we assumed that $v_{i}\rightharpoonup 0$ weakly in $H^{\sigma}(\Sn)$). Thus when $p_i$ is sufficiently close to $\frac{n+2\sigma}{n-2\sigma}$,
\[\int_{\Omega_{\va}}|K|v_{i}^{p_i+1}<\va.
\]
On the other hand, by H\"older inequality and Sobolev inequality,
\[
\int_{-\va\le K<0}|K|v_{i}^{p_i+1}<C(n,\sigma)\va\|v_{i}\|_{L^{\frac{2n}{n-2\sigma}}}^{p_i+1}\leq C(n,\sigma,\lambda_{as})\va,
\]
which finishes the proof of our claim. Thus, \eqref{norm less1} leads to
\be\label{norm less2}
1\ge 2K(\bar x)\int_{\mathbb{R}^n}u^{\frac{2n}{n-2\sigma}}+o(1).
\ee
By the sharp Sobolev inequality \eqref{pe1}, \eqref{eq:critical in plane1} and \eqref{norm less2}, we have
\begin{equation*}
\begin{split}
P_{\sigma}(1)\omega_n^{\frac{2\sigma}{n}}\leq \frac{\int_{\mathbb{R}^n}u(-\Delta)^{\sigma}u}{\left(\int_{\mathbb{R}^n}
u^{\frac{2n}{n-2\sigma}}\right)^{\frac{n-2\sigma}{n}}}&=\lambda K(\bar x)\left(\int_{\mathbb{R}^n}u^{\frac{2n}{n-2\sigma}}\right)^{\frac{2\sigma}{n}}
\\
&\leq \lambda_{as}(K)K(\bar x)(2K(\bar x))^{-2\sigma/n}\\
&\leq \lambda_{as}(K)2^{-2\sigma/n}(\max_{\Sn} K)^{1-2\sigma/n},
\end{split}
\end{equation*}
which contradicts \eqref{eq:less1}.
\end{proof}
Next we shall prove Proposition \ref{less by test function1} using test functions inspired by \cite{Hebey, Ro}.
\begin{proof}[Proof of Proposition \ref{less by test function1}]
Let $\xi_1$ be a maximum point of $K$ at which $K$ has flatness order greater than $n-2\sigma$. Suppose $\xi_2$ is the antipodal point of $\xi_1$.
For $\beta>1$ and $i=1,2$ we define
\be\label{eq:bubble}
v_{i,\beta}(x)=\left(\frac{\sqrt{\beta^2-1}}{\beta-\cos r_i}\right)^{\frac{n-2\sigma}{2}},
\ee
where $r_i=d(x,\xi_i)$ is the geodesic distance between $x$ and $\xi_i$ on the sphere. It is clear that
\[
P_{\sigma}(v_{i,\beta})=P_{\sigma}(1)v_{i,\beta}^{\frac{n+2\sigma}{n-2\sigma}}\quad\mbox{and}\quad\int_{\mathbb{S}^n}v_{i,\beta}^{\frac{2n}{n-2\sigma}}=\omega_n.
\]
Let
\[
v_{\beta}=v_{1,\beta}+v_{2,\beta},
\]
which is antipodally symmetric. Then
\begin{equation*}
\begin{split}
\int_{\mathbb{S}^n}v_{\beta}P_{\sigma}(v_{\beta})&=P_{\sigma}(1)\int_{\Sn}\displaystyle\sum_{i=1}^2 v_{i,\beta}^{\frac{n+2\sigma}{n-2\sigma}}\displaystyle\sum_{j=1}^2 v_{j,\beta}\\
&=P_{\sigma}(1)\int_{\Sn}\displaystyle\sum_{i=1}^2 v_{i,\beta}^{\frac{2n}{n-2\sigma}}+2 v_{1,\beta}^{\frac{n+2\sigma}{n-2\sigma}}v_{2,\beta}\\
&=P_{\sigma}(1)2\omega_n \left(1+\omega_n^{-1}\int_{\Sn} v_{1,\beta}^{\frac{n+2\sigma}{n-2\sigma}}v_{2,\beta}\right).
\end{split}
\end{equation*}
By direct computations with change of variables, we have
\[
\int_{\Sn}v_{1,\beta}^{\frac{n+2\sigma}{n-2\sigma}}v_{2,\beta}=A(\beta-1)^{\frac{n-2\sigma}{2}}+o\big((\beta-1)^{\frac{n-2\sigma}{2}}\big)
\]
for $\beta$ close to $1$, where
\[
A=2^{-\frac{n-2\sigma}{2}}\omega_{n-1}\int_0^{+\infty}\frac{2^nr^{n-1}}{(1+r^2)^{\frac{n+2\sigma}{2}}}dr>0.
\]
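For the reader's convenience, we sketch (without tracking the constant) how the power $(\beta-1)^{\frac{n-2\sigma}{2}}$ arises. Near $\xi_1$ one has $\cos r_1\approx 1-\frac{r_1^2}{2}$, $\sqrt{\beta^2-1}\approx\sqrt{2(\beta-1)}$ and $v_{2,\beta}\approx\big(\frac{\beta-1}{2}\big)^{\frac{n-2\sigma}{4}}$, so that, writing $r=r_1$ for the geodesic distance to $\xi_1$,
\[
\int_{\Sn}v_{1,\beta}^{\frac{n+2\sigma}{n-2\sigma}}v_{2,\beta}
\approx C_n(\beta-1)^{\frac{n-2\sigma}{4}}\int_0^{\delta}
\bigg(\frac{\sqrt{2(\beta-1)}}{(\beta-1)+\frac{r^2}{2}}\bigg)^{\frac{n+2\sigma}{2}}r^{n-1}\,dr
=O\big((\beta-1)^{\frac{n-2\sigma}{2}}\big)
\]
after the substitution $r=\sqrt{2(\beta-1)}\,s$.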
Choose a sufficiently small neighborhood $V_1$ of $\xi_1$ and let $V_2=\{x\in\Sn: -x\in V_1\}$. Then $K$ is positive in $V_1\cup V_2$ and
\begin{equation*}
\begin{split}
\int_{\mathbb{S}^n}K v_{\beta}^{\frac{2n}{n-2\sigma}}
&=\int_{\cup V_i}K\left(v_{1,\beta}+v_{2,\beta}\right)^{\frac{2n}{n-2\sigma}}+\int_{\mathbb{S}^n\backslash \cup V_i}Kv_{\beta}^{\frac{2n}{n-2\sigma}}\\
&=2\int_{V_1}K\left(v_{1,\beta}+v_{2,\beta}\right)^{\frac{2n}{n-2\sigma}}+\int_{\mathbb{S}^n\backslash \cup V_i}Kv_{\beta}^{\frac{2n}{n-2\sigma}}\\
&\geq 2\int_{V_1}K\left(v_{1,\beta}^{\frac{2n}{n-2\sigma}}+\frac{2n}{n-2\sigma}v_{1,\beta}^{\frac{n+2\sigma}{n-2\sigma}} v_{2,\beta}\right)+\int_{\mathbb{S}^n\backslash \cup V_i}Kv_{\beta}^{\frac{2n}{n-2\sigma}}.
\end{split}
\end{equation*}
Since $K$ has flatness order greater than $n-2\sigma$ at $\xi_1$, we have in $V_1$ that
\[
K(x)=K(\xi_1)+o(1)|x-\xi_1|^{n-2\sigma}.
\]
Thus
\begin{equation*}
\begin{split}
\int_{\mathbb{S}^n}Kv_{\beta}^{\frac{2n}{n-2\sigma}}&\geq 2K(\xi_1)\int_{\Sn}v_{1,\beta}^{\frac{2n}{n-2\sigma}}+\frac{4nA}{n-2\sigma}K(\xi_1)(\beta-1)^{\frac{n-2\sigma}{2}}+o\big((\beta-1)^{\frac{n-2\sigma}{2}}\big)\\
&=2K(\xi_1)\omega_n\left(1+\frac{2nA}{n-2\sigma}\omega_n^{-1}(\beta-1)^{\frac{n-2\sigma}{2}}+o\big((\beta-1)^{\frac{n-2\sigma}{2}}\big)\right)
\end{split}
\end{equation*}
for $\beta$ close to $1$. Hence
\[
\frac{\int_{\mathbb{S}^n}v_{\beta}P_{\sigma}(v_{\beta})}{\left(\int_{\mathbb{S}^n}Kv_{\beta}^{\frac{2n}{n-2\sigma}}\right)^{\frac{n-2\sigma}{n}}}\leq
\frac{P_{\sigma}(1)\omega_n^{\frac{2\sigma}{n}}2^{\frac{2\sigma}{n}}}{K(\xi_1)^{\frac{n-2\sigma}{n}}}
\left(1-\frac{A}{\omega_n}(\beta-1)^{\frac{n-2\sigma}{2}}+o\big((\beta-1)^{\frac{n-2\sigma}{2}}\big)\right),
\]
which implies \eqref{less than1} holds.
\end{proof}
Theorem \ref{K-M-E-S} can be extended to positive functions $K$ which are invariant under some isometry group acting without fixed points (see \cite{Hebey, Ro}). Denote by $Isom(\mathbb{S}^n)$ the isometry group of the standard sphere $(\mathbb{S}^n, g_{\Sn})$.
Let $G$ be a subgroup of $Isom(\mathbb{S}^n)$. We say that $G$ acts without fixed points if for
each $x\in\mathbb{S}^n$, the orbit $O_{G}(x):=\{g(x)| g\in G\}$ has at least two elements.
We denote by $|O_{G}(x)|$ the number of elements in $O_{G}(x)$. A function $K$ is called $G$-invariant
if $K\circ g\equiv K$ for all $g\in G$.
\begin{thm}\label{invariance nirenberg}
Let $G$ be a finite subgroup of $Isom(\mathbb{S}^n)$ and act without fixed points.
Let $K\in C^{1,1}(\mathbb{S}^n)$ be a positive and G-invariant function.
If there exists $\xi_0\in\mathbb{S}^n$ such that $K$ has flatness order greater than $n-2\sigma$ at $\xi_0$, and for any $x\in\mathbb{S}^n$
\begin{equation}\label{eq:condition 1}
\frac{K(\xi_0)}{|O_{G}(\xi_0)|^{\frac{2\sigma}{n-2\sigma}}}\geq \frac{K(x)}{|O_{G}(x)|^{\frac{2\sigma}{n-2\sigma}}},
\end{equation}
then \eqref{main equ} possesses a positive and G-invariant $C^{2}(\mathbb{S}^n)$ solution.
\end{thm}
Let $H^{\sigma}_{G}$ be the set of G-invariant functions in $H^{\sigma}(\mathbb{S}^n)$. Let
\[
\lambda_{G}(K)=\inf_{v\in H^{\sigma}_{G}}\left\{\int_{\mathbb{S}^n}vP_{\sigma}(v): \int_{\mathbb{S}^n}K|v|^{\frac{2n}{n-2\sigma}}=1\right\}.
\]
Similar to Theorem \ref{K-M-E-S}, the proof of Theorem \ref{invariance nirenberg} is again divided into two steps.
\begin{prop}\label{less then exist}
Let $G$ be a finite subgroup of $Isom(\mathbb{S}^n)$. Let $K\in C^{1,1}(\mathbb{S}^n)$ be a positive and G-invariant function. If for all $x\in\mathbb{S}^n$,
\begin{equation}\label{eq:less}
\lambda_{G}(K)<\frac{P_{\sigma}(1)\omega_n^{\frac{2\sigma}{n}}|O_{G}(x)|^{\frac{2\sigma}{n}}}{K(x)^{\frac{n-2\sigma}{n}}},
\end{equation}
then there exists a positive G-invariant $C^{2}(\mathbb{S}^n)$ solution of \eqref{main equ}.
\end{prop}
\begin{prop}\label{less by test function}
Let $G$ be a finite subgroup of $Isom(\mathbb{S}^n)$ and act without fixed points.
Let $K\in C^{1,1}(\mathbb{S}^n)$ be a positive and G-invariant function. If $K$ has flatness order greater than $n-2\sigma$ at $\xi_1$ for some $\xi_1\in\mathbb{S}^n$, then
\begin{equation}\label{less than}
\lambda_{G}(K)<\frac{P_{\sigma}(1)\omega_n^{\frac{2\sigma}{n}}|O_{G}(\xi_1)|^{\frac{2\sigma}{n}}}{K(\xi_1)^{\frac{n-2\sigma}{n}}}.
\end{equation}
\end{prop}
Theorem \ref{invariance nirenberg} follows from Proposition \ref{less then exist} and Proposition \ref{less by test function} immediately. The proof of Proposition \ref{less then exist} uses subcritical approximations and blow up analysis, which is similar to that of Proposition \ref{less then exist1}. Proposition \ref{less by test function} can be verified by the following G-invariant test function
\[
v_{\beta}=\sum\limits_{i=1}^m v_{i,\beta},
\]
where $m=|O_G(\xi_1)|$, $O_{G}(\xi_1)=\{\xi_1,\dots,\xi_m\}$, $\xi_i=g_i(\xi_1)$ for some $g_i\in G$, $g_1=Id$, $v_{i,\beta}:=v_{1,\beta}\circ g_i^{-1}$ and $v_{1,\beta}$ is as in \eqref{eq:bubble}. We omit the detailed proofs of Propositions \ref{less then exist} and \ref{less by test function}, and leave them to the reader.
\section{Proof of Theorem \ref{general exist}}
\label{proof of general exist}
In this section, we first establish a perturbation result, Theorem \ref{perturbation}. The method we shall use is similar to that in \cite{Li95},
but we have to set up a framework to fit the fractional situation. Perturbation results in the classical Nirenberg problem
were obtained in \cite{CY87}, \cite{CY}, \cite{Li95} and many others.
For a conformal transformation $\varphi:\mathbb{S}^n\to \mathbb{S}^n$, we set
\[
T_{\varphi}v=v\circ\varphi|\det d\varphi|^{(n-2\sigma)/2n},
\]
where $|\det d\varphi|$ denotes the Jacobian of $\varphi$ satisfying
\[
\varphi^*g_{\Sn}=|\det d\varphi|^{2/n}g_{\Sn}.
\]
\begin{lem}\label{lempe1}
For any conformal transform $\varphi$ on $\Sn$ we have
\[
\int_{\mathbb{S}^n}T_{\varphi}vP_\sigma(T_{\varphi}v)\,\ud vol_{g_{\Sn}}=\int_{\mathbb{S}^n} vP_\sigma(v)\,\ud vol_{g_{\Sn}}
\]
and
\[
\int_{\mathbb{S}^n}|T_{\varphi}v|^{\frac{2n}{n-2\sigma}}\,\ud vol_{g_{\Sn}}=\int_{\mathbb{S}^n}|v|^{\frac{2n}{n-2\sigma}}\,\ud vol_{g_{\Sn}}
\]for all $v\in H^\sigma(\mathbb{S}^n)$.
\end{lem}
\begin{proof} We only prove the first equality.
Recall that $P_\sigma=P_\sigma^{g_{\Sn}}$. By the conformal invariance of $P_\sigma^g$,
\[
\begin{split}
\int_{\mathbb{S}^n} vP_\sigma(v)\,\ud vol_{g_{\Sn}}&=\int_{\mathbb{S}^n} v \circ \varphi P^{\varphi^*g_{\Sn}}_\sigma(v\circ \varphi)\,\ud vol_{\varphi^*g_{\Sn}}\\&
=\int_{\mathbb{S}^n} v \circ \varphi P^{|\det \varphi|^{2/n}g_{\Sn}}_\sigma(v\circ \varphi)|\det \varphi|\,\ud vol_{g_{\Sn}}\\&
=\int_{\mathbb{S}^n} v \circ \varphi |\det \varphi|^{-\frac{n+2\sigma}{2n}} P^{g_{\Sn}}_\sigma(v\circ \varphi |\det \varphi|^{\frac{n-2\sigma}{2n}})|\det \varphi|\,\ud vol_{g_{\Sn}}\\
&= \int_{\mathbb{S}^n}T_{\varphi}vP_\sigma(T_{\varphi}v)\,\ud vol_{g_{\Sn}}.
\end{split}
\]
\end{proof}
For $P\in \mathbb{S}^n$, $1\leq t<\infty$, we recall a conformal transform (see, e.g., \cite{CY87})
\be\label{phi}
\varphi_{P,t}:\mathbb{S}^n\to \mathbb{S}^n,\quad y\mapsto t y,
\ee
where $y$ is the stereographic projection coordinates of points on $\mathbb{S}^n$ while the stereographic projection is performed with $P$
as the north pole to the equatorial plane of $\mathbb{S}^n$. The totality of such a set of conformal transforms is diffeomorphic to the unit ball
$B^{n+1}$ in $\R^{n+1}$, with the identity transformation identified with the origin in $B^{n+1}$ and
\[\varphi_{P,t}\leftrightarrow ((t-1)/t)P=:p\in B^{n+1}\]
in general. We denote $\varphi_p=\varphi_{P,t}$.
Let
\[
\M=\left\{v \in H^{\sigma}(\mathbb{S}^n):\Xint-_{\mathbb{S}^n}|v|^{\frac{2n}{n-2\sigma}}\,\ud vol_{g_{\Sn}}=1\right\},
\]
\[
\M_0=\left\{v\in \M:\Xint-_{\mathbb{S}^n} x|v|^{\frac{2n}{n-2\sigma}}\,\ud vol_{g_{\Sn}}=0\right\}.
\]
Define
\[
\varpi:\M_0\times B^{n+1}\to \M
\]
by
\[
v=\varpi(w,p)=T^{-1}_{\varphi_p} w,\quad w\in \M_0.
\]
\begin{lem} \label{lem c2 diff}
$\varpi: \M_0\times B^{n+1}\to \M$ is a $C^2$ diffeomorphism.
\end{lem}
\begin{proof}
The proof is the same as that of Lemma 5.4 in \cite{Li95}.
\end{proof}
Consider the following functional on $\M$
\[
E_{K}(v)=\frac{\Xint-_{\mathbb{S}^n}vP_\sigma(v)\,\ud vol_{g_{\Sn}}}{\left(\Xint-_{\mathbb{S}^n}K|v|^{2n/(n-2\sigma)}\,\ud vol_{g_{\Sn}}\right)^{(n-2\sigma)/n}},
\]
where $K>0$.
By \eqref{pe1} we see that the functional $E_K$ has a lower bound over $\M$ provided $K$ is positive and bounded.
By Lemma \ref{lempe1}, $E_K$ is in fact defined on $\M_0$ when $K$ is a constant.
Due to the classification of extremals of \eqref{pe1},
$\min_{w\in \M_0}E_1=P_{\sigma}(1)$ is achieved only by $-1$ and $1$.
Note that both $\M$ and $\M_0$ are $C^2$ surfaces in the Hilbert space $H^\sigma(\Sn)$.
\begin{lem}\label{lempe2}
Let $T_1\M$ denote the tangent space of $\M$ at $v=1$, then we have
\[
\begin{split}
T_1\M&=\left\{\phi: \int_{\mathbb{S}^n}\phi=0\right\}\\&
=\mathrm{span}\{\mbox{spherical harmonics of degree }\geq 1\}.
\end{split}
\]
\end{lem}
\begin{lem}\label{lempe3}
Let $T_1\M_0$ denote the tangent space of $\M_0$ at $v=1$, then we have
\[
T_1\M_0=\mathrm{span}\{\mbox{spherical harmonics of degree } \geq 2\}.
\]
\end{lem}
The above two lemmas follow from direct computations and some elementary properties of spherical harmonics.
The following lemma can be proved by the Implicit Function Theorem (see Lemma 6.4 in \cite{Li95}).
\begin{lem}\label{lempe4}
For $\tilde{w}\in T_1\M_0$, $\tilde{w}$ close to $0$, there exist $\mu(\tilde{w})\in \R$, $\eta(\tilde{w})\in \R^{n+1}$ being $C^2$ functions such that
\be\label{pe2}
\Xint-_{\mathbb{S}^n}|1+\tilde{w}+\mu+\eta\cdot x|^{2n/(n-2\sigma)}=1
\ee
and
\be\label{pe3}
\int_{\Sn}|1+\tilde{w}+\mu+\eta\cdot x|^{2n/(n-2\sigma)}x=0.
\ee
Furthermore, $\mu(0)=0, \eta(0)=0, D\mu(0)=0$ and $D\eta(0)=0$.
\end{lem}
Let us use $\tilde{w}\in T_1\M_0$ as local coordinates of $w\in \M_0$ near $w=1$, and $\tilde{w}=0$ corresponds to $w=1$.
Let
\[
\tilde{E}(\tilde{w})=E_1(w)=\Xint-_{\mathbb{S}^n}wP_\sigma(w),
\]
where $\tilde{w}\in T_1\M_0$ and $w=1+\tilde{w}+\mu(\tilde{w})+\eta(\tilde{w})\cdot x$ as in Lemma \ref{lempe4}.
It is well known (see, e.g., \cite{Mo}) that the spherical harmonics are eigenfunctions of $P_\sigma$, with
eigenvalues
\[
\lda_k=\frac{\Gamma(k+\frac{n}{2}+\sigma)}{\Gamma(k+\frac{n}{2}-\sigma)}, \quad k\geq 0,
\]
with multiplicity $(2k+n-1)(k+n-2)!/(n-1)!k!$. Note that $\lda_0=P_{\sigma}(1)$.
Since $P_\sigma$ is a linear operator, it follows from Lemma \ref{lempe2}, Lemma \ref{lempe3} and Lemma \ref{lempe4} that
\[
\tilde{E}(\tilde{w})=P_{\sigma}(1)(1+2\mu(\tilde{w}))+\Xint-_{\mathbb{S}^n}\tilde{w}P_\sigma(\tilde{w})+o(\|\tilde{w}\|^2_{H^\sigma(\mathbb{S}^n)}).
\]
By \eqref{pe2}, it follows that
\[
\begin{split}
\mu(\tilde{w})&=\frac12 D^2\mu(0)(\tilde{w},\tilde{w})+o(\|\tilde{w}\|^2_{H^\sigma(\mathbb{S}^n)})\\&
=-\frac12\cdot \frac{n+2\sigma}{n-2\sigma}\Xint-_{\Sn}\tilde{w}^2+o(\|\tilde{w}\|^2_{H^\sigma(\mathbb{S}^n)}).
\end{split}
\]
Note that $\lda_1=\frac{n+2\sigma}{n-2\sigma}P_{\sigma}(1)$. Therefore, we have
\be\label{pe4}
\tilde{E}(\tilde{w})=P_{\sigma}(1)+\Xint-_{\mathbb{S}^n}(\tilde{w}P_\sigma(\tilde{w})-\lda_1\tilde{w}^2)
+o(\|\tilde{w}\|^2_{H^\sigma(\mathbb{S}^n)}).
\ee
Set $Q(\tilde{w}):=\Xint-_{\mathbb{S}^n}(\tilde{w}P_\sigma(\tilde{w})-\lda_1\tilde{w}^2)$.
By Lemma \ref{lempe3}, we see that for any $\tilde{w},\tilde{v}\in T_1\M_0$
\be \label{pe5}
\begin{split}
D^2Q(\tilde{w})(\tilde{v},\tilde{v})&=2\Xint-_{\mathbb{S}^n}\tilde{v}P_\sigma(\tilde{v})-
\lda_1\tilde{v}\tilde{v}\\&
\geq 2(1-\frac{\lda_1}{\lda_2})\|\tilde{v}\|^2_{H^\sigma(\Sn)},
\end{split}
\ee
which means the quadratic form $Q(\tilde{w})$ is positive definite in $T_1\M_0$. Moreover, it follows from \eqref{pe1} that
for some $\va_1=\va_1(n,\sigma)>0$,
\be\label{pe6}
\|E_K|_{\M_0}-E_1|_{\M_0}\|_{C^2(B_{\va_1}(1))}\leq O(\va),
\ee
provided $\|K-1\|_{L^\infty(\mathbb{S}^n)}\leq \va$. Here $B_{\va_1}(1)$ denotes the ball in $\M_0$ of
radius $\va_1$ centered at $1$.
It is elementary to compute that for any $\tilde{w}\in T_1\M_0$, we have, for any constant $c$, that
\[
\langle DE_K|_{\M_0}(1),\tilde{w}\rangle=-2P_{\sigma}(1)\left(\Xint-_{\mathbb{S}^n}K\right)^{(2\sigma-2n)/n}\Xint-_{\mathbb{S}^n}(K-c)\tilde{w}.
\]
It follows that
\[
\begin{split}
| \langle DE_K|_{\M_0}(1),\tilde{w}\rangle|&\leq C\|K-c\|_{L^{2n/(n+2\sigma)}}\|\tilde{w}\|_{L^{2n/(n-2\sigma)}}\\&
\leq C\|K-c\|_{L^{2n/(n+2\sigma)}}\|\tilde{w}\|_{H^{\sigma}}.
\end{split}
\]
Therefore,
\be\label{pe7}
\| DE_K|_{\M_0}(1)\|\leq C\|K-c\|_{L^{2n/(n+2\sigma)}}.
\ee
\begin{lem}\label{lempe5}
Let $K\in C^1(\Sn)$. There exist $\va_2=\va_2(n,\sigma)>0$, $\va_3=\va_3(n,\sigma)>0,$ such that, if
$\|K-1\|_{L^\infty(\mathbb{S}^n)}\leq \va \leq \va_2$,
\[
\min_{w\in \M_0,\ \|w-1\|_{H^\sigma}\leq \va_3} E_K(w)
\]
has a unique minimizer $w_K$. Furthermore, $D^2E_K|_{\M_0}(w_K)$ is positive definite and
\be\label{pe8}
w_K>0\quad \mbox{on }\mathbb{S}^n,
\ee
\be\label{pe9}
\|w_K-1\|_{H^\sigma}\leq C(n,\sigma)\inf_{c\in \R}\|K-c\|_{L^{2n/(n+2\sigma)}},
\ee
\be\label{pe10}
\|w_K-1\|_{L^{\infty}}+\|P_\sigma(w_K-1)\|_{L^{\infty}}\leq o_{\va}(1),
\ee
where $o_{\va}(1)$ denotes some quantity depending only on $n,\sigma$ which tends to $0$ as $\va\to 0$.
If $\sigma\geq \frac{1}{2}$, then there exists $C(n,\sigma,\va_2)>0$ such that
\be\label{gradient estimate for wK}
\|\nabla w_K\|_{L^2}\leq C(n,\sigma,\va_2) \inf_{\tilde c\in [1/2,2]}\|K-\tilde c\|_{L^2}.
\ee
\end{lem}
\begin{proof}
It follows from \eqref{pe4}, \eqref{pe5} and \eqref{pe6} that the minimizing problem has a unique minimizer $w_K$
and $D^2E_K|_{\M_0}(w_K)$ is positive definite. \eqref{pe9} follows from \eqref{pe5}, \eqref{pe6}, \eqref{pe7} and some standard functional analysis arguments.
Since $w_K$ is a constrained local minimum, $w_K$ satisfies the Euler-Lagrange equation for some
Lagrange multiplier $\Lda_K\in \R^{n+1}$:
\be\label{pe11}
P_\sigma(w_K)=(\lda_KK-\Lda_K\cdot x)|w_K|^{4\sigma/(n-2\sigma)}w_K\quad \mbox{on } \mathbb{S}^n,
\ee
where
\[
\lda_K=\frac{\Xint-_{\mathbb{S}^n}w_KP_\sigma(w_K)}{
\Xint-_{\mathbb{S}^n}K|w_K|^{\frac{2n}{n-2\sigma}}
}.
\]
It is clear that $|\lda_K-c(n,\sigma)|=O(\va)$ (recall that $c(n,\sigma)$, defined in \eqref{main equ}, equals $P_{\sigma}(1)$). Since $P_\sigma$ is a self-adjoint operator and $P_\sigma(\Lda_K\cdot x)=\lda_1(n,\sigma)\Lda_K\cdot x$,
multiplying \eqref{pe11} by $\Lda_K\cdot x$ and integrating over both sides we have
\be\label{estimate for Lda}
\begin{split}
&\lda_1(n,\sigma)\int_{\mathbb{S}^n}w_K\Lda_K\cdot x\\&=\lda_K\int_{\mathbb{S}^n}K|w_K|^{4\sigma/(n-2\sigma)}w_K\Lda_K\cdot x-
\int_{\mathbb{S}^n}(\Lda_K\cdot x)^2|w_K|^{4\sigma/(n-2\sigma)}w_K.
\end{split}
\ee
Making use of the fact that $\|K-1\|_{L^\infty}\leq \va$ and $\|w_K-1\|_{H^\sigma}\leq O(\va)$, we conclude that
\be\label{eq:lda bound}
|\Lda_K|=O(\va).
\ee
Set $w_K=w_K^+-w_K^-$. Note that $\int_{\mathbb{S}^n}|w_K^-|^{2n/(n-2\sigma)}\leq\int_{\mathbb{S}^n}|w_K-1|^{2n/(n-2\sigma)}\leq o_\va(1)$. On the other hand, we have, by multiplying \eqref{pe11} by
$-w_K^-$,
\[
\begin{split}
C\int_{\Sn}(w_K^-)^{\frac{2n}{n-2\sigma}}
&\geq \int_{\mathbb{S}^n}w_K^-P_\sigma(-w_K)\\
&\geq\int_{\mathbb{S}^n}w_K^-P_\sigma(w_K^-)\\%\leq o_\va(1)\int_{\mathbb{S}^n}w_K^-P_\sigma(w_K^-),
&\geq c(n,\sigma)\left(\int_{\Sn}(w_K^-)^{\frac{2n}{n-2\sigma}}\right)^{\frac{n-2\sigma}{n}},
\end{split}
\]where we used \eqref{pe1}.
Therefore, we conclude that $w_K^-=0$. Then \eqref{pe8} follows from \eqref{description of P sigma} and \eqref{pe11}.
It follows from \eqref{pe9}, \eqref{pe11}, \eqref{eq:lda bound}, Lemma 2.2 in \cite{JLX} and Proposition 2.4 in \cite{JLX} that
\be\label{equation:wK bound}
\|w_K-1\|_{L^{\infty}} \leq o_{\va}(1),
\ee
which, together with \eqref{pe11}, leads to
\[
\|P_\sigma(w_K-1)\|_{L^{\infty}} \leq o_{\va}(1).
\]
Then \eqref{pe10} follows immediately.
By \eqref{equation:wK bound} and \eqref{pe9}, we can see that for any $\tilde c\in [1/2,2]$,
\[
\begin{split}
|\lda_K-P_{\sigma}(1)/\tilde c|&\leq C(n,\sigma,\va_2)(\|w_K-1\|_{H^{\sigma}}+\|w_K-1\|_{L^1}+\|K-\tilde c\|_{L^1})\\
&\leq C(n,\sigma,\va_2)\|K-\tilde c\|_{L^{2n/(n+2\sigma)}}.
\end{split}
\]
From \eqref{estimate for Lda}, \eqref{pe8}, \eqref{equation:wK bound} and the fact that $w_K\in \M_0$, we have that
\[
|\Lda_K|^2\leq C(n,\sigma)|\Lda_K|\left(\int_{\Sn}|K-\tilde c|+\int_{\Sn}|w_K-1|\right).
\]
By \eqref{pe9} and H\"older inequalities we have
\[
|\Lda_K|\leq C(n,\sigma)\|K-\tilde c\|_{L^{2n/(n+2\sigma)}}.
\]
Thus
$$\|(\lda_KK-\Lda_K\cdot x)w_K^{\frac{n+2\sigma}{n-2\sigma}}-P_{\sigma}(1)\|_{L^2}\le C(n,\sigma,\va_2)\|K-\tilde c\|_{L^2}.$$
Since
\[
P_{\sigma}(w_K-1)=(\lda_KK-\Lda_K\cdot x)w_K^{\frac{n+2\sigma}{n-2\sigma}}-P_{\sigma}(1),
\]
by expanding $w_K-1$ in spherical harmonics and using the eigenvalues of $P_{\sigma}$, it is easy to see that, for $\sigma\geq \frac{1}{2}$,
\[
\|w_K-1\|^2_{H^1}\leq \int_{\Sn} \big(P_{\sigma}(w_K-1)\big)^2 \le C(n,\sigma,\va_2)\|K-\tilde c\|^2_{L^2}.
\]
Hence \eqref{gradient estimate for wK} holds.
\end{proof}
For $P\in\Sn,\ t\ge 1$, we write $v\in \M$ as $v=\varpi (w,p)=T^{-1}_{\varphi_p}w,\ w\in\M_0,\ p=sP,\ s=(t-1)/t$. Write $E_K(v)$ in the $(w,p)$ variables:
\[
I(w,p):=E_K(v)=E_{K\circ\varphi_p}(w).
\]
Consider, for each $p\in B_1$, that
\[
\min_{w\in\M_0,\ \|w-1\|_{H^{\sigma}}\leq \va_3}I(w,p)=\min_{w\in\M_0,\ \|w-1\|_{H^{\sigma}}\leq \va_3}E_{K\circ\varphi_{p}}(w).
\]
It follows from Lemma \ref{lempe5} that for $\|K-1\|_{L^{\infty}(\Sn)}\leq \va\leq \va_2$,
the minimizer exists and we denote it by $w_p$, where $p=(t-1)P/t$, $P\in \Sn$. Set $v_p=T^{-1}_{\varphi_p}w_p$.
We also know from \eqref{pe8} that $w_p>0$ on $\Sn$. As illustrated in \eqref{pe11}, we have for some $\Lda_p\in\R^{n+1}$ that
\be\label{eq:lag-mul}
P_\sigma(w_p)=(\lda_pK\circ\varphi_p-\Lda_p\cdot x)w_p^{(n+2\sigma)/(n-2\sigma)}\quad \mbox{on } \mathbb{S}^n,
\ee
where
\[
\lda_p=\frac{\Xint-_{\mathbb{S}^n}w_p P_\sigma(w_p)}{
\Xint-_{\mathbb{S}^n}K\circ\varphi_p w_p^{\frac{2n}{n-2\sigma}}
}.
\]
It follows from the Kazdan-Warner type condition \eqref{1.3} that
\[
\int_{\Sn}\langle\nabla(\lda_pK\circ\varphi_p-\Lda_p\cdot x),\nabla x\rangle w_p^{\frac{2n}{n-2\sigma}}=0.
\]
Namely,
\be\label{eq:lag-mul-2}
\sum_{j=1}^{n+1}\Lda_p^j\int_{\Sn}\langle\nabla x_j,\nabla x_i\rangle w_p^{\frac{2n}{n-2\sigma}}=\lda_p\int_{\Sn}\langle\nabla(K\circ\varphi_p),\nabla x_i\rangle w_p^{\frac{2n}{n-2\sigma}},\ 1\leq i\leq n+1.
\ee
It follows from the implicit function theorem that $w_p$ depends $C^2$-smoothly on $p$. Hence,
together with the fact that the matrix $\big[\int_{\Sn}\langle\nabla x_j,\nabla x_i\rangle w_p^{\frac{2n}{n-2\sigma}}\big]$
is positive definite, we have that both $\lda_p$ and $\Lda_p$ depend $C^2$-smoothly on $p$.
Let
\[
\begin{split}
\mathcal{N}_1&=\{w\in \M_0\ |\ \|w-1\|_{H^{\sigma}}\le \va_3\},\\
\mathcal{N}_2(\tilde t)&=\{v\in \M\ |\ v=\varpi (w,p) \ \mbox{for some } w\in\mathcal{N}_1 \\
& \quad\quad\mbox{and } p=sP, P\in \Sn, s=\frac{t-1}{t}, 1\le t<\tilde t\},\\
\mathcal{N}_2&=\mathcal{N}_2(\infty),\\
\mathcal{N}_3(\tilde t)&=\{v\in H^{\sigma}\setminus\{0\}\ |\ \|v\|^{-1}_{L^{2n/(n-2\sigma)}}v\in \mathcal{N}_2(\tilde t)\},\\
\mathcal{N}_3&=\mathcal{N}_3(\infty).
\end{split}
\]
\begin{thm}\label{perturbation}
Suppose $\sigma\geq \frac{1}{2}$. There exists some constant $\va_4=\va_4(n)\in (0,\va_2)$
such that for any $T_1>0$ and any nonincreasing positive continuous function $\omega(t)$ $(1\leq t<\infty)$
satisfying $\lim_{t\to\infty}\omega(t)=0$, if a nonconstant function $K\in C^1(\Sn)$ satisfies, for $t\geq T_1$, that
\[
\|K-1\|_{L^{\infty}(\Sn)}\leq \va_4,
\]
\be\label{condition for not degree}
\|K\circ\varphi_{P,t}-K(P)\|^2_{L^2(\Sn)}\leq \omega(t)\left|\int_{\Sn}K\circ\varphi_{P,t}(x)x\right|
\ee
for all $P\in\Sn$ and
\be\label{eq:degree not zero}
\deg\left(\int_{\Sn}K\circ\varphi_{P,t}(x)x,B^{n+1},0\right) \neq 0,
\ee
then \eqref{main equ} has at least one positive solution. Furthermore, for any $\al\in (0,1)$ satisfying that $\al+2\sigma$ is not an integer, there exists some positive constant $C_2$ depending only on $n,\al, \sigma, T_1$ and $\omega$ such that for all $C\ge C_2$,
\be\label{eq: degree}
\begin{split}
&\deg(v-(P_{\sigma})^{-1}K|v|^{4\sigma/(n-2\sigma)}v,\\
&\quad\quad \mathcal{N}_3(t)\cap\{v\in C^{2\sigma+\al}\ |\ \|v\|_{C^{2\sigma+\al}}<C\}, 0)\\
&\quad=(-1)^n\deg\left(\int_{\Sn}K\circ\varphi_{P,t}(x)x,B^{n+1},0\right) .
\end{split}
\ee
\end{thm}
\begin{proof}
For $P\in\Sn$ and $t\geq 1$, we set
\[
\begin{split}
&A(P,t)=\frac{1}{n}|\Sn|^{-1}\int_{\Sn}\langle\nabla(K\circ\varphi_{P,t}),\nabla x\rangle w_{p}^{2n/(n-2\sigma)},\\
&G(P,t)=|\Sn|^{-1}\int_{\Sn}K\circ\varphi_{P,t}(x)x.
\end{split}\]
It is clear that $G(P,t)\neq 0$ for all $P\in\Sn$ and $t>T_1$. We write
\[
A(P,t)=G(P,t)+I+II,
\]
where
\[
\begin{split}
I&=|\Sn|^{-1}\int_{\Sn}\big(K\circ\varphi_{P,t}-K(P)\big)x\big(w_p^{2n/(n-2\sigma)}-1\big),\\
II&=-\frac{1}{n}|\Sn|^{-1}\int_{\Sn}\big(K\circ\varphi_{P,t}-K(P)\big)\langle\nabla x, \nabla(w_p^{2n/(n-2\sigma)})\rangle.
\end{split}
\]
Using the Cauchy-Schwarz inequality, \eqref{pe9}, \eqref{pe10} and \eqref{gradient estimate for wK}, we have
\[
\begin{split}
|I|&\leq C \|K\circ\varphi_{P,t}-K(P)\|_{L^2(\Sn)}\|w_p^{2n/(n-2\sigma)}-1\|_{L^2(\Sn)}\\
&\leq C \|K\circ\varphi_{P,t}-K(P)\|_{L^2(\Sn)} \|K\circ\varphi_{P,t}-K(P)\|_{L^{2n/(n+2\sigma)}(\Sn)}\\
&\leq C \omega(t)|G(P,t)|.\\
|II|&\leq C\|K\circ\varphi_{P,t}-K(P)\|_{L^2(\Sn)}\|\nabla(w_p^{2n/(n-2\sigma)})\|_{L^2(\Sn)}\\
&\leq C \|K\circ\varphi_{P,t}-K(P)\|_{L^2(\Sn)}\|K\circ\varphi_{P,t}-K(P)\|_{L^2(\Sn)}\\
&\leq C \omega(t)|G(P,t)|.
\end{split}
\]
It follows immediately that for large $t$,
\[
A(P,t)\cdot G(P,t)\geq (1-C\omega(t))|G(P,t)|^2.
\]
Therefore,
\[
\deg(A(P,t),B^{n+1},0)=\deg(G(P,t),B^{n+1},0).
\]
Since the matrix $[\int_{\Sn}\langle\nabla x_j,\nabla x_i\rangle w_p^{\frac{2n}{n-2\sigma}}]$ is positive definite, we have from \eqref{eq:lag-mul-2} that
\be\label{eq:degree equal}
\deg(\Lda_p, B^{n+1}_s,0)=\deg(A(P,t),B^{n+1},0)=\deg(G(P,t),B^{n+1},0)
\ee
for $s$ sufficiently close to 1.
It follows from \eqref{eq:degree equal} and our hypothesis that for $s$ sufficiently close to 1,
\[
\deg(\Lda_p, B^{n+1}_s,0)\neq 0.
\]
Therefore $\Lda_p$ has to have a zero inside $B^{n+1}$ which immediately implies that \eqref{main equ} has at least one positive solution.
Next we evaluate $\partial_{p}I(w_{p_0},p)|_{p=p_0}$ for $p_0=(t_0-1)P_0/t_0$, $P_0\in\Sn$, $t_0\ge 1$.
For $\|K-1\|_{L^{\infty}(\Sn)}\leq\va\leq\va_{4}$ and for each $p\in B^{n+1}$, there exists a unique $w_p\in\M_0$ with $\|w_p-1\|_{H^{\sigma}}\leq \va_3$ such that
\[
\begin{split}
I(w_{p},p)&=\min_{w\in \M_0, \|w-1\|<\va_3}I(w,p),\\
w_p&>0\quad\mbox{on }\Sn,\\
D^2_w I(w_p,p)&\quad\mbox{is positive definite},\\
\|w_p-1\|_{H^{\sigma}}&\leq C\|K\circ\varphi_{P,t}-K(P)\|_{L^{2n/(n+2\sigma)}(\Sn)},\\
|w_p-1|+|P_\sigma(w_p-1)|&\leq o_{\va}(1).
\end{split}
\]
It can be seen from \eqref{eq:lag-mul} that $v_{p_0}=T^{-1}_{\varphi_{p_0}}w_{p_0}$ satisfies
\[
P_\sigma(v_{p_0})=(\lda_{p_0}K-\Lda_{p_0}\cdot \varphi^{-1}_{p_0})v_{p_0}^{(n+2\sigma)/(n-2\sigma)}\quad \mbox{on } \mathbb{S}^n.
\]
It follows that for any $\psi\in C^{\infty}(\Sn)$, we have
\[
\begin{split}
&\partial_v E_K(v_{p_0})\psi=-2\left(\Xint-_{\Sn}Kv_{p_0}^{2n/(n-2\sigma)}\right)^{\frac{2\sigma-n}{n}}\Xint-_{\Sn}\Lda_{p_0}\cdot \varphi^{-1}_{p_0}v_{p_0}^{\frac{n+2\sigma}{n-2\sigma}}\psi\\
&\partial_{p}I(w_{p_0},p)|_{p=p_0}\\
&=\partial_v E_K(v_{p_0})(\pa_p(T^{-1}_{\varphi_p}w_{p_0})|_{p=p_0})\\
&=-2\left(\Xint-_{\Sn}Kv_{p_0}^{2n/(n-2\sigma)}\right)^{\frac{2\sigma-n}{n}}\Xint-_{\Sn}\Lda_{p_0}\cdot \varphi^{-1}_{p_0}v_{p_0}^{\frac{n+2\sigma}{n-2\sigma}}(\pa_p(T^{-1}_{\varphi_p}w_{p_0})|_{p=p_0})\\
&=-\frac{n-2\sigma}{n}\left(\Xint-_{\Sn}Kv_{p_0}^{2n/(n-2\sigma)}\right)^{\frac{2\sigma-n}{n}}\Xint-_{\Sn}\Lda_{p_0}\cdot \varphi^{-1}_{p_0}(\pa_p(T^{-1}_{\varphi_p}w_{p_0})^{2n/(n-2\sigma)}|_{p=p_0})\\
&=-\frac{n-2\sigma}{n}\left(\Xint-_{\Sn}Kv_{p_0}^{2n/(n-2\sigma)}\right)^{\frac{2\sigma-n}{n}}\pa_p\left(\Xint-_{\Sn}\Lda_{p_0}\cdot \varphi^{-1}_{p_0}(T^{-1}_{\varphi_p}w_{p_0})^{\frac{2n}{n-2\sigma}}\right)\bigg|_{p=p_0}\\
&=-\frac{n-2\sigma}{n}\left(\Xint-_{\Sn}Kv_{p_0}^{2n/(n-2\sigma)}\right)^{\frac{2\sigma-n}{n}}\pa_p\left(\Xint-_{\Sn}\Lda_{p_0}\cdot \varphi^{-1}_{p_0}\circ\varphi_pw_{p_0}^{\frac{2n}{n-2\sigma}}\right)\bigg|_{p=p_0}.
\end{split}
\]
By Appendix A in \cite{Li95}, the matrix
\[
\pa_p\left(\Xint-_{\Sn}\Lda_{p_0}\cdot \varphi^{-1}_{p_0}\circ\varphi_pw_{p_0}^{\frac{2n}{n-2\sigma}}\right)\bigg|_{p=p_0}
\]
is invertible with positive determinant. Therefore, for $t$ large with $s=(t-1)/t$, we have
\be\label{eq:degree-1}
(-1)^{n+1}\deg(\Lda_{p}, B_s, 0)=\deg(\pa_p I(w_p,p), B_s, 0).
\ee
Given Theorem B.1 in \cite{Li95} and Appendix \ref{spaces}, the rest of the proof of \eqref{eq: degree} is similar to that on page 386 of \cite{Li95} and we omit it here.
\end{proof}
Next we will give sufficient conditions for $K$ to satisfy \eqref{condition for not degree}.
The proof of Lemma 6.6 in \cite{Li95} indeed shows the following
\begin{lem}\label{lem condition for not degree}
Suppose $K\in C^{1,1}(\Sn)$ satisfies for some constant $A_1>0$, $K(P)\geq A_1$ for all $P\in\Sn$, and there exists some constant $0<\va_1<1$, such that for each critical point $P_0\in\Sn$ of $K$, there exists some $\beta=\beta(P_0)\in(1,n)$ such that in some geodesic normal coordinate system centered at $P_0$,
\[
K(y)=K(0)+Q^{(\beta)}(y)+R(y),\quad |y|<\va_1,
\]
where $Q^{(\beta)}(\lda y)=\lda^{\beta}Q^{(\beta)}(y)$ for any $\lda>0$, $y\in\R^n$ and
\[
A_6|y|^{\beta-1}\leq |\nabla Q^{(\beta)}(y)|\leq A_7|y|^{\beta-1},
\]
for some positive constants $A_6, A_7$. Here $R(y)$ satisfies $|R(y)||y|^{-\beta}+|\nabla R(y)||y|^{1-\beta}\leq \eta(|y|)$ for some continuous function $\eta$ with $\lim_{r\to 0^+}\eta(r)=0$. Suppose also that for some constant $d>0$, $|\nabla K(P)|\geq d$ for all $P\in\Sn$ with $\min\{|P-P_0|\,:\,\nabla K(P_0)=0\}\ge \va_1/20$. Then there exists some positive constant $C_3$ depending on $n$, $A_1$, $A_6$, $A_7$, $d$, $\va_1$, $\min\{\beta-1, n-\beta\}$, $\eta$ and the modulus of continuity of $\nabla K$, such that for $P\in\Sn$ with $\min\{|P-P_0|\,:\,\nabla K(P_0)=0\}\ge C_3/t$, we have
\be\label{eq:lem condition for not degree}
\|K\circ\varphi_{P,t}-K(P)\|^2_{L^2(\Sn)}\leq o\left(\left|\int_{\Sn}K\circ\varphi_{P,t}(x)x\right|\right)
\ee
as $t\to\infty$.
\end{lem}
The following is Lemma 6.7 in \cite{Li95}.
\begin{lem}\label{lem condition degree not zero}
Suppose $K\in C^{1,1}(\Sn)$ satisfies for some constant $A_1>0$, $K(P)\geq A_1$ for all $P\in\Sn$, and there exists some constant $0<\va_1<1$, such that for each critical point $P_0\in\Sn$ of $K$, there exists some $\beta=\beta(P_0)\in(1,n)$ such that in some geodesic normal coordinate system centered at $P_0$,
\[
K(y)=K(0)+Q^{(\beta)}(y)+R(y),\quad |y|<\va_1,
\]
where $Q^{(\beta)}(\lda y)=\lda^{\beta}Q^{(\beta)}(y)$ for any $\lda>0$, $y\in\R^n$ and
\[
A_6|y|^{\beta-1}\leq |\nabla Q^{(\beta)}(y)|\leq A_7|y|^{\beta-1},
\]
for some positive constants $A_6, A_7$, $R(y)$ denotes some quantity satisfying\\ $\lim_{y\to 0}R(y)|y|^{-\beta} =0$ and $\lim_{y\to 0}|\nabla R(y)||y|^{1-\beta}=0$. Suppose also that for some constant $d>0$, $|\nabla K(P)|\geq d$ for all $P\in\Sn$ with $\min\{|P-P_0|\,:\,\nabla K(P_0)=0\}\ge \va_1/20$, and
\[
\left(
\begin{array}{l}
\int_{\R^n}\nabla Q^{(\beta)}(y+\eta)(1+|y|^2)^{-n}\,\ud y\\[2mm]
\int_{\R^n}Q^{(\beta)}(y+\eta)\frac{|y|^2-1}{|y|^2+1}(1+|y|^2)^{-n}\,\ud y
\end{array} \right)\neq 0 \quad \forall\ \eta\in \R^n.
\]
Then \eqref{eq:lem condition for not degree} holds. In particular, if $K$ is not identically equal to a constant, we have
\[
\int_{\Sn}K\circ\varphi_{P,t}(x)x\neq 0
\]
for large $t$.
\end{lem}
\begin{cor}\label{cor1 of existence}
Suppose $K\in C^{1,1}(\Sn)$ is some positive function satisfying for each critical point $P_0\in\Sn$ of $K$, there exists some $\beta=\beta(P_0)\in(1,n)$ such that in some geodesic normal coordinate system centered at $P_0$,
\[
K(y)=K(0)+Q^{(\beta)}(y)+R(y),\quad \mbox{for all }y\mbox{ close to }0,
\]
where $Q^{(\beta)}(\lda y)=\lda^{\beta}Q^{(\beta)}(y)$ for any $\lda>0$, $y\in\R^n$ and
$R(y)$ denotes some quantity satisfying $\lim_{y\to 0}R(y)|y|^{-\beta} =0$ and $\lim_{y\to 0}|\nabla R(y)||y|^{1-\beta}=0$. Suppose also that
\[
|\nabla Q^{(\beta)}(y)|\sim |y|^{\beta-1}\quad \mbox{for all }y\mbox{ close to }0,
\]
\[
\left(
\begin{array}{l}
\int_{\R^n}\nabla Q^{(\beta)}(y+\eta)(1+|y|^2)^{-n}\,\ud y\\[2mm]
\int_{\R^n}Q^{(\beta)}(y+\eta)\frac{|y|^2-1}{|y|^2+1}(1+|y|^2)^{-n}\,\ud y
\end{array} \right)\neq 0 \quad \forall\ \eta\in \R^n.
\]
Then for $t$ large enough,
\[
\int_{\Sn}K\circ\varphi_{P,t}(x)x\neq 0\quad\mbox{for all}\quad P\in\Sn.
\]
If we further assume that
\[
\|K-1\|_{L^{\infty}(\Sn)}\leq \va_4
\]
and
\[
\deg\left(\int_{\Sn}K\circ\varphi_{P,t}(x)x,B^{n+1},0\right) \neq 0,
\]
then for $\sigma\in [1/2,1)$, \eqref{main equ} has at least one positive $C^2$ solution $v$. Furthermore, for every $0<\al<1$ satisfying that $\al+2\sigma$ is not an integer and sufficiently large positive constant $C_2$, we have
\[
\begin{split}
&\deg(v-(P_{\sigma})^{-1}K|v|^{4\sigma/(n-2\sigma)}v,\\
&\quad\quad \mathcal{N}_3(t)\cap\{v\in C^{2\sigma+\al}\ |\ \|v\|_{C^{2\sigma+\al}}<C_2\}, 0)\\
&\quad=(-1)^n\deg\left(\int_{\Sn}K\circ\varphi_{P,t}(x)x,B^{n+1},0\right) .
\end{split}
\]
\end{cor}
\begin{proof}
It follows from Theorem \ref{perturbation} and Lemma \ref{lem condition degree not zero}.
\end{proof}
\begin{cor}\label{cor2 of existence}
Suppose $K\in C^{1,1}(\Sn)$ is some positive function satisfying for each critical point $P_0\in\Sn$ of $K$, there exists some $\beta=\beta(P_0)\in(1,n)$ such that in some geodesic normal coordinate system centered at $P_0$,
\[
K(y)=K(0)+Q^{(\beta)}(y)+R(y),\quad \mbox{for all }y\mbox{ close to }0,
\]
where $Q^{(\beta)}(\lda y)=\sum_{j=1}^{n}a_{j}|y_j|^{\beta}$, $a_j=a_j(\xi_0)\neq 0$, $\sum_{j=1}^n a_j\neq 0$, and
$R(y)$ denotes some quantity satisfying $\lim_{y\to 0}R(y)|y|^{-\beta} =0$ and $\lim_{y\to 0}|\nabla R(y)||y|^{1-\beta}=0$. Then for $t$ large enough,
\[
\int_{\Sn}K\circ\varphi_{P,t}(x)x\neq 0\quad\mbox{for all}\quad P\in\Sn,
\]
and
\[
\begin{split}
&\deg\left(\int_{\Sn}K\circ\varphi_{P,t}(x)x,B^{n+1},0\right)\\
&\quad=\sum_{\xi\in \Sn\mbox{ such that }\nabla_{g_{\Sn}}K(\xi)=0,\ \sum_{j=1}^na_j(\xi)<0}(-1)^{i(\xi)}- (-1)^n,
\end{split}
\]
where
\[
i(\xi)=\#\{a_j(\xi): \nabla_{g_{\Sn}}K(\xi)=0,a_j(\xi)<0,1\leq j\leq n\}.
\]
If we further assume that
\[
\|K-1\|_{L^{\infty}(\Sn)}\leq \va_4
\]
and
\[
\sum_{\xi\in \Sn\mbox{ such that }\nabla_{g_{\Sn}}K(\xi)=0,\ \sum_{j=1}^na_j(\xi)<0}(-1)^{i(\xi)}\neq(-1)^n,
\]
then for $\sigma\in [1/2,1)$, \eqref{main equ} has at least one positive $C^2$ solution $v$. Furthermore, for every $0<\al<1$ satisfying that $\al+2\sigma$ is not an integer and sufficiently large positive constant $C_2$, we have
\[
\begin{split}
&\deg(v-(P_{\sigma})^{-1}K|v|^{4\sigma/(n-2\sigma)}v,\\
&\quad\quad \mathcal{N}_3(t)\cap\{v\in C^{2\sigma+\al}\ |\ \|v\|_{C^{2\sigma+\al}}<C_2\}, 0)\\
&\quad=(-1)^n\Big(\sum_{\xi\in \Sn\mbox{ such that }\nabla_{g_{\Sn}}K(\xi)=0,\ \sum_{j=1}^na_j(\xi)<0}(-1)^{i(\xi)}\Big)-1.
\end{split}
\]
\end{cor}
\begin{proof}
It follows from the proof of Corollary 6.2 in \cite{Li95} and Corollary \ref{cor1 of existence}.
\end{proof}
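The index count appearing in Corollary \ref{cor2 of existence} is elementary to evaluate once the coefficients $a_j(\xi)$ at the critical points of $K$ are known. Purely as an illustration of this bookkeeping (the following script and its critical-point data are ours and hypothetical, and are not tied to any particular $K$), the degree may be computed as follows.
\begin{verbatim}
# Bookkeeping for the degree formula of Corollary "cor2 of existence":
#   deg = sum_{xi : grad K(xi)=0, sum_j a_j(xi) < 0} (-1)^{i(xi)}  -  (-1)^n,
#   where i(xi) = #{j : a_j(xi) < 0}.
def degree_count(critical_points, n):
    # critical_points: list of tuples (a_1(xi), ..., a_n(xi)), one per critical point
    total = 0
    for a in critical_points:
        if sum(a) < 0:                          # only these critical points enter
            i_xi = sum(1 for aj in a if aj < 0)
            total += (-1) ** i_xi
    return total - (-1) ** n

# Hypothetical data for n = 3: two points with all a_j < 0, one with mixed signs.
data = [(-1.0, -1.0, -1.0), (-2.0, -1.0, -3.0), (1.0, 2.0, -0.5)]
print(degree_count(data, 3))   # equals -1 here, so the degree is nonzero
\end{verbatim}
A nonzero output corresponds precisely to the hypothesis $\sum_{\xi}(-1)^{i(\xi)}\neq(-1)^n$ in the corollary.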
\begin{thm}\label{thm:biggest one}
Let $\sigma \in (0,1)$. Suppose that $K\in C^{1,1}(\Sn)$, for some constant $A_1>0$,
\[
1/A_1\leq K(\xi)\leq A_1\quad \mbox{for all }\xi\in \Sn.
\]
Suppose also that for any critical point $\xi_0$ of $K$,
under the stereographic projection coordinate system $\{y_1, \cdots, y_n\}$ with $\xi_0$ as south pole, there exist
some small neighborhood $\mathscr{O}$ of $0$, a positive constant $L$, and $\beta=\beta(\xi_0)\in(n-2\sigma, n)$ such that
\[
\|\nabla^{[\beta]}K\|_{C^{\beta-[\beta]}(\mathscr{O})}\leq L
\]
and
\[
K(y)=K(0)+Q_{\xi_0}^{(\beta)}(y)+R_{\xi_0}(y)\quad \mbox{in }\mathscr{O},
\]
where $Q_{\xi_0}^{(\beta)}(y)\in C^{[\beta]-1,1}(\mathbb{S}^{n-1})$ satisfies
$Q_{\xi_0}^{(\beta)}(\lda y)=\lda^{\beta}Q_{\xi_0}^{(\beta)}(y)$, $\forall \lda >0$, $y\in \R^n$,
\[
|\nabla Q^{(\beta)}(y)|\sim |y|^{\beta-1}\quad y\in \mathscr{O},
\]
\[
\left(
\begin{array}{l}
\int_{\R^n}\nabla Q^{(\beta)}(y+\eta)(1+|y|^2)^{-n}\,\ud y\\[2mm]
\int_{\R^n}Q^{(\beta)}(y+\eta)(1+|y|^2)^{-n}\,\ud y
\end{array} \right)\neq 0, \quad \forall\ \eta\in \R^n,
\]
and
\[
\left(
\begin{array}{l}
\int_{\R^n}\nabla Q^{(\beta)}(y+\eta)(1+|y|^2)^{-n}\,\ud y\\[2mm]
\int_{\R^n}Q^{(\beta)}(y+\eta)\frac{|y|^2-1}{|y|^2+1}(1+|y|^2)^{-n}\,\ud y
\end{array} \right)\neq 0, \quad \forall\ \eta\in \R^n,
\]
and $R_{\xi_0}(y)\in C^{[\beta]-1,1}(\mathscr{O})$ satisfies $\lim_{y\to 0}\sum_{s=0}^{[\beta]}|\nabla^s R_{\xi_0}(y)|\,|y|^{-\beta+s}=0$. Then for any $\va, \delta>0$, there exists a positive constant $C(K, n,\delta,\va)$ such that for all $\va\le\mu\le 1$ and all $\delta\le\sigma\le 1-\delta$, every positive solution $v$ of \eqref{main equ} with $K$ replaced by $K_\mu=\mu K+(1-\mu)$ satisfies
\be\label{eq:final1}
1/C(K,n,\delta,\va)\leq v\leq C(K,n,\delta,\va)\quad \mbox{on }\Sn.
\ee
Also, for large $t$,
\be\label{eq:final2}
\int_{\Sn}K\circ\varphi_{P,t}(x)x\neq 0 \quad\mbox{for all}\quad P\in \Sn.
\ee
If we further assume that
\be\label{eq:final3}
\deg\left(\int_{\Sn}K\circ\varphi_{P,t}(x)x,B^{n+1},0\right)\neq 0,
\ee
then \eqref{main equ} has at least one $C^2$ positive solution.
\end{thm}
\begin{proof}
Given that $\delta\le \sigma\le 1-\delta$, the estimates established in \cite{JLX} depend on $\delta$ instead of $\sigma$. Hence, \eqref{eq:final1} has actually been proved in \cite{JLX}. \eqref{eq:final2} follows from Lemma \ref{lem condition degree not zero}. In the following we will show the existence part. We first consider the case $\sigma\in[1/2,1)$.
Claim: there exists some constant $\va_7>0$ such that for $0\le\mu\le\va_7$ we have $\|K_\mu-1\|_{L^{\infty}(\Sn)}<\va_4$, and if $v$ is any solution of \eqref{main equ} with $K=K_\mu$ $(0\le \mu\le \va_7)$ and $\varpi^{-1}v= (w,p)$, $(w,p)\in\M_0\times B^{n+1}$, then $w\in\mathcal{N}_1$.
This claim can be proved by contradiction. Suppose that, along a subsequence $\mu\to 0$, there exist solutions $v_\mu$ of \eqref{main equ} with $K=K_\mu$ but $\|w_\mu-1\|_{H^{\sigma}}\ge \va_3$, where $\varpi^{-1}v_\mu= (w_\mu,p_\mu)$, $(w_\mu,p_\mu)\in\M_0\times B^{n+1}$. It follows from Theorem 5.2 in \cite{JLX} that after passing to a subsequence, either $\{v_\mu\}$ stays bounded in $L^{\infty}(\Sn)$ or it has precisely one isolated simple blow up point. It is clear that $w_\mu$ satisfies
\be\label{eq:wmu}
P_{\sigma}(w_\mu)=c(n,\sigma)(K_\mu\circ\varphi_\mu)w_\mu^{(n+2\sigma)/(n-2\sigma)},
\ee
where $\varphi_\mu$ is the conformal transformation corresponding to $p_\mu$. It follows that $w_\mu\in C^{2\sigma+\al}$ for any $\al\in (0,1)$ satisfying that $2\sigma+\al$ is not an integer.
It is not difficult to see from the estimates on isolated simple blow up point in \cite{JLX}, \eqref{eq:wmu} and local estimates established in \cite{JLX} that in either case we have, after passing to a subsequence,
\[
w_\mu\to w \quad \mbox{in } C^{\beta}(\Sn)\quad\mbox{and}\quad w_\mu\rightharpoonup w \quad\mbox{weakly in } H^{\sigma}(\Sn)
\]
for some $w\in \M_0, w>0, \beta\in (0,1)$. Sending $\mu$ to $0$, we have
\[
P_{\sigma}(w)=c(n,\sigma)w^{(n+2\sigma)/(n-2\sigma)}.
\]
It follows that $w\equiv 1$. Using \eqref{eq:wmu} again, we have
\[
\|w_\mu-1\|^2_{H^{\sigma}}=\int_{\Sn} (w_\mu-1)P_{\sigma}(w_\mu-1)\leq C\int_{\Sn}|w_\mu-1|\to 0\quad \mbox{as}\quad \mu\to 0.
\]
This is a contradiction. The claim is proved.
On the other hand, it follows from Theorem 5.2 and Theorem 5.3 in \cite{JLX} and the Harnack inequality that there exists some constant $C^*>1$ such that for all $\va_7\le\mu\le 1$,
\[
1/C^* \le v_\mu \le C^*,
\]
where $v_\mu$ is any solution of \eqref{main equ} with $K=K_\mu$.
It follows from the homotopy property of the Leray Schauder degree and Corollary \ref{cor1 of existence} that
\[
\begin{split}
&\deg(v-(P_{\sigma})^{-1}Kv^{(n+2\sigma)/(n-2\sigma)}, C^{2\sigma+\al}(\Sn)\cap\{1/C^* \le v \le C^*\},0)\\
&\quad=\deg(v-(P_{\sigma})^{-1}K_{\va_7}v^{(n+2\sigma)/(n-2\sigma)}, C^{2\sigma+\al}(\Sn)\cap\{1/C^* \le v \le C^*\},0)\\
&\quad=(-1)^n\deg\left(\int_{\Sn}K_{\va_7}\circ\varphi_{P,t}(x)x,B^{n+1},0\right)\\
&\quad=(-1)^n\deg\left(\int_{\Sn}K\circ\varphi_{P,t}(x)x,B^{n+1},0\right)\\
&\quad\neq 0.
\end{split}
\]
The existence of solutions of \eqref{main equ} for $\sigma\ge 1/2$ follows immediately.
For the case $\sigma\in (0,1/2)$, we consider the problem for $\sigma_t=t\sigma+2(1-t)/3$, and the existence for $\sigma$ follows from a degree argument.
\end{proof}
\begin{proof}[Proof of the existence part of Theorem \ref{general exist}]
It follows from Theorem \ref{thm:biggest one} and Corollary \ref{cor2 of existence}.
\end{proof}
\section{A fractional Aubin inequality}\label{improved inequality}
Let
\[
\M^p=\left\{v \in H^{\sigma}(\mathbb{S}^n):\Xint-_{\mathbb{S}^n}|v|^{p}\,\ud v_{g_{\Sn}}=1\right\},
\]
\[
\M_0^p=\left\{v\in \M^p:\Xint-_{\mathbb{S}^n} x|v|^{p}\,\ud v_{g_{\Sn}}=0\right\}.
\]
The Sobolev inequality \eqref{pe1} states that
\[
\min_{v\in \M^{\frac{2n}{n-2\sigma}} }\Xint-_{\Sn} vP_{\sigma}(v)\geq P_{\sigma}(1).
\]
\begin{prop}\label{aubin inequality}
For $\sigma\in (0,1)$, $n\geq 2$, $2<p\leq\frac{2n}{n-2\sigma}$, given any $\va>0$, there exists some constant $C_{\va}\ge 0$ such that
\be\label{eq:aubin inequality}
\inf_{v\in \M^{p}_0}\left\{2^{\frac{2}{p}-1}(1+\va)\Xint-_{\Sn} v P_{\sigma}(v)+C_{\va}\Xint- _{\Sn}v^2\right\}\geq P_{\sigma}(1).
\ee
\end{prop}
When $\sigma=1$, the above proposition was proved by Aubin \cite{Au79}. See also \cite{D1} for such an inequality in some higher order Sobolev spaces.
\begin{proof}
First of all, by the H\"older inequality, \eqref{pe1} and \eqref{description of P sigma}, we have for all $v\in H^{\sigma}(\Sn)$,
\be\label{p-sobolev}
\begin{split}
\left(\int_{\Sn}|v|^p\right)^{\frac 2p}&\leq K^2\int_{\Sn}vP_{\sigma}(v)\\
&=K^2P_{\sigma}(1)\int_{\Sn}v^2+\frac{K^2c_{n,-\sigma}}{2}\iint_{\Sn\times\Sn}\frac{(v(x)-v(y))^2}{|x-y|^{n+2\sigma}},
\end{split}
\ee
where $K^2:=|\Sn|^{\frac 2p -1}(P_{\sigma}(1))^{-1}$.
Let $\eta\in(0,\frac12)$ be a constant to be chosen later. Let $\Lambda$ be the space of first order spherical harmonics. As shown in \cite{Au79}, there exist $\{\xi_i\}_{i=1,\cdots,k}\subset\Lambda$ such that $1+\eta<\sum_{i=1}^k|\xi_i|^{\frac 2p}<1+2\eta$ with $|\xi_i|<2^{-p}$. Let $h_i\in C^1(\Sn)$ be such that $h_i\xi_i\ge 0$ on $\Sn$ and
\[
\big||h_i|^2-|\xi_i|^{\frac 2p}\big|<\left(\frac\eta k\right)^p.
\]
Then
\[
1<\sum_{i=1}^k|h_i|^2<1+3\eta,
\]
and by the mean value theorem
\[
\big||h_i|^p-|\xi_i|\big|\leq\frac p2 \left(\frac\eta k\right)^p.
\]
For any nonnegative $v\in H^{\sigma}(\Sn)$, we have,
\[
\begin{split}
\left(\int_{\Sn}v^p \right)^{\frac 2p}=\|v^2\|_{L^{\frac p2}(\Sn)}&\le \|\sum_{i=1}^k|h_i|^2v^2\|_{L^{\frac p2}(\Sn)}\\
&\le \sum_{i=1}^k\||h_i|^2v^2\|_{L^{\frac p2}(\Sn)}= \sum_{i=1}^k\left(\int_{\Sn}|h_i|^pv^p \right)^{\frac 2p}.
\end{split}
\]
Given $f: \Sn\to\R$, denote $f_+=\max(f,0)$ and $f_-=\max(-f,0)$. For $v\in \M_0^p$, one has that
\[
\int_{\Sn}\xi_{i+}v^p=\int_{\Sn}\xi_{i-}v^p.
\]
Hence for a nonnegative function $v\in \M_0^p$, it follows from \eqref{p-sobolev} and $h_i\xi_i\ge 0$ that
\[
\begin{split}
\left(\int_{\Sn}|h_i|^pv^p \right)^{\frac 2p}&=\left(\int_{\Sn}h_{i+}^pv^p +\int_{\Sn}h_{i-}^pv^p \right)^{\frac 2p}\\
&\leq \left(\int_{\Sn}\xi_{i+}v^p +\int_{\Sn}\va_0^p v^p+\int_{\Sn}h_{i-}^pv^p \right)^{\frac 2p}\\
&\leq 2^{\frac 2p}\left(\int_{\Sn}\va_0^p v^p+\int_{\Sn}h_{i-}^pv^p \right)^{\frac 2p}\\
&\leq 2^{\frac 2p}\left(\int_{\Sn}(\va_0+h_{i-})^pv^p \right)^{\frac 2p}\\
&\leq 2^{\frac 2p}\left(K^2P_{\sigma}(1)\int_{\Sn}(h_{i-}+\va_0)^2v^2+\frac{K^2c_{n,-\sigma}}{2} I\right),
\end{split}
\]
where $\va_0=(\frac p2)^{1/p}\frac {\eta}{k}$,
\[
\begin{split}
I&=\iint_{\Sn\times\Sn}\frac{((h_{i-}(x)+\va_0)v(x)-(h_{i-}(y)+\va_0)v(y))^2}{|x-y|^{n+2\sigma}}.
\end{split}
\]
Since
\[
\begin{split}
&((h_{i-}(x)+\va_0)v(x)-(h_{i-}(y)+\va_0)v(y))^2\\
&=(h_{i-}(x)-h_{i-}(y))^2v(x)^2 +(h_{i-}(y)+\va_0)^2(v(x)-v(y))^2\\
&\quad +2(h_{i-}(x)-h_{i-}(y))v(x)(h_{i-}(y)+\va_0)(v(x)-v(y)),\\
\end{split}
\]
we have
\[
\begin{split}
I
&\leq \int_{\Sn}v^2(x)\int_{\Sn}\frac{(h_{i-}(x)-h_{i-}(y))^2}{|x-y|^{n+2\sigma}}+\int_{\Sn}(h_{i-}(y)+\va_0)^2\int_{\Sn}\frac{(v(x)-v(y))^2}{|x-y|^{n+2\sigma}}\\
&\quad+2C_1\left(\iint_{\Sn\times\Sn}\frac{(v(x)-v(y))^2}{|x-y|^{n+2\sigma}}\right)^{\frac 12}\left(\iint_{\Sn\times\Sn}\frac{v^2(x)(h_{i-}(x)-h_{i-}(y))^2}{|x-y|^{n+2\sigma}}\right)^{\frac 12}\\
&\leq C_2\int_{\Sn}v^2+\int_{\Sn}(h_{i-}(y)+\va_0)^2\int_{\Sn}\frac{(v(x)-v(y))^2}{|x-y|^{n+2\sigma}}\\
&\quad+\frac{\eta}{k}\iint_{\Sn\times\Sn}\frac{(v(x)-v(y))^2}{|x-y|^{n+2\sigma}}+C_\eta \int_{\Sn}v^2(x)\int_{\Sn}\frac{(h_{i-}(x)-h_{i-}(y))^2}{|x-y|^{n+2\sigma}}\\
&\leq C\int_{\Sn}v^2+\int_{\Sn}(h_{i-}(y)+\va_0)^2\int_{\Sn}\frac{(v(x)-v(y))^2}{|x-y|^{n+2\sigma}}+\frac{\eta}{k}\iint_{\Sn\times\Sn}\frac{(v(x)-v(y))^2}{|x-y|^{n+2\sigma}},
\end{split}
\]
where in the second inequality we have used the Cauchy-Schwarz inequality, $C_1=\max_{\Sn}|h_{i-}+\va_0|^2$, $C_2=\max_{x\in\Sn}\int_{\Sn}\frac{(h_{i-}(x)-h_{i-}(y))^2}{|x-y|^{n+2\sigma}}\,\ud y$, $C_\eta>0$ depends only on $C_1$ and $\eta$, and $C=C_2+C_2C_\eta$.
Also we can do exactly the same in terms of $h_{i+}$. Hence
\[
\begin{split}
&2\left(\int_{\Sn}v^p\right)^{\frac 2p}\\
&\leq 2^{\frac{2}{p}}\sum_{i=1}^k\frac{K^2c_{n,-\sigma}}{2}\int_{\Sn}\big((h_{i-}(y)+\va_0)^2+(h_{i+}(y)+\va_0)^2\big)\int_{\Sn}\frac{(v(x)-v(y))^2}{|x-y|^{n+2\sigma}}\\
&+2^{\frac{2}{p}}\sum_{i=1}^k(2\frac{\eta}{k})\iint_{\Sn\times\Sn}\frac{(v(x)-v(y))^2}{|x-y|^{n+2\sigma}}+C\int_{\Sn}v^2.
\end{split}
\]
Hence for any $\va>0$, we can choose $\eta$ sufficiently small such that
\[
\begin{split}
\left(\int_{\Sn}v^p\right)^{\frac 2p}&\leq 2^{\frac 2p -2}(K^2c_{n,-\sigma}+\va)\iint_{\Sn\times\Sn}\frac{(v(x)-v(y))^2}{|x-y|^{n+2\sigma}}+C\int_{\Sn}v^2\\
&=2^{\frac 2p -1}(K^{2}+\va c^{-1}_{n,-\sigma})(\int_{\Sn}vP_{\sigma}(v)-P_{\sigma}(1)\int_{\Sn}v^2)+C\int_{\Sn}v^2.
\end{split}
\]
Then the proposition follows immediately from the above and the fact that for $v\in H^{\sigma}(\Sn)$,
\[
\int_{\Sn}|v|P_{\sigma}(|v|)\leq \int_{\Sn}vP_{\sigma}(v).
\]
\end{proof}
\begin{prop}\label{aubin-sobolev inequality}
For $n\ge 2$, there exist some constants $a^*<1$ and some $p^*<\frac{2n}{n-2\sigma}$ both of which depend only on $n$ and $\sigma$, such that for all $p^*\leq p\leq \frac{2n}{n-2\sigma}$,
\be\label{eq:aubin-sobolev}
\inf_{v\in \M^p_0}\left\{ a^*\Xint-_{\Sn} v P_{\sigma}(v)+(1-a^*)P_{\sigma}(1)\Xint-_{\Sn} v^2\right\}\geq P_{\sigma}(1).
\ee
\end{prop}
When $\sigma=1$, the above proposition was proved by Chang and Yang \cite{CY} (see \cite{Li96} for another proof). See also \cite{D1} for such an inequality in some higher order Sobolev spaces. Here we adapt the arguments in \cite{Li96} to show \eqref{eq:aubin-sobolev}.
\begin{proof}
For $v\in H^{\sigma}(\Sn), a>0$, set
\[
I_a(v)=a\Xint-_{\Sn} v P_{\sigma}(v)+(1-a)P_{\sigma}(1)\Xint-_{\Sn} v^2
\]
and
\[
m_{a,p}=\inf_{v\in \M^p_0} I_a(v).
\]
By standard variational methods, $m_{a,p}$ is achieved for $a>0$ and $2\leq p<\frac{2n}{n-2\sigma}$. Moreover, it is easy to see that
\be\label{eq:lim-sub}
\begin{split}
m_{a,p}&\leq P_{\sigma}(1)\quad\mbox{for all }0\leq a\leq 1,\ 2\leq p\leq\frac{2n}{n-2\sigma},\\
\lim\limits_{a\to 1}m_{a,p}&=P_{\sigma}(1)\quad\mbox{uniformly for }2\leq p\leq \frac{2n}{n-2\sigma}.
\end{split}
\ee
Indeed, the inequality in \eqref{eq:lim-sub} follows by taking the test function $v\equiv 1$. The equality in \eqref{eq:lim-sub} follows from the Sobolev and H\"older inequalities.
We argue by contradiction. Suppose that \eqref{eq:aubin-sobolev} fails. Then there exist sequences $\{a_k\}$, $\{p_k\}\subset \R$, $\{v_k\}\subset \M^{p_k}_0$, such that $a_k<1, a_k\to 1, p_k<\frac{2n}{n-2\sigma}, p_k\to\frac{2n}{n-2\sigma}, v_k\geq 0$ and
\be\label{eq:contradiction}
I_{a_k}(v_k)=m_{a_k,p_k}<P_{\sigma}(1).
\ee
By \eqref{eq:contradiction} and \eqref{eq:aubin inequality}, there exists some positive constant $C(n,\sigma)$ independent of $k$ such that
\[
\|v_k\|_{H^{\sigma}(\Sn)}\leq C(n,\sigma),\quad\int_{\Sn}v_k^2\geq 1/C(n,\sigma).
\]
After passing to a subsequence, we have that $v_k\to \bar v$ weakly in $H^{\sigma}(\Sn)$ for some $\bar v\in H^{\sigma}(\Sn)\setminus\{0\}$.
The Euler-Lagrange equation (see, e.g., \eqref{pe11}) satisfied by $v_k$ is
\be\label{eq:minimizer-contrdiction}
a_kP_{\sigma}(v_k)+(1-a_k)P_{\sigma}(1)v_k=m_kv_k^{p_k-1}+\Lambda_k\cdot xv_k^{p_k-1},
\ee
where $m_k=m_{a_k,p_k}$ and $\Lambda_k\in \R^{n+1}$. Multiplying \eqref{eq:minimizer-contrdiction} by $v_k$ and integrating over $\Sn$, we have, by \eqref{eq:lim-sub}
\be\label{eq:lim-k}
\lim\limits_{k\to\infty}\left(\Xint-_{\Sn}v_kP_{\sigma}(v_k)\right)=P_{\sigma}(1).
\ee
We claim that $|\Lambda_k|=O(1)$. Suppose the contrary; then, setting $\xi_k=\Lambda_k/|\Lambda_k|$, we have after passing to a subsequence that $\xi=\lim_{k\to\infty}\xi_k\in\Sn$. Let $\eta\in C^{\infty}(\Sn)$ be any smooth test function. Multiplying \eqref{eq:minimizer-contrdiction} by $\eta/|\Lambda_k|$, integrating over $\Sn$ and sending $k\to\infty$, we have $\int_{\Sn}\xi\cdot x \,\bar v^{\frac{n+2\sigma}{n-2\sigma}}\eta=0$ for every such $\eta$. Hence $\bar v=0$, which is a contradiction.
It is clear that $\bar v$ satisfies
\[
P_{\sigma}(\bar v)=P_{\sigma}(1)\bar v^{\frac{n+2\sigma}{n-2\sigma}}+\Lambda\cdot x\bar v^{\frac{n+2\sigma}{n-2\sigma}},
\]
where $\Lambda=\lim_{k\to\infty}\Lambda_k$. The Kazdan-Warner type identity in \cite{JLX} gives us
\[
\int_{\Sn}\langle\nabla(P_{\sigma}(1)+\Lambda\cdot x),\nabla x\rangle\bar v^{\frac{2n}{n-2\sigma}}=0.
\]
It follows that $\Lambda=0$. Hence $\int_{\Sn}\bar vP_{\sigma}(\bar v)=P_{\sigma}(1)\int_{\Sn}\bar v^{\frac{2n}{n-2\sigma}}$. This together with \eqref{pe1} leads to $\Xint-_{\Sn}\bar v^{\frac{2n}{n-2\sigma}}\geq 1$. On the other hand, $\Xint-_{\Sn}\bar v^{\frac{2n}{n-2\sigma}}\leq \liminf\limits_{k\to\infty}\Xint-_{\Sn}v_k^{p_k}=1$. Hence
\[
\begin{cases}
&\Xint-_{\Sn}\bar v^{\frac{2n}{n-2\sigma}}=1,\\
&\Xint-_{\Sn}\bar vP_{\sigma}(\bar v)=P_{\sigma}(1).
\end{cases}
\]
This together with \eqref{eq:lim-k} leads to $v_k\to \bar v$ in $H^{\sigma}(\Sn)$. Clearly $\bar v\in\M_0^{\frac{2n}{n-2\sigma}}$ and hence $\bar v\equiv 1$.
In the following we will expand $I_a(v)$ for $v\in\M^p_0$ near $1$. Similar to Lemma \ref{lempe3},
\[
T_1\M^p_0=\mathrm{span}\{\mbox{spherical harmonics of degree } \geq 2\}.
\]
We need the following lemma which is a refined version of Lemma \ref{lempe4} and it can be proved in a similar way.
\begin{lem}\label{lempe4-p}
For $\tilde{w}\in T_1\M_0^p$, $\frac{2n-2\sigma}{n-2\sigma}\le p\le\frac{2n}{n-2\sigma}$, $\tilde{w}$ close to $0$, there exist $\mu(\tilde{w})\in \R$, $\eta(\tilde{w})\in \R^{n+1}$ being $C^2$ functions such that
\be\label{pe2-p}
\Xint-_{\mathbb{S}^n}|1+\tilde{w}+\mu+\eta\cdot x|^p=1
\ee
and
\be\label{pe3-p}
\int_{\Sn}|1+\tilde{w}+\mu+\eta\cdot x|^px=0.
\ee
Furthermore, $\mu(0)=0$, $\eta(0)=0$, $D\mu(0)=0$ and $D\eta(0)=0$, and $\mu, \eta$ have uniform (with respect to $p$) $C^2$ moduli of continuity near $0$.
\end{lem}
As before we will use $\tilde w$ as local coordinates of $v\in \M^p_0$.
Let
\[
\tilde{E}(\tilde{w})=I_a(v)=a\Xint-_{\Sn} v P_{\sigma}(v)+(1-a)P_{\sigma}(1)\Xint-_{\Sn} v^2,
\]
where $\tilde{w}\in T_1\M_0$ and $v=1+\tilde{w}+\mu(\tilde{w})+\eta(\tilde{w})\cdot x$ as in Lemma \ref{lempe4-p}.
Hence
\[
\tilde{E}(\tilde{w})=P_{\sigma}(1)(1+2\mu(\tilde{w}))+a\Xint-_{\mathbb{S}^n}\tilde{w}P_\sigma(\tilde{w})+(1-a)P_{\sigma}(1)\Xint-_{\Sn}\tilde w^2+o(\|\tilde{w}\|^2_{H^\sigma(\mathbb{S}^n)}).
\]
Since
\[
\mu(\tilde w)=-\frac{p-1}{2}\Xint-_{\Sn}\tilde w^2+o(\|\tilde{w}\|^2_{H^\sigma(\mathbb{S}^n)}),
\]
we have
\[
\tilde{E}(\tilde{w})=P_{\sigma}(1)+a\Xint-_{\mathbb{S}^n}\tilde{w}P_\sigma(\tilde{w})-(p-2+a)P_{\sigma}(1)\Xint-_{\Sn}\tilde w^2+o(\|\tilde{w}\|^2_{H^\sigma(\mathbb{S}^n)}).
\]
For $a$ close to $1$ and $p$ close to $\frac{2n}{n-2\sigma}$, the quantity $(p-2+a)P_{\sigma}(1)$ is close to $\frac{n+2\sigma}{n-2\sigma}P_{\sigma}(1)=\lda_1(n,\sigma)$, the eigenvalue of $P_{\sigma}$ on first order spherical harmonics. Similar to \eqref{pe5}, there exists some positive constant $C(n,\sigma)$, determined by the gap between this eigenvalue and the eigenvalue of $P_{\sigma}$ on spherical harmonics of degree $2$, such that for $a$ close to $1$ and $p$ close to $\frac{2n}{n-2\sigma}$ we have
\[
a\Xint-_{\mathbb{S}^n}\tilde{w}P_\sigma(\tilde{w})-(p-2+a)P_{\sigma}(1)\Xint-_{\Sn}\tilde w^2\geq \frac{1}{C(n,\sigma)}\Xint-_{\Sn}\tilde wP_{\sigma}(\tilde w),
\]
which implies that $I_{a_k}(v_k)\geq P_{\sigma}(1)$ for $k$ large. This is a contradiction.
\end{proof}
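As a purely numerical aside (ours, not part of the proof), the spectral gap used in the last step can be made explicit if one assumes the standard formula $\lda_k=\Gamma(k+\frac n2+\sigma)/\Gamma(k+\frac n2-\sigma)$ for the eigenvalue of $P_\sigma$ on spherical harmonics of degree $k$; this formula is not restated in the present paper, but it is consistent with $\lda_0=P_\sigma(1)$ and with $\lda_1=\frac{n+2\sigma}{n-2\sigma}P_\sigma(1)$ used above. Under this assumption the coercivity constant can be evaluated directly, as in the following sketch.
\begin{verbatim}
# Spectral-gap illustration; the eigenvalue formula below is an assumption.
from math import gamma

def lam(k, n, sigma):
    return gamma(k + n / 2 + sigma) / gamma(k + n / 2 - sigma)

n, sigma = 3, 0.5
p = 2 * n / (n - 2 * sigma)                  # critical exponent 2n/(n - 2 sigma)
a = 1.0
lam0, lam1, lam2 = (lam(k, n, sigma) for k in (0, 1, 2))

print(lam1 / lam0, (n + 2 * sigma) / (n - 2 * sigma))   # both equal 2 here
# coercivity constant on spherical harmonics of degree >= 2:
print(a * lam2 - (p - 2 + a) * lam0)                     # equals 1 > 0 here
\end{verbatim}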
\appendix
\section{Bessel potential spaces and conformally invariant operators on spheres}\label{spaces}
In this section, we recall some results for $P_\sigma$ and Bessel potential spaces on spheres which can be found in \cite{PS}, \cite{R}, \cite{Stri} and \cite{T}.
Let $\Delta_{g_{\Sn}}$ be the Laplace-Beltrami operator on the standard sphere. For $s>0$ and $1< p<\infty$, the Bessel potential space $H^{s}_p(\Sn)$
is the set consisting of all functions $u\in L^p(\Sn)$ such that
$(1-\Delta_{g_{\Sn}})^{s/2}u\in L^p(\Sn)$, with the norm $\|u\|_{H^{s}_p(\Sn)}:=\|(1-\Delta_{g_{\Sn}})^{s/2}u\|_{L^p(\Sn)}$. When $p=2$, $H_2^{\sigma}(\Sn)$ coincides, with equivalent norms, with the Hilbert space $H^{\sigma}(\Sn)$, the closure of $C^{\infty}(\Sn)$ under the norm
\[
\|u\|_{H^{\sigma}(\Sn)}:=\left(\int_{\Sn}uP_{\sigma}u \,\ud vol_{g_{\Sn}}\right)^{1/2}.
\]
If $sp<n$, then the embedding $H^{s}_p(\Sn) \hookrightarrow L^{\frac{np}{n-sp}}(\Sn)$ is continuous and the embedding $H^{s}_p(\Sn) \hookrightarrow L^q(\Sn)$ is compact for $q<\frac{np}{n-sp}$. If $0<s-\frac{n}{p}<1$, then the embedding $H^{s}_p(\Sn) \hookrightarrow C^{s-\frac{n}{p}}(\Sn)$ is continuous.
It is also well-known (see, e.g., \cite{Mo}) that $P_{\sigma}$ is the inverse of the spherical Riesz potential
\be\label{P sigma inverse}
R_{2\sigma}(f)(\xi)=\frac{\Gamma(\frac{n-2\sigma}{2})}{2^{2\sigma}\pi^{n/2}\Gamma (\sigma)}\int_{\Sn}\frac{f(\zeta)}{|\xi-\zeta|^{n-2\sigma}}\,\ud vol_{g_{\Sn}}(\zeta),\quad f\in L^p(\Sn).
\ee
\begin{prop}[Pavlov and Samko \cite{PS}] \label{norms are same}
Let $u\in L^p(\Sn)$. Then $u\in H^{s}_p(\Sn)$ if and only if there exists a function $v\in L^p(\Sn)$ such that
$u=R_{s}(v)$. Moreover, there exists a positive constant $C_1$ depending only on $n,s,p$ such that
\[
\frac{1}{C_1} \|u\|_{H^{s}_p(\Sn)}\leq \|v\|_{L^p(\Sn)}\leq C_1 \|u\|_{H^{s}_p(\Sn)}.
\]
\end{prop}
\small
\noindent T. Jin
\noindent Department of Mathematics, Rutgers University\\
110 Frelinghuysen Road, Piscataway, NJ 08854, USA
\noindent\emph{Current address:}
\noindent Department of Mathematics, The University of Chicago\\
5734 S. University Avenue, Chicago, IL, 60637 USA\\[1mm]
Email: \textsf{[email protected]}
\noindent Y.Y. Li
\noindent Department of Mathematics, Rutgers University\\
110 Frelinghuysen Road, Piscataway, NJ 08854, USA\\
Email: \textsf{[email protected]}
\noindent J. Xiong
\noindent School of Mathematical Sciences, Beijing Normal University\\
Beijing 100875, China
\noindent\emph{Current address:}
\noindent Beijing International Center for Mathematical Research, Peking University\\
Beijing 100871, China\\[1mm]
Email: \textsf{[email protected]}
\end{document}
\begin{document}
\title[Transmission of coherent information at the onset of interactions]{Transmission of coherent information at the onset of interactions}
\author{Emily Kendall$^{1,2,3}$, Barbara \v{S}oda$^{1,2}$ and Achim Kempf$^{1,2}$}
\address{$^1$Perimeter Institute for Theoretical Physics, 31 Caroline St N, Waterloo, Ontario, N2L 2Y5, Canada}
\address{$^2$Departments of Applied Mathematics and Physics, and Waterloo Centre for Astrophysics, University of Waterloo, Waterloo, Ontario, N2L 3G1, Canada}
\address{$^3$Department of Physics, The University of Auckland, Private Bag 92019, Auckland, New Zealand}
\ead{[email protected] and [email protected]}
\begin{abstract}
In this work, we investigate the parameters governing the rate at which a quantum channel arises at the onset of an interaction between two systems, $A$ and $B$. In particular, when system $A$ is pre-entangled with an ancilla, $\tilde{A}$, we quantify the early-time transmission of pre-existing entanglement by calculating the leading order change in coherent information of the complementary channel ($A\rightarrow B'$).
We show that, when $A$ and $B$ are initially unentangled and $B$ is pure, there is no change in coherent information to first order, while the leading (second) order change is divergent. However, this divergence may be regulated by embedding the conventional notion of coherent information into what we call the family of $n$-coherent informations, defined using $n$-R\'enyi entropies.
We find that the rate of change of the $n$-coherent information at the onset of the interaction is governed by
a quantity, which we call the $n$-exposure,
which captures the extent to which the initial coherent information of $A$ with $\tilde{A}$ is exposed to or `seen by' the interaction Hamiltonian between $A$ and $B$.
We give examples in qubit systems and in the light-matter interaction.
$$$$
\end{abstract}
\submitto{\JPA}
\section{Introduction}
Over the past decade, progress in the field of quantum information has seen rapid acceleration, with the physical realisation of a large number of quantum technologies in disciplines such as communication, computing, machine learning, and cryptography. Recently, for example, successful quantum key distribution over distances of thousands of kilometers has been achieved using a ground-to-satellite quantum communication network \cite{Chen2021}. Other recent advances include the development of programmable, scalable photonic quantum chips \cite{Arrazola2021}, and the demonstration of computational performance advantages over classical methods, see, e.g., \cite{King2021}.
A key challenge in the general field of quantum technologies is to control coherence and entanglement during quantum processes. Depending on the application, different behaviours are desired. Often, it is necessary that entanglement be transferred efficiently from one system to another, such as in quantum communication within or in between quantum processors. Conversely, in other applications it can be important to retain pre-existing entanglement for as long as possible, in particular, to minimize its transfer to the environment \cite{Shor1995, Monz2009}.
It is therefore important to explore the factors that determine the efficiency with which quantum information is transmitted between two systems as they start interacting. A key tool with which to investigate this is the coherent information \cite{Schumacher1996}. The coherent information serves as an indicator of the degree of preservation of initial entanglement as a quantum system undergoes a quantum channel.
Moreover, the coherent information is of particular importance in the determination of the capacity of a quantum channel. Specifically, the quantum channel capacity is determined via maximising the coherent information over input states and many parallel uses of the channel \cite{Lloyd1997, Gyongyosi2018, Klesse2007, Cuevas2017}. Via other optimizations, the coherent information also yields the one-way and two-way distillable entanglements.
In the present work, we analyze the factors that determine the rate at which pre-existing coherent information is transferred at the onset of an interaction. We adopt a perturbative framework, in which we Taylor expand the evolution of the coherent information at early times. Because the coherent information is defined as a difference between von Neumann entropies, we calculate the changes in these entropies perturbatively to yield the overall change in coherent information.
We find that, while the von Neumann entropies themselves are well-defined for any given channel configuration, the leading order expansion coefficient in their time evolution may diverge, thereby yielding a divergent leading order change in the coherent information. To overcome this problem, we
extend our analysis by considering a generalisation to a class we refer to as the $n-$coherent information. Whereas the traditional coherent information is defined as a difference of von Neumann entropies, the $n-$coherent information is analogously defined as a difference between the $n^{th}$ R\'enyi entropies. From this perspective, the traditional coherent information may be referred to as the `1-coherent information', since the von Neumann entropy is the limit of the $n^{th}$ R\'enyi entropy as $n\rightarrow 1$.
There are a number of benefits to considering the full family of $n-$R\'enyi entropies rather than only the von Neumann entropy. First, we find that for all integers $n>1$, there are no divergences in the leading order expansion coefficient, and we are therefore able to obtain finite values for the early time behaviour of the corresponding $n-$coherent information. Furthermore, by determining the behavior of the entire family of $n-$R\'enyi entropies we capture a more comprehensive picture of the dynamics. For example, the von Neumann entropy of a density matrix yields little specific information about that density matrix. By contrast, knowledge of the full family of integer R\'enyi entropies for $n>1$ of a density matrix allows one to reconstruct the entire spectrum of the density matrix \cite{Li2008, Flammia:2009axf, PhysRevA.78.032329, 2020PhRvA.102f2413W}. Recall also that the family of R\'enyi entropies with $n>1$ satisfy the additivity and majorisation criteria of suitable information measures \cite{Renyi_og}.
Using the perturbative method, we find that the leading contribution to the time evolution of the $n$-coherent information occurs at second order for all $n$. Moreover, we show that to second order, the evolution of the $n-$coherent information is independent of the free Hamiltonians of either subsystem. This markedly simplifies the computations, and implies that complicated resonance phenomena do not affect the leading order behaviour of the coherent information, since such phenomena necessarily involve the free Hamiltonians in their mathematical descriptions \cite{Hsiang:2019aen, 2022arXiv220108734W}.
Interestingly, we show that the second order change in the $n$-coherent information is not a simple function of the degree of pre-existing entanglement. Instead, it depends upon the extent to which the initial entanglement in the $A\tilde{A}$ system is accessible to the interaction Hamiltonian $H_{int}$ of $A$ with $B$. Specifically, we identify a new quantity, which we term the $n-$exposure, to which the leading order change in the $n-$coherent information is proportional. This is an important point, as it implies that initial preparations of the $\tilde{A}A$ system which possess the same level of entanglement may nevertheless exhibit vastly different behaviours at the onset of interaction with system $B$. Therefore, the notion of $n$-exposure could help, for example, to identify regions in a system's space of states (such as regions in the Bloch sphere in the simple case of a qubit), that are desirable because they have little $n-$exposure and are therefore relatively resistant to transmission of quantum information to the environment.
To illustrate the utility of the $n-$coherent information and $n-$exposure, we apply our results to simple physical scenarios. We first apply our approach to the quantum Rabi model \cite{rabi1} of the light-matter interaction. In this simple model, we can solve for the time evolution of the qubit exactly. This allows us to verify our perturbative results non-perturbatively. We can then also use the $n-$exposure to identify which regions in the state space have greater propensity to transmit pre-existing entanglement at the onset of interactions.
We also apply our results to a simple scenario involving a qutrit as the input to a quantum channel in order to explore the utility of the $n-$exposure in larger systems. We explicitly demonstrate regions of state space which are prone to either retaining or transmitting pre-existing entanglement at the onset of the interaction.
The structure of this paper is as follows. In Section \ref{sec:prelim}, we prove that free Hamiltonians do not contribute to second order in our calculations and may therefore be neglected in the following sections without loss of generality. We then discuss the notion of coherent information in Section \ref{sec:coherent_information} and describe the quantum channel setup which we will work with throughout the remainder of the paper. In Section \ref{sec:n_1}, we consider the early time evolution of the traditional coherent information, and demonstrate the divergences which can arise in derivatives of the von Neumann entropy. In Section \ref{sec:n_geq_2} we introduce the generalised $n$-coherent information, and demonstrate that, unlike the traditional coherent information, the leading order behaviour is well-defined and finite across state space for $n>1$. In Sections \ref{sec:rabi} and \ref{sec:qutrit} we apply our results to the light-matter interaction and to a qutrit scenario. In Section \ref{sec:conclusion} we discuss the potential significance of our results in the context of quantum communication and computing technologies and address the scope for future work.
\section{Preliminaries}\label{sec:prelim}
In this work, we calculate the transfer of coherent information perturbatively in time, i.e., for the onset of the interaction between two systems. We begin by showing that the transfer of coherent information sets in at second order and that the presence of the free Hamiltonians of the two interacting systems does not contribute to the evolution of the quantities of interest until third order.
This means that, in the remainder of this paper, we will be able to work to leading (i.e., second) order while neglecting the free Hamiltonians.
To see this, let us start by considering a system $AB$ composed of two subsystems $A$ and $B$, described by a density matrix $\rho(t)$ which is separable at $t=0$, i.e., $\rho_0 = \rho_A\otimes\rho_B$. We allow the total Hamiltonian $\hat{H}$ for the time evolution of the combined system $AB$ to be fully general, i.e., of the form:
\begin{equation}
\hat{H} := \sum_j \hat{A}_j\otimes\hat{B}_j,
\end{equation}
where the operators $\{\hat{A}_j\}$ act only on subsystem $A$, and the operators $\{\hat{B}_j\}$ act only on subsystem $B$. Note that this general expression for $\hat{H}$ encompasses both interaction terms (with non-trivial action on both subsystems), as well as the free evolution of each subsystem through terms of the form $\hat{A_j}\otimes\mathbb{I}$ (for subsystem $A$) and $\mathbb{I}\otimes\hat{B}_j$ (for subsystem $B$). We now expand the time-evolved state of the total system in the Schr\"odinger picture as:
\begin{equation}\label{eq:rho_full}
\rho(t) = e^{it\hat{H}}\rho_0 e^{-it\hat{H}}=\rho_0+it\big[\hat{H}, \rho_0\big] + \frac{(it)^2}{2!}\Big[\hat{H}, \big[\hat{H}, \rho_0\big]\Big] + ...
\end{equation}
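As a quick sanity check of this expansion (a self-contained numerical sketch of our own; the names and the random test operators below are not part of the analysis), one may compare the truncated series with the exact conjugation for a small time step:
\begin{verbatim}
# Compare rho(t) = e^{itH} rho_0 e^{-itH} with its second-order Taylor expansion.
import numpy as np
from scipy.linalg import expm

rng = np.random.default_rng(0)

def random_density_matrix(d):
    M = rng.normal(size=(d, d)) + 1j * rng.normal(size=(d, d))
    rho = M @ M.conj().T
    return rho / np.trace(rho)

def random_hermitian(d):
    M = rng.normal(size=(d, d)) + 1j * rng.normal(size=(d, d))
    return (M + M.conj().T) / 2

d, t = 4, 1e-3                                   # dimension of AB, small time step
rho0, H = random_density_matrix(d), random_hermitian(d)

exact = expm(1j * t * H) @ rho0 @ expm(-1j * t * H)
c1 = H @ rho0 - rho0 @ H                         # [H, rho_0]
c2 = H @ c1 - c1 @ H                             # [H, [H, rho_0]]
series = rho0 + 1j * t * c1 + (1j * t) ** 2 / 2 * c2

print(np.linalg.norm(exact - series))            # O(t^3), i.e. tiny
\end{verbatim}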
A key quantity whose time evolution we wish to show is independent of the free Hamiltonians to second perturbative order is the $n$-purity, $\gamma_n$. The $n$-purity of subsystem $B$ is defined as
\begin{equation}
\gamma_{n,B}(t) := \Tr_B\big[\rho_B(t)^n\big] = \Tr_B\big[\Tr_A[\rho(t)]^n\big],
\end{equation}
and likewise for subsystem $A$. The first and second time derivatives of the $n$-purity of $B$ are as follows:
\begin{align}
\dot{\gamma}_{n,B}(t) &= n\Tr_B\Big[\Tr_A[\rho(t)]^{n-1}\Tr_A[\dot{\rho}(t)]\Big],\label{eq:gamma_dot}\\[1em]
\ddot{\gamma}_{n,B}(t)&=n\Tr_B\Bigg[\Tr_A[\dot{\rho}(t)]\sum_{j=0}^{n-2}\Tr_A[\rho(t)]^j\Tr_A[\dot{\rho}(t)]\Tr_A[\rho(t)]^{n-2-j}\nonumber\\
&\qquad\qquad+\Tr_A[\rho(t)]^{n-1}\Tr_A[\ddot{\rho}(t)]\Bigg].\label{eq:gamma_ddot}
\end{align}
We will first show that, irrespective of the precise form of $\hat{H}$, $\dot{\gamma}_{n,A}(t)$ and $\dot{\gamma}_{n,B}(t)$ vanish at $t=0$. To do this, we use that $\Tr_A[\rho(0)] = \rho_B$ and Equation \ref{eq:rho_full} to find that $\dot{\rho}(0) = i[\hat{H},\rho_0]$. Now, substituting the full expression for $\hat{H}$ we have:
\begin{align}
\dot{\gamma}_{n,B}(0) &= in\Tr_B\left[\rho_B^{n-1}\Tr_A\Big[\sum_j\hat{A}_j\otimes\hat{B}_j, \rho_A\otimes\rho_B\Big]\right]\nonumber\\[1em]
&=in\sum_j\Tr_A[\hat{A}_j\rho_A]\Tr_B\big[\rho_B^{n-1}[\hat{B}_j,\rho_B]\big] =0
\end{align}
The expression vanishes because the trace over system $B$ vanishes for every $j$ due to the cyclicity of the trace.
We now consider Equation \ref{eq:gamma_ddot} at $t=0$. Again we note that $\Tr_A[\rho(0)] = \rho_B$ and from Equation \ref{eq:rho_full} we have $\ddot{\rho}(0) = -\big[\hat{H},[\hat{H},\rho_0]\big]$. Hence, \begin{equation}
\ddot{\gamma}_{n,B}(0) = -n\Tr_B\Bigg[\Tr_A\big[[\hat{H},\rho_0]\big]\sum_{j=0}^{n-2}\rho_B^j\Tr_A\big[[\hat{H},\rho_0]\big]\rho_B^{n-2-j} + \rho_B^{n-1}\Tr_A\big[\hat{H},[\hat{H},\rho_0]\big]\Bigg].
\end{equation}
Substituting in the general expression for $\hat{H}$ and simplifying we find:
\begin{align}\label{eq:full_2nd}
\ddot{\gamma}_{n,B}(0)=-2n\sum_{jk}\Bigg(&\Tr_A\big[\hat{A}_j\rho_A\big]\Tr_A\big[\hat{A}_k\rho_A\big]\Tr_B\big[\rho_B^{n-1}[\hat{B}_j,\rho_B]\hat{B}_k\big]\nonumber\\
&+\Tr_A\big[\hat{A}_j\hat{A}_k\rho_A\big]\Tr_B\big[\rho_B^{n-1}[\rho_B\hat{B}_j,\hat{B}_k]\big]\Bigg).
\end{align}
To prove that the free Hamiltonians do not contribute here, we must show that any terms of the form $\hat{A}\otimes\mathbb{I}$ or $\mathbb{I}\otimes\hat{B}$ do not contribute to the sum. To this end, let us consider the Hamiltonian $\hat{H} = \sum_j \hat{A}_j\otimes\hat{B}_j$ where the term with index $j=m$ is of the form $\hat{H}_A\otimes\mathbb{I}_B$. We can now divide the sum over $j,k$ in Equation \ref{eq:full_2nd} as follows:
\begin{equation}\label{eq:sum_division}
\sum_{jk} = \sum_{j,k\neq m} + \sum_{j,(k=m)} + \sum_{k, (j=m)}.
\end{equation}
There is no contribution from $\hat{H}_A\otimes \mathbb{I}_B$ in the first term on the right of the above equation. Hence, we need only consider the two final terms. We have:
\begin{align}
-2n\sum_{j,(k=m)}\Big(&\Tr_A\big[\hat{A}_j\rho_A\big]\Tr_A\big[\hat{H}_A\rho_A\big]\Tr_B\big[\rho_B^{n-1}[\hat{B}_j,\rho_B]\mathbb{I}_B\big]\nonumber\\
&+\Tr_A\big[\hat{A}_j\hat{H}_A\rho_A\big]\Tr_B\big[\rho_B^{n-1}[\rho_B\hat{B}_j,\mathbb{I}_B]\big]\Big)\nonumber\\
-2n\sum_{k,(j=m)}\Big(&\Tr_A\big[\hat{H}_A\rho_A\big]\Tr_A\big[\hat{A}_k\rho_A\big]\Tr_B\big[\rho_B^{n-1}[\mathbb{I}_B,\rho_B]\hat{B}_k\big]\nonumber\\
&+\Tr_A\big[\hat{H}_A\hat{A}_k\rho_A\big]\Tr_B\big[\rho_B^{n-1}[\rho_B\mathbb{I}_B,\hat{B}_k]\big]\Big).
\end{align}
We see immediately that there is no contribution from these partial sums because each trace over $B$ either includes a commutator with the identity, or is of the form $\Tr_B\big[\rho_B^{n-1}\hat{B}_j\rho_B-\rho_B^n\hat{B}_j\big]$, which is zero by the cyclicity of the trace. Hence, we see that terms of the form $\hat{H}_A\otimes\mathbb{I}_B$ do not contribute to Equation \ref{eq:full_2nd}. Let us now consider terms of the form $\mathbb{I}_A\otimes\hat{H}_B$. Again, we will assume that the term of this form is indexed by $m$, and split the double sum over $j,k$ as shown in Equation \ref{eq:sum_division}. Again, the first partial sum, which does not include $j=m$ or $k=m$, does not contain any instances of the $\mathbb{I}_A\otimes\hat{H}_B$ term. Looking at the final two partial sums, we have:
\begin{align}
-2n\sum_{j,(k=m)}\Big(&\Tr_A\big[\hat{A}_j\rho_A\big]\Tr_A\big[\mathbb{I}_A\rho_A\big]\Tr_B\big[\rho_B^{n-1}[\hat{B}_j,\rho_B]\hat{H}_B\big]\nonumber\\
&+\Tr_A\big[\hat{A}_j\mathbb{I}_A\rho_A\big]\Tr_B\big[\rho_B^{n-1}[\rho_B\hat{B}_j,\hat{H}_B]\big]\Big)\nonumber\\
-2n\sum_{k,(j=m)}\Big(&\Tr_A\big[\mathbb{I}_A\rho_A\big]\Tr_A\big[\hat{A}_k\rho_A\big]\Tr_B\big[\rho_B^{n-1}[\hat{H}_B,\rho_B]\hat{B}_k\big]\nonumber\\
&+\Tr_A\big[\mathbb{I}_A\hat{A}_k\rho_A\big]\Tr_B\big[\rho_B^{n-1}[\rho_B\hat{H}_B,\hat{B}_k]\big]\Big).
\end{align}
We now use the fact that $\Tr_A[\mathbb{I}_A\rho_A] = \Tr_A[\rho_A] = 1$, and rename the index $k\rightarrow j$ in the second sum to write
\begin{align}
-2n\sum_{j}\Tr_A[\hat{A}_j\rho_A]\Tr_B\Big[\rho_B^{n-1}\Big(&[\hat{B}_j,\rho_B]\hat{H}_B+[\rho_B\hat{B}_j,\hat{H}_B]\nonumber\\
&+[\hat{H}_B,\rho_B]\hat{B}_j +[\rho_B\hat{H}_B,\hat{B}_j]\Big)\Big]=0,
\end{align}
where again we have made use of the cyclicity of the trace.
Hence, we have shown that terms of the form $\hat{A}\otimes\mathbb{I}$ and $\mathbb{I}\otimes\hat{B}$ (i.e., free Hamiltonians) do not contribute to either the first or second time derivatives of the $n$-purity, and therefore we can neglect free evolution when working to second perturbative order in the following sections without loss of generality.
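This conclusion is easy to verify numerically. The short script below (our own illustration, using randomly generated states and operators whose names are not drawn from the paper) estimates the first and second time derivatives of the $n$-purity of $B$ by finite differences, once for a pure interaction term and once with free Hamiltonians added; the first derivative vanishes and the second derivative is unchanged, up to discretisation error.
\begin{verbatim}
# Finite-difference check: d/dt gamma_{n,B}(0) = 0, and d^2/dt^2 gamma_{n,B}(0)
# is unaffected by adding free Hamiltonians.
import numpy as np
from scipy.linalg import expm

rng = np.random.default_rng(1)
dA, dB, n = 2, 2, 3

def rand_herm(d):
    M = rng.normal(size=(d, d)) + 1j * rng.normal(size=(d, d))
    return (M + M.conj().T) / 2

def rand_rho(d):
    M = rng.normal(size=(d, d)) + 1j * rng.normal(size=(d, d))
    rho = M @ M.conj().T
    return rho / np.trace(rho)

rho0 = np.kron(rand_rho(dA), rand_rho(dB))       # separable initial state
H_int = np.kron(rand_herm(dA), rand_herm(dB))    # interaction term A (x) B
H_free = np.kron(rand_herm(dA), np.eye(dB)) + np.kron(np.eye(dA), rand_herm(dB))

def purity_B(H, t):
    U = expm(1j * t * H)                         # sign convention of this section
    rho_t = (U @ rho0 @ U.conj().T).reshape(dA, dB, dA, dB)
    rhoB_t = np.einsum('iaib->ab', rho_t)        # partial trace over A
    return np.trace(np.linalg.matrix_power(rhoB_t, n)).real

h = 1e-4
for H in (H_int, H_int + H_free):
    g0, gp, gm = purity_B(H, 0.0), purity_B(H, h), purity_B(H, -h)
    print((gp - gm) / (2 * h),                   # ~ 0: first derivative vanishes
          (gp - 2 * g0 + gm) / h ** 2)           # ~ equal in both iterations
\end{verbatim}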
\section{Quantum channels and coherent information}\label{sec:coherent_information}
The controlled isolation or transfer of quantum information among quantum systems is of great importance in quantum technologies \cite{Ladd2010, ASPELMEYER2004}. Of particular interest is the degree to which pre-existing quantum correlations with an ancilla system as measured, e.g., by coherent information (or also, e.g., by negativity) are preserved or transmitted under the action of a quantum channel.
Let us now consider the direct channel described in the introduction, i.e., the channel from the density matrix of system $A$ at the initial time to the density matrix of system $A$ at a later time, namely after $A$ interacted with a system $B$. In this interaction, $A$ may transmit some of its pre-existing quantum correlations with an ancilla, $\tilde{A}$, to system $B$. Therefore, we also consider the complementary channel from the density matrix of system $A$ at the initial time to the density matrix of system $B$ after the onset of the interaction.
We assume that among the three systems, $A$, $\tilde{A}$ and $B$, systems $A$ and $\tilde{A}$ are initially entangled, such that $\tilde{A}$ purifies $A$. $B$ is assumed initially unentangled with both $A$ and $\tilde{A}$, and for now we will assume that system $B$ is initially pure. Therefore, the total tripartite system $A\tilde{A}B$ is also pure. We then consider an interaction which takes place between systems $A$ and $B$ only. This arrangement is illustrated in Figure \ref{fig:setup}.
\begin{figure}
\caption{A tripartite system in which, initially, $A$ is purified by $\tilde{A}$, system $B$ is pure and unentangled with $A\tilde{A}$, and the interaction takes place between $A$ and $B$ only.}
\label{fig:setup}
\end{figure}
We consider the direct quantum channel $A\rightarrow A'$ and the complementary channel $A\rightarrow B'$. Here, primes represent the time-evolved systems.
We will study the coherent information of these channels since they are the building blocks for the channel capacity. Moreover, as mentioned before, maximising over all possible input states and parallel executions of the channel gives rise to the overall quantum channel capacity \cite{Lloyd1997, Leditzky2017, Led2018}.
The coherent information for our two channels is defined as
\begin{align}\label{eq:CI_defn}
I^d &= S(A') - S((A\tilde{A})'),\\
I^c &= S(B') - S((B\tilde{A})'),
\end{align}
where the superscripts $d$ and $c$ stand for `direct' and `complementary', respectively, and $S$ represents the von Neumann entropy.
Though the coherent information is not an entanglement monotone, by definition, the coherent information quantifies the degree to which the entropy of a subsystem exceeds that of its supersystem. Since, classically, this quantity can never be positive, a positive coherent information indicates the presence of quantum correlations between the subsystems \cite{wilde_2017}.
Since system $B$ starts out pure and system $B\tilde{A}$ starts out mixed, the coherent information, $I^c$, of the complementary channel starts out negative and therefore requires at least a finite amount of time to turn positive. Since $I^c=-I^d$, this means that $I^d$, which starts out positive, needs to decrease. Our aim here is to find out what determines the speed with which the two channels' coherent informations change at the onset of interactions.
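Before turning to the perturbative expansion, it may help to see these quantities in a minimal worked example (our own construction, not one of the physical scenarios analysed later): qubit $A$ maximally entangled with $\tilde{A}$, qubit $B$ initially in a pure state, and the interaction generated by $\sigma_x\otimes\sigma_x$ on $AB$. The script below evaluates $I^d$ and $I^c$ by explicit partial traces and confirms that $I^c$ starts at $-\ln 2$ (in nats) and then increases, while $I^d$ decreases.
\begin{verbatim}
# Minimal example of I^d = S(A') - S(B') and I^c = -I^d.
import numpy as np
from scipy.linalg import expm

X = np.array([[0, 1], [1, 0]], dtype=complex)
I2 = np.eye(2, dtype=complex)

bell = np.array([1, 0, 0, 1], dtype=complex) / np.sqrt(2)      # state of (Atilde, A)
psi0 = np.kron(bell, np.array([1, 0], dtype=complex))          # append B = |0>

H_int = np.kron(I2, np.kron(X, X))                             # acts on A and B only

def von_neumann(rho):
    ev = np.linalg.eigvalsh(rho)
    ev = ev[ev > 1e-12]
    return float(-np.sum(ev * np.log(ev)))                     # natural log (nats)

def coherent_informations(t):
    psi = expm(1j * t * H_int) @ psi0
    rho = np.outer(psi, psi.conj()).reshape(2, 2, 2, 2, 2, 2)  # (At,A,B ; At,A,B)
    rhoA = np.einsum('iajibj->ab', rho)                        # trace out Atilde, B
    rhoB = np.einsum('iajiak->jk', rho)                        # trace out Atilde, A
    Id = von_neumann(rhoA) - von_neumann(rhoB)
    return Id, -Id

for t in (0.0, 0.2, 0.5):
    print(t, coherent_informations(t))   # I^c = -ln 2 at t = 0, then increases
\end{verbatim}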
\section{Perturbative expansion of the coherent information}\label{sec:n_1}
We now study the evolution of the coherent information perturbatively, calculating the leading order derivatives of the von Neumann entropies in Equation \ref{eq:CI_defn}. We will find that there can be divergences in the leading derivatives, which will motivate the introduction of a generalisation of the coherent information in Section \ref{sec:n_geq_2}.
Let us begin by revisiting the definition of the coherent information, Equation \ref{eq:CI_defn}, which can be re-expressed as follows:
\begin{align}\label{eq:CI_def}
I^d &= S(A') - S(B'),\\
I^c &= S(B') - S(A').
\end{align}
Here we have made use of the fact that the entire tripartite system $A\tilde{A}B$ is pure, and therefore any bipartition of the whole must result in a symmetric configuration of entropies. Since the coherent information of the complementary channel is the negative of that of the direct channel, it is sufficient to study the latter.
To compute the leading order change in $I^d$ at the onset of the quantum channel, we note that the von Neumann entropies in Equation \ref{eq:CI_def} can be expressed as the limit of the $n$-R\'enyi entropy, $H_n$, as $n\rightarrow 1$. That is
\begin{equation}
S(A) = \lim_{n\rightarrow 1} H_{n}(A) = \lim_{n\rightarrow 1} \frac{1}{1-n}\log\big( \gamma_{n,A}\big),
\end{equation}
where $\gamma_{n,A} = \Tr_A[\rho_A^n]$. To obtain the time evolution of the von Neumann entropy, we can therefore compute the evolution of $H_n$ in the limit $n\rightarrow 1$. In doing so, we will find that it is not possible to obtain an analytic expression for the leading time derivative of the von Neumann entropy that is valid across the entire state space.
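As an aside, the convergence of $H_n$ to the von Neumann entropy is easy to probe numerically. The following minimal sketch (Python with numpy; the random full-rank state is an arbitrary placeholder and the snippet is purely illustrative, not part of the derivation) evaluates $H_n$ for decreasing $n-1$:
\begin{verbatim}
import numpy as np

def renyi_entropy(rho, n):
    # H_n(rho) = log(Tr[rho^n]) / (1 - n)
    lam = np.linalg.eigvalsh(rho)
    return np.log(np.sum(lam ** n)) / (1.0 - n)

def von_neumann_entropy(rho):
    lam = np.linalg.eigvalsh(rho)
    lam = lam[lam > 1e-12]            # drop numerical zeros
    return -np.sum(lam * np.log(lam))

# an arbitrary full-rank qutrit density matrix
rng = np.random.default_rng(1)
M = rng.normal(size=(3, 3)) + 1j * rng.normal(size=(3, 3))
rho = M @ M.conj().T
rho /= np.trace(rho).real

for n in (2.0, 1.1, 1.01, 1.001):
    print(n, renyi_entropy(rho, n))
print("S =", von_neumann_entropy(rho))
\end{verbatim}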
To see this, let us now consider the case where the interaction Hamiltonian between systems $A$ and $B$ is of the form $H_{int} = \hat{A}\otimes\hat{B}$, generating the unitary time evolution operator $U = \exp\big(it \hat{A}\otimes\hat{B}\big)$. As illustrated in Figure \ref{fig:setup}, systems $A$ and $B$ are assumed initially unentangled, with a combined density matrix of the form $\rho_A\otimes\rho_B$. In previous work \cite{KendallKempf}, we showed that in this situation the first derivative of the $n$-R\'enyi entropy vanishes for either subsystem, and that the leading (second) order is given by
\begin{equation}\label{eq:deriv_gen}
\ddot{H}_n(A)|_{t=0} =
\frac{\ddot{\gamma}_{n,A}}{(1-n)\gamma_{n,A}}\bigg|_{t=0}=\frac{-2n(\Delta B)^2 \Tr_A\Big[\rho_A^{n-1}\big[\hat{A}, \rho_A\big]\hat{A}\Big]}{(n-1)\Tr_{A}\big[\rho_A^n\big]},
\end{equation}
and likewise for system $B$.\footnote{The simplicity of Equation \ref{eq:deriv_gen} highlights the advantage of this perturbative approach. By contrast, in Appendix \ref{app:non-pert} we demonstrate the complexity arising in an exact calculation, from which expressions of this kind cannot be straightforwardly derived.} In \cite{KendallKempf} we assumed the absence of free Hamiltonians for either subsystem. In fact, as shown in Section \ref{sec:prelim}, these results hold even in the presence of free Hamiltonians, since they do not contribute to second order in the evolution of $\gamma$.
In order to obtain the leading derivative of the von Neumann entropy, we must now evaluate Equation \ref{eq:deriv_gen} in the limit $n\rightarrow 1$. We will first attempt to find an analytic expression for the second time derivative as $n\rightarrow 1$. This limit exists, except for states possessing one or more vanishing eigenvalues. To see this, let us first set $n=1+\varepsilon$, and re-express Equation \ref{eq:deriv_gen} accordingly. We have
\begin{equation}\label{eq:epsilon_case}
\ddot{H}_{1+\varepsilon}(A)|_{t=0} = \frac{-2(1+\varepsilon)(\Delta B)^2 \Tr_A \Big[\rho_A^\varepsilon \big[\hat{A}, \rho_A\big]\hat{A}\Big]}{\varepsilon \Tr_A\big[\rho_A^{1+\varepsilon}\big]},
\end{equation}
which we may re-express as:
\begin{equation}
\ddot{H}_{1+\varepsilon}(A)|_{t=0}=\frac{-2(1+\varepsilon)(\Delta B)^2 \Tr_A \Big[\exp\big(\varepsilon \log \rho_A\big) \big[\hat{A}, \rho_A\big]\hat{A}\Big]}{\varepsilon \Tr_A\big[\rho_A^{1+\varepsilon}\big]}.
\end{equation}
Let us now consider the trace term in the numerator. It can be expanded as a power series in $\varepsilon$:
\begin{equation}
\Tr_A \Big[\exp\big(\varepsilon \log \rho_A\big) \big[\hat{A}, \rho_A\big]\hat{A}\Big] = \Tr_A \Big[(1 + \varepsilon \log \rho_A + ...) \big[\hat{A}, \rho_A\big]\hat{A}\Big].
\end{equation}
Note that, due to the cyclicity of the trace, the first term in this expansion vanishes since
\begin{equation}
\Tr_A\Big[\big[\hat{A},\rho_A\big]\hat{A}\Big] = \Tr_A\Big[\hat{A}\rho_A\hat{A}\Big] - \Tr_A\Big[\rho_A\hat{A}\hat{A}\Big] = 0.
\end{equation}
Hence, prior to taking any limits, Equation \ref{eq:epsilon_case} can be written as:
\begin{align}
\ddot{H}_{1+\varepsilon}(A)|_{t=0} &=\frac{-2(1+\varepsilon)(\Delta B)^2 \Tr_A \Big[\big(\varepsilon \log \rho_A + \mathcal{O}(\varepsilon^2)\big) \big[\hat{A}, \rho_A\big]\hat{A}\Big]}{\varepsilon \Tr_A\big[\rho_A^{1+\varepsilon}\big]},\nonumber\\
&=\frac{-2(1+\varepsilon)(\Delta B)^2 \Tr_A \Big[\big(\log \rho_A + \mathcal{O}(\varepsilon)\big) \big[\hat{A}, \rho_A\big]\hat{A}\Big]}{\Tr_A\big[\rho_A^{1+\varepsilon}\big]}.
\end{align}
For density matrices $\rho_A$ whose eigenvalues are all nonzero, we obtain an analytic expression for the limit $\varepsilon \rightarrow 0$:
\begin{equation}\label{eq:ddot_H1}
\ddot{H}_{1}(A)|_{t=0} = -2(\Delta B)^2 \Tr_A \Big[\log \rho_A \big[\hat{A}, \rho_A\big]\hat{A}\Big].
\end{equation}
This can be re-expressed in terms of the eigenvalues $\lambda_i$ of $\rho_A$ as
\begin{equation}\label{eq:n_1_case}
\ddot{H}_{1}(A)|_{t=0} = -2(\Delta B)^2 \sum_{i,j} \log(\lambda_j)(\lambda_i - \lambda_j)\vert a_{ij}\vert^2,
\end{equation}
where $a_{ij}$ denotes the matrix elements of $\hat{A}$ in the eigenbasis of $\rho_A$.
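For concreteness, Equation \ref{eq:n_1_case} can be evaluated directly for a full-rank state; the following sketch (illustrative only, with an arbitrarily chosen qubit state and $\hat{A}=\sigma_x$) does so:
\begin{verbatim}
import numpy as np

def ddot_H1(rho, A, varB):
    # Equation (eq:n_1_case): -2 (Delta B)^2 sum_ij log(l_j)(l_i - l_j)|a_ij|^2,
    # where a_ij are the matrix elements of A in the eigenbasis of rho
    # (rho is assumed to be full rank, so that log(l_j) is finite).
    lam, V = np.linalg.eigh(rho)
    a = V.conj().T @ A @ V
    s = sum(np.log(lam[j]) * (lam[i] - lam[j]) * abs(a[i, j]) ** 2
            for i in range(len(lam)) for j in range(len(lam)))
    return -2.0 * varB * s

sx  = np.array([[0, 1], [1, 0]], dtype=complex)
rho = np.array([[0.7, 0.1], [0.1, 0.3]], dtype=complex)
print(ddot_H1(rho, sx, varB=1.0))
\end{verbatim}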
Taking the limit $n\rightarrow 1$ (or equivalently $\varepsilon \rightarrow 0$) is non-trivial for states $\rho_A$ possessing a vanishing eigenvalue. In fact, if $\rho_A$ possesses a vanishing eigenvalue then we cannot neglect the higher order terms in the above expansion, as doing so would require that the matrix elements of $\varepsilon \log(\rho_A)$ are $\ll 1$.
The problem for states with a vanishing eigenvalue, say $\lambda$, arises from the noncommutativity of the two limits $\varepsilon \rightarrow 0$ and $\lambda \rightarrow 0$. To see this, let us consider the trace term in the numerator of Equation \ref{eq:epsilon_case}, expressed in the eigenbasis of $\rho_A$:
\begin{equation}
\Tr_A \Big[ \rho_A^{\varepsilon} \big[\hat{A}, \rho_A\big]\hat{A}\Big] = \sum_{i,j} \lambda_j^{\varepsilon}(\lambda_i - \lambda_j)\vert a_{ij}\vert^2 = \sum_{i,j} (\lambda_j^{\varepsilon}\lambda_i - \lambda_j^{1+\varepsilon})\vert a_{ij}\vert^2.
\label{trte}
\end{equation}
The right hand side of the above equation illustrates the issue at hand. Namely, we must consider the term $\lambda^\varepsilon$, as both quantities tend to zero. However,
\begin{equation}
\lambda^\varepsilon = \begin{cases}
0, \text{ for } \lambda = 0, \varepsilon > 0\\
1, \text{ for } \lambda > 0, \varepsilon = 0
\end{cases}
\end{equation}
In Appendix \ref{numer}, we numerically illustrate the small $\varepsilon$ and $\lambda$ behavior of the trace term.
The noncommutativity of the two limits $\varepsilon \rightarrow 0$ and $\lambda \rightarrow 0$ reflects a kind of instability, in the sense that dimensions in the Hilbert space which initially carry zero probability tend to become populated immediately. An initial divergence does not imply that the corresponding probabilities rise to large values, but merely that their rise initially possesses a large acceleration. We discuss this further in Appendix \ref{app:caution}.
To summarize our findings so far: when the state of system $A$ (of Figure \ref{fig:setup}) possesses only nonzero eigenvalues $\{\lambda_i\}$, we obtain a well-defined limit of $\ddot{H}_{1+\varepsilon}(A)\vert_{t=0}$ as $\varepsilon\rightarrow 0$, i.e., for the von Neumann entropy. Conversely, as one or more $\lambda_i\rightarrow 0$, this limit of $\ddot{H}_{1+\varepsilon}(A)$ diverges. This includes the case where all eigenvalues but one approach zero, i.e., a pure state. Hence, while the von Neumann entropy itself always approaches a finite limit when one or more eigenvalues tend to zero, its second time derivative does not necessarily do so; see Appendix \ref{app:exact_plots}.
This fact is important also because, in the setup illustrated in Figure \ref{fig:setup}, system $B$ is initially pure by design, i.e., $\rho_B$ initially possesses some vanishing eigenvalues. Consequently, the leading order change in the coherent information diverges irrespective of the state of System $A$, due to the presence of $\ddot{S}(B)$ in the expression. Indeed, there are a number of further subtleties associated with the perturbative expansion, which we discuss in Appendix \ref{app:caution}.
In the next section, we therefore introduce a generalisation of the coherent information which is stable because it does not require taking the limit $\varepsilon\rightarrow 0$.
\section{Generalisation: $n$-coherent information, $n$-durability and $n$-exposure}\label{sec:n_geq_2}
In the previous section we demonstrated that the leading order change in the coherent information suffers a divergence in the setup of Figure \ref{fig:setup}. This traces back to the fact that the coherent information is conventionally defined as a difference of von Neumann entropies, and the von Neumann entropy is the limit as $n\rightarrow 1$ of the class of $n$-R\'enyi entropies, a limit in which the leading time derivative can diverge. In this section, we therefore generalise the notion of coherent information to the entire class of $n$-R\'enyi entropies. In the literature, studies of the wider class of $n$-R\'enyi entropies are greatly increasing in significance due to the usefulness of these entropies in extracting information about the entanglement spectrum of a quantum system, providing richer information than the von Neumann entropy alone \cite{Islam2015, Li2008}. We note also that work is ongoing in developing novel entanglement measures from the R\'enyi entropies
\cite{Song2016, San2010, Wang2016}.
\subsection{The $n$-coherent information}
We now define the class of $n$-coherent informations, $I_n$, based on the $n$-R\'enyi entropy, through
\begin{align}\label{eq:n_Ic}
I_{n}^d &= H_n(A') - H_n((A\tilde{A})') = H_n(A') - H_n(B') ,\\
I_{n}^c &= H_n(B') - H_n((B\tilde{A})') = H_n(B') - H_n(A'),
\end{align}
where the superscripts $d$ and $c$ refer to the direct and complementary channels, respectively. In particular, we have:
\begin{equation}
I^d_n=-I^c_n \label{sinv}
\end{equation}
To see this, recall that the two reduced density matrices of two subsystems making up a pure system possess the same nonzero eigenvalues including their multiplicities.
The $1$-coherent information is the traditional coherent information based on the von Neumann entropy.
As we will now show, for integer $n>1$ the leading order change of the $n$-coherent information remains well-defined across the whole state space of System $A$, in contrast to the $1$-coherent information, thereby serving as a regulator. Furthermore, as we will discuss, the $n$-R\'enyi entropies may themselves serve as measures of quantum correlations.
Let us recall here that the $n$-R\'enyi entropy obeys the usual axioms of entropies, with the exception that, outside the cases $n=0$ and $n=1$, it does not obey the axiom of subadditivity, see, for example, \cite{RevModPhys.50.221}. This raises the question as to whether the $n$-coherent information for $n\notin\{0,1\}$, besides serving as a regulator for the traditional $1$-coherent information, also serves a direct role in quantum information. Indeed, as was shown in \cite{2002quant.ph..4093V}, the $n$-R\'enyi entropies for all $n$ obey an axiom of weak subadditivity:
\begin{equation}\label{eq:hayden}
H_n(\rho_A) - H_0(\rho_B) \leq H_n(\rho_{AB}) \leq H_n(\rho_A) + H_0(\rho_B).
\end{equation}
Here, $H_0(\rho) = \log\big(\mathrm{rank}(\rho)\big)$, which equals $\log(d)$ for a state of full rank on a Hilbert space of dimension $d$, is also called the max-entropy or the Hartley entropy.
This implies that our notion of $n$-coherent information obeys:
\begin{align}
H_n(\rho_A) - H_n(\rho_{AB}) \leq H_0(\rho_B)\\
H_n(\rho_A) - H_n(\rho_{AB}) \geq - H_0(\rho_B)
\end{align}
and therefore:
\begin{equation}
- H_0(\rho_B) \leq I_n^c \leq H_0(\rho_B),
\end{equation}
where $I_n^c$ is precisely our notion of the $n$-coherent information. This property of the new notion of $n$-coherent information, in the form of the inequality of Equation \ref{eq:hayden}, was shown in \cite{2002quant.ph..4093V} to be crucial for proving a lower bound on quantum communication complexity, namely a lower bound on the number of qubits that two parties need to exchange in order to turn a specified joint state into another specified joint state.
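As a numerical illustration (not a proof), one may probe the bound of Equation \ref{eq:hayden} on randomly sampled two-qubit states; the sampling procedure below is an arbitrary choice:
\begin{verbatim}
import numpy as np

def renyi(rho, n):
    lam = np.clip(np.linalg.eigvalsh(rho), 0.0, None)
    return np.log(np.sum(lam ** n)) / (1 - n)

def hartley(rho, tol=1e-10):
    # H_0(rho) = log(rank(rho))
    return np.log(np.sum(np.linalg.eigvalsh(rho) > tol))

rng, n = np.random.default_rng(2), 2
for _ in range(5):
    M = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))
    rho_AB = M @ M.conj().T
    rho_AB /= np.trace(rho_AB).real
    r = rho_AB.reshape(2, 2, 2, 2)
    rho_A = np.einsum('ikjk->ij', r)             # trace out B
    rho_B = np.einsum('kikj->ij', r)             # trace out A
    lhs = renyi(rho_A, n) - renyi(rho_AB, n)
    print(abs(lhs) <= hartley(rho_B) + 1e-9)     # weak subadditivity
\end{verbatim}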
By contrast, the traditional ($1$-)coherent information provides a lower bound on the quantum channel capacity. Since the $1$-coherent information is a limiting case of the general $n$-coherent information, this motivates further exploration of the way in which the quantum channel capacity may be viewed as a limiting case of the above quantum communication complexity. We will revisit this notion in Section \ref{sec:conclusion}.
\subsection{The $n$-durability}
To evaluate the leading order time evolution of the $n$-coherent information, let us recast Equation \ref{eq:deriv_gen} in the form
\begin{equation}\label{eq:definitions}
\ddot{H}_n(A)|_{t=0} = \frac{2n(\Delta B)^2 D_{n,A}}{(n-1)},
\end{equation}
where we defined $D_{n,A}$ as:
\begin{equation}\label{eq:durability}
D_{n,A} := -\frac{\Tr_A\Big[\rho_A^{n-1}\big[\hat{A}, \rho_A\big]\hat{A}\Big]}{\gamma_{n,A}}.
\end{equation}
We will refer to the quantity $D_{n,A}$ as the `$n$-durability', for reasons which will become clear as we progress.
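For numerical experimentation, Equation \ref{eq:durability} can be transcribed directly; the following sketch is purely illustrative (the qubit state and the choice $\hat{A}=\sigma_z$ are placeholders):
\begin{verbatim}
import numpy as np

def n_durability(rho, A, n):
    # D_{n,A} = -Tr[rho^(n-1) [A, rho] A] / Tr[rho^n]   (Equation eq:durability)
    lam, V = np.linalg.eigh(rho)
    lam = np.clip(lam, 0.0, None)
    rho_pow = V @ np.diag(lam ** (n - 1)) @ V.conj().T
    comm = A @ rho - rho @ A
    return (-np.trace(rho_pow @ comm @ A) / np.sum(lam ** n)).real

sz  = np.diag([1.0, -1.0]).astype(complex)
rho = np.array([[0.6, 0.2], [0.2, 0.4]], dtype=complex)
print(n_durability(rho, sz, n=2))
\end{verbatim}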
First, we restrict our attention to integer $n\geq 1$ and notice that the $n$-durability is a non-negative quantity. To see this, consider the trace term expressed in terms of the eigenvalues of $\rho_A$:
\begin{equation}\label{eq:n_g_1_case}
\Tr_A \Big[ \rho_A^{n-1} \big[\hat{A}, \rho_A\big]\hat{A}\Big] = \sum_{i,j} \lambda_j^{n-1}(\lambda_i - \lambda_j)\vert a_{ij}\vert^2.
\end{equation}
Noting that $0 \leq \lambda_i\leq 1$, let us consider two eigenvalues, $\lambda_x$ and $\lambda_y$, with $\lambda_x < \lambda_y$. We then have a positive contribution to the sum of the form $\lambda_x^{n-1}(\lambda_y-\lambda_x)\vert a_{xy}\vert^2$, but also a negative contribution of the form $-\lambda_y^{n-1}(\lambda_y-\lambda_x)\vert a_{xy}\vert^2$. The negative contribution is at least as large in magnitude as the positive one, since $\lambda_y^{n-1} \geq \lambda_x^{n-1}$. Hence, the $n$-durability, defined with the negative sign as in Equation \ref{eq:durability}, obeys $D_{n,A}\geq 0$.
Not only is the $n$-durability a non-negative quantity, it also has the useful property that it reduces to the variance in the case of pure states. To see this, we note that if $\rho_A$ is pure then $\rho_A^2 = \rho_A$, and
\begin{align}\label{eq:derive_purity}
D_{n,A}= -\frac{\Tr_{A}\Big[\rho_A^{n-1}\big[\hat{A},\rho_A\big]\hat{A}\Big]}{\Tr_A[\rho_A^n]} & = -\frac{\Tr_{A}\Big[\rho_A\big[\hat{A},\rho_A\big]\hat{A}\Big]}{\Tr_A[\rho_A]} \nonumber\\
& = -\Tr_{A}\Big[\rho_A\hat{A}\rho_A\hat{A}\Big] + \Tr_A\Big[\rho_A\hat{A}^2\Big] \nonumber\\
& = -\sum_i\Big( \bra{i}\ket{\psi}\bra{\psi}\hat{A}\ket{\psi}\bra{\psi}\hat{A}\ket{i} - \bra{i}\ket{\psi}\bra{\psi}\hat{A}^2\ket{i} \Big)\nonumber\\
& = \bra{\psi}\hat{A}^2\ket{\psi} - \bra{\psi}\hat{A}\ket{\psi}^2\nonumber\\
& = (\Delta A)^2,
\end{align}
where we defined $\rho_A = \ket{\psi}\bra{\psi}$ and $\{\ket{i}\}$ is a set of orthonormal basis vectors, one of which can be chosen equal to $\ket{\psi}$. Hence:
\begin{equation}\label{eq:frag_purity}
D_{n,A} \xrightarrow{\text{purity}} (\Delta A)^2.
\end{equation}
We note here already that the variance does not constitute a bound on the $n$-durability, as we will explicitly see later.
Let us now consider the role that the $n$-durability plays in the early time evolution of the $n$-coherent information. The leading order change in the $n$-coherent information of the direct channel ($\delta I_n^d$) is given by
\begin{align}\label{eq:delta_n}
\delta I_n^d &= \frac{t^2}{2}\left(\ddot{H}_n(A)\big|_{t=0} -\ddot{H}_n(B)\big|_{t=0} \right) \nonumber\\[1em]
&=\frac{n t^2}{n-1}\Big((\Delta B)^2 D_{n,A}-(\Delta A)^2D_{n,B}\Big)\nonumber\\[1em]
&=-\frac{n t^2(\Delta B)^2}{n-1}\Big((\Delta A)^2 - D_{n,A}\Big).
\end{align}
Here, we used the fact that system $B$ is pure at $t=0$ such that, through Equation \ref{eq:frag_purity}, $D_{n,B} = (\Delta B)^2$. By the same reasoning, we see that if system $A$ is pure, then $D_{n,A} = (\Delta A)^2$ and $\delta I^d_n =0$, as expected.\footnote{See Appendix \ref{app:caution} for a discussion of the non-trivial limit of Equation \ref{eq:delta_n} as $n\rightarrow 1$.}
\subsection{The $n$-exposure}
Equation \ref{eq:delta_n} demonstrates that it is not merely the overall amount of quantum correlations that system $A$ initially possesses with the ancilla $\tilde{A}$ which determines the rate at which the $n$-coherent information changes at the onset of the interaction. Rather, the $n$-coherent information is sensitive to the amount of system $A$'s entanglement which is `exposed' to the action of the interaction Hamiltonian, as quantified by the difference between the variance and the $n$-durability. We will therefore refer to this difference as the `$n$-exposure', $E_{n,A}$:
\begin{equation}\label{eq:ee}
E_{n,A}:=\Big((\Delta A)^2 - D_{n,A}\Big)
\end{equation}
We see from Equation \ref{eq:frag_purity} that in the case that system $A$ is pure, $E_{n,A}$ is zero. This is of course to be expected: if system $A$ does not possess quantum correlations with $\tilde{A}$ to begin with, then no quantum correlations can be transferred during the interaction and the $n$-coherent information should not change. Conversely, if system $A$ possesses quantum correlations with $\tilde{A}$, the exposure is, in general, non-zero.
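A short numerical check of this statement (illustrative only; the states and the operator are arbitrary placeholders) evaluates Equation \ref{eq:ee} for a pure and a mixed qubit state:
\begin{verbatim}
import numpy as np

def n_exposure(rho, A, n):
    # E_{n,A} = (Delta A)^2 - D_{n,A}   (Equation eq:ee)
    lam, V = np.linalg.eigh(rho)
    lam = np.clip(lam, 0.0, None)
    var_A = (np.trace(rho @ A @ A) - np.trace(rho @ A) ** 2).real
    rho_pow = V @ np.diag(lam ** (n - 1)) @ V.conj().T
    durab = (-np.trace(rho_pow @ (A @ rho - rho @ A) @ A) / np.sum(lam ** n)).real
    return var_A - durab

sz = np.diag([1.0, -1.0]).astype(complex)
pure  = np.array([[0.5, 0.5], [0.5, 0.5]], dtype=complex)   # |+><+|
mixed = np.array([[0.6, 0.2], [0.2, 0.4]], dtype=complex)
print(n_exposure(pure,  sz, n=2))    # approximately zero
print(n_exposure(mixed, sz, n=2))    # positive for this state
\end{verbatim}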
However, as we anticipated, the value of the $n$-exposure depends not only on the absolute extent to which System $A$ is entangled or quantum correlated with $\tilde{A}$, but also on the extent to which these quantum correlations are `accessible' or `exposed' to the operator $\hat{A}$ in the interaction Hamiltonian. For illustration, consider the following scenario:
Let system $A$ be composed of two subsystems, $A_1$ and $A_2$, with a bipartite density matrix of the form $\rho_{A_1}\otimes\rho_{A_2}$. Assume that the operator $\hat{A}$ acts only on subsystem $A_1$, such that we may represent it on the Hilbert space of $A_1$ and $A_2$ as $\hat{A}_1\otimes\hat{\mathbb{I}}_2$. Let us now evaluate the numerator and denominator of $D_{n,A}$. The numerator is given by:
\begin{align}
&\Tr_{A_1}\Tr_{A_2}\Big[\rho_{A_1}^{n-1}\otimes\rho_{A_2}^{n-1}\big[\hat{A_1}\otimes\hat{\mathbb{I}}_2, \rho_{A_1}\otimes\rho_{A_2}\big]\hat{A}_1\otimes\hat{\mathbb{I}}_2\Big]\nonumber\\
&=\Tr_{A_1}\Tr_{A_2}\Big[\rho_{A_1}^{n-1}\big[\hat{A}_1,\rho_{A_1}\big]\hat{A_1}\otimes\rho_{A_2}^n\Big]\nonumber\\
&=\Tr_{A_1}\Big[\rho_{A_1}^{n-1}\big[\hat{A}_1,\rho_{A_1}\big]\hat{A_1}\Big]\Tr_{A_2}\Big[\rho_{A_2}^n\Big],
\end{align}
while the denominator ($n$-purity) is given by:
\begin{align}
\gamma_{n,A} &= \Tr_{A_1}\Tr_{A_2}\Big[\rho_{A_1}^n\otimes\rho_{A_2}^n\Big]\nonumber\\
&=\Tr_{A_1}\Big[\rho_{A_1}^n\Big]\Tr_{A_2}\Big[\rho_{A_2}^n\Big].
\end{align}
Hence, the $\Tr_{A_2}\big[\rho_{A_2}^n\big]$ terms cancel in the numerator and denominator, and we find that
\begin{equation}
E_{n,A}=(\Delta A)^2 - D_{n,A_1},
\end{equation}
where, as it should be, the only contribution to the variance comes from system $A_1$. Hence, there even exist extreme cases in which system $A$ overall is highly entangled with system $\tilde{A}$ (from Figure \ref{fig:setup}), but where this entanglement can be entirely accounted for by subsystem $A_2$ while subsystem $A_1$ may not be entangled with $\tilde{A}$ at all. In this case, while system $A$ as a whole is highly entangled with the ancilla $\tilde{A}$, the degree to which that entanglement is `exposed' to the action of the operator $\hat{A}$ is vanishing, since the operator $\hat{A}$ has non-trivial action only on subsystem $A_1$. Notice that, due to our results in Section \ref{sec:prelim}, this conclusion remarkably holds true, to second order in time, even in the presence of free Hamiltonians on system $A$, which generically include an interaction between $A_1$ and $A_2$.
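This cancellation is easily verified numerically; in the following sketch (with arbitrary placeholder states and operator), the durability of the composite system $A_1A_2$ under $\hat{A}_1\otimes\hat{\mathbb{I}}_2$ coincides with that of $A_1$ alone:
\begin{verbatim}
import numpy as np

def n_durability(rho, A, n):
    lam, V = np.linalg.eigh(rho)
    rho_pow = V @ np.diag(lam ** (n - 1)) @ V.conj().T
    return (-np.trace(rho_pow @ (A @ rho - rho @ A) @ A) / np.sum(lam ** n)).real

n = 2
A1   = np.array([[0.0, 1.0], [1.0, 0.0]], dtype=complex)     # acts on A1 only
rho1 = np.array([[0.6, 0.2], [0.2, 0.4]], dtype=complex)
rho2 = np.array([[0.8, 0.1], [0.1, 0.2]], dtype=complex)     # arbitrary A2 state
rho    = np.kron(rho1, rho2)                                 # rho_A1 (x) rho_A2
A_full = np.kron(A1, np.eye(2, dtype=complex))               # A1 (x) I_2

print(n_durability(rho, A_full, n))   # equals ...
print(n_durability(rho1, A1, n))      # ... the durability of A1 alone
\end{verbatim}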
\subsection{Properties of the $n$-exposure}
The $n$-exposure determines the leading order change of the $n$-coherent information of the direct and complementary channels that arise at the onset of the interaction of systems $A$ and $B$.
Intuitively, the $n$-exposure is the extent to which pre-existing entanglement between $A$ and $\tilde{A}$ is `exposed' to the interaction Hamiltonian of $A$ and $B$. The higher the exposure, the faster the coherent informations of the direct and complementary channels change at the onset of the interaction.
We found that, in the case of interaction Hamiltonians of the form $\hat{A}\otimes\hat{B}$, the $n$-exposure is the difference between two terms: $E_{n,A}= (\Delta A)^2 -D_{n,A}$, where $(\Delta A)^2$ is the variance in the observable $\hat{A}$ and where $D_{n,A}$ is the $n$-durability of system $A$. Both are properties of system $A$ only: to calculate the $n$-exposure of system $A$ merely requires knowledge of the operator $\hat{A}$ of the interaction Hamiltonian and the initial reduced density matrix, $\rho_A$.
For initially pure states of $A$, the $n$-durability equals the variance, i.e., the $n$-exposure vanishes for pure states.
For initial states of $A$ that are mixed, the $n$-durability can be smaller than the variance. In this case, the $n$-exposure is positive, implying that the $n$-coherent information of the complementary channel rises while, due to Equation \ref{sinv}, that of the direct quantum channel drops at the onset of the interaction.
Conversely, there also exist initial mixed states of $A$ for which the $n$-durability exceeds the variance. In such cases, the $n$-exposure is negative, and the $n$-coherent information of the complementary channel drops while that of the direct channel rises at the onset of the interaction.
To further illustrate the significance of negative and positive changes in the $n$-coherent information, let us consider the example of $n=2$. For $n=2$, the $n$-R\'enyi entropy is a simple function of the purity, $\Tr[\rho^2]$,
\begin{equation}
H_2(A) = - \log(\gamma_{2,A}),
\end{equation}
where $\gamma_2$ is the purity. The purity is useful in that even for mixed bipartite states it is possible to prove that entanglement exists between the subsystems if the purity of one subsystem is lower than the purity of the bipartite system as a whole \cite{Horodecki1996, Islam2015}. In our case, if
\begin{equation}
\gamma_{2}(A) < \gamma_{2}(\tilde{A}A),
\end{equation}
or equivalently,
\begin{equation}\label{eq:ineq_2}
H_{2}(A) > H_{2}(\tilde{A}A),
\end{equation}
then entanglement must exist between subsystems $A$ and $\tilde{A}$. In fact, the same inequality applies for any $n$-R\'enyi entropy \cite{HORODECKI1996377}. This inequality is known to be a valuable tool to infer the presence of entanglement under non-ideal experimental conditions \cite{Mintert2007}. Furthermore, we note that the $2$-R\'enyi entropy provides a direct lower bound on the von Neumann entropy, while stricter bounds can be obtained using higher order R\'enyi entropies \cite{Daley2012}:
\begin{align}
H_1(A) &\geq H_2(A),\\
H_1(A) &\geq 2H_2(A) - H_3(A).
\end{align}
Hence, the $n$-coherent information behaves similarly to the traditional $1$-coherent information in the sense that positivity indicates the presence of quantum correlations. In order, for example, to prevent loss of quantum correlations to an environment system $B$,
it is advisable to avoid input states for which the $n$-coherent information decreases to leading order. This is to prevent the $n$-coherent information from becoming negative, at which point we would no longer fulfill inequality \ref{eq:ineq_2} that guarantees bipartite entanglement between $A$ and $\tilde{A}$. Meanwhile, input states which lead to an increase in the $n$-coherent information are desirable, as they provide a `safe zone' in which the inequality is increasingly fulfilled.
Therefore, while the $n$-coherent information does not constitute a direct measure of bipartite entanglement, its behavior does provide an indication of which input states of $A$ are more or less vulnerable to losing quantum correlations with $\tilde{A}$ to an environment, $B$. Indeed, in order to obtain the quantum channel capacity from the coherent information, one maximises over all possible input states of the $\tilde{A}A$ system and over multiple parallel uses of the channel. In this optimisation, some states will induce a positive change in the coherent information, while others will lead to a negative change. The preferred input state depends on whether the desired outcome is the transmission or retention of pre-existing quantum correlations between $A$ and $\tilde{A}$.
In the next section, we will apply the notion of $n$-coherent information to the light-matter interaction. We will explore how the leading order behaviour of the $n$-coherent information, as determined by the $n$-exposure, varies with the input state. We will focus on the $n=2$ case for simplicity, but note that a similar analysis can be performed straightforwardly for other $n$.
\section{Application to the light-matter interaction}\label{sec:rabi}
In practice, much of quantum communication relies upon the interaction between light and matter. In this section, we apply our results to a simplified model of the light-matter interaction, namely the quantum Rabi model. At its core, this model describes a two-level quantum system coupled to a single mode of a massless scalar field \cite{rabi1, rabi2, rabi4}. This model is of particular utility as it can be implemented through a variety of experimental techniques such as Josephson junctions \cite{rabi3}, trapped ions \cite{rabi5}, and superconductors \cite{rabi6}.
In the present context, the qubit and the field are chosen initially unentangled and in a product state $\rho_f\otimes\rho_q$. We assume the initial state of the field, $\rho_f$, to be the vacuum state, $\rho_f = \ket{0}\bra{0}$.
We allow the initial state, $\rho_q$, of the qubit to be arbitrary. Expressed in terms of the eigenstates of the $\sigma_z$ Pauli operator it reads
\begin{equation}
\rho_q = \delta\ket{z^+}\bra{z^+} + \alpha\ket{z^+}\bra{z^-} + \alpha^*\ket{z^-}\bra{z^+} + (1-\delta)\ket{z^-}\bra{z^-},
\end{equation}
where $\sigma_z\ket{z^+} = \ket{z^+}$ and $\sigma_z\ket{z^-} = -\ket{z^-}$. We then consider the interaction of a single field mode with the qubit, governed by an interaction Hamiltonian $H_{\text{int}}$ of the form
\begin{equation}
H_{\text{int}} = \nu\sigma_z\otimes (a + a^\dagger)
\end{equation}
where $a$ and $a^\dagger$ are the annihilation and creation operators of the mode considered and where $\nu$ is the effective coupling constant. Since we will neglect the free Hamiltonians, we can set $\nu = 1$ and instead absorb the $\nu$-dependence in the $t$-dependence. We will now solve the problem non-perturbatively in order to then obtain an exact expression for the time evolution of the $n$-R\'enyi entropy, and compare its second time derivative to our Equation \ref{eq:deriv_gen}. Note that while the particularly simple form of the interaction Hamiltonian invoked here allows for a non-perturbative calculation to be performed, this is not always the case. See Appendix \ref{app:non-pert} for details.
We first express the time-evolved system as:
\begin{equation}
\rho(t) = e^{it\sigma_z\otimes(a+a^\dagger)}(\rho_q\otimes\rho_f) e^{-it\sigma_z\otimes(a+a^\dagger)}.
\end{equation}
For the density matrix of the qubit this gives, see \cite{KendallKempf} for details:
\begin{equation}\label{eq:time_ev_rho}
\rho_q(t) = \delta\ket{z^+}\bra{z^+} + \alpha e^{-2t^2}\ket{z^+}\bra{z^-} + \alpha^* e^{-2t^2}\ket{z^-}\bra{z^+} + (1-\delta)\ket{z^-}\bra{z^-}.
\end{equation}
Working in the eigenbasis of the qubit, we have:
\begin{equation}
H_n(\rho_q(t)) = \frac{1}{1-n}\log\left(\sum_i\lambda_i^n\right),
\end{equation}
\begin{equation}
\dot{H}_n(\rho_q(t)) = \frac{1}{1-n}\,\frac{\sum\limits_i n\lambda_i^{n-1}\dot{\lambda}_i}{\sum\limits_i \lambda_i^n},
\end{equation}
\begin{equation}
\ddot{H}_n(\rho_q(t)) = \frac{1}{1-n}\left(\frac{\sum\limits_i\big(n(n-1)\lambda_i^{n-2}\dot{\lambda}_i^2 + n\lambda_i^{n-1}\ddot{\lambda}_i\big)}{\sum\limits_i \lambda_i^n} - \frac{\Big(\sum\limits_i n\lambda_i^{n-1}\dot{\lambda}_i\Big)^2}{\Big(\sum\limits_i\lambda_i^n\Big)^2}\right).
\end{equation}
Let us therefore consider the eigenvalues of $\rho_q(t)$. Diagonalising Equation \ref{eq:time_ev_rho}, we obtain:
\begin{equation}
\lambda^{\pm}(t) = \frac{1\pm\sqrt{1-4(\delta - \delta^2 - \vert\alpha\vert^2 e^{-4t^2})}}{2},
\end{equation}
such that
\begin{equation}
\dot{\lambda}^{\pm}(t) = \mp\frac{8\vert\alpha\vert^2 t e^{-4t^2}}{\sqrt{1-4(\delta - \delta^2 - \vert\alpha\vert^2 e^{-4t^2})}},
\end{equation}
and
\begin{equation}
\ddot{\lambda}^{\pm}(t) = \mp\left(\frac{8\vert\alpha\vert^2e^{-4t^2}\big(1 -8t^2\big)}{\sqrt{1-4(\delta - \delta^2 - \vert\alpha\vert^2 e^{-4t^2})}}+\frac{128\vert\alpha\vert^4 t^2 e^{-8t^2}}{\big(1-4(\delta - \delta^2 - \vert\alpha\vert^2 e^{-4t^2})\big)^{3/2}}\right).
\end{equation}
From here, we readily see that $\dot{\lambda}^{\pm}\vert_{t=0} = 0$, such that
\begin{equation}
\ddot{H}_n(\rho_q(t=0)) = \frac{n\sum\limits_i \lambda_i^{n-1}\ddot{\lambda}_i}{(1-n)\sum\limits_i\lambda_i^n}\Bigg\vert_{t=0},
\end{equation}
where
\begin{equation}
\ddot{\lambda}^{\pm}\big\vert_{t=0} = \mp \frac{8\vert\alpha\vert^2}{\sqrt{1-4(\delta - \delta^2 - \vert\alpha\vert^2)}}
\end{equation}
Denoting the $t=0$ eigenvalues as $\lambda^{\pm}$, we finally obtain:
\begin{equation}\label{eq:qubit_result}
\ddot{H}_n(\rho_q(t=0)) = \frac{-8n\vert\alpha\vert^2\big({\lambda^{-}}^{n-1} - {\lambda^{+}}^{n-1}\big)}{\big(n-1\big)\big(\lambda^+ - \lambda^-\big)\big({\lambda^{-}}^n + {\lambda^{+}}^n\big)}
\end{equation}
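Before turning to the comparison with Equation \ref{eq:deriv_gen}, we note that Equation \ref{eq:qubit_result} can also be checked against a finite-difference second derivative of $H_n$ built from the exact eigenvalues $\lambda^{\pm}(t)$ above (a sketch with arbitrarily chosen parameters satisfying $\vert\alpha\vert^2 \leq \delta-\delta^2$):
\begin{verbatim}
import numpy as np

delta, alpha2, n = 0.6, 0.05, 2     # placeholder qubit parameters, |alpha|^2 = 0.05

def H_n(t):
    s = np.sqrt(1 - 4 * (delta - delta**2 - alpha2 * np.exp(-4 * t**2)))
    lam = np.array([(1 + s) / 2, (1 - s) / 2])
    return np.log(np.sum(lam ** n)) / (1 - n)

# closed form, Equation (eq:qubit_result)
s0 = np.sqrt(1 - 4 * (delta - delta**2 - alpha2))
lp, lm = (1 + s0) / 2, (1 - s0) / 2
closed = -8 * n * alpha2 * (lm**(n - 1) - lp**(n - 1)) \
         / ((n - 1) * (lp - lm) * (lm**n + lp**n))

# central finite difference at t = 0
h = 1e-4
numeric = (H_n(h) - 2 * H_n(0.0) + H_n(-h)) / h**2
print(closed, numeric)
\end{verbatim}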
Let us now compare this result to Equation \ref{eq:deriv_gen}. In Equation \ref{eq:deriv_gen}, all quantities are evaluated at $t=0$. Therefore, we simply work with the initial qubit state $\rho_q$. In order to then evaluate Equation \ref{eq:deriv_gen}, we re-express the system in the eigenbasis of $\rho_q$. We have:
\begin{equation}
\lambda^{\pm} = \frac{1\pm\sqrt{1-4(\delta -\delta^2-\vert\alpha\vert^2)}}{2},
\end{equation}
\begin{equation}
\ket{\lambda^+} = \frac{1}{\sqrt{1+\vert x\vert^2}}
\Big(\ket{z^+} + x\ket{z^-}\Big), \qquad
\ket{\lambda^-} = \frac{1}{\sqrt{1+\vert y\vert^2}}
\Big(\ket{z^+} + y\ket{z^-}\Big),
\end{equation}
where
\begin{equation}
x := \frac{-(\delta - \lambda^+)}{\alpha}, \qquad y := \frac{1-(\delta + \lambda^+)}{\alpha}.
\end{equation}
We may now re-express $\sigma_z = \ket{z^+}\bra{z^+} - \ket{z^-}\bra{z^-}$ in terms of the eigenbasis of $\rho_q$. We have:
\begin{align}
\sigma_z = \frac{1}{\vert x-y\vert^2}\bigg(& (1+\vert x\vert^2)(\vert y\vert^2 -1)\ket{\lambda^+}\bra{\lambda^+}\nonumber\\
&+\sqrt{(1+\vert x\vert^2)(1+\vert y\vert^2)}(1-yx^*)\ket{\lambda^+}\bra{\lambda^-}\nonumber\\
&+\sqrt{(1+\vert x\vert^2)(1+\vert y\vert^2)}(1-xy^*)\ket{\lambda^-}\bra{\lambda^+}\nonumber\\
&+(1+\vert y\vert^2)(\vert x\vert^2 -1)\ket{\lambda^-}\bra{\lambda^-}\bigg).
\end{align}
Working in this eigenbasis, we have:
\begin{align}
\Tr_q\Big[\rho_q^{n-1}[\sigma_z,\rho_q]\sigma_z\Big] &= \sum_{i,j}\lambda_j^{n-1}(\lambda_i-\lambda_j)\vert{\sigma_z}_{ij}\vert^2\nonumber\\
&=\big({\lambda^-}^{n-1}-{\lambda^+}^{n-1}\big)\big(\lambda^+ - \lambda^-\big)\vert {\sigma_z}_{+-}\vert^2\nonumber\\
&=\frac{4\vert\alpha\vert^2\big({\lambda^-}^{n-1}-{\lambda^+}^{n-1}\big)}{\big(\lambda^+ - \lambda^-\big)},
\end{align}
where the characteristic equation, ${\lambda^+}^2 - \lambda^+ + \delta - \delta ^2 - \vert\alpha\vert^2 =0$, is used to convert the $\sigma_z$ components into expressions in terms of $\lambda^{\pm}$. Taking into account that the field variance in this case is $1$, and substituting the above result into Equation \ref{eq:deriv_gen}, we again arrive at Equation \ref{eq:qubit_result} for $\ddot{H}_n(\rho_q(t=0))$, which indeed confirms the validity of the perturbative approach we have employed in the previous sections.
Using these non-perturbatively verified results, we can assess how the $n$-exposure depends upon the precise configuration of the qubit. We have:
\begin{equation}
E_{n,q} = (\Delta \sigma_z)^2 - D_{n, q} = 4(\delta - \delta^2) +\frac{4\vert\alpha\vert^2\left({\lambda^-}^{n-1} - {\lambda^+}^{n-1}\right)}{\left(\lambda^+-\lambda^-\right)\left({\lambda^-}^{n} + {\lambda^+}^{n}\right)}.
\end{equation}
From this expression, we see that the $n$-exposure does not depend on the phase of $\alpha$. Indeed, the $n$-exposure can be expressed in terms of $\delta$ and $\vert\alpha\vert^2$ alone.
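A direct tabulation of the 2-exposure from this expression is straightforward; the following sketch (with an arbitrary, purely illustrative grid of $\delta$ and $\vert\alpha\vert^2$ values) can serve as a starting point for contour plots such as Figure \ref{fig:non-bloch}:
\begin{verbatim}
import numpy as np

def exposure_2(delta, alpha2):
    # E_{2,q} of the qubit for n = 2, as given in the expression above
    s = np.sqrt(1 - 4 * (delta - delta**2 - alpha2))
    lp, lm = (1 + s) / 2, (1 - s) / 2
    return 4 * (delta - delta**2) \
           + 4 * alpha2 * (lm - lp) / ((lp - lm) * (lm**2 + lp**2))

for delta in (0.55, 0.65, 0.8):
    for alpha2 in (0.0, 0.05, 0.1):
        if alpha2 <= delta - delta**2:        # positivity of the eigenvalues
            print(delta, alpha2, exposure_2(delta, alpha2))
\end{verbatim}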
As discussed in Section \ref{sec:n_1}, the leading order behaviour of the $n\rightarrow 1$ coherent information is non-trivial. This is due to the divergence in the second time derivative of the von Neumann entropy of the environment system.\footnote{Note that the divergent second derivative does not imply a divergence in the von Neumann entropy itself; see Appendix \ref{app:exact_plots}.} Hence, we will restrict our attention to $n>1$ and choose the simplest case ($n=2$) to illustrate the features of the $n$-exposure. In Figure \ref{fig:non-bloch}, we present a contour plot of the 2-exposure as a function of these two variables.\footnote{Note that the positivity of the eigenvalues requires $\vert\alpha\vert^2 \leq \delta - \delta ^2$.} We also include isocurves of the 2-R\'enyi entropy. Crucially, we see that the isocurves of the exposure follow different trajectories to the isocurves of the entropy through the $(\delta,\vert\alpha\vert^2)$ plane. This illustrates that the leading order change in the 2-coherent information is not simply a function of the total amount of entanglement present, but depends upon the precise configuration of the qubit. The right hand side of Figure \ref{fig:non-bloch} highlights this phenomenon by plotting the variation in the 2-exposure along isocurves of constant 2-R\'enyi entropy.
\begin{figure}
\caption{Left: contour plot of the 2-exposure of the qubit across the $(\delta,\vert\alpha\vert^2)$ plane (green). Also included are contours of the 2-R\'enyi entropy, which do not align with the 2-exposure contours. Right: variation in the 2-exposure as a function of $\delta$ for constant values of 2-R\'enyi entropy.}
\label{fig:non-bloch}
\end{figure}
One may also consider the $n$-exposure of the qubit in the Bloch sphere representation. We first set:
\begin{equation}
\rho_q = \frac{1}{2}(\mathbb{I} + a_x\sigma_x +a_y\sigma_y +a_z\sigma_z),
\end{equation}
where the Bloch vector is $\Vec{a}=(a_x,a_y,a_z)$. This then leads to the following transformation:
\begin{equation}
\delta = \frac{1+a_z}{2}, \qquad \vert\alpha\vert^2 = \frac{\vert a_x\vert^2 + \vert a_y\vert^2}{4}.
\end{equation}
We may use these expressions to convert Figure \ref{fig:non-bloch} into the Bloch sphere representation. We note that in this representation, the exposure is independent of the phase in the $(a_x, a_y)$ plane, so that it is sufficient to plot only a cross section in the $(a_x, a_z)$ plane. We illustrate the 2-exposure in the Bloch sphere representation in Figure \ref{fig:bloch}. Again, we also plot isocurves of the 2-R\'enyi entropy to illustrate that the exposure is a non-trivial function of the distribution of the entanglement in the state space of the qubit.
In particular, we note that in order to minimise exposure at the onset of an interaction, we should tune the distribution of the entanglement in the initial system. In this example we see from Figure \ref{fig:bloch} that for the same initial 2-R\'enyi entropy, the 2-exposure is minimised along the $a_x$ axis, while it is maximised along the $a_z$ axis. This indicates the optimal state of the qubit in the Bloch sphere representation in order to minimise the leading order change in the second R\'enyi entropy. The same analysis can be applied to more complex systems in order to identify the ideal initial configuration for the minimisation or maximisation of the exposure.
\begin{figure}
\caption{2-exposure of the qubit in the Bloch sphere representation. Again we see that the isocurves of the 2-R\'enyi entropy do not align with those of the exposure, indicating that it is not simply the amount of entanglement, but the way in which it is distributed, that determines the exposure. In this case, if avoidance of transmission is desired then the equatorial direction is `safer' than the polar direction.}
\label{fig:bloch}
\end{figure}
Figures \ref{fig:non-bloch} and \ref{fig:bloch} illustrate that in the qubit case, the 2-exposure is always positive. This is due to the simplicity of the qubit state space. In the next section, however, we will consider a qutrit system, for which the higher dimensionality leads to more complex results.
\section{Application to qutrit systems}\label{sec:qutrit}
In this section, we provide a visualisation of the 2-exposure for a simple qutrit system. As we vary the qutrit density matrix, we will see how the exposure can change in non-trivial ways and, in particular, that it can change sign.
To this end, let us first consider the general form of the qutrit density matrix:
\begin{equation}
\rho =
\begin{pmatrix}
\omega_{x} & \frac{-ia_z - q_z}{2} & \frac{-ia_y - q_y}{2} \\
\frac{ia_z - q_z}{2} & \omega_{y} & \frac{-ia_x - q_x}{2} \\
\frac{ia_y - q_y}{2} & \frac{ia_x - q_x}{2} & \omega_{z}
\end{pmatrix}.
\end{equation}
While this generic qutrit state possesses eight degrees of freedom, we may restrict our attention to the case where all off-diagonal matrix elements are purely imaginary ($q_j = 0$). We then have:
\begin{equation}
\rho =
\begin{pmatrix}
\omega_{x} & \frac{-ia_z}{2} & \frac{-ia_y}{2} \\
\frac{ia_z}{2} & \omega_{y} & \frac{-ia_x}{2} \\
\frac{ia_y}{2} & \frac{ia_x}{2} & \omega_{z}
\end{pmatrix},\\[1em]
\end{equation}
where $0\leq a_j \leq 1$, $0\leq \omega_j \leq 1$, and $\sum_j \omega_j = 1$. The non-negativity of this simplified density matrix is ensured through the following conditions \cite{Kurzynski2016}:
\begin{equation}\label{eq:cond_1}
4\omega_j\omega_k \geq a_l^2,
\end{equation}
\begin{equation}\label{eq:cond_2}
4\omega_j\omega_k\omega_l \geq \omega_j a_j^2 + \omega_k a_k^2 + \omega_l a_l^2.
\end{equation}
From here, we can simplify further by enforcing $\omega_j = \omega_k = \omega_l = 1/3$. In this case, conditions \ref{eq:cond_1} and \ref{eq:cond_2} can be re-written as a single condition, namely
\begin{equation}
\frac{4}{9} \geq a_x^2 + a_y^2 + a_z^2,
\end{equation}
which describes a space of $a_x, a_y$, and $a_z$ values enclosed by an octant of a sphere of radius $2/3$. Given a choice of an operator acting on the qutrit system, we may then examine the variation of the exposure as we move through this space of states.
The unitary dynamics of a qutrit can be described through three different types of transformations \cite{Kurzynski2016}, namely rotations, single-axis twisting, and dual-axes counter-twisting. The corresponding Hamiltonians can be built from the following three spin-1 matrices (rotations are generated by the $S_j$ themselves, while the twisting terms are quadratic in the $S_j$):
\begin{equation}
S_x =
\begin{pmatrix}
0 & 0 & 0 \\
0 & 0 & -i \\
0 & i & 0
\end{pmatrix}, \qquad
S_y =
\begin{pmatrix}
0 & 0 & i \\
0 & 0 & 0 \\
-i & 0 & 0
\end{pmatrix}, \qquad
S_z =
\begin{pmatrix}
0 & -i & 0 \\
i & 0 & 0 \\
0 & 0 & 0
\end{pmatrix}.
\end{equation}
We may now calculate the 2-exposure for different qutrit configurations and different Hamiltonians according to Equation \ref{eq:ee}. Since our space of possible states is a three-dimensional octant of a sphere, we represent the results as cross-sections in the $(a_x, a_y)$ plane. In Figure \ref{fig:qutrits} we illustrate the exposure at the $a_z=0.5$ plane for two arbitrary Hamiltonians. We also include isocurves of constant 2-R\'enyi entropy, demonstrating that the $n$-exposure is not simply dependent upon the total amount of entanglement present, but rather on the precise distribution of this entanglement. For each example Hamiltonian we also see that, depending upon the precise configuration of the input state, the 2-exposure can be either positive or negative, indicating, e.g., in the case where $B$ is an environment, `safe' and `unsafe' configurations, respectively.
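For reference, a minimal sketch of such a computation (the particular state and the Hamiltonians below are merely illustrative choices) is:
\begin{verbatim}
import numpy as np

Sx = np.array([[0, 0, 0], [0, 0, -1j], [0, 1j, 0]])
Sy = np.array([[0, 0, 1j], [0, 0, 0], [-1j, 0, 0]])
Sz = np.array([[0, -1j, 0], [1j, 0, 0], [0, 0, 0]])

def exposure(rho, A, n=2):
    # E_{n,A} = (Delta A)^2 - D_{n,A}, Equation (eq:ee)
    lam, V = np.linalg.eigh(rho)
    var_A = (np.trace(rho @ A @ A) - np.trace(rho @ A) ** 2).real
    rho_pow = V @ np.diag(lam ** (n - 1)) @ V.conj().T
    durab = (-np.trace(rho_pow @ (A @ rho - rho @ A) @ A) / np.sum(lam ** n)).real
    return var_A - durab

def rho_qutrit(ax, ay, az):
    # simplified qutrit state: omega_j = 1/3, purely imaginary off-diagonals
    return np.array([[1/3, -1j*az/2, -1j*ay/2],
                     [1j*az/2, 1/3, -1j*ax/2],
                     [1j*ay/2, 1j*ax/2, 1/3]])

rho = rho_qutrit(0.3, 0.2, 0.1)           # inside a_x^2 + a_y^2 + a_z^2 <= 4/9
print(exposure(rho, Sx @ Sx))             # single-axis twisting, S_x^2
print(exposure(rho, Sy @ Sz + Sz @ Sy))   # dual-axes counter-twisting
\end{verbatim}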
\begin{figure}
\caption{Left: 2-exposure across the $a_x,a_y$ plane at $a_z=0.5$ for the Hamiltonian $S_x^2$. Right: 2-exposure for the Hamiltonian $S_yS_z+S_zS_y$ across the $a_x,a_y$ plane at $a_z=0.0$. In this case we see regions of negative exposure. Also plotted are the 2-R\'enyi entropy isocurves.}
\label{fig:qutrits}
\end{figure}
\section{Conclusions and Outlook}\label{sec:conclusion}
In this work, we have considered a setup where a system $A$ is initially entangled with and purified by an ancilla $\tilde{A}$. System $A$ then starts to interact with a system $B$ and may, therefore, become entangled with $B$. Through the interaction, $A$ may lose some of its quantum correlations with $\tilde{A}$ and $B$ may acquire quantum correlations with $\tilde{A}$. We identified the maps of reduced density matrices from $A$ to $A'$ and from $A$ to $B'$ (primes denoting a later time) as the direct and the complementary channel, respectively.
Because the quantum capacity of a quantum channel is obtained by optimizing the coherent information over input states and parallel channel uses, we have calculated the leading order change in the coherent information of each channel. The coherent information, in turn, provides a lower bound on the quantum channel capacity.
In quantum technologies, depending on the application, it can be desirable to maximize the coherent information of the direct channel, in order to protect existing quantum correlations from leakage to an environment $B$. Conversely, it may be desirable to maximize the coherent information of the complementary channel for purposes of quantum communication from $A$ to $B$. It is therefore of interest to determine the factors which govern the leading order change in the coherent information at the onset of a quantum channel, so that quantum systems may be initialised in a configuration most favourable to the application.
To calculate the leading order behaviour of the coherent information, we first showed that the free Hamiltonians do not contribute to leading (second) order. This dramatically simplifies the calculations, as the interaction Hamiltonian may be considered alone. Moreover, this has the important implication that resonance phenomena, which require free Hamiltonians, do not contribute to leading order. This is of particular significance in systems which make use of the light-matter interaction, in which resonance phenomena are commonly encountered. The significance of this finding will be investigated further in future work.
We then found that the leading order term in the perturbative expansion of the coherent information diverges when system $B$ is initially pure. We therefore generalized the notion of coherent information to the family of $n$-coherent information, via the $n$-R\'enyi entropies. The $n$-coherent information is always regular for $n>1$.
Calculation of the dynamics of the $n$-coherent information, and therefore of the $n$-R\'enyi entropies, is desirable because it implicitly also determines the dynamics of the entire spectrum of the underlying density matrix. This is because the family of $n$-R\'enyi entropies (unlike the von Neumann entropy alone) is sufficient to reconstruct the spectrum of the density matrix.
A further useful property of the $n$-coherent information arises from the fact that the $n$-R\'enyi entropies of two subsystems that together constitute a pure supersystem are identical, as is the case also for the von Neumann entropy. Therefore, and because $\ddot{H}_n(\tilde{A})=0$, calculating the leading order $n$-R\'enyi entropies and $n$-coherent informations of the direct and complementary channels also yields the leading order of all other coherent informations that can be formed among the three subsystems $A,\tilde{A},B$. For example, when $H_n(A)-H_n(AB)$ becomes positive, this indicates that quantum correlations between $A$ and $B$ are becoming established, and we have $\frac{d^2}{dt^2}\big(H_n(A)-H_n(AB)\big)=\ddot{H}_n(A)$ since $\ddot{H}_n(AB) = \ddot{H}_n(\tilde{A})=0$.
As it is known that the traditional (1-)coherent information serves as a lower bound on the quantum channel capacity, it is interesting to ask whether the $n$-coherent information performs an analogous role within quantum information. Indeed, through weak subadditivity, we find that the $n$-coherent information plays an important role in determining bounds on quantum communication complexity \cite{2002quant.ph..4093V}. Whereas the quantum channel capacity represents the highest rate at which quantum information may be transferred over a noisy quantum channel, the quantum communication complexity represents the minimum number of qubits which must be transferred in order to perform a particular quantum transformation. In future work, we will explore the connection between these bounds on quantum communication in greater detail.
We then explicitly calculated the leading order time evolution of the $n$-coherent information of the direct and complementary channels for the case of interaction Hamiltonians of the form $\hat{A}\otimes \hat{B}$, and showed that the leading order is determined by a quantity that we call the $n$-exposure. Intuitively, the $n$-exposure quantifies how much of the pre-existing quantum correlations in the system $A\tilde{A}$ are exposed to the interaction Hamiltonian between systems $A$ and $B$. The larger the exposure, the faster the coherent informations of the direct and complementary channels change. Importantly, we have shown that the $n$-exposure is not proportional to the amount of pre-existing entanglement (as measured by the von Neumann entropy), nor is it proportional to the $n$-R\'enyi entropy. Instead, it depends non-trivially on the initial state of $A$, such that two initial configurations with the same initial entanglement may exhibit entirely different behaviours at the onset of the quantum channel. We illustrated how the $n$-exposure depends on the initial state of $A$, and on the interaction Hamiltonian, in a simple model of the light-matter interaction and in qutrit systems.
In future work, it will be very interesting to calculate the analog of the $n$-exposure for interaction Hamiltonians of the more general form $\sum_r \hat{A}_r\otimes\hat{B}_r$. Interestingly, in this case the contributions to the leading order change in the $n$-coherent information of the direct and complementary channels do not simply factorize into separate contributions from systems $A$ and $B$. It will then also be interesting to revisit the limit $n\rightarrow 1$ (see also Appendix \ref{app:caution}).
In practice, for example in the running of quantum processors, it may be possible to use the notion of exposure to optimize their performance, for example, by extremizing the exposure to maximize a desired transfer of quantum correlations, or conversely, to minimize exposure in order to reduce leakage of quantum correlations to an environment.
We also anticipate that the notion of $n$-exposure may prove useful in the implementation of quantum algorithms when certain qubits are idle. In particular, one may rotate a qubit in its Bloch sphere along an isocurve of $n$-R\'enyi entropy, thus preserving its $n$-coherent information with the rest of the qubits, while moving it to a location in the Bloch sphere that decreases its exposure to decoherence. This principle should also be extendable to multiple qubits at a time, possibly utilizing supervised machine learning with a cost function that contains the exposure. Work in this direction is in progress.
In this work we have focused on the dynamics of the coherent information,
which we note is not itself an entanglement monotone. Indeed, given that our study involves a tripartite system, traditional measures of entanglement such as the von Neumann entropy are insufficient to describe true tripartite entanglement. Hence, work is ongoing to extend our results using the logarithmic negativity as an entanglement monotone of tripartite systems. Nevertheless, positivity of the coherent information does imply the presence of quantum correlations, and coherent information remains a critical tool in the assessment of quantum channel capacity.
\textbf{Acknowledgements.} AK acknowledges support through a Discovery Grant of the Natural Sciences and Engineering Research Council of Canada (NSERC), a Discovery Project grant of the Australian Research Council (ARC), and a Google Faculty Research Award.
B\v{S} is supported in part by the Perimeter Institute, which is supported in part by the Government of Canada through the Department of Innovation, Science
and Economic Development Canada and by the Province of Ontario through the Ministry of Economic Development, Job Creation and
Trade.
\section*{References}
\appendices
\section{Non-perturbative calculation}\label{app:non-pert}
While we have taken a perturbative approach throughout this work, we illustrate here the exact calculation of the time-evolved state of System $A$. We find that the exact form of $\rho_A$ is not easily raised to the $n^\text{th}$ power in general, as is required for the calculation of the $n$-R\'enyi entropies. While there are particularly simple interaction Hamiltonians such as that invoked in Section \ref{sec:rabi} which enable exact calculations to be performed, this is not often the case.
This highlights the utility of our perturbative method. Let us first express the time-evolved state of system $A$ as:
\begin{align}
\rho_{A}(t) &= \Tr_{B}\Big[e^{it\hat{A}\otimes\hat{B}}\rho_A\otimes\rho_B e^{-it\hat{A}\otimes\hat{B}}\Big]\nonumber\\[1em]
&= \sum_r \bra{b_r}e^{it\hat{A}\otimes\hat{B}}\rho_A\otimes\rho_B e^{-it\hat{A}\otimes\hat{B}}\ket{b_r}\nonumber\\[1em]
&= \sum_r e^{it\hat{A}b_r}\rho_Ae^{-it\hat{A}b_r}\rho_{Brr},
\end{align}
where, working in the eigenbasis of $\hat{B}$:
\begin{equation}
\rho_{Bij} = \bra{b_i}\rho_B\ket{b_j}.
\end{equation}
Representing $\rho_A$ in the eigenbasis of $\hat{A}$:
\begin{equation}
\rho_A = \sum_{ij}\rho_{Aij}\ket{a_i}\bra{a_j},
\end{equation}
we have:
\begin{equation}
\rho_{A}(t) = \sum_{ijk}\rho_{Aij}\rho_{Bkk}e^{itb_k(a_i-a_j)}\ket{a_i}\bra{a_j},
\end{equation}
such that
\begin{equation}
\gamma_{n,A}(t)=\Tr_A[\rho_A(t)^n],
\end{equation}
where $\rho_A(t)$ is a matrix whose matrix element $\rho_A(t)_{ij}$ is given by
\begin{equation}
\rho_A(t)_{ij} = \rho_{Aij}\sum_k e^{itb_k(a_i-a_j)}\rho_{Bkk}.
\end{equation}
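A direct numerical transcription of this exact expression (a sketch; the operators and states below are arbitrary qubit examples) reads:
\begin{verbatim}
import numpy as np

def gamma_n_exact(rho_A, rho_B, A, B, t, n):
    # rho_A(t)_{ij} = rho_A_{ij} * sum_k exp(i t b_k (a_i - a_j)) rho_B_{kk},
    # written in the eigenbasis of A for system A and of B for system B
    a, Va = np.linalg.eigh(A)
    b, Vb = np.linalg.eigh(B)
    rA = Va.conj().T @ rho_A @ Va
    pB = np.diag(Vb.conj().T @ rho_B @ Vb).real          # rho_B_{kk}
    phases = np.einsum('k,ij->ijk', b, np.subtract.outer(a, a))
    rho_t = rA * np.einsum('ijk,k->ij', np.exp(1j * t * phases), pB)
    return np.trace(np.linalg.matrix_power(rho_t, n)).real

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sz = np.diag([1.0, -1.0]).astype(complex)
rho_A = np.array([[0.6, 0.2], [0.2, 0.4]], dtype=complex)
rho_B = np.array([[0.7, 0.1], [0.1, 0.3]], dtype=complex)
print(gamma_n_exact(rho_A, rho_B, sz, sx, t=0.3, n=2))
\end{verbatim}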
\section{Qubit von Neumann entropy in quantum Rabi model}\label{app:exact_plots}
In Figure \ref{fig:time_ev} we plot the exact time evolution of the von Neumann entropy of a qubit system undergoing the quantum Rabi interaction as described in Section \ref{sec:rabi}. We illustrate that, while the second time derivative diverges for a pure input state, the von Neumann entropy itself remains finite. Nevertheless, the divergence of the second time derivative renders the second order perturbative analysis of the ($1$-)coherent information unsuitable as a measure of entropy transfer.
\begin{figure}
\caption{Left: the exact time evolution of the von Neumann entropy of the qubit under the quantum Rabi interaction. The $\vert\alpha\vert^2 = 0.25$ case corresponds to a pure input state. Right: the second time derivative of the von Neumann entropy of the qubit for the quantum Rabi interaction. While the second derivative is infinite at $t=0$ for a pure state, this quickly falls off to a finite (indeed negative) value.}
\label{fig:time_ev}
\end{figure}
\section{Numerical illustration of the noncommutativity of the limits $\varepsilon \rightarrow 0$ and $\lambda \rightarrow 0$}
\label{numer}
We expect that, for nonzero eigenvalues, in the small $\varepsilon$ limit the trace term in Equation \ref{trte} reduces to:
\begin{equation}\label{eq:zero_lim}
\lim_{\varepsilon\rightarrow 0}\sum_{i,j} (\lambda_j^{\varepsilon}\lambda_i - \lambda_j^{1+\varepsilon})\vert a_{ij}\vert^2=\sum_{i,j} (\lambda_i - \lambda_j)\vert a_{ij}\vert^2 = 0,
\end{equation}
however, numerical analysis is required to determine the range of eigenvalues for which this vanishing trace term is small enough to compensate for the small $\varepsilon$ in the denominator of Equation \ref{eq:epsilon_case}.
Because the values of $a_{ij}$ are operator-dependent, let us illustrate the principle by simply setting $\vert a_{ij}\vert = 1$ for all $i,j$ and computing the following:
\begin{equation}\label{eq:to_plot_num_0}
\sum\limits_{i,j} \lambda_j^{\varepsilon}(\lambda_i - \lambda_j),
\end{equation}
which we refer to as the trace term. We also compute the regularised version, incorporating the $\varepsilon$ in the denominator:
\begin{equation}\label{eq:to_plot_num}
\frac{(1+\varepsilon)\sum\limits_{i,j} \lambda_j^{\varepsilon}(\lambda_i - \lambda_j)}{\varepsilon}.
\end{equation}
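The two expressions are easily tabulated; the following sketch (with illustrative, arbitrarily chosen grids of eigenvalues and $\varepsilon$ values) performs a computation of the kind underlying Figure \ref{fig:n_1_numerical}:
\begin{verbatim}
def trace_term(lams, eps):
    # Equation (eq:to_plot_num_0), with |a_ij| = 1 for all i, j
    return sum(lj**eps * (li - lj) for li in lams for lj in lams)

def regularised(lams, eps):
    # Equation (eq:to_plot_num)
    return (1 + eps) * trace_term(lams, eps) / eps

lam0 = 0.5
for lam1 in (0.4, 0.1, 1e-3, 1e-6):
    lams = (lam0, lam1, 1 - lam0 - lam1)
    for eps in (0.1, 0.01, 0.001):
        print(lam1, eps, trace_term(lams, eps), regularised(lams, eps))
\end{verbatim}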
We illustrate the results of these computations for a qutrit system in Figure \ref{fig:n_1_numerical}.\footnote{We choose a qutrit rather than a qubit because in the latter case a single vanishing eigenvalue represents a pure state. Conversely, a qutrit system may have one vanishing eigenvalue without being pure. Hence, the qutrit system is the more general case.} We set $\lambda_0 = 0.5$, such that $0\leq\lambda_1\leq 0.5$ and $\lambda_2 = 1 - \lambda_0 - \lambda_1$. We first plot the trace term, Equation \ref{eq:to_plot_num_0}, on its own to illustrate that this tends to zero as $\varepsilon\rightarrow 0$, and then plot Equation \ref{eq:to_plot_num} to illustrate the range of eigenvalues for which the $\varepsilon$ in the denominator out-competes the vanishing trace term.
\begin{figure}
\caption{Left: plot of the trace term, Equation \ref{eq:to_plot_num_0}, as a function of $\lambda_1$ and $\varepsilon$. Right: plot of the regularised expression, Equation \ref{eq:to_plot_num}, which includes the $\varepsilon$ in the denominator.}
\label{fig:n_1_numerical}
\end{figure}
On the left of Figure \ref{fig:n_1_numerical}, we indeed see that when $\varepsilon$ is small, Equation \ref{eq:to_plot_num_0} is approximately zero for most values of $\lambda$. However, in the small $\lambda$ limit this trace term exhibits an abrupt change in value. This abrupt change, however, is not representative of a physical change in the second time derivative. Rather, the physical behaviour is represented on the right hand side, where crucially we include the $\varepsilon$ in the denominator. The right hand side of Figure \ref{fig:n_1_numerical} thus illustrates that for relatively large eigenvalues, the magnitude of the leading order change in the von Neumann entropy is small, while in the case of one or more vanishing eigenvalues, the leading order change diverges. A further example of this behaviour is given in Appendix \ref{app:extra_plot}, where a more general form of the operator $\hat{A}$ is used.
\section{Supplementary example: leading order change in von Neumann entropy}\label{app:extra_plot}
While our second order calculations may not provide clarity in terms of the ($n\rightarrow 1$) coherent information, we have demonstrated numerically that they are useful in quantifying the leading order rate of change of the von Neumann entropy of a single subsystem at the onset of an interaction. We have illustrated this numerically in Figure \ref{fig:n_1_numerical}, and provide in Figure \ref{fig:n_1_numerical_2} a further numerical example. In this latter case, we choose an operator for which $\vert a_{ij}\vert \neq 1$ in general. Specifically, we choose an arbitrary Hermitian operator, $\hat{A}_{\text{test}}$, acting on a qutrit state $\rho_A$ with eigenvalues $\lambda_0 = 0.5$, $0\leq\lambda_1\leq 0.5$, and $\lambda_2 = 1 - \lambda_0 - \lambda_1$. The operator $\hat{A}_{\text{test}}$ is:
\begin{equation}
\hat{A}_{\text{test}} = \begin{pmatrix}
0.2 & 0.1 & 0.5 \\
0.1 & 0.3 & 0.5 \\
0.5 & 0.5 & 0.5
\end{pmatrix},
\end{equation}
and the corresponding plots of
\begin{equation}\label{eq:trace_unreg}
\Tr_A\Big[\rho_A^\varepsilon\big[\hat{A}_{\text{test}}, \rho_A\big] \hat{A}_{\text{test}}\Big],
\end{equation}
and
\begin{equation}\label{eq:reg_trace}
\frac{1+\varepsilon}{\varepsilon}\Tr_A\Big[\rho_A^\varepsilon\big[\hat{A}_{\text{test}}, \rho_A\big] \hat{A}_{\text{test}}\Big],
\end{equation}
are illustrated on the left and right sides of Figure \ref{fig:n_1_numerical_2}, respectively. These quantities are related to $\ddot{H}_{1+\varepsilon}(A)\vert_{t=0}$ according to Equation \ref{eq:epsilon_case} in Section \ref{sec:n_1}. Importantly, we see here that non-trivial asymmetry can exist in the distribution of Equation \ref{eq:reg_trace} across the space of possible states of System $A$, illustrating the way in which the leading order change in the von Neumann entropy of this system at the onset of interaction is sensitive to the precise configuration of the initial input state.
\begin{figure}
\caption{Left: plot of Equation \ref{eq:trace_unreg} for the operator $\hat{A}_{\text{test}}$. Right: plot of the regularised expression, Equation \ref{eq:reg_trace}.}
\label{fig:n_1_numerical_2}
\end{figure}
\section{Further discussion of the \texorpdfstring{$n\rightarrow 1$ ($\varepsilon \rightarrow 0$)}{n -> 1 (epsilon -> 0)} case}\label{app:caution}
In Section \ref{sec:n_1}, we attempted to quantify the leading (second) order change in the coherent information at the onset of a quantum channel, as illustrated in Figure \ref{fig:setup}. We found, however, that the second order change in the von Neumann entropy diverges in the case of pure states. As a consequence, the leading order change in the coherent information, given by $\ddot{S}(A)-\ddot{S}(B)$, will itself always diverge, since system $B$ is pure by definition.
One may naturally ask whether it is possible to quantify how the speed of the divergence depends upon the state of System $A$. However, we demonstrate here that a number of issues arise when we attempt to answer this question.
Let us revisit the expression for the leading order change in the $n$-coherent information as $n\rightarrow 1$ (or $\varepsilon \rightarrow 0$ where $n=1+\varepsilon$). This of course corresponds to the conventional definition of the coherent information, which utilises the von Neumann entropy. Representing the $n^{th}$ R\'enyi entropy as $H_n$, we have:
\begin{align}
\delta I_{1+\varepsilon}^d &= \frac{t^2}{2}\left(\ddot{H}_{1+\varepsilon}(A)\big|_{t=0} -\ddot{H}_{1+\varepsilon}(B)\big|_{t=0} \right), \\[1em]
&=\frac{(1+\varepsilon) t^2}{-\varepsilon}\Big((\Delta B)^2 D_{1+\varepsilon,A}-(\Delta A)^2D_{1+\varepsilon,B}\Big).\label{eq:caution}
\end{align}
Because System $B$ is pure by design, $D_{1+\varepsilon, B} = (\Delta B)^2$. Hence, we may factor out the $(\Delta B)^2$ to obtain:
\begin{equation}
\delta I_{1+\varepsilon}^d=\frac{(1+\varepsilon) t^2(\Delta B)^2}{\varepsilon}\Big((\Delta A)^2 - D_{1+\varepsilon,A}\Big).
\end{equation}
From here, we may attempt to characterise the speed of the divergence by neglecting the prefactor of $t^2(\Delta B)^2(1+\varepsilon)/\varepsilon$, which is independent of System $A$, and simply consider the magnitude of the term $(\Delta A)^2 - D_{1+\varepsilon,A}$. However, we know from Section \ref{sec:n_1} that for $\varepsilon\rightarrow 0$, $D_{1+\varepsilon, A}$ is approximately zero across the whole state space of System $A$, except where one or more eigenvalues vanish. Consequently, we expect $(\Delta A)^2 - D_{1+\varepsilon,A}\approx (\Delta A)^2$ except at the positions in state space corresponding to vanishing eigenvalues. Much like the left side of Figure \ref{fig:n_1_numerical}, the distribution of this factor would therefore exhibit a sharp jump, approaching a discontinuity as $\varepsilon \rightarrow 0$. Such behaviour cannot be representative of a physical quantity, as this would suggest that states which were arbitrarily close to purity would exhibit vastly different behaviour to states which were exactly pure. Indeed, we showed in Section \ref{sec:n_1} that the $1/\varepsilon$ prefactor cannot be neglected if we want to obtain a smooth distribution across state space. However, if we are to incorporate this prefactor into the difference term $(\Delta A)^2 - D_{1+\varepsilon, A}$, we simply obtain a divergence as $\varepsilon\rightarrow 0$ because of the finite value of the $(\Delta A)^2$ term. Hence, we conclude that this approach is unsuitable for characterising the speed of the divergence in $\delta I^d_{1+\varepsilon}$.
One might also consider the possibility that System $B$ is not completely pure. In such a case, we could not factor out $(\Delta B)^2$, and we must instead consider the difference $(\Delta B)^2 D_{1+\varepsilon,A}-(\Delta A)^2D_{1+\varepsilon,B}$. Under this assumption, both $D_{1+\varepsilon,A}$ and $D_{1+\varepsilon,B}$ approach zero, and there appears to be a competition between the two terms which could be regularised by the factor of $1/\varepsilon$, similar to the right-hand side of Figure \ref{fig:n_1_numerical}. However, it is important to note that in the derivation of Equation \ref{eq:caution} we required that the tripartite system $A\tilde{A}B$ be pure. Furthermore, we required that systems $A$ and $B$ are initially unentangled, such that $\rho_{AB} = \rho_A\otimes\rho_B$. Hence, we cannot simply assume that system $B$ is impure while ensuring the validity of Equation \ref{eq:caution}.
One approach, however, may be to decompose system $B$ into two subsystems $B_1$ and $B_2$, such that $B$ as a whole is pure, but the individual subsystems are not. One may then restrict the interaction Hamiltonian such that it acts trivially on one subsystem, i.e. $\hat{B} = \hat{B}_1\otimes\mathbb{I}_2$. However, this constitutes a different and more complicated physical scenario to that which we have been considering in this work. Indeed, if $B_1$ and $B_2$ are entangled, we have:
\begin{equation}
\rho_B = \sum_{i,j,k,l} a_{ijkl}\ket{i}\bra{j}_1\otimes\ket{k}\bra{l}_2 := \sum_{i,j,k,l} a_{ijkl}\sigma_1{}_{ij}\otimes \sigma_2{}_{kl}.
\end{equation}
Because this is not a product state, we cannot simply exchange $B$ for $B_1$ in the durability expression. Instead we have:
\begin{align}
&\Tr_B\Big[\rho_B^{n-1}\big[\hat{B},\rho_B\big]\hat{B}\Big] =\\
&\Tr_{1,2}\Bigg[\Big(\sum_{pqrs} a_{pqrs}\sigma_1{}_{pq}\otimes\sigma_2{}_{rs}\Big)^{n-1}\Big(\sum_{ijkl} a_{ijkl}\Big[\hat{B}_1,\sigma_1{}_{ij}\Big]\hat{B}_1\otimes\sigma_2{}_{kl}\Big)\Bigg].
\end{align}
This represents an interesting problem, as it may be more plausible to consider that System $B$ is not initially completely pure. However, quantifying this slight impurity for the purpose of computing the difference term is non-trivial, and we leave further exploration of this to future work.
We anticipate that incorporating higher perturbative orders would be useful in regularising the divergences of the $n\rightarrow 1$ case. In particular, it is likely that vanishing higher derivatives may suppress the magnitude of the entanglement transferred to the environment, even as the second derivative appears to diverge. However, we note that while the free Hamiltonians can be neglected to second perturbative order, they would need to be incorporated at higher orders. This would then significantly increase the complexity of the computations. Hence, we relegate such an approach to future work.
\end{document}
\begin{document}
\title{Resource Saving via Ensemble Techniques for Quantum Neural Networks}
\begin{abstract}
Quantum neural networks hold significant promise for numerous applications, particularly as they can be executed on the current generation of quantum hardware. However, due to the limited number of qubits and to hardware noise, conducting large-scale experiments often requires significant resources. Moreover, the output of the model is susceptible to corruption by quantum hardware noise. To address this issue, we propose the use of ensemble techniques, which involve constructing a single machine learning model based on multiple instances of quantum neural networks. In particular, we implement bagging and AdaBoost techniques, with different data loading configurations, and evaluate their performance on both synthetic and real-world classification and regression tasks. To assess the potential performance improvement under different environments, we conduct experiments on both noiseless software simulators and IBM superconducting-based QPUs, suggesting that these techniques can mitigate quantum hardware noise. Additionally, we quantify the amount of resources saved using these ensemble techniques. Our findings indicate that these methods enable the construction of large, powerful models even on relatively small quantum devices.
\end{abstract}
\section{Introduction}
The emerging field of quantum machine learning \cite{biamonte2017quantum} holds promise for enhancing the accuracy and speed of machine learning algorithms by utilizing quantum computing techniques. Although the potential of quantum machine learning is expected to be advantageous for certain classes of problems in chemistry, physics, material science, and pharmacology \cite{cerezo2022challenges}, its applicability to more conventional use cases remains uncertain \cite{schuld2022quantum}. Notably, utilizable quantum machine learning algorithms generally need to be adapted to run on `NISQ' devices \cite{preskill2018quantum}, i.e. the current generation of noisy quantum computers, which lack error correction and offer only a modest number of qubits and limited circuit depth. In the quantum machine learning scenario, the quantum counterparts of classical neural networks, quantum neural networks \cite{abbas2021power}, have emerged as the de facto standard model for solving supervised and unsupervised learning tasks in the quantum domain.
While quantum neural networks have generated much interest, they presently have some issues. The first is the {\em barren plateau} problem \cite{mcclean2018barren}, characterised by the exponentially fast decay of the loss gradient's variance with increasing system size. This problem may be exacerbated by various factors, such as having overly-expressive quantum circuits \cite{holmes2022connecting}. To address this issue, quantum neural networks need to be carefully designed \cite{larocca2022diagnosing} and to incorporate expressibility control techniques such as projection \cite{huang2021power} and bandwidth control \cite{canatar2022bandwidth}. The second problem, which is the one addressed in this work, concerns the amount of resources required to run quantum neural networks: the limited number of qubits (currently of the order of one hundred) and the low fidelity of operations on current quantum devices severely restrict the size of a quantum neural network in terms of input dimension and number of layers.
In order to address the latter issue, we propose employing a NISQ-appropriate implementation of ensemble learning \cite{zhang2012ensemble}, a widely used technique in classical machine learning for tuning the bias and variance of a given learning mechanism by constructing a stronger classifier from multiple weak components, such that the ensemble as a whole outperforms the best individual classifier. The effectiveness of ensemble systems has been extensively demonstrated empirically and theoretically \cite{de2014essai}, although there does not currently exist any overarching theoretical framework capable of, e.g., specifying the diversity requirements on ensemble components needed to guarantee out-performance. We here seek to provide and quantify a motivation for employing classical ensemble techniques in relation to NISQ-based quantum neural networks, which we address via the following three arguments.
The first argument concerns the potential for the superior performance of an ensemble system composed of small quantum neural networks compared to a single larger quantum neural network. This notion is based on the rationale that while quantum neural networks are inherently powerful machine learning models, they exhibit intrinsic variance due to the nature of their highly non-convex loss landscapes, implying that different predictors will result from randomly-initialised stochastic gradient descent training, in common with classical neural networks. (Modern deep learning practice often deliberately overparameterises the network in order to render the loss more convex \cite{oymak2020toward}, with the asymptotic case of infinitely wide neural networks exhibiting a fully convex loss landscape, making them effectively linear models \cite{jacot2018neural}.) Although overparameterization in quantum neural networks has been studied theoretically \cite{larocca2021theory, liu2022representation, incudini2022quantum} and has been shown to be beneficial to generalization performance within certain settings, the increase in resource requirements makes this approach almost completely impractical on NISQ devices. In the classical literature, however, it has been demonstrated that ensemble techniques can perform comparably to the largest (generally overparameterized) models with significantly fewer resources (especially in relation to overall model parameterization), c.f. for example \cite[Figure 2]{geiger2020scaling}.
The second argument pertains to the resource savings achievable by ensemble systems, particularly in terms of the number of qubits, gates, and training samples required. For example, the boosting ensemble technique involves progressively dividing the training dataset into multiple, partially overlapping subsets on the basis of their respective impact on the performance of the cumulative ensemble classifier created by summing the partial weak classifiers trained on previously-selected data subsets. This enables the ensemble quantum neural network to be constructed in parallel with individual quantum neural networks operating on datasets of reduced size. The random subspace technique, by contrast, trains each base predictor on a random subset of features, and thereby provides an advantage in terms of the overall number of qubits and gates required. Employing the random subspace technique in a quantum machine learning setting would parallel the various quantum circuit splitting techniques (c.f. for example \cite{lowe2022fast}), and divide-and-conquer approaches, that have been utilized in the field of quantum chemistry \cite{yoshikawa2022quantum} and quantum optimization \cite{asproni2020accuracy}.
Our third argument, which is specific to quantum computing, examines the potential of ensembles' noise-canceling ability. Previous works have demonstrated that ensembles can enhance the performance of several noisy machine-learning tasks (see \cite{zhang2011robust}). Our investigation aims to determine whether and to what extent these techniques can reduce the impact of noise during the execution on a NISQ device \emph{at the applicative level}. This approach differs from most current approaches, which aim to reduce noise at a lower level, as described in \cite{larose2022mitiq}.
We here examine the impact of ensemble techniques based on bagging (bootstrap aggregation) and boosting ensembles in a quantum neural network setting across seven variant data loading schemes. Bagging techniques are selected for their applicability in high-variance settings, i.e. those exhibiting significant fluctuations in relation to differing initialisations and differing sample subselections; conversely, boosting techniques are effective in relation to high-bias models, i.e. those which are relatively insensitive to data subsampling.
Our first objective is to quantify the amount of resources (in particular, the number of qubits, gates, parameters, and training samples) saved by the respective approaches. Secondly, we evaluate the performance using quantum neural networks as base predictors to solve a number of representative synthetic and real-world regression and classification tasks. Critically, the accuracy and loss performance of these approaches are assessed with respect to the number of layers of the quantum neural networks in a simulated environment. We thus obtain a layer-wise quantification of performance that addresses one of the fundamental questions in architecting deep neural systems, namely, how many layers of abstraction to incorporate? Note that this question is fundamentally different in a quantum setting compared to classical
neural systems; in the latter, the possibility of multi-level feature learning exists, and thus the potential for indefinite performance improvement with neural layer depth \cite{incudini2022quantum}. This contrasts with quantum neural networks, in which an increase in the number of layers affects the expressibility of the ansatz and thus might introduce a barren plateau \cite{holmes2022connecting}.
Finally, the noise-canceling capabilities of ensembles will be investigated by testing a synthetic linear regression task on IBM's superconductor-based quantum processing unit (QPU) Lagos.
\paragraph{Contributions}
Our contributions are the following:
\begin{itemize}
\item We evaluate various ensemble schemes that incorporate bagging and boosting techniques into quantum neural networks, and quantify the benefits in terms of resource savings, including the number of qubits, gates, and training samples required for these approaches.
\item We apply our approach to the IBM Lagos superconductor-based quantum processing unit to investigate the potential advantages of bagging techniques in mitigating the effects of noise during the execution of quantum circuits on NISQ devices.
\item We conduct a layer-wise analysis of quantum neural network performance in the ensemble setting with a view to determining the implicit trade-off between ensemble advantage and layer-wise depth.
\end{itemize}
\section{Related Works}
The quest for quantum algorithms able to be executed on noisy small-scale quantum systems led to the concept of Variational Quantum Circuits (VQCs), i.e. quantum circuits based on a hybrid quantum-classical optimization framework \cite{cerezo2021variational,mitarai_2018}. VQCs are currently believed to be promising candidates to harness the potential of QC and achieve a quantum advantage \cite{tilly2022variational,di2022quask,liu2021rigorous}. VQCs rely on a hybrid quantum-classical scheme, where a parameterized quantum circuit is iteratively optimized with the help of a classical co-processor. This way, low-depth quantum circuits can be efficiently designed and implemented on the available NISQ devices; the noisy components of the quantum process are mitigated by the low number of quantum gates present in the VQCs. The basic structure of a VQC includes a data encoding stage, where classical data are embedded into a complex Hilbert space as quantum states, a processing of such quantum states via an ansatz made of parameterized rotation gates and entangling gates, and finally a measurement of the circuit to retrieve the expected outcome. Many different circuit architectures and ansatzes have been proposed for VQCs \cite{benedetti2021hardware,choquette2021quantum,farhi2014quantum,patil2022variational}, depending on the structure of the problem or on the underlying quantum hardware. VQCs have demonstrated remarkable performance and good resilience to noise in several optimization tasks and real-world applications. For example, researchers in \cite{schuld2020circuit} introduced a circuit-centric quantum classifier based on VQCs that could effectively be implemented on a near-term quantum device; it correctly classified quantum-encoded data and was shown to be robust against noise. The authors of \cite{mitarai_2018} proposed a VQC that successfully approximated high-dimensional regression and classification functions with a limited number of qubits.
VQCs are particularly well-suited for the realization of quantum neural networks with a constraint on the number of qubits \cite{massoliALeap2022}. A quantum neural network is usually composed of a layered architecture able to encode input data into quantum states and perform heavy manipulations in a high-dimensional feature space. The encoding strategy and the choice of the circuit ansatz are critical for the achievement of superior performance over classical NNs: more complex data encoding with hard-to-simulate feature maps could lead to a concrete quantum advantage \cite{havlivcek2019supervised}, but overly expressive quantum circuits may exhibit flatter cost landscapes and result in untrainable models \cite{holmes2022connecting}. An example of a quantum neural network was given in \cite{macaluso2020variational}, where a shallow NN was employed to perform classification and regression tasks using both simulators and real quantum devices. In \cite{zhao2021qdnn}, the authors proposed a multi-layer Quantum Deep Neural Network (QDNN) with three variational layers for an image classification task, and showed that QDNNs have greater representation capacity than classical deep NNs. A hybrid Quantum-classical Recurrent Neural Network (QRNN) was presented in \cite{ceschiniHybrid2022} to solve a time series prediction problem. The QRNN, composed of a quantum layer as well as two classical recurrent layers, demonstrated superior performance over its classical counterpart in terms of prediction error.
However, quantum neural networks suffer from some non-negligible problems, which deeply affect their performance and limit their impact in the quantum ecosystem. Firstly, they are still subject to quantum noise, which gets worse as the number of layers (i.e., the depth of the quantum circuit) increases \cite{wang2022quantumnat,liang2021can}. Secondly, barren plateau phenomena may occur depending on the ansatz and the number of qubits chosen, reducing the trainability of such models \cite{holmes2022connecting,cerezo2021cost,mcclean2018barren}. Finally, data encoding on NISQ devices continues to represent an obstacle when the number of features is considerable \cite{massoliALeap2022}, making such models hard to implement and train \cite{ceschiniHybrid2022}.
In classical ML, ensemble learning has been investigated for years as a means to improve generalization and robustness over a single estimator \cite{seni2010ensemble,zhang2012ensemble}. Ensembling is based on the so-called ``wisdom of the crowd'' principle, namely it combines the predictions of several base estimators with the same learning algorithm to build a single, stronger model. Although there are many different ensemble methods, they can be grouped into two broad categories: bagging methods, which build and train several estimators independently and then compute an average of their predictions \cite{altman2017ensemble}, and boosting methods, which instead train the estimators sequentially so that each one corrects the predictions of the prior models, and which output a weighted average of such predictions \cite{buhlmann2012bagging}. Ensemble methods for NNs have also been extensively studied, yielding remarkable performance in both classification and regression tasks \cite{osman2020effective,sagi2018ensemble,berkhahn2019ensemble}.
In the quantum setting, the adoption of an ensemble strategy has received little consideration in the past few years, with very few approaches focusing on near-term quantum devices and VQC ensembles. In \cite{schuld2018quantum, abbas2020quantum}, the authors exploit the superposition principle to obtain an exponentially large ensemble wherein each instance is weighted according to its accuracy on the training dataset. However, they make use of a fault-tolerant approach rather than considering limited quantum resources. A similar approach is explored in \cite{leal2021training}, where authors create an ensemble of Quantum Binary Neural Networks (QBNNs) with reduced computational training cost without taking into consideration the amount of quantum resources necessary to build the circuit. An efficient strategy for bagging with quantum circuits is proposed in \cite{macaluso2020quantum} instead. Very recently, \cite{stein2022eqc} has proposed a distributed framework for ensemble learning on a variety of NISQ quantum devices, although it requires many NISQ devices to be actually implemented. A quantum ECOC multiclass ensemble approach was proposed in \cite{Windridge2018QuantumEO}. In \cite{qin2022improving}, the authors investigated the performance enhancement of a majority-voting-based ensemble system in the quantum regime. Authors in \cite{krisnanda2023wisdom} studied the role of ensemble techniques in the context of quantum reservoir computing. Finally, an analysis of robustness to hardware error as applied to quantum reinforcement learning, and presenting compatible results, is given in \cite{skolik2023robustness}.
In this paper, we propose a classical ensemble learning approach applied to the outputs of several quantum neural networks, in order to reduce the quantum resources required for a given quantum model and provide superior performance in terms of error rate over single quantum neural network instances. To the best of our knowledge, no one has previously proposed such an ensemble framework for VQCs. We also compare bagging and boosting strategies to provide an analysis of the most appropriate ensemble methods for quantum neural networks in a noiseless setting. An error analysis with respect to the number of layers of the quantum neural networks reveals that bagging models greatly outperform the baseline model for a low number of layers, and retain remarkable performance as the number of layers increases. Finally, we apply our approach to the IBM Lagos superconductor-based QPU to investigate the potential advantages of bagging techniques in mitigating the effects of noise during the execution of quantum circuits on NISQ devices.
\section{Background and Notation}
We provide a brief introduction to the notation and concepts used in this work. The sets $\mathcal{X}$ and $\mathcal{Y}$ represent the set of features and targets, respectively. Typically, $\mathcal{X}$ is equal to $\Real^d$, with $d$ the input dimensionality, whereas $\mathcal{Y}$ is equal to $\Real$ for regression tasks and to $\{ c_1, ..., c_k \}$ for $k$-ary classification tasks. Sequences of elements are indexed by a superscript, as in $x^{(j)}$, while the $i$-th component of a vector is denoted $x_i$. The notation $\epsilon \sim \mathcal{N}(\mu, \sigma^2)$ indicates that the value of $\epsilon$ is randomly sampled from a univariate normal distribution with mean $\mu$ and variance $\sigma^2$. We use the Iverson bracket $\llbracket P \rrbracket$ to denote one when the predicate $P$ is true and zero otherwise.
\subsection{Models in quantum machine learning}
We define the state of a quantum system as the density matrix $\rho$ having unitary trace and belonging to the Hilbert space $\Hilbert \equiv \mathbb{C}^{2^n \times 2^n}$ where $n$ is the number of qubits. The system starts in the state $\rho_0 = \ketbra{0}{0}$. The evolution in a closed quantum system is described by a unitary transformation $U = \exp(-it H)$, $t \in \Real$, $H$ Hermitian operator, and acts like $\rho \mapsto U^\dagger \rho U$. The measurement of the system in its computational basis $\{ \Pi_i = \ketbra{i}{i} \}_{i=0}^{2^n-1}$ applied to the system in the state $\rho$ will give outcome $i \in 0, 1, ..., 2^n-1$ with probability $\Trace[\Pi_i \rho \Pi_i]$ after which the state collapses to $\rho' = \Pi_i \rho \Pi_i / \Trace[\Pi_i \rho \Pi_i]$. A different measurement operation is given by the expectation value of an observable $O = \sum_i \lambda_i \Pi_i$ acting on the system in state $\rho$, whose value is $\expval{O} = \Trace[\rho O]$.
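As a small illustration of these measurement rules, the following sketch computes the Born-rule probabilities $\Trace[\Pi_i \rho \Pi_i]$ and the expectation value $\Trace[\rho O]$ for an arbitrary single-qubit density matrix chosen purely for illustration:
\begin{verbatim}
import numpy as np

# Illustrative single-qubit density matrix (Hermitian, unit trace, PSD).
rho = np.array([[0.75, 0.25],
                [0.25, 0.25]], dtype=complex)

projectors = [np.diag([1.0, 0.0]), np.diag([0.0, 1.0])]   # computational basis
probs = [np.trace(P @ rho @ P).real for P in projectors]  # Tr[Pi rho Pi]

O = np.diag([1.0, -1.0])            # observable sigma_z
expval = np.trace(rho @ O).real     # <O> = Tr[rho O]
print(probs, expval)                # [0.75, 0.25] 0.5
\end{verbatim}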
Quantum computation can be described using a quantum circuit, a sequence of gates (i.e. elementary operations) acting on one or more qubits of the system, terminating with the measurement operation over some or all of its qubits. The output of the measurement can be post-processed using a classical function. The set of available gates is required to be \emph{universal}, i.e. the composition of such elementary operations allows the expression of any unitary transformation with arbitrary precision. An exemplar universal gate set is composed of the parametric operators
$R_x^{(i)}(\theta) = \mathrm{exp}(-i\frac{\theta}{2} \sigma_x^{(i)})$,
$R_y^{(i)}(\theta) = \mathrm{exp}(-i\frac{\theta}{2} \sigma_y^{(i)})$,
$R_z^{(i)}(\theta) = \mathrm{exp}(-i\frac{\theta}{2} \sigma_z^{(i)})$, and the operator
$\mathrm{CNOT}^{(i,j)} = \mathrm{exp}(-i\frac{\pi}{4} (I-\sigma_z^{(i)})(I-\sigma_x^{(j)}))$. The gate $I$ is the identity.
The matrices $\sigma_x = \smqty(0 & 1 \\ 1 & 0), \sigma_y = \smqty(0 & -i \\ i & 0), \sigma_z = \smqty(1 & 0 \\ 0 & -1)$ are the Pauli matrices. The apex denotes explicitly the qubits on which the transformation acts.
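As a check on these definitions, the following sketch (illustrative only) builds the gate set from the matrix exponentials above and verifies that the two-qubit generator reproduces the familiar CNOT matrix:
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

I2 = np.eye(2)
sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)

def rx(theta): return expm(-1j * theta / 2 * sx)
def ry(theta): return expm(-1j * theta / 2 * sy)
def rz(theta): return expm(-1j * theta / 2 * sz)

# CNOT on qubits (0, 1): qubit 0 is the control, qubit 1 the target.
cnot = expm(-1j * np.pi / 4 * np.kron(I2 - sz, I2 - sx))
print(np.round(cnot.real, 3))   # the familiar CNOT permutation matrix
\end{verbatim}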
Quantum machine learning forms a broad family of algorithms, some of which require fault-tolerant quantum computation while others are ready to execute on current generation `NISQ' (noisy) quantum devices. The family of NISQ-ready techniques of interest in this document is denoted \emph{variational quantum algorithms} \cite{cerezo2021variational}.
These algorithms are based on the tuning of a cost function $C(\theta)$ dependent on a set of parameters $\theta \in [0, 2\pi]^P$ and optimized classically (possibly via gradient descent-based techniques) to obtain the value $\theta^* = \arg\min_\theta C(\theta)$. Optimization through gradient-descent thus involves computation of the gradient of $C$. This can be done using finite difference methods or else the parameter-shift rule \cite{schuld2019evaluating}. The parameter-shift rule is particularly well-suited for NISQ devices as it can utilise a large step size relative to finite difference methods, making it less sensitive to noise in calculations.
In general, $C(\theta)$ is a function corresponding to a parametric quantum transformation $U(\theta)$ of a length polynomial in the number of qubits, the set of input states $\{ \rho_i \}$, and the set of observables $\{ O_k \}$.
Specifically, a \emph{quantum neural network} is a function in the form
\begin{equation}
f(x; \theta) = \Trace[U^\dagger(\theta)V^\dagger(x) \rho_0 V(x) U(\theta) O]
\end{equation}
where $\rho_0$ is the initial state of the system, $V(x)$ is a parametric quantum circuit depending on the input parameters $x \in \mathcal{X}$, $U(\theta)$ is a parametric quantum circuit named an \emph{ansatz} that depends on the trainable parameters $\theta \in [0, 2\pi)^P$, and $O$ is an observable.
Given the training dataset $\{ (x^{(i)}, y^{(i)}) \}_{i=1}^M \in (\mathcal{X} \times \mathcal{Y})^M$, the cost function of a quantum neural network, being a supervised learning problem, is the empirical risk
\begin{equation}
C(\theta) = \sum_{i=1}^M \ell(f(x^{(i)}; \theta), y^{(i)})
\end{equation}
where $\ell: \mathcal{Y} \times \mathcal{Y} \to \Real$ is any convex loss function, e.g. the mean square error.
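Schematically, and assuming \texttt{model} stands for any callable implementing $f(x;\theta)$, this empirical risk with the mean-square-error loss reads:
\begin{verbatim}
import numpy as np

# Empirical risk with MSE loss; `model` is any callable f(x, theta).
def cost(theta, model, X, y):
    preds = np.array([model(x, theta) for x in X])
    return np.sum((preds - y) ** 2)
\end{verbatim}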
The quantum neural network constitutes a linear model in the Hilbert space of the quantum system as a consequence of the linearity of quantum dynamics. It behaves, in particular, as a {\em kernel machine} that employs the unitary $V(x)$ as the feature map $\rho \mapsto \rho_x = V(x)\rho$, while the variational ansatz $\rho \mapsto \rho_\theta = U(\theta)\rho$ adjusts the model weights. Note that although the model is linear in the Hilbert space of the quantum system, the measurement projection makes it nonlinear in the parameter space, enabling a set of rich dynamics. Quantum neural networks can have a layer-wise structure, i.e., $U(\theta) = \prod_{i=1}^\ell U_i(\theta_i)$, which provides further degrees of freedom for optimization (however, due to the lack of nonlinearity between the layers, the model does not possess the hierarchical feature learning capabilities of classical neural networks).
The selection of the ansatz is thus a crucial aspect in defining the quantum neural network, and it is required to adhere to certain classifier-friendly principles. Expressibility is one such, being the property governing the extent of the search space that can be explored by the optimization method. Although there are various ways to formalize expressibility, one of the most widely used definitions is based on the generation of state ensembles $\{ \rho_\theta = U(\theta)\rho_0 \mid \theta \in \Theta \}$ that are similar to Haar-random (i.e. uniform) distributions of states. Expressible unitaries are those for which the operator norm of a certain expression involving the Haar measure and the state ensemble is small. However, expressible circuits are susceptible to the barren plateau problem, where the variance of the gradient decreases exponentially with the number of qubits, making parameter training infeasible. The varieties of ansatz and their expressibilities are presented in \cite{sim2019expressibility}. Expressibility is tightly connected to the concept of controllability in quantum optimal control, and the authors of \cite{larocca2022diagnosing} show that, in the asymptotic limit of the number of layers $\ell \to \infty$, the expressible circuits are the controllable ones, i.e. those whose ansatz is underpinned by a Lie algebra matching the space of skew-Hermitian matrices $\mathfrak{u}(2^n)$.
\subsection{Ensemble techniques}
The purpose of using ensemble systems is to improve the generalization performance through reducing the bias or variance of a decision system. Such a result is obtained by training several models and combining the outcomes according to a combination rule. A large body of literature on ensemble techniques exists; the reader is referred to \cite{zhang2012ensemble} for a general overview.
\begin{figure}
\caption{Taxonomy of the three aspects characterizing an ensemble system.}
\label{fig:ensemble_taxonomy}
\end{figure}
The idea behind the ensemble system may be motivated by Condorcet's jury theorem \cite{de2014essai}: a jury of $m$ peers, each having probability $p = \frac{1}{2} + \epsilon, 0 < \epsilon \ll 1,$ of giving the correct answer, implies that the probability of the verdict given by majority voting to be correct is
\begin{equation}
p_\text{jury} = \sum_{k = \lfloor m/2 \rfloor + 1}^m \binom{m}{k} p^k (1-p)^{m-k}
\end{equation}
and quickly approaches $1$ as $m\to \infty$. The theorem, broadly interpreted, suggests that small, individually ineffective machine learning models $h_1, ..., h_m$ (\emph{weak learners}) can be combined into a more powerful one, $h_\text{ens}$ (\emph{strong learner}), with performance that can be arbitrarily good depending on the nature of the data manifold and the base classifiers.
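A quick numerical illustration of the theorem (with an arbitrary choice of $p = 0.55$) shows how rapidly the majority vote becomes reliable as $m$ grows:
\begin{verbatim}
from math import comb

# Probability that a majority of m independent jurors, each correct
# with probability p, returns the correct verdict.
def p_jury(p, m):
    return sum(comb(m, k) * p**k * (1 - p)**(m - k)
               for k in range(m // 2 + 1, m + 1))

for m in (1, 11, 101, 1001):
    print(m, round(p_jury(0.55, m), 4))   # increases towards 1 as m grows
\end{verbatim}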
According to \cite{zhang2012ensemble}, three aspects characterize an ensemble system: a data selection strategy, the composition plus training strategies of the single model instances, and the combination rule of its output. Some of the possible choices are summarized in Figure \ref{fig:ensemble_taxonomy}.
The data selection strategy determines how the data should be distributed to the individual instances. If all instances are trained on the same dataset, their predictions will be highly correlated, resulting in similar output. The \emph{bootstrapping} technique creates smaller, overlapping subsets by sampling with replacement from the dataset, which are then assigned to different instances. Alternatively, the \emph{pasting} technique can be used for processing larger datasets by subsampling without replacement. Another approach is to divide the dataset by randomly assigning different sets of features with replacement, known as the random subspace technique (when the bootstrapping and random subspace techniques are combined, the result is the {\em random patch} technique).
\begin{figure}
\caption{Comparison between bagging (left) and `vanilla' boosting (right) techniques. The bagging ensemble trains the models in parallel over a subset of the dataset drawn uniformly; each prediction is then merged via an average function. The boosting ensemble trains the models sequentially, the first predictor draws the samples uniformly, and the subsequent models draw the elements from a probability distribution biased toward previously misclassified items.}
\label{fig:bagging_boosting_stacking}
\end{figure}
There are numerous schemes for combining predictors, with \emph{bagging} being the most straightforward and commonly used. Bagging, short for bootstrap aggregation, involves the creation of multiple homogeneous model instances trained on bootstrapped datasets. An instance of a bagging scheme is the random forest, which involves bagging decision trees trained on differing sample subsets (in some cases, random forests may favor a random patch data selection strategy over bagging). Another predictor combination scheme is \emph{boosting}, which involves training a sequence of predictors via subsampling data according to the following strategy: an initial predictor is trained on a uniformly drawn subset of samples, while the $i$-th instance of the predictor is trained on a subset of elements that the previous ensemble classifier incorrectly predicted. The ensemble is itself the convex cumulative sum over predictors. Numerous variations of boosting exist, one of the most notable being AdaBoost \cite{freund1997decision}. Contrary to vanilla boosting, AdaBoost employs an exponential loss, such that the ensemble error function allows for the fact that it is only the sign of the outcome that is significant. These two schemes are illustrated in Figure \ref{fig:bagging_boosting_stacking}. The other major ensemble scheme is {\em stacking}, in which a collection of heterogeneous classifiers trained on the same dataset are combined via an optimised meta-classifier.
The combination rule merges the output of individual models $h_1, ..., h_m$. In classification tasks i.e. where the label output is discrete $y \in C = \{c_1, ..., c_k\}$, the most commonly used rule is majority voting. This is calculated as $y_\text{ens} = \arg \max_{c \in C} \sum_{i=1}^m \llbracket h_i(x) = c \rrbracket$. Where there exists prior knowledge regarding the performance of individual predictors, positive weights $w_i$ can be assigned, such that the output is a weighted majority vote. The ensemble prediction in this case will be $y_\text{ens} = \arg \max_{c \in C} \sum_{i=1}^m w_i \llbracket h_i(x) = c \rrbracket$. Alternatively, the \emph{borda count} method sorts labels in descending order by likelihood, with the ensemble prediction being the highest ranking sum. Nevertheless, averaging functions can also be utilised for ensemble classifiers. For regression tasks where $y \in \Real$, common combination rules are (possibly weighted) mean, minimum, and maximum.
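The sketch below (a generic illustration, not tied to any particular library) implements the plain and weighted majority-voting rules for classification and the averaging rule for regression described above:
\begin{verbatim}
import numpy as np

def majority_vote(predictions, weights=None, classes=None):
    predictions = np.asarray(predictions)        # one label per base model
    classes = np.unique(predictions) if classes is None else classes
    weights = np.ones(len(predictions)) if weights is None else np.asarray(weights)
    scores = [(weights * (predictions == c)).sum() for c in classes]
    return classes[int(np.argmax(scores))]

def average_rule(predictions, weights=None):
    return np.average(np.asarray(predictions, dtype=float), weights=weights)

print(majority_vote(["a", "b", "a"]))                           # -> a
print(majority_vote(["a", "b", "b"], weights=[3.0, 1.0, 1.0]))  # -> a
print(average_rule([0.1, 0.4, 0.2]))                            # -> 0.2333...
\end{verbatim}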
\section{Discussion}
Ensemble techniques, while well-established in the classical realm, have been largely overlooked in the quantum literature, leaving a number of open questions in this setting, such as whether bagging techniques, which reduce variance, can be deployed as effectively as boosting techniques, which reduce bias (both of which are also data-manifold and base-model dependent). It is also unclear what relative saving in resources, in terms of circuit size (number of qubits), depth (number of gates), and samples required for training, can be obtained by using an ensemble of quantum neural networks instead of a single, large quantum network. Furthermore, the extent to which an ensemble system can mitigate hardware noise is not currently well understood. Our experiments are designed to explore these questions.
To investigate the first two aspects, we conduct a suite of experiments within a simulation environment, employing seven distinct ensemble schemes with varying strategies for data selection, model training and decision combination applied to four synthetic and real-world datasets, encompassing both regression and classification tasks. Specifically, we analyze: a synthetic linear regression dataset, the Concrete Compressive Strength regression dataset, the Diabetes regression dataset, and the Wine classification dataset, which are widely used benchmarks for evaluating machine learning models.
Six of the proposed techniques are classified as bagging methods, employing bootstrapped data to generate the ensemble, while the seventh is a sequential boosting technique, namely AdaBoost. In particular, we implemented the AdaBoost.R2 version \cite{drucker1997improving} for the regression tasks and the AdaBoost SAMME.R version \cite{hastie2009multi} for the classification problem. The bagging ensembles are characterized by two parameters: the sample ratio $r_n \in [0,1]$, which determines the percentage of training samples used for each base predictor (with replacement), and the feature ratio $r_f \in [0,1]$, which indicates the percentage of features used for each predictor (without replacement). We test six bagging schemes by varying $(r_n, r_f) \in \{0.2, 1.0\} \times \{0.3, 0.5, 0.8\}$. For both the classification and regression tasks, the outputs of the base predictors are combined via averaging. In the case of the AdaBoost ensemble, the training set for each base predictor has the same size and dimensionality as the original training set. However, the samples are not uniformly drawn but are selected and weighted based on the probability of misclassification by previous classifiers composing the cumulative ensemble; single predictors are hence combined using a weighted average. Each ensemble system comprises 10 base predictors. The characteristics of these ensemble schemes are summarized in Table \ref{tab:simulated_ensemble}, where FM identifies the baseline quantum neural network model, whereas Bag\_$r_f$\_$r_n$ represents a bagging model with $r_f$ percentage of the features and $r_n$ percentage of the samples. Our experiments aim to evaluate the performance of each of the ensemble frameworks in comparison to the baseline model, as well as to assess the overall resource saving, including the number of qubits and overall parametric requirements.
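A minimal sketch of the corresponding data-loading step is the following, assuming the sample ratio $r_n$ is applied with replacement and the feature ratio $r_f$ without replacement, as described above (array shapes and the random generator are illustrative only):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

def draw_subset(X, y, r_n, r_f):
    n, d = X.shape
    rows = rng.choice(n, size=max(1, int(r_n * n)), replace=True)   # bootstrap samples
    cols = rng.choice(d, size=max(1, int(r_f * d)), replace=False)  # random subspace
    return X[np.ix_(rows, cols)], y[rows], cols

X = rng.uniform(-1, 1, size=(250, 5))
y = rng.uniform(-1, 1, size=250)
subsets = [draw_subset(X, y, r_n=0.2, r_f=0.8) for _ in range(10)]  # e.g. Bag_0.8_0.2
\end{verbatim}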
\begin{table}[htbp]
\centering
\begin{tabular}{llllll}
\toprule
\multirow{2}{*}{Model} & \multicolumn{2}{c}{Data Loading} & \multirow{2}{*}{Ensemble} & \multirow{2}{*}{\#BP} & \multirow{2}{*}{Rule} \\\cline{2-3}
& RSBS ($r_f$) & BST ($r_n$) & & & \\ \midrule
FM & - & - & - & - & - \\
Bag\_0.3\_0.2 & 0.3 & 0.2 & Bagging & 10 & Avg \\
Bag\_0.3\_1.0 & 0.3 & 1.0 & Bagging & 10 & Avg \\
Bag\_0.5\_0.2 & 0.5 & 0.2 & Bagging & 10 & Avg \\
Bag\_0.5\_1.0 & 0.5 & 1.0 & Bagging & 10 & Avg \\
Bag\_0.8\_0.2 & 0.8 & 0.2 & Bagging & 10 & Avg \\
Bag\_0.8\_1.0 & 0.8 & 1.0 & Bagging & 10 & Avg \\
AdaBoost & 1.0 & 1.0 & AdaBoost & 10 & W.Avg \\
\bottomrule
\end{tabular}
\caption{Characteristics of the baseline benchmark model (FM) and the seven ensemble systems. Each ensemble system is identified by its data loading method (RSBS for Random Subspace, with feature ratio $r_f$, and BST for Bootstrap, with sample ratio $r_n$), its predictor composition \& training type (Ensemble), the number of base predictors (\#BP), and the combination rule (Rule, with Avg representing the average function and W.Avg the weighted average).}
\label{tab:simulated_ensemble}
\end{table}
To investigate the impact of quantum hardware noise, we conduct additional experiments on the IBM Lagos QPU. Such a device is a 7-qubit superconducting-based quantum computer. The topology of Lagos is depicted in Figure \ref{fig:lagos}. Specifically, we compare the performance of the baseline model FM with that of the Bag\_0.8\_0.2 configuration on the linear regression dataset. Our goal is to determine whether ensemble techniques can effectively mitigate quantum noise, and whether the difference in performance between single predictors and ensemble systems is more pronounced within a simulated environment in comparison with real-world execution on quantum hardware.
\begin{figure}
\caption{Topology of IBM Lagos quantum processing unit}
\label{fig:lagos}
\end{figure}
\subsection{Experimental setup}\label{sec:methods}
This section outlines experimental protocols used to evaluate the performance of the various ensemble approaches in terms of both the experimental structure and specific parameters/settings used to configure the algorithm and hardware.
\paragraph{Choice of quantum neural networks} We utilize a quantum neural network of the form $f(x; \theta) = \Trace[U^\dagger(\theta)V^\dagger(x) \rho_0 V(x) U(\theta) O]$, which operates on $n$ qubits, with $n$ corresponding to the number of features in the classification/regression problem. For the feature map, we opted for the simple parametric transformation $V(x) = \bigotimes_{i=1}^n R_y^{(i)}(x_i)$. This choice was motivated by the findings in \cite{kubler2021inductive}, suggesting that more complex feature maps can lead to unfavorable generalization properties, incorporation of which may thus unnecessarily bias our findings. (In \cite{lloyd2020quantum}, various feature maps are compared).
The ansatz is implemented with the parametric transformations structured layer-wise with, for $\ell$ the number of layers, a total of $3\ell n$ parameters. It is thus defined as:
\begin{align}
\nonumber
U_\ell(\theta) = &
\prod_{k=1}^\ell
\Bigg[
\left(\bigotimes_{i=1}^n R_x^{(i)}(\theta_{3(k-1)n+2n+i})\right)
\left(\prod_{i=1}^{n-1} \mathrm{CNOT}^{(i, i+1)}\right)
\left(\bigotimes_{i=1}^n R_z^{(i)}(\theta_{3(k-1)n+n+i})\right) \\
& \qquad\qquad\qquad\qquad\qquad
\left(\prod_{i=1}^{n-1} \mathrm{CNOT}^{(i, i+1)}\right)
\left(\bigotimes_{i=1}^n R_x^{(i)}(\theta_{3(k-1)n+i})\right)
\Bigg]
\end{align}
so that the parameter indices run from $1$ to $3\ell n$, consistently with the parameter count above.
The role of the CNOT gates is the introduction of entanglement into the system, which would otherwise be efficiently classically simulable.
We select as the observable $O = \sigma_z^{(0)}$, which operates on a single qubit. Local observables like this one are less susceptible to the barren plateau problem than global ones, for example, $O = \otimes_{i=1}^n \sigma_z^{(i)}$ (as noted in \cite{cerezo2021cost}). The quantum neural network described in our investigation is pictured in Figure \ref{fig:qnn}.
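A minimal PennyLane sketch of this quantum neural network (the $R_y$ feature map, the layered ansatz above, and the observable $\sigma_z$ on the first qubit) is given below; the parameter layout and hyper-parameters are illustrative rather than the exact implementation used in our experiments:
\begin{verbatim}
import pennylane as qml
from pennylane import numpy as np

n_qubits, n_layers = 5, 1
dev = qml.device("default.qubit", wires=n_qubits)

@qml.qnode(dev)
def qnn(x, theta):
    # Feature map V(x): one R_y rotation per qubit.
    for i in range(n_qubits):
        qml.RY(x[i], wires=i)
    # Ansatz U(theta), with theta of shape (n_layers, 3, n_qubits).
    for k in range(n_layers):
        for i in range(n_qubits):
            qml.RX(theta[k, 0, i], wires=i)
        for i in range(n_qubits - 1):
            qml.CNOT(wires=[i, i + 1])
        for i in range(n_qubits):
            qml.RZ(theta[k, 1, i], wires=i)
        for i in range(n_qubits - 1):
            qml.CNOT(wires=[i, i + 1])
        for i in range(n_qubits):
            qml.RX(theta[k, 2, i], wires=i)
    return qml.expval(qml.PauliZ(0))

theta = np.random.uniform(0, 2 * np.pi, size=(n_layers, 3, n_qubits))
x = np.random.uniform(-1, 1, size=n_qubits)
print(qnn(x, theta))
\end{verbatim}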
\begin{figure}
\caption{Quantum Neural Network used to classify the linear regression dataset, having $5$ qubits and $\ell=1$ layers. The rotational gates parameterized by the feature $x_i$ form the feature map, while those parameterized via the $\theta$s form the ansatz.}
\label{fig:qnn}
\end{figure}
\paragraph{Training of the model} To train the models, we utilize a standard state-of-the-art gradient descent-based algorithm, Adam. The Mean Squared Error (MSE) was selected as both the loss function and the error metric for the regression tasks: it is a standard error metric in supervised learning and, as a loss, is more sensitive to larger errors. Categorical Cross Entropy (CCE) was used as the loss function for the classification task, while the accuracy score was employed as the error metric to assess the goodness of the classification. Given the output $f$ of the model, the computation of its gradient $\nabla f$, which is required to calculate the gradient of the loss function, is accomplished using the parameter-shift rule \cite{schuld2019evaluating}, since the commonly-used finite difference method $\nabla f(x; \theta) \approx (f(x;\theta+\epsilon)-f(x;\theta))/\epsilon$ is highly susceptible to hardware noise. The optimization hyper-parameters used are the learning rate, set to $0.1$, and the number of training epochs, which was selected through empirical investigation (specifically, we carry out 150 training epochs to obtain the simulated results, while for QPU-based results we perform just 10 epochs due to technological constraints on current hardware).
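For reference, a minimal sketch of the parameter-shift rule for rotations generated by Pauli operators is the following, where \texttt{f} stands for any callable returning the circuit expectation value; the toy check uses $f(\theta)=\cos\theta_0$, whose exact derivative is $-\sin\theta_0$:
\begin{verbatim}
import numpy as np

# Parameter-shift gradient: exact for gates of the form exp(-i theta/2 P)
# with P a Pauli operator; uses two circuit evaluations per parameter.
def parameter_shift_grad(f, theta, shift=np.pi / 2):
    theta = np.asarray(theta, dtype=float)
    grad = np.zeros_like(theta)
    for j in range(theta.size):
        plus, minus = theta.copy(), theta.copy()
        plus[j] += shift
        minus[j] -= shift
        grad[j] = 0.5 * (f(plus) - f(minus))
    return grad

f = lambda t: np.cos(t[0])
print(parameter_shift_grad(f, [0.3]), -np.sin(0.3))   # both ~ -0.2955
\end{verbatim}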
\paragraph{Datasets} We assess the performance of our approach using both synthetic and real-world datasets, across both regression and classification problems. The linear regression dataset is artificially generated with parametric control over the number of samples $n$, the dimensionality $d$, and the noise standard deviation $\sigma$. It is procedurally generated by randomly sampling a weight vector $w$ uniformly over $[-1,1]^d$ such that the training set $\{(x^{(i)}, y^{(i)})\}_{i=1}^n$ is constructed with $x^{(i)}$ uniformly sampled from $[-1,1]^d$, $y^{(i)} = w\cdot x^{(i)}+\epsilon^{(i)}$, and $\epsilon^{(i)} \sim \mathcal{N}(0, \sigma^2)$. In our case we have $n = 250$ (jointly the training and testing datasets), $d = 5$ and $\sigma=0.1$. The other datasets involved in the experiments are the \emph{Concrete Compressive Strength} dataset, the \emph{Diabetes} dataset, and the \emph{Wine} dataset.
The first of these is a multivariate regression problem calculating the strength of the material based on its age and ingredients. The second is a multivariate regression problem correlating the biological and lifestyle characteristics of patients to a quantitative measure of the progression of their disease. The third is a multivariate, three-class classification problem investigating the geographic origin of wine samples from their chemical characteristics. All are freely available and open source. Table \ref{tab:datasets} summarizes the characteristics of these datasets. Every dataset is divided into 80\% train samples and 20\% test samples. Moreover, in a data preprocessing phase, raw data were scaled to the range $[-1,1]$ to best suit the output of the quantum neural networks; the scaler was fitted using training data only. No other preprocessing technique, e.g. PCA, has been applied.
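A minimal sketch of the synthetic data generation and the train/test split described above is the following (the random seed and variable names are illustrative):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(42)

def make_linear_dataset(n=250, d=5, sigma=0.1):
    w = rng.uniform(-1, 1, size=d)
    X = rng.uniform(-1, 1, size=(n, d))
    y = X @ w + rng.normal(0.0, sigma, size=n)   # Gaussian noise, std sigma
    return X, y

X, y = make_linear_dataset()
n_train = int(0.8 * len(X))                      # 80/20 train/test split
X_train, X_test = X[:n_train], X[n_train:]
y_train, y_test = y[:n_train], y[n_train:]
\end{verbatim}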
\begin{table}[htbp]
\centering
\begin{tabular}{lllrrl}
\toprule
Dataset & Source & Nature & \# Features & \# Samples & Task \\\midrule
Linear & - & Synthetic & 5 & 250 & Regression \\
Concrete & UCI & Real-world & 8 & 1030 & Regression \\
Diabetes & Scikit-Learn & Real-world & 10 & 442 & Regression \\
Wine & UCI & Real-world & 13 & 178 & Classification \\\bottomrule
\end{tabular}
\caption{Characteristics of the datasets analyzed. UCI stands for the open source \emph{UCI Repository}. \emph{Scikit-Learn} is an open-source software library for Python3. The number of features does not include the target.}
\label{tab:datasets}
\end{table}
\paragraph{Implementation details} Our implementation is written in Python3, and utilizes PennyLane as a framework to define and simulate quantum circuits, with the PennyLane-Qiskit plugin used to execute circuits on IBM Quantum devices via the Qiskit software stack. To improve simulation times, we employed the JAX linear algebra framework as the simulation backend. By using JAX, the quantum circuit simulation can be just-in-time compiled via the XLA compiler, which can significantly speed up simulation times (by up to a factor of 10). Our simulations were run on a commercial computer with an AMD Ryzen 7 5800X (8-core CPU with a frequency of 3.80 GHz) and 64 GB of RAM.
The experiments on the noise-canceling properties of ensemble systems were conducted on the \texttt{ibm\_lagos} quantum processing unit, which consists of 7 qubits arranged in the topology $\{(0,1);(1,2);(1,3); (3,4); (4,5); (4,6)\}$. The single-qubit gate error and CNOT error of this QPU did not exceed $2.89\times 10^{-4}$ and $8.63\times 10^{-3}$, respectively (according to the latest calibration available).
\subsection{Resource efficiency of quantum neural network ensembles}
Besides performance, resource efficiency is a key argument for the utilization of quantum neural network ensembles. Efficiency can be measured by various metrics: for example, number of qubits, gates, parameters, and training samples required to achieve comparable performance.
To determine the potential savings in the number of qubits we here deploy the random subspace technique (also known as {\em attribute bagging} or {\em attribute bootstrap aggregation}). Our experiments (cf. Figure \ref{fig:net_struct}) suggest a potential saving of 20\% to 80\% of the total qubit budget via this approach. However, such a saving is made at the cost of the ensemble as a whole having the potential for less rich class-discrimination behaviour, dependent on both the sampling required to achieve full feature coverage and the nature of the underlying data manifold. A positive consequence of reducing the number of qubits, though, is that each quantum circuit will have fewer gates and parameters, resulting in improved noise robustness on real hardware (i.e. less decoherence, higher overall fidelity), as well as faster gradient calculation (individual gradient calculations via the parameter-shift rule require $2P$ quantum circuit evaluations for $P$ parameters). This allows for a saving of the parameter budget of up to 75\% in the indicated experimental regime, while the saving on gates corresponds proportionately (cf. Figure \ref{fig:qnn}). Savings for each dataset and ensemble technique are as depicted in Figure \ref{fig:net_struct}.
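A back-of-the-envelope account of these per-predictor savings, using the $3\ell n$ parameter count of the ansatz and assuming each base predictor acts on $\lfloor r_f\, d\rfloor$ qubits, is sketched below (the values chosen are illustrative):
\begin{verbatim}
from math import floor

# Illustrative per-predictor resource count: qubits and parameters,
# using 3 * layers * qubits parameters for the layered ansatz.
def per_predictor_resources(d, layers, r_f=1.0):
    qubits = max(1, floor(r_f * d))
    return qubits, 3 * layers * qubits

d, layers = 8, 5                                    # e.g. the Concrete dataset
print("baseline FM:", per_predictor_resources(d, layers))           # (8, 120)
print("Bag_0.5_*  :", per_predictor_resources(d, layers, r_f=0.5))  # (4, 60)
print("Bag_0.3_*  :", per_predictor_resources(d, layers, r_f=0.3))  # (2, 30)
\end{verbatim}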
\begin{figure}
\caption{Number of qubits \& parameters employed in individual experiments.}
\label{fig:net_struct}
\end{figure}
\subsection{Simulated Domain Experiments}
Initially, we evaluate our method in a simulated environment, one free of noise, such that the output estimation is infinitely precise. This differs significantly from execution on a NISQ quantum processing unit, which introduces various types of hardware error (such as decoherence and infidelity of operations) as well as sampling error caused via the measurement operation.
We examine the performance of both the baseline models and ensemble systems in a scenario where the number of layers (i.e. quantum neural network depth) is gradually increased.
To establish robustness to random initialization of parameters (that is, susceptibility to local minima effects), each simulation is repeated ten times.
\subsubsection{Experiment I}
The first experiment seeks to perform linear regression on a synthetic noisy 5-dimensional dataset. The function generating the targets is as follows: $y = w \cdot x + \epsilon$, where $x \in (-1,1)^5 \subseteq \Real{}^5$, $w \in \Real{}^5$ is randomly generated from a uniform distribution having as support the range $-1$ to $1$, and $\epsilon$ is a Gaussian noise of mean zero and standard deviation $0.1$. The total number of samples composing this synthetic dataset is 250. Each experimental data point instantiates a layer number, a number of bagged features, and a percentage of training data points available to the ensemble.
The results of the first experiment are indicated in Figure~\ref{fig:linear_all}. Both FM and AdaBoost achieve the lowest MSE generalisation error of about 0.021 at 10 layers, reaching a performance plateau at 5 layers. The bagging models utilising 80\% of the features are able to reach satisfactory results with 10 layers, which are only 0.03 - 0.05 points higher than the error obtained by the best performing models. In general, it appears that quantum bagging models with a high number of features are able to generalize well on unseen data in this setting, even with only 20\% of the training samples (unsurprisingly, the performance of bagging models with only 20\% of training samples are worse than those of the counterparts using 100\% of the training samples). Nevertheless, they still achieve remarkable results and show impressive generalization capabilities, confirming the effectiveness of bagged quantum models in generalizing well with relatively little training data \cite{caro2022generalization}.
It is also notable that all of the bagging models have a lower MSE generalisation error than FM and AdaBoost when the number of layers is low. In particular, with just 1 layer, all of the bagging models outperform FM and AdaBoost. However, as the number of layers increases, the performance of the bagging models begins to plateau more rapidly than that of FM and AdaBoost which, in contrast, continue their trend of decreasing error with increasing circuit depth. This is consistent with the notion that as base classifiers become more expressive their risk of overfitting increases (i.e. they develop an intrinsically low bias). AdaBoost, in particular, is known to be most effective in relation to weak, under-fitting base classifiers.
Finally, the decreasing error trend seen in the more complex bagging models as well as the FM and AdaBoost models is not visible for bagging with 30\% of the features. We conjecture that since this bagging configuration utilises only 1 qubit, it cannot appropriately model the evolution of the quantum state with respect to the input. Hence, despite leveraging 10 different submodels of 1 qubit (i.e., one feature) each, the performance of bagging models with 30\% of the features cannot improve as the number of layers increases (adding more layers in this case translates into performing rotations on the single qubit only, without the possibility of further CNOTs or other entangling gate operations). This result hence highlights the importance of entanglement in quantum neural network models as a means of improving performance.
\begin{figure}
\caption{Evolution of MSE error with respect to the number of quantum neural network layers in Experiment I. Each experimental data point instantiates a layer number, a number of bagged features and a percentage of training data points available to the ensemble.}
\label{fig:linear_all}
\end{figure}
\subsubsection{Experiment II}
The second experiment seeks to assess the performance of the respective ensemble techniques on the Concrete Compressive Strength dataset, which consists of 1030 samples of 8 features. The target value to predict in this regression case is the concrete compressive strength, measured in Megapascals (MPa), a highly nonlinear function of the age and composition of the material.
The results of the regression experiment are in line with the findings of Experiment I, and are reported in Figure~\ref{fig:concrete_all}. FM, AdaBoost and the two bagging models applied in relation to 80\% of features achieve comparable results at 10 layers, with the Bag.\_0.8\_1.0 configuration obtaining the lowest MSE error, followed by Bag.\_0.8\_0.2, FM and finally by AdaBoost. Also in this case, the differential between bagging models with 20\% of samples and with 100\% of samples is marginal, confirming the effectiveness of bagging quantum models in relation to reduced training dataset size. In contrast with Experiment I, bagging models having 30\% of available features now have 2 qubits, and therefore demonstrate a relative improvement in test error when $l=2$. However, their expressive power soon saturates and their error curves plateau.
In general, the generalization error of the bagging models decreases monotonically with the number of layers, in contrast to FM and AdaBoost. In fact, the latter exhibit episodes of overfitting when utilising 5 (and up to 7) layers, while bagging appears to be able to evade this outcome. This is again not surprising, since AdaBoost is designed to reduce bias, while bagging ensembles are designed to reduce variance.
All of the bagging models analyzed still outperform FM and AdaBoost at a low number of layers, suggesting that they may be the right choice for implementation on NISQ devices, or more generally whenever low-depth quantum circuits are required. As in the first experiment, it is also of interest to note that all the bagging models with $l=1$ here have very similar MSE values, while their performances vary as the number of layers increases. This may indicate that the MSE value reached at $l=1$ is optimal for that family of bagging models, given their expressibility. Moreover, a sharp decrease in MSE within the first few layers appears to be a common pattern, both for the ensembles and for the FM model. For example, at $l \geq 3$ the MSE of FM and AdaBoost decreases dramatically, while bagging models with 50\% of the features exhibit this trend between $l=1$ and $l=2$. (A future analysis might seek to exploit this characteristic in order to predict {\em a priori} how many layers are needed to attain an error level within a given bound.)
\begin{figure}
\caption{Evolution of MSE error with respect to the number of quantum neural network layers in Experiment II.}
\label{fig:concrete_all}
\end{figure}
\subsubsection{Experiment III}
The dataset used in Experiment III is the reference Diabetes dataset from Scikit-learn, consisting of 10 numerical features, including age, sex, body mass index and blood serum measurements, together with a target variable given by a quantitative measure of disease progression one year after baseline. The dataset is composed of 442 instances and is often used for non-trivial regression analysis in ML.
Figure~\ref{fig:diabete_all} illustrates the results of this experiment. The performance of the quantum models is notably different from that observed in the previous two experiments. The best performing models are the bagging models containing 80\% of the features, while FM and AdaBoost achieve satisfactory results up to 6 layers, at which point their MSE begins to increase. At $l=10$, however, every model has stabilized. Bag.\_0.8\_1.0 and Bag.\_0.8\_0.2 have an MSE respectively 8.8\% and 6.1\% lower than that of FM. AdaBoost has an MSE comparable to the error of Bag.\_0.3\_1.0, being only 0.9\% higher than FM. Bagging models with 50\% of the features obtain surprisingly good results, better than those of FM and very close to those of the bagging models with 80\% of the features.
As in Experiments I and II, a very sharp MSE reduction between $l=1$ and $l=3$ is evident for all of the models. Less complex models, such as bagging with 30\% and 50\% of the features, immediately reach a plateau, while the error curves for bagging with 80\% of the features, FM and AdaBoost continue to evolve as the number of parameters increases. Considering layer numbers between $l=6$ and $l=8$, it is clear that FM and AdaBoost overfit as the number of model parameters increases, and thus they perform poorly on test data. In particular, they overfit to such an extent that they almost reach the same performance level as the simplest bagging models with 30\% of the features. The latter show no indication of overfitting, however, in common with bagging models having 50\% of the features. Bagging with 80\% of the features shows slight overfitting when $l>6$, but still achieves the best results among all of the tested algorithms.
The greater robustness of the bagging models to overfitting, compared with AdaBoost and FM, arises from their ability to reduce variance by averaging decorrelated errors across the predictions of the submodels. By contrast, when the number of layers is high, AdaBoost and FM utilise a model that is too complex and expressive for the underlying task, leading to overfitting. In concordance with Experiment II, this result suggests that attribute bagging is an effective solution to overfitting in the NISQ setting, just as it is in the classical domain.
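To make the variance argument quantitative, one can recall a standard identity for averaged predictors (a textbook fact about ensembling, not a property established specifically for our circuits): if the $k$ submodels produce predictions with common variance $\sigma^2$ and average pairwise correlation $\rho$, the variance of the averaged prediction is
\[ \operatorname{Var}\Big(\frac{1}{k}\sum_{i=1}^{k}\hat f_i\Big)=\rho\,\sigma^{2}+\frac{1-\rho}{k}\,\sigma^{2}, \]
which decreases towards $\rho\sigma^{2}$ as $k$ grows; random feature and sample subsets act precisely by lowering $\rho$, i.e.\ by decorrelating the submodels.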
In addition, this experiment highlights more markedly the discrepancy between the error levels of bagging models with the same number of features but a different number of training samples. The difference between the MSE of the bagging model using 30\% of the features and 20\% of the samples and that of its counterpart using 100\% of the samples is now far more apparent, suggesting that when the variance of the dataset is very high, even bagging models require a sufficient amount of training samples to perform well in the NISQ setting.
\begin{figure}
\caption{Evolution of MSE error with respect to the number of quantum neural network layers in Experiment III.}
\label{fig:diabete_all}
\end{figure}
\subsubsection{Experiment IV}
For the classification task in Experiment IV, we used the reference UCI Wine dataset. It is a multi-class classification dataset corresponding to the results of a chemical analysis of wines grown within a specific region of Italy. It consists of 13 numerical features representing various chemical properties, such as alcohol, malic acid, and ash content, and a target variable indicating the class of the wine. The dataset has 178 samples and is a common baseline ML benchmark for low-parametric complexity classifiers.
Results from Experiment IV are reported in Figure~\ref{fig:wine_all}. Although they cannot be directly compared to the previous results due to the intrinsically different nature of the problem, a few comparative insights can be gained from the accuracy curves. First, all the models except bagging with 30\% of the features achieve the same accuracy score of 97.2\% using 10 layers. The performances of Bag.\_0.3\_0.2 and Bag.\_0.3\_1.0 are still relatively strong, however, with accuracy scores of 94.2\% and 96.9\% respectively. Given the very low complexity of these two models, this is a striking result.
A further notable aspect of the accuracy curves is that all ensemble models converge with far fewer layers than FM. In particular, on average they require only 3 layers to reach a performance plateau, after which the accuracy score saturates. By contrast, FM struggles to achieve a comparable accuracy score, exceeding 90\% accuracy only when $l \geq 7$. This means that the ensemble models are able to learn and capture the complex relationships between the input features far more efficiently than FM, which requires a much deeper architecture to attain comparable results. This observation is particularly relevant when considering the implementation of these models on NISQ devices, where the number of qubits and the coherence time are severely limited.
Moreover, as expected, bagging models with 100\% of the samples obtain a higher accuracy score than their counterparts with 20\% of the samples given the same number of layers. This suggests that using more training samples can improve the performance of ensemble models provided that the number of layers is low, as it allows them to better capture the underlying patterns of class discriminability in the data.
\begin{figure}
\caption{Evolution of Accuracy score with respect to quantum neural network depth in Experiment IV.}
\label{fig:wine_all}
\end{figure}
\subsection{Experiments executed on superconducting-based QPU}
For the real-hardware evaluation, we compare the performance of the baseline quantum neural network with the Bag\_0.8\_0.2 ensemble on the same synthetic linear regression dataset used in Experiment I. We selected the Bag\_0.8\_0.2 model as the representative ensemble technique for its outstanding performance in the simulated experiments despite the low number of training samples. To ensure statistical validity, we repeat each experiment 10 times. However, due to technological constraints on real quantum hardware, we analyze only the linear dataset with a quantum neural network having a single layer.
\begin{figure}
\caption{Comparison of average performance of the baseline model and the Bag\_0.8\_0.2 ensemble technique on IBM quantum hardware.}
\label{fig:ibm_execution:a}
\label{fig:ibm_execution:b}
\label{fig:ibm_execution}
\end{figure}
Figure \ref{fig:ibm_execution} presents the real-world experimental findings, which indicate that, compared to the baseline model, the bagging ensemble reduces the expected mean square error by one-third and the expected variance by half when executed on quantum hardware. Such results demonstrate that the noise-canceling capabilities of ensemble techniques can be effectively exploited on NISQ devices in realistic settings. Additionally, the performance of the ten bagging models varied significantly, underlining the need to reinitialise the ensemble multiple times and to validate it against a suitable validation dataset so that the best model is selected.
\section{Conclusion}
We propose the use of ensemble techniques for the practical implementation of quantum machine learning models on NISQ hardware. In particular, we justify the application of these techniques based on their capacity for a significant reduction in resource usage, including with respect to the overall qubit, parameter, and gate budget, which is achieved via the random subspace (attribute bagging) technique. This resource saving is especially crucial for noisy hardware, which typically offers only a small number of qubits that are vulnerable to decoherence, noise, and operational errors. Consequently, the contribution of ensemble techniques may be seen as a form of quantum noise reduction.
To establish this, we evaluated and compared various configurations of bagging and boosting ensemble techniques on synthetic and real-world datasets, tested both in a simulated, noise-free environment and on a superconducting-based QPU by IBM, and spanning a range of layer depths.
Our experimental findings showed that bagging ensembles can effectively train quantum neural network instances using fewer features and qubits, which leads to ensemble models with superior performance compared to the baseline model. Reducing the number of features in bagging models of quantum neural networks directly translates into a reduction in the number of qubits, which is a desirable characteristic for practical quantum applications. Ensembles of quantum neural networks can also help address some of the toughest challenges associated with noise and decoherence in NISQ devices, as well as mitigate barren plateau effects. These can be key considerations in the development of quantum machine learning models, particularly when working with limited resources on modern quantum systems.
Moreover, bagging models were found to be extremely robust to overfitting, being able to effectively capture the underlying patterns in the data with high generalization ability. This makes them better suited for tasks where generalization is important, such as real-world applications. However, it is important to note that the effectiveness of bagging quantum models diminishes as the number of features decreases, which suggests that sufficiently complex bagging models are still needed to obtain satisfactory results. Using only a subset of the features can reduce the computational complexity of the model and prevent overfitting, but it may also result in a loss of information and a decrease in performance. By contrast, the number of training samples does not seem to have a deep impact on bagging quantum models, hence this bagging strategy may be used when executing quantum neural network instances on real hardware in order to deal with long waiting queues and job scheduling issues. In this regard, a low number of training samples leads to faster training procedures and quantum resource savings. The training of ensembles can also be done in parallel on multiple QPUs in a distributed learning fashion. Therefore, it is important to strike a balance between model complexity and performance to achieve the best possible outcomes.
Additionally, the fact that the bagging models outperform FM and AdaBoost at a low number of layers suggests that the former are better suited for low-depth quantum circuits, which have limited capacity and are prone to noise and errors. For quantum machine learning tasks on NISQ devices, using bagging models with a low number of layers may be a good strategy to achieve good generalization performance while minimizing the impact of noise and errors in the circuit.
Overall, our results suggest that ensembles of quantum neural network models can be a promising avenue for the development of practical quantum machine learning applications on NISQ devices, both from a performance and resource usage perspective. A careful evaluation of the trade-offs between model complexity, performance, quantum resources available and explainability may be necessary to make an informed decision.
In a future work, we plan to further investigate the relationship between ensembles and quantum noise, which is a key consideration when developing quantum neural network models. Our findings could potentially contribute to the development of more efficient and accurate quantum machine learning algorithms, which could have significant implications for real-world applications.
\section*{Declaration}
\subsection*{Authors' contributions}
MI, MG, and AC had the initial idea, implemented the interface for executing experiments on the IBM QPUs, performed the experiments, and analyzed the data. MG, SV, DW, AM, and MP supervised the project. All authors contributed to the manuscript.
\subsection*{Availability of data and materials}
The data and source code utilized in our study are freely accessible at \url{https://github.com/incud/Classical-ensemble-of-Quantum-Neural-Networks}. The procedural generation code for the Linear Regression dataset is also accessible at the same URL. In addition, the UCI Repository provides open access to the Concrete and Wine datasets, which can be found at \protect\url{https://archive.ics.uci.edu/ml/index.php}. The Diabetes dataset provided by Scikit-Learn is also freely available and included with the Python3 package.
\appendix
\section{Detailed plots}\label{apx:detailed_plots}
We provide some additional plots of the simulated experiments. In particular, we compare the different configurations of bagging and boosting techniques and their variance. Figures \ref{fig:linear_erb}, \ref{fig:concrete_erb}, \ref{fig:diabete_erb} and \ref{fig:wine_erb} show the results for the Linear, Concrete, Diabetes, and Wine datasets, respectively.
\begin{figure}
\caption{Comparison of the performance of the baseline model and ensemble systems on the Linear Regression dataset. It exhibits the MSE and standard deviation, with a semi-transparent area, of the ensemble schemes in comparison to the baseline models. The top-left image shows ensembles with Random Subspace at 30\% of the features, top-right shows ensembles with Random Subspace at 50\%, bottom-left displays ensembles with Random Subspace at 80\%, and bottom-right illustrates AdaBoost.}
\label{fig:linear_erb}
\end{figure}
\begin{figure}
\caption{Comparison of the performance of the baseline model and ensemble systems on the Concrete Compressive Strength dataset. It exhibits the MSE and standard deviation, with a semi-transparent area, of the ensemble schemes in comparison to the baseline models. The top-left image shows ensembles with Random Subspace at 30\% of the features, top-right shows ensembles with Random Subspace at 50\%, bottom-left displays ensembles with Random Subspace at 80\%, and bottom-right illustrates AdaBoost.}
\label{fig:concrete_erb}
\end{figure}
\begin{figure}
\caption{Comparison of the performance of the baseline model and ensemble systems on the Diabetes dataset. It exhibits the MSE and standard deviation, with a semi-transparent area, of the ensemble schemes in comparison to the baseline models. The top-left image shows ensembles with Random Subspace at 30\% of the features, top-right shows ensembles with Random Subspace at 50\%, bottom-left displays ensembles with Random Subspace at 80\%, and bottom-right illustrates AdaBoost.}
\label{fig:diabete_erb}
\end{figure}
\begin{figure}
\caption{Comparison of the performance of the baseline model and ensemble systems on the Wine dataset. It exhibits the average accuracy and standard deviation, with a semi-transparent area, of the ensemble schemes in comparison to the baseline models. The top-left image shows ensembles with Random Subspace at 30\% of the features, top-right shows ensembles with Random Subspace at 50\%, bottom-left displays ensembles with Random Subspace at 80\%, and bottom-right illustrates AdaBoost.}
\label{fig:wine_erb}
\end{figure}
\end{document}
\begin{document}
\begin{abstract} In this paper we develop a Gidas-Ni-Nirenberg technique for polyharmonic equations and systems of Lane-Emden type. As far as Dirichlet boundary conditions are concerned, we prove uniqueness of solutions up to eighth order equations, namely those involving the fourth iteration of the Laplace operator. We then extend the result to polyharmonic operators of arbitrary order, provided some natural boundary conditions, different from Dirichlet's, are satisfied: the obstruction in the Dirichlet case is apparently a new phenomenon and seems due to some loss of information, though this is far from being clear. When the polyharmonic operator turns out to be a power of the Laplacian, which is the case under Navier boundary conditions, uniqueness of solutions holds as a byproduct in a fairly general context. New existence results for systems are also established.
\end{abstract}
\maketitle
\section{Introduction}
\noindent From the seminal paper of Gidas-Ni-Nirenberg \cite{GidasNiNirenberg79}, it is well known that the Lane--Emden equation
\begin{equation}\label{GNN}\begin{cases}
-\Delta u = \abs{u}^p\,, \, &\text{ in } B_1 \subset \mathbb{R}^N, N > 2,\\
u=0\,, \, &\text{ on } \partial B_1
\end{cases} \end{equation}
with $1 < p < \frac{N+2}{N-2}$ has at most one, actually exactly one nontrivial solution, which is positive, radially symmetric and strictly decreasing in the radial variable ($B_1$ denotes the unit ball centered at the origin of $\mathbb{R}^N$). This result has been extended in many different directions and in particular to the biharmonic operator subject to Dirichlet boundary conditions
\begin{equation}\label{biheq} \begin{cases}
\Delta^2 u = \abs{u}^{p}\,, \, &\text{ in } B_1 \subset \mathbb{R}^N, N > 4\\
u=\frac{\partial u}{\partial \nu}=0\,, \, &\text{ on } \partial B_1
\end{cases} \end{equation}
in \cite{Dalmasso95,FerreroGazzolaWeth07}, see also \cite{Dalmasso99} for the sublinear case, namely when $p<1$.
\noindent Uniqueness results have been also proved for the Lane--Emden system
\begin{equation}\label{hsys} \begin{cases}
\begin{aligned}
-\Delta u &= \abs{v}^{q}\,, \\
-\Delta v &= \abs{u}^{p}\,,
\end{aligned} & \text{ in } B_1 \subset \mathbb{R}^N, N > 2\\
u=v=0\,, & \text{ on } \partial B_1
\end{cases} \end{equation}
with $p, q>1$, see \cite{Dalmasso04} and then extended in \cite{CuiWangShi07} to systems with more than two equations.
\noindent More recently, the non-variational situation has been addressed in \cite{Schiera18}, where uniqueness of solutions is established for the following system
\begin{equation}\label{deliasys} \begin{cases}
\begin{aligned}
\Delta^2 u=\abs{v}^q\,, \\
-\Delta v= \abs{u}^p\,,
\end{aligned} & \text{ in } B_1\subset \mathbb{R}^N, N > 4 \\
u=\frac{\partial u}{\partial \nu}=v=0\,, & \text{ on } \partial B_1\,.
\end{cases}\,
\end{equation}
\noindent In what follows $\Delta^\alpha(\cdot):=\Delta(\Delta^{\alpha-1}(\cdot))$, $\alpha\in\mathbb{N}$, $\alpha\geq 1$, denotes the iterated Laplace operator, the so-called polyharmonic operator. Clearly, this definition does not take into account boundary conditions, according to which the iterated operator may or may not be a power of the Laplacian. Higher order problems are more sensitive to boundary conditions than the second order case, and the wealth of physically significant boundary values is the challenge which prevents the use of standard tools from elliptic theory for second order operators, such as the maximum principle, which fails in general for domains which are not slight perturbations of the ball; sufficient conditions for the validity of a general maximum principle have been addressed in \cite{CASTA}.
\subsection*{Main results} Before stating our main results let us make precise the notion of solution which in this context is always assumed to be in the classical pointwise sense. Our main results are the following:
\begin{theorem}\label{teo:eqnDir}
There exists at most one nontrivial solution to
\begin{equation}\label{eqnDir}
\begin{cases}
(-\Delta)^{\alpha} u=\abs{u}^p\,, \, & \text{ in } B_1 \subset \mathbb{R}^N, N > 2\alpha\\
\frac{\partial^k u}{\partial \nu^k}=0, \, & \text{ on } \partial B_1\,, \, k \le \alpha-1
\end{cases}
\end{equation}
with $p > 1$ and $1 \le \alpha \le 4$.
\end{theorem}
\noindent Notice that in the case $\alpha=1$, \eqref{eqnDir} reduces to the Lane--Emden equation \eqref{GNN} and for $\alpha=2$ to the biharmonic equation \eqref{biheq}. The boundary conditions in \eqref{eqnDir} are the higher order Dirichlet boundary conditions, for which the polyharmonic operator fails to be a power of the Laplace operator. Those conditions are particularly relevant from the point of view of applications, see \cite{GazzolaGrunauSweers10}, as well as from the theoretical point of view, as they prevent the use of reduction methods, such as decomposing the equation into a system of lower order equations. Actually the result of Theorem \ref{teo:eqnDir} is stronger, in the sense that it yields uniqueness of solutions in the sharp range of existence $1<p<(N+2\alpha)/(N-2\alpha)$ as a consequence of \cite[Theorem 8]{pucci_serrin} and \cite[Theorems 7.17--7.18]{GazzolaGrunauSweers10}.
\noindent We then extend Theorem \ref{teo:eqnDir} to systems of polyharmonic equations as follows
\begin{theorem}\label{teo:sysDir}
There exists at most one nontrivial solution to
\begin{equation}\label{sysDir}
\begin{cases}
\begin{aligned}
&(-\Delta)^{\alpha_j} u_j=\abs{u_{j+1}}^{p_j},\, j =1, \dots, m-1, \\
&(-\Delta)^{\alpha_m} u_m=\abs{u_{1}}^{p_m},
\end{aligned} & \text{ in } B_1,\\
\frac{\partial^k u_j}{\partial \nu^k}=0, \, k=0, \dots, \alpha_j-1, \, j =1, \dots, m, & \text{ on } \partial B_1,
\end{cases}
\end{equation}
with $p_j \ge 1$ for any $j$, $\prod_{j=1}^m p_j >1$, $N > 2 \max \{ \alpha_j \}_j$ and $1 \le \alpha_j\le 4$ for any $j =1, \dots, m$, where $m \ge 1$.
Moreover, let $\alpha_j \in \mathbb{N}$ and $p_j >1$ for any $j$. Then, there exists a classical nontrivial solution to \eqref{sysDir} if there exists $l \in \{ 1, \dots, m \}$ such that
\begin{equation}\label{SerrinMulti}
N+2 \sum_{k=1}^m \alpha_{k+l} \prod_{j=0}^{k-1} p_{j+l} - N \prod_{j=1}^m p_j \ge 0,
\end{equation}
where $p_{k+m}:=p_k$ and $\alpha_{k+m}:=\alpha_k$ for any $k=1, \dots, m$.
\end{theorem}
\noindent If $m=1$ then \eqref{sysDir} reduces to \eqref{eqnDir} with $\alpha=\alpha_1$, and \eqref{SerrinMulti} coincides with the condition given by the Serrin exponent $\frac{N}{N-2\alpha}$. In the case $m=2$, $\alpha_1=\alpha$, $\alpha_2=\beta$, $p_1=q$, $p_2=p$, \eqref{SerrinMulti} reduces to the Serrin curves:
\[ 2\beta q + N + 2\alpha pq - N pq \ge 0, \quad 2\alpha p + N + 2\beta pq - N pq \ge 0. \]
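Indeed, taking $l=1$ in \eqref{SerrinMulti} with $m=2$ gives
\[ N+2\big(\alpha_2 p_1+\alpha_1 p_1p_2\big)-Np_1p_2 = 2\beta q + N + 2\alpha pq - N pq \ge 0, \]
which is the first of the two curves, while the choice $l=2$ yields the second one.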
\noindent Notice that for $m=2$, $\alpha_1=1$ and $\alpha_2=1$, \eqref{sysDir} reduces to \eqref{hsys} whereas when $m=2$, $\alpha_1=2$ and $\alpha_2=1$ we have \eqref{deliasys}.
\noindent As we are going to see, when trying to extend the proof of Theorem \ref{teo:eqnDir} to the case $\alpha \ge 5$, one has to face technical difficulties due to the fact that Dirichlet boundary conditions prescribe the behavior only of the first $\alpha-1$ derivatives of the solution, and no information apparently can be retained for higher order derivatives. However, in this context new boundary conditions show up in a natural fashion for which we have the following
\begin{corollary}\label{teo:Var}
There exists at most one nontrivial solution to
\begin{equation}\label{sys:Var}
\begin{cases}
(-\Delta)^{\alpha} u=\abs{u}^{p}, & \text{ in } B_1, \\
\Delta^{2k} u=0,\, 2k \le \alpha-1, & \text{ on } \partial B_1 \\
\frac{\partial}{\partial \nu} \Delta^{2k} u= 0, \, 2k+1 \le \alpha-1, &\text{ on } \partial B_1
\end{cases}
\end{equation}
with $N > 2 \alpha$, $p > 1$ and $\alpha \in \mathbb{N}$, $\alpha\geq 1$.
\end{corollary}
\noindent The boundary conditions considered in \eqref{sys:Var} are twofold in nature: on one side, from the mathematical point of view, they enable us to split the equation into a system of equations subject to Dirichlet boundary conditions; on the other side, from the physical point of view, they force the higher order momenta to vanish along the boundary.
\noindent As far as the so-called Navier boundary conditions are concerned, for which the polyharmonic operator is actually a power of the Laplacian and classical reduction methods apply, we have as a byproduct of the previous results the following
\begin{corollary}\label{teo:Navier}
There exists at most one nontrivial solution to
\begin{equation}\label{sys:Nav}
\begin{cases}
\begin{aligned}
&(-\Delta)^{\alpha_j} u_j=\abs{u_{j+1}}^{p_j},\, j =1, \dots, m-1, \\
&(-\Delta)^{\alpha_m} u_m=\abs{u_{1}}^{p_m},
\end{aligned} & \text{ in } B_1 , \\
\Delta^k u_j=0, \, k=0, \dots, \alpha_j-1, \, j =1, \dots, m, & \text{ on } \partial B_1
\end{cases}
\end{equation}
with $p_j \ge 1$ for any $j$, $\prod_{j=1}^m p_j >1$, $\alpha_j \in \mathbb{N}$, $m \ge 1$ and $N > 2 \max \{ \alpha_j \}_j$.
\end{corollary}
\noindent We mention that here the case $m=3$ and $\alpha_j=1$ was covered in \cite{CuiWangShi07}. Nonexistence results above the critical curve for \eqref{sys:Nav}, in the variational case $m=2$, $\alpha_1=\alpha_2$, have been established in \cite{liu_guo_zhang}. Existence of solutions below the critical curve follows along the lines of \cite{clement_felmer_mitidieri}, as detailed in \cite{Schiera19}, where the non-variational case is also tackled.
\section{Polyharmonic equations with Dirichlet boundary conditions: proof of Theorem \ref{teo:eqnDir}}\label{sec:eqn}
\noindent Let us first recall the following preliminary results:
\begin{lemma}[Theorem 5.7 in \cite{GazzolaGrunauSweers10}]\label{HopfPoly}
Let $u$ be a nontrivial solution to \eqref{eqnDir}. Then $u>0$ on $B_1$ and for every $x \in \partial B_1$ one has
\[ \begin{cases}
\Delta^{\alpha/2} u(x) >0, & \text{ for $\alpha$ even,}\\
-\frac{\partial}{\partial \nu} \Delta^{(\alpha-1)/2} u(x) >0, & \text{ for $\alpha$ odd.}
\end{cases} \]
\end{lemma}
\begin{lemma}[Theorem 7.1 in \cite{GazzolaGrunauSweers10}]\label{lem:rad}
Let $u$ be a nontrivial solution to \eqref{eqnDir}.
Then it is radially symmetric and strictly decreasing in the radial variable.
\end{lemma}
\noindent We next prove a key ingredient for what follows:
\begin{lemma}\label{lem:shape}
Let $u$ be a nontrivial solution to \eqref{eqnDir}. Then, $\Delta^s u(0)<0$ if $1 \le s < \alpha$ is odd, and in this case it is increasing until the first zero, $\Delta^s u(0)>0$ if $1 \le s<\alpha$ is even, and in this case it is decreasing up to the first zero.
Moreover, if $\alpha \ge 2$ is even, then the following properties hold:
\begin{itemize}
\item $\Delta^{\alpha-j} u$ has exactly $\alpha-j+1$ zeros (including the last one in $r=1$) and $\alpha-j$ critical points in $(0, 1)$ if $\alpha-1 \ge j \ge \alpha/2+1$, exactly $j$ zeros and $j-1$ critical points in $(0, 1)$ if $1 \le j \le \alpha/2$;
\item $\Delta^s u(1)=0$ if $s \le \alpha/2 -1 $, $\Delta^s u(1) > 0$ if $s \ge \alpha/2$, and $(\Delta^s u)'(1)=0$ if $s \le \alpha/2-1$, $(\Delta^s u)'(1)\ge 0$ if $s \ge \alpha/2$.
\end{itemize}
\noindent If $\alpha \ge 3$ is odd, then we have:
\begin{itemize}
\item $\Delta^{\alpha-j} u$ has exactly $\alpha-j+1$ zeros (including the last one in $r=1$) and $\alpha-j$ critical points in $(0, 1)$ if $\alpha-1 \ge j \ge (\alpha+1)/2$, exactly $j$ zeros and $j-1$ critical points in $(0, 1)$ if $1 \le j \le (\alpha-1)/2$;
\item $\Delta^s u(1)=0$ if $s \le (\alpha-1)/2$, $\Delta^s u(1) < 0$ if $s \ge (\alpha+1)/2$, and $(\Delta^s u)'(1)=0$ if $s \le (\alpha-3)/2$, $(\Delta^s u)'(1) \le 0$ if $s \ge (\alpha-1)/2$.
\end{itemize}
\noindent (See \autoref{fig:Delta}).
\end{lemma}
\begin{proof}
\begin{figure}
\caption{\emph{Qualitative graphs of $\Delta^s u(r)$ on the interval $[0,1]$, where $s=5,4,3,2,1$ respectively, and $u$ satisfies \eqref{eqnDir}.}}
\label{fig:Delta}
\end{figure}
\noindent We prove only the case in which $\alpha$ is even, the odd case being similar. Recall that
\begin{equation}\label{radialLaplace} r^{N-1}(\Delta^{j} u)'(r)=\int_0^r s^{N-1} (\Delta^{j+1} u)(s) \, ds \end{equation}
for any integer $j\geq 1$.
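Identity \eqref{radialLaplace} is a consequence of the expression of the Laplacian of a radial function, $\Delta w=r^{1-N}\big(r^{N-1}w'\big)'$: applying it to $w=\Delta^{j}u$ and integrating over $(0,r)$ (the boundary term at the origin vanishes by regularity) one obtains
\[ r^{N-1}(\Delta^{j} u)'(r)=\int_0^r \big(s^{N-1}(\Delta^{j} u)'(s)\big)'\, ds=\int_0^r s^{N-1} (\Delta^{j+1} u)(s) \, ds. \]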
By \eqref{radialLaplace}, $(\Delta^{\alpha-1} u)'>0$ and as a consequence $\Delta^{\alpha-1} u$ has at most one zero. If $\alpha=2$, then in view of \autoref{HopfPoly} $\Delta u(1)>0$, hence $\Delta u$ has exactly one zero, and the proof is complete.
If $\alpha \ge 4$, then we conclude that $\Delta^{\alpha-2} u $ has at most two zeros. Indeed, again by \eqref{radialLaplace}, it is decreasing up to the endpoint $r_* \ge r_0$, where $r_0$ is such that $\Delta^{\alpha-1} u (r_0)=0$. Notice that if $r_* < 1$, then $(\Delta^{\alpha-2} u )'(r_*)=0$.
Therefore, it holds
\[ r^{N-1}(\Delta^{\alpha-2} u)'(r)=\int_{r_*}^r s^{N-1} (\Delta^{\alpha-1} u)(s) \, ds, \]
and since $\Delta^{\alpha-1} u >0$ beyond $r_* \ge r_0$, then $(\Delta^{\alpha-2} u)'(r) >0$ for any $r \ge r_*$.
\noindent Analogously, one concludes that $\Delta^{\alpha-j} u$ has at most $j$ zeros and $j-1$ critical points in $(0, 1)$, $j \le \alpha-1$.
In particular, $\Delta^{\alpha/2-1} u$ has at most $\alpha/2+1$ zeros and $\alpha/2$ critical points in $(0, 1)$.
Moreover, by Dirichlet boundary conditions, $\Delta^{\alpha/2-1} u(1)=0$, $(\Delta^{\alpha/2 -1} u )'(1)=0$
and $(\Delta^{\alpha/2 -1} u )''(1)=u^{(\alpha)}(1)=\Delta^{\alpha/2} u(1)> 0$ by \autoref{HopfPoly}.
Then, $\Delta^{\alpha/2-1} u$ should be decreasing and positive near 1.
\noindent Now, assume that $\Delta^{\alpha/2-1} u$ has exactly $\alpha/2+1$ zeros and $\alpha/2$ critical points in $(0, 1)$. Then $\Delta^{\alpha/2} u$ must have exactly $\alpha/2$ zeros, and by iteration $\Delta^{\alpha-j} u$ has exactly $j$ zeros, with $j \le \alpha/2+1$.
In particular, this means that $\Delta^{\alpha/2-1} u$ is positive near 0 and has an even number of zeros, if $\alpha/2 - 1$ is even; or it is negative near 0 and has an odd number of zeros, if $\alpha/2 - 1$ is odd. In any case, $\Delta^{\alpha/2-1} u$ should be increasing near $1$, a contradiction. Hence $\Delta^{\alpha/2-1} u$ must have one zero fewer, namely at most $\alpha/2$ zeros (including also the last one in $r=1$) and at most $\alpha/2 -1$ critical points in $(0, 1)$.
\noindent Now, let us consider $\Delta^{\alpha/2-2} u$. Since $\Delta^{\alpha/2-1} u$ has at most $\alpha/2$ zeros, of which the last one is in $r=1$, then it changes sign at most $\alpha/2$ times, and therefore $\Delta^{\alpha/2-2} u$ has at most $\alpha/2 - 1$ critical points, and $\alpha/2$ zeros in $(0,1)$.
Notice that $\Delta^{\alpha/2-2} u (1)=0$. Moreover, $(\Delta^{\alpha/2-2} u)^{(j)}(1)=0$ for any $j \le 3$ and $(\Delta^{\alpha/2-2} u)^{(4)}(1)=\Delta^{\alpha/2} u(1)> 0$. This means that $\Delta^{\alpha/2-2} u$ is decreasing and positive near 1. However, as above, this is possible only if $\Delta^{\alpha/2 -2} u$ has at most $\alpha/2-1$ zeros (including also the last one in $r=1$) and at most $\alpha/2 -2$ critical points.
\noindent Next we iterate the procedure. Then, at each step we lose one critical point.
Thus, $\Delta^{\alpha-j} u$ has at most $\alpha-j+1$ zeros (including the last one in $r=1$) and $\alpha-j$ critical points in $(0, 1)$ if $j \ge \alpha/2+1$, at most $j$ zeros and $j-1$ critical points in $(0, 1)$ if $j \le \alpha/2$.
In particular, $\Delta u$ has at most 1 critical point. We know that $\Delta u (0) = u''(0) <0$, as $u'(0)=0$ and $u' <0$ in $(0, 1)$.
We have two cases: either $\Delta u$ starts negative, increases, reaches a positive maximum and then decreases to 0, or it is always negative and has no critical points.
However, we know that $\Delta u(1)=0$ and $\Delta u$ is decreasing in the last interval, as $(\Delta u)^{(j)}(1)=0$ for any $j \le \alpha -3$ and $(\Delta u)^{(\alpha -2)}(1) = u^{(\alpha)}(1)=\Delta^{\alpha/2} u(1) >0$ by \autoref{HopfPoly}.
Then necessarily the first case occurs: $\Delta u$ starts negative, increases, reaches a positive maximum and decreases to 0, namely it has exactly one critical point.
\noindent As a consequence, $\Delta^2 u$ has at least 2 critical points; however, since it has at most 2 critical points by what was proved above, it turns out to have exactly 2 critical points.
Moreover, $\Delta^2 u(0)>0$, and it is decreasing until the first zero.
\noindent Iteratively, we conclude that $\Delta^{\alpha-j} u$ has exactly $\alpha-j+1$ zeros (including the last one in $r=1$) and $\alpha-j$ critical points in $(0, 1)$ if $j \ge \alpha/2+1$, exactly $j$ zeros and $j-1$ critical points in $(0, 1)$ if $j \le \alpha/2$.
Moreover, $\Delta^s u(0)<0$ if $s$ is odd, and in this case it is increasing until the first zero, $>0$ if $s$ is even, and in this case it is decreasing before the first zero.
Further, by boundary conditions, $\Delta^s u(1)=0$ if $s \le \alpha/2 -1 $, $\Delta^s u(1) > 0$ if $s \ge \alpha/2$, and $(\Delta^s u)'(1)=0$ if $s \le \alpha/2-1$, $(\Delta^s u)'(1)\ge 0$ if $s \ge \alpha/2$.
\end{proof}
\subsection{Proof of \autoref{teo:eqnDir} in the case $\alpha=3$}
\label{sec:3}
Let $u$ be a nontrivial solution to
\begin{equation}\label{eqn3}
\begin{cases}
-\Delta^3 u=\abs{u}^p, & \text{ in } B_1\\
u=\frac{\partial u}{\partial \nu}=\frac{\partial^2 u}{\partial \nu^2}=0, & \text{ on } \partial B_1.
\end{cases}
\end{equation}
By \autoref{lem:rad} and \autoref{HopfPoly}, $u$ is positive, radially symmetric and strictly decreasing.
In particular, since the maximum is attained at $0$, we have $u'(0)=0$.
Moreover,
\[
r^{N-1}(\Delta^2 u)'(r)=\int_0^r s^{N-1} (\Delta^3 u)(s) \, ds .
\]
As a consequence,
\begin{equation}\label{ce}
(\Delta^2 u)'(0)=\lim_{r \to 0} \frac{\int_0^r s^{N-1} (\Delta^3 u)(s) \, ds}{r^{N-1}}=0.
\end{equation}
Moreover,
\[
r^{N-1}(\Delta u)'(r)=\int_0^r s^{N-1} (\Delta^2 u)(s) \, ds
\]
and therefore
\begin{equation}\label{ce2}
(\Delta u)'(0)=\lim_{r \to 0} \frac{\int_0^r s^{N-1} (\Delta^2 u)(s) \, ds}{r^{N-1}}=0.
\end{equation}
\noindent Let $w$ be another nontrivial solution to \eqref{eqn3} and set
\[ \tilde{w}(r)=\lambda^s w(\lambda r) , \]
where $s$ is chosen such that $\tilde{w}$ satisfies
\[ \begin{cases}
-\Delta^3 \tilde w=\abs{\tilde w}^p, \, r \le 1/\lambda\\
\tilde w(1/\lambda)= \tilde w'(1/\lambda)= \tilde w''(1/\lambda)=0
\end{cases} \]
namely $s=\frac{6}{p-1}$,
whereas $\lambda >0$ is such that
\begin{equation}\label{lambda}
\tilde{w}(0)= u(0).
\end{equation}
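We note in passing that the value of $s$ comes from the scaling of the equation: $\Delta^3 \tilde w(r)=\lambda^{s+6}(\Delta^3 w)(\lambda r)=-\lambda^{s+6-sp}\abs{\tilde w(r)}^p$, so that $\tilde w$ solves the same equation on $B_{1/\lambda}$ precisely when $s+6=sp$, that is $s=\frac{6}{p-1}$; for a general power $\alpha$ the same computation gives $s=\frac{2\alpha}{p-1}$.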
\noindent \textbf{Claim}:
\begin{equation}\label{claim}
\Delta \tilde w(0)=\Delta u(0), \, \Delta^2 \tilde w(0)=\Delta^2 u(0).
\end{equation}
\noindent Let us suppose for instance $\Delta^2( u- \tilde w)(0)>0$ and $\Delta( u- \tilde w)(0) >0$. Notice that by continuity $\Delta^2( u- \tilde w) >0$ on $[0, \delta)$ and $\Delta( u- \tilde w) >0$ on $[0, \varepsilon)$ for some $\delta, \varepsilon$ sufficiently small. Moreover $u- \tilde w >0$ on $(0, \varepsilon]$: indeed, if there exists $a \le \varepsilon$ such that $u(a)-\tilde w(a) \le 0$, then $\Delta(u-\tilde w)>0$ implies $u- \tilde w <0$ on $[0, a)$, which is a contradiction.
\noindent Hence we can choose $R_1$ such that
\begin{multline*} R_1= \sup \{ r \le \min \{1, 1/\lambda \} : \, (u- \tilde w)(s) >0, \, \Delta( u- \tilde w)(s) >0, \\
\, \Delta^2(u-\tilde w)(s) >0, \, s \in (0, r) \} .\end{multline*}
We have
\begin{equation}\label{R_1}
(u- \tilde w)(R_1) >0, \, \Delta(u-\tilde w)(R_1)>0.
\end{equation}
Indeed, let us assume by contradiction that $(u-\tilde w)(R_1)=0$. Then, since $\Delta(u-\tilde w) >0$ on $[0, R_1)$ we would have by the maximum principle $u-\tilde w <0$ on $[0, R_1)$.
Analogously, if $\Delta(u-\tilde w)(R_1)=0$, then $\Delta(u-\tilde w) <0$ on $(0, R_1)$, a contradiction.
As a consequence, \eqref{R_1} holds. Moreover, either $R_1<\min \{1, 1/\lambda \}$, and in this case $\Delta^2( u- \tilde w)(R_1)=0$, or $R_1=\min \{1, 1/\lambda \}$.
\noindent In the first case, by applying the maximum principle to $-\Delta^3(u- \tilde w) =u^p- \tilde w^p>0$, one has $\Delta^2( u- \tilde w) <0$ on $(R_1, R_1+ \delta)$ for $\delta$ sufficiently small.
We can set $R_2$ such that
\begin{multline*} R_2= \sup \{ r \le \min \{1, 1/\lambda \} : \, (u- \tilde w)(s) >0, \, \Delta( u- \tilde w)(s) >0, \, \\
\Delta^2(u-\tilde w)(s) <0, \, s \in (R_1, r) \} .\end{multline*}
As above, we have
\[ \Delta^2( u- \tilde w)(R_2) <0, \, (u-\tilde w)(R_2)>0 \]
and either $R_2<\min \{1, 1/\lambda \}$, which implies $\Delta(u-\tilde w)(R_2)=0$, or $R_2=\min \{1, 1/\lambda \}$. Indeed, if $\Delta^2( u- \tilde w)(R_2)=0$, then by applying the maximum principle to $-\Delta^3(u - \tilde w) =u^p- \tilde w^p>0$ on $B_{R_2} \setminus \overline{B_{R_1}}$ we have $\Delta^2( u- \tilde w) >0$ on $(R_1, R_2)$; on the other hand, if $(u- \tilde w)(R_2)=0$, then $u- \tilde w <0$ on $[0, R_2)$, as $\Delta(u- \tilde w)>0$.
\noindent We now apply iteratively the same reasoning as above to get a sequence (which can be finite or infinite)
\[ 0 =R_0 < R_1 < R_2 < \dots \le \min \{1, 1/\lambda \} \]
such that
\[ u(R_{3k})=\tilde w(R_{3k}), \, \Delta^2 u(R_{3k+1})=\Delta^2 \tilde w(R_{3k+1}), \, \Delta u(R_{3k+2})=\Delta \tilde w(R_{3k+2}), \]
$k \ge 0$, as long as $R_k < \min \{1, 1/\lambda \} $, see \autoref{table3}.
\begin{table}\centering\caption{Sign of $u- \tilde w$, $\Delta(u- \tilde w)$, $\Delta^2( u- \tilde w)$. }
\label{table3}\begin{adjustbox}{max width=\textwidth}
\begin{tabular}{r|ccc}
&$(u-\tilde w)(s)$&$\Delta(u- \tilde w)(s)$&$\Delta^2( u- \tilde w)(s)$\\ \hline
$s=0$&$=0$&$>0$&$>0$\\ \hline
$s \in (0, R_1)$&$>0$&$>0$&$>0$\\ \hline
$s=R_1$&$>0$&$>0$&$=0$\\ \hline
$s \in (R_1, R_2)$&$>0$&$>0$&$<0$\\ \hline
$s=R_2$&$>0$&$=0$&$<0$\\ \hline
$s \in (R_2, R_3)$&$>0$&$<0$&$<0$\\ \hline
$s=R_3$&$=0$&$<0$&$<0$\\ \hline
$s \in (R_3, R_4)$&$<0$&$<0$&$<0$\\ \hline
$\vdots$ &$\vdots$ &$\vdots$ &$\vdots$
\end{tabular}
\end{adjustbox}
\end{table}
\noindent If it is infinite, then we take the limit $R_*=\lim_{i \to \infty} R_i \le \min \{ 1, 1/\lambda \}$ and by continuity and differentiability, it holds
\[ (u- \tilde w )(R_*)=0, \, \Delta( u- \tilde w)(R_*)=0, \, \Delta^2(u- \tilde w)(R_*)=0 \]
and
\[ (u'- \tilde w' )(R_*)=0, \, (\Delta( u- \tilde w))'(R_*)=0, \, (\Delta^2(u- \tilde w))'(R_*)=0. \]
Now, one defines
\[ U(r)=(u(r), -\Delta u(r), \Delta^2 u(r)) \quad 0 \le r \le 1 \]
and
\[ W(r)=(\tilde{w}(r), -\Delta \tilde{w} (r), \Delta^2 \tilde w(r)) \quad 0 \le r \le 1/\lambda. \]
Hence, for any $0 \le r \le R_*$ one has
\begin{multline}\label{UW_1} U(r)- W(r)=\\
\int_{r}^{R_*} \frac{s}{N-2} \left( 1 - \left(\frac{s}{r} \right)^{N-2} \right) (F(U(s))- F(W(s))) \, ds \end{multline}
where we set $F(x,y, z)=(y, z, x^p)$.
Since $p>1$, then $F$ is locally Lipschitz continuous, hence by the Gronwall Lemma, \eqref{UW_1} implies $U=W$ on $[0, R_*]$.
This is in contradiction with the assumption $\Delta^2( u- \tilde w)(0)>0$.
\noindent On the other hand, if the sequence stops at a maximum value $R_k$ then on $(R_{k-1}, R_k=\min \{1, 1/\lambda \} ]$ one of the following is verified, see \autoref{table3}:
\begin{itemize}
\item $u- \tilde w$ and $\Delta(u- \tilde w)$ have the same sign
\item $u- \tilde w$ and $\Delta^2(u- \tilde w)$ have opposite sign.
\end{itemize}
\noindent Let for instance $u- \tilde w >0$ and $\Delta(u- \tilde{w}) \ge 0 $. Then,
\[ 0 < (u- \tilde w)(\min \{1, 1/\lambda \})= \begin{cases}
u(1/\lambda) & \text{if $\lambda >1$}\\
0 & \text{if $\lambda=1$}\\
- \tilde w (1) & \text{if $\lambda <1$}
\end{cases} \]
which implies $\lambda >1$, whereas by Hopf lemma $$0< (u'- \tilde w' )(\min \{1, 1/\lambda \})=(u' - \tilde w')(1/\lambda)=u'(1/\lambda)<0$$ thus a contradiction.
\noindent Let now $u- \tilde w \ge 0$, $\Delta( u- \tilde w) < 0$ and $\Delta^2(u- \tilde w) <0$. Hence $(u - \tilde w)(\min \{1, 1/\lambda \}) \ge 0$, and therefore $\lambda \ge 1$. Moreover, $\Delta u(1/\lambda)=\Delta( u- \tilde w)(1/\lambda) < 0$, whereas by Hopf Lemma and \autoref{lem:shape} $(\Delta u)'(1/\lambda) \le (\Delta( u- \tilde w))'(1/\lambda) < 0$.
\noindent By \autoref{lem:shape}, in particular we have that $\Delta u$ increases until reaches a point $r_0$ and then decreases. Since $\Delta u(1)=0$, $\Delta u$ attains its maximum in $r_0$ and $(\Delta u)'<0$, $\Delta u >0$ on $(r_0, 1)$, whereas $(\Delta u)' >0$ on $(0,r_0)$.
Therefore, we cannot find a point such that $(\Delta u)'<0$ and $\Delta u <0$, hence we reach again a contradiction.
\noindent Since we get to a contradiction in all possible cases, we can not have $\Delta^2( u- \tilde w)(0)>0$ and $\Delta( u- \tilde w)(0) >0$. In a similar fashion, one proves that also the other possible choices for the sign of $\Delta^2( u- \tilde w)(0)$ and $\Delta( u- \tilde w)(0)$ yield a contradiction, hence the claim \eqref{claim} holds.
\noindent Now, in view of \eqref{lambda} and \eqref{claim}, and since by \eqref{ce} and \eqref{ce2}
\begin{multline} u'(0)=\tilde w'(0)= (\Delta^2 u)'(0)= (\Delta^2 \tilde w)'(0)\\
=(\Delta u)'(0)=(\Delta \tilde w)'(0)=0, \end{multline}
for any $r \le \min \{ 1, 1/\lambda \}$ one has
\begin{multline}\label{UW} U(r)- W(r)=\\
\int_0^r \frac{s}{N-2} \left( 1 - \left(\frac{s}{r} \right)^{N-2} \right) (F(W(s))- F(U(s))) \, ds \end{multline}
where $F(x,y, z)=(y, z, x^p)$.
Since $p>1$, then $F$ is locally Lipschitz continuous, hence by the Gronwall Lemma, \eqref{UW} implies $U=W$ on $[0, \min \{1, 1/\lambda\}]$.
\noindent Finally, $0<u(1/\lambda)=\tilde w(1/\lambda)=0$ if $\lambda >1 $, whereas $0=u(1)=\tilde w(1)>0$ if $\lambda <1 $, thus $\lambda=1$ and $u=w$. \qed
\subsection{Proof of \autoref{teo:eqnDir} in the case $\alpha=4$}
Let $u, w$ be two nontrivial solutions to
\begin{equation}\label{eqn4}
\begin{cases}
\Delta^4 u=\abs{u}^p, & \text{ in } B_1\\
u=\frac{\partial u}{\partial \nu}=\frac{\partial^2 u}{\partial \nu^2}=\frac{\partial^3 u}{\partial \nu^3}=0, & \text{ on } \partial B_1.
\end{cases}
\end{equation}
Choose $\lambda, s$ such that $\tilde w(r)=\lambda^s w(\lambda r)$ satisfies \eqref{eqn4} on $B_{1/\lambda}$ and $u(0)=\tilde w(0)$. We want to prove that
\begin{equation}\label{claim4}
\Delta^ku(0)=\Delta^k \tilde w (0), \quad k=0, \dots, 3.
\end{equation}
For instance, assume that
\[ \Delta(u- \tilde w)(0)>0, \quad \Delta^2(u- \tilde w)(0)<0, \quad \Delta^3(u- \tilde w)(0)>0. \]
Considerations below hold with some modifications also for other choices of the above signs.
Let us define
\begin{multline*} R_1= \sup \{ r \le \min \{1, 1/\lambda \} : \, (u- \tilde w)(s) >0, \, \Delta( u- \tilde w)(s) >0, \\
\, \Delta^2(u-\tilde w)(s) < 0, \, \Delta^3(u-\tilde w)(s) >0, s \in (0, r) \} .\end{multline*}
By the maximum principle, $(u- \tilde w)(R_1)>0$ and $\Delta^3(u- \tilde w)(R_1)>0$, whereas $\Delta(u-\tilde w)(R_1)$ and $\Delta^2(u- \tilde w)(R_1)$ may be $=0$.
If for instance $\Delta(u-\tilde w)(R_1)=0$, then by considering
\begin{multline*} R_2= \sup \{ r \le \min \{1, 1/\lambda \} : \, (u-\tilde w)(s)>0, \, \Delta(u-\tilde w)(s) < 0, \\
\Delta^2(u- \tilde w)(s) <0, \, \Delta^3(u- \tilde w)(s) >0, \, s \in (R_1, r) \} \end{multline*}
we have that $\Delta(u-\tilde w)(R_2)<0$ and $\Delta^3(u- \tilde w)(R_2)>0$, whereas $(u-\tilde w)(R_2)$ and $\Delta^2(u- \tilde w)(R_2)$ may be $=0$. We now iterate to get a sequence $\{ R_j \}$ (finite or infinite) such that for any $j$ one or two among $(u- \tilde w)(R_j), \Delta(u-\tilde w)(R_j), \Delta^2(u- \tilde w)(R_j), \Delta^3(u- \tilde w)(R_j)$ is $=0$.
\noindent If $\{ R_j \}$ is infinite, then we reach a contradiction as in \autoref{sec:3} by applying the Gronwall Lemma with $F(x, y, z, w)=(y, z, w, x^p)$. Let us assume that $\{ R_j \}$ is finite.
We want to exclude the possibility that on $(R_j, R_{j+1})$ for some $j$ we have
\[ (u- \tilde w)<0, \, \Delta(u-\tilde w)>0, \, \Delta^2(u-\tilde w)<0, \, \Delta^3(u-\tilde w)>0 \]
(or opposite signs).
In order for this to happen, since in $(0, R_1)$
\[ (u- \tilde w)>0, \, \Delta(u-\tilde w)>0, \, \Delta^2(u-\tilde w)<0, \, \Delta^3(u-\tilde w)>0 \]
we need that $(u-\tilde w)(R_k)=0$ for an odd number of $k \le j$, $\Delta(u-\tilde w)(R_k)=0$ for an even number of $k \le j$, $ \Delta^2(u-\tilde w)(R_k)=0$ for an even number of $k \le j$, and $ \Delta^3(u-\tilde w)(R_k)=0$ for an even number of $k \le j$.
However, let us assume that the number of $k\le j$ such that $(u-\tilde w)(R_k)=0$ is $n$. Then, the number of zeros of $\Delta(u-\tilde w)$ must be $\ge n$, since $u-\tilde w$ can be $0$ only if $\Delta(u-\tilde w)$ has been $=0$ before.
There are three possible cases:
\begin{enumerate}
\item The number of zeros of $\Delta(u-\tilde w)$ is $n$;
\item The number of zeros of $\Delta(u-\tilde w)$ is $n+1$ (if we stop after a zero of $\Delta(u-\tilde w)$ and before $(u-\tilde w)$ vanishes again);
\item The number of zeros of $\Delta(u-\tilde w)$ is equal to $n+2$. This last case happens when $\Delta(u-\tilde w)$ vanishes twice in a row, without $(u-\tilde w)$ vanishing in between.
Notice that such a situation may happen just once, since at the last step the four columns turn out to have the same sign and hence cannot be $0$ again, see a model case in \autoref{table4}.
\end{enumerate}
\begin{table}\centering\caption{Sign of $u- \tilde w$, $\Delta(u- \tilde w)$, $\Delta^2(u- \tilde w)$, $\Delta^3(u- \tilde w)$ in a special case. }
\label{table4}\begin{adjustbox}{max width=\textwidth}
\begin{tabular}{r|cccc}
&$(u-\tilde w)(s)$&$\Delta( u- \tilde w)(s)$&$ \Delta^2(u-\tilde w)(s)$&$ \Delta^3(u-\tilde w)(s)$\\ \hline
$\vdots$ &$\vdots$ &$\vdots$ &$\vdots$ &$\vdots$\\ \hline
$s \in (R_{j}, R_{j+1})$&$<0$&$<0$&$>0$&$>0$\\ \hline
$s=R_{j+1}$&$<0$&$=0$&$>0$&$>0$\\ \hline
$s \in (R_{j+1}, R_{j+2})$&$<0$&$>0$&$>0$&$>0$\\ \hline
$s=R_{j+2}$&$<0$&$>0$&$>0$&$=0$\\ \hline
$s \in (R_{j+2}, R_{j+3})$&$<0$&$>0$&$>0$&$<0$\\ \hline
$s=R_{j+3}$&$<0$&$>0$&$=0$&$<0$\\ \hline
$s \in (R_{j+3}, R_{j+4})$&$<0$&$>0$&$<0$&$<0$\\ \hline
$s=R_{j+4}$&$<0$&$=0$&$<0$&$<0$\\ \hline
$s \in (R_{j+4}, \min \{1, 1/\lambda \})$&$<0$&$<0$&$<0$&$<0$\\ \hline
\end{tabular}
\end{adjustbox}
\end{table}
\noindent Assume $n$ odd. In order to have an even number of zeros of $\Delta(u-\tilde w)$ we have to consider the second case, namely the number of zeros of $\Delta(u-\tilde w)$ must be $n+1$.
Now, $\Delta(u-\tilde w)$ might be zero in $R_1$ even if $ \Delta^2(u-\tilde w)$ has not vanished yet. Hence the number of zeros of $ \Delta^2(u-\tilde w)$ can be $n$, $n+1$ or $n+2$. Recall we need that $\Delta^2(u- \tilde w)$ has an even number of zeros, and that $n$ is odd, hence we conclude that $\Delta^2(u- \tilde w)$ has $n+1$ zeros. We deduce as above that $ \Delta^3(u-\tilde w)$ can have $n$, $n+1$ or $n+2$ zeros, and in turn $n+1$ since their number has to be even. However, this implies that $u-\tilde w$ should have at least $n+1$ zeros. This is a contradiction, since the number of zeros of $u-\tilde w$ is $n$ by assumption.
\noindent As a consequence, we conclude that the following configuration is not possible
\[ (u- \tilde w)<0, \, \Delta(u-\tilde w)>0, \, \Delta^2(u-\tilde w)<0, \, \Delta^3(u-\tilde w)>0, \]
and the same holds true having opposite signs.
Therefore, one of the following (or reversed) is verified on $(R_k, \min \{1, 1/\lambda \})$:
\begin{itemize}
\item $(u- \tilde w)>0$, $\Delta(u-\tilde w)>0$;
\item $(u- \tilde w)>0$, $\Delta(u-\tilde w)<0$, $\Delta^2(u-\tilde w)<0$;
\item $(u- \tilde w)>0$, $\Delta(u-\tilde w)<0$, $\Delta^2(u-\tilde w)>0$, $\Delta^3(u-\tilde w)>0$.
\end{itemize}
\noindent By \autoref{lem:shape}, $\Delta^3 u$ is increasing. Moreover, $\Delta^3 u(0) <0$, and $\Delta^2 u$ is first positive and decreasing, then negative, reaches its minimum and then increases to the positive value $\Delta^2 u(1)$. As a consequence, $\Delta u(0)<0$; $\Delta u$ then increases, reaches a positive maximum value and then decreases to 0.
\noindent Assume that in the last interval the following holds
\[ (u- \tilde w)>0, \, \Delta(u-\tilde w)>0. \]
If both the first and the second column have $n$ zeros, then we apply the Hopf lemma and we obtain $0>u'(1/\lambda)=(u'-\tilde w')(1/\lambda)>0$, a contradiction. Otherwise, it means that the second column has $n+2$ zeros, which in turn gives that the third column has $n+1$ zeros, and the last one has $n$ zeros, thus $ \Delta^2(u-\tilde w)>0$ and $\Delta^3(u-\tilde w)>0$, see \autoref{table4}. Then, by applying Hopf lemma,
\[ 0<(\Delta^2 (u- \tilde w))'(1/\lambda)\le (\Delta^2 u)'(1/\lambda) \]
as $(\Delta^2 \tilde w)'(1/\lambda)\ge 0$, and $0<\Delta^2 (u-\tilde w)(1/\lambda)< \Delta^2 u(1/\lambda)$. Moreover, $\Delta u(1/\lambda)=\Delta(u- \tilde w)(1/\lambda)>0$ and $(\Delta u)'(1/\lambda)=(\Delta (u-\tilde w))'(1/\lambda)>0$. However, by \autoref{lem:shape}, there does not exist a point such that $\Delta^2 u>0$, $(\Delta^2 u)'>0$, $\Delta u>0$ and $(\Delta u)'>0$.
\noindent Assume that
\[ (u- \tilde w)>0, \, \Delta(u-\tilde w)<0, \, \Delta^2(u-\tilde w)<0. \]
Then $\lambda >1$, $0>\Delta(u-\tilde w)(1/\lambda)=\Delta u(1/\lambda)$. If $\Delta^2(u-\tilde w)$ does not change sign after the last zero of $\Delta(u-\tilde w)$, then we can apply Hopf to get $(\Delta u)'(1/\lambda)=(\Delta(u-\tilde w))'(1/\lambda)<0$.
However, by \autoref{lem:shape} there cannot exist a point at which $\Delta u<0$ and $(\Delta u)'<0$.
If we cannot apply Hopf, then it means that the third column has $n+2$ zeros, which is not possible.
\noindent Assume finally that
\[ (u- \tilde w)>0, \, \Delta(u-\tilde w)<0, \, \Delta^2(u-\tilde w)>0, \, \Delta^3(u-\tilde w)>0, \]
Again $\lambda >1$ and $0>\Delta u(1/\lambda)$. Moreover, $0<\Delta^2 (u-\tilde w)(1/\lambda)< \Delta^2 u(1/\lambda)$ and by Hopf
\[ 0<(\Delta^2 (u- \tilde w))'(1/\lambda)\le (\Delta^2 u)'(1/\lambda) \]
as $(\Delta^2 \tilde w)'(1/\lambda) \ge 0$ by \autoref{lem:shape}.
However, such a point cannot exist, hence we have a contradiction.
As in Section \ref{sec:3}, we conclude that \eqref{claim4} holds, then $u=\tilde w$, which in turn gives $u=w$. \qed
\subsection*{Open problem} Consider $\alpha \ge 5$, and take two different solutions $u, w$. One can naturally parametrize $w$ as $\tilde w(r)=\lambda^s w(\lambda r)$, where $s=\frac{2\alpha}{p-1}$, and $\lambda$ is such that $\tilde w(0)=u(0)$. Again, it is easy to prove that the uniqueness result follows once we prove that $\Delta^k(u- \tilde w)(0)=0$. One builds a table as above, and gets a sequence $\{ R_j \}$. If it is infinite, then one extends considerations above choosing a suitable $F$ to apply Gronwall. The main difficulty turns out to be the proof of the contradiction in the finite case, equivalently, the extension of the following lemma to $\alpha \ge 5$.
\begin{lemma}
Let $2 \le \alpha \le 4$.
Then the following configuration:
\[ (-\Delta)^k (u-\tilde w)<0, \, k=0, \dots, \bar{k} \]
and
\[ (-\Delta)^{\bar{k}+1} (u-\tilde w)>0, \]
for some $\bar{k}$, cannot occur at the last step.
\end{lemma}
As a consequence we have
\begin{lemma}\label{lem:zeros}
Let $2 \le \alpha \le 4$. Assume that $u-\tilde w$ has $n$ zeros and that $u(0)=\tilde w(0)$ and
\[ (-\Delta)^k (u-\tilde w) (0)<0, \, k=1, \dots, \alpha-1 \]
holds. Then $\Delta^{\alpha-1}(u-\tilde w)$ must have at least $n+1$ zeros.
\end{lemma}
Indeed, if not, then at least two consecutive columns have the same sign, and we get a contradiction.
\begin{remark}\label{rmk:zeros}
One can prove in the same way as \autoref{lem:zeros} that, if $\alpha \le 4$ and
\[ (-\Delta)^k (u-\tilde w)(0) <0, \, k=0, \dots, \alpha-1 \]
holds, and $u-\tilde w$ has $n$ zeros, then $\Delta^{\alpha-1}(u-\tilde w)$ must have at least $n$ zeros. This will be useful in the next Section.
\end{remark}
\section{Proof of Theorem \ref{teo:sysDir}}
\subsection{Existence}
Next we extend to system \eqref{sysDir} the existence results obtained in \cite{Schiera18} in the case of systems of two equations, see also \cite{AziziehClementMitidieri02} for $p$-Laplacian systems. In what follows we recall the main steps of the proof, and the changes required to treat the case in which one has $m >2$ equations.
\noindent \textit{Step 1}. An auxiliary system. If $\prod_{j=1}^m p_j>1$, and if the only classical solution to \eqref{sysDir} is the trivial one, then there exists an unbounded sequence of solutions $(t_n, u_{1,n}, \dots, u_{m,n})$ to the following
\[
\begin{cases}
\begin{aligned}
&(-\Delta)^{\alpha_j} u_{j,n}=(t_n^{\theta_j}+\abs{u_{j+1,n}})^{p_j},\, j =1, \dots, m-1 \\
&(-\Delta)^{\alpha_m} u_{m,n}=(t_n^{\theta_m}+\abs{u_{1,n}})^{p_m}
\end{aligned} & \text{ in } B_1 \subset \mathbb{R}^N, \\
\frac{\partial^k u_{j,n}}{\partial \nu^k}=0, \, k=0, \dots, \alpha_j-1, \, j =1, \dots, m & \text{ on } \partial B_1,
\end{cases}
\]
where $N > 2 \max \{ \alpha_j \}_j $ and $\theta_j$ are such that
\begin{equation}\label{theta} \theta_j p_j > \theta_{j-1}, \, \forall j. \end{equation}
For instance, one can call
\[ a_j=1+j (\prod_{k=1}^m p_k - 1) \]
and choose
\[ \theta_j=\frac{a_{j-1}}{\prod_{k=2}^j p_k} \]
for $j=2, \dots, m$, and $\theta_1=1$.
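With this choice one checks directly that \eqref{theta} holds for $j=2, \dots, m$: indeed $\theta_2 p_2=a_1=\prod_{k=1}^m p_k>1=\theta_1$, while for $j \ge 3$ one has $\theta_j p_j=\frac{a_{j-1}}{\prod_{k=2}^{j-1}p_k}>\frac{a_{j-2}}{\prod_{k=2}^{j-1}p_k}=\theta_{j-1}$, since $a_{j-1}-a_{j-2}=\prod_{k=1}^m p_k-1>0$.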
The proof of this step relies on a fixed point lemma due to Azizieh and Cl\'ement \cite[Lemma A.2]{AziziehClement02}, see \cite[Proposition 1]{Schiera18} for the case $m=2$.
\noindent \textit{Step 2}. Blow up analysis. We can assume without loss of generality that
\[ \frac{t_n^{\theta_j}}{\norm{u_{j,n}}_{\infty}} \to 0, \, j=1,\dots, m, \]
as follows by choosing
\[ \tilde u_{j,n}=\frac{u_{j,n}}{t_n^{\theta_{j-1}}}, \, \, \lambda_{j,n}=t_n^{\theta_jp_j - \theta_{j-1}} \]
and applying the comparison principle. Here we exploit \eqref{theta} to have $\lambda_{j,n} \to \infty$.
Moreover, assume that the maximum of $u_{k,n}$ is attained in $0$ for any $k$.
We define
\[ \hat u_{j,n} (y)=\frac{u_{j,n} (C_n^{-1} y)}{A_{j,n}}, \]
where
\[ A_{j,n}=C_n^{\sigma_j}, \]
\[ C_n=\sum_{j} \norm{u_{j,n}}_{\infty}^{1/\sigma_j} \]
and moreover
\[ \sigma_1=\frac{2\sum_{k=1}^{m} \alpha_k \prod_{j=1}^{k-1} p_j}{\prod_{j=1}^m p_j -1}, \, \sigma_j=-2\alpha_j+p_j\sigma_{j+1}. \]
This by a limit procedure gives a nontrivial solution to
\begin{equation}\label{sysRN}
\begin{cases}
(-\Delta)^{\alpha_j} u_{j}=\abs{u_{j+1}}^{p_j},\, j =1, \dots, m-1 \, &\text{ on } \mathbb{R}^N \\
(-\Delta)^{\alpha_m} u_{m}=\abs{u_{1}}^{p_m} \, &\text{ on } \mathbb{R}^N
\end{cases} \end{equation}
due to our choice of the parameters $A_{j,n}$ and $\sigma_j$.
This limit solution is nontrivial since
\[ \sum_{i \ne k} \norm{u_{i,n}}_{\infty}^{1/\sigma_i} \le \norm{u_{k,n}}_{\infty}^{1/\sigma_k}(m-1) \]
for at least one value $k$. Indeed, if not, then upon summation
\[ (m-1)\sum \norm{u_{i,n}}_{\infty}^{1/\sigma_i} > (m-1)\sum \norm{u_{i,n}}_{\infty}^{1/\sigma_i}, \]
a contradiction.
Assume for instance that $k=1$ and call
\[ b_n=\frac{\sum_{i \ne 1} \norm{u_{i,n}}_{\infty}^{1/\sigma_i}}{\norm{u_{1,n}}_{\infty}^{1/\sigma_1}} \le m-1. \]
Then,
\[ \big(\hat u_{1,n}(0)\big)^{1/\sigma_1}=\frac{\norm{u_{1,n}}_{\infty}^{1/\sigma_1}}{\sum_i\norm{u_{i,n}}_{\infty}^{1/\sigma_i}}= \frac{1}{1+b_n} \ge \frac{1}{m}, \]
and in particular the limit is nontrivial.
\noindent \textit{Step 3}. We prove that the maximum of $u_{k,n}$ is attained in $0$ for any $k$, as the following Lemma shows.
\begin{lemma}\label{lemma2}
Let $(u_1, \dots, u_m)$ be a nontrivial solution to
\[
\begin{cases}
\begin{aligned}
&(-\Delta)^{\alpha_j} u_{j}=f_j(u_{j+1}),\, j =1, \dots, m-1 \\
&(-\Delta)^{\alpha_m} u_{m}=f_m(u_1)
\end{aligned} & \text{ in } B_1 \subset \mathbb{R}^N, \\
\frac{\partial^k u_j}{\partial \nu^k}=0, \, k=0, \dots, \alpha_j-1, \, j =1, \dots, m & \text{ on }\partial B_1,
\end{cases}
\]
where $N > 2 \max \{ \alpha_j \}_j$ and $f_j : [0, \infty) \to \mathbb{R}$ are continuous, positive and non decreasing.
Then $u_1, \dots, u_m$ are radially symmetric and strictly decreasing in the radial variable.
\end{lemma}
\noindent The proof is analogous to that of \cite[Proposition 3]{Schiera18}.
\noindent \textit{Step 4}. Finally, we notice that \cite[Theorem 19.1]{MitidieriPohozaev01} can be extended easily to the case of $m>2$ equations as follows.
\begin{theorem}
Let $p_j >1$, $\alpha_j \in \mathbb{N}$, $j=1,\dots, m$, and assume that there exists $l \in \{ 1, \dots, m \}$ such that
\[
N+2 \sum_{k=1}^m \alpha_{k+l} \prod_{j=0}^{k-1} p_{j+l} - N \prod_{j=1}^m p_j \ge 0,
\]
where we impose $p_{k+m}=p_k$ and $\alpha_{k+m}=\alpha_k$ for any $k=1, \dots, m$. Assume further that $(u_1, \dots, u_m)$ is a weak solution to
\eqref{sysRN}.
Then $u_j=0$ for any $j=1,\dots, m$.
\end{theorem}
\subsection{Uniqueness}
\noindent We first give the proof in the case $m=2$ and then we proceed inductively. System \eqref{sysDir} reads as follows
\[ \begin{cases}
\begin{aligned}
(-\Delta)^{\alpha} u=\abs{v}^q \\
(-\Delta)^{\beta} v= \abs{u}^p
\end{aligned} &\text{ in } B_1, \\
\frac{\partial^{r} u}{\partial \nu^{r}}=0, \, r=0, \dots, \alpha-1, & \text{ on }\partial B_1, \\
\frac{\partial^{r} v}{\partial \nu^{r}}=0, \, r=0, \dots, \beta-1, & \text{ on }\partial B_1.
\end{cases} \]
\noindent Assume without loss of generality that $\alpha \le \beta$.
We take two nontrivial solutions $(u, v)$ and $(w, z)$, and the parametrization
\[ \tilde w(r)=\lambda^s w(\lambda r), \, \tilde z(r)=\lambda^t z(\lambda r) \]
where $t=\frac{2\alpha p + 2\beta}{pq-1}$, $s=\frac{2\beta q + 2\alpha}{pq-1}$. Notice that $s, t$ are well defined if $pq \ne 1$.
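For completeness, we note that this scaling preserves the system: since
\[ (-\Delta)^{\alpha} \tilde w(r)=\lambda^{s+2\alpha}\left((-\Delta)^{\alpha} w\right)(\lambda r)=\lambda^{s+2\alpha-tq}\abs{\tilde z(r)}^{q}, \qquad (-\Delta)^{\beta} \tilde z(r)=\lambda^{t+2\beta-sp}\abs{\tilde w(r)}^{p}, \]
the pair $(\tilde w, \tilde z)$ solves the same equations as $(w, z)$ exactly when $s+2\alpha=tq$ and $t+2\beta=sp$, which is what the above values of $s$ and $t$ give.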
Moreover we build the same table as in the previous sections with columns
\[ u-\tilde w, \, \Delta(u-\tilde w), \dots, \Delta^{\alpha-1} (u-\tilde w), \, v-\tilde z, \dots, \Delta^{\beta -1} (v-\tilde z) \]
if $\alpha$ is even, whereas
\[ u-\tilde w, \, \Delta(u-\tilde w), \dots, \Delta^{\alpha-1} (u-\tilde w), \, -v+\tilde z, \dots, \Delta^{\beta -1} (-v+\tilde z)\]
if $\alpha$ is odd.
\noindent Assume that (for even $\alpha$, and similarly for odd $\alpha$)
\begin{equation}\label{initialconf}
\begin{split}
(u- \tilde w)(0)=0, \, (-\Delta)^{k}(u- \tilde w)(0)<0, \, k=1, \dots, \alpha-1, \\
(-\Delta)^{k}(v- \tilde z)(0)<0, \, k=0, \dots, \beta-1
\end{split}
\end{equation}
is the initial configuration of the columns. We obtain a sequence $\{ R_j \}$ as in \autoref{sec:eqn} and assume that this is finite.
\noindent Let $n$ be the number of zeros of the first column. Then by \autoref{lem:zeros}, the $\alpha$-th column has at least $n+1$ zeros, and as a consequence the next one must have $n$, $n+1$ or more zeros. Knowing that the $(\alpha+\beta)$-th column has $n$ or $n-1$ zeros, one has (again by \autoref{lem:zeros}, see \autoref{rmk:zeros}) that the $(\alpha +1)$-th column cannot have strictly more than $n$ zeros. Hence, it has $n$ zeros. However, $(v-\tilde z)(s)$ has opposite sign with respect to $(u- \tilde w)(s)$ in $(0, R_1)$, hence they have opposite sign in the last interval as well. Therefore, $(u-\tilde w)(\min \{1, 1/\lambda \})>0$ implies $\lambda >1$, whereas $(v-\tilde z)(\min \{1, 1/\lambda \})<0$ gives $\lambda <1$, a contradiction.
\noindent If $\alpha=1$, then as above we prove that the column $\tilde z- v$ cannot have strictly more than $n$ zeros. However, it must have at least $n$ zeros, as $u(0)=\tilde w(0)$, thus exactly $n$ zeros. Again, we have a contradiction.
\noindent Let us assume that for another initial configuration $\mathcal{A}$ we do not reach a contradiction as above. In $(0, R_1)$ the signs of the columns from the second to the last one are the same as in $\mathcal{A}$, and the first column must have the same sign as the second one, due to the maximum principle and the assumption $u(0)=\tilde w(0)$. Let us call $\mathcal{A}_1$ the configuration in $(0, R_1)$, given $\mathcal{A}$ in $0$.
It turns out that one can reach the configuration $\mathcal{A}_1$ starting from \eqref{initialconf}.
Indeed, given \eqref{initialconf}, all the columns from the second to the second-to-last may vanish at $R_1$. It is then sufficient to impose that the columns whose signs differ from those in $\mathcal{A}_1$ vanish at $R_1$. If the first column has a different sign, then it is enough to note that, once the second column has changed sign, the first column may vanish and change sign as well. Analogously, one can change the sign of the last column once the first one has vanished. See \autoref{tableconf} for an example.
\begin{table}\centering\caption{Passing from \eqref{initialconf} to the configuration $u-\tilde w<0$, $\Delta(u-\tilde w)<0$, $(v- \tilde z)>0$, $\Delta (v- \tilde z)>0$, $\Delta^2(v-\tilde z)>0$. }
\label{tableconf}
\begin{adjustbox}{max width=\textwidth}
\begin{tabular}{r|ccccc}
&$(u-\tilde w)(s)$&$\Delta( u- \tilde w)(s)$&$ (v- \tilde z)(s)$&$ \Delta (v- \tilde z)(s)$&$\Delta^2(v-\tilde z)(s)$\\ \hline
$s=0$&$=0$&$>0$&$<0$&$>0$&$<0$\\ \hline
$s \in (0, R_1)$&$>0$&$>0$&$<0$&$>0$&$<0$\\ \hline
$s=R_1$&$>0$&$=0$&$=0$&$>0$&$<0$\\ \hline
$s \in (R_1, R_2)$&$>0$&$<0$&$>0$&$>0$&$<0$\\ \hline
$s=R_2$&$=0$&$<0$&$>0$&$>0$&$<0$\\ \hline
$s \in (R_2, R_3)$&$<0$&$<0$&$>0$&$>0$&$<0$\\ \hline
$s=R_3$&$<0$&$<0$&$>0$&$>0$&$=0$\\ \hline
$s \in (R_3, R_4)$&$<0$&$<0$&$>0$&$>0$&$>0$\\ \hline
$\vdots$ &$\vdots$ &$\vdots$ &$\vdots$ &$\vdots$ &$\vdots$
\end{tabular}
\end{adjustbox}
\end{table}
Therefore, if from some other initial configuration $\mathcal{A}$ we did not reach a contradiction, then this would be possible starting from \eqref{initialconf} as well.
\noindent We have thus proved that the sequence $\{R_j\}$ has to be infinite. However, in this case we reach a contradiction as in the previous sections, as we apply Gronwall with
\[ U(r)=(u(r), -\Delta u(r), \dots, (-\Delta)^{\alpha-1} u(r), v(r), \dots, (-\Delta)^{\beta-1} v(r))\]
for $ 0 \le r \le 1$ and
\[ W(r)=(\tilde{w}(r), -\Delta \tilde w(r), \dots, (-\Delta)^{\alpha-1} \tilde w(r), \tilde z(r), \dots, (-\Delta)^{\beta-1} \tilde z(r)) \]
for $0 \le r \le 1/\lambda$ and
$$F(x_1, x_2, \dots, x_{\alpha}, y_1, \dots, y_{\beta})=(x_2, x_3, \dots, x_{\alpha}, y_1^q, y_2, \dots, y_{\beta}, x_1^p)\ .$$
\noindent This proves that at $0$ all the columns vanish. Therefore, again by Gronwall's Lemma, we have $u=\tilde w$ and $v=\tilde z$, which in turn gives $(u,v)=(w, z)$.
\noindent The proof in the case $m>2$ follows by induction, once we parametrize a second solution $(w_1, \dots, w_m)$ as follows
\[ \tilde w_i(r)=\lambda^{s_i} w_i(\lambda r), \, i=1, \dots, m, \]
where $\lambda$ is chosen such that $\tilde w_1(0)=u_1(0)$, whereas
\[ s_1=\frac{2\sum_{j=1}^{m} \alpha_j \prod_{k=1}^{j-1} p_k}{\prod_{k=1}^m p_k-1} \]
and
\[ s_{i+1}=\frac{s_i+2\alpha_i}{p_i}, \, i=1, \dots, m-1. \]
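As in the case $m=2$, a quick check (not needed in the sequel) shows that these are the natural exponents: requiring that $(\tilde w_1, \dots, \tilde w_m)$ solves the same equations as $(w_1, \dots, w_m)$ forces $p_i s_{i+1}=s_i+2\alpha_i$, and imposing the periodicity $s_{m+1}=s_1$ in this recursion gives
\[ s_1\Big(\prod_{k=1}^{m} p_k-1\Big)=2\sum_{k=1}^{m}\alpha_k\prod_{j=1}^{k-1}p_j, \]
which is the formula for $s_1$ stated above.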
Assuming as induction hypothesis that the last column corresponding to the first $m$ equations cannot have fewer zeros than the first one, and taking $m=1$ as the base case (see \autoref{lem:zeros}), one proves that the property holds for $m+1$ as well, by the same arguments as above.
More precisely, the induction hypothesis implies that the last column corresponding to the first $m$ equations must have at least one zero more than the first one. By exploiting \autoref{rmk:zeros}, and knowing that the last column has at most $n$ zeros, one proves as above that $u_1-\tilde w_1$ and $u_{\alpha}-\tilde w_{\alpha}$ must have opposite signs at the last step, which gives the contradiction. As for the case of $\{ R_j \}$ infinite, the contradiction follows by applying Gronwall's lemma.
\noindent The proof of Theorem \ref{teo:sysDir} is now complete.
\begin{remark}
Notice that the restriction $\alpha_j \le 4$ is necessary as we need to exploit \autoref{lem:zeros}. Actually, if we could extend \autoref{lem:zeros} to higher order operators, then it would be possible to extend \autoref{teo:sysDir} to more general operators as well.
\end{remark}
\subsection{Some natural boundary conditions: proof of Corollary \ref{teo:Var}}
\noindent Notice that \eqref{sys:Var} can be written as a system of $\sum \lceil \alpha_j/2 \rceil$ equations with Dirichlet boundary conditions. Let for instance $\alpha$ be even, and set $u_k=\Delta^{2(k-1)} u$ for $k=1, \dots, \alpha/2$. Then
\[ \begin{cases}
(-\Delta)^{\alpha} u=\abs{u}^p, \, & \text{ in } B_1\\
\Delta^{2k} u=0, \, 2k \le \alpha-1, & \text{ on }\partial B_1 \\
\frac{\partial}{\partial \nu} \Delta^{2k} u= 0, \, 2k+1 \le \alpha-1, & \text{ on }\partial B_1
\end{cases} \]
reads as
\[ \begin{cases}
\begin{aligned}
&\Delta^2 u_j=\abs{u_{j+1}},\, j =1, \dots, \alpha/2-1, \\
&\Delta^2 u_{\alpha/2}=\abs{u_{1}}^{p},
\end{aligned} & \text{ in }B_1\\
u_j=\frac{\partial u_j}{\partial \nu}=0, \, j =1, \dots, \alpha/2 &\text{ on } \partial B_1,
\end{cases} \]
which is a particular case of \eqref{sysDir}.
\noindent Let us point out that the boundary conditions in \eqref{sys:Var} satisfy the complementing condition \cite{AgmonDouglisNirenberg59}, which here reads as follows
\begin{definition}\label{def:ADN}
We say that the complementing condition holds for
\[ \begin{cases}
(-\Delta)^{\alpha} u=\abs{u}^p, \, & \text{ in }B_1\\
B_j(x, D) u=h_j, \, \text{ for } j=1, \dots, \alpha, & \text{ on }\partial B_1
\end{cases} \]
if, for any nontrivial tangent vector $\tau (x)$, the polynomials in $t$, $B_j'(x; \tau + t \nu)$, are linearly independent modulo the polynomial $(t-i \abs{\tau})^{\alpha}$, where $B_j'$ denotes the highest order part of $B_j$.
\end{definition}
\noindent Consider the particular case $\alpha=4$ and let $\abs{\tau}=1$. Then $B_1'(x, \tau + t \nu)=1$, $B_2'(x, \tau + t \nu)=t$, $B_3'(x, \tau + t \nu)=t^4+1$ and $B_4'(x, \tau + t \nu)=t^5+t$. Dividing these polynomials by $(t- i)^4$, we get $1$, $t$, $4it^3+6t^2-4it$ and $-10t^3+20it^2+16t-4i$ as remainders, which are linearly independent.
The general case follows from the system \eqref{sysDir}. Indeed, one can extend \autoref{def:ADN} to the case of systems and prove that a system of $m$ equations with Dirichlet boundary conditions satisfies this extended condition, see \cite{AgmonDouglisNirenberg64}.
\subsection{Navier's boundary conditions: proof of Corollary \ref{teo:Navier}}
Recall that any nontrivial solution to \eqref{sys:Nav} is positive, radially symmetric and strictly decreasing in the radial variable, see \cite[Theorem 7.3]{GazzolaGrunauSweers10}. This reduces the problem to system \eqref{sysDir} and thus \autoref{teo:Navier} follows from \autoref{teo:sysDir}.
Let for instance $m=1$. Then
\[ \begin{cases}
(-\Delta)^{\alpha} u=\abs{u}^p, \, & \text{ in } B_1\\
\Delta^k u=0, \, k \le \alpha-1, & \text{ on } \partial B_1
\end{cases} \]
becomes
\[ \begin{cases}
\begin{aligned}
&-\Delta u_j=\abs{u_{j+1}},\, j =1, \dots, \alpha-1, \\
&-\Delta u_{\alpha}=\abs{u_{1}}^{p},
\end{aligned} & \text{ in } B_1 \\
u_j=0, \, j =1, \dots, \alpha & \text{ on } \partial B_1,
\end{cases} \]
where $u_j=(-\Delta)^{j-1} u$.
\begin{bibdiv}
\begin{biblist}
\bib{AgmonDouglisNirenberg59}{article}{
Author = {Agmon, S.},
Author={Douglis, A.},
Author={Nirenberg, L.},
Journal = {Comm.~Pure Appl.~Math.},
Number = {4},
Pages = {623--727},
Title = {Estimates near the boundary for solutions of elliptic partial differential equations satisfying general boundary conditions. I. },
Volume = {12},
Year = {1959}}
\bib{AgmonDouglisNirenberg64}{article}{
Author = {Agmon, S.},
Author={Douglis, A.},
Author={Nirenberg, L.},
Journal = {Comm.~Pure Appl.~Math.},
Number = {1},
Pages = {35--92},
Title = {Estimates near the boundary for solutions of elliptic partial differential equations satisfying general boundary conditions. II. },
Volume = {17},
Year = {1964}}
\bib{AziziehClement02}{article}{
Author = {Azizieh, C.},
Author={Cl{\'e}ment, P.},
Journal = {J.~Differential Equations},
Number = {1},
Pages = {213--245},
Title = {A priori estimates and continuation methods for positive solutions of {$p$-Laplace} equations},
Volume = {179},
Year = {2002}}
\bib{AziziehClementMitidieri02}{article}{
Author = {Azizieh, C.},
Author = {Cl{\'e}ment, P.},
Author = {Mitidieri, E.},
Journal = {J.~Differential Equations},
Number = {2},
Pages = {422--442},
Title = {Existence and a priori estimates for positive solutions of $p$-Laplace systems},
Volume = {184},
Year = {2002}}
\bib{CASTA}{article}{
Author = {Cassani, D.},
Author ={Tarsia, A.},
Journal = {In Preparation},
Title = {Maximum principle for higher order operators in general domains},
}
\bib{ClementFleckingerMitidierideThelin00}{article}{
Author = {Cl{\'e}ment, P.},
Author = {Fleckinger, J.},
Author = {Mitidieri, E.},
Author = {de Th{\'e}lin, F.},
Journal = {J. Differential Equations},
Number = {2},
Pages = {455--477},
Title = {Existence of positive solutions for a nonvariational quasilinear elliptic system},
Volume = {166},
Year = {2000}}
\bib{clement_felmer_mitidieri}{article}{
Author = {Cl{\'e}ment, P.},
Author = {Felmer, P.L.},
Author = {Mitidieri, E.},
Journal = {Scuola Norm. Sup. Pisa Cl. Sci.},
Pages = {367--393},
Title = {Homoclinic orbits for a class of infinite-dimensional Hamiltonian systems},
Volume = {24},
Year = {1997}}
\bib{CuiWangShi07}{article}{
Author = {Cui, R.},
Author ={Wang, Y.},
Author ={Shi, J.},
Journal = {Nonlinear Anal.},
Number = {6},
Pages = {1710--1714},
Title = {Uniqueness of the positive solution for a class of semilinear elliptic systems},
Volume = {67},
Year = {2007}}
\bib{Dalmasso95}{article}{
Author = {Dalmasso, R.},
Journal = {Proc. Amer. Math. Soc.},
Number = {4},
Pages = {1177--1183},
Title = {Uniqueness theorems for some fourth-order elliptic equations},
Volume = {123},
Year = {1995}}
\bib{Dalmasso99}{article}{
Author = {Dalmasso, R.},
Journal = {Nonlinear Anal.},
Number = {1},
Pages = {131--137},
Title = {Existence and uniqueness results for polyharmonic equations},
Volume = {36},
Year = {1999}}
\bib{Dalmasso04}{article}{
Author = {Dalmasso, R.},
Journal = {Nonlinear Anal.},
Number = {3},
Pages = {341--348},
Title = {Existence and uniqueness of positive radial solutions for the Lane-Emden system},
Volume = {57},
Year = {2004}}
\bib{FerreroGazzolaWeth07}{article}{
Author = {Ferrero, A.},
Author = {Gazzola, F.},
Author = {Weth, T.},
Journal = {Ann. Mat. Pura Appl. (4)},
Number = {4},
Pages = {565--578},
Title = {Positivity, symmetry and uniqueness for minimizers of second-order Sobolev inequalities},
Volume = {186},
Year = {2007}}
\bib{GazzolaGrunauSweers10}{book}{
Author = {Gazzola, F.},
Author ={Grunau, H.C.},
Author ={Sweers, G.},
Publisher = {Springer-Verlag, Berlin},
Title = {Polyharmonic boundary value problems. Positivity preserving and nonlinear higher order elliptic equations in bounded domains},
Year = {2010}}
\bib{GidasNiNirenberg79}{article}{
Author = {Gidas, B.},
Author ={Ni, W.M.},
Author ={Nirenberg, L.},
Journal = {Comm. Math. Phys.},
Pages = {209--243},
Title = {Symmetry and related properties via the maximum principle},
Volume = {68},
Year = {1979}}
\bib{liu_guo_zhang}{article}{
Author = {Liu, J.},
Author = {Guo, Y.},
Author = {Zhang, Y.},
Journal = {J. Partial Differential Equations},
Title = {Existence of positive entire solutions for polyharmonic equations and systems},
Pages = {256--270},
Volume = {19},
Year = {2006}
}
\bib{MitidieriPohozaev01}{article}{
Author = {Mitidieri, E.},
Author= {Pohozaev, S.I.},
Journal = {Proc.~Steklov Inst.~Math.},
Number = {3},
Pages = {1--362},
Title = {A priori estimates and the absence of solutions of nonlinear partial differential equations and inequalities},
Volume = {234},
Year = {2001}}
\bib{MitidieriPohozaev03}{article}{
Author = {Mitidieri, E.},
Author= {Pohozaev, S.I.},
Journal = {Doklady Mathematics},
Number = {2},
Pages = {159--164},
Title = {The positivity property of solutions of some nonlinear elliptic inequalities in $\mathbb{R}^N$},
Volume = {393},
Year = {2003}}
\bib{pucci_serrin}{article}{
Author= {Pucci, P.},
Author={Serrin, J.},
Journal={Indiana Univ. Math. J.},
Number={3},
Pages={681--703},
Title={A general variational identity},
Volume={35},
Year={1986}}
\bib{Schiera18}{article}{
Author = {Schiera, D.},
Journal = {Nonlinear Anal.},
Pages = {130--153},
Title = {Existence of solutions to higher order {Lane-Emden} type systems},
Volume = {168},
Year = {2018}}
\bib{Schiera18_2}{article}{
Author = {Schiera, D.},
Journal = {Discrete Contin. Dyn. Syst. A},
Number={10},
Pages = {5145--5161},
Title = {Existence and non-existence results for variational higher order elliptic systems},
Volume = {38},
Year = {2018}}
\bib{Schiera19}{article}{
Author = {Schiera, D.},
Journal = {Ph.D. Thesis - In Preparation},
Title = {Existence, non-existence and uniqueness results for higher order elliptic systems},
Year = {2019}}
\end{biblist}
\end{bibdiv}
\end{document}
\begin{document}
\pagestyle{plain}
\title{Hyperplanes of finite-dimensional normed spaces with the maximal relative projection constant}
\author{Tomasz Kobos}
\subjclass{Primary 41A35, 41A65, 47A30, 52A21}
\keywords{Minimal projection, relative projection constant, finite-dimensional normed space}
\begin{abstract}
The \emph{relative projection constant} $\lambda(Y, X)$ of normed spaces $Y \subset X$ is defined as $\lambda(Y, X) = \inf \{ ||P|| : P \in \mathcal{P}(X, Y) \}$, where $\mathcal{P}(X, Y)$ denotes the set of all continuous projections from $X$ onto $Y$. By the well-known result of Bohnenblust, for every $n$-dimensional normed space $X$ and its subspace $Y$ of codimension $1$ the inequality $\lambda(Y, X) \leq 2 - \frac{2}{n}$ holds. The main goal of the paper is to study the equality case in the theorem of Bohnenblust. We establish an equivalent condition for the equality $\lambda(Y, X) = 2 - \frac{2}{n}$ and present several applications. We prove that every three-dimensional space has a subspace with the projection constant less than $\frac{4}{3} - 0.0007$. This gives a non-trivial upper bound in the problem posed by Bosznay and Garay. In the general case, we give an upper bound for the number of $(n-1)$-dimensional subspaces with the maximal relative projection constant in terms of the number of facets of the unit ball of $X$. As a consequence, every $n$-dimensional normed space $X$ has an $(n-1)$-dimensional subspace $Y$ with $\lambda(Y, X) < 2-\frac{2}{n}$. This contrasts with the separable case, in which it is possible that every hyperplane has the maximal possible projection constant.
\end{abstract}
\maketitle
\section{Introduction}
Let $X$ be a real Banach space and $Y$ its closed subspace. A linear bounded operator $P:X \to Y$ is called a \emph{projection} if $P|_{Y}=\id_{Y} $. By $\mathcal{P}(X, Y)$ we denote the set of all projections from $X$ onto $Y$. The \emph{relative projection constant} of $Y$ is defined as
$$\lambda(Y, X) = \inf \{ ||P|| : P \in \mathcal{P}(X, Y) \}.$$
Moreover, if a projection $P: X \to Y$ satisfies $||P|| = \lambda(Y, X)$ then $P$ is called a \emph{minimal projection}.
Minimal projections have gained considerable attention in the past years. Many authors have studied their properties in the context of functional analysis and approximation theory (see for example \cite{lewickichalm}, \cite{lewickichalm2}, \cite{cheneyinni}, \cite{cheneymorris}, \cite{cheneyfranchetti}, \cite{konig}, \cite{konig2}, \cite{lewicki}). Some of the obtained results are concerned with studying minimal projections in certain classical Banach spaces and some of them are of a more general nature. Results provided in this paper belong to the second class. Our goal is to investigate some general properties of minimal projections in the setting of finite-dimensional real normed spaces.
The problem of giving the upper bound for the relative projection constant in the case of an arbitrary subspace has already been studied quite intensively. One of the most fundamental results in this category is an old theorem of Bohnenblust on projections onto subspaces of codimension $1$ of finite dimensional real normed spaces.
\begin{twr}[Bohnenblust \cite{bohnenblust}]
\label{twbohnenblust}
Let $X$ be a real $n$-dimensional Banach space and let $Y \subset X$ be its $(n-1)$-dimensional subspace. Then $\lambda(Y, X) \leq 2 - \frac{2}{n}$.
\end{twr}
One can easily see that this estimation is optimal: if $X = \ell_{1}^n$ or $X = \ell_{\infty}^n$ and $Y = \ker f$, where $f(x) = x_1 + x_2 + \ldots + x_n$, then $\lambda(Y, X) = 2 - \frac{2}{n}$ (see Theorem \ref{projekcjal1}).
In the context of an arbitrary subspace we have the Kadec-Snobar Theorem:
\begin{twr}[Kadec, Snobar \cite{kadecsnobar}]
Let $X$ be a real $n$-dimensional Banach space and let $Y \subset X$ be its $k$-dimensional subspace. Then $\lambda(Y, X) \leq \min\{ \sqrt{k}, \sqrt{n-k} + 1 \}$.
\end{twr}
This estimation was further improved by several authors, see for example \cite{makai}.
Much less research has been done on the problem of \emph{finding} a subspace with a small projection constant in an arbitrary normed space. There is an old and still unanswered question of Bosznay and Garay.
\begin{problem}[Bosznay, Garay \cite{bosznay}]
\label{bosz}
For an integer $n \geq 3$ determine the value of $\sup_{X} \inf_{Y \subset X} \lambda(Y, X)$, where $X$ is a real $n$-dimensional normed space and $Y \subset X$ is a subspace of dimension at least $2$ and at most $n-1$.
\end{problem}
To our knowledge, the best known estimates in Problem \ref{bosz} are those which hold for an arbitrary subspace $Y$. Even in the three-dimensional setting there seems to be a lack of better bounds. The aim of the paper is to give some results in this direction.
We shall consider the case of projections onto hyperplanes. To shed some light on the question of Bosznay and Garay (and similar ones) we turn our attention to the equality case in the theorem of Bohnenblust. We provide the following characterization.
\begin{twr}
\label{warunek}
Let $X$ be an $n$-dimensional normed space and let $Y = \ker f$, where $f \in S_{X^{\star}}$, be an $(n-1)$-dimensional subspace of $X$. Then $\lambda(Y, X) = 2 - \frac{2}{n}$ if and only if there exist extreme points $x_1, x_2, \ldots, x_n$ of the unit ball of $X$ such that the following conditions are satisfied
\begin{itemize}
\item $f(x_1)=f(x_2) = \ldots = f(x_n),$
\item vectors $x_1, x_2, \ldots, x_n$ are linearly independent,
\item if an arbitrary vector $\mathbb{R}^n \ni x = \sum_{i=1}^{n} w_i x_i $ is written in the basis of $x_i$, the following inequality holds
$$\max_{i=1, 2, \ldots, n} \{ |w_1 + w_2 + \ldots + w_{i-1} - w_i + w_{i+1} + \ldots + w_n | \} \leq ||x||.$$
\end{itemize}
The third condition is equivalent to the fact that for every $1 \leq i \leq n$ the set
$$\{ x_1, x_2, \ldots, x_{i-1}, -x_i, x_{i+1}, \ldots, x_n\}$$
is contained in a facet of the unit ball.
\end{twr}
The proof of this theorem is provided in Section $2$. This equivalent condition has several consequences. Those which hold in an arbitrary dimension are discussed further in Section $2$. For instance, we can easily obtain an upper bound for the number of hyperplanes with the maximal relative projection constant in terms of the number of facets of the unit ball (see Theorem \ref{sciany}). As a consequence, every $n$-dimensional normed space $X$ has an $(n-1)$-dimensional subspace $Y$ with $\lambda(Y, X) < 2 -\frac{2}{n}$. This is a finite-dimensional phenomenon, as in the separable case the situation can be different. We also provide a purely geometric characterization of the equality $\lambda(Y, X) = 2 - \frac{2}{n}$ (see Corollary \ref{charakteryzacjan}). As another application of Theorem \ref{warunek} we observe that every $n$-dimensional normed space $X$ which has an $(n-1)$-dimensional subspace with the maximal possible relative projection constant also has a two-dimensional subspace with the \textbf{minimal} possible relative projection constant (equal to $1$) (see Corollary \ref{maxmin}).
In Section $3$ we take a closer look at the three-dimensional case, in which something more can be said. In this setting, the condition $\lambda(Y, X) = \frac{4}{3}$ seems to be much more restrictive on the unit ball of $X$ than in the general case. This allows us to strengthen some results obtained in the preceding section. In particular, we prove that the maximal possible number of subspaces $Y$ for which the equality $\lambda(Y, X) = \frac{4}{3}$ holds is equal to $4$ (see Theorem \ref{plaszczyzny}). Moreover, from Corollary \ref{maxmin} it follows that every three-dimensional normed space $X$ which possesses a subspace $Y$ with $\lambda(Y, X) = \frac{4}{3}$ also possesses a subspace $Z$ satisfying $\lambda(Z, X)=1$. In Theorem \ref{maxmin3} we provide a stability version of this result, which gives some improvement in the three-dimensional case of Problem \ref{bosz} (see Corollary \ref{bosz3}). We note that the improvement is very small, but still, to our knowledge, it is the first non-trivial estimate in this direction. We suspect that the actual constant is much smaller than the one given in our corollary. While we are not aware of any results concerning Problem \ref{bosz}, we should mention the related papers \cite{franchetti2}, \cite{franchetti1} of Franchetti. Among other things, Franchetti studied in them the connection between the relative projection constant $\lambda(Y, X)$ (where $Y$ is a hyperplane in a not necessarily finite-dimensional Banach space $X$) and the behaviour of the norm on the hyperplanes parallel to $Y$. Such a behaviour plays also a major role in the proof of our main Theorem \ref{warunek}. Nevertheless, there does not seem to be an overlap between our results and the results of Franchetti.
In the last section of the paper we propose some naturally arising questions which are suitable for further research.
\section{The general case}
Throughout the paper we shall always consider only real $n$-dimensional normed spaces $X$ with $n \geq 3$. The unit ball and the unit sphere of such a normed space $X$ will be denoted by $B_X$ and $S_X$ respectively. Let us also recall that a \emph{face} of a convex body $C \subset \mathbb{R}^n$ is the intersection of it with some supporting hyperplane. A face is called a \emph{facet} if it is $(n-1)$-dimensional, or in other words, it is not contained in an affine subspace of dimension $n-2$. The vectors from the canonical unit basis of $\mathbb{R}^n$ will be denoted by $e_1, e_2, \ldots, e_n$. By $\ell_1^n$ and $\ell_{\infty}^n$ we denote the space $\mathbb{R}^n$ equipped with the norm $||x||_1 = |x_1| + |x_2| + \ldots + |x_n|$ and $||x||_{\infty} = \max_{1 \leq i \leq n} |x_i|$ respectively. We shall often use the simple fact that if $X$ is a normed space and $Y = \ker f \subset X$ (where $f \in S_{X^{\star}}$) is a subspace of codimension $1$, then every projection $P:X \to Y$ can be written in the form $P(x) = x - f(x)r$ for some $r \in \mathbb{R}^n$ satisfying $f(r)=1$.
We begin with a lemma used already in the original paper of Bohnenblust. The result is well-known and often used in the study of minimal projections, but we provide its short proof.
\begin{lem}
\label{helly}
Let $X$ be an $n$-dimensional normed space and let $Y$ be its $(n-1)$-dimensional subspace. Suppose that $m>0$ is a positive real number such that for all extreme points $x_1, x_2, \ldots, x_n$ of the unit ball of $X$ there exists a projection $P:X \to Y$ satisfying $||P(x_i)|| \leq m$ for every $i=1, 2, \ldots, n$. Then there exists a projection $P:X \to Y$ such that $||P|| \leq m$.
\end{lem}
\begin{proof}
For an arbitrary extreme point $x_0 \not \in Y$ of the unit ball of $X$ let us denote by $\mathcal{P}_{x_0}$ the set of all projections $P: X \to Y$ such that $||P(x_0)|| \leq m$. It is not hard to verify that the set of all projections $\mathcal{P}(X, Y)$ forms an $(n-1)$-dimensional affine space and the set $\mathcal{P}_{x_0}$ is a compact and convex subset of this space. According to our assumption, the intersection of any $n$ of the sets $\mathcal{P}_{x_0}$ is non-empty. From Helly's Theorem it follows that the intersection of all sets of the form $\mathcal{P}_{x_0}$ is non-empty. Therefore, there exists a projection $P: X \to Y$ such that $||P(x_0)|| \leq m$ for an arbitrary extreme point $x_0$ of the unit ball. But the unit ball of $X$ is the convex hull of its extreme points and therefore $||P(x_0)|| \leq m$ for an arbitrary $x_0 \in B_X$. This concludes the proof.
\end{proof}
In the proof of Theorem \ref{twbohnenblust} Bohnenblust managed to reduce the case of a general normed space to the case of the space $\ell_1^n$. One can therefore expect that it may be possible to use some more advanced results concerning the space $\ell_1^n$ in studying the relative projection constant of hyperplanes. The result we refer to is the explicit formula for the relative projection constant of a hyperplane in $\ell_1^n$. It is quite complicated in the general case, however we need only some simpler consequences of it, given in Lemmas \ref{l1proj} and \ref{l12}.
\begin{twr}
\label{projekcjal1}
Let $Y=\ker f$ be an $(n-1)$-dimensional subspace of the space $\ell_1^n$ where $n \geq 3$. Suppose that the functional $f$ is given by the vector $(f_1, f_2, \ldots, f_n)$ where $1 = f_1 \geq f_2 \geq \ldots \geq f_n \geq 0$. Let $1 \leq k \leq n$ be the largest integer such that $f_k>0$. Let $a_i = \sum_{j=1}^{i} f_j$, $b_i=\sum_{j=1}^{i} f_j^{-1}$ for $1 \leq i \leq k$ and $\beta_i = \frac{b_i}{i-2}$ for $3 \leq i \leq k$. Let $3 \leq l \leq k$ be the largest integer such that both of the numbers $f_lb_{l-1}$ and $a_{l-1}$ are greater than $l-3$. Then $\lambda(Y, \ell_1^{n})=1+x$, where
\[x =
\begin{cases}
0 &\mbox{ if } k \leq 2\\
2 \left ( \left ( \beta_l - f_l^{-1} \right )(l-2) + a_lf_l^{-1} - l \right )^{-1} &\mbox{ if } k>2 \text{ and } a_l < l-2\\
2\left ( a_l \beta_l - l \right )^{-1} &\mbox{ if } k>2 \text{ and } a_l \geq l-2. \\
\end{cases}
\]
\end{twr}
\begin{proof}
See Theorem 2.2.13 in \cite{lewickihab} on page 57.
\end{proof}
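As a quick illustration (this is the example mentioned in the Introduction), take $f=(1,1,\ldots,1)$. Then $k=l=n$, $a_n=b_n=n$, $\beta_n=\frac{n}{n-2}$ and $a_n \geq n-2$, so the theorem gives
$$\lambda(Y, \ell_1^{n}) = 1 + 2\left( \frac{n^2}{n-2} - n \right)^{-1} = 1 + \frac{n-2}{n} = 2 - \frac{2}{n}.$$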
\begin{lem}
\label{l1proj}
Let $Y=\ker f$ be an $(n-1)$-dimensional subspace of the space $\ell_1^n$ where $n \geq 3$. Suppose that functional $f \neq 0$ is given by the vector $(f_1, f_2, \ldots, f_n)$. Then $\lambda(Y, \ell_1^{n}) = 2 - \frac{2}{n}$ if and only if $|f_1|=|f_2| = \ldots = |f_n|$.
\end{lem}
\begin{proof}
Without loss of generality we may assume that $1 = f_1 \geq f_2 \geq \ldots \geq f_n \geq 0$. Suppose that $Y=\ker f$ is an $(n-1)$-dimensional subspace of $\ell_1^n$ satisfying $\lambda(Y, \ell_1^{n}) = 2 - \frac{2}{n}$. We shall use Theorem \ref{projekcjal1} and we adapt the notation of it. Obviously $k>2$. If $a_l \geq l-2$ then by the formula on minimal projection we have
$$2 - \frac{2}{n} = \lambda(Y, \ell_1^{n}) = 1 + 2\left ( a_l \beta_l - l \right )^{-1}$$
and thus
$$a_l \beta_l = \frac{2n}{n-2} + l.$$
However, from the Cauchy-Schwarz inequality and $l \leq n$ it follows that
$$a_l \beta_l = \frac{\left ( \sum_{i=1}^{l} f_i \right ) \left ( \sum_{i=1}^{l} f^{-1}_i \right )}{l-2} \geq \frac{l^2}{l-2} = \frac{2l}{l-2} + l \geq \frac{2n}{n-2} + l.$$
In consequence $l=n$ and $1=f_1=f_2=\ldots=f_n$ as the equality holds in the Cauchy-Schwarz inequality.
In the case $a_l < l-2$ we get
$$\left ( \beta_l-f_l^{-1} \right)(l-2) + a_lf^{-1}_l = \frac{2n}{n-2} + l.$$
Since
$$\left ( \beta_l-f_l^{-1} \right)(l-2) + a_lf^{-1}_l \geq \left ( \beta_l-f_l^{-1} \right) a_l + a_lf^{-1}_l = \beta_l a_l,$$
we can apply the Cauchy-Schwarz inequality like before and obtain $1=f_1=f_2 = \ldots = f_n$, which contradicts the assumption $a_l < l-2$.
To finish the proof of the lemma, it is enough to observe that for $1=f_1=f_2 = \ldots = f_n$ the norm of a minimal projection is equal to $2-\frac{2}{n}$ by the previous theorem.
\end{proof}
The next lemma follows the original idea of Bohnenblust used in the proof of his theorem.
\begin{lem}
\label{l1}
Let $X$ be an $n$-dimensional normed space and let $Y=\ker f$, where $f \in S_{X^{\star}}$, be its $(n-1)$-dimensional subspace. Suppose that $x_1, x_2, \ldots, x_n$ are unit vectors such that $\lambda(Z, \ell_1^n) \leq R$, where $Z=\ker g$, the functional $g$ is given by the vector $(f(x_1), f(x_2), \ldots, f(x_n))$ and $R \geq 1$. Then there exists a projection $P:X \to Y$ such that $||P(x_i)|| \leq R$ for every $i=1, 2, \ldots, n$.
\end{lem}
\begin{proof}
Let $Q: \ell_1^n \to Z$ be a projection of norm at most $R$ and suppose that $Q(x) = x - g(x)r$ for some $r \in \mathbb{R}^n$ satisfying $g(r)=1$. In particular
$$||Q(e_i)|| = |1-f(x_i)r_i| + \sum_{j \neq i}|f(x_i)r_j| \leq R,$$
for every $1 \leq i \leq n$. Consider a linear mapping $P$ of the form $P(x) = x - f(x) \tilde{r}$, where $\tilde{r}=r_1x_1 + r_2x_2 + \ldots + r_nx_n$. It is a projection from $X$ onto $Y$ since $f(\tilde{r})=g(r)=1.$ Moreover
$$||P(x_i)|| = ||(1 - f(x_i)r_i)x_i - \sum_{j \neq i} r_jf(x_i)x_j|| \leq |1-f(x_i)r_i| + \sum_{j \neq i}|f(x_i)r_j| \leq R.$$
This shows that $P$ is a desired projection and the proof is finished.
\end{proof}
With the preceeding lemmas we are ready to give the proof of Theorem \ref{warunek}.
\emph{Proof of Theorem \ref{warunek}}.
Let us suppose first that the equality $\lambda(Y, X) = 2-\frac{2}{n}$ holds. By Lemma \ref{helly} there are extreme points $x_1, x_2, \ldots, x_n$ of the unit ball of $X$ such that
$$\max \{ ||P(x_1)||, ||P(x_2)||, \ldots, ||P(x_n)|| \} = 2-\frac{2}{n},$$
for every projection $P: X \to Y$. We shall prove that $x_1, x_2, \ldots, x_n$ satisfy the conditions of the theorem.
Since $||P(v)||=||P(-v)||$ for every projection $P: X \to Y$ and every $v \in X$, we can suppose that $f(x_i) \geq 0$ for $1 \leq i \leq n$. By combining Lemmas \ref{l1proj} and \ref{l1} we conclude that $f(x_1)=f(x_2)=\ldots=f(x_n)$. This shows that $x_i$ satisfy the first condition of the theorem.
For the second one, let us suppose that the dimension of the subspace $V = \lin \{x_1, x_2, \ldots, x_n \}$ is at most $n-1$. Then the dimension of the subspace $Y \cap V$ is at least $\dim V - 1$ and from Theorem \ref{twbohnenblust} we know that there exists a projection from $V$ onto $Y \cap V$ of norm not greater than $2 - \frac{2}{\dim V} < 2 - \frac{2}{n}$. This contradicts the choice of $x_i$.
In order to establish the last condition, we shall consider the barycenter $g=\frac{x_1 + x_2 + \ldots + x_n}{n}$. By the triangle inequality it follows that
$$||g-x_i|| =\frac{1}{n} \left | \left |x_1 + x_2 + \ldots + x_{i-1} - (n-1)x_i + x_{i+1} + \ldots + x_n \right | \right | \leq \frac{2n-2}{n}=2-\frac{2}{n}$$
for every $1 \leq i \leq n$. We claim that the equality $||g-x_i||=2-\frac{2}{n}$ holds for every $i$.
Indeed, for the sake of contradiction let us assume that $||g-x_n|| < 2-\frac{2}{n}$. For simplicity let us also denote $A = 2 - \frac{2}{n}$ and $B= \frac{||g-x_n||}{A}<1$. Then we have $ 0 < 2-A < 2-AB$. We can therefore consider
$$s=\lambda g + (1-\lambda)\frac{x_1 + x_2 + \ldots + x_{n-1}}{n-1}$$
for $\lambda$ satisfying $\frac{2-A}{2-AB} < \lambda < 1$. We claim that $||s-x_i|| < A$ for every $1 \leq i \leq n$. Indeed,
$$||s-x_1|| = \left | \left |\left ( \frac{1}{n-1} - \frac{\lambda}{n(n-1)} - 1 \right )x_1 + \left ( \frac{1}{n-1} - \frac{\lambda}{n(n-1)} \right ) \sum_{i=2}^{n-1} x_i + \frac{\lambda}{n}x_n \right | \right | \leq$$
$$\left ( 1 -\frac{1}{n-1} + \frac{\lambda}{n(n-1)} \right ) + (n-2) \left ( \frac{1}{n-1} - \frac{\lambda}{n(n-1)} \right ) + \frac{\lambda}{n} = \frac{2(n^2-2n+\lambda)}{n(n-1)} < 2 \frac{(n-1)^2}{n(n-1)}=A.$$
Similarly $||s-x_i|| < A$ for $i=2, 3, \ldots, n-1$. Furthermore,
$$||s-x_n||=\left | \left | \lambda(g-x_n) + (1-\lambda) \left( \frac{\sum_{i=1}^{n-1}x_i}{n-1} - x_n \right ) \right | \right | $$
$$\leq \lambda AB + (1-\lambda)2 = \lambda(AB - 2) + 2 < (A-2) + 2 = A.$$
This proves our claim.
Now let us consider the projection $P:X \to Y$ in the direction of $s$, i.e. $P(v)=v - \frac{f(v)}{f(s)}s$. Since $s \in \conv \{x_1, x_2, \ldots x_n\}$ we have $f(s)=f(x_i)$ for every $i$. Hence
$$||P(x_i)|| = ||x_i-s|| < 2 - \frac{2}{n},$$
for every $1 \leq i \leq n$. This contradicts the choice of $x_1, x_2, \ldots, x_n$ and our claim follows.
We have thus proved that the point $g$ is equidistant to every $x_i$ with the distance equal to $2 - \frac{2}{n}$. This means that for every $1 \leq i \leq n$ the point
$$\frac{1}{2n-2} (x_1 + x_2 + \ldots + x_{i-1} - (n-1)x_i + x_{i+1} + \ldots + x_n),$$
belonging to the convex hull of
$$\{x_1, x_2, \ldots, x_{i-1}, -x_i, x_{i+1}, \ldots, x_n\},$$
lies on the unit sphere of $X$. It clearly implies that the set above is contained in a \textbf{face} of the unit ball. But this face must be a facet as $x_i$ are linearly independent vectors. The third condition now follows from the fact that in the basis of $x_i$ the facet containing $x_1, x_2, \ldots, x_{i-1}, -x_i, x_{i+1}, \ldots, x_n$ is determined by the vector $(1, 1, \ldots, 1, -1, 1, \ldots, 1)$. This completes the proof of the first implication.
Let us now suppose that a subspace $Y$ and extreme points $x_1, x_2, \ldots, x_n$ of the unit ball satisfy all of the conditions. By applying an appropriate linear transformation, without loss of generality we can assume that $x_i=e_i$ is the $i$-th unit vector from the canonical basis of $\mathbb{R}^n$. Then $Y = \ker f$, where $f(v)=v_1+v_2 + \ldots + v_n$. For the sake of contradiction let us further suppose that there exists a projection $P: X \to Y$ such that $||P(e_i)|| < 2 - \frac{2}{n}$ for every $1 \leq i \leq n$. As the hyperplane containing the $e_i$'s is parallel to $Y$, the projection $P$ acts on them as a translation. In other words, there exists some vector $w=(w_1, w_2, \ldots, w_n)$ such that $P(e_i)=e_i-w$ for every $i$ and $\sum_{i=1}^{n} w_i = 1$. Then
$$2 - \frac{2}{n} > ||P(e_i)|| \geq |w_1 + w_2 + \ldots + w_{i-1} - w_i + w_{i+1} + \ldots + w_n + 1|,$$
for every $1 \leq i \leq n$. Summation of all of these inequalities yields
$$2n - 2 > \sum_{i=1}^{n} |w_1 + w_2 + \ldots + w_{i-1} - w_i + w_{i+1} + \ldots + w_n + 1| \geq \left | (n-2) \sum_{i=1}^{n} w_i + n \right | = 2n-2.$$
We have obtained a contradiction that finishes the proof of the theorem.
\qed
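To illustrate the conditions of Theorem \ref{warunek}, consider once more $X=\ell_1^n$ and $Y=\ker f$ with $f(x)=x_1+x_2+\ldots+x_n$, and take $x_i=e_i$. Then $f(e_1)=\ldots=f(e_n)=1$, the vectors $e_1, \ldots, e_n$ are linearly independent, and for every $1 \leq i \leq n$ the set $\{e_1, \ldots, e_{i-1}, -e_i, e_{i+1}, \ldots, e_n\}$ is contained in the facet $\{x : x_1 + \ldots + x_{i-1} - x_i + x_{i+1} + \ldots + x_n = 1\}$ of the cross-polytope $B_{\ell_1^n}$. The third condition reduces to the obvious inequality $|w_1 + \ldots + w_{i-1} - w_i + w_{i+1} + \ldots + w_n| \leq ||x||_1$, and we recover $\lambda(Y, \ell_1^n) = 2 - \frac{2}{n}$, in accordance with Theorem \ref{projekcjal1}.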
The characterization above can be stated in purely geometric form, as in the following corollary.
\begin{cor}
\label{charakteryzacjan}
Let $X$ be an $n$-dimensional normed space. Then the following conditions are equivalent:
\begin{itemize}
\item There exists a subspace $Y$ of $X$ such that $\dim Y = n-1$ and $\lambda(Y, X) = 2-\frac{2}{n}$.
\item There exists a linear operator $T: \mathbb{R}^n \to \mathbb{R}^n$ such that $C \subset T(B_X) \subset P$,
\end{itemize}
where $B_X$ is the unit ball of $X$, $C$ is the cross-polytope $\{ x: |x_1| + |x_2| + \ldots + |x_n| \leq 1 \}$ and $P$ is the parallelotope bounded by hyperplanes: $\{x: x_1 + x_2 + \ldots + x_{i-1} - x_i + x_{i+1} + \ldots + x_n = \pm 1\}$ for $i=1, 2, \ldots, n$.
\end{cor}
\begin{proof}
It is enough to take $T$ to be the linear operator such that $T(x_i)=e_i$ for $i=1, 2, \ldots, n$.
\end{proof}
The next corollary of Theorem \ref{warunek} is the following.
\begin{cor}
\label{maxmin}
Let $X$ be an $n$-dimensional normed space which possesses an $(n-1)$-dimensional subspace $Y$ such that $\lambda(Y, X) = 2 - \frac{2}{n}$. Then $X$ also possesses a two-dimensional subspace $Z$ such that $\lambda(Z, X)=1$.
\end{cor}
\begin{proof}
Let $x_1, x_2, \ldots, x_n$ be as in Theorem \ref{warunek}. From the third condition it follows that for $i \neq j$ the segments connecting the pairs $(x_i, x_j), (x_i, -x_j), (-x_i, x_j), (-x_i, -x_j)$ all lie on the unit sphere of $X$. Thus the intersection $\lin\{x_i, x_j\} \cap B_X$ is a parallelogram. It is well-known that $\ell_{\infty}^m$ subspaces are always $1$-complemented (see also Lemma \ref{projekcja}) and the result follows.
\end{proof}
As yet another application of the characterization given in Theorem \ref{warunek} we provide an upper bound for the number of subspaces with maximal relative projection constant in terms of the number of facets of the unit ball.
\begin{twr}
\label{sciany}
Let $X$ be an $n$-dimensional normed space and let $N \geq 0$ be the number of facets of the unit ball of $X$. Then, the number of $(n-1)$-dimensional subspaces $Y \subset X$ such that $\lambda(Y, X) = 2 - \frac{2}{n}$ is not greater than $\binom{N}{n}$.
\end{twr}
\begin{proof}
Let $\mathcal{F}$ be the set of all facets of $B_X$. If a subspace $Y$ satisfies $\lambda(Y, X)= 2 - \frac{2}{n}$ then by Theorem \ref{warunek} there exist unit vectors $x_1, x_2, \ldots, x_n$ lying in the hyperplane parallel to $Y$, such that for every $1 \leq i \leq n$ the set
$$\{x_1, x_2, \ldots, x_{i-1}, -x_i, x_{i+1}, \ldots, x_n\}$$
is contained in a different facet of $B_X$. Thus, to every such $Y$ there corresponds a set $F(Y) \subset \mathcal{F}$ of $n$ different facets of $B_X$. We will show that $F$ is an injection.
For this purpose, let us suppose that the $n$ facets in $F(Y)$ are determined by the functionals $f_1, f_2, \ldots, f_n \in S_{X^{\star}}$ and let $x_1, x_2, \ldots, x_n$ be as before. If $f = \frac{f_1+f_2 + \ldots + f_{n}}{n-2}$, then for every $1 \leq i \leq n$ we have
$$f(x_i) = \frac{(n-1) - 1}{n-2} = 1.$$
This shows that $Y=\ker f$ is uniquely determined by $F(Y)$ and the conclusion follows. Thus, the number of subspaces with the maximal projection constant is at most $\binom{N}{n}$ and the proof is finished.
\end{proof}
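For instance, for $X = \ell_{\infty}^3$ the unit ball has $N=6$ facets, so Theorem \ref{sciany} gives the upper bound $\binom{6}{3}=20$, while we shall see in Theorem \ref{plaszczyzny} below that the actual number of two-dimensional subspaces with the maximal relative projection constant equals $4$ in this case. The bound of Theorem \ref{sciany} is therefore rather crude, but its finiteness already suffices for the corollary below.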
As an immediate consequence we have the following
\begin{cor}
\label{wniosek}
An arbitrary $n$-dimensional normed space $X$ possesses a subspace $Y$ such that $\lambda(Y, X) < 2 - \frac{2}{n}$.
\end{cor}
We should remark that in the separable case it is possible that every hyperplane has the maximal possible projection constant. It is well known that if $X$ is a separable Banach space then $\lambda(Y, X) \leq 2$ for every hyperplane $Y$. And yet, by a result of Franchetti \cite{franchetti3} we have $\lambda(Y, L_1[0, 1]) = 2$ for every hyperplane $Y \subset L_1[0, 1]$.
\section{Three-dimensional case}
In the three-dimensional setting it is possible to establish some stronger results with similar methods. As we have seen in Corollary \ref{charakteryzacjan}, if a three-dimensional space $X$ possesses a two-dimensional subspace $Y$ satisfying $\lambda(Y, X) = \frac{4}{3}$, then we can suppose that $C \subset B_X \subset P$, where $C$ is the octahedron $\{ x: |x_1| + |x_2| + |x_3| \leq 1 \}$ and $P$ is the parallelotope with the set of vertices $\{(\pm 1, 0, 0), (0, \pm 1, 0), (0, 0, \pm 1), (1, 1, 1), (-1, -1, -1)\}$. In terms of the norm, we have the following inequalities for an arbitrary vector $x=(x_1, x_2, x_3) \in \mathbb{R}^3$.
$$\max \{ |x_1+x_2-x_3|, |x_1-x_2+x_3|, |-x_1+x_2+x_3| \} \leq ||x|| \leq |x_1|+|x_2|+|x_3|.$$
Note, however, that if $x_1, x_2, x_3$ are not all of the same sign, then
$$\max \{ |x_1+x_2-x_3|, |x_1-x_2+x_3|, |-x_1+x_2+x_3| \} = |x_1|+|x_2|+|x_3|.$$
Indeed, if for instance $x_1, x_2 \geq 0 \geq x_3$, then $|x_1+x_2-x_3| = x_1+x_2-x_3 = |x_1|+|x_2|+|x_3|$. Therefore $||x|| = |x_1| + |x_2| + |x_3|$ in such a case. This makes the condition much more restrictive on $X$ than in the case of a general dimension. In particular, we are able to determine the maximal possible number of two-dimensional subspaces with the relative projection constant equal to $\frac{4}{3}$. We omit the straightforward proof of the following auxiliary lemma.
\begin{lem}
\label{nierownosc}
For arbitrary real numbers $-1 \leq x, y \leq 1$ the inequality $|x| + |y| + |x+y-1| \leq 3$ is true. Moreover, if the equality holds then $x=-1$ or $y=-1$ or $x=y=1$.
\end{lem}
\begin{twr}
\label{plaszczyzny}
Let $X$ be a $3$-dimensional normed space. Then, the maximal possible number of $2$-dimensional subspaces $Y \subset X$ such that $\lambda(Y, X) = \frac{4}{3}$ is equal to $4$.
\end{twr}
\begin{proof}
If we take $X = \ell_{\infty}^3$ and $Y=\{x \in \mathbb{R}^3 : c_1x_1 + c_2x_2 + x_3 = 0\}$, where $c_1,\ c_2 \in \{-1, 1\}$ then $\lambda(Y, X) = \frac{4}{3}$. It is therefore enough to prove the upper bound.
Let us start by looking more closely at the case of $X = \ell_{\infty}^3$. Theorem \ref{warunek} implies that every two-dimensional subspace $Y \subset \ell_{\infty}^3$ with the maximal projection constant is parallel to a plane determined by some three of the vertices of the unit cube (not containing any symmetric pair). However, every three vertices lying on one face determine a subspace with projection constant equal to $1$. Moreover, it is easy to see that every other plane is determined by exactly four $3$-element sets of vertices. This gives exactly four different planes with the maximal projection constant. In this case the statement of the theorem is therefore evident.
Now suppose that $X$ is an arbitrary $3$-dimensional normed space not linearly isometric to the $\ell_{\infty}^3$ and $Y \subset X$ is a subspace with $\lambda(Y, X) = \frac{4}{3}$. Without loss of generality we may assume that $Y = \{x \in \mathbb{R}^3 : x_1 + x_2 + x_3 = 0\}$ and the vectors given by Theorem \ref{warunek} are the vectors $e_1,\ e_2,\ e_3$ from the canonical unit basis of $\mathbb{R}^3$. Let $Z$ be some other subspace of $X$ satisfying $\lambda(Z, X) = \frac{4}{3}$ and denote by $z_1,\ z_2,\ z_3$ the extreme points of the unit ball given by Theorem \ref{warunek}, which are associated with the subspace $Z$. It is enough to show that $\{z_1, z_2, z_3\} = \{\varepsilon_1e_1, \varepsilon_2e_2, \varepsilon_3e_3\}$ for some $\varepsilon_1,\ \varepsilon_2,\ \varepsilon_3 \in \{-1, 1\}$.
Let $P =\{x \in \mathbb{R}^3 : x_1, x_2, x_3 \geq 0\}$ and $-P =\{x \in \mathbb{R}^3 : x_1, x_2, x_3 \leq 0\}$. Note that $z_1,\ z_2,\ z_3 \in (P \cup -P)$. Indeed, $z_i$'s are extreme points of the unit ball and from the remark opening this section it follows that in every part of the coordinate system different from $P$ and $-P$ the unit sphere of $X$ is a triangle with the vertices of the form $\pm e_1,\ \pm e_2,\ \pm e_3$, so these are the only possible extreme points. But these points clearly belong to $P \cup (-P)$.
Without loss of generality we can assume that $z_1,\ z_2 \in P$. Let $z_1 = (a_1, a_2, a_3)$ and $z_2=(b_1, b_2, b_3)$, where $a_i,\ b_i \geq 0$ for $i=1, 2, 3$. Due to the symmetry of the situation we have to consider only two cases: $a_i \geq b_i$ for $i=1, 2, 3$ or $a_1 \geq b_1,\ a_2 \geq b_2$ and $a_3 \leq b_3$. Let us start with the first one.
In this case
$$2 = ||z_1-z_2|| \leq |a_1-b_1| + |a_2 - b_2| + |a_3-b_3| = (a_1+a_2+a_3) - (b_1+b_2+b_3).$$
However,
$$1 = ||z_2|| \leq b_1 + b_2 + b_3.$$
Hence by adding the inequalities
$$|a_1+a_2-a_3| \leq 1, \: |a_1-a_2+a_3| \leq 1, \: |-a_1+a_2+a_3| \leq 1,$$
and using the triangle inequality we obtain that $a_1+a_2+a_3 \leq 3$. Thus $a_1+a_2+a_3=3$, $b_1+b_2+b_3=1$ and the equality holds in all of the estimations. From the equalities
$$|a_1+a_2-a_3| = |a_1-a_2+a_3| = |-a_1+a_2+a_3| = 1$$
it easily follows that $a_1=a_2=a_3=1$. Therefore, the point $(1, 1, 1)$ belongs to the unit sphere of $X$. This shows that the unit ball of $X$ is the parallelotope with the vertices $\{(\pm 1, 0, 0), (0, \pm 1, 0), (0, 0, \pm 1), (1, 1, 1), (-1, -1, -1)\}$. In particular $X$ is linearly isometric to $\ell_{\infty}^3$, which contradicts our assumption from the beginning of the proof.
Now suppose that $a_1 \geq b_1,\ a_2 \geq b_2,\ a_3 \leq b_3$. Then
$$2 = ||z_1-z_2|| \leq |a_1-b_1| + |a_2 - b_2| + |a_3-b_3| = (a_1+a_2-a_3) - (b_1+b_2-b_3).$$
However, both of the numbers $|a_1+a_2-a_3|,\ |b_1+b_2-b_3|$ are bounded by $1$. This means that $a_1+a_2-a_3=1$ and $b_1+b_2-b_3=-1$. Adding the inequalities $|-b_1+b_2+b_3| \leq 1$ and $|b_1-b_2+b_3| \leq 1$ gives us $b_3 \leq 1$. Therefore $b_1=b_2=0$ and $b_3=1$. Note that in fact the absolute value of any coordinate of $z_i$'s is bounded by $1$ by the same argument.
Now we shall incorporate the third point $z_3$ into our reasoning. Let us write $z_3=(c_1, c_2, c_3)$ and suppose that $z_3 \in P$, or in other words that $c_i \geq 0$ for $i=1, 2, 3$. Then we have
$$2 = ||z_2-z_3|| \leq |c_1| + |c_2| + |1-c_3| = c_1 + c_2 - c_3 + 1,$$
so that $c_1 + c_2 - c_3 = 1$. Note that Theorem \ref{warunek} applied to the plane determined by $z_1, z_2, z_3$ implies the equality $3 = ||z_1-z_2-z_3||$. But on the other hand, we can estimate the norm of this vector using the canonical unit basis of $\mathbb{R}^3$ obtaining the inequality
$$3=||z_1-z_2-z_3|| \leq |a_1 - c_1| + |a_2-c_2| + |a_3-c_3 - 1| = |a_1 - c_1| + |a_2-c_2| + |(a_1-c_1)+(a_2-c_2)-1|.$$
As $0 \leq a_1, a_2, c_1, c_2 \leq 1$ we can apply Lemma \ref{nierownosc} to $x=a_1-c_1$ and $y=a_2-c_2$ obtaining that $x=y=1$ or $x=-1$ or $y=-1$. In the first case we have that $a_1-c_1=a_2-c_2=1$ which implies that $a_1=a_2=1$ and hence also $a_3=1$. We have thus once again arrived at the case of $\ell_{\infty}^3$ discussed before.
Without loss of generality let us therefore suppose that $a_2-c_2=-1$. Then $a_2=0$ and $c_2=1$. Thus $a_1-a_3=1$ which implies that $a_1=1$ and $a_3=0$. Moreover, $c_3=c_1$. In other words, we have proved that $z_1=e_1$ and $z_3=(c_1, 1, c_1)$. Consequently
$$3=||z_1+z_2-z_3|| \leq |1-c_1| + 1 + |1-c_1| = 3 - 2c_1,$$
which proves that $c_1=0$. This proves our claim in the case $z_3 \in P$.
Suppose now that $c_i \leq 0$ for $i=1, 2, 3$. This time we have
$$2 = ||z_2+z_3|| \leq |c_1| + |c_2| + |1+c_3| = 1 - (c_1+c_2-c_3),$$
so that $c_1+c_2-c_3=-1$. Furthermore
$$3=||z_1-z_2+z_3|| \leq |a_1+c_1|+|a_2+c_2|+|a_3+c_3-1| = |a_1+c_1|+|a_2+c_2| + |(a_1+c_1) + (a_2+c_2)-1|.$$
According to our assumptions we have that $0 \leq |a_1+c_1|, |a_2+c_2| \leq 1$ and we can apply Lemma \ref{nierownosc} to $x=a_1+c_1$ and $y=a_2+c_2$. It implies that $a_1+c_1=a_2+c_2=1$ or $a_1+c_1=-1$ or $a_2+c_2=-1$. In the first case it follows that $a_1=a_2=1$ and in consequence $a_3=1$. Similarly as before, this means that $X$ is linearly isometric to $\ell_{\infty}^3$, which contradicts our assumption. Therefore, without loss of generality let us assume that $a_2+c_2=-1$. Then it follows that $a_2=0$ and $c_2=-1$. Hence $a_1=1$, $a_3=0$ and $c_1=c_3$.
To finish the proof of the theorem we consider the vector $z_1+z_2-z_3=(1-c_1, -1, 1-c_1)$. On the one hand its norm is equal to $3$ (computing with respect to the basis of $z_i$). On the other hand, its coordinates are not of the same sign and therefore, computing the norm with respect to the canonical basis we obtain $3=(1-c_1) + 1 + (1- c_1) = 3-2c_1$, which again gives us $c_1=0$. This completes the proof.
\end{proof}
The rest of this section is devoted to developing a stability version of Corollary \ref{maxmin}. We need the following two lemmas. The first of them is a more precise version of Lemma \ref{l1proj}.
\begin{lem}
\label{l12}
Let $Y=\ker f$ be a $2$-dimensional subspace of the space $\ell_1^3$ and let $0 \leq A < \frac{1}{3}$ be a real number. Suppose that the functional $f \neq 0$ is given by the vector $(f_1, f_2, f_3)$, which satisfies
$$1=f_1 \geq f_2 \geq f_3 \geq 0, \qquad f_3 \leq r$$
where
$$r=\left(\frac{b-\sqrt{b^2-4}}{2} \right)^2 \: \text{ and } \: b=3\sqrt{\frac{1-A}{1-3A}} - 1.$$
Then $\lambda(Y, \ell_1^{3}) \leq \frac{4}{3}-A$.
\end{lem}
\begin{proof}
Note that $b \geq 2$ and in consequence $r \leq 1$. If $f_3=0$ then there is nothing to prove as by Theorem \ref{projekcjal1} we have $\lambda(Y, \ell_1^{3}) = 1 \leq \frac{4}{3}-A$. Let us therefore suppose that $f_3>0$. According to Theorem \ref{projekcjal1} we have
$$\lambda(Y, \ell_1^{3}) = 1+2\left ( (1+f_2+f_3)(1+f_2^{-1}+f_3^{-1}) - 3 \right )^{-1} = 1 + 2\left ( \frac{f_2}{f_3} + \frac{f_3}{f_2} + f_2 + f_3 + \frac{1}{f_2} + \frac{1}{f_3} \right)^{-1}.$$
Our thesis is therefore equivalent to the inequality
$$\frac{f_2}{f_3} + \frac{f_3}{f_2} + f_2 + f_3 + \frac{1}{f_2} + \frac{1}{f_3} \geq \frac{6}{1-3A},$$
under the given conditions.
Consider the function $g(u,v)$ defined as
$$g(u, v) = \frac{u}{v} + \frac{v}{u} + u + v + \frac{1}{u} + \frac{1}{v},$$
for $(u, v) \in \mathbb{R}^2$ satisfying $0 < v \leq u \leq 1$ and $v \leq r$.
By straightforward calculus we easily obtain that, under our assumptions, the function $g$ is minimized at $(1, r)$ or at $(\sqrt{r}, r)$. However,
$$g(1, r) = 2\left ( r+\frac{1}{r} \right ) + 2 = \left ( \sqrt{r} + \frac{1}{\sqrt{r}} \right )^2 + \left ( r+\frac{1}{r} \right) \geq 2 \left ( \sqrt{r} + \frac{1}{\sqrt{r}} \right ) + \left ( r+\frac{1}{r} \right) = g(\sqrt{r}, r).$$
It is therefore enough to prove that
$$g(\sqrt{r}, r) = 2\left ( \sqrt{r} + \frac{1}{\sqrt{r}} \right ) + \left ( r+\frac{1}{r} \right ) \geq \frac{6}{1-3A}.$$
If we substitute $t= \sqrt{r} + \frac{1}{\sqrt{r}}$ then it rewrites as
$$t^2 + 2t - 2 \geq \frac{6}{1-3A}$$
or $(t+1)^2 \geq \frac{6}{1-3A}+3$. As $t$ is positive this is equivalent to $t \geq \sqrt{\frac{6}{1-3A}+3} - 1 = b$. After substituting $r=\left(\frac{b-\sqrt{b^2-4}}{2} \right)^2$ we easily check that in fact we have an equality. This finishes the proof of the lemma.
\end{proof}
\begin{lem}
\label{projekcja}
Let $X$ be an $n$-dimensional normed space and suppose that $x, y \in X$ are linearly independent unit vectors satisfying $||x+y||, ||x-y|| \geq 2-A$ for some $0 < A < 1$. Then $\lambda(Y, X) \leq \frac{1}{1-A}$ for $Y=\lin\{x, y\}$.
\end{lem}
\begin{proof}
Without loss of generality we may suppose that $Y = \{(v_1, v_2, 0, \ldots, 0): v_1, v_2 \in \mathbb{R} \}$ and $x=e_1+e_2, y=e_1-e_2$. Then $||e_1+e_2||=||e_1-e_2||=1$ and $||e_1||, ||e_2|| \geq 1-\frac{A}{2}$. We claim that
$$||v|| \leq ||v||_{\infty} \leq \frac{1}{1-A} ||v||,$$
for any $v \in Y$ (where $|| \cdot ||_{\infty}$ denotes the usual supremum norm). In fact, suppose that $v=(v_1, v_2, 0, \ldots, 0)$ for $v_1 \geq v_2 \geq 0$. Then
$$||v||=||v_1e_1 + v_2e_2|| = ||(v_1+v_2)e_1 - v_2(e_1-e_2)|| \geq (v_1+v_2)\left ( 1-\frac{A}{2} \right) - v_2.$$
For a fixed $v_1$ the expression above is a linear function of $v_2$. For $v_2=0$ it is equal to $v_1(1-\frac{A}{2})$ and for $v_2=v_1$ it is equal to $v_1(1-A)$. Thus $||v|| \geq v_1(1-A) = (1-A)||v||_{\infty}$. Furthermore
$$||v|| = \frac{1}{2}||(v_1-v_2)(e_1-e_2) + (v_1+v_2)(e_1+e_2)|| \leq \frac{1}{2} \left ((v_1-v_2) + (v_1+v_2) \right ) = v_1 = ||v||_{\infty}.$$
In the remaining cases the reasoning is analogous. This establishes our claim.
Consider linear functionals $p_1, p_2:Y \to \mathbb{R}$ defined as $p_i(v) =v_i$ for $i=1, 2$. From the previous part it follows that the norms of these functionals are bounded by $\frac{1}{1-A}$. Thus, the Hahn-Banach Theorem gives us extensions $\tilde{p_1}, \tilde{p_2}: X \to \mathbb{R}$ with the norm not exceeding $\frac{1}{1-A}$. Then $P(v)=(\tilde{p_1}(v), \tilde{p_2}(v), 0, \ldots, 0)$ is a desired projection from $X$ onto $Y$ with $||P|| \leq \frac{1}{1-A}$. Indeed
$$||P(v)|| \leq ||P(v)||_{\infty} = \max\{|\tilde{p_1}(v)|, |\tilde{p_2}(v)|\} \leq \frac{1}{1-A} ||v||.$$
This concludes the proof of the lemma.
\end{proof}
Now we are ready to give a stability version of Corollary \ref{maxmin}. We follow a similar reasoning to the one used in the proof of Theorem \ref{warunek}. Before stating the theorem let us introduce the function $\varphi(R)$ as
$$\varphi(R) = \left(\frac{\left ( 3\sqrt{\frac{1-R}{1-3R}} - 1 \right )-\sqrt{\left ( 3\sqrt{\frac{1-R}{1-3R}} - 1 \right )^2-4}}{2} \right)^{-2}-1.$$
The function $\varphi(R)$ will serve as a quantitative measure of the stability. We note that $\varphi$ is continuous and nonnegative on the interval $[0, \frac{1}{3})$ and moreover $\varphi(0)=0$.
\begin{twr}
\label{maxmin3}
Let $X$ be a $3$-dimensional normed space. Suppose that there exists a subspace $Y$ of $X$ such that $\dim Y = 2$ and $\lambda(Y, X) = \frac{4}{3}-R$ for some $R \geq 0$ satisfying $R + \varphi(R) \leq \frac{1}{3}$. Then there exists a $2$-dimensional subspace $Z$ of $X$ such that $\lambda(Z, X) \leq 1 + \frac{9(R+\varphi(R))}{4-12(R+\varphi(R))}$.
\end{twr}
\begin{proof}
Let $X$ and $Y$ be as stated in the theorem and suppose that $Y = \ker f$ for some $f \in S_{X^{\star}}$. As every projection of $X$ onto $Y$ has norm at least $\frac{4}{3}-R$, by Lemma \ref{helly} we conclude that there exist unit vectors $x, y, z \in X$ such that
$$\max \{ ||P(x)||, ||P(y)||, ||P(z)|| \} \geq \frac{4}{3}-R,$$
for every projection $P: X \to Y$. Consider the barycenter $g=\frac{x+y+z}{3}$ and let $C=\frac{6(R+\varphi(R))}{4-3(R+\varphi(R))}$. We claim that among the numbers $||g-x||, ||g-y||, ||g-z||$ there are at least two which are not less than $\frac{4}{3}-C$.
Suppose otherwise. We can assume that $||g-x||, ||g-y|| < \frac{4}{3}-C.$ Consider $s=\lambda g + (1-\lambda)z$ with $\lambda=\frac{2}{2+C}$. Then $0 < \lambda < 1$ and
$$||s-x|| = ||\lambda(g-x) + (1-\lambda)(z-x)|| < \lambda \left( \frac{4}{3} - C \right) + (1-\lambda)2 = 2 - \frac{2}{3}\lambda - \lambda C = \frac{8}{3C+6}.$$
Similarly $||s-y|| < \frac{8}{3C+6}$. Note also that
$$||g-z|| = \left | \left | \frac{x+y-2z}{3} \right | \right | \leq \frac{1}{3} + \frac{1}{3} + \frac{2}{3} = \frac{4}{3},$$
and hence
$$||s-z|| = \lambda ||g-z|| \leq \frac{4}{3} \lambda = \frac{8}{3C+6}.$$
By a small perturbation of $\lambda$ we can guarantee the strict inequality in the estimation above. In fact, if we replace $\lambda$ by a $\lambda'=\lambda-\varepsilon$ for a sufficiently small $\varepsilon>0$ then for $s'=\lambda' g + (1-\lambda')z$ we still have $||s'-x||, ||s'-y|| < \frac{8}{3C+6}$ but also $||s'-z|| = \frac{4}{3} \lambda' < \frac{8}{3C+6}$. We have thus proved the existence of $s \in \conv \{x, y, z \}$ such that $||s-x||, ||s-y||, ||s-z|| < \frac{8}{3C+6}$.
Since $||P(v)||=||P(-v)||$ for every projection $P: X \to Y$ and every $v \in X$, we can suppose that $f(x), f(y), f(z) \geq 0$. Without loss of generality let us further assume that $f(x) \geq f(y) \geq f(z)$. By Lemma \ref{l12} we have
$$\frac{f(x)}{f(z)} \leq \varphi(R) + 1,$$
as otherwise, by combining Lemmas \ref{l1} and \ref{l12}, it would be possible to project $x, y, z$ onto $Y$ with a projection of norm smaller than $\frac{4}{3}-R$, contradicting the choice of $x, y, z$. Let $t \in \{x, y, z\}$. Since $s \in \conv\{ x, y, z\}$ it is clear that
$$ \frac{f(t)}{f(s)} - 1 \leq \frac{f(x)}{f(z)} - 1 \leq \varphi(R),$$
and similarly
$$\frac{f(t)}{f(s)} - 1 \geq \frac{f(z)}{f(x)} - 1 \geq \frac{1}{1+\varphi(R)} - 1 = \frac{-\varphi(R)}{1+\varphi(R)} \geq -\varphi(R).$$
In particular $\left | 1 - \frac{f(t)}{f(s)} \right | \leq \varphi(R)$.
Consider the projection $P:X \to Y$ in the direction of $s$, i.e. $P(v) = v - \frac{f(v)}{f(s)}s$ and let $t \in \{x, y, z\}$. It follows that
$$\left | \left |t - \frac{f(t)}{f(s)}s \right | \right | = \left | \left |t-s + \left(1-\frac{f(t)}{f(s)}\right)s \right | \right| \leq ||t-s|| + \left |1 - \frac{f(t)}{f(s)} \right | ||s|| < \frac{8}{3C+6} + \varphi(R)$$
$$=\frac{8}{3\frac{6(R+\varphi(R))}{4-3(R+\varphi(R))}+6} + \varphi(R)=\frac{8}{\frac{18(R+\varphi(R))}{4-3(R+\varphi(R))}+\frac{24-18(R+\varphi(R))}{4-3(R+\varphi(R))}} + \varphi(R)=\frac{32-24(R+\varphi(R))}{24} + \varphi(R)$$
$$=\frac{4}{3}-(R+\varphi(R)) + \varphi(R) = \frac{4}{3}-R.$$
We have obtained that $||P(t)|| < \frac{4}{3} - R$ for $t \in \{x, y, z \}$, which contradicts the choice of $x, y, z$. Our claim follows.
Now let us assume that $||g-y||, ||g-z|| \geq \frac{4}{3}-C$. We have
$$4-3C \leq ||3g-3z||=||x+y-2z|| \leq ||x+y|| +2,$$
and therefore $||x+y|| \geq 2-3C$. Moreover
$$4-3C \leq ||3g-3y||=||x+z-2y|| \leq ||x-y|| + ||z-y|| \leq ||x-y||+2,$$
and hence $||x-y|| \geq 2-3C$. From Lemma \ref{projekcja} we conclude that there exists a projection from $X$ onto $\lin\{ x, y \}$ with norm not exceeding
$$\frac{1}{1-3C}=\frac{4-3(R+\varphi(R))}{4-21(R+\varphi(R))} = 1 + \frac{18(R+\varphi(R))}{4-21(R+\varphi(R))},$$
as desired.
\end{proof}
As a corollary we obtain an improvement of the trivial bound in the three-dimensional case of Problem \ref{bosz}.
\begin{cor}
\label{bosz3}
Every three-dimensional normed space $X$ possesses a subspace $Y$ such that $\lambda(Y, X) < \frac{4}{3} - 0.0007$.
\end{cor}
\begin{proof}
By a numerical calculation one can check that for $R = 0.0007$ we have $R + \varphi(R) < \frac{1}{3}$ and that the inequality
$$1 + \frac{9(R+\varphi(R))}{4-12(R+\varphi(R))} < \frac{4}{3}-R$$
holds. Therefore, if $X$ has a subspace $Y$ with $\lambda(Y, X) > \frac{4}{3} - 0.0007$, then by Theorem \ref{maxmin3} it also has a subspace $Z$ with $\lambda(Z, X) < \frac{4}{3} - 0.0007$.
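For the reader's convenience we record the approximate values involved (rounded to four decimal places): $\varphi(0.0007) \approx 0.0961$, hence $R + \varphi(R) \approx 0.0968 < \frac{1}{3}$ and
$$1 + \frac{9(R+\varphi(R))}{4-12(R+\varphi(R))} \approx 1.3068 < 1.3326 \approx \frac{4}{3}-0.0007.$$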
\end{proof}
\section{Open problems}
In this section we suggest several open questions related to our results.
In Corollary \ref{wniosek} we have established that an arbitrary $n$-dimensional normed space $X$ has a subspace $Y$ with $\lambda(Y, X) < 2 - \frac{2}{n}$. By a standard compactness argument it easily follows that there exists $c>0$ such that every $n$-dimensional normed space $X$ has a subspace $Y$ with $\lambda(Y, X) \leq 2 - \frac{2}{n} - c$. It is natural to ask for the best possible constant $c$. In other words, we propose the following variation of Problem \ref{bosz} of Bosznay and Garay.
\begin{problem}
For an integer $n \geq 3$ determine the value of $\sup_{X} \inf_{Y \subset X} \lambda(Y, X)$, where $X$ is an $n$-dimensional normed space and $Y \subset X$ is a subspace of dimension $n-1$.
\end{problem}
It should also be noted that obtaining any lower bound greater than $1$ would be significant. Most of the classical normed spaces always admit a projection of norm $1$ onto some $(n-1)$-dimensional subspace, whereas here one needs a normed space in which every projection onto an $(n-1)$-dimensional subspace has norm not less than $c$, for some explicit $c>1$.
It is also natural to ask about improving Theorem \ref{sciany}.
\begin{problem}
For an integer $n \geq 3$ determine the maximal possible number of $(n-1)$-dimensional subspaces with relative projection constant equal to $2-\frac{2}{n}$ that an $n$-dimensional normed space can have, or at least give an upper bound depending only on $n$ and not on the particular space $X$.
\end{problem}
The last question we propose is concerned with Corollary \ref{maxmin}.
\begin{problem}
For an integer $n \geq 3$ determine the maximal possible integer $k$ such that every $n$-dimensional normed space $X$ which possesses an $(n-1)$-dimensional subspace $Y$ with $\lambda(Y, X) = 2 - \frac{2}{n}$ also possesses a $k$-dimensional subspace $Z$ satisfying $\lambda(Z, X) = 1$.
\end{problem}
We have established that $k \geq 2$, and it is reasonable to suspect that $k=2$ may be the right answer to this question. Nevertheless, providing a construction of a normed space for which $k=2$ is optimal would be very interesting.
\end{document}
\begin{document}
\title[Higgs bundles and local systems]
{Higgs bundles and local systems on Riemann surfaces}
\author[Richard A. Wentworth]{Richard A. Wentworth}
\address{Department of Mathematics,
University of Maryland,
College Park, MD 20742, USA}
\email{[email protected]}
\thanks{R.W. supported in part by NSF grants DMS-1037094 and DMS-1406513.
The author also acknowledges support from NSF grants DMS 1107452, 1107263, 1107367 ``RNMS: GEometric structures And Representation varieties'' (the GEAR Network).}
\date{\today}
\maketitle
\setcounter{tocdepth}{2}
\tableofcontents
\thispagestyle{empty}
\baselineskip=16pt
\section{Preface}
These notes are based on lectures given at
the Third International School on
Geometry and Physics at the Centre de Recerca Matem\`atica in
Barcelona, March 26--30, 2012.
The aim of the School's four lecture series was to give a rapid introduction to Higgs bundles, representation varieties,
and mathematical physics.
While the scope of these subjects is very broad,
that of these notes is far more modest.
The main topics covered here are:
\begin{itemize}
\item The Hitchin-Kobayashi-Simpson correspondence for Higgs bundles
on Riemann surfaces.
\item The Corlette-Donaldson theorem relating the moduli
spaces of Higgs bundles and semisimple representations of the
fundamental group.
\item A description of the oper moduli space and its
relationship to systems of holomorphic differential equations, Higgs bundles, and the Eichler-Shimura isomorphism.
\end{itemize}
These topics have been treated extensively in the literature. I have tried to condense the
key ideas into a presentation that requires as little background as possible.
With regard to the first item,
I give a complete proof of the Hitchin-Simpson theorem (Theorem \ref{thm:hitchin}) that combines techniques that have emerged since Hitchin's seminal paper \cite{Hitchin87a}. In the case of Riemann surfaces a direct proof for arbitrary rank which avoids introduction of the Donaldson functional can be modeled on Donaldson's proof of the Narasimhan-Seshadri theorem in \cite{Donaldson83} (such a proof was suggested in \cite{Simpson87}). Moreover, the Yang-Mills-Higgs flow can be used to extract minimizing sequences with desirable properties. A similar idea is used in the Corlette-Donaldson proof of the existence of equivariant harmonic maps (Theorem \ref{thm:corlette}). Indeed, I have sought in these notes to exhibit the parallel structure of the proofs of these two fundamental results.
Continuity of the two flows is the key to the relationship between the equivariant cohomology of the moduli space of semistable Higgs bundles on the one hand and the moduli space of representations on the other.
At first sight the last item in the list above is a rather different topic from the others, but it is nevertheless deeply related in ways that are perhaps still not completely understood. Opers
\cite{BeilinsonDrinfeld05} play an important role in the literature on the Geometric Langlands program \cite{Frenkel07}. My intention here is to give fairly complete proofs of the basic facts about opers and their relationship to differential equations and Higgs bundles (see also \cite{Simpson10}).
Due to the limited amount of time for the lectures
I have necessarily omitted many important aspects of this subject.
Two in particular are worth mentioning.
First, I deal only with vector bundles and do not
consider principal bundles with more general structure groups. For example, there is no discussion of representations into the various real forms of a complex Lie group. Since some of the other lectures at this introductory school will treat this topic in great detail I hope this omission will not be serious.
Second, I deal only with closed Riemann surfaces and
do not consider extra ``parabolic'' structures at marked points. In some sense this ignores an important aspect at the heart of the classical literature on holomorphic differential equations (cf.\ \cite{Simpson90, Boalch01}). Nevertheless, for the purposes of introducing the global structure of moduli spaces, I feel it is better to first treat the case of closed surfaces.
While much of the current research in the field is
directed toward the two generalizations above,
these topics are left for further reading.
I have tried to give references to essential results in these notes.
Any omissions or incorrect attributions are due solely to my own ignorance of the
extremely rich and vast literature, and
for these I extend my sincere apologies. Also,
there is no claim to originality of the proofs given here. A perusal of Carlos Simpson's foundational contributions to this subject is highly recommended for
anyone wishing to learn about Higgs bundles (see \cite{Simpson87, Simpson88, Simpson92, Simpson94a, Simpson94b}). In addition, the original articles of Corlette \cite{Corlette88}, Donaldson \cite{Donaldson83, Donaldson87}, and of course Hitchin \cite{Hitchin87a, Hitchin87b, Hitchin92} are indispensable. Finally, I also mention more recent survey articles \cite{BurgerIozziWienhard, BradlowGothenPrada, Guichard11} which treat especially the case of representations to general Lie groups.
I am grateful to the organizers, Luis \'Alvarez-C\'onsul,
Peter Gothen, and Ignasi Mundet i Riera, for inviting me to
give these lectures, and to the CRM for its hospitality. Additional thanks to Bill Goldman, Fran\c cois Labourie, Andy Sanders, and Graeme Wilkin for discussions related to the topics presented here, and to Beno\^ it Cadorel for catching several typos. The anonymous referee also made very useful suggestions, for which I owe my gratitude.
\centerline{\sc Notation}
\begin{itemize}
\item $X=$ a compact Riemann surface of genus $g\geq 2$.
\item $\pi=\pi_1(X,p)=$ the fundamental group of $X$.
\item $\mathbb H=$ the upper half plane in $\mathbb C$.
\item $\mathcal O=\mathcal O_X=$ the sheaf of germs of holomorphic functions on $X$.
\item $\mathcal K=\mathcal K_X=$ the canonical sheaf of $X$.
\item
$E=$ a complex vector bundle on $X$.
\item $H=$ a hermitian metric on $E$.
\item $\nabla=$ a connection on $E$.
\item $A$ (or $d_A$) $=$ a unitary connection on $(E, H)$.
\item $\mathcal C_E=$ the space of connections on a rank $n$ bundle $E$.
\item $\mathcal A_E=$ the space of unitary connections on $E$.
\item $\mathcal B_E=$ the space of Higgs bundles.
\item $\mathcal B_E^{ss}=$ the space of semistable Higgs bundles.
\item $\mathcal G_E$ (resp.\ $\mathcal G_E^\mathbb C$) $=$ the unitary (resp.\ complex) gauge group.
\item $\bar\partial_E=$ a Dolbeault operator on $E$, which is equivalent to a holomorphic structure.
\item $(\bar\partial_E,H)=$ the Chern connection.
\item $\mathcal E=$ sheaf of germs of holomorphic sections of a holomorphic bundle $(E,\bar\partial_E)$.
\item $\mathfrak g_E=$ the bundle of skew-hermitian endomorphisms of $E$.
\item $\End E=\mathfrak g_E^\mathbb C$ the endomorphism bundle of $E$.
\item
${\bf V}=$ a local system on $X$.
\item ${\bf V}_\rho=$ the local system associated to a representation $\rho:\pi\to \mathsf{GL}_n(\mathbb C)$.
\item $\underline R =$ the locally constant sheaf modeled on a ring $R$.
\item $L^p_k=$ the Sobolev space of functions/sections with
$k$ derivatives in $L^p$.
\item $C^{k,\alpha}=$ the space of functions/sections with $k$
derivatives being H\"older continuous with exponent $\alpha$.
\end{itemize}
\section{The Dolbeault Moduli Space} \label{sec:dolbeault}
\subsection{Higgs bundles}
\subsubsection{Holomorphic bundles and stability} \label{sec:stability}
Throughout these notes, $X$ will denote a closed Riemann
surface of genus $g\geq 2$ and $E\to X$ a complex
vector bundle. We begin with a discussion of the basic differential geometry of complex vector bundles. Good references for this material are Kobayashi's book \cite{Kobayashi87} and Griffiths and Harris \cite{GriffithsHarris78}.
A holomorphic structure on $E$ is equivalent
to a choice of {\bf $\bar\partial$-operator}, i.e.\ a $\mathbb C$-linear map
$$
\bar\partial_E: \Omega^0(X,E)\longrightarrow \Omega^{0,1}(X,E)
$$
satisfying the Leibniz rule: $\bar\partial_E(fs)=\bar\partial f\otimes
s+f\bar\partial_E s$, for a function $f$ and a section $s$ of $E$.
Indeed, if $\{s_i\}$ is a local holomorphic
frame of a holomorphic bundle, then the Leibniz rule uniquely determines the $\bar\partial$-operator on the underlying complex vector bundle. Conversely, since there is no integrability condition on Riemann surfaces, given a
$\bar\partial$-operator as defined above one
can always find local holomorphic frames (cf.\ \cite[\S 5]{AtiyahBott82}). When we want to specify the holomorphic
structure we write $(E,\bar\partial_E)$. We also introduce the
notation $\mathcal E$ for a sheaf of germs of holomorphic sections
of $(E,\bar\partial_E)$. We will sometimes confuse the terminology and call $\mathcal E$ a holomorphic bundle.
If $\mathcal S\subset\mathcal E$ is a holomorphic subbundle with
quotient $\mathcal Q$, then a smooth splitting $E=S\oplus Q$ allows
us to represent the $\bar\partial$-operators as
\begin{equation} \label{eqn:dbar}
\bar\partial_E=\left( \begin{matrix} \bar\partial_S&\beta\\
0&\bar\partial_Q\end{matrix}\right)
\end{equation}
where $\beta\in \Omega^{0,1}(X,\Hom(Q,S))$ is called the
{\bf second fundamental form}.
A hermitian metric $H$ on $E$ gives an orthogonal splitting.
In this case the subbundle $S$ is determined by its orthogonal projection operator $\pi$, which is an
endomorphism of $E$ satisfying
\begin{enumerate}
\item $\pi^2=\pi$;
\item $\pi^\ast=\pi$;
\item $\tr\pi$ is constant.
\end{enumerate}
The statement that $S\subset E$ be holomorphic is equivalent
to the further condition
$$
\hspace{-11.85cm} {\rm (iv)}\ (I-\pi)\bar\partial_E\,\pi=0\ .
$$
Notice that (i) and (iv) imply (iii), and that $\beta=-\bar\partial_E\pi$. Hence, there is a 1-1 correspondence between holomorphic subbundles of $\mathcal E$ and endomorphisms $\pi$ of the hermitian bundle $E$ satisfying conditions (i), (ii), and (iv). I should point out that the generalization of this description of holomorphic subsheaves to higher dimensions is a key idea of Uhlenbeck and Yau \cite{UhlenbeckYau86}.
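For the reader's convenience, here is the verification of the first of these assertions. Differentiating (i) gives $\bar\partial_E\pi=(\bar\partial_E\pi)\pi+\pi\,\bar\partial_E\pi$, while (iv) says $\bar\partial_E\pi=\pi\,\bar\partial_E\pi$; subtracting the two equations yields $(\bar\partial_E\pi)\pi=0$. Hence,
$$
\bar\partial(\tr\pi)=\tr(\bar\partial_E\pi)=\tr(\pi\,\bar\partial_E\pi)=\tr((\bar\partial_E\pi)\pi)=0\ ,
$$
so $\tr\pi$ is a holomorphic function on the compact surface $X$, and is therefore constant.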
A {\bf connection} $\nabla$ on $E$ is a $\mathbb C$-linear map
$$
\nabla: \Omega^0(X,E)\longrightarrow \Omega^{1}(X,E)\ ,
$$
satisfying the Leibniz rule: $\nabla(fs)=df\otimes
s+f\nabla s$, for a function $f$ and a section $s$.
Given a hermitian metric $H$, we call a connection {\bf unitary}
(and we will always then denote it by $A$ or $d_A$)
if it preserves $H$, i.e.
\begin{equation} \label{eqn:unitary}
d\langle s_1, s_2\rangle_H=\langle d_A s_1, s_2\rangle_H
+\langle s_1, d_A s_2\rangle_H\ .
\end{equation}
The curvature of a connection $\nabla$
is $F_\nabla=\nabla^2$ (perhaps more precise notation: $\nabla\wedge \nabla$).
If $\mathfrak g_E$ denotes the bundle of skew-hermitian endomorphisms of $E$ and $\mathfrak g_E^\mathbb C$ its complexification, then $F_A\in \Omega^2(X,\mathfrak g_E)$ for a unitary connection, and $F_\nabla\in \Omega^2(X,\mathfrak g_E^\mathbb C)$ in general.
\begin{remark} \label{rem:traceless}
We will mostly be dealing with connections on bundles that induce a fixed connection on the determinant bundle. These will correspond, for example, to representations into $\mathsf{SL}_n$ as opposed to $\mathsf{GL}_n$. In this case, the bundles $\mathfrak g_E$ and $\mathfrak g_E^\mathbb C$ should be taken to consist of traceless endomorphisms.
\end{remark}
Finally, note that a connection always induces a
$\bar\partial$-operator by taking its $(0,1)$ part. Conversely, a
$\bar\partial$-operator gives a unique unitary connection, called the
{\bf Chern connection}, which we will sometimes denote by
$d_A=(\bar\partial_E, H)$. The complex structure on $X$ splits $\Omega^1(X)$ into $(1,0)$ and $(0,1)$ parts, and hence also splits the connections. We denote these by, for example, $d'_A$ and $d''_A$, respectively. So for $d_A=(\bar\partial_E,H)$, $d''_A=\bar\partial_E$, and $d'_A$ is determined by
$
\partial\langle s_1,s_2\rangle_H=\langle d_A's_1, s_2\rangle_H
$,
for any pair of holomorphic sections $s_1$, $s_2$. Henceforth, I will mostly omit $H$ from the notation if there is no chance of confusion.
\begin{example} \label{ex:line-bundle-curvature}
Let $\mathcal L$ be a holomorphic line bundle with hermitian metric $H$. For a local holomorphic frame $s$ write $H_s=|s|^2$.
Then $F_{(\bar\partial_L,H)}=\bar\partial\partial\log H_s$, and the right hand side is independent of the choice of frame.
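Indeed, if $s'=gs$ is another local holomorphic frame, where $g$ is a nowhere vanishing holomorphic function, then $H_{s'}=|g|^2H_s$ and (locally, choosing a branch of the logarithm)
$$
\bar\partial\partial\log H_{s'}=\bar\partial\partial\log H_s+\bar\partial\partial\log g+\bar\partial\partial\log\bar g=\bar\partial\partial\log H_s\ ,
$$
since $\partial\log g$ is a holomorphic $(1,0)$-form and $\partial\log\bar g=0$.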
\end{example}
The transition functions of a collection of local trivializations of a holomorphic line bundle on the open sets of a covering of $X$ give a $1$-cocycle
with values in the sheaf $\mathcal O^\ast$ of germs of nowhere vanishing holomorphic functions. The set of isomorphism classes of line bundles is then $H^1(X,\mathcal O^\ast)$.
Recall that on a compact Riemann surface every holomorphic
line bundle has a meromorphic section. This
gives an equivalence between the categories of holomorphic
line bundles under tensor products and linear equivalence classes
of divisors $\mathcal D=\sum_{x\in X}m_x x$
with their additive structure (here $m_x\in \mathbb Z$ is zero for all but finitely many $x\in X$).
We shall denote by $\mathcal O (\mathcal D)$ the line
bundle thus associated to $\mathcal D$.
Furthermore, a divisor has a {\bf degree}, $\deg\mathcal D=\sum_{x\in X}m_x$.
We define this to be the degree of $\mathcal O(\mathcal D)$. Alternatively, from the exponential sequence
$$
0\longrightarrow \underline{\mathbb Z}\longrightarrow \mathcal O \stackrel{f\mapsto e^{2\pi i f}}{\xrightarrow{\hspace*{1.5cm}}}\mathcal O ^\ast \longrightarrow 0\ ,$$
we have the long exact sequence in cohomology:
$$
0\longrightarrow H^1(X,\mathbb Z)\longrightarrow H^1(X,\mathcal O)\longrightarrow H^1(X,\mathcal O^\ast)
\stackrel{c_1}{\xrightarrow{\hspace*{.75cm}}}
H^2(X,\mathbb Z)\longrightarrow 0\ .
$$
The fundamental class of $X$ identifies $H^2(X,\mathbb Z)\cong\mathbb Z$, and it is a standard exercise to show that under this identification: $\deg(\mathcal D)=c_1(\mathcal O(\mathcal D))$.
For a holomorphic vector bundle $\mathcal E$, we declare the degree $\deg\mathcal E :=\deg\det\mathcal E$.
Notice that the degree is topological, i.e.\ it does not depend on the holomorphic structure, just on the underlying complex bundle $E$. By the Chern-Weil theory, for any hermitian metric $H$ on $\mathcal E$ we have
\begin{equation} \label{eqn:chern-weil}
c_1(E)=\left[ \frac{\sqrt{-1}}{2\pi}\tr F_{(\bar\partial_E,H)}\right] =\left[ \frac{\sqrt{-1}}{2\pi}F_{(\bar\partial_{\det E},\det H)}\right] \ .
\end{equation}
Complex vector bundles on Riemann surfaces are classified topologically by their rank and degree.
We will also make use of the {\bf slope} (or normalized degree) of a bundle, which is defined by the ratio $\mu(E)=\deg E/\rank E$.
If a line bundle $\mathcal L=\mathcal O(\mathcal D)$ has a nonzero holomorphic section, then since $\mathcal D$ is linearly equivalent to an effective divisor (i.e.\ one with $m_x\geq 0$ for all $x$), $\deg\mathcal L\geq 0$. It follows that if $\mathcal E$ is a holomorphic vector bundle with a subsheaf $\mathcal S\subset\mathcal E$ and $\rank\mathcal S=\rank\mathcal E$, then $\deg\mathcal S\leq \deg\mathcal E$. Indeed, the assumption implies $\det\mathcal E\otimes (\det\mathcal S)^\ast$ has a nonzero holomorphic section. We will use this fact later on.
Notice that in the case above, $\mathcal Q=\mathcal E/\mathcal S$ is a torsion sheaf. In general, for any subsheaf $\mathcal S\subset \mathcal E$ of a holomorphic vector bundle, $\mathcal S$ is contained in a uniquely defined holomorphic subbundle $\mathcal S'$ of $\mathcal E$ called the {\bf saturation} of $\mathcal S$. It is obtained by taking the kernel of the induced map $\mathcal E\to \mathcal Q/{\rm Tor}(\mathcal Q)\to 0$. From this discussion we conclude that $\deg\mathcal S$ is no greater than the degree $\deg\mathcal S'$ of its saturation.
Let $\omega$ be the K\"ahler form associated to a choice of conformal metric on $X$. This will be fixed throughout, and for convenience we normalize
so that
$$\int_X\omega=2\pi\ .$$
The contraction operator
$\Lambda: \Omega^2(X)\to \Omega^0(X)$ is defined by setting $\Lambda(f\omega)=f$ for any function $f$.
For a holomorphic subbundle $\mathcal S$ of a hermitian holomorphic bundle $\mathcal E$ with
projection operator $\pi$ we have the following useful formula, which follows easily from direct calculation using \eqref{eqn:chern-weil}.
\begin{equation} \label{eqn:degree}
\deg\mathcal S=\frac{1}{2\pi}\int_X\tr(\pi \sqrt{-1}\Lambda F_{(\bar\partial_E,H)})\, \omega
-\frac{1}{2\pi}\int_X |\beta|^2\, \omega\ .
\end{equation}
\begin{definition} \label{def:stability}
We say that $\mathcal E$ is {\bf stable}
(resp.\ {\bf semistable}) if for all
holomorphic subbundles $\mathcal S\subset \mathcal E$,
$0<\rank\mathcal S<\rank\mathcal E$,
we have $\mu(\mathcal S)<\mu(\mathcal E)$ (resp.\ $\mu(\mathcal S)\leq\mu(\mathcal E)$). We call $\mathcal E$ {\bf polystable} if it is a direct sum of stable bundles of the same slope.
\end{definition}
\begin{remark} \label{rem:tensor}
Line bundles are trivially stable. If $\mathcal E$ is (semi)stable and $\mathcal L$ is a line bundle, then $\mathcal E\otimes\mathcal L$ is also (semi)stable.
\end{remark}
Before giving an example, recall the notion of an extension
\begin{equation} \label{eqn:extension}
0\longrightarrow \mathcal S\longrightarrow\mathcal E\longrightarrow\mathcal Q\longrightarrow 0\ .
\end{equation}
The {\bf extension class} is
the image of the identity endomorphism under the coboundary map of the long exact sequence associated to \eqref{eqn:extension}
$$
H^0(X,\mathcal Q\otimes\mathcal Q^\ast)\longrightarrow H^1(X, \mathcal S\otimes \mathcal Q^\ast) \ .
$$
Notice that the isomorphism class of the bundle
$\mathcal E$ is unchanged under scaling, so the extension class (if not zero) should
be regarded as an element of the projective space $\mathbb P(H^1(X, \mathcal S\otimes \mathcal Q^\ast)
)$.
It is then an exercise to see that in terms of the second
fundamental form $\beta$, the extension class coincides
(projectively) with
the corresponding Dolbeault cohomology class $[\beta]\in
H^{0,1}_{\bar\partial}(X,S\otimes Q^\ast)$.
We
say that \eqref{eqn:extension} is {\bf split} if
the extension class is zero. Clearly, this occurs if and only if there is an injection $\mathcal Q\to \mathcal E$ lifting the projection.
\begin{example}
Suppose $g\geq 1$.
Consider extensions of the type
$$
0\longrightarrow\mathcal O \longrightarrow\mathcal E\longrightarrow\mathcal O (p)\longrightarrow 0\ .
$$
These are parametrized by $H^1(X, \mathcal O(-p))\cong H^0(X, \mathcal K(p))^\ast \cong H^0(X,\mathcal K )^\ast$, which has dimension $g$. Any non-split extension of this type is stable. Indeed, if $\mathcal L\hookrightarrow\mathcal E$ is a destabilizing line subbundle, then $\deg\mathcal L\geq 1$.
The induced map $\mathcal L\to \mathcal O(p)$ cannot be zero, for otherwise the inclusion $\mathcal L\hookrightarrow \mathcal E$ would factor through $\mathcal O\subset\mathcal E$, giving a nonzero map $\mathcal L\to\mathcal O$, which is impossible since $\deg\mathcal L\geq 1$.
Hence, $\mathcal L\to\mathcal O (p)$ is a nonzero map of line bundles with $\deg\mathcal L\geq\deg\mathcal O(p)$, and so it must be an isomorphism. Such an $\mathcal L$ would therefore split the extension, contrary to assumption.
\end{example}
A connection is {\bf flat} if its curvature vanishes. We say
that $\nabla$ is {\bf projectively flat} if
$\sqrt{-1}\Lambda F_\nabla=\mu$,
where $\mu$ is a constant (multiple of the identity).
Note that by our normalization of the area, $\mu=\mu(E)$.
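To spell this out: taking the trace of $\sqrt{-1}\Lambda F_\nabla=\mu\cdot I$, integrating against $\omega$, and using the fact that, as in \eqref{eqn:chern-weil}, $\deg E=\frac{\sqrt{-1}}{2\pi}\int_X\tr F_\nabla$ for any connection $\nabla$ on $E$, we obtain
$$
2\pi\deg E=\int_X\tr\left(\sqrt{-1}\Lambda F_\nabla\right)\omega=\mu\,(\rank E)\int_X\omega=2\pi\,\mu\,\rank E\ ,
$$
and hence $\mu=\mu(E)$.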
In Section 4, we will prove Weil's criterion for when a
holomorphic bundle $\mathcal E$ admits a flat connection (i.e. $\nabla''=\bar\partial_E$, $F_\nabla=0$). Demanding that the connection be unitary imposes stronger conditions. This is the famous result of
Narasimhan-Seshadri.
\begin{theorem}[Narasimhan-Seshadri {\cite{NarasimhanSeshadri65}}] \label{thm:narasimhan-seshadri}
A holomorphic bundle $\mathcal E\to X$ admits a projectively
flat unitary connection if and only if $\mathcal E$ is polystable.
\end{theorem}
\noindent
In Section \ref{sec:hitchin} we will prove Theorem \ref{thm:narasimhan-seshadri} as a special case of the more general result on Higgs bundles (see Theorem \ref{thm:hitchin}).
\subsubsection{Higgs fields} \label{sec:higgs-fields}
A {\bf Higgs bundle} is a pair $(\mathcal E,\Phi)$ where $\mathcal E$ is
a holomorphic bundle and $\Phi$ is a holomorphic section of
$\mathcal K\otimes\End\mathcal E$. We will sometimes regard $\Phi$ as a
section of $\Omega^{1,0}(X,\mathfrak g_E^\mathbb C)$ satisfying $\bar\partial_{E}\Phi=0$.
\begin{definition} \label{def:higgs-stability}
We say that a pair $(\mathcal E,\Phi)$ is {\bf stable}
(resp.\ {\bf semistable})
if for all $\Phi$-invariant holomorphic subbundles
$\mathcal S\subset \mathcal E$, $0<\rank\mathcal S<\rank\mathcal E$, we
have $\mu(\mathcal S)<\mu(\mathcal E)$ (resp.\ $\mu(\mathcal S)\leq\mu(\mathcal E)$).
It is {\bf polystable} if it is a direct sum of Higgs bundles
of the same slope.
\end{definition}
The following is a simple but useful consequence of the definition and the additive properties of the slope on exact sequences.
\begin{lemma} \label{lem:semistable}
Let $f:(\mathcal E_1,\Phi_1)\to (\mathcal E_2,\Phi_2)$ be a holomorphic homomorphism of Higgs bundles, $\Phi_2 f=f\Phi_1$. Suppose $(\mathcal E_i,\Phi_i)$ is semistable, $i=1,2$, and $\mu(\mathcal E_1)>\mu(\mathcal E_2)$. Then $f\equiv 0$. If $\mu(\mathcal E_1)=\mu(\mathcal E_2)$ and one of the two is stable, then either $f\equiv 0$ or $f$ is an isomorphism.
\end{lemma}
\begin{proof}
Consider the first statement. Then if $f\not\equiv 0$, the assumption $\Phi_2 f=f\Phi_1$ implies that the image of $f$ is $\Phi_2$-invariant, so by the condition on slopes
$f$ must have a kernel. But then $\ker f$ is $\Phi_1$-invariant, and so $\mu(\ker f)\leq \mu(\mathcal E_1)\leq \mu(\mathcal E_1/\ker f)\leq \mu(\mathcal E_2)$, contradicting the assumption $\mu(\mathcal E_1)>\mu(\mathcal E_2)$.
The second statement follows similarly.
\end{proof}
A {\bf Higgs subbundle} of $(\mathcal E,\Phi)$ is by definition a holomorphic subbundle $\mathcal S\subset \mathcal E$ that is $\Phi$-invariant. The restriction $\Phi_\mathcal S$ of $\Phi$ to $\mathcal S$ then makes $(\mathcal S,\Phi_\mathcal S)$ a Higgs bundle, where now the inclusion $\mathcal S\hookrightarrow\mathcal E$ gives a map of Higgs bundles. Similarly, $\Phi$ induces a Higgs bundle structure on the quotient $\mathcal Q=\mathcal E/\mathcal S$.
Given an arbitrary Higgs bundle,
the {\bf Harder-Narasimhan filtration of $(\mathcal E,\Phi)$}
is a filtration by Higgs subbundles
$$
0=(\mathcal E_0,\Phi_0)\subset (\mathcal E_1,\Phi_1)\subset\cdots\subset
(\mathcal E_\ell,\Phi_\ell)=(\mathcal E,\Phi)\ ,
$$
such that the quotients $(\mathcal Q_i, \Phi_{Q_i})=
(\mathcal E_i,\Phi_i)/(\mathcal E_{i-1}, \Phi_{i-1})$ are semistable (cf.\ \cite{HarderNarasimhan74}).
The filtration is also required to satisfy $\mu(\mathcal Q_i)>\mu(\mathcal Q_{i+1})$,
and one can show that the associated graded object
$
{\rm Gr}_{HN}(\mathcal E,\Phi)=\oplus_{i=1}^\ell (\mathcal Q_i, \Phi_{Q_i})
$
is uniquely determined by the isomorphism class of
$(\mathcal E,\Phi)$.
The collection of slopes $\mu_i=\mu(\mathcal Q_i)$
is an important invariant of the isomorphism class of the
Higgs bundle.
\begin{remark} \label{rem:maximal}
By construction, \emph{$\mu_i$ is the maximal slope of a Higgs subbundle of $\mathcal E/\mathcal E_{i-1}$} with its induced Higgs field. We can also interpret \emph{$\mu_i$ as the minimal slope of a Higgs quotient of $(\mathcal E_i,\Phi_i)$}. Indeed, $(\mathcal E_1,\Phi_1)$ is semistable, so this is trivially true if $i=1$. Suppose $\mathcal E_i\to \mathcal Q\to 0$ is a Higgs quotient for $i\geq 2$ and $\mu(\mathcal Q)\leq \mu_i$. If $\mathcal Q$ is the minimal such quotient, then it is semistable with respect to the induced Higgs field.
It follows from Lemma \ref{lem:semistable} that the induced map $\mathcal E_1\to \mathcal Q$ must vanish. Hence, the quotient passes to $\mathcal E/\mathcal E_1\to \mathcal Q\to 0$. Now by the same argument, $\mathcal E_2/\mathcal E_1\to \mathcal Q$ vanishes if $i\geq 3$.
Continuing in this way, we obtain a quotient $\mathcal Q_i\to \mathcal Q\to 0$. Now since $(\mathcal Q_i,\Phi_{\mathcal Q_i})$ is semistable and the quotient is nonzero, applying Lemma \ref{lem:semistable} once again, we conclude that $\mu_i\leq \mu(\mathcal Q)$.
\end{remark}
Consider the $n$-tuple of numbers $\vec\mu(\mathcal E,\Phi)=(\mu_1,\ldots, \mu_n)$ obtained
from the Harder-Narasimhan filtration by repeating each of the
$\mu_i$'s according to the ranks of the $\mathcal Q_i$'s.
We then get a vector $\vec\mu(\mathcal E, \Phi)$, called the
{\bf Harder-Narasimhan type} of $(\mathcal E,\Phi)$.
There is a natural partial
ordering on vectors of this type that is key to the stratification we desire.
For a pair $\vec\mu$, $\vec\lambda$ of $n$-tuples satisfying
$\mu_1\geq\cdots\geq\mu_n$, $\lambda_1\geq
\cdots\geq\lambda_n$, and $\sum_{i=1}^n\mu_i=\sum_{i=1}^n\lambda_i$, we define
$$
\vec\lambda\leq\vec\mu \quad \iff\quad \sum_{j\leq k}\lambda_j\leq\sum_{j\leq k}\mu_j\quad\text{for all}\ k=1,\ldots, n\ .
$$
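For instance, for rank two Higgs bundles of degree zero the possible Harder-Narasimhan types are $\vec\mu=(m,-m)$ with $m$ a nonnegative integer (for $m>0$, $m$ is the degree of the maximal destabilizing Higgs sub-line bundle), and $(m,-m)\leq (m',-m')$ precisely when $m\leq m'$; the semistable Higgs bundles are exactly those of minimal type $(0,0)$.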
The importance of this ordering is that it defines a stratification
of the space of Higgs bundles. In particular, the Harder-Narasimhan type is upper semicontinuous.
This is the direct analog of the Atiyah-Bott stratification
for holomorphic bundles \cite[\S 7]{AtiyahBott82}.
There is a similar filtration of a semistable Higgs bundle
$(\mathcal E,\Phi)$,
where the successive quotients are stable, all with
slope $=\mu(E)$. This is called the {\bf Seshadri filtration} \cite{Seshadri67}
and its associated graded ${\rm Gr}_S(\mathcal E,\Phi)$ is therefore
polystable. When $\Phi\equiv 0$, we recover the usual Harder-Narasimhan and Seshadri filtrations of holomorphic bundles $\mathcal E$. We will denote these by ${\rm Gr}_{HN}(\mathcal E)$ and ${\rm Gr}_{S}(\mathcal E)$.
\begin{example}
Consider an extension \eqref{eqn:extension}
where $\rank\mathcal S=\rank\mathcal Q=1$ and $\deg\mathcal S>\deg\mathcal Q$.
Then the Harder-Narasimhan filtration of $\mathcal E$ is given by $0\subset \mathcal S\subset\mathcal E$.
\end{example}
\subsection{The moduli space}
\subsubsection{Gauge transformations} \label{sec:gauge}
Let $\mathcal A_E$ denote the space of unitary connections on a rank $n$ hermitian vector bundle $E$. If $\mathfrak g_E$ denotes the associated bundle of skew-hermitian endomorphisms of $E$, then one observes from the Leibniz rule that $\mathcal A_E$ is an infinite dimensional affine space modeled on $\Omega^1(X,\mathfrak g_E)$. By the construction of the Chern connection discussed in Section \ref{sec:stability}, we also see that $\mathcal A_E$ can be identified with the space of holomorphic structures on $E$. We will most often be interested in the case of \emph{fixed determinant}, i.e. where the induced holomorphic structure on $\det E$ is fixed.
The {\bf gauge group} is defined by
$$
\mathcal G_E=\{ g\in \Omega^0(X,\End E) : gg^\ast=I\}\ .
$$
In the fixed determinant case we also impose the condition that $\det g=1$ (see Remark \ref{rem:traceless}).
The gauge group acts on $\mathcal A_E$ by pulling back connections: $d_{g(A)}=g\circ d_A \circ g^{-1}$. On the other hand, because of the identification with holomorphic structures we see that the complexification $\mathcal G_E^\mathbb C$, the {\bf complex gauge group}, also acts on $\mathcal A_E$. Explicitly, if $\bar\partial_E=d''_A$, then $g(A)$ is the Chern connection of $g\circ \bar\partial_E \circ g^{-1}$.
The space of Higgs bundles is
$$
\mathcal B_E=\{ (A,\Phi)\in \mathcal A_E \times \Omega^0(X, K\otimes \mathfrak g_E^\mathbb C) : d''_A\Phi=0\}\ .
$$
Let $\mathcal B_E^{ss}\subset \mathcal B_E$ denote the subset of semistable Higgs bundles.
\begin{definition} \label{def:dolbeault}
The moduli space of rank $n$ semistable Higgs bundles (with fixed determinant) on $X$ is
$
\mathfrak M_D^{(n)}=\mathcal B^{ss}_E\bigr/ \negthinspace\negthinspace \bigr/\mathcal G_E^\mathbb C
$,
where the double slash means that the orbits of $(\mathcal E,\Phi)$ and ${\rm Gr}_S(\mathcal E,\Phi)$ are identified.
\end{definition}
We have not been careful about topologies. In fact, $\mathfrak M_D^{(n)}$ can be given the structure of a (possibly nonreduced) complex analytic space using the Kuranishi map (cf.\ \cite{Kobayashi87}). An algebraic construction using geometric invariant theory is given in \cite{Simpson94a}.
A second comment is that $\mathcal G_E^\mathbb C/\mathcal G_E$ may be identified with the space of hermitian metrics on $E$. This leads to an important interpretation when studying the behavior of functionals along $\mathcal G_E^\mathbb C$ orbits in $\mathcal A_E/\mathcal G_E$: we may either think of varying the complex structure $g(\bar\partial_E)$ with a fixed hermitian metric, or we may keep $\bar\partial_E$ fixed and vary the metric $H$ by
$\langle s_1, s_2\rangle_{g(H)}=\langle gs_1, gs_2\rangle_H$.
\subsubsection{Deformations of Higgs bundles}
Let $D''=d''_A+\Phi$, $D'=d'_A+\Phi^\ast$. The metric $\omega$ on $X$ and the hermitian metric on $E$ define $L^2$-inner products on forms with values in $E$ and $\End E$.
We have the K\"ahler identities
\begin{align}
\begin{split} \label{eqn:kahler}
(D'')^\ast&=-\sqrt{-1}[\Lambda, D'] \ ;\\
(D')^\ast&=\sqrt{-1}[\Lambda, D'']\ ,
\end{split}
\end{align}
(see \cite[p.\ 111]{GriffithsHarris78} for the case $\Phi= 0$; the case $\Phi\neq 0$ follows by direct computation).
The infinitesimal structure of the moduli space is governed by a deformation complex $C(A,\Phi)$, which is obtained by differentiating the condition $d''_A\Phi=0$ and the action of the gauge group.
\begin{equation} \label{eqn:deformation-complex}
C(A,\Phi)\ :\
0\longrightarrow \Omega^0(X,\mathfrak g_E^\mathbb C)
\stackrel{D''}{\xrightarrow{\hspace*{.5cm}}}
\Omega^{1,0}(X,\mathfrak g_E^\mathbb C)\oplus
\Omega^{0,1}(X,\mathfrak g_E^\mathbb C)
\stackrel{D''}{\xrightarrow{\hspace*{.5cm}}}
\Omega^{1,1}(X,\mathfrak g_E^\mathbb C)\to 0\ .
\end{equation}
Note that the holomorphicity condition on $\Phi$ guarantees that $(D'')^2=0$.
Serre duality gives an isomorphism $H^0(C(A,\Phi))\simeq H^2(C(A,\Phi))$. We call a Higgs bundle {\bf simple} if $H^0(C(A,\Phi))\simeq \mathbb C$ (or $\{0\}$ in the fixed determinant case).
\begin{remark} \label{rem:simple}
A stable Higgs bundle is necessarily simple.
Indeed, if $\phi\in \ker D''$, then $\phi$ is a holomorphic
endomorphism of $\mathcal E$ commuting with $\Phi$. In particular,
$\det\phi$ is a holomorphic function and is therefore constant. Also,
$\ker\phi$ is $\Phi$-invariant.
If $\phi$ is nonzero but not an isomorphism, consider the exact sequence
$$
0\longrightarrow \ker\phi\longrightarrow \mathcal E\longrightarrow \mathcal E/\ker\phi\longrightarrow 0\ .
$$
Since $\mathcal E/\ker\phi$ is isomorphic to the image of $\phi$, and hence is also a $\Phi$-invariant subsheaf of $\mathcal E$,
stability implies that $\mu(\ker\phi)$ and $\mu(\mathcal E/\ker\phi)$ are both less than $\mu(\mathcal E)$. This is a contradiction, since $\mu(\mathcal E)$ is a weighted average of $\mu(\ker\phi)$ and $\mu(\mathcal E/\ker\phi)$.
Hence, $\phi$ is either zero or an isomorphism. Applying the same argument to $\phi-\lambda$, where $\lambda$ is an eigenvalue of $\phi$ at some point of $X$ (so that $\phi-\lambda$ is not an isomorphism), we conclude that $\phi$ is a multiple of the identity.
\end{remark}
\begin{proposition} \label{prop:tangent-space}
At a simple Higgs bundle $[A, \Phi]$,
$\mathfrak M_D^{(n)}$ is smooth of complex dimension \break $(n^2-1)(2g-2)$, and the tangent space may be identified with
\begin{equation} \label{eqn:h1}
H^1(C(A,\Phi))\simeq\left\{ (\varphi,\beta) : d''_A\varphi=-[\Phi,\beta]\ ,\ (d''_A)^\ast\beta=\sqrt{-1}\Lambda[\Phi^\ast,\varphi]\right\}\ .
\end{equation}
\end{proposition}
\begin{example} (cf.\ \cite{Hitchin87a, Hitchin92})
We now give important examples of stable Higgs bundles; namely, the \emph{Fuchsian} ones. First consider rank $2$. Fix a choice of square root $\mathcal K^{1/2}$ of the canonical bundle, and let $\mathcal E=\mathcal K^{1/2}\oplus \mathcal K^{-1/2}$. Then the part of the endomorphism bundle that sends $\mathcal K^{1/2}\to \mathcal K^{-1/2}$ is isomorphic to $\mathcal K^{-1}$. Tensoring by $\mathcal K$, it becomes trivial. Hence, the matrix
$$
\Phi=\left(\begin{matrix} 0&0\\ 1&0\end{matrix}\right)\ ,
$$
makes sense as a Higgs field, and it is clearly holomorphic. While $\mathcal E$ is unstable as a holomorphic vector bundle,
the Higgs bundle $(\mathcal E,\Phi)$ is stable, since the only $\Phi$-invariant sub-line bundle is $\mathcal K^{-1/2}$ which has negative degree.
Let us remark in passing that if we consider a different holomorphic structure $\mathcal V$
on $E$ given by the $\bar\partial$-operator
$$\bar\partial_E+\Phi^\ast=\left(\begin{matrix} \bar\partial_{\mathcal K^{1/2}} & \omega\\0& \bar\partial_{\mathcal K^{-1/2}}\end{matrix}\right)\ ,
$$
then $\mathcal V$ is the unique (up to isomorphism) non-split extension
$$
0\longrightarrow\mathcal K^{1/2}\longrightarrow\mathcal V\longrightarrow\mathcal K^{-1/2}\longrightarrow 0\ .
$$
We now compute the tangent space $\mathfrak M_D^{(2)}$ at $[(\mathcal E,\Phi)]$. Write
$$
\beta=\left(\begin{matrix} b&b_1\\ b_2&-b\end{matrix}\right)\qquad ,\qquad
\varphi=\left(\begin{matrix} \phi&\phi_1\\ \phi_2&-\phi\end{matrix}\right)\ ,
$$
and compute
$$
[\Phi,\beta]=\left(\begin{matrix} -b_1&0\\ 2b&b_1\end{matrix}\right) \qquad ,\qquad
\sqrt{-1}\Lambda[\Phi^\ast,\varphi]=\left(\begin{matrix} \phi_2&-2\phi\\ 0&-\phi_2\end{matrix}\right)\ .
$$
Then the conditions \eqref{eqn:h1} that $(\beta,\varphi)$ define a tangent vector are
$$
\bar\partial_E\varphi=\left(\begin{matrix} b_1&0\\ -2b&-b_1\end{matrix}\right) \qquad ,\qquad
\bar\partial_E^\ast\beta=\left(\begin{matrix} \phi_2&-2\phi\\ 0&-\phi_2\end{matrix}\right)\ .
$$
In particular, $\phi_1\in H^0(X,\mathcal K^2)$ and $b_2\in H_{\bar\partial}^{0,1}(X,K^\ast)\simeq H^0(X,\mathcal K^2)^\ast$. I claim that the other entries vanish. Indeed, the equations for $\phi$ and $b_1$ are
$
\bar\partial\phi=b_1
$, and $\bar\partial^\ast b_1=-2\phi$. But this implies $(\bar\partial^\ast\bar\partial+2)\phi=0$. Hence, $\phi$, and therefore also $b_1$, must vanish. The same argument works for $\phi_2$ and $b$. We therefore have an isomorphism
$$
T_{[\mathcal E_F,\Phi_F]}\mathfrak M_D^{(2)}\simeq H^0(X,\mathcal K^2)\oplus (H^0(X,\mathcal K^2))^\ast\ .
$$
For general $n$, there is a similar argument. Here we take
$$
\mathcal E_F=\mathcal K^{(n-1)/2}\oplus \mathcal K^{(n-3)/2}\oplus \cdots\oplus \mathcal K^{-(n-1)/2}\ ,
$$
and
$$
\Phi_F=\left(\begin{matrix} 0 & 0 & 0 & \cdots & 0 \\
1 & 0 & 0 & \cdots & \vdots \\
0&1&0&\cdots&\vdots \\
\vdots && \ddots &\ddots & \vdots \\
0&\cdots &0&1&0
\end{matrix}\right)\ .
$$
Notice that with respect to this splitting the $(ij)$ entry of $\varphi$ is a section of $\mathcal K^{j-i+1}$,
and the $(ij)$ entry of $\beta$ is in $\Omega^{0,1}(X,K^{j-i})$.
We obtain the following equations on the entries of a tangent vector $(\beta,\varphi)$,
\begin{align}
\begin{split} \label{eqn:phi-beta}
\bar\partial_E\varphi_{ij} & = \beta_{i-1,j}-\beta_{i,j+1}\ ; \\
\bar\partial_E^\ast\beta_{ij} & = \varphi_{i,j-1}-\varphi_{i+1,j} \ ,
\end{split}
\end{align}
where it is understood that terms with indices $\leq 0$ or $\geq n+1$ are set to zero.
Upon further differentiation as in the $n=2$ case, we find
\begin{align}
\begin{split} \label{eqn:L}
(L-\delta_{i1}-\delta_{jn})\varphi_{ij} & =\varphi_{i+1,j+1}+\varphi_{i-1,j-1} \ ; \\
(\widetilde L-\delta_{in}-\delta_{j1})\beta_{ij} & = \beta_{i+1,j+1}+\beta_{i-1,j-1}\ ,
\end{split}
\end{align}
where $L=\bar\partial_E^\ast\bar\partial_E+2$ and $\widetilde L=\bar\partial_E\bar\partial_E^\ast+2$.
I claim that $\varphi_{ij}=0$ (resp.\ $\beta_{ij}=0$) for $i\geq j$ (resp.\ $i\leq j$).
For example, by \eqref{eqn:L}, $L\varphi_{n1}=0$, and
since $L$ is a positive operator, $\varphi_{n1}$ vanishes.
More generally, fix $ 0\leq p\leq n-2$. Then for $0\leq \ell\leq n-p-1$, there are polynomials $P_\ell$ such that
\begin{equation} \label{eqn:kl}
\varphi_{p+\ell+1, \ell+1}=P_\ell(L)\varphi_{p+1,1}\ .
\end{equation}
Indeed, let $P_0(L)=1$, $P_1(L)=L$ if $p\neq 0$ and $P_1(L)=L-1$ if $p=0$. Suppose $P_k(L)$ has been defined for $0\leq k\leq \ell$, where $0<\ell< n-p-1$. Use \eqref{eqn:L} and \eqref{eqn:kl} to find:
\begin{align*}
L\varphi_{p+\ell+1,\ell+1}&= \varphi_{p+\ell+2, \ell+2}+ \varphi_{p+\ell, \ell} \\
LP_\ell(L) \varphi_{p+1, 1}&= \varphi_{p+\ell+2, \ell+2}+P_{\ell-1}(L) \varphi_{p+1,1}\ .
\end{align*}
Hence, we let $P_{\ell+1}(L)= LP_\ell(L)-P_{\ell-1}(L)$.
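For instance, when $p\neq 0$ the recursion produces $P_0(L)=1$, $P_1(L)=L$, $P_2(L)=L^2-1$, $P_3(L)=L^3-2L$, and so on; formally, $P_\ell(L)=U_\ell(L/2)$, where $U_\ell$ denotes the $\ell$-th Chebyshev polynomial of the second kind.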
Since $L\geq 2$, we see from the recursive definition that $P_{\ell+1}(L)\geq P_{\ell}(L)$, and
hence for all $\ell\geq 1$, $P_{\ell}(L)\geq P_1(L)\geq1$, and $\geq 2$ if $p\neq 0$.
Taking $\ell=n-p-1$ in \eqref{eqn:kl}, we have
\begin{equation} \label{eqn:kn}
\varphi_{n,n-p}=P_{n-p-1}(L)\varphi_{p+1,1}\ .
\end{equation}
On the other hand, a similar argument implies
$
\varphi_{p+1,1}=P_{n-p-1}(L)\varphi_{n,n-p}
$,
from which we obtain
$$
0= (P^2_{n-p-1}(L)-1)\varphi_{p+1, 1}=(P_{n-p-1}(L)+1)(P_{n-p-1}(L)-1)\varphi_{p+1, 1}\ .
$$
Hence, $\varphi_{p+1,1}$ is in the kernel of $P_{n-p-1}(L)-1$. But then by the remark above, for $p\geq 1$, $\varphi_{p+1,1}$ must vanish.
Since $p\geq 1$ is arbitrary, this implies by \eqref{eqn:kl} that $\varphi_{ij}=0$ for all $i>j$.
In the case $p=0$, notice that for all $\ell\geq 1$, $P_\ell(L)$ is a polynomial of positive degree in $\bar\partial_E^\ast\bar\partial_E$ with nonnegative coefficients and constant term $=1$.
Indeed, by the definition
$$
P_{\ell+1}(L)-P_\ell(L)=(\bar\partial_E^\ast\bar\partial_E)P_\ell(L)+ P_{\ell}(L)-P_{\ell-1}(L)\ ,
$$
and so by induction $P_{\ell+1}(L)-P_\ell(L)$ has nonnegative coefficients and zero constant term.
In this case, $(P_{n-1}(L)-1)\varphi_{1,1}=0$ implies that $\varphi_{1,1}$ is holomorphic. Using \eqref{eqn:kl} again,
$$
\varphi_{\ell+1, \ell+1}=P_\ell(L)\varphi_{1,1}=(P_\ell(L)-1)\varphi_{1,1}+ \varphi_{1,1}=\varphi_{1,1}\ ,
$$
for all $\ell=0,\ldots, n-1$. But since $(\varphi_{ij})$ is traceless, it follows that in fact $\varphi_{ii}=0$ for all $i$.
The proof for $\beta_{ij}$ is exactly similar.
Going back to \eqref{eqn:phi-beta}, we see that $\varphi_{ij}$ (resp.\ $\beta_{ji}$) is holomorphic (resp.\ harmonic) if $i<j$. Moreover, for $p\geq 1$, \eqref{eqn:L} becomes
\begin{equation} \label{eqn:P}
(2-\delta_{i1}-\delta_{i n-p})\varphi_{i, i+p}=\varphi_{i+1, i+1+p}+\varphi_{i-1, i-1+p}\ .
\end{equation}
If $i=1$ this implies $\varphi_{1, p+1}=\varphi_{2, p+2}$. Suppose by induction that $\varphi_{k, k+p}=\varphi_{1,p+1}$ for all $k\leq i$. Then if $i+p\neq n$,
\eqref{eqn:P} implies
$$
2\varphi_{i,i+p}=\varphi_{i+1, i+1+p}+\varphi_{i-1,i-1+p} \quad \Longrightarrow\quad \varphi_{1,p+1}=\varphi_{i+1, i+1+p}\ .
$$
If $i+p=n$, we immediately get $\varphi_{in}=\varphi_{i-1, n-1}=\varphi_{1,p+1}$. Hence, all differentials $\varphi_{ij}$, $j-i=p$, are equal. The same argument applies to $\beta_{ij}$.
From this
we conclude that the map $(\varphi,\beta)\mapsto (\varphi_{12},\ldots, \varphi_{1n}, \beta_{21}, \ldots, \beta_{n1})$
gives an isomorphism
\begin{equation} \label{eqn:higgs-fuchs}
T_{[\mathcal E_F,\Phi_F]}\mathfrak M_D^{(n)}\simeq \bigoplus_{j=2}^n H^0(X,\mathcal K^j)\oplus (H^0(X,\mathcal K^j))^\ast\ .
\end{equation}
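As a consistency check, note that by Riemann-Roch $\dim H^0(X,\mathcal K^j)=(2j-1)(g-1)$ for $j\geq 2$, so the right hand side of \eqref{eqn:higgs-fuchs} has complex dimension
$$
2(g-1)\sum_{j=2}^n(2j-1)=2(g-1)(n^2-1)=(n^2-1)(2g-2)\ ,
$$
in agreement with Proposition \ref{prop:tangent-space}.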
The rank $n$ holomorphic vector bundle
$\mathcal V$ whose $\bar\partial$-operator is
$\bar\partial_E+\Phi^\ast_{F}$ is unstable and
has a Harder-Narasimhan filtration $0=\mathcal V_0\subset\mathcal V_1\subset\cdots\subset \mathcal V_n=\mathcal V$, $\mathcal V_{j+1}/\mathcal V_j=\mathcal K^{-j+(n-1)/2}$, such that
$$
0\longrightarrow\mathcal V_j\longrightarrow\mathcal V_{j+1}\longrightarrow\mathcal K^{-j+(n-1)/2}\longrightarrow 0\ .
$$
is the (unique) non-split extension.
This is an example of an \emph{oper}. Opers will be discussed
in Section \ref{sec:opers}.
\end{example}
\subsubsection{The Hitchin map}
Given a Higgs bundle $(\mathcal E,\Phi)$, the coefficient of
$\lambda^{n-i}$ in the expansion of $\det(\lambda+\Phi)$ is a
holomorphic section of $\mathcal K^i$, $i=1,\ldots, n$. In the case of fixed
determinant that we will mostly be considering, $\tr \Phi=0$,
so the sections start with $i=2$.
These pluricanonical sections are clearly invariant under the
action (by conjugation) of $\mathcal G_E^\mathbb C$, so we have a
well-defined map, called the {\bf Hitchin map},
\begin{equation} \label{eqn:hitchin-map}
h: \mathfrak M_D^{(n)}\longrightarrow \bigoplus_{i=2}^n H^0(X,\mathcal K^i) \ .
\end{equation}
The structure of this map and its fibers turns out to be
extremely rich (cf.\ \cite{Hitchin87b}).
In these notes, however, I will only discuss
the following important fact which will be proven in the next
section using Uhlenbeck compactness
(for algebraic proofs, see \cite{Nitsure91, Simpson92}).
\begin{theorem} \label{thm:hitchin-proper}
The Hitchin map is proper.
\end{theorem}
\subsection{The Hitchin-Kobayashi correspondence} \label{sec:hitchin}
\subsubsection{Stability and critical metrics}
{\bf Hitchin's equations} for a Higgs bundle of degree zero are
\begin{equation} \label{eqn:hitchin}
F_A+[\Phi,\Phi^\ast]=0\ .
\end{equation}
Here, $\Phi$ is regarded as an endomorphism valued $(1,0)$-form.
It will also be convenient to consider the case of bundles of nonzero degree. In this case the equations become
\begin{equation} \label{eqn:hitchin2}
f_{(A,\Phi)}:=
\sqrt{-1}\Lambda(F_A+[\Phi,\Phi^\ast])=\mu\ .
\end{equation}
Here we recall the normalization $\vol(X)=2\pi$, and
then on the right hand side the scalar multiple of the identity
endomorphism necessarily satisfies $\mu=\mu(E)$.
There are two ways of thinking of \eqref{eqn:hitchin2}: for a
Higgs bundle $(\mathcal E, \Phi)$ a choice of hermitian
metric gives a Chern connection $A=(\bar\partial_E, H)$.
Hence, we may either view \eqref{eqn:hitchin2} as an equation
for a hermitian metric $H$, or alternatively (and
equivalently) we may fix $H$ and consider
$f_{(A,\Phi)}$ for all $(A,\Phi)$ in a
complex gauge orbit. We will often go
back and forth between these equivalent points of view.
The solutions to the equations \eqref{eqn:hitchin2} may be regarded
as the absolute minimum for the {\bf Yang-Mills-Higgs functional}
on the space of holomorphic pairs, defined as
\begin{equation} \label{eqn:ymh}
\YMH(A,\Phi)=\int_X \left|F_A+[\Phi,\Phi^\ast]\right|^2 \,\omega\ .
\end{equation}
The Euler-Lagrange equations for $\YMH$ are
\begin{equation} \label{eqn:critical}
d_A f_{(A,\Phi)}=0\ ,\ [\Phi, f_{(A,\Phi)}]=0\ .
\end{equation}
We call a metric {\bf critical} if \eqref{eqn:critical} is
satisfied.
In this case, it is easy to see that the bundle $(\mathcal E,\Phi)$
splits holomorphically and isometrically as a direct
sum of Higgs bundles that are
solutions to \eqref{eqn:hitchin2} with possibly
different slopes.
\begin{proposition} \label{prop:easy}
If a Higgs bundle $(\mathcal E, \Phi)$ admits a metric satisfying
\eqref{eqn:hitchin2}, then $(\mathcal E,\Phi)$ is polystable.
\end{proposition}
\begin{proof}
Let $\mathcal S\subset\mathcal E$ be a proper $\Phi$-invariant subbundle. Let $\pi$ denote the orthogonal projection to $S$ and $\beta=-\bar\partial_E \pi$ the second fundamental form.
Then since $\mathcal S$ is invariant, $(I-\pi)\Phi\pi=0$, or
$$
\Phi\pi = \pi\Phi\pi \ ,\
\pi\Phi^\ast = \pi\Phi^\ast\pi\ .
$$
In particular, this implies
\begin{align}
\tr (\pi[\Phi, \Phi^\ast] )&= \tr (\pi\Phi\Phi^\ast )+ \tr(\pi \Phi^\ast\Phi) \notag \\
&= \tr( \pi\Phi\Phi^\ast )- \tr (\Phi\pi\Phi^\ast )\notag \\
&= \tr (\pi\Phi\Phi^\ast\pi) - \tr (\Phi\pi\Phi^\ast\pi) \notag \\
&= \tr (\pi\Phi\Phi^\ast\pi) - \tr(\pi \Phi\pi\Phi^\ast\pi)\notag \\
&= \tr (\pi\Phi(I-\pi)\Phi^\ast \pi)= \tr (\pi\Phi(I-\pi)(I-\pi)\Phi^\ast \pi) \notag \\
&= \tr \left\{ (\pi\Phi(I-\pi))(\pi\Phi(I-\pi))^\ast\right\} \ ;\notag \\
\tr(\pi\sqrt{-1}\Lambda [\Phi, \Phi^\ast]) &=|\pi\Phi(I-\pi)|^2 \label{eqn:pi-phi}\ .
\end{align}
Plugging \eqref{eqn:hitchin2} into \eqref{eqn:degree}, and using \eqref{eqn:pi-phi}, we have
$$
\deg\mathcal S=\rank(\mathcal S)\mu(\mathcal E)-\frac{1}{2\pi}( \Vert \pi\Phi(I-\pi)\Vert^2 + \Vert \beta\Vert^2)\ .
$$
This proves $\mu(\mathcal S)\leq \mu(\mathcal E)$. Moreover, equality holds if and only if the two terms on the right hand side above vanish; i.e.\ the holomorphic structure and Higgs field split.
\end{proof}
The main result we prove in this section is the converse to Proposition \ref{prop:easy}.
\begin{theorem}[Hitchin \cite{Hitchin87a}, Simpson
\cite{Simpson88}] \label{thm:hitchin}
If $(\mathcal E,\Phi)$ is polystable, then it admits a metric
satisfying \eqref{eqn:hitchin2}.
\end{theorem}
\begin{remark} \label{rem:rank-one}
The result is straightforward in the case of line bundles $\mathcal L$. Indeed, in rank $1$ the term $[\Phi,\Phi^\ast]$ vanishes, so \eqref{eqn:hitchin2} amounts to finding a constant curvature metric on $L$. If $H$ is any metric, let $H_\varphi=e^\varphi H$ for a function $\varphi$. Then
$
F_{(\bar\partial_L, H_\varphi)}=F_{(\bar\partial_L, H)}+\bar\partial\partial \varphi
$, and the problem is solved if we can find $\varphi$ such that
$$
\Delta\varphi=2\sqrt{-1}\Lambda(F_{(\bar\partial_L, H)})- 2\deg(L)\ .
$$
By the Hodge theorem, the only condition for solving this equation is that the integral of the right hand side vanish (cf.\ \cite[p.\ 84]{GriffithsHarris78}), which it does by \eqref{eqn:chern-weil}.
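Explicitly, since $\Lambda F_{(\bar\partial_L, H)}\,\omega=F_{(\bar\partial_L, H)}$, equation \eqref{eqn:chern-weil} and the normalization $\int_X\omega=2\pi$ give
$$
\int_X\left(2\sqrt{-1}\Lambda F_{(\bar\partial_L, H)}-2\deg(L)\right)\omega=2\sqrt{-1}\int_X F_{(\bar\partial_L, H)}-4\pi\deg(L)=0\ .
$$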
\end{remark}
In order to prove Theorem \ref{thm:hitchin} in higher rank, it will be important to construct approximate critical metrics.
Let $0\subset (\mathcal E_1,\Phi_1)\subset \cdots\subset
(\mathcal E_\ell, \Phi_\ell)=(\mathcal E, \Phi)$
be the Harder-Narasimhan filtration of the Higgs bundle
$(\mathcal E,\Phi)$. We let $\mathcal Q_i=\mathcal E_i/\mathcal E_{i-1}$ and $\mu_i=\mu(\mathcal Q_i)$. Then there is a smooth splitting $E=\bigoplus_i Q_i$, and given a hermitian metric $H$ we can make this splitting orthogonal. Hence, there is a well-defined endomorphism
\begin{equation} \label{eqn:mu-gr}
\mu_{({\rm Gr}(\mathcal E,\Phi), H)}=\left(\begin{matrix} \mu_1 && \\ & \ddots & \\ && \mu_\ell \end{matrix}\right)\ .
\end{equation}
where the block $\mu_i$ denotes $\mu_i$ times the identity matrix of size $\rank Q_i$.
\begin{definition} \label{def:approximate-critical}
We say that a metric on $(\mathcal E,\Phi)$ is $\varepsilon$-approximate critical if
$$
\sup \left| f_{((\bar\partial_E,H),\Phi)}-\mu_{({\rm Gr}(\mathcal E,\Phi), H)}\right| < \varepsilon\ .
$$
\end{definition}
Note that the $\bar\partial$-operator for $\mathcal E$ may be written in an upper triangular form with respect to this splitting, and the strictly upper triangular piece is determined by the extension classes. By acting with a complex gauge transformation that is block diagonal, the extension classes may be made arbitrarily small. If moreover the bundles $\mathcal Q_i$ with their induced Higgs fields admit Hermitian-Yang-Mills-Higgs connections, then we can sum these up and obtain the following (for more details, see \cite{DaskalWentworth04}).
\begin{lemma} \label{lem:approximate-critical}
Let $(\mathcal E,\Phi)$ be an unstable Higgs bundle of rank $n$,
and suppose that Theorem \ref{thm:hitchin} has been proven
for Higgs bundles of rank less than $n$.
Then for any $\varepsilon>0$ there is an
$\varepsilon$-approximate critical metric on $(\mathcal E,\Phi)$.
\end{lemma}
\subsubsection{Preliminary estimates}
Recall the map \eqref{eqn:hitchin-map}.
A crucial point is the following a priori estimate.
\begin{proposition} \label{prop:higgs-bound}
Let $(\mathcal E, \Phi)$ be a Higgs bundle.
There are constants $C_1, C_2 >0$ depending only on the metrics on $X$ and $E$, and on
$\Vert h[\mathcal E,\Phi]\Vert$, such that
$$\sup |\Phi|^2\leq
C_1 +
C_2\, \sup\left| \sqrt{-1}\Lambda(F_A+[\Phi,\Phi^\ast])\right|\ .$$
\end{proposition}
We need the following
\begin{lemma}[{cf.\ \cite[p.\ 27]{Simpson92}}] \label{lem:simpson}
For a matrix $P$
there are constants $C_1, C_2>0$
depending only on the eigenvalues of $P$ such that
$$
|[P,P^\ast]|^2\geq C_1|P|^4-C_2(1+|P|^2)\ .
$$
\end{lemma}
\begin{proof}
Choose a unitary basis such that $P=S+N$, where $S$ is diagonal and $N$ is strictly upper triangular. By assumption, $|S|$ is bounded. It is easy to see that it then suffices to show there is $C>0$ such that for all strictly upper triangular $N$, $|[N,N^\ast]|\geq C|N|^2$. Suppose not. Then by scaling we can find a sequence $N_j$, $|N_j|=1$, and $[N_j, N_j^\ast]\to 0$. After passing to a subsequence, we may assume $N_j\to N$, with $[N,N^\ast]=0$, $|N|=1$. But this is a contradiction. Indeed, if $a_1,\ldots, a_n$ and $b_1, \ldots, b_n$ are the rows and columns of $N$, then reading off the diagonal of $NN^\ast=N^\ast N$ implies $|a_i|^2=|b_i|^2$ for $i=1,\ldots, n$. But $b_1=0$, which from this equality implies $a_1=0$. This in turn implies $b_2=0$, and hence $a_2=0$. Continuing in this way, we conclude $N=0$; contradiction.
\end{proof}
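To illustrate the inequality used above in the simplest nontrivial case: for a strictly upper triangular matrix $N=\left(\begin{smallmatrix} 0&a\\ 0&0\end{smallmatrix}\right)$ one computes
$$
[N,N^\ast]=\left(\begin{matrix} |a|^2&0\\ 0&-|a|^2\end{matrix}\right)\ ,\qquad |[N,N^\ast]|=\sqrt{2}\,|a|^2=\sqrt{2}\,|N|^2\ ,
$$
so in the $2\times 2$ case the constant $C$ may be taken to be $\sqrt 2$ (with respect to the Hilbert-Schmidt norm).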
We will also need the following computation.
\begin{align}
[[P,P^\ast],P] &= (PP^\ast-P^\ast P)P-P(PP^\ast-P^\ast P)\notag \\
&=2PP^\ast P-P^\ast P^2 -P^2 P^\ast \notag\\
\langle [[P,P^\ast],P], P\rangle =\tr ([[P,P^\ast],P]P^\ast) &=
\tr((2PP^\ast P-P^\ast P^2 -P^2 P^\ast)P^\ast)\notag \\
&=2\tr(PP^\ast)^2-2\tr( P^2 (P^\ast)^2 ) \notag\\
\langle \ad([P,P^\ast])P, P\rangle&=|[P,P^\ast]|^2 \label{eqn:ad}\ .
\end{align}
\begin{proof}[Proof of Proposition \ref{prop:higgs-bound}]
Regard $\Phi$ as a holomorphic section of $\mathcal K\otimes\End \mathcal E$. We also make use of three easy facts. First, if $H$ is a hermitian metric on $E$ and $\widehat H$ is the induced metric on $\End E$, then $F_{(\End \mathcal E,\widehat H)}= \ad F_{(\mathcal E, H)}$, where the adjoint indicates that the curvature endomorphism acts by commutation.
Second, if $\widehat H, h$ are hermitian metrics on $\End E$ and $K$, respectively, then
\begin{equation} \label{eqn:end-curvature}
F_{(\mathcal K\otimes\End \mathcal E, h\otimes \widehat H)}= F_{(\End \mathcal E,\widehat H)}+ F_{(\mathcal K, h)}\cdot I\ .
\end{equation}
Third, if $s$ is a holomorphic section of a vector bundle with unitary connection $A$ and curvature $F_A$, then we have the following Weitzenb\"ock formula:
\begin{equation} \label{eqn:weitzenbock}
\Delta|s|^2= 2|d_A s|^2 -2\langle \sqrt{-1}\Lambda F_A s,s\rangle\ .
\end{equation}
Indeed (cf.\ \eqref{eqn:kahler}),
\begin{align*}
\Delta|s|^2&=-2\bar\partial^\ast\bar\partial|s|^2=
2\sqrt{-1}\Lambda\partial\bar\partial |s|^2=2\sqrt{-1}\Lambda\partial\langle s, d'_A s\rangle \\
&=2\sqrt{-1}\Lambda\langle d'_A s, d'_A s\rangle +2\sqrt{-1}\Lambda\langle s, d''_Ad'_A s\rangle \\
&=2|d'_A s|^2 +2\sqrt{-1}\Lambda\langle s, F_A s\rangle \\
&= 2|d_A s|^2 -2\langle \sqrt{-1}\Lambda F_A s,s\rangle \ .
\end{align*}
Now using eqs. \eqref{eqn:ad}, \eqref{eqn:end-curvature}, and \eqref{eqn:weitzenbock},
along with Lemma \ref{lem:simpson},
we have
\begin{align*}
\Delta |\Phi|^2&\geq - 2\langle \sqrt{-1}\Lambda F_{(\mathcal K\otimes\End \mathcal E, h\otimes\widehat H)}\Phi,\Phi\rangle \\
&\geq -2\langle \sqrt{-1}\Lambda F_{(\End \mathcal E, \widehat H)}\Phi,\Phi\rangle- C_3|\Phi|^2 \\
&=-2\langle \ad (\sqrt{-1}\Lambda F_{(\mathcal E, H)})\Phi,\Phi\rangle- C_3|\Phi|^2 \\
&=2\langle \ad (\sqrt{-1}\Lambda [\Phi,\Phi^\ast])\Phi,\Phi\rangle
-2\langle \ad (\sqrt{-1}\Lambda (F_{(\mathcal E, H)}+[\Phi,\Phi^\ast]))\Phi,\Phi\rangle- C_3|\Phi|^2 \\
&\geq C_1|\Phi|^4-C_2(1+|\Phi|^2)-C_4\sup\left|\sqrt{-1}\Lambda (F_{(\mathcal E, H)}+[\Phi,\Phi^\ast])\right||\Phi|^2\ .
\end{align*}
Now at a maximum of $|\Phi|^2$ the left hand side is nonpositive. Since $C_1>0$, the proposition follows immediately.
\end{proof}
\begin{remark}
Notice that the sign in \eqref{eqn:hitchin} is decisive for this argument (cf.\ \cite{Hitchin90}).
\end{remark}
Finally, the existence proof will be based on Donaldson's elegant argument in \cite{Donaldson83}. This requires the introduction of the functional $J=J(A,\Phi)$, defined as follows.
For a hermitian endomorphism $\phi$ of $E$, let
$$
\nu(\phi)=\sum_{i=1}^n|\lambda_i| \quad ,\quad
N^2(\phi)=\int_X \nu^2(\phi)\, \frac{\omega}{2\pi}\ ,
$$
where the $\lambda_i$ are the (pointwise) eigenvalues of $\phi$. Then we define
\begin{equation} \label{eqn:J}
J(A,\Phi)=N(f_{(A,\Phi)}-\mu(E))\ .
\end{equation}
We next prove the following two results of Donaldson (see \cite[Lemmas 2 $\&$ 3]{Donaldson83}), adapted here to the case of Higgs bundles.
\begin{lemma} \label{lem:donaldson-upper-bound}
Let $(A,\Phi)$ be a Higgs bundle with underlying bundle $\mathcal E$.
Suppose it fits into an extension of Higgs bundles $0\to\mathcal M\to\mathcal E\to\mathcal N\to 0$, and that
$\mu(\mathcal N)\leq \mu(\mathcal E)\leq \mu(\mathcal M)$. Then
$$
(\rank \mathcal M)(\mu(\mathcal M)-\mu(\mathcal E))+(\rank\mathcal N )(\mu(\mathcal E)-\mu(\mathcal N))\leq J(A,\Phi)\ .
$$
\end{lemma}
\begin{proof}
With respect to the orthogonal splitting $E=M\oplus N$, and letting $F_E$, $F_M$, and $F_N$ denote the curvature and induced curvatures of the Chern connection for $(\mathcal E, H)$,
we have
$$
\sqrt{-1}\Lambda F_E=\left(
\begin{matrix}
\sqrt{-1}\Lambda F_M+b_M & -(d''_A)^\ast\beta \\
-((d''_A)^\ast\beta)^\ast &\sqrt{-1}\Lambda F_N+b_N
\end{matrix}
\right)\ ,
$$
where $\beta$ is the second fundamental form, and
$$
b_M=-\sqrt{-1}\Lambda(\beta\wedge\beta^\ast)\ ,\ b_N=-\sqrt{-1}\Lambda(\beta^\ast\wedge\beta)\ .
$$
Notice that $\tr b_M=-\tr b_N=|\beta|^2$. Similarly, if we write $\displaystyle \Phi=\left(\begin{matrix} \Phi_M &\varphi\\ 0 &\Phi_N\end{matrix}\right)$, then
$$
[\Phi,\Phi^\ast]=\left(\begin{matrix} [\Phi_M,\Phi_M^\ast]+\varphi\wedge\varphi^\ast & \varphi\wedge\Phi_N^\ast+\Phi_M^\ast\wedge\varphi \\ \Phi_N\wedge\varphi^\ast+\varphi^\ast\wedge\Phi_M
& [\Phi_N,\Phi_N^\ast]+\varphi^\ast\wedge\varphi
\end{matrix}
\right)\ .
$$
It follows that
$$
f_{(A,\Phi)}=\left(\begin{matrix} f_M+b_M+\sqrt{-1}\Lambda\varphi\wedge\varphi^\ast & \cdots \\
\dots & f_N+b_N+\sqrt{-1}\Lambda\varphi^\ast\wedge\varphi
\end{matrix}
\right)\ .
$$
Hence, (cf.\ \cite[p.\ 271]{Donaldson83}),
\begin{align*}
\nu(f_{(A,\Phi)}-\mu(E)) &\geq \left| \tr(\sqrt{-1}\Lambda F_M)-(\rank \mathcal M)\mu(\mathcal E)+|\beta|^2+|\varphi|^2 \right| \\
&\qquad +\left| \tr(\sqrt{-1}\Lambda F_N)-(\rank \mathcal N)\mu(\mathcal E)-|\beta|^2-|\varphi|^2\right| \ ,
\end{align*}
and therefore
\begin{align*}
J(A,\Phi)&\geq \int_X\nu(f_{(A,\Phi)}-\mu(\mathcal E)) \frac{\omega}{2\pi} \\
&\geq \left|\int_X\left( \tr(\sqrt{-1}\Lambda F_M)-(\rank \mathcal M)\mu(\mathcal E)+|\beta|^2+|\varphi|^2\right) \frac{\omega}{2\pi} \right| \\
&\qquad +\left| \int_X \left(\tr(\sqrt{-1}\Lambda F_N)-(\rank \mathcal N)\mu(\mathcal E)-|\beta|^2-|\varphi|^2\right)\frac{\omega}{2\pi}\right| \\
&\geq (\rank \mathcal M)(\mu(\mathcal M)-\mu(\mathcal E)) + (\rank \mathcal N)(\mu(\mathcal E)-\mu(\mathcal N))\ .
\end{align*}
\end{proof}
\begin{lemma} \label{lem:donaldson-lower-bound}
Let $(A_0,\Phi_0)$ be a stable Higgs bundle of rank $n$ that
fits into an extension of Higgs bundles
$0\to\mathcal S\to\mathcal E\to\mathcal Q\to 0$.
Assume Theorem \ref{thm:hitchin} has been proven for
Higgs bundles of rank less than $n$.
Then we can choose a point $(A,\Phi)$ in the complex gauge orbit of $(A_0,\Phi_0)$ such that
$$
J(A,\Phi)< (\rank \mathcal S)(\mu(\mathcal E)-\mu(\mathcal S))+(\rank\mathcal Q)(\mu(\mathcal Q)-\mu(\mathcal E))\ .
$$
\end{lemma}
\begin{proof}
First, consider the Harder-Narasimhan filtrations of $(\mathcal S,\Phi_S)$ and $(\mathcal Q, \Phi_Q)$. By
applying Lemma \ref{lem:approximate-critical} we may
assume for any $\varepsilon>0$ that there is a metric on $S$ such that
$$
\sup \left| f_{((\bar\partial_S,H_S),\Phi_S)}-\mu_{({\rm Gr}(\mathcal S,\Phi_S), H_S)}\right| < \varepsilon\ ,
$$
and similarly for $Q$.
We endow $E=S\oplus Q$ with the sum of these two metrics.
This is equivalent to a pair $(A,\Phi)$ in the orbit of
$(A_0,\Phi_0)$.
Next, since $(A_0, \Phi_0)$ (and hence also $(A,\Phi)$)
is simple we may further assume that
$$-\bar\partial_{A_0}^\ast\beta +\sqrt{-1}\Lambda \left( \varphi\wedge
\Phi_Q^\ast+\Phi_S^\ast\wedge\varphi\right)=0
$$
(see \eqref{eqn:h1}).
This is accomplished via a complex gauge transformation of the
form $\displaystyle g=\left(\begin{matrix} 1&\phi\\0&1\end{matrix}\right)$. In particular, the $\bar\partial$-operators on $S$ and
$Q$ remain unchanged, and so the approximate critical
structure still holds.
With this understood, we perform a further gauge
transformation so that $(A,\Phi)$ coincides with
$(A_0,\Phi_0)$ but with $\beta$ and $\varphi$ scaled by $t$.
Then $f_{(A,\Phi)}-\mu(E)$ is block diagonal with entries
\begin{align}
\begin{split} \label{eqn:donaldson}
& f_S-\mu_{({\rm Gr}(\mathcal S,\Phi_S), H_S)}
+\mu_{({\rm Gr}(\mathcal S,\Phi_S), H_S)}-\mu(E)
+t^2\left(b_S+\sqrt{-1}\Lambda\varphi\wedge\varphi^\ast \right) \ ;\\
& f_Q-\mu_{({\rm Gr}(\mathcal Q,\Phi_Q), H_Q)}
+\mu_{({\rm Gr}(\mathcal Q,\Phi_Q), H_Q)}-\mu(E)
+t^2\left(b_Q+\sqrt{-1}\Lambda\varphi^\ast\wedge\varphi\right)\ .
\end{split}
\end{align}
Since $(\mathcal E,\Phi)$ is stable, $\mu(\mathcal E)$ is strictly bigger than the maximal slope of a subsheaf of $\mathcal S$, and strictly smaller than the minimal slope of a quotient of $\mathcal Q$.
This says that for $t$ and $\varepsilon$
chosen sufficiently small, the first line in \eqref{eqn:donaldson} is negative definite and the second is positive definite. It follows that
$$
\nu(f_{(A,\Phi)}-\mu(E))
\leq (\rank\mathcal S)(\mu(\mathcal E) -\mu(\mathcal S))+(\rank\mathcal Q)(\mu(\mathcal Q)-\mu(\mathcal E))
-2t^2\left( |\beta|^2+|\varphi|^2\right)+O(\varepsilon)\ .
$$
Without loss of generality, assume that
$\Vert\beta\Vert^2+\Vert\varphi\Vert^2=1$. By the argument in
\cite{Donaldson83} we may also assume $|\beta|$, $|\varphi|$ are
bounded uniformly in $\varepsilon$.
The result now follows by fixing $t$ and choosing
$\varepsilon$ sufficiently small.
\end{proof}
\subsubsection{The existence theorem}
We will prove the following in the next section where the Yang-Mills-Higgs flow will be introduced.
\begin{lemma} \label{lem:minimizing}
In any complex gauge orbit
there exists a sequence $(A_j, \Phi_j)$ satisfying the following conditions:
\begin{enumerate}
\item $(A_j, \Phi_j)$ is minimizing for $J$;
\item if
$f_{(A_j,\Phi_j)}=
\sqrt{-1}\Lambda(F_{A_j}+[\Phi_j,\Phi_j^\ast])$, then $\sup | f_{(A_j,\Phi_j)}|$ is bounded uniformly in $j$;
\item $\Vert d_{A_j} f_{(A_j,\Phi_j)}\Vert_{L^2}\to 0$ and $\Vert [f_{(A_j,\Phi_j)}, \Phi_j]\Vert_{L^2}\to 0$.
\end{enumerate}
\end{lemma}
Next, we will need one of the most fundamental results of gauge theory, stated here for the case of Riemann surfaces.
\begin{proposition}[Uhlenbeck {\cite{Uhlenbeck82}}] \label{prop:uhlenbeck}
Fix $p\geq 2$.
Let $\{A_j\}$ be a sequence of $L^p_1$-connections with
$\Vert F_{A_j}\Vert_{L^p}$ uniformly bounded.
Then there exists a sequence of unitary gauge
transformations $g_j\in L^p_2$ and a
smooth unitary connection $A_\infty$ such that
(after passing to a subsequence)
$g_j(A_j)\to A_\infty$ weakly in $L^p_1$ and strongly in $L^p$.
\end{proposition}
Assuming these results, we now prove the existence theorem.
\begin{proof}[Proof of Theorem \ref{thm:hitchin}]
It clearly suffices to assume $(\mathcal E,\Phi)$ is stable. Furthermore,
by Remark \ref{rem:rank-one}, we may proceed by induction. Assume that the result has been proven for all bundles of rank $<n=\rank E$.
\noindent
{\bf Step 1.} \emph{The limiting bundle $(\mathcal E_\infty, \Phi_\infty)$}.
Choose a minimizing sequence for $J$ as in Lemma \ref{lem:minimizing}.
Since the sequence lies in a single complex gauge orbit, the
image of the Hitchin map $h[A_j, \Phi_j]$ is unchanged.
Hence, by Proposition \ref{prop:higgs-bound} the $\Phi_j$ are uniformly
bounded. By Lemma \ref{lem:minimizing} (ii), this in turn implies that $\Vert F_{A_j}\Vert_{L^p}$
is bounded for any $p$. We therefore may assume by
Proposition \ref{prop:uhlenbeck} that there is a smooth
connection $A_\infty$ so that if we write
$\bar\partial_{A_j}=\bar\partial_{A_\infty}+a_j$, then $a_j\to 0$ weakly in
$L^p_1$.
By the Sobolev embedding theorem, we may assume in particular that
the $a_j\to 0$ in some $C^\alpha$. Notice that it follows that $F_{A_j}\to F_{A_\infty}$ weakly in $L^p$.
From the holomorphicity condition we have
$$
0=\bar\partial_{A_j}\Phi_j=\bar\partial_{A_\infty}\Phi_j+[a_j,\Phi_j]\ .
$$
Elliptic regularity for $\bar\partial_{A_\infty}$ then implies a bound
$\Vert \Phi_j\Vert_{L^2_1}\leq C\Vert \Phi_j\Vert_{L^2}$, say. Differentiating the previous equation gives
\begin{equation} \label{eqn:phi1}
\bar\partial_{A_\infty}^\ast\bar\partial_{A_\infty}\Phi_j+\bar\partial_{A_\infty}^\ast[a_j,\Phi_j]=0\ .
\end{equation}
By the Cauchy-Schwarz inequality and the previous estimate we have
\begin{equation} \label{eqn:phi2}
\Vert \bar\partial_{A_\infty}^\ast[a_j,\Phi_j]\Vert_{L^2}\leq C_1\Vert a_j\Vert_{L^4_1}\Vert \Phi_j\Vert_{L^4}
+C_2\Vert \Phi_j\Vert_{L^2}\ .
\end{equation}
Now we may assume $\{a_j\}$ is bounded in $L^4_1$, and using elliptic regularity for the Laplacian $\bar\partial_{A_\infty}^\ast\bar\partial_{A_\infty}$ along with
the inclusions $L^2_1\hookrightarrow L^4$, $L^2_2\hookrightarrow C^\alpha$,
by \eqref{eqn:phi1} and \eqref{eqn:phi2} we have an estimate
$\Vert \Phi_j\Vert_{C^\alpha}\leq C\Vert \Phi_j\Vert_{L^2}$.
Since the $\Phi_j$ are uniformly bounded their $L^2$ norms are bounded, so
we may assume that $\Phi_j$ converges
in $C^\alpha$ to some $\Phi_\infty$.
Moreover,
by holomorphicity of the $\Phi_j$ we can write
$$
\bar\partial_{A_\infty}\Phi_\infty=\bar\partial_{A_\infty}(\Phi_\infty-\Phi_j)-[a_j, \Phi_j]\ ,
$$
and since $[a_j, \Phi_j]\to 0$ in $C^\alpha$ we see that
$\bar\partial_{A_\infty} \Phi_\infty=0$ weakly.
Hence, by Weyl's lemma $\Phi_\infty$ is actually holomorphic, and
thus $(\mathcal E_\infty, \Phi_\infty)$ is a Higgs bundle.
\noindent
{\bf Step 2.} \emph{Construction of a nonzero map $\mathcal E\to \mathcal E_\infty$}. Let $g_j$ be complex gauge transformations such that $g_j(A)=A_j$. Holomorphicity of $g_j$ implies $\bar\partial_{A_\infty}g_j+[a_j,g_j]=0$. By the exact same argument as in Step 1,
we have an estimate
$\Vert g_j\Vert_{C^\alpha}\leq C\Vert g_j\Vert_{L^2}$. Now rescale $g_j$ so that $\Vert g_j\Vert_{L^2}=1$. The $C^\alpha$-estimate above still holds for the rescaled map, so by compactness we may assume there is a continuous $g_\infty: \mathcal E\to \mathcal E_\infty$ such that $g_j\to g_\infty$ in $C^\alpha$. Because of the normalization, we know that $g_\infty\not\equiv 0$.
Moreover, it follows as in Step 1 that $g_\infty$ is holomorphic.
Finally, by the $C^\alpha$ convergence of $g_j$ and $\Phi_j$ and the fact that $g_j\Phi=\Phi_j g_j$, we have
$g_\infty\Phi=\Phi_\infty g_\infty$.
\noindent
{\bf Step 3.} \emph{The map $g_\infty$ is an isomorphism}. Suppose the contrary. Let $\mathcal S=\ker g_\infty$ and $\mathcal Q=\mathcal E/\mathcal S$. Then $\mathcal Q$ is a subsheaf of $\mathcal E_\infty$. Let $\mathcal M$ denote its saturation and $\mathcal N=\mathcal E_\infty/\mathcal M$.
Since $\Phi_\infty g_\infty=g_\infty\Phi$, the subbundle $\mathcal S$ is $\Phi$-invariant. Similarly, $\mathcal M$ is $\Phi_\infty$-invariant. Also,
from the discussion in Section \ref{sec:stability}, we have
\begin{align}
\begin{split} \label{eqn:slopes}
\mu(\mathcal Q)-\mu(\mathcal E) &\leq \mu(\mathcal M)-\mu(\mathcal E) \ ;\\
\mu(\mathcal E)-\mu(\mathcal S) &\leq \mu(\mathcal E)-\mu(\mathcal N)\ .
\end{split}
\end{align}
Then we have the following extensions of Higgs bundles (see \cite{Donaldson83}):
\begin{equation}
\begin{split} \label{eqn:donaldson-diagram}
\xymatrix{
0\ar[r] & \mathcal S \ar[r] & \mathcal E \ar[r] \ar[d]^{g_\infty} & \mathcal Q \ar[r]\ar[d] & 0 \\
0 & \mathcal N \ar[l] & \mathcal E_\infty \ar[l] & \mathcal M \ar[l]& 0 \ar[l]
}
\end{split}
\end{equation}
Applying Lemma \ref{lem:donaldson-upper-bound} to the bottom row of \eqref{eqn:donaldson-diagram} and Lemma \ref{lem:donaldson-lower-bound} to the top row implies
\begin{align*}
(\rank\mathcal M)( \mu(\mathcal M)-\mu(\mathcal E))+&(\rank\mathcal N)( \mu(\mathcal E)-\mu(\mathcal N)
) \leq J(A_\infty,\Phi_\infty) \\
&\leq\lim_{j\to\infty} J(A_j,\Phi_j)= \inf J(A,\Phi) \\
&< (\rank\mathcal S)( \mu(\mathcal E)-\mu(\mathcal S))+(\rank\mathcal Q)( \mu(\mathcal Q)-\mu(\mathcal E))\ ,
\end{align*}
where for the second line we
can use either the lower semicontinuity of $J$ (see \cite{Donaldson83}) or the argument in \cite[Corollary 2.12 and Lemma 2.17]{DaskalWentworth04}.
But since $\rank\mathcal M=\rank\mathcal Q$ and $\rank\mathcal S=\rank\mathcal N$, this contradicts \eqref{eqn:slopes}.
\noindent
{\bf Step 4.} \emph{Solution to Hitchin's equations}.
Finally, I claim that the Higgs bundle $(A_\infty,\Phi_\infty)$
is a solution to \eqref{eqn:hitchin}. Indeed, by the remark following eq.\
\eqref{eqn:critical} this follows if we can show
$d_{A_\infty}f_{(A_\infty,\Phi_\infty)}=0$ and
$[f_{(A_\infty,\Phi_\infty)},\Phi_\infty]=0$. The second fact holds, since $[f_{(A_j,\Phi_j)},\Phi_j]\to 0$ in $L^2$ by assumption, and $f_{(A_j,\Phi_j)}$ (resp.\ $\Phi_j$) converges weakly in $L^p$ (resp.\ $C^\alpha$).
For the first claim, let $B$ be a test form. Then
\begin{align*}
\langle d_{A_\infty}f_{(A_\infty,\Phi_\infty)}, B\rangle_{L^2}&=
\langle f_{(A_\infty,\Phi_\infty)}, d_{A_\infty}^\ast B\rangle_{L^2} \\
&=
\lim_{j\to\infty}
\langle f_{(A_j,\Phi_j)}, d_{A_j}^\ast B\rangle_{L^2}
+
\lim_{j\to\infty}
\int_X\tr\left\{ f_{(A_j,\Phi_j)}[a_j, B^\ast ] \right\}\\
&=
\lim_{j\to\infty}
\langle d_{A_j}f_{(A_j,\Phi_j)}, B\rangle_{L^2}
+
\lim_{j\to\infty}
\int_X\tr\left\{ f_{(A_j,\Phi_j)}[a_j, B^\ast ] \right\}\ .
\end{align*}
The first term vanishes since $\Vert d_{A_j}f_{(A_j,\Phi_j)}\Vert_{L^2}\to 0$, and the second term vanishes since $f_{(A_j,\Phi_j)}$ is bounded and $a_j\to 0$ in $C^\alpha$. Since $B$ is arbitrary, $d_{A_\infty}f_{(A_\infty,\Phi_\infty)}=0$, and
this completes the proof.
\end{proof}
The same type of argument leads to the
\begin{proof}[Proof of Theorem \ref{thm:hitchin-proper}]
Let $[A_j,\Phi_j]$ be a sequence of polystable Higgs bundles with $h[A_j,\Phi_j]$ bounded. By Theorem \ref{thm:hitchin} we may assume $(A_j,\Phi_j)$ satisfies \eqref{eqn:hitchin}. Since
$h[A_j,\Phi_j]$ is bounded, the pointwise spectrum of $\Phi_j$ is uniformly bounded. Therefore, Proposition \ref{prop:higgs-bound} provides uniform sup bounds on $|\Phi_j|$. Again using \eqref{eqn:hitchin} we have uniform bounds on $|F_{A_j}|$. Now Uhlenbeck compactness can be used to extract a convergent subsequence which also satisfies \eqref{eqn:hitchin} as in the proof of the existence theorem above.
\end{proof}
\subsubsection{The Yang-Mills-Higgs flow}
We define the {\bf Yang-Mills-Higgs flow } for a pair $(A,\Phi)$ by the equations
\begin{align}
\begin{split}\label{eqn:ymh-flow}
\frac{\partial A}{\partial t}&= -d_A^\ast(F_A+[\Phi, \Phi^\ast]) \ ;\\
\frac{\partial \Phi}{\partial t} &= [\Phi, \sqrt{-1}\Lambda(F_A+[\Phi, \Phi^\ast])]\ .
\end{split}
\end{align}
In the above, we only consider initial conditions where $\Phi$ is $d''_A$-holomorphic. Notice then that this holomorphicity condition is preserved along a solution to \eqref{eqn:ymh-flow}. Indeed, as in Donaldson \cite{Donaldson85}, the flow is tangent to the complex gauge orbit and exists for all $0\leq t<+\infty$.
The flow equations may be regarded as the $L^2$-gradient flow of the $\YMH$ functional.
They generalize the Yang-Mills flow equations. For more on this we refer to \cite{Hong01, Wilkin08} and the references therein. Here we limit ourselves to a discussion of a few key properties.
In particular, we justify the assumptions in the previous section.
As in \eqref{eqn:hitchin2}, set $f_{(A,\Phi)}= \sqrt{-1}\Lambda(F_A+[\Phi, \Phi^\ast])$.
\begin{lemma} \label{lem:ymh-flow1}
For all $t\geq 0$,
$$
\frac{d}{dt}\YMH(A,\Phi)=-2\Vert d_A f_{(A,\Phi)}\Vert_{L^2}^2-4\Vert[\Phi,f_{(A,\Phi)}]\Vert^2_{L^2}\ .
$$
\end{lemma}
\begin{proof}
We have
$$
\frac{d}{dt}\YMH(A,\Phi)=2\int_X\tr(f_{(A,\Phi)}\dot f_{(A,\Phi)}) \omega\ .
$$
Now using dots to denote time derivatives,
\begin{align*}
\dot f_{(A,\Phi)}&=
\sqrt{-1}\Lambda\left(d_A\dot A+[\dot\Phi,\Phi^\ast]+[\Phi,\dot\Phi^\ast] \right)\\
&=
\sqrt{-1}\Lambda\left( -d_Ad_A^\ast(F_A+[\Phi,\Phi^\ast])+[[\Phi,f],\Phi^\ast]+[\Phi, [\Phi,f]^\ast]\right) \\
&=
-d_A^\ast d_A f_{(A,\Phi)}+
\sqrt{-1}\Lambda\left(
[\Phi, f_{(A,\Phi)}]\Phi^\ast+\Phi^\ast[\Phi, f_{(A,\Phi)}]+\Phi[\Phi, f_{(A,\Phi)}]^\ast+[\Phi, f_{(A,\Phi)}]^\ast\Phi\right)\ .
\end{align*}
Taking traces we get
\begin{equation} \label{eqn:fdot}
\tr(f_{(A,\Phi)}\dot f_{(A,\Phi)})
=-\tr(f_{(A,\Phi)} d_A^\ast d_A f_{(A,\Phi)})
-2\sqrt{-1}\Lambda\tr\left([\Phi, f_{(A,\Phi)}][\Phi, f_{(A,\Phi)}]^\ast\right)\ ,
\end{equation}
and the result follows by integration by parts.
\end{proof}
As a consequence of Lemma \ref{lem:ymh-flow1}, $\YMH$ decreases along the flow. Moreover, we have the following inequality
$$
\int_0^\infty dt\left\{2\Vert d_A f_{(A,\Phi)}\Vert_{L^2}^2+4\Vert[\Phi,f_{(A,\Phi)}]\Vert^2_{L^2} \right\}
\leq \YMH(A_0,\Phi_0)\ .
$$
It follows that if $(A_j,\Phi_j)$ is a sequence with $\YMH(A_j,\Phi_j)$ uniformly bounded, then we may replace it with another sequence $(\widetilde A_j,\widetilde \Phi_j)$ with
$\YMH(\widetilde A_j,\widetilde \Phi_j)$
also uniformly bounded but such that
$d_{\widetilde A_j}f_{(\widetilde A_j,\widetilde \Phi_j)}$
and $[\widetilde\Phi_j,f_{(\widetilde A_j,\widetilde \Phi_j)}]$ converge to $0$ in $L^2$.
Now let's compute
\begin{align*}
\Delta \left|f_{( A,\Phi)}\right|^2&=-d^\ast d \left|f_{( A,\Phi)}\right|^2 =\ast d\ast d\tr f_{( A,\Phi)}^2 \\
&=2\ast d\ast \tr (f_{( A,\Phi)} d_A f_{( A,\Phi)}) \\
&=2\ast \tr( d_A f_{( A,\Phi)} \wedge \ast d_A f_{( A,\Phi)})
-2\tr(f_{( A,\Phi)} d_A^\ast d_A f_{( A,\Phi)}) \\
&=2\left| d_A f_{( A,\Phi)}\right|^2+4\left| [\Phi,f_{( A,\Phi)}]\right|^2+\frac{\partial}{\partial t}\left|f_{( A,\Phi)}\right|^2\ ,
from \eqref{eqn:fdot}.
We have shown
\begin{lemma}
For all $t\geq 0$,
$$
\frac{\partial}{\partial t}\left|f_{( A,\Phi)}\right|^2
-\Delta \left|f_{( A, \Phi)}\right|^2
=
-2\left| d_A f_{( A, \Phi)}\right|^2-4\left| [\Phi, f_{( A, \Phi)}]\right|^2\ .
$$
\end{lemma}
In particular, $\left|f_{( A,\Phi)}\right|^2$ is a subsolution of the heat equation, and so
$\sup \left|f_{( A,\Phi)}\right|$ is nonincreasing. In fact, one can use
an explicit argument with the heat kernel
to show that for $t\geq 1$, say,
$\sup \left|f_{( A_t,\Phi_t)}\right|\leq C\,\YMH(A_0,\Phi_0)$
for a fixed constant $C$.
In particular,
if $(A_j,\Phi_j)$ is a sequence with $\YMH(A_j,\Phi_j)$ uniformly bounded, then we may replace it with another sequence $(\widetilde A_j,\widetilde \Phi_j)$ with
$f_{(\widetilde A_j,\widetilde \Phi_j)}$ uniformly bounded.
\begin{proof}[Proof of Lemma \ref{lem:minimizing}]
Choose $(A_j,\Phi_j)$ a minimizing sequence for $J$ in the complex gauge orbit of $(A,\Phi)$.
Note that $\YMH(A_j,\Phi_j)$ is then uniformly bounded. In addition,
by an argument similar to the one above (see \cite{DaskalWentworth04}), $J$ is also decreasing along the $\YMH$-flow.
Hence, replacing each $(A_j,\Phi_j)$ with a point along the $\YMH$-flow with initial condition $(A_j,\Phi_j)$ also gives a $J$-minimizing sequence. On the other hand, by the discussion in this section, we can choose points along the flow where items (ii) and (iii) are also satisfied. This completes the proof.
\end{proof}
Let $\mathcal B_E^{min}$ be the set of all Higgs bundles satisfying the Hitchin equations \eqref{eqn:hitchin2}.
The $\YMH$-flow sets up an infinite dimensional, singular Morse theory problem where $\mathcal B_E^{min}$ is the minimum of the functional, and Higgs bundles not in $\mathcal B_E^{min}$ but
satisfying \eqref{eqn:critical} play the role of higher critical points.
This Morse theory picture can actually be shown to be more than just an analogy. In particular, we have the following
\begin{theorem}[Wilkin {\cite{Wilkin08}}] \label{thm:wilkin}
The $\YMH$-flow gives a $\mathcal G_E$-equivariant deformation retraction
of $\mathcal B_E^{ss}$ onto $\mathcal B_E^{min}$.
\end{theorem}
\section{The Betti Moduli Space} \label{sec:betti}
\subsection{Representation varieties}
\subsubsection{Definition}
Fix a base point $p\in X$ and
set $\pi=\pi_1(X,p)$.
Let $\Hom(\pi,\mathsf{SL}_n(\mathbb C))$ denote the set of homomorphisms from $\pi$ to $\mathsf{SL}_n(\mathbb C)$. This has the structure of an affine algebraic variety.
Let
\begin{equation*}
\mathfrak M_B^{(n)} =\Hom(\pi,\mathsf{SL}_n(\mathbb C))\bigr/ \negthinspace\negthinspace \bigr/ \mathsf{SL}_n(\mathbb C)\ ,
\end{equation*}
denote the representation variety,
where the double slash indicates the invariant theoretic quotient by overall conjugation of $\mathsf{SL}_n(\mathbb C)$. Then $\mathfrak M_B^{(n)}$ is an irreducible affine variety of complex dimension $(n^2-1)(2g-2)$.
There is a surjective algebraic quotient map $\Hom(\pi,\mathsf{SL}_n(\mathbb C)) \to \mathfrak M_B^{(n)}$, and this is a geometric quotient on the open set of irreducible (or simple) representations. Points of $\mathfrak M_B^{(n)}$ are in 1-1 correspondence with conjugacy classes of semisimple (or reductive) representations, and every $\mathsf{SL}_n(\mathbb C)$ orbit in $\Hom(\pi,\mathsf{SL}_n(\mathbb C))$ contains a semisimple representation in its closure (for these results, see \cite{LubotzkyMagid85}). Following Simpson \cite{Simpson94a, Simpson94b} I will refer to $\mathfrak M_B^{(n)}$ as the {\bf Betti moduli space} of rank $n$.
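Heuristically (ignoring stabilizers and singular points), the dimension count goes as follows. A representation is determined by the images $A_1,B_1,\ldots, A_g,B_g\in\mathsf{SL}_n(\mathbb C)$ of the standard generators of $\pi$, subject to the single relation
$$
\prod_{i=1}^g[A_i,B_i]=I\ ,
$$
which imposes $\dim\mathsf{SL}_n(\mathbb C)=n^2-1$ conditions on $\mathsf{SL}_n(\mathbb C)^{2g}$; near an irreducible representation the conjugation action has discrete stabilizer (the center), so the quotient removes another $n^2-1$ dimensions. Hence
$$
\dim_\mathbb C\mathfrak M_B^{(n)}=2g(n^2-1)-(n^2-1)-(n^2-1)=(n^2-1)(2g-2)\ .
$$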
Let $E\to X$ be a trivial rank $n$ complex
vector bundle. A flat connection $\nabla$ on $E$ gives rise
to a representation of $\pi$ as follows. Recall that we have
fixed a base point $p\in X$. We also fix a frame
$\{{\bf e}_i\}$ of $E_p$. For each loop $\gamma$ based at $p$, parallel
translation of the frame $\{{\bf e}_i\}$ defines an element of
$\mathsf{GL}_n(\mathbb C)$. Since the connection is flat this is independent of
the choice of path in the homotopy class. In this way we have
defined an element $\hol(\nabla)\in \Hom(\pi, \mathsf{GL}_n(\mathbb C))$. If $\nabla$
induces the trivial connection on $\det E$, the holonomy lies
in $\mathsf{SL}_n(\mathbb C)$, and we will assume this from now on.
Conversely, given a representation $\rho: \pi_1(X,p)\to
\mathsf{SL}_n(\mathbb C)$, we obtain a holomorphic bundle $\mathcal V_\rho$
with a flat connection $\nabla$ by the quotient
$
\mathcal V_\rho=\widetilde X\times \mathbb C^n/\pi
$,
where $\widetilde X$ is the universal cover of $X$, and the quotient identifies $(x,v)\sim (x\gamma,
v\rho(\gamma))$.
Let $\mathcal C_E$ denote the space of connections on $E$, and $\mathcal C^{flat}_E\subset\mathcal C_E$ the flat connections.
Let $\mathcal G_E^\mathbb C(p)$ denote the space of complex gauge
transformations that are the identity at $p$, acting on
$\mathcal C_E$ by conjugation ({\bf warning:} this is a different
action of $\mathcal G^\mathbb C_E$ from the one on the space of \emph{unitary} connections in Section \ref{sec:gauge}).
\begin{proposition} \label{prop:monodromy}
The holonomy map gives an $\mathsf{SL}_n(\mathbb C)$-equivariant homeomorphism
$$
\hol:
\mathcal C_E^{flat}/\mathcal G_E^\mathbb C(p) \buildrel \sim\over\longrightarrow \Hom(\pi,\mathsf{SL}_n(\mathbb C))\ .
$$
In particular, $\mathcal C_E^{flat}\bigr/ \negthinspace\negthinspace \bigr/\mathcal G_E^\mathbb C\simeq \mathfrak M_B^{(n)}$.
\end{proposition}
\subsection{Local systems and holomorphic connections}
\subsubsection{Definitions}
\begin{definition}
A {\bf complex $n$-dimensional local system} on $X$ is a sheaf of abelian groups that is locally isomorphic to the constant sheaf $\underline\mathbb C^n$.
\end{definition}
\noindent
Here $\underline\mathbb C$ denotes the locally constant sheaf modeled on $\mathbb C$. Clearly a local system ${\bf V}$ is a sheaf of modules over $\underline\mathbb C$.
\begin{definition}
Let $\mathcal V\to X$ be a holomorphic bundle. A {\bf holomorphic connection} on $\mathcal V$ is a $\mathbb C$-linear operator $\nabla: \mathcal V\to \mathcal K \otimes\mathcal V$ satisfying the Leibniz rule
\begin{equation} \label{eqn:leibniz}
\nabla(fs)=df\otimes s + f\nabla s\ ,
\end{equation}
for local sections $f\in \mathcal O $, $s\in \mathcal V$.
\end{definition}
For a local system ${\bf V}$
let $\mathcal V$ be the holomorphic bundle $\mathcal V=\mathcal O\otimes_{\underline\mathbb C}{\bf V}$.
Then $\mathcal V$ inherits a holomorphic connection as follows:
choose a local parallel frame $\{{\bf v}_i\}$ for ${\bf V}$.
Any local section of $\mathcal V$ may be written uniquely as $s=\sum_{i=1}^n f_i\otimes{\bf v}_i$, with $f_i\in \mathcal O$. Then \emph{define}
$
\nabla s=\sum_{i=1}^n df_i\otimes {\bf v}_i
$. Since the transition functions for ${\bf V}$ are constant this is well-defined independent of the choice of frame, and $\nabla$ also immediately satisfies the Leibniz rule.
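To make the frame independence explicit (a one-line check): if $\{{\bf v}_i'\}$ is another parallel frame, then ${\bf v}_i'=\sum_j c_{ji}{\bf v}_j$ with the $c_{ji}$ locally constant, so writing $s=\sum_i f_i'\otimes{\bf v}_i'=\sum_j\bigl(\sum_i c_{ji}f_i'\bigr)\otimes{\bf v}_j$ we find
$$
\sum_i df_i'\otimes{\bf v}_i'=\sum_j d\Bigl(\sum_i c_{ji}f_i'\Bigr)\otimes{\bf v}_j\ ,
$$
since the $c_{ji}$ are constant; both frames therefore give the same $\nabla s$.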
Conversely, a holomorphic connection defines a \emph{flat} connection on the underlying complex vector bundle, since
in a local holomorphic frame the curvature $F_\nabla$ is necessarily of type $(2,0)$, and
on a Riemann surface there are no $(2,0)$-forms. In particular, the $\underline\mathbb C$-subsheaf ${\bf V}\subset \mathcal V$ of locally parallel sections $\nabla s=0$ defines a local system. This gives a categorical equivalence between local systems and holomorphic bundles with a holomorphic connection (see
\cite[Th\'eor\`eme 2.17]{Deligne70}).
A local system has a {\bf monodromy
representation} $\rho:\pi\to \mathsf{GL}_n(\mathbb C)$, obtained by developing local parallel frames.
Conversely, given $\rho$ we construct a local system as in the previous section.
We will sometimes denote these ${\bf V}_\rho$ and $\mathcal V_\rho$.
For simplicity, in these notes I will almost
always assume the monodromy lies in $\mathsf{SL}_n(\mathbb C)$, or in other words, $\det\mathcal V_\rho\simeq\mathcal O$ and the induced connection on $\det\mathcal V_\rho$ is trivial.
Not every holomorphic bundle $\mathcal V$ admits a holomorphic connection. In particular, such a connection is flat, and so by \eqref{eqn:chern-weil} a necessary condition is that $\deg\mathcal V=0$.
In fact, one can say more about the Harder-Narasimhan type of a bundle with a holomorphic connection.
\begin{proposition}[cf.\ \cite{EsnaultViehweg99, Bolibrukh02}] \label{prop:bound}
Suppose $\mathcal V$ is an unstable bundle with
an irreducible holomorphic connection, and let $\mu_1>\mu_2>\cdots>\mu_\ell$ be the Harder-Narasimhan type. Then for each $i=1,\ldots, \ell-1$, $\mu_i-\mu_{i+1}\leq 2g-2$.
\end{proposition}
\begin{proof}
Let $0\subset \mathcal V_1\subset\cdots\subset \mathcal V_\ell=\mathcal V$ be the Harder-Narasimhan filtration of $\mathcal V$. Then since the connection is irreducible the $\mathcal O$-linear map
$\mathcal V_i\stackrel{\nabla}{\xrightarrow{\hspace*{.5cm}}} \mathcal V/\mathcal V_i\otimes \mathcal K $
is nonzero for each $i=1,\ldots, \ell-1$.
Let $j\leq i$ be the smallest integer such that
$\mathcal V_j\to \mathcal V/\mathcal V_i\otimes \mathcal K $
is nonzero. Then it follows from the sequence$$
0\longrightarrow \mathcal V_{j-1}\longrightarrow \mathcal V_j \longrightarrow \mathcal Q_j\longrightarrow 0
$$
that there is a nonzero map $\mathcal Q_j\to \mathcal V/\mathcal V_i\otimes \mathcal K $.
With this fixed $j$, let $k\geq i$ be the largest integer such that $\mathcal Q_j\to \mathcal V/\mathcal V_k\otimes \mathcal K $ is nonzero.
It follows from
$$
0\longrightarrow \mathcal Q_{k+1}\longrightarrow \mathcal V/\mathcal V_k\longrightarrow \mathcal V/\mathcal V_{k+1}\longrightarrow 0
$$
that $\mathcal Q_j\to \mathcal Q_{k+1}\otimes \mathcal K $ is nonzero.
Since the $\mathcal Q_i$ are all semistable, we have by Lemma \ref{lem:semistable} that
$$
\mu_j=\mu(\mathcal Q_j)\leq \mu(\mathcal Q_{k+1}\otimes\mathcal K )=\mu_{k+1}+2g-2\ ,
$$
and the result follows, since $\mu_i-\mu_{i+1}\leq \mu_j-\mu_{k+1}$.
\end{proof}
\subsubsection{The Weil-Atiyah theorem} The goal of this section is to prove the following
\begin{theorem}[Weil {\cite{Weil38}}, Atiyah {\cite{Atiyah57}}] \label{thm:weil}
A holomorphic bundle $\mathcal V \to X$ admits a holomorphic connection if and only if each indecomposable factor of $\mathcal V$ has degree zero.
\end{theorem}
The proof I give here follows Atiyah.
The following construction will be useful (see {\cite[p.\ 193]{Atiyah57}}).
Any holomorphic bundle $\mathcal V\to X$ gives rise to a counterpart
$D(\mathcal V)$ as follows. First, as a smooth bundle $D(\mathcal V)=(V\otimes K)\oplus V$. With respect to this splitting define the $\mathcal O$-module structure by
$$
f(\varphi,s)=(f\varphi+s\otimes df, fs)\ ,\qquad \ f\in \mathcal O\ ,\ \varphi\in \mathcal V\otimes\mathcal K\ ,\ s\in \mathcal V\ .
$$
One checks that this gives $D(\mathcal V)$ the structure of a locally free sheaf over $\mathcal O$.
Then we have a compatible inclusion $\varphi\mapsto(\varphi,0)$ and projection $(\varphi,s)\mapsto s$ making $D(\mathcal V)$ into an extension
\begin{equation} \label{eqn:de}
0\longrightarrow \mathcal V\otimes\mathcal K\longrightarrow D(\mathcal V)\longrightarrow \mathcal V\longrightarrow 0\ .
\end{equation}
Observe that \eqref{eqn:de} \emph{splits if and only if $\mathcal V$ admits a holomorphic connection}. Indeed, such a $\nabla$ gives a splitting by $s\mapsto (\nabla s,s)$, and if \eqref{eqn:de} splits then there is a $\underline\mathbb C$-linear map $\mathcal V\to \mathcal V\otimes\mathcal K$ satisfying \eqref{eqn:leibniz}.
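To spell out the first implication: the map $s\mapsto(\nabla s,s)$ is $\mathcal O$-linear precisely because the module structure on $D(\mathcal V)$ absorbs the Leibniz term. Indeed, by \eqref{eqn:leibniz},
$$
f\cdot(\nabla s, s)=(f\nabla s+s\otimes df, fs)=(\nabla(fs),fs)\ .
$$
Conversely, running the same computation backwards, an $\mathcal O$-linear splitting $s\mapsto(\nabla s,s)$ of \eqref{eqn:de} forces $\nabla(fs)=f\nabla s+s\otimes df$, which is exactly \eqref{eqn:leibniz}.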
\begin{remark} \label{rem:de}
The construction is functorial with respect to subbundles. If $0=\mathcal V_0\subset\mathcal V_1\subset\cdots\subset\mathcal V_\ell=\mathcal V$ is a filtration of $\mathcal V$ by holomorphic subbundles, then
there is a filtration
$$0=D(\mathcal V_0)\subset D(\mathcal V_1)\subset\cdots\subset D(\mathcal V_\ell)=D(\mathcal V)\ .$$
\end{remark}
\begin{lemma} \label{lem:extension-class}
Given a holomorphic bundle $\mathcal V\to X$, let
$$[\beta]\in H^1(X,(\mathcal V\otimes\mathcal K)\otimes\mathcal V^\ast)\simeq H^{1,1}_{\bar\partial}(X,\End V)\ ,$$
denote the extension class. Then $[\tr\beta]=-2\pi\sqrt{-1}\, c_1(V)$.
\end{lemma}
\begin{proof}
Choose $s^{(i)}$ local holomorphic frames for $\mathcal V$ on $U_i$, and let $\psi_{ij}$ denote the transition functions: $s^{(i)}=s^{(j)}\psi_{ij}$.
We can define local splittings of \eqref{eqn:de} by $s^{(i)}f^{(i)}\mapsto s^{(i)}\otimes df^{(i)}$, for $f^{(i)}$ a vector of holomorphic functions on $U_i$.
In particular,
$$
f^{(j)}=\psi_{ij}f^{(i)}\ ,\
\partial f^{(j)}=\psi_{ij}(\psi_{ij}^{-1}\partial\psi_{ij}f^{(i)}+\partial f^{(i)})\ .
$$
Since the extension class is given by the image of $I$ under the map
$$
H^0(X,\End \mathcal V)\to H^1(X, \End\mathcal V\otimes \mathcal K)\ ,
$$
it follows from the local splitting above that $[\beta]$ is represented by the cocycle $[\psi_{ij}^{-1}d\psi_{ij}]$. Hence, $[\tr\beta]=[d\log\det\psi]$. On the other hand, if $h$ is a hermitian metric on $\det\mathcal V$, then
$$
h_i|s^{(i)}_1\wedge \cdots\wedge s^{(i)}_n|^2=h_j|s^{(j)}_1\wedge \cdots\wedge s^{(j)}_n|^2\ ,
$$
so $h_i|\det\psi_{ij}|^2=h_j$. This implies $d\log\det\psi_{ij}=\partial\log h_j-\partial\log h_i$. By the Dolbeault isomorphism $[\tr\beta]$ is represented by $[\bar\partial\partial\log h_i]=[F_{(\bar\partial_{\det\mathcal V},h)}]=-2\pi\sqrt{-1}\, c_1(V)$ (see Example \ref{ex:line-bundle-curvature} and \eqref{eqn:chern-weil}).
\end{proof}
\begin{lemma} \label{lem:nilpotent}
If $\mathcal V\to X$ is an indecomposable holomorphic bundle and $\phi\in H^0(X,\End \mathcal V)$, then there is $\lambda\in \mathbb C$ such that $\phi-\lambda I$ is nilpotent.
\end{lemma}
\begin{proof}
Since $\det(\phi-\lambda I)$ is holomorphic and $X$ is closed, the eigenvalues of $\phi$ must be constant. So, replacing $\phi$ by $\phi-\lambda I$ for an eigenvalue $\lambda$, we may assume without loss of generality that $\ker\phi\neq \{0\}, \mathcal V$, and consider the sequence
\begin{equation}
\begin{matrix}
0 \longrightarrow & \ker\phi &\longrightarrow & \mathcal V & \longrightarrow & \coker \phi & \longrightarrow 0 \\
& \begin{sideways}$=$\end{sideways} &&&& \begin{sideways}$=$\end{sideways} & \\
& \mathcal S &&&& \mathcal Q &
\end{matrix}
\label{eqn:kernel}
\end{equation}
Write:
$$
\bar\partial_E=\left(\begin{matrix} \bar\partial_S&\beta \\ 0&\bar\partial_Q\end{matrix}\right)\qquad ,\qquad
\phi=\left(\begin{matrix} 0&\phi_1\\ 0&\phi_2\end{matrix}\right)\ .
$$
We wish to show $\phi_2=0$. First note that
$$
0=\bar\partial_E\phi=\left(\begin{matrix} 0&\bar\partial_E\phi_1+\beta\phi_2\\ 0&\bar\partial_Q\phi_2\end{matrix}\right)\ .
$$
So $\phi_2$ is holomorphic as an endomorphism of $\mathcal Q$. If $\phi_2\neq 0$, then it is an isomorphism. This is so because again the eigenvalues of $\phi_2$ are constant, and by assumption $0$ is not an eigenvalue. Hence, we can rewrite the upper right entry in the matrix equation above as: $\bar\partial_E(\phi_1\phi_2^{-1})+\beta=0$. But then the Dolbeault class of $\beta$ vanishes and \eqref{eqn:kernel} splits, contradicting the assumption that $\mathcal V$ be indecomposable.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:weil}]
Suppose $\mathcal V$ has a holomorphic connection. Then by Remark \ref{rem:de}, $D(\mathcal V)$ splits. Moreover, since $D(\mathcal V)$ is natural with respect to subbundles, $D(\mathcal V_i)$ splits for each indecomposable factor of $\mathcal V$. But then by Lemma \ref{lem:extension-class}, $\deg(\mathcal V_i)=0$ for all $i$. Conversely, suppose $\mathcal V$ is indecomposable and $\deg(\mathcal V)=0$. It suffices to show $D(\mathcal V)$ splits. Now by Serre duality the extension class
$$
[\beta]\in H^1(X, \End(\mathcal V)\otimes \mathcal K)\simeq \left(H^0(X, \End(\mathcal V)) \right)^\ast\ ,
$$
and the perfect pairing is $\displaystyle (\beta,\phi)=\int_X \tr(\beta\phi)$. By Lemma \ref{lem:nilpotent} we may express $\phi=\lambda I+\phi_0$, where $\phi_0$ is nilpotent. Then by Lemma \ref{lem:extension-class},
\begin{align}
\begin{split} \label{eqn:phi0}
(\beta,\phi)&=(\beta,\phi_0)+\lambda(\beta, I)
=(\beta,\phi_0)+\lambda\int_X \tr \beta \\
&=(\beta,\phi_0) -2\pi\sqrt{-1}\,\lambda\deg(\mathcal V)
=(\beta,\phi_0)\ .
\end{split}
\end{align}
Set $\mathcal V_\ell=\mathcal V$, and
recursively define $\mathcal V_{i-1}$ to be the saturation of $\phi_0(\mathcal V_i)$. Note that $\mathcal V_{i-1}$ is a proper subbundle of $\mathcal V_i$, since otherwise the restriction of $\phi_0$ would be almost everywhere an isomorphism, contradicting the nilpotency of $\phi_0$. Eventually the process terminates.
Adjust $\ell$ so that $\mathcal V_0=\{0\}$, $\mathcal V_1\neq \{0\}$. By Remark \ref{rem:de}, $\beta$ preserves the filtration
$0=\mathcal V_0\subset\mathcal V_1\subset\cdots\subset\mathcal V_\ell=\mathcal V$. Choose a hermitian metric on $V$ and let $\pi_i$ be orthogonal projection to $V_i$. Note that
$$
I=\sum_{i=1}^\ell(\pi_i-\pi_{i-1})=\sum_{i=1}^\ell(\pi_i-\pi_i\pi_{i-1})=\sum_{i=1}^\ell\pi_i(I-\pi_{i-1})\ ,
$$
and
$
(I-\pi_i)\beta\pi_i=0\ ,\quad (I-\pi_{i-1})\phi_0\pi_i=0\ .
$
Then
\begin{align*}
\tr(\beta\phi_0)=
\tr(\phi_0\beta)&=\sum_{i=1}^\ell \tr(\phi_0\beta\pi_i(I-\pi_{i-1}))\\
&=\sum_{i=1}^\ell \tr((I-\pi_{i-1})\phi_0\beta\pi_i)\\
&=
\sum_{i=1}^\ell \tr((I-\pi_{i-1})\phi_0\pi_i\beta\pi_i)\\
&=0\ .
\end{align*}
So $(\beta,\phi_0)=0$, and by \eqref{eqn:phi0} we conclude $[\beta]=0$. The proof is complete.
\end{proof}
\subsection{The Corlette-Donaldson theorem} \label{sec:corlette}
\subsubsection{Hermitian metrics and equivariant maps}
Let $D=\mathsf{SU}_n\backslash\mathsf{SL}_n(\mathbb C)$ and $\rho: \pi\to \mathsf{SL}_n(\mathbb C)$. Then $\pi$ acts on the right on $D$ via the representation $\rho$. Following Donaldson, we give a concrete description of $D$ with its $\mathsf{SL}_n(\mathbb C)$-action. Set
$$
D=\{\text{positive hermitian $n\times n$ matrices $M$ with $\det M=1$}\}\ .
$$
Then the right $\mathsf{SL}_n$ action is given by $(M,g)\mapsto g^{-1}M(g^{-1})^\ast$. Note that the space $D$ may be interpreted as the space of hermitian inner products on $\mathbb C^n$ which induce a fixed one on $\det \mathbb C^n$. The invariant metric on $D$ is given by $|M^{-1}dM|^2=\tr(M^{-1}dM)^2$.
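As a quick consistency check, the metric $|M^{-1}dM|^2$ is indeed invariant under this action: for constant $g\in\mathsf{SL}_n(\mathbb C)$ and $\widetilde M=g^{-1}M(g^{-1})^\ast$ we have
$$
\widetilde M^{-1}d\widetilde M=g^\ast M^{-1}g\cdot g^{-1}dM\,(g^\ast)^{-1}=g^\ast\bigl(M^{-1}dM\bigr)(g^\ast)^{-1}\ ,
$$
so $\tr(\widetilde M^{-1}d\widetilde M)^2=\tr(M^{-1}dM)^2$.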
\begin{definition} \label{def:equivariant}
A map $u:\widetilde X\to D$ is $\rho$-equivariant if $u( x\gamma)=u(x)\rho(\gamma)$ for all $x\in \widetilde X$, $\gamma\in \pi$.
\end{definition}
Let $E=\widetilde X\times \mathbb C^n /\pi$. We now claim that a hermitian metric on the bundle $E$ is equivalent to a choice of $\rho$-equivariant map, up to the choice of basepoints. Indeed, suppose $u:\widetilde X\to D$ is $\rho$-equivariant. By definition, a section of $E$ is a map $\sigma: \widetilde X\to \mathbb C^n$ such that $\sigma(x\gamma)=\sigma(x)\rho(\gamma)$. Hence, if we define
$\Vert\sigma\Vert^2(x)=\langle\sigma(x), \sigma(x)u(x)\rangle_{\mathbb C^n}$, then
$$
\Vert\sigma\Vert^2(x\gamma)=\langle\sigma(x)\rho(\gamma), \sigma(x)u(x)(\rho(\gamma)^{-1})^\ast\rangle_{\mathbb C^n}=\Vert\sigma\Vert^2(x)\ ,
$$
and so this is a well-defined metric on $E$. In the other direction, given a metric $H$, if $\sigma_i$ are sections, then
write $\langle \sigma_i,\sigma_j\rangle_H(x)=\langle \sigma_i(x),\sigma_j(x) u(x)\rangle_{\mathbb C^n}$, for a hermitian matrix valued function $u(x)$. Then
\begin{align*}
\langle \sigma_i(x),\sigma_j(x) u(x)\rangle_{\mathbb C^n}&=
\langle \sigma_i,\sigma_j\rangle_H(x)=\langle \sigma_i,\sigma_j\rangle_H(x\gamma) \\
&=\langle \sigma_i(x)\rho(\gamma),\sigma_j(x)\rho(\gamma)u(x\gamma)\rangle_{\mathbb C^n} \\
&=\langle \sigma_i(x),\sigma_j(x)\rho(\gamma)u(x\gamma)\rho(\gamma)^\ast\rangle_{\mathbb C^n}
\end{align*}
for all sections. Hence, $\rho(\gamma)u(x\gamma)\rho(\gamma)^\ast=u(x)$, and $u$ is $\rho$-equivariant.
\subsubsection{Harmonic metrics}
If $u:\widetilde X\to D$ is a continuously
differentiable $\rho$-equivariant map,
we define its energy as follows.
The derivative $du$ is a section of $T^\ast\widetilde X\otimes
u^\ast(TD)$.
We have fixed an invariant metric on $D$, so the pointwise norm
$e_u(x)=|du|^2(x)$ is defined. In fact, by equivariance, $e_u(x)$ is
invariant under $\pi$, so it gives a well-defined
function on $X$, called the {\bf energy density}.
The {\bf energy} of $u$ is then by definition
\begin{equation} \label{eqn:energy}
E_\rho(u)=\int_X e_u(x)\, \omega\ .
\end{equation}
Note that the energy only depends on the conformal structure on
$X$ and not the full metric.
The Euler-Lagrange equations for $E_\rho$ are easy to write
down. Define
\begin{equation} \label{eqn:tension}
\tau(u)=d_\nabla^\ast du\ .
\end{equation}
In the above we note that the bundle
$u^\ast(TD)$ has a connection $\nabla$: the pull-back of the
Levi-Civita connection on $D$. It is with respect to this
connection that $d_\nabla$ is defined. The tensor $\tau(u)$
is called the {\bf tension field}. It is a section of
$u^\ast(TD)$.
\begin{definition}\label{def:harmonic}
A $C^2$ $\rho$-equivariant map
$u$ is called {\bf harmonic}
if it satisfies
\begin{equation} \label{eqn:harmonic-map}
\tau(u)=0\ .
\end{equation}
\end{definition}
Eq.\ \eqref{eqn:harmonic-map} is a second order
elliptic nonlinear partial differential equation in $u$.
This statement is slightly misleading, because $u$ is a
mapping and not a collection of functions.
This annoying fact makes defining weak solutions a little tricky.
In the case of maps between compact manifolds (the non-equivariant problem)
one way to circumvent this issue is to use a Nash isometric embedding of the target into
a euclidean space and rewrite the equations in terms of coordinate functions (cf.\ \cite{Schoen83}). A more sophisticated technique, better suited to the equivariant problem, is to define the Sobolev
space theory intrinsically (cf.\ \cite{KorevaarSchoen93, KorevaarSchoen97, Jost94}).
On the other hand, if we \emph{assume} $u$ is Lipschitz continuous, then we can introduce local
coordinates $\{y^a\}$ on $D$ and write
\eqref{eqn:harmonic-map} locally. By Rademacher's theorem the pull-backs
$s_a=u^\ast(\partial/\partial y^a)$ give a local frame for
$u^\ast(TD)$ almost everywhere, and the connection forms for $\nabla$
in this frame are $\Gamma_{ab}^c(u) du^a\otimes s_c$, where
$\Gamma_{ab}^c(u)$ are the Christoffel symbols on $D$
evaluated along $u$. Writing $u=(u^1, \ldots, u^N)$ in terms
of the coordinates $\{y^a\}$, it is easy to see that
the local expression of
\eqref{eqn:harmonic-map} becomes
\begin{equation} \label{eqn:harmonic-map-local}
-\tau(u)^a= \Delta u^a+\Gamma_{bc}^a(u) \nabla u^b\cdot \nabla
u^c=0\ .
\end{equation}
To be clear, the dot product in the second term refers to the
metric on $X$, and $\Delta$ is the Laplace operator on $X$.
Notice that this equation is conformally invariant with
respect to the metric on $X$, a
manifestation of the fact that the energy functional itself is
conformally invariant.
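The computation behind this remark is elementary, and worth recording: under a conformal change $g\mapsto e^{2\varphi}g$ of the metric on $X$ (here $\varphi$ is just notation for the conformal factor), the energy density scales as $e_u\mapsto e^{-2\varphi}e_u$ while the area form scales as $\omega\mapsto e^{2\varphi}\omega$, so $e_u\,\omega$, and hence $E_\rho(u)$, is unchanged. Likewise, in dimension two both $\Delta$ and the dot product in \eqref{eqn:harmonic-map-local} scale by the same factor:
$$
-\tau(u)^a=e^{-2\varphi}\Bigl(\Delta_0 u^a+\Gamma^a_{bc}(u)\,\nabla_0 u^b\cdot\nabla_0 u^c\Bigr)\ ,
$$
where the subscript $0$ refers to the original metric, so the equation $\tau(u)=0$ depends only on the conformal class.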
In light of the previous section,
$\rho$-equivariant maps are equivalent to choices of hermitian metrics.
Given a flat connection $\nabla$ and
hermitian metric on $E$ we can construct the equivariant map
in a more intrinsic way. First, lift $\nabla$ and $E$ to obtain
a flat connection on a trivial bundle on the universal cover
$\widetilde X$. We will use the same notation to denote this lifted
bundle and connection. If
we choose a base point $\hat p$ covering the base
point $p$ for $\pi_1(X,p)$, and we choose a unitary
frame $\{{\bf e}_i(\hat p)\}$ for the fiber $E_{\hat p}$, let
$\{{\bf e}_i(x)\}$ be given by parallel transport with respect to
$\nabla$. Then the map $u: \widetilde X\to D$ is given by the matrix of inner products
$x\mapsto \bigl(\langle {\bf e}_i, {\bf e}_j\rangle(x)\bigr)$.
It is $\rho$-equivariant and uniquely determined up
to the choice of $\hat p$ and the base point in $D$.
Conversely, if $u:\widetilde X\to D$ is any $\rho$-equivariant
map such that $u(\hat p)=I$, then $u$ defines a hermitian metric
for which it is the equivariant map constructed above.
Notice that there is an equivalence of the type we saw for
Higgs bundles. If $g\in \mathcal G_E^\mathbb C(p)$ then the
corresponding $\rho$-equivariant map obtained
from the pair $(g(\nabla),H)$ is the same as that for
$(\nabla, Hg)$. Finally, if we act by a constant
$g\in\mathsf{SL}_n(\mathbb C)$, the same is true,
but now the map is $(\rho\cdot g)$-equivariant.
The moral of the story is that finding a harmonic
metric is equivalent to finding a harmonic equivariant map in the $\mathcal G_E^\mathbb C$ orbit of $\nabla$.
Given the data $(\nabla, H)$, we may uniquely
write $\nabla=d_A+\Psi$, where
$d_A$ is a unitary connection on $(E,H)$,
and $\Psi$ is a $1$-form with values in the bundle $\sqrt{-1}\mathfrak g_E$ of
hermitian endomorphisms.
We can explicitly define $\Psi$ with respect to a local frame $\{s_i\}$ by
\begin{equation}\label{eqn:psi}
\langle \Psi s_i, s_j\rangle=\frac{1}{2}\left\{
\langle \nabla s_i, s_j\rangle+\langle s_i,
\nabla s_j\rangle -d\langle s_i, s_j\rangle
\right\}\ .
\end{equation}
\begin{lemma}[{cf.\ \cite{Donaldson87}}] \label{lem:energy-psi}
The energy of the map defined above is given by $E_\rho(u)
=4\Vert \Psi\Vert^2$.
\end{lemma}
\begin{proof}
From the definition above and the fact that $d_A$ is unitary,
$$
du_{ij}=\langle d_A {\bf e}_i, {\bf e}_j\rangle+\langle {\bf e}_i, d_A{\bf e}_j\rangle\ .
$$
On the other hand, the ${\bf e}_i$ are parallel with respect to $\nabla$, so
$d_A{\bf e}_j=-\Psi{\bf e}_j$. Hence, $u^{-1}du=-2\Psi$, and so $e_u=|u^{-1}du|^2=4|\Psi|^2$; integrating over $X$ gives the result.
\end{proof}
\begin{definition} \label{def:harmonic-metric}
We say that $H$ is a {\bf harmonic metric} if the map $u$ defined above is a harmonic map.
\end{definition}
\begin{proposition}[Corlette {\cite{Corlette88}}] \label{prop:semisimple}
If $\rho$ admits a harmonic metric then $\rho$ is semisimple.
\end{proposition}
\begin{proof}
Suppose that $H$ is a critical metric but that $\nabla$ is reducible. Let $V_1\subset V$ be a subbundle invariant with respect to the connection $\nabla$. Let $V_2$ be the orthogonal complement of $V_1$, and $H_1$, $H_2$ the induced metrics. We can express
$$
\nabla=\left( \begin{matrix} \nabla_1 & \beta \\ 0 &\nabla_2\end{matrix}\right)=
\left( \begin{matrix} d_{A_1}+ \Psi_1 & \beta \\ 0 &d_{A_2}+\Psi_2\end{matrix}\right)\ ,
$$
where $\beta\in \Omega^1(X, \Hom(V_2,V_1))$. It suffices to show that the connection splits, or in other words that $\beta\equiv 0$. The proposition then follows by induction.
Now using \eqref{eqn:psi} it follows that if $s_1, s_2$ are local sections of $V_1$, then
$\langle\Psi s_1, s_2\rangle=\langle\Psi_1 s_1, s_2\rangle$. Similarly, $\langle\Psi s_1, s_2\rangle=\langle\Psi_2 s_1, s_2\rangle$ for local sections of $V_2$. On the other hand, if $s_i\in V_i$, then
$\langle\Psi s_1, s_2\rangle=\frac{1}{2}\langle s_1, \beta s_2\rangle$.
It follows that
$$
\Psi=\left( \begin{matrix} \Psi_1 &\frac{1}{2} \beta \\ \frac{1}{2}\beta^\ast &\Psi_2\end{matrix}\right)\ .
$$
We now deform the metric $H$ to a family $H_t$ as follows: scale $H_1\mapsto e^{-(\rank V_2)t}H_1$, and $H_2\mapsto e^{+(\rank V_1)t}H_2$. This, of course, preserves the orthogonal splitting and the condition $\det H_t=1$.
But $H_t$ is a geodesic homotopy of $\rho$-equivariant maps, and so
by a result of Hartman the energy $E_\rho(u_t)$ is convex \cite{Hartman67}. On the other hand, by Lemma \ref{lem:energy-psi},
$$
\frac{1}{4}E_\rho(u_t)=\Vert\Psi_1\Vert^2_{H_1}+\Vert\Psi_2\Vert^2_{H_2}+\Vert\beta\Vert^2_{H}\, e^{-(\rank V)t/2}\ .
$$
In particular, $E_\rho(u_t)$ is bounded as $t\to \infty$. The only way $E_\rho(u_t)$ could have a critical point at $t=0$ is if $E_\rho(u_t)$ is constant, which implies $\beta\equiv 0$.
This completes the proof.
\end{proof}
\subsubsection{The Corlette-Donaldson Theorem}
In this section we prove the following
\begin{theorem}[Corlette {\cite{Corlette88}}, Donaldson {\cite{Donaldson87}}, Jost-Yau {\cite{JostYau91}}, Labourie {\cite{Labourie91}}] \label{thm:corlette}
Let $\rho:\pi\to \mathsf{SL}_n(\mathbb C)$ be semisimple.
Then there exists a
$\rho$-equivariant harmonic map $u:\widetilde X\to D$.
\end{theorem}
The following result can be compared to Lemma \ref{lem:minimizing}.
It will be proven when we discuss the harmonic map flow in the next section.
\begin{lemma} \label{lem:lipschitz}
For any $\rho:\pi\to \mathsf{SL}_n(\mathbb C)$ there is a sequence $u_j$ of
$\rho$-equivariant maps $u_j:\widetilde X\to D$ satisfying
the conditions:
\begin{enumerate}
\item $u_j$ is energy minimizing.
\item
The $u_j$ have a uniformly bounded Lipschitz constant.
\item $\tau(u_j)\to 0$ in $L^2$.
\end{enumerate}
\end{lemma}
\begin{lemma} \label{lem:reductive}
Let $\rho:\pi\to \mathsf{SL}_n(\mathbb C)$ be irreducible, and let $u_j:
\widetilde X\to D$ be a sequence of $\rho$-equivariant maps with a
uniform Lipschitz constant. Then $u_j(\hat p)$ is bounded.
\end{lemma}
\begin{proof}
Suppose not.
Set $h_j=u_j(\hat p)$ and choose $\varepsilon_j\to 0$ such that
(perhaps after passing to a subsequence) $\varepsilon_j h_j\to
h_\infty\neq 0$. Notice that $\det h_\infty=0$, so
$V=\ker h_\infty$ is a proper subspace of $\mathbb C^n$. I
claim $\rho(\pi)$ fixes $V$.
Indeed, if $\rho(\gamma)=g^{-1}$ and $v\in V$, then since $d(u_j(\hat p), u_j(\hat p)\cdot g^{-1})$ is uniformly bounded we have
$$
| \langle w, v h_j\rangle_{\mathbb C^n}-\langle w, v g h_j g^\ast\rangle_{\mathbb C^n}|\leq B\ ,
$$
for a constant $B$ independent of $j$, and all $w\in \mathbb C^n$. It follows that
$$
| \langle w, v\varepsilon_j h_j \rangle_{\mathbb C^n}-\langle wg, vg \varepsilon_j h_j \rangle_{\mathbb C^n}|\longrightarrow 0\ ,
$$
and since $vh_\infty=0$ we conclude that $\langle wg, vg h_\infty \rangle_{\mathbb C^n}=0$. Since $w$ was arbitrary, $v g\in V$. Thus $V$ is a nonzero proper invariant subspace, contradicting the irreducibility of $\rho$.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:corlette}]
By induction it suffices to prove the result for irreducible representations.
Let $u_j$ be a minimizing sequence as in Lemma \ref{lem:lipschitz}.
By Lemma \ref{lem:reductive} the $u_j(\hat p)$ are bounded, so
it follows from Ascoli's theorem
that there is a uniformly convergent subsequence, also
denoted $u_j$, with the limit $u_j\to u_\infty$ a Lipschitz
$\rho$-equivariant map.
I claim that we may arrange for $u_\infty$ to be a harmonic
map. Indeed,
since the convergence is uniform, we
may choose local coordinates and write $u^a$. Then since
$|du^a|$ is uniformly bounded, we may assume further
that $u_j\to
u_\infty$ weakly in $L^2_{1,loc.}$.
By the condition in Lemma \ref{lem:lipschitz} (iii),
the coordinates $u_\infty^a$ are in $L^2_{1,loc.}$ and form
a weak solution of \eqref{eqn:harmonic-map-local}.
Since $u_\infty$ is Lipschitz, elliptic regularity of the
Laplace operator implies $u_\infty\in L^2_{2,loc.}$.
By the remark following \eqref{eqn:harmonic-map-local}, we may
assume that the local metric on $X$ is euclidean. Now
differentiate to obtain:
\begin{align*}
\Delta(\nabla u^a_\infty)+\nabla(\Gamma^a_{bc}(u_\infty)\nabla
u^b_\infty\cdot\nabla u^c_\infty)&= 0\ ;\\
\Delta(\nabla^2 u^a_\infty)+\nabla^2(\Gamma^a_{bc}(u_\infty)\nabla
u^b_\infty\cdot\nabla u^c_\infty)&= 0\ .
\end{align*}
Notice that since $u_\infty$ is Lipschitz
the second term in the first equation is in $L^2$.
It then follows that $u^a_\infty\in L^2_{3,loc.}$. Because of the
inclusion $L^2_3\hookrightarrow L^4_2$, the second term of
the second equation above is then in $L^2$. This in turn implies
$u^a_\infty\in L^2_{4,loc.}$. Finally, $L^2_4 \subset C^{2,\alpha}$,
and so $u_\infty$ is a strong solution to the harmonic map equations
\eqref{eqn:harmonic-map}.
This completes the proof.
\end{proof}
\subsubsection{The harmonic map flow}
The harmonic map flow is defined by
\begin{equation} \label{eqn:harmonic-map-flow}
\dot u= -\tau(u)\ .
\end{equation}
Here $u_t$ is a family of $\rho$-equivariant maps.
Since $D$ has non-positive curvature, the flow is
very well-behaved.
Long time existence is proven in \cite{EellsSampson64, Hamilton75}.
The variation of the energy along the flow is given by
$$
\frac{d}{dt}E(u_t)=2\int_X \langle du, d_\nabla\dot u\rangle\,\omega
=2\int_X \langle d_\nabla^\ast du, \dot u\rangle\,\omega
=-2\int_X |\tau(u)|^2\,\omega \ .
$$
In particular, \emph{energy decreases along the flow}.
Moreover,
\begin{equation} \label{eqn:tau-bound}
2\int_0^\infty dt\int_X |\tau(u_t)|^2\omega\leq E(u_0)\ .
\end{equation}
We are now ready for the
\begin{proof}[Proof of Lemma \ref{lem:lipschitz}]
The proof
is based on the famous
Eells-Sampson-Bochner
formula for the change of the energy
density along the harmonic map flow \cite{EellsSampson64}. Let $u=u(t,x)$ be a solution to \eqref{eqn:harmonic-map-flow}, and $e=e_u(t,x)$. Then
$$
-\frac{\partial e}{\partial t}+\Delta e=
|\nabla du |^2+\Ric_X(du, du)-\Riem_D(du,du,du,du)\ .
$$
Now since $\Riem_D\leq 0$ and $\Ric_X$ is
bounded below by a negative constant, we have
$$
\frac{\partial e}{\partial t}-\Delta e \leq C\cdot e\ .
$$
Using an explicit argument with the heat kernel,
this inequality along with the fact that energy is decreasing imply an estimate of the following type
\begin{equation} \label{eqn:energy-bound}
\sup e_{u_t}\leq C\cdot E(u_0)\ ,
\end{equation}
for $t\geq 1$, say,
where $C$ depends only on the geometry of $X$ and $D$.
Now
let $u^{(j)}$ be an energy minimizing sequence
of $\rho$-equivariant maps. Let $u^{(j)}_t$
be the corresponding maps after the time $t$ flow of
\eqref{eqn:harmonic-map-flow}.
Then since energy is decreasing along the flow,
$u^{(j)}_{t_j}$ is
also energy minimizing for any choice of sequence $t_j$.
On the other hand,
the right hand side of \eqref{eqn:energy-bound} is uniformly bounded,
so if we choose each $t_j\geq 1$, say, then $u^{(j)}_{t_j}$
is also uniformly Lipschitz. Finally, for each fixed initial
condition $u_0$, \eqref{eqn:tau-bound} implies $\tau(u_{t_j})\to
0$ in $L^2$ along some sequence.
By a diagonalization argument we can arrange for
$u^{(j)}_{t_j}$ to satisfy this property as well.
\end{proof}
\subsection{Hyperk\"ahler reduction}
\subsubsection{The moduli spaces are real isomorphic}
Using \eqref{eqn:psi}, given a hermitian metric we may identify the space of all connections
$$
\mathcal C_E=\left\{ (A,\Psi) \in \mathcal A_E\times \Omega^1(X,\sqrt{-1}\mathfrak g_E) \right\}\ .
$$
Then $\mathcal C_E$
is a \emph{hyperk\"ahler manifold}, and the action of the gauge group $\mathcal G_E$ has associated moment maps
\begin{equation} \label{E:momentmaps}
\mu_1(A,\Psi) = F_A+\tfrac{1}{2}[\Psi,\Psi] \ ,\
\mu_2(A,\Psi)= d_A\Psi \ ,\
\mu_3(A,\Psi)= d_A(\ast\Psi)\ .
\end{equation}
Let ${\bf m}=(\mu_1,\mu_2,\mu_3)$.
The hyperk\"ahler quotient is by definition
$$
{\bf m}^{-1}(0) \bigr/ \mathcal G_E
= \mu_1^{-1}(0)\cap \mu_2^{-1}(0)\cap \mu_3^{-1}(0) \bigr/ \mathcal G_E\ .
$$
The two pictures we have been discussing above are equivalent to a reduction of $\mathcal C_E$ in steps, but in two different ways. The first is the point of view of Hitchin and Simpson described in Section \ref{sec:hitchin}. Namely, the space of Higgs bundles is given by
$$
\mathcal B_E=\mu_2^{-1}(0)\cap \mu_3^{-1}(0)\subset \mathcal C_E\ ,
$$
where $\Psi$ is obtained from $\Phi$ by $\Psi=\Phi+\Phi^\ast$,
and conversely $\Phi$ is the $(1,0)$ part of $\Psi$.
Just like for functions on surfaces, $\Psi$ is harmonic if and only if $\Phi$ is holomorphic.
Now Theorem \ref{thm:hitchin} guarantees that the orbit of every polystable Higgs bundle intersects
the locus $\mu_1^{-1}(0)$ in $\mathcal B_E^{ss}$. Hence, we have
$$
\mathfrak M_D^{(n)}=\mathcal B_E^{ss}\bigr/ \negthinspace\negthinspace \bigr/ \mathcal G_E^\mathbb C
={\bf m}^{-1}(0) \bigr/ \mathcal G_E
= \mu_1^{-1}(0)\cap \mu_2^{-1}(0)\cap \mu_3^{-1}(0) \bigr/ \mathcal G_E\ .
$$
The second point of view (e.g.\ Corlette and Donaldson, Section \ref{sec:corlette}) comes from the observation that the space of flat connections is
$$
\mathcal C_E^{flat}=\mu_1^{-1}(0)\cap \mu_2^{-1}(0) \subset \mathcal C_E\ .
$$
Given $\nabla\in \mathcal C_E^{flat}$,
the condition that the associated $\hol(\nabla)$-equivariant map be harmonic is precisely that $\nabla\in \mu_3^{-1}(0)$. Indeed, suppose $\delta\nabla$ is a variation of $\nabla$.
It follows from \eqref{eqn:psi} that $\delta\Psi=\delta\nabla+(\delta\nabla)^\ast$. In the case of a complex gauge transformation with $g^{-1}\delta g=\phi$, $\delta\nabla=\nabla\phi$, and
$$
\delta\Psi=d_A(\phi+\phi^\ast)+[\Psi, \phi-\phi^\ast]\ .
$$
It is easy to see that the second term will not contribute in the variation
$
\tr(\delta\Psi\wedge \ast\Psi)+\tr(\Psi\wedge\ast\delta\Psi)
$ (by direct computation, and also from the fact that unitary gauge transformations do not vary the associated equivariant map).
So from Lemma \ref{lem:energy-psi} we have
\begin{align*}
\delta E(u)&=4\int_X \tr(\delta\Psi\wedge \ast\Psi)+\tr(\Psi\wedge\ast\delta\Psi) \\
&=4\int_X \tr(d_A(\phi+\phi^\ast)\wedge \ast\Psi)+\tr(\Psi\wedge\ast d_A(\phi+\phi^\ast)) \\
&=-8\int_X\tr((\phi+\phi^\ast)d_A(\ast\Psi))\ .
\end{align*}
Since $\Psi$ is hermitian and $\phi$ is arbitrary, $\Psi$ is a critical point for the energy if and only if $d_A(\ast\Psi)=0$.
Now Theorem \ref{thm:corlette} guarantees that the orbit of every semisimple representation contains a harmonic map. It therefore follows that the holonomy map gives a homeomorphism
$$
\mathfrak M_B^{(n)}\simeq \mathcal C_E^{flat}\bigr/ \negthinspace\negthinspace \bigr/\mathcal G_E^\mathbb C\simeq \mu_1^{-1}(0)\cap \mu_2^{-1}(0)\cap \mu_3^{-1}(0) \bigr/ \mathcal G_E\ .
$$
So the Dolbeault and Betti moduli spaces coincide!
\begin{theorem}[\cite{Simpson94a, Simpson94b}]
The identification above gives a homeomorphism $\mathfrak M^{(n)}_D\simeq \mathfrak M^{(n)}_B$.
\end{theorem}
\subsubsection{Equivariant cohomology}
As in the case of the $\YMH$-flow, the harmonic map flow actually has continuity properties as $t\to \infty$. To describe this,
let $\mathcal G_E(p)\subset \mathcal G_E$ denote the subgroup of gauge transformations that are the identity at the point $p$.
Now the holonomy map gives a proper embedding
\begin{equation} \label{eqn:hol-embedding}
\hol : {\bf m}^{-1}(0)/\mathcal G_E(p)\hookrightarrow \Hom(\pi, \mathsf{SL}_n(\mathbb C))\ ,
\end{equation}
which is $\mathsf{SU}_n$-equivariant.
\begin{theorem}[{cf.\ \cite{DWW}}] \label{thm:representation-retract}
The inclusion \eqref{eqn:hol-embedding}
is an $\mathsf{SU}_n$-equivariant deformation retract.
\end{theorem}
An explicit retraction is defined using the harmonic map flow to define a flow on the space of representations.
Fix a lift $\tilde p\in \widetilde X$ of $p$.
Given $\rho\in \Hom(\pi, \mathsf{SL}_n(\mathbb C))$, choose $\nabla\in \mathcal C_E^{flat}$ with $\hol(\nabla)=\rho$.
The hermitian metric gives a unique $\rho$-equivariant lift $u:\widetilde X\to D$ with $u(\tilde p)=I$. Let $u_t$, $t\geq 0$, denote the solution to \eqref{eqn:harmonic-map-flow} with initial condition $u$. There is a unique continuous family $h_t\in \mathsf{SL}_n(\mathbb C)$, $h_t^\ast=h_t$, such that $h_0=I$, and $ h_tu_t(\tilde p)h_t^\ast=I$. Notice that a different choice of flat connection $\widetilde \nabla$ with $\hol(\widetilde \nabla)=\rho$ will be related to $\nabla$ by a based gauge transformation $g$. The flow corresponding to $\widetilde \nabla$ is $\tilde u_t=g\cdot u_t$, and since $g(\tilde p)=I$, $\tilde h_t=h_t$. Hence, $h_t$ is well-defined by $\rho$. The flow on $\Hom(\pi, \mathsf{SL}_n(\mathbb C))$
is then defined by $\rho_t=h_t\rho h_t^{-1}$.
The result states that this flow defines a continuous retraction to $\hol\left({\bf m}^{-1}(0)/\mathcal G_E(p) \right) $. When $\rho$ is not semisimple, the flow converges to a semisimplification.
This result has consequences for computing the equivariant cohomology of moduli space \cite{AtiyahBott82, Kirwan84, Daskalopoulos92}.
In particular, Theorem \ref{thm:representation-retract} implies
$$H^\ast_{\mathsf{SU}_n}({\bf m}^{-1}(0)/\mathcal G_E(p))\simeq H^\ast_{\mathsf{SU}_n}(\Hom(\pi, \mathsf{SL}_n(\mathbb C)))\ .$$
Note that since $\mathsf{SL}_n(\mathbb C)/\mathsf{SU}_n$ is contractible, on the right hand side we may take equivariant cohomology with respect to $\mathsf{SL}_n(\mathbb C)$.
On the other hand, Theorem \ref{thm:wilkin} implies
$$H^\ast_{\mathsf{SU}_n}({\bf m}^{-1}(0)/\mathcal G_E(p))= H^\ast_{\mathcal G_E}(\mathcal B^{min}_E) \simeq
H^\ast_{\mathcal G_E}(\mathcal B^{ss}_E)\ .$$
It follows that the equivariant cohomology of the space of representations may be computed by studying the equivariant Morse theory of $\YMH$ on $\mathcal B_E$ in the spirit of \cite{AtiyahBott82}. This is complicated, since $\mathcal B_E$ is singular. Some progress has been made using this approach (see \cite{DWWW, WentworthWilkin11}).
Figure 1 gives a cartoon of $\mathcal C_E$ with the subspaces $\mathcal C_E^{flat}$ and $\mathcal B_E$, and the flows that have been defined.
\setlength{\unitlength}{1cm}
\begin{picture}(14,8)
\put(4, 0){
{\scalebox{.65}{\includegraphics{higgs_bundles.pdf}}}}
\put(7,0){Figure 1.}
\end{picture}
\section{Differential Equations}
\subsection{Uniformization}
For more on the discussion in this section I refer to the classic text of Gunning \cite{Gunning66}.
\begin{definition}
The {\bf Schwarzian derivative} of a univalent
holomorphic function $f(z)$ defined on a domain in $\mathbb C$ is given by
$$
S(f)=\{f,z\}= \frac{f'''}{f'}-\frac{3}{2}\left(\frac{f''}{f'}\right)^2\ .
$$
\end{definition}
\noindent
By straightforward calculation one shows the following:
\begin{enumerate}
\item $S(f\circ g)=(S(f)\circ g) (g')^2+ S(g)\ ;$
\item $S(f)=0 \iff f$ is the restriction of a M\"obius transformation.
\end{enumerate}
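As a quick illustration of one direction of (ii) (a routine check): if $f(z)=\frac{az+b}{cz+d}$ with $ad-bc=1$, then
$$
f'=\frac{1}{(cz+d)^2}\ ,\qquad \frac{f''}{f'}=\frac{-2c}{cz+d}\ ,\qquad \frac{f'''}{f'}=\frac{6c^2}{(cz+d)^2}\ ,
$$
so that $S(f)=\frac{6c^2}{(cz+d)^2}-\frac{3}{2}\cdot\frac{4c^2}{(cz+d)^2}=0$.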
A particular consequence of (i) and (ii) is then
$$
\hspace{-10cm} {\rm (iii)}\
S(f)=S(g) \Longrightarrow f=\phi\circ g\ ,
$$
where $\phi$ is a M\"obius
transformation.
The Schwarzian derivative gives a link between uniformization
and the monodromy of differential equations, as I briefly
explain here. Let $Q(z)$, $y(z)$ be locally defined
holomorphic functions, and consider the ODE
\begin{equation} \label{eqn:2nd-order-ode}
y''(z)+Q(z)y(z)=0\ .
\end{equation}
If $y_1, y_2$ are independent solutions of
\eqref{eqn:2nd-order-ode} and $y_2\neq 0$, then a calculation
shows that $f=y_1/y_2$ satisfies $S(f)=2Q$.
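The calculation is short enough to sketch here: since $y_1''=-Qy_1$ and $y_2''=-Qy_2$, the Wronskian $W=y_1'y_2-y_1y_2'$ is constant and $f'=W/y_2^2$. Writing $g=f''/f'=-2y_2'/y_2$, so that $S(f)=g'-\tfrac{1}{2}g^2$ by the definition above, one finds
$$
g'=-2\,\frac{y_2''y_2-(y_2')^2}{y_2^2}=2Q+\tfrac{1}{2}g^2\ ,\qquad\text{and hence}\qquad S(f)=g'-\tfrac{1}{2}g^2=2Q\ .
$$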
Note that for a univalent function $f$, $S=S(f)$ is not quite a tensor: rather, by (i) it transforms
with respect to local coordinate changes as
\begin{equation} \label{eqn:projective-connection}
S(w)(w')^2=S(z)-\{w,z\}\ ,
\end{equation}
so $S$ nearly transforms as a quadratic differential.
A collection $\{S(z)\}$ of local holomorphic functions on
$X$
transforming as in \eqref{eqn:projective-connection} is called
a {\bf projective connection}.
The space of projective connections on $X$ is an affine space
modeled on the space $H^0(X,\mathcal K^2)$ of holomorphic quadratic
differentials.
Next, consider the transformation properties of the solutions
$y$ to \eqref{eqn:2nd-order-ode}, where $2Q=S$ is an arbitrary
projective connection (cf.\ \cite{HawleySchiffer66}). If we assume $y$ is a local holomorphic
section of $\mathcal K^{-1/2}$, then we have
\begin{align*}
y(z)&= y(w) (w')^{-1/2}\ ; \\
y''(z) &= y''(w)(w')^{3/2}-\tfrac{1}{2}y(z)\{w,z\}\ ,
\end{align*}
and so
$$
y''(z)+\tfrac{1}{2}S(z)y(z)=(y''(w)+\tfrac{1}{2}S(w)y(w))(w')^{3/2}\ .
$$
We deduce that $Dy=y''+\tfrac{1}{2}Sy$ gives a well-defined
map of $\underline\mathbb C$-modules $D:\mathcal K^{-1/2}\to
\mathcal K^{3/2}$.
Therefore, given a projective connection
$S$
we have a rank $2$ local system ${\bf V}$, defined by the solution space to \eqref{eqn:2nd-order-ode}, $2Q=S$. Moreover, there is an exact sequence
of $\underline \mathbb C$-modules
$$
0\longrightarrow {\bf V} \longrightarrow \mathcal K^{-1/2}\longrightarrow \mathcal K^{3/2}\longrightarrow 0\ .
$$
Now assume $X$ has a uniformization as a hyperbolic surface, so that
$\rho_F:\pi\to
\mathsf{PSL}_2(\mathbb R)$ is a discrete and faithful representation such that $X$ is biholomorphic to $\mathbb H/\rho_F(\pi)$.
Let $u$ be a (multi-valued) inverse of the quotient map
$\mathbb H\to X$. In other words, $u$ is a univalent function $u:\widetilde X\to \mathbb H$
that is equivariant with respect to $\rho_F$.
Set $S_F(z)=S(u)(z)$. Then by items (i) and
(ii) above, for any $\gamma\in\pi$,
$$
S_F(\gamma z)=S(u)(\gamma z)=S(\rho_F(\gamma)\circ
u)(z)=S(u)(z)=S_F(z)\ .
$$
So $S_F$ is a well-defined projective connection on $X$.
Now the key point is the following: if $y_1, y_2$ are linearly independent solutions to
\eqref{eqn:2nd-order-ode} where $2Q=S_F$,
then $S(y_1/y_2)=S(u)$ and so
by (iii) above there is a M\"obius transformation $\phi$ such that $y_1/y_2=\phi\circ u$.
It follows that the (projective) monodromy of the local system associated
to \eqref{eqn:2nd-order-ode} in the case $2Q=S_F$
is conjugate to $\rho_F$.
If $S$ is any fixed choice of projective connection, one may ask for the holomorphic quadratic
differential $Q$ such that $S_F=S+Q$. This is the famous problem of
\emph{accessory parameters} (cf.\ \cite{Uniformization}).
\begin{remark} \label{rem:projective}
I want to clarify the following issue:
the bundle $\mathcal K^{1/2}$ involves a choice of square root
of
the canonical bundle (i.e.\ a {\bf spin structure}), of which there are $2^{2g}$ possibilities. This choice is \emph{precisely}
equivalent to a lift of the corresponding monodromy $\rho$ from
$\mathsf{PSL}_2(\mathbb C)$
to $\mathsf{SL}_2(\mathbb C)$. To see this, let
$\mathcal V_\rho=\mathcal O\otimes_{\underline \mathbb C}{\bf V}_\rho$, and notice
that $\mathcal V_\rho$ fits into an exact sequence (now of $\mathcal O$-modules)
\begin{equation} \label{eqn:v-rho}
0\longrightarrow \mathcal K^{1/2}_\rho\longrightarrow \mathcal V_\rho\longrightarrow \mathcal K^{-1/2}_\rho\longrightarrow 0\ ,
\end{equation}
where now we also label the choice of spin structure by $\rho$.
Since $\mathcal V_\rho$ has a holomorphic connection, by Theorem \ref{thm:weil},
\eqref{eqn:v-rho} cannot split.
On the other hand, the extensions are parametrized by the projective space of $H^1(X,\mathcal K)\simeq (H^0(X,\mathcal O))^\ast=\mathbb C$. So all the bundles $\mathcal V$ obtained in this way as $\rho$ varies are isomorphic, modulo the choice of $\mathcal K^{1/2}$.
Eq.\ \eqref{eqn:v-rho} also implies that $\mathcal V^\ast_\rho\otimes \mathcal K^{-1/2}_\rho$ has a nonzero
holomorphic section. Moreover, if we have such an exact sequence
for one spin structure, then \eqref{eqn:v-rho} cannot hold for any other
choice $\mathcal K^{1/2}$.
Indeed, the induced map $\mathcal K^{1/2}\to\mathcal K^{-1/2}_\rho$ would necessarily vanish, and so the inclusion $\mathcal K^{1/2}\to\mathcal V_\rho$ would lift to give an isomorphism
$\mathcal K^{1/2}\simeq \mathcal K_\rho^{1/2}$. So $\mathcal K^{-1/2}$ is determined by $\rho$.
Changing the lift of the projective monodromy $\rho$ to $\mathsf{SL}_2(\mathbb C)$ amounts to
$\rho\mapsto \rho\otimes \chi$ for some character $\chi:\pi\to \mathbb Z/2$. This
corresponds to tensoring $\mathcal V_\rho$ by a flat line
bundle $\mathcal L_\chi$ whose square is trivial. It then follows
from the condition $H^0(X,\mathcal V_{\rho\otimes \chi}^\ast\otimes
\mathcal K^{-1/2}_{\rho\otimes \chi})\neq \{0\}$ and the argument given above that $\mathcal K^{1/2}_{\rho\otimes \chi}=\mathcal K^{1/2}_\rho\otimes\mathcal L_\chi$.
\end{remark}
\subsection{Higher order equations}
\subsubsection{Invariance properties}
The structure outlined in the previous section for equations of the type \eqref{eqn:2nd-order-ode} extends to higher order equations.
We consider $n$-th order differential equations on $\mathbb H$:
\begin{equation} \label{eqn:n-diffeq}
y^{(n)}+ Q_2 y^{(n-2)} +\cdots + Q_n y=0\ .
\end{equation}
We would like an appropriate invariance property under coordinate changes in order to have solutions that are intrinsic to $X$. Motivated by the example of projective connections, we attempt to realize local solutions of \eqref{eqn:n-diffeq} in the sheaf $\mathcal K^{1-q}$,
where $n=2q-1$ and we have chosen a spin structure if $q$ is a half-integer.
Solutions to \eqref{eqn:n-diffeq} are given by the kernel of an operator
$\mathcal K ^{1-q}
\stackrel{D}{\xrightarrow{\hspace*{.75cm}}} \mathcal K ^q
$.
\begin{theorem} [cf.\ \cite{Itzykson91}, see also \cite{Wilczynski62, Hejhal75}]\label{thm:wk}
Let $D: \mathcal K ^{1-q}\to \mathcal K^q$ be $\underline\mathbb C$-linear and locally of the form
$$
Dy=y^{(n)}+ Q_2 y^{(n-2)} +\cdots + Q_n y\ .
$$
Then
$12Q_2/n(n^2-1)$
is a projective connection,
and for $k\geq 3$,
there exist $w_k$, linear combinations of the $Q_j$, $j=2,\ldots, k$, and their derivatives, with coefficients polynomial in $Q_2$,
such that each $w_k$ transforms as a $k$-differential. Conversely, given one such operator and $k$-differentials $w_k$, $k=2,\ldots, n$, these conditions uniquely determine an operator $D$.
\end{theorem}
\noindent
The expressions for the $w_k$ are quite complicated. For example, we reproduce some of \cite[Table 1]{Itzykson91}:
\begin{align}
\begin{split} \label{eqn:wk}
w_2&= Q_2 \\
w_3&=Q_3-\frac{n-2}{2}Q_2' \\
w_4&= Q_4-\frac{n-3}{2}Q_3' + \frac{(n-2)(n-3)}{10}Q''_2-\frac{(n-2)(n-3)(5n+7)}{10n(n^2-1)}Q^2_2\ .
\end{split}
\end{align}
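As a simple consistency check, for $n=2$ the normalization in Theorem \ref{thm:wk} gives $12Q_2/n(n^2-1)=2Q_2$, which is precisely the projective connection $S=2Q$ attached to \eqref{eqn:2nd-order-ode} above.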
It follows from Theorem \ref{thm:wk} that the space of all such $D$ is an affine space modeled on the Hitchin base
$\bigoplus_{j=2}^n H^0(X,\mathcal K^j)$.
The map $D: \mathcal K ^{1-q}\to \mathcal K ^{q}$ is clearly locally surjective. Moreover, the Wronskian of any fundamental set of solutions $Dy_i=0$ is constant.
We therefore obtain a local system ${\bf V}$ and an exact sequence of sheaves over $\underline\mathbb C$.
\begin{equation} \label{eqn:n-diffop}
0\longrightarrow {\bf V} \stackrel{\varphi}{\xrightarrow{\hspace*{.75cm}}} \mathcal K ^{1-q}
\stackrel{D}{\xrightarrow{\hspace*{.75cm}}} \mathcal K ^q \longrightarrow 0\ .
\end{equation}
In this situation,
we say that the local system ${\bf V}$ {\bf is realized in} $\mathcal K ^{1-q}$.
\begin{remark}
If we tensor by a line bundle with a holomorphic connection and replace derivatives $y^{(j)}$ with derivatives in a local parallel frame of the line bundle, then we can consider local systems realized in $\mathcal L$:
\begin{equation} \label{eqn:ell}
0\longrightarrow {\bf V} \stackrel{\varphi}{\xrightarrow{\hspace*{.75cm}}} \mathcal L
\stackrel{D}{\xrightarrow{\hspace*{.75cm}}} \mathcal L\otimes\mathcal K ^n \longrightarrow 0\ ,
\end{equation}
where $\deg \mathcal L=-(n-1)(g-1)$.
\end{remark}
\subsubsection{The Riemann-Hilbert correspondence}
The goal of this section is to characterize which local systems can be realized as the monodromy of solutions to differential equations. To motivate the following, if ${\bf V}$ is a local system realized in $\mathcal L$, and $\mathcal V=\mathcal O\otimes_{\underline\mathbb C}{\bf V} $,
notice that in \eqref{eqn:ell} there is a surjective sheaf map $\mathcal V\to \mathcal L$ given by
$f\otimes {\bf v}\mapsto f \varphi({\bf v})$, for $f\in \mathcal O $, ${\bf v}\in {\bf V}$. In particular, $\mathcal V^\ast\otimes\mathcal L$ has a nonzero holomorphic section.
\begin{theorem} \label{thm:ohtsuki}
A representation $\rho: \pi\to \mathsf{SL}_n(\mathbb C)$ can be realized in $\mathcal L$ if and only if $\rho$ is irreducible,
$H^0(X,\mathcal V_\rho^\ast\otimes\mathcal L)\neq\{0\}$, and $\mathcal L^n=\mathcal K ^{-n(n-1)/2}$.
\end{theorem}
\begin{proof}
According to Hejhal \cite[Theorem 3]{Hejhal75},
the monodromy representation arising from a differential
operator $D$ is necessarily irreducible.
I shall give a proof of this fact below
(see Proposition \ref{prop:irreducible}). Accepting this point for the time being,
from the discussion above we also have a nonzero section of $ \mathcal V_\rho^\ast\otimes\mathcal L$.
Moreover, if $y_1,\ldots, y_n$ is an independent set of solutions $Dy_i=0$ on $\mathbb H$, then the Wronskian
$$
W(y_1,\ldots, y_n)=\det\left( \begin{matrix}
y_1 & \cdots & y_n \\
y'_1 & \cdots & y'_n \\
\vdots && \vdots \\
y^{(n-1)}_1 & \cdots & y^{(n-1)}_n
\end{matrix}
\right)\ ,
$$
is a well-defined nowhere vanishing global holomorphic section of $\mathcal L^n\otimes\mathcal K^{n(n-1)/2}$ on $X$. The latter is therefore trivial.
This proves the necessity part of the assertion.
For the converse, we follow a classical argument using the Wronskian (cf.\
\cite{Ohtsuki82}).
Assume we have a nonzero holomorphic section $\varphi$ of
$\mathcal V_\rho^\ast\otimes\mathcal L$. This induces a map (also
denoted by $\varphi$):
${\bf V}_\rho\to \mathcal L$. Because $\rho$ is irreducible,
$\varphi$ is injective.
Because $\mathcal L^n=\mathcal K^{-n(n-1)/2}$ we can write
$\mathcal L=\mathcal L_0\otimes \mathcal K^{-(n-1)/2}$, where $\mathcal L_0$ has a
flat connection. If we express a section $y$ of $\mathcal L$ as ${\bf l}\otimes w$, where ${\bf l}$ is a parallel section of
$\mathcal L_0$, then we define $y'={\bf l}\otimes w'$. With this
understood, choose a local frame $\{{\bf v}_i\}$ for
${\bf V}_\rho$, and set
$$
Dy
=\det\left( \begin{matrix}
\varphi({\bf v}_1) & \cdots & \varphi({\bf v}_n) & y\\
\varphi({\bf v}_1)' & \cdots & \varphi({\bf v}_n)'& y' \\
\vdots && \vdots&\vdots \\
\varphi({\bf v}_1)^{(n)} & \cdots &\varphi({\bf v}_n)^{(n)} & y^{(n)}
\end{matrix}
\right)\ .
$$
Then if $y$ is a local holomorphic section of $\mathcal L$, $Dy$ is
a well-defined local holomorphic section of
$\mathcal L^{n+1}\otimes\mathcal K^{n(n+1)/2}=\mathcal L\otimes\mathcal K^n$.
Clearly, the kernel of $D$ is precisely ${\bf V}_\rho$. Moreover, since the monodromy of ${\bf V}_\rho$ is in $\mathsf{SL}_n(\mathbb C)$, it is easy to see that $Dy$ is actually globally defined on $X$. Finally,
$\mathcal L^n=\mathcal K^{-n(n-1)/2}$, so
$$
\det\left( \begin{matrix}
\varphi({\bf v}_1) & \cdots & \varphi({\bf v}_n) \\
\varphi({\bf v}_1)' & \cdots & \varphi({\bf v}_n)' \\
\vdots && \vdots \\
\varphi({\bf v}_1)^{(n-1)} & \cdots &\varphi({\bf v}_n)^{(n-1)}
\end{matrix}
\right)\ ,
$$
is a nonzero holomorphic function on $X$, which may therefore be set equal to $1$.
Hence, $Dy$ has the form \eqref{eqn:n-diffop}.
This completes the proof.
\end{proof}
\begin{example} \label{ex:fuchsian}
The lift of the monodromy of a projective connection defines a representation into $\mathsf{SL}_2(\mathbb C)$ which, via the irreducible embedding $\mathsf{SL}_2\hookrightarrow\mathsf{SL}_n$, gives a representation into $\mathsf{SL}_n(\mathbb C)$.
It is straightforward, if somewhat tedious, to calculate the differential equations associated to the local systems arising in this way. Below are some examples, where we let $2Q$ be a projective connection on $X$.
\begin{itemize}
\item $n=2$: $y''+Qy=0$;
\item $n=3$: $y'''+4Qy'+2Q'y=0$;
\item $n=4$: $y^{(4)}+10Qy''+10Q'y'+(9Q^2+3Q'')y=0$;
\item $n=5$: $y^{(5)}+20Qy'''+30Q'y''+(64Q^2+18Q'')y'+(64QQ'+4Q''')y=0$;
\item $n=6$: $y^{(6)}+35Qy^{(4)}+70Q'y'''+(63Q''+259Q^2)y''+
(28Q'''+518QQ')y'+$ \\
${}$\hskip3in $(130(Q')^2+155QQ''+5Q^{(4)}+225Q^3)y=0$.
\end{itemize}
Note that $w_3, w_4$ in \eqref{eqn:wk} vanish for these examples.
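As a partial check of the $n=3$ entry (a short computation): if $y_1, y_2$ solve $y''+Qy=0$, then $u=y_1y_2$ satisfies
$$
u''=-2Qu+2y_1'y_2'\ ,\qquad u'''=-2Q'u-2Qu'-2Q(y_1'y_2+y_1y_2')=-2Q'u-4Qu'\ ,
$$
so $u'''+4Qu'+2Q'u=0$. The solution space of the $n=3$ equation is thus spanned by the products $y_iy_j$, as expected from the symmetric square description of the irreducible embedding $\mathsf{SL}_2\hookrightarrow\mathsf{SL}_3$.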
\end{example}
\subsection{Opers} \label{sec:opers}
\subsubsection{Oper structures}
In this section we introduce opers. For more details consult \cite{BeilinsonDrinfeld05,
BenZviFrenkel01a, BenZviFrenkel01b, BenZviBiswas04, JoshiPauly09, Simpson10}.
\begin{definition}[Beilinson-Drinfeld {\cite{BeilinsonDrinfeld05}}] \label{def:oper}
An $\mathsf{SL}_n$-{\bf oper} is a holomorphic bundle $\mathcal V \to X$, a holomorphic connection $\nabla$ inducing the trivial connection on $\det\mathcal V$, and a filtration
$
0=\mathcal V_0\subset \mathcal V_1\subset\cdots\subset \mathcal V_n=\mathcal V
$
satisfying
\begin{enumerate}
\item $\nabla\mathcal V_i\subset \mathcal V_{i+1}\otimes \mathcal K $;
\item the induced $\mathcal O$-linear map $\mathcal V_i/\mathcal V_{i-1} \stackrel{\nabla}{\xrightarrow{\hspace*{.6cm}}}
\mathcal V_{i+1}/\mathcal V_{i}\otimes \mathcal K $ is an isomorphism for $1\leq i\leq n-1$.
\end{enumerate}
\end{definition}
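For example, when $n=2$ an oper structure is a line subbundle $\mathcal V_1\subset\mathcal V$ such that $\nabla$ induces an isomorphism $\mathcal V_1\simeq (\mathcal V/\mathcal V_1)\otimes\mathcal K$. Since $\det\mathcal V\simeq\mathcal O$, this forces $\mathcal V_1^2\simeq\mathcal K$, i.e.\ $\mathcal V_1$ is a choice of $\mathcal K^{1/2}$, and we recover the extension \eqref{eqn:v-rho} of Remark \ref{rem:projective}.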
There is an action of $\mathcal G^\mathbb C$ on the space of opers which pulls back connections and filtrations.
Let ${\rm Op}_n$ denote the space of gauge equivalence classes of
$\mathsf{SL}_n$-opers on $X$. Given a holomorphic connection on a bundle $\mathcal V$, we shall call a filtration
$
0=\mathcal V_0\subset \mathcal V_1\subset\cdots\subset \mathcal V_n=\mathcal V
$
satisfying (i) and (ii) an {\bf oper structure}. Not every holomorphic connection admits an oper structure. For example, we have the following important
\begin{proposition} \label{prop:irreducible}
The holonomy representation of an oper is irreducible.
\end{proposition}
First we have
\begin{lemma} \label{lem:gr}
For any $\mathsf{SL}_n$-oper, $\det \mathcal V_j\simeq\mathcal L^j\otimes \mathcal K ^{nj-j(j+1)/2}$,
where $\mathcal L\simeq \mathcal V/\mathcal V_{n-1}$, and $\mathcal L^n\simeq \mathcal K ^{-n(n-1)/2}$.
\end{lemma}
\begin{proof}
To simplify notation, set $v_i=\det\mathcal V_i$, $\kappa=\mathcal K $, and
use additive notation for line bundle tensor products.
Then Definition \ref{def:oper} (ii)
gives $v_i-v_{i-1}=v_{i+1}-v_i+\kappa$, and so
\begin{align*}
v_j&=\sum_{i=1}^j (v_i-v_{i-1})=\sum_{i=1}^j (v_{i+1}-v_i+\kappa)=v_{j+1}-v_1+j\kappa \\
v_{j+1}-v_j &= v_1-j\kappa\ .
\end{align*}
Now summing again
\begin{align*}
v_i-v_1&=\sum_{j=1}^{i-1} (v_{j+1}-v_{j})=(i-1)v_1-\frac{i(i-1)}{2}\kappa \\
v_{i} &= iv_1-\frac{i(i-1)}{2}\kappa \\
0&=v_n= nv_1-\frac{n(n-1)}{2}\kappa\ .
\end{align*}
Set $\mathcal L=v_1-(n-1)\kappa$, and this completes the proof.
\end{proof}
\begin{proof}[Proof of Proposition \ref{prop:irreducible}] (cf.\ \cite{JoshiPauly09})
Suppose that $(\mathcal V,\nabla)$ has an oper structure and $0\neq
\mathcal W\subset
\mathcal V$ is $\nabla$-invariant. Let $\mathcal W_i=\mathcal W\cap \mathcal V_i$.
I claim that the induced map
$$
\mathcal W_i/\mathcal W_{i-1}\longrightarrow \mathcal W_{i+1}/\mathcal W_i\otimes \mathcal K \ ,
$$
is an inclusion of sheaves for all $i=1, \ldots, n-1$.
Indeed, consider the commutative diagram of $\mathcal O$-modules:
$$
\xymatrix{
\mathcal W_i/\mathcal W_{i-1} \ar[d] \ar[r]&\mathcal W_{i+1}/\mathcal W_i\otimes
\mathcal K \ar[d] \\
\mathcal V_i/\mathcal V_{i-1} \ar[r]&\mathcal V_{i+1}/\mathcal V_i\otimes
\mathcal K
}
$$
The vertical arrows are inclusions and the lower horizontal
arrow is an isomorphism. This proves the claim.
Set $r_i=\rank(\mathcal W_i/\mathcal W_{i-1})$. By the claim, if
$r_i=0$, then $r_j=0$ for $j\leq i$. Let $1\leq \ell\leq n$ be the smallest
integer for which $r_\ell\neq 0$. It follows that $r_i=1$ if
and only if $\ell\leq i\leq n$.
Applying the inclusions recursively and using Lemma \ref{lem:gr}, we find
$$
\mathcal W_i/\mathcal W_{i-1}\hookrightarrow \mathcal V/\mathcal V_{n-1}\otimes
\mathcal K ^{n-i}\cong \mathcal K ^{(n-2i+1)/2}\ .
$$
In particular (see Section \ref{sec:stability}),
$$
\deg(\mathcal W_i/\mathcal W_{i-1})\leq (n-2i+1)(g-1)\ ,
$$
and so
$$
\deg \mathcal W=\sum_{i=\ell}^n\deg(\mathcal W_i/\mathcal W_{i-1})
\leq \sum_{i=\ell}^n(n-2i+1)(g-1)=-(n-\ell+1)(\ell-1)(g-1)\ .
$$
The right hand side is strictly negative unless $\ell=1$.
But since $\mathcal W$ has a holomorphic connection induced by
$\nabla$, $\deg \mathcal W=0$. Hence, the only possibility is $\ell=1$, which implies
$\mathcal W=\mathcal V$. This completes the proof.
\end{proof}
We now show that if a holomorphic connection admits an
oper structure, then that structure is
unique up to gauge equivalence.
For the next part of the discussion, it will be useful to have the following diagram in mind (cf.\ Lemma \ref{lem:gr}):
\begin{equation}
\begin{split} \label{eqn:big-diagram}
\xymatrix{
&&& 0 \ar[d] & \\
& 0 \ar[d] & & \mathcal L\otimes\mathcal K ^{n-j} \ar[d] & \\
0\ar[r] & \mathcal V_{j-1} \ar[r] \ar[d] & \mathcal V \ar[r] \ar[d]^{\begin{sideways}$\sim$ \end{sideways}} & \mathcal R_{j-1} \ar[r] \ar[d] & 0 \\
0\ar[r] & \mathcal V_{j} \ar[r] \ar[d] & \mathcal V \ar[r] & \mathcal R_{j} \ar[r] \ar[d] & 0 \\
& \mathcal L\otimes \mathcal K ^{n-j} \ar[d] && 0 & \\
& 0 &&&
}
\end{split}
\end{equation}
\begin{lemma} \label{lem:ext1}
$\displaystyle H^1(X, (\mathcal L\otimes \mathcal K ^{n-j})\otimes \mathcal R_i^\ast)=\begin{cases}
0 & i\geq j+1\ ; \\ H^1(X,\mathcal K ) & i=j\ . \end{cases}
$
\end{lemma}
\begin{proof}
Fix $j$ and do induction on $i$. If $i=n-1$, then $\mathcal R_{n-1}=\mathcal L$ and
$$
H^1(X, (\mathcal L\otimes \mathcal K ^{n-j})\otimes \mathcal R_{n-1}^\ast)=
H^1(X, \mathcal K ^{n-j})=\begin{cases} 0 & n-j-1>0\ ; \\
H^1(X,\mathcal K) & n=j+1\ .
\end{cases}
$$
Now the exact sequence $0\to\mathcal R_i^\ast\to \mathcal R_{i-1}^\ast\to\mathcal L^\ast\otimes\mathcal K^{i-n}\to 0$ gives the following
$$
H^1(X, (\mathcal L\otimes \mathcal K ^{n-j})\otimes \mathcal R_{i}^\ast)
\longrightarrow H^1(X, (\mathcal L\otimes \mathcal K ^{n-j})\otimes \mathcal R_{i-1}^\ast)\longrightarrow
H^1(X, \mathcal K ^{i-j})\longrightarrow 0\ .
$$
By induction the first term vanishes and the last two terms are isomorphic. This proves the lemma.
\end{proof}
\begin{lemma} \label{lem:ext2}
$\displaystyle H^1(X, \mathcal V_j\otimes \mathcal R_i^\ast)=\begin{cases}
0 & i\geq j+1\ ; \\ H^1(X,\mathcal K ) & i=j \ .\end{cases}
$
\end{lemma}
\begin{proof}
Fix $i$ and induct on $j$. Now $\mathcal V_1\simeq \mathcal L\otimes\mathcal K ^{n-1}$, so the result in this case follows from Lemma \ref{lem:ext1}. Next consider the exact sequence
$$
H^1(X,\mathcal V_{j-1}\otimes \mathcal R_i^\ast)\longrightarrow
H^1(X,\mathcal V_{j}\otimes \mathcal R_i^\ast)\longrightarrow
H^1(X,\mathcal L\otimes \mathcal K ^{n-j}\otimes \mathcal R_i^\ast)\longrightarrow 0\ .
$$
By induction, the first term vanishes and so the second and third terms are isomorphic. Again, the result follows from Lemma \ref{lem:ext1}.
\end{proof}
\begin{corollary} \label{cor:ext}
$\displaystyle H^1(X, \mathcal V_{j-1}\otimes (\mathcal L\otimes \mathcal K ^{n-j})^\ast)= H^1(X,\mathcal K )
$
\end{corollary}
\begin{proof}
Consider the exact sequence
$$
H^1(X,\mathcal V_{j-1}\otimes \mathcal R_j^\ast)\longrightarrow
H^1(X,\mathcal V_{j-1}\otimes \mathcal R_{j-1}^\ast)\longrightarrow
H^1(X, \mathcal V_{j-1}\otimes (\mathcal L\otimes \mathcal K ^{n-j})^\ast)\longrightarrow 0\ .
$$
By Lemma \ref{lem:ext2} the first term vanishes and the second is $\simeq H^1(X,\mathcal K )$.
\end{proof}
\begin{lemma} \label{lem:ext3}
The extension
$0\to \mathcal V_{j-1}\to \mathcal V_j\to \mathcal L\otimes \mathcal K ^{n-j}\to 0$,
is non-split.
\end{lemma}
\begin{proof}
Consider the diagram:
\begin{equation}
\begin{split} \label{eqn:ext-diagram}
\xymatrix{
& H^1(X,\mathcal V_{j-1}\otimes \mathcal R_j^\ast) \ar[d] \\
H^0(X,\mathcal R_{j-1}\otimes \mathcal R_{j-1}^\ast) \ar[d] \ar[r]^{I\mapsto[\beta]}
& H^1(X,\mathcal V_{j-1}\otimes \mathcal R_{j-1}^\ast) \ar[d]^g \\
H^0(X,\mathcal R_{j-1}\otimes (\mathcal L\otimes \mathcal K ^{n-j})^\ast) \ar[r]
& H^1(X,\mathcal V_{j-1}\otimes (\mathcal L \otimes\mathcal K ^{n-j})^\ast) \ar[d] \\
H^0(X,(\mathcal L\otimes \mathcal K ^{n-j})\otimes (\mathcal L \otimes\mathcal K ^{n-j})^\ast) \ar[u] \ar[ur]_{I\mapsto [\alpha]}
& 0
}
\end{split}
\end{equation}
By the comment following \eqref{eqn:extension},
$[\alpha]$ is the extension class of
$0\to \mathcal V_{j-1}\to \mathcal V_j\to \mathcal L\otimes \mathcal K ^{n-j}\to 0$,
and $[\beta]$ is the extension class of $0\to \mathcal V_{j-1}\to \mathcal V\to \mathcal R_{j-1}\to 0$.
By Lemma \ref{lem:ext2}, $g$ is injective. By tracing through the definition of the coboundary one has $[\alpha]=g[\beta]$. Finally, since $\mathcal V$ has a holomorphic connection and $\deg\mathcal V_{j-1}\neq 0$
by Lemma \ref{lem:gr}, it follows from Theorem \ref{thm:weil} that $[\beta]\neq 0$.
\end{proof}
Finally, we can state the result on the uniqueness of the underlying holomorphic structures.
\begin{proposition} \label{prop:bd}
Let $(\mathcal V, \nabla)$ be an $\mathsf{SL}_n$-oper.
Then the oper structure on $\mathcal V$
is uniquely determined by $\mathcal L=\mathcal V/\mathcal V_{n-1}$.
In particular, the isomorphism class of the bundle $\mathcal V$ is
fixed on every connected component of ${\rm Op}_n$.
\end{proposition}
\begin{proof}
By Lemma \ref{lem:gr}, $\mathcal V_1=\mathcal L\otimes\mathcal K^{n-1}$, and so is determined.
By Corollary \ref{cor:ext} and Lemma \ref{lem:ext3},
each $\mathcal V_{j}$ is successively determined by
$\mathcal V_{j-1}$ as the unique nonsplit extension of the
sequence appearing in Lemma \ref{lem:ext3}. Continuing
in this way until $j=n$, this proves the first statement. The
second statement follows as well,
since by Lemma \ref{lem:gr}
we also have $\mathcal L^n\simeq \mathcal K^{-n(n-1)/2}$,
and therefore
the set of possible $\mathcal L$'s
is discrete.
\end{proof}
\begin{corollary} \label{cor:oper-embedding}
The map sending an oper to its monodromy representation
gives an embedding ${\rm Op}_n\to \mathfrak M_B^{(n)}$.
\end{corollary}
\begin{proof}
Fix a representation $\rho: \pi\to \mathsf{SL}_n(\mathbb C)$, and suppose
that up to conjugation $\rho$ is the monodromy of opers
$(\mathcal V_\rho,\nabla_1)$ and $(\mathcal V_\rho,\nabla_2)$.
In light of Proposition \ref{prop:bd},
it suffices to show that
the line bundle $\mathcal L$ is uniquely determined by $\rho$.
Let $\mathcal L$ and $\mathcal M$ be line bundles of degree
$-(n-1)(g-1)$ such that $H^0(X,\mathcal V_\rho^\ast\otimes\mathcal L)\neq \{0\}$ and
$H^0(X,\mathcal V_\rho^\ast\otimes\mathcal M)\neq \{0\}$.
Let $\{\mathcal V_i\}$ be the oper structure for
$(\mathcal V_\rho,\nabla_1)$, and assume $\mathcal V_\rho/\mathcal V_{n-1}=\mathcal L$. If $\mathcal L$
and $\mathcal M$ are not isomorphic, it follows from
$$
0\longrightarrow \mathcal L^\ast\otimes\mathcal M\longrightarrow \mathcal V_\rho^\ast\otimes\mathcal M\longrightarrow
\mathcal V_{n-1}^\ast\otimes\mathcal M\longrightarrow 0\ ,
$$
that
$H^0(X,\mathcal V_{n-1}^\ast\otimes\mathcal M)\neq \{0\}$.
Now for $j\leq n-1$, $\deg \mathcal R_j^\ast\otimes\mathcal M<0$, so by
applying this argument successively we conclude
that
$H^0(X,\mathcal V_{1}^\ast\otimes\mathcal M)\neq \{0\}$.
But
$\mathcal V_1^\ast\otimes\mathcal M=\mathcal L^\ast\otimes\mathcal M\otimes\mathcal K^{1-n}$
also has negative degree, so we get a
contradiction.
\end{proof}
\begin{remark}
There are precisely $n^{2g}$ possibilities for the
line bundle $\mathcal L$ in Proposition \ref{prop:bd}.
These choices label the components of ${\rm Op}_n$. As in Remark \ref{rem:projective}, these correspond precisely to the $n^{2g}$ ways of lifting a monodromy representation in $\mathsf{PSL}_n(\mathbb C)$ to $\mathsf{SL}_n(\mathbb C)$.
For simplicity, from
now on we will always take $\mathcal L=\mathcal K^{-(n-1)/2}$ where if $n$ is even we assume a fixed choice of $\mathcal K^{1/2}$.
\end{remark}
\subsubsection{Opers and differential equations}
We first show how to
obtain an oper from a local system that is realized in
$\mathcal K ^{1-q}$, $n=2q-1$. So assume we are given the exact sequence
\eqref{eqn:n-diffop}, and set $\mathcal V=\mathcal V_n=\mathcal O\otimes_{\underline\mathbb C}{\bf V}$. For $k=1,\ldots, n-1$,
define
$$
\mathcal V_{n-k}=\bigl\{ \sum_{i=1}^n f_i\otimes {\bf v}_i : \sum_{i=1}^n f^{(j)}_i\varphi({\bf v}_i)=0\ ,\ j=0,\ldots, k-1\bigr\}\ .
$$
Then $\mathcal V_{n-k}\subset\mathcal V$ is a coherent subsheaf and we have exact sequences
\begin{equation} \label{eqn:v-sequence}
0\longrightarrow \mathcal V_{n-k-1}\longrightarrow \mathcal V_{n-k}\longrightarrow \mathcal K ^{1-q+k}\longrightarrow 0\ .
\end{equation}
Property (i) of Definition \ref{def:oper} is clearly satisfied. Furthermore, in view of \eqref{eqn:v-sequence}, the connection $\nabla$ induces an $\mathcal O $-linear map $\mathcal V_{n-k-1}\to \mathcal V_{n-k}/\mathcal V_{n-k-1}\otimes \mathcal K\simeq \mathcal K^{2-q+k} $, by
$$
\sum_{i=1}^n f_i\otimes {\bf v}_i \mapsto \sum_{i=1}^n f^{(k+1)}_i\varphi({\bf v}_i)\ ,
$$
and this is an isomorphism of sheaves. So property (ii) holds as well.
Conversely,
suppose that $\mathcal V$ is a rank $n$ holomorphic bundle with
holomorphic connection $\nabla$ that admits an oper structure.
By Lemma \ref{lem:gr} we have $\mathcal V/\mathcal V_{n-1}\simeq \mathcal K ^{1-q}$.
It follows that
for any $\mathsf{SL}_n$-oper (we continue to assume $\mathcal L=\mathcal K^{-(n-1)/2}$),
$H^0(X,\mathcal V^\ast\otimes\mathcal K ^{1-q})\neq\{0\}$. Since the monodromy of an oper is irreducible by Proposition \ref{prop:irreducible}, the hypotheses of Theorem \ref{thm:ohtsuki} are satisfied, and $(\mathcal V, \nabla)$ is realized in $\mathcal K ^{1-q}$.
\begin{theorem}[Beilinson-Drinfeld
{\cite{BeilinsonDrinfeld05}}] \label{thm:bd}
The embedding
above gives an isomorphism between the connected components of
$
{\rm Op}_n
$
and the $($affine$)$ Hitchin base
$ \bigoplus_{j=2}^n H^0(X, \mathcal K ^{j})
$.
\end{theorem}
\begin{corollary}[Teleman {\cite{Teleman60}}]
The monodromy of a differential equation \eqref{eqn:n-diffop}
$($or \eqref{eqn:ell}$)$ is never unitary.
\end{corollary}
\begin{proof}
If $\rho$ is the monodromy, then
from the correspondence above and Lemma \ref{lem:gr} we see
that $\mathcal V_\rho$ is an unstable bundle. But then from the easy
direction of Theorem \ref{thm:narasimhan-seshadri} (see Proposition \ref{prop:easy}),
$\mathcal V_\rho$ cannot admit a flat unitary connection.
\end{proof}
\subsubsection{Opers and moduli space}
The main goal of this section is to prove the following
\begin{theorem}
The map ${\rm Op}_n\hookrightarrow \mathfrak M_B^{(n)}$ is a
proper embedding.
\end{theorem}
\noindent
By the upper semicontinuity of the Harder-Narasimhan type (see Section \ref{sec:higgs-fields}),
this theorem is a direct consequence of the following
\begin{proposition} \label{prop:maximal-hn-type}
Among bundles with holomorphic connections,
opers have strictly maximal Harder-Narasimhan type.
\end{proposition}
We begin with
\begin{lemma} \label{lem:hn-type}
The Harder-Narasimhan filtration of a bundle $\mathcal V$ with an oper
structure is given by the oper filtration itself.
\end{lemma}
\begin{proof}
It suffices to show that for each $j=0, \ldots, n-1$, $\mathcal V_{j+1}/\mathcal V_j$ is the maximally destabilizing subsheaf of $\mathcal V/\mathcal V_j$. In order to do this, let $\mu_{max}(\mathcal V/\mathcal V_j)$ denote the maximal slope of a subsheaf of $\mathcal F\subset\mathcal V/\mathcal V_j$, $0<\rank\mathcal F<\rank(\mathcal V/\mathcal V_j)$. We make the inductive hypothesis that
$$
\mu_{max}(\mathcal V/\mathcal V_j)=\mu(\mathcal V_{j+1}/\mathcal V_j)=(n-1)(g-1)-j(2g-2)\ .
$$
Note that this is trivially satisfied for $j=n-1$. Now suppose $j\leq n-2$ and let $\mathcal F\to \mathcal V/\mathcal V_{j}$ be the maximally destabilizing subsheaf. Then $\mathcal F$ is semistable, and from the sequence
$$
0\longrightarrow \mathcal V_{j+1}/\mathcal V_j\longrightarrow \mathcal V/\mathcal V_j\longrightarrow \mathcal V/\mathcal V_{j+1}\longrightarrow 0\ ,
$$
and the inductive hypothesis, we have
$$
\mu(\mathcal F)\geq \mu(\mathcal V_{j+1}/\mathcal V_j)>\mu(\mathcal V_{j+2}/\mathcal V_{j+1})=\mu_{max}(\mathcal V/\mathcal V_{j+1})\ .
$$
It follows that the induced map $\mathcal F\to \mathcal V/\mathcal V_{j+1}$ must vanish. Therefore, $\mathcal F\simeq\mathcal V_{j+1}/\mathcal V_j$, and moreover the inductive hypothesis is satisfied for $j$.
This concludes the proof.
\end{proof}
\begin{proof}[Proof of Proposition \ref{prop:maximal-hn-type}] (cf.\ \cite[Theorem 5.3.1]{JoshiPauly09})
Let $(\mathcal V, \nabla)$ be an unstable bundle with holomorphic connection.
I claim that
it suffices to assume that $\nabla$ is irreducible. Indeed, in the case of rank $1$ there is nothing to prove. Suppose the result has been proven for rank $<n$ and suppose $(\mathcal V, \nabla)$ is reducible. Since the Harder-Narasimhan type is upper semicontinuous, we may assume there is a splitting $(\mathcal V,\nabla)=(\mathcal V_1,\nabla_1)\oplus (\mathcal V_2,\nabla_2)$, with $n_i=\rank\mathcal V_i\geq 1$. Then by the induction hypothesis, it suffices to assume the $\mathcal V_i$ have the Harder-Narasimhan types of rank $n_i$-opers. Indeed, if not then we can change the Harder-Narasimhan types of $\mathcal V_i$, without changing the ordering of the slopes for $\mathcal V$, so that $\mathcal V$ has a larger Harder-Narasimhan type.
Let
\begin{equation} \label{eqn:mu-oper}
\mu_i=\mu_i^{(n)}=\mu(\mathcal K ^{q-i})=(n+1-2i)(g-1)\ ,
\end{equation}
be the Harder-Narasimhan type of a rank $n$-oper (see Lemmas \ref{lem:hn-type} and \ref{lem:gr}).
If $\lambda_i$ is a reordering of the slopes $\{\mu_i^{(n_1)}, \mu_j^{(n_2)}\}$, we need to show
\begin{equation} \label{eqn:lambda-inequality}
\sum_{i=1}^k\lambda_i\leq \sum_{i=1}^k \mu_i^{(n)}\ ,
\end{equation}
for all $k=1,\ldots, n$, with strict inequality for some $k$. Assume $n_1\geq n_2$. Without changing the ordering of the slopes we can sequentially subtract even integers from the leading entries $\mu_i^{(n)}$, $\lambda_i=\mu_i^{(n_1)}$ for $2i\leq n_1-n_2$, and add the integers to last entries where $n_1+n_2+2\leq 2i$.
Notice that the multiplicities of the resulting first and last slopes in $\{\mu_i\}$ and $\{\lambda_i\}$ are equal and will cancel in the sums, so it suffices to consider the intervening sums. This reduces the problem to one of two cases:
$n_1=n_2$ or $n_1=n_2+1$ (and $n=n_1+n_2$), where it is straightforward to verify \eqref{eqn:lambda-inequality}.
With this understood, we may assume that $(\mathcal V,\nabla)$ is irreducible.
The Harder-Narasimhan type of an oper is given by \eqref{eqn:mu-oper}.
Let $\mathcal V_{i-1}\subset\mathcal V_{i}$, $i=1,\ldots, \ell$, be the Harder-Narasimhan filtration of $\mathcal V$,
and $\lambda_i=\mu(\mathcal V_i/\mathcal V_{i-1})$. Let $n_i=\rank(\mathcal V_i/\mathcal V_{i-1})$ and $d_i=n_i\lambda_i$.
Then it suffices to show
\begin{equation} \label{eqn:hn}
\sum_{i=1}^j n_i\lambda_i\leq \sum_{i=1}^{\rank(\mathcal V_j)}\mu_i\ ,
\end{equation}
for $j=1,\ldots, \ell$. The left hand side is just $\deg\mathcal V_j$
while the right hand side is
$$
\sum_{i=1}^{\rank(\mathcal V_j)}(n+1-2i)(g-1)=(g-1)\rank(\mathcal V_j)(n-\rank(\mathcal V_j))\ .
$$
Hence, \eqref{eqn:hn} is equivalent to
\begin{equation} \label{eqn:hn2}
\deg\mathcal V_j\leq (g-1)\bigl( \sum_{i=1}^{j}n_i\bigr)\bigl(n-\sum_{i=1}^{j}n_i\bigr)\ .
\end{equation}
Repeatedly apply Proposition \ref{prop:bound} to find
\begin{align*}
\lambda_j&\leq \lambda_{j+1}+2g-2 \\
\lambda_j&\leq \lambda_{j+2}+2(2g-2) \\
\lambda_j&\leq \lambda_{j+i}+i(2g-2) \\
\lambda_j&\leq \lambda_{\ell}+(\ell-j)(2g-2) \ ,
\end{align*}
for any $i\leq \ell-j$. This implies
\begin{align*}
\frac{n_{j+1}}{n_j}d_j&\leq d_{j+1}+(2g-2)n_{j+1} \\
\frac{n_{j+i}}{n_j}d_j&\leq d_{j+i}+i(2g-2)n_{j+i} \\
\frac{n_{\ell}}{n_j}d_j&\leq d_{\ell}+(\ell-j)(2g-2)n_{\ell} \ ,
\end{align*}
from which we have
\begin{equation} \label{eqn:inequality}
\bigl( \sum_{i=1}^{\ell-j} n_{i+j}\bigr)\frac{d_j}{n_j} \leq \sum_{i=1}^{\ell-j} d_{i+j}+(2g-2)\sum_{i=1}^{\ell-j} in_{i+j}
\end{equation}
Consider first the case $j=1$. Then \eqref{eqn:inequality} becomes
\begin{align}
\bigl( \sum_{i=2}^{\ell} n_{i}\bigr)\frac{d_1}{n_1} &\leq \sum_{i=2}^{\ell} d_{i}+(2g-2)\sum_{i=2}^{\ell} (i-1)n_{i} \notag \\
( n-n_{1})\frac{d_1}{n_1} &\leq -d_1+(2g-2)\sum_{i=2}^{\ell} (i-1)n_{i}\notag \\
d_1 &\leq \frac{n_1}{n}(2g-2)\sum_{i=2}^{\ell} (i-1)n_{i} \ . \label{eqn:hn3}
\end{align}
We claim that
\begin{equation} \label{eqn:claim1}
\frac{2}{n}\sum_{i=2}^{\ell} (i-1)n_{i} \leq n-n_1=\sum_{i=2}^\ell n_i\ .
\end{equation}
Note that this combined with \eqref{eqn:hn3} proves \eqref{eqn:hn2} in the case $j=1$.
To prove the claim, let $r_i=n_i-1\geq 0$. Then \eqref{eqn:claim1} becomes
\begin{align*}
2\sum_{i=2}^{\ell} (i-1)(r_{i}+1) &\leq n\sum_{i=2}^\ell (r_i+1) \\
2\sum_{i=2}^{\ell} (i-1)r_{i} +\ell(\ell-1) &\leq \left[\sum_{i=2}^\ell (r_i+1)\right]^2+n_1\sum_{i=2}^\ell (r_i+1)\ ,
\end{align*}
which holds if
$$
2\sum_{i=2}^{\ell} (i-1)r_{i} +\ell(\ell-1) \leq \left[\sum_{i=2}^\ell r_i + (\ell-1)\right]^2+(\ell-1)\ ,
$$
which in turn, after canceling like terms from both sides, holds if
$$
\sum_{i=2}^{\ell} (i-1)r_{i} \leq \sum_{i=2}^\ell (\ell-1)r_i \ ,
$$
and the latter is manifestly true since $r_i\geq 0$. Hence, \eqref{eqn:claim1} holds.
We now proceed by induction. So suppose that \eqref{eqn:hn2} holds for $j$. We show that it holds also for $j+1$. Adding \eqref{eqn:hn2} (for $j$) and \eqref{eqn:inequality} (for $j+1$) we have
\begin{align*}
\deg\mathcal V_{j+1}= \deg\mathcal V_j+d_{j+1}&\leq
(g-1)\sum_{i=1}^j n_i\sum_{i=1}^{\ell-j} n_{i+j} \\
&\qquad -\frac{n_{j+1}}{\sum_{i=2}^{\ell-j} n_{i+j}}\sum_{i=1}^{j+1} d_i+
\frac{n_{j+1}}{\sum_{i=2}^{\ell-j} n_{i+j}}(2g-2)\sum_{i=1}^{\ell-j-1}in_{i+j+1} \\
\frac{\sum_{i=1}^{\ell-j} n_{i+j}}{\sum_{i=2}^{\ell-j} n_{i+j}}\, \deg\mathcal V_{j+1}
&\leq
(g-1)\sum_{i=1}^j n_i\sum_{i=1}^{\ell-j} n_{i+j}+
\frac{n_{j+1}}{\sum_{i=2}^{\ell-j} n_{i+j}}(2g-2)\sum_{i=1}^{\ell-j-1}in_{i+j+1} \\
\frac{\deg\mathcal V_{j+1}}{n-\sum_{i=1}^{j+1} n_{i}}
&\leq
(g-1)\sum_{i=1}^j n_i+
\frac{n_{j+1}}{\sum_{i=1}^{\ell-j} n_{i+j}\sum_{i=2}^{\ell-j} n_{i+j}}(2g-2)\sum_{i=1}^{\ell-j-1}in_{i+j+1}\ ,
\end{align*}
where in going from the first inequality to the second we have used the fact that $\deg\mathcal V_{j+1}=\sum_{i=1}^{j+1}d_i$.
Hence, it suffices to show
$$
2\sum_{i=1}^{\ell-j-1}i n_{i+j+1}\leq \sum_{i=1}^{\ell-j}n_{i+j}\sum_{i=2}^{\ell-j} n_{i+j}\ .
$$
In terms of the $r_i$ defined above, this becomes
\begin{align*}
2\sum_{i=2}^{\ell-j}(i-1) (r_{i+j}+1)&\leq (r_{j+1}+1)\sum_{i=2}^{\ell-j} (r_{i+j}+1)+\left(\sum_{i=2}^{\ell-j} (r_{i+j}+1)\right)^2 \\
2\sum_{i=2}^{\ell-j}(i-1) r_{i+j}+(\ell-j)(\ell-j-1)&\leq (r_{j+1}+1)\sum_{i=2}^{\ell-j} r_{i+j}
+(r_{j+1}+1)(\ell-j-1) \\
&\qquad\qquad\qquad\qquad
+\left(\sum_{i=2}^{\ell-j} r_{i+j}+(\ell-j-1)\right)^2 \ .
\end{align*}
But this is a consequence of
$$
2\sum_{i=2}^{\ell-j}(i-1) r_{i+j}\leq 2\sum_{i=2}^{\ell-j}(\ell-j-1) r_{i+j}\ ,
$$
which obviously holds. This completes the proof of the maximality of the Harder-Narasimhan type. We now show that if the Harder-Narasimhan type of $(\mathcal V,\nabla)$ is maximal then the filtration $\{\mathcal V_i\}$ is an oper structure. Indeed, consider the $\mathcal O$-linear map $\nabla: \mathcal V_i\to \mathcal V/\mathcal V_{i+1}\otimes \mathcal K$. By Remark \ref{rem:maximal}, the minimal slope of a quotient of $\mathcal V_i$ is $\mu_i=\mu(\mathcal V_i/\mathcal V_{i-1})$, whereas the maximal slope of a subsheaf of $\mathcal V/\mathcal V_{i+1}\otimes\mathcal K$ is
$$\mu(\mathcal V_{i+2}/\mathcal V_{i+1}\otimes\mathcal K)=\mu_{i+2}+2g-2=\mu_{i+1}=\mu_i-(2g-2)<\mu_i\ .$$
Hence, the map above must be zero, and $\nabla\mathcal V_i\subset \mathcal V_{i+1}\otimes \mathcal K$. By irreducibility of the connection, $\mathcal V_i/\mathcal V_{i-1}\to \mathcal V_{i+1}/\mathcal V_i\otimes\mathcal K$ is nonzero. Since these are line bundles with the same degree, this map is an isomorphism. Therefore, conditions (i) and (ii) in Definition \ref{def:oper} are satisfied. This completes the proof.
\end{proof}
\subsection{The Eichler-Shimura isomorphism} Let us return in more detail to Example \ref{ex:fuchsian}.
For $q\in \frac{1}{2}\mathbb Z$, let $V_q$ denote the $2q-1$ dimensional irreducible representation of $\mathsf{SL}_2(\mathbb C)$. Let $\rho:\pi\to \mathsf{SL}_2(\mathbb C)$ be the (lift of the) monodromy of a projective connection on $X$.
We can realize the local system ${\bf V}_\rho$ in $\mathcal K^{-1/2}$ for some choice of spin structure. For $q\geq 3/2$, let ${\bf V}_q$ denote the local system obtained by composing $\rho$ with the representation $V_q$:
$$
\rho^{(n)}:\pi\longrightarrow\mathsf{SL}_2(\mathbb C)\longrightarrow \mathsf{SL}(V_q)\ .
$$
Then ${\bf V}_q$ is realized in $\mathcal K^{1-q}$, and we have
\begin{equation} \label{eqn:fuchsian-oper}
0\longrightarrow {\bf V}_q \longrightarrow \mathcal K ^{1-q}
\stackrel{D}{\xrightarrow{\hspace*{.75cm}}} \mathcal K ^q \longrightarrow 0\ .
\end{equation}
Since $q\geq 3/2$, $H^0(X,\mathcal K^{1-q})\simeq H^1(X, \mathcal K^q)^\ast =\{0\}$. This implies $H^0(X,{\bf V}_q)=H^2(X,{\bf V}_q)=\{0\}$, and the long exact sequence associated to \eqref{eqn:fuchsian-oper} becomes
$$
0\longrightarrow H^0(X,\mathcal K^{q}) \stackrel{\delta}{\xrightarrow{\hspace*{.5cm}}} H^1(X,{\bf V}_q)
\longrightarrow H^1(X,\mathcal K^{1-q}) \longrightarrow 0\ .
$$
The coboundary map $\delta$ is called {\bf Eichler integration}.
The reason for the terminology is the following: if $\omega$ is a global holomorphic section of $\mathcal K^q$, then on sufficiently small open sets $U_i$ we can solve the inhomogeneous equation $Dy_i=\omega\bigr|_{U_i}$. If we set ${\bf v}_{ij}=y_i-y_j$, then $D{\bf v}_{ij}=0$ on overlaps, so $\{{\bf v}_{ij}\}$ is a $1$-cocycle with values in ${\bf V}_q$ which represents $\delta\omega$.
In any case,
it follows that we have an isomorphism (cf.\ \cite{Eichler57, Shimura59, Gunning67})
\begin{equation} \label{eqn:eichler}
H^1(X,{\bf V}_q)\simeq H^0(X,\mathcal K^{q}) \oplus (H^0(X,\mathcal K^{q}))^\ast\ .
\end{equation}
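As a dimension check: the local system ${\bf V}_q$ has rank $2q-1$ and $H^0(X,{\bf V}_q)=H^2(X,{\bf V}_q)=\{0\}$, so $\dim H^1(X,{\bf V}_q)=-(2q-1)\chi(X)=(2q-1)(2g-2)$, while by Riemann-Roch $\dim H^0(X,\mathcal K^q)=(2q-1)(g-1)$ for $q\geq 3/2$; the two sides of \eqref{eqn:eichler} therefore have the same dimension.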
Eq.\ \eqref{eqn:eichler} can be used to describe the tangent space to the Betti moduli space at $[\rho^{(n)}]$ (this was explained to me by Bill Goldman \cite{Goldman}). From Weil's description of the tangent space,
\begin{equation} \label{eqn:weil}
T_{[\rho^{(n)}]}\mathfrak M_B^{(n)}\simeq H^1(X, \End{\bf V}_q)\ .
\end{equation}
Now representations of $\mathsf{SL}_2(\mathbb C)$ are self-dual: $V_q^\ast\simeq V_q$. By the Clebsch-Gordan rule for the decomposition of tensor product representations, we have
$$
\End V_q= (V_q\otimes V_q^\ast)_{\tr=0}\simeq (V_q\otimes V_q)_{\tr=0}=\bigoplus_{\stackrel{j=2}{j\in \mathbb Z}}^{2q-1} V_j
$$
(note that the trivial representation $V_1$ is eliminated by the traceless condition).
This decomposition translates into one for the local system. It follows that
$$
H^1(X, \End{\bf V}_q)=\bigoplus_{\buildrel {j=2}\over {j\in \mathbb Z}}^{2q-1} H^1(X,{\bf V}_j)\ .
$$
Combining this with eqs.\ \eqref{eqn:eichler} and \eqref{eqn:weil} we obtain
$$
T_{[\rho^{(n)}]}\mathfrak M_B^{(n)}\simeq
\bigoplus_{j=2}^n H^0(X,\mathcal K^j)\oplus (H^0(X,\mathcal K^j))^\ast\ .
$$
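As a check on dimensions, $\sum_{j=2}^{n}\dim H^0(X,\mathcal K^j)=\sum_{j=2}^{n}(2j-1)(g-1)=(n^2-1)(g-1)$, so the right hand side has dimension $2(n^2-1)(g-1)=(2g-2)\dim\mathsf{SL}_n(\mathbb C)$, the expected dimension of $\mathfrak M_B^{(n)}$ at a smooth point.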
This should be compared with \eqref{eqn:higgs-fuchs}!
\end{document}
|
\begin{document}
\baselineskip=18pt
\title[The fitness of the strongest individual]{The fitness of the strongest individual in the subcritical GMS model}
\author[Carolina Grejo]{Carolina Grejo}
\address[F. Machado, A. Rold\'an, C. Grejo]{Statistics Department, Institute of Mathematics and Statistics, University of S\~ao Paulo, CEP 05508-090, S\~ao Paulo, SP, Brazil.}
\email{[email protected]}
\author[F\'abio Machado]{F\'abio Machado}
\email{[email protected]}
\thanks{Carolina Grejo was supported by CNPq (141965/2014-2), F\'abio Machado by CNPq (310829/2014-3) and Fapesp(09/52379-8) and Alejandro Roldan by CNPq (141046/2013-9).}
\author[Alejandro Rold\'an-Correa]{Alejandro Rold\'an-Correa}
\email{[email protected]}
\keywords{GMS model; random walk; Gauss hypergeometric function.}
\subjclass[2010]{60J20, 60G50, 33C05}
\date{\today}
\begin{abstract}
{We derive the distribution of the fitness of the strongest individual in a variation of the species
survival model proposed by Guiol, Machado and Schinazi~\cite{GMS11}. We point out
that this distribution is expressed in terms of the Gauss hypergeometric function and, when
$p=\frac{1}{2}$, is related to the hypergeometric function type I distribution.}
\end{abstract}
\maketitle
\section{Introduction}
\label{S: Introduction}
We consider a discrete time model beginning from an empty set. At each time $n\geq 1$, a new species is born with probability $p$ or there is a death (if the system is not empty) with probability $q=1-p$. Let $X_n$ be the total number of species at time $n$. Then $X_n$ is a random walk on ${\mathbb{Z}}_+$ that jumps to the right with probability $p$ and to the left with probability $q$. When $X_n$ is at 0, the process jumps to 1 with probability $p$ or stays at 0 with probability $1-p$. We assign a random number to each new species. This number has a uniform distribution on $[0,1]$. We think of this number as a fitness associated to each species. These random numbers are independent of each other. When a death occurs, the individual with the lowest fitness dies. This model, later named the GMS model, was first proposed and studied in Guiol {\it et al}~\cite{GMS11}. Some interesting variations were further studied in Guiol {\it et al}~\cite{GMS13}, Ben Ari {\it et al}~\cite{BMR11} and Skevi and Volkov~\cite{SV12}.
In Guiol {\it et al} \cite{GMS11} it is shown that there is a sharp phase transition when $p>1/2$:
the set $R_n$ of species alive at time $n$ with fitness higher than $f_c=\frac{1-p}{p}$
approaches a uniform distribution in the following sense. For $f_c < a < b <1$,
\[ \lim_{n \to \infty} \frac{|R_n \cap (a,b)|}{n} = p(b-a) \ \text{ a.s.}\]
On the other hand, every species born with fitness less than $f_c$ disappears after a finite (random) time.
The set of species present in the system whose fitness is smaller than $f_c$ becomes empty infinitely many times.
Here we focus on the case $p\leq1/2$ in order to understand
better the dynamics of this model. In this case, the process $X_n$ is recurrent and the system becomes empty infinitely many times. Therefore it is not interesting to study the distribution of the fitness of the species which are alive in the system in the long run. An interesting point is to study the distribution of the fitness of the strongest individual on each excursion between the epochs when the system becomes empty.
We propose a variation of the GMS model in which, each time the system becomes empty, a set of $m$ individuals is introduced with independent fitnesses. This variation is meant to reinforce competition among species before the system becomes empty again.
\section{Results}
We deduce explicitly the distribution of the fitness of the strongest individual on excursions between the epochs when the system becomes empty. The last individual to die before the system becomes empty is the strongest one on that excursion, because the first ones to die are those individuals with the smallest fitnesses.
Observe that some excursions may have length 2. When this happens, the individual who is born dies right away, without competing with any other individual. To ensure that each excursion involves competition among individuals, in a sort of natural selection process, we introduce a modification of the model: each time after the system becomes empty, $m$ independent new species are placed in the system (instead of just 1) with probability $p$, or the system stays empty
with probability $1 - p$. We denote this variation by GMS($m$). In this setup, GMS(1) is the original model.
\begin{figure}
\caption{GMS(1)}
\label{fig:sub1}
\caption{GMS(10)}
\label{fig:sub2}
\caption{Histograms of the fitnesses of the strongest individual on GMS(m) after 200,000 births and deaths for $p=1/4$.}
\label{fig:test}
\end{figure}
Figures \ref{fig:sub1} and \ref{fig:sub2}
show the role of competition in the distribution
of the fitness of the strongest individual on each excursion. Short excursions are more common on GMS(1) than on
GMS(10). That behaviour allows individuals with lower fitnesses to be the strongest ones. The competition
introduced in GMS(10) prevents that.
The next result computes the fitness distribution of the strongest individual to die right before the system becomes
empty in the GMS($m$) model. It is expressed in terms of the Gauss hypergeometric function (see Luke~\cite{Luke}). This function is denoted by ${_2F_1}(a,b;c;z)$, namely,
\begin{equation}\label{HG}
{_2F_1}(a,b;c;z)=\sum_{k\geq 0}\frac{(a)_k (b)_k}{(c)_k}\frac{z^k}{k!}, \quad |z|<1,
\end{equation}
\noindent where $a$, $b$, $c$ are real numbers with $c\neq 0,-1,-2,\dots$, and $(a)_k$ is the Pochhammer symbol, namely,
$$(a)_k=a(a+1)\cdots(a+k-1), \quad (a)_0=1.$$
\begin{theorem}\label{T:GMSm} Let $p\leq 1/2$ and $Z_m$ be the fitness of the strongest individual before the system
becomes empty in the GMS($m$) model. Then $Z_m$ is a random variable with distribution
$$\mathbb{P}[Z_m\leq t]= (qt)^m {_2F_1}\left(\frac{m}{2},\frac{m+1}{2};m+1;4pqt\right) \hskip1cm 0\leq t <1.$$
\end{theorem}
\begin{cor}\label{T:GMS} Let $p\leq 1/2$ and $Z$ be the fitness of the strongest
individual before the system becomes empty in the GMS($1$) model. Then
$$\mathbb{P}[Z\leq t]=\frac{1-\sqrt{1-4pqt}}{2 p}, \hskip1cm 0\leq t <1.$$
For $p= 1/2$, $Z$ follows a Beta distribution $B(1,1/2).$
\end{cor}
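Indeed, for $p=q=1/2$ the formula gives $\mathbb{P}[Z\leq t]=1-\sqrt{1-t}$, whose density $\frac{1}{2}(1-t)^{-1/2}$ is that of a $B(1,1/2)$ random variable.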
\begin{obs} By Theorem \ref{T:GMSm}, the probability density function of $Z_m$ is
\begin{eqnarray}\label{hipertipo1}
f_m(t)
&=& \frac{d}{dt}\left[(qt)^m {_2F_1}\left(\frac{m}{2},\frac{m+1}{2};m+1;4pqt\right)\right]\nonumber\\
&=& mq^m t^{m-1} {_2F_1}\left(\frac{m}{2},\frac{m+1}{2};m;4pqt\right)
\end{eqnarray}
where the last line has been obtained by using Abramowitz and Stegun \cite[Eq. 15.2.4]{Abramowitz}. When $p=q=1/2$, the distribution of $1-Z_m$ is known as the \textit{hypergeometric function type I distribution} (see Gupta and Nagar \cite[p. 298]{Nagar}).
\end{obs}
\begin{cor}\label{C:GMSm} $\mathbb{E}[Z_m]=1-\displaystyle\frac{q^m}{m+1} \ {_2F_1}\left(\frac{m}{2},\frac{m+1}{2};m+2;4pq\right)$
\end{cor}
\begin{obs} Considering Corollary~\ref{C:GMSm} when $p=1/2$, by using the following equality (see Gradshteyn and Ryzhik \cite[Eq. 7.512.11]{Gradshteyn}) $${_2F_1}(\alpha,\beta;\gamma;1)=\frac{\Gamma(\gamma)\Gamma(\gamma-\alpha-\beta)}{\Gamma(\gamma-\alpha)\Gamma(\gamma-\beta)}$$ we have that
\begin{eqnarray}
\mathbb{E}[Z_m]&=&1-\frac{q^m}{m+1} {_2F_1}\left(\frac{m}{2},\frac{m+1}{2};m+2;1\right) \nonumber \\
&=&1-\frac{1}{2^m(m+1)}\left[\frac{\Gamma(m+2)\Gamma(3/2)}{\Gamma(\frac{m+3}{2}+\frac{1}{2})\Gamma(\frac{m+3}{2})}\right] \nonumber \\
&=&1-\frac{\sqrt{\pi} \ m!}{2^{m+1}\Gamma(\frac{m+3}{2}+\frac{1}{2})\Gamma(\frac{m+3}{2})} \nonumber
\end{eqnarray}
\noindent where the last line has been obtained by using $\Gamma(3/2)=\sqrt{\pi}/2$. Now, using the duplication formula, namely,
\begin{equation}
\Gamma(2z)=\frac{\Gamma(z+\frac{1}{2})\Gamma(z)}{2^{1-2z}\sqrt{\pi}} \nonumber
\end{equation}
\noindent we get
\begin{equation}
\mathbb{E}[Z_m]=1-\displaystyle\frac{2}{(m+1)(m+2)} \nonumber
\end{equation}
\end{obs}
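\begin{obs}
The distribution in Theorem \ref{T:GMSm} is easy to check numerically. The following short script (a sketch only; the function and variable names are ours and not part of the model) simulates excursions of GMS($m$) and compares the empirical distribution of the fitness of the strongest individual with the formula of Theorem \ref{T:GMSm}, using the implementation of ${_2F_1}$ available in SciPy. The steps at which the empty system stays empty are skipped, since they play no role for $Z_m$.
\begin{verbatim}
import numpy as np
from scipy.special import hyp2f1

def simulate_strongest(p, m, n_excursions, rng):
    # Fitness of the last individual to die in each excursion of GMS(m).
    out = np.empty(n_excursions)
    for i in range(n_excursions):
        alive = list(rng.uniform(size=m))   # the m founders of the excursion
        strongest = max(alive)
        while alive:
            if rng.uniform() < p:           # a birth
                fitness = rng.uniform()
                alive.append(fitness)
                strongest = max(strongest, fitness)
            else:                           # a death: the weakest dies
                alive.remove(min(alive))
        out[i] = strongest                  # the last to die is the strongest
    return out

def cdf_Zm(t, p, m):
    # P[Z_m <= t] as in Theorem T:GMSm.
    q = 1.0 - p
    return (q * t) ** m * hyp2f1(m / 2.0, (m + 1) / 2.0, m + 1, 4 * p * q * t)

rng = np.random.default_rng(0)
p, m = 0.25, 10
sample = simulate_strongest(p, m, 20000, rng)
for t in (0.25, 0.5, 0.75, 0.9):
    print(t, np.mean(sample <= t), cdf_Zm(t, p, m))
\end{verbatim}
\end{obs}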
\section{Proofs}
\begin{proof}[Proof of Theorem \ref{T:GMSm}]
For $n=0,1, \dots$ we define
\[ \tau_n = \inf \{l \ge 1 : X_{n+l} = 0, X_n=0 \}. \]
In words, $\tau_n$ is the length of an excursion from 0 to 0. As the process $X_n$ is homogeneous,
the distribution of $\tau_n$ does not depend on $n$, so we consider the random variable
$\tau := \tau_0$. Besides, as $p\leq 1/2$ we have that $\mathbb{P}[\tau <\infty]=1$. Since the steps at which the system stays empty play no role for $Z_m$, we consider only the excursions that start with the introduction of the $m$ founders; conditional on this,
$$\mathbb{P}[\tau =k+1]=\mathbb{P}[T_{-m}=k]=\frac{m}{k}{k \choose \frac{k-m}{2}} p^{(k-m)/2}q^{(k+m)/2}, \ k\geq m, \ k+m \text{ even}, $$
where $T_{-m}$ is the time of the first visit to $-m$ for a random walk on $\mathbb{Z}$ beginning at 0. (See Bhattacharya and Waymire \cite{Batachyara})\\
\noindent If $\tau=k+1$, then along that excursion we see $\frac{k-m}{2}$ additional births and $\frac{k+m}{2}$ deaths.
The last death corresponds to the individual with the strongest fitness among all $\frac{k+m}{2}$ individuals that were born. Hence,
$$\mathbb{P}[Z_m\leq t]=\sum_{k=m}^\infty \mathbb{P}[\tau=k+1]\mathbb{P}[\max(Y_1,...,Y_{\frac{m+k}{2}})\leq t],$$
\noindent
where $Y_1,...,Y_{\frac{m+k}{2}}$ are i.i.d. uniform random variables on $[0,1].$ Therefore,
$$\begin{array}{ccl}
\mathbb{P}[Z_m\leq t]&=&\displaystyle\sum_{k=m}^\infty \frac{m}{k}{k \choose \frac{k-m}{2}} p^{(k-m)/2}q^{(k+m)/2}t^{(k+m)/2} \textbf{1}_{\{m+k \text{ even}\}} \\ \\
&=&\displaystyle\sum_{l=m}^\infty \frac{m}{2l-m}{2l-m \choose l} p^{l-m}q^{l}t^{l} \hskip0.5cm (k+m=2l, l\geq m)\\ \\
&=&\displaystyle\sum_{j=0}^\infty \frac{m}{m+2j}{m+2j \choose m+j} p^{j}q^{m+j}t^{m+j} \hskip0.5cm (l=m+j)\\ \\
&=&(qt)^m \displaystyle\sum_{j=0}^\infty \frac{m}{m+2j}{m+2j \choose m+j} (pqt)^{j}\\
&=&(qt)^m \displaystyle\sum_{j=0}^\infty \frac{(m)_{2j}}{(m+1)_j} \frac{(pqt)^{j}}{j!}\\ \\
&=&(qt)^m {_2F_1}\left(\frac{m}{2},\frac{m+1}{2};m+1;4pqt\right) \\ \\
\end{array}$$
where the last line has been obtained by using $(a)_{2j}=\left(\frac{a}{2}\right)_j\left(\frac{a+1}{2}\right)_j 2^{2j}$ and the definition of the Gauss hypergeometric function.
\end{proof}
\begin{proof}[Proof of Corollary \ref{T:GMS}]
It is a particular case of Theorem \ref{T:GMSm} when $m=1$. In this situation
\begin{eqnarray}
qt \ {_2F_1}\left(\frac{1}{2},1;2;4pqt\right)
&=& qt \sum_{k\geq 0}\frac{(1/2)_k (1)_k}{(2)_k}\frac{(4pqt)^k}{k!} \nonumber\\
&=& \frac{1}{p} \sum_{k\geq 0} \frac{(2k)!}{(k+1)! \ k!}(pqt)^{k+1} \nonumber\\
&=& \frac{1-\sqrt{1-4pqt}}{2p}\nonumber
\end{eqnarray}
where the last line has been obtained by using $(1)_k(1/2)_k= 2^{-2k} (2k)!$ and the result given in Prudnikov {\it et al} \cite[Eq. 5.2.13.8]{Prud}.
\end{proof}
\begin{proof}[Proof of Corollary \ref{C:GMSm}]
$$\begin{array}{ccl}
\mathbb{E}[Z_m]&=& \int_0^1\mathbb{P}[Z_m>t] \ dt \\ \\
&=&1-q^m\int_0^1 t^m {_2F_1}\left(\frac{m}{2},\frac{m+1}{2};m+1;4pqt\right) \ dt \\ \\
&=&1-\frac{q^m}{m+1} {_2F_1}\left(\frac{m}{2},\frac{m+1}{2};m+2;4pq\right)\\ \\
\end{array}$$
where the last line has been obtained by using the result given in Gradshteyn and Ryzhik \cite[Eq. 7.512.11]{Gradshteyn}.
\end{proof}
\noindent
\textbf{Acknowledgements:} The authors are thankful to Daniel Valesin and Rinaldo Schinazi for helpful discussions about the model. Thanks are also due to the anonymous referee for his/her
constructive comments, leading to an improved presentation.
\end{document}
|
\begin{document}
\title{The Solvability Of Magneto-heating Coupling Model
With Turbulent Convection Zone
And The Flow Fields
}
\titlerunning{Magneto-heating coupling model}
\author{Changhui Yao \and
Yanping Lin \and
Lixiu Wang \and
Xuefan Jia
}
\authorrunning{C. Yao, L.Wang and X. Jia}
\institute{ Corresponding author: Changhui Yao \at
School of Mathematics and Statistics, Zhengzhou University,450001, China.
\email{[email protected]} \\
Yanping Lin\at
Department of Applied Mathematics, Hong Kong Polytechnic University, Hong Kong.\email{[email protected]}.\\
Lixiu Wang \at Beijing Computational Science Research Center, Beijing 100193,
China. \email{[email protected]}.\\
Xuefan Jia \at
School of Mathematics and Statistics, Zhengzhou University,450001, China.
\email{[email protected]} \\
}
\date{Received: date / Accepted: date}
\maketitle
\begin{abstract}
In this paper, the magneto-heating coupling model with a turbulent convection zone and a flow field is studied in detail. Our main work is to analyze the well-posedness of this model by regularization techniques. For the magnetic field we consider the space $H_0(curl)\cap H(div_0)$, and for the heat equation we consider the space $H_0^1(\Omega)$. We then present the weak formulation of the coupled magneto-heating model and set up a regularized problem. Using Rothe's method, the theory of monotone nonlinear operators and weak convergence arguments, we prove that the limits of the solutions obtained from Rothe's method converge to solutions of the regularized problem with proper initial data. With the help of a spatial regularity technique, we derive the well-posedness of the original problem as the regularization parameter $\epsilon\longrightarrow 0$. Moreover, under an additional regularity assumption on both the magnetic field and the temperature variable, we prove the uniqueness of the solutions.
\keywords{\ Magneto-heating coupling model \and Regularity
\and Well-posedness \and Stability}
\subclass{ 65N30 \and 65N15 \and 35J25}
\end{abstract}
\section{Introduction}
\label{intro}
It is well known that magnetohydrodynamic (MHD) dynamo processes
can be invoked to explain large-scale magnetic activity~\cite{MR01}.
Assume that the magnetic field ${\bf B}$, the electric field ${\bf E}$ and the electric
current density ${\bf J}$ are governed by Maxwell's equations and the constitutive
relations in the magnetohydrodynamic approximation, that is \cite{MR01},
\begin{eqnarray}
\label{equation:eq-1}
&& \partial_t{\bf B}+\nabla\times{\bf E}=0,\ \ \nabla\cdot {\bf B}=0,\\
\label{equation:eq-2}
&& \nabla\times{\bf B}=\mu {\bf J},\ \ {\bf J}=\sigma({\bf E}+{\bf U\times B}),
\end{eqnarray}
where $\mu$ and $\sigma$ are the magnetic permeability and the electric conductivity,
and ${\bf U}$ is the velocity of the fluid.
Large-scale magnetic and flow field activity can also drive small-scale turbulent flows as well as large-scale global circulations in their interiors~\cite{MR02,MR03}. It is then useful to introduce mean-field dynamo theory~\cite{MR04}, which describes the large-scale behavior of such fields. The magnetic and velocity fields can be divided into mean fields and deviations (called ``fluctuations''), ${\bf B}=\bar{\bf B}+{\bf b}$ and ${\bf U}=\bar{\bf U}+{\bf u}$. Equations (\ref{equation:eq-1})-(\ref{equation:eq-2}) can then be averaged to give
\begin{eqnarray}
\label{equation:eq-3}
&& \partial_t\bar{\bf B}+\nabla\times\bar{\bf E}=0,\ \ \nabla\cdot \bar{\bf B}=0,\\
\label{equation:eq-4}
&& \nabla\times\bar{\bf B}=\mu \bar{\bf J},\ \ \bar{\bf J}=\sigma(\bar{\bf E}+\bar{\bf U}\times\bar{\bf B}+\mathcal{E}),
\end{eqnarray}
where $\mathcal{E}$ is the mean electromotive force due to the fluctuations; it is the crucial quantity of mean-field electrodynamics:
$$\mathcal{E}=\overline{\bf u\times b}. $$
In order to determine $\mathcal{E}$, the mean field $\bar{\bf U}$ and the fluctuations ${\bf u}$ are assumed to be known. Then the fluctuations ${\bf b}$ are determined by
\begin{eqnarray}
\label{equation:eq-5}
\eta \nabla\cdot\nabla {\bf b}+\nabla\times(\bar{\bf U}\times{\bf b}+{\bf G})
-\partial_t{\bf b}&=&-\nabla\times({\bf u}\times\bar{\bf B}),\\
\label{equation:eq-6}
{\bf G}&=&({\bf u}\times{\bf B})-\overline{{\bf u}\times{\bf B}}.
\end{eqnarray}
This equation implies that ${\bf b}$ can be considered as a sum ${\bf b}^{0}
+{\bf b}^{\bar{\bf B}}$, where ${\bf b}^{0} $ is independent of $\bar{\bf B}$ and ${\bf b}^{\bar{\bf B}}$ is linear and homogeneous in $\bar{\bf B}$. This in turn leads to
$$ \mathcal{E}=\mathcal{E}^0+\mathcal{E}^{\bar{\bf B}},$$
in which $\mathcal{E}^0 $ is independent of $\bar{\bf B}$ and $\mathcal{E}^{\bar{\bf B}}$ is linear and homogeneous in $\bar{\bf B}$.
\par
For simplicity, we assume that there is no mean motion, and ${\bf u}$ corresponds to a homogeneous isotropic turbulence. One can derive the relationship
\begin{eqnarray}
\label{equation:eq-7}
\mathcal{E}=\alpha\bar{\bf B}-\beta \nabla\times \bar{\bf B},
\end{eqnarray}
where the two coefficients, $\alpha$ and $\beta$, are independent of position and are determined by ${\bf u}$, and $\eta=\frac{1}{\mu\sigma}$. The term $\alpha\bar{\bf B}$ describes the $\alpha$-effect.
Substituting (\ref{equation:eq-7}) into (\ref{equation:eq-3})-(\ref{equation:eq-4}), one can get
\begin{eqnarray}
\label{equation:eq-8}
\partial_t\bar{\bf B}+\nabla\times((\eta+\beta)\nabla\times\bar{\bf B})&=&
\nabla\times(\alpha\bar{\bf B})+\nabla\times(\bar{\bf U}\times\bar{\bf B}),\\
\label{equation:eq-9}
\nabla\cdot \bar{\bf B}&=&0.
\end{eqnarray}
Here $ \lambda:=\eta+\beta$ is the effective magnetic diffusivity, covering both the magnetic diffusion at the microscopic level and the turbulent diffusion, and it is also affected by the temperature. The $\alpha$ term represents the turbulent magnetic helicity. In order to account for the feedback of the magnetic field on the fluid motions (the Lorentz force), we employ the so-called $\alpha$-quenching \cite{MR05} of the form
\begin{eqnarray}
\label{equation:eq-10}
\alpha(\bar{\bf B})=\frac{\alpha_0 f({\bf x},t) }{1+(\hat{R_m})^n|\bar{\bf B}/B_{eq}|^2},
\end{eqnarray}
where $\alpha_0>0$ is a constant, $0\leq n\leq 2$, $f({\bf x}, t) $ is a model-oriented function, the $\hat{R_m}$-dependent quenching expression should be regarded as a simplified steady-state expression for the nonlinear dynamo~\cite{MR06}, and ${B_{eq}}$ is the equipartition magnetic field, which can be assumed to be constant. For convenience, here and later, we still denote $\bar{\bf B}$ by ${\bf B}$ and write (\ref{equation:eq-8})-(\ref{equation:eq-9}) in the following form, with
$\theta({\bf x},t)$ denoting the temperature at location ${\bf x}\in \Omega$ and time $t$:
\begin{eqnarray}
\partial_t{ \bf B}+\nabla\times(\lambda(\theta)\nabla\times {\bf B})
-{\Lambda}\nabla(\nabla\cdot{\bf B})
&=&R_\alpha\nabla\times(\frac{f({\bf x},t){\bf B}}{1+\gamma|{\bf B}|^2})\nonumber\\
\label{equation:eq-11}
&+&\nabla\times({\bf U\times B}), \hspace{0.1cm}\ \ in \ \ (0,T]\times\Omega,\\
\label{equation:eq-12}
\nabla \cdot{\bf B}&=&0,\ \ \hspace{2cm}\ in \ \ (0,T]\times\Omega,
\end{eqnarray}
where $\lambda(\theta)$ is bounded and strictly positive, i.e. $0<\lambda_0\leq\lambda\leq \lambda_M< +\infty$, $\gamma$ is a constant parameter, and $R_\alpha$ is a dynamo parameter connected with the generation process of small-scale turbulence.
The system is supplemented with the boundary condition
\begin{equation}
\label{equation:eq-13}
\lambda(\theta)\nabla\times{\bf B}\times {\bf n}=0, \ \ on \ \ \partial\Omega,
\end{equation}
and the initial data
$${\bf B}({\bf x},0)={\bf B}_0({\bf x}).$$
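To make the saturation mechanism behind (\ref{equation:eq-10}) concrete, the following minimal Python sketch evaluates the quenched coefficient for fields of increasing strength; the function name and the sample parameter values are purely illustrative and are not taken from the model above.
\begin{verbatim}
import numpy as np

def alpha_quench(B, alpha0=1.0, f=1.0, Rm_hat=1.0, n=2, B_eq=1.0):
    # alpha(B) = alpha0 * f / (1 + Rm_hat^n * |B/B_eq|^2), cf. (10)
    return alpha0 * f / (1.0 + Rm_hat**n * np.linalg.norm(B)**2 / B_eq**2)

for scale in [0.1, 1.0, 10.0]:
    B = scale * np.array([1.0, 0.0, 0.0])
    print(scale, alpha_quench(B))   # decays like 1/|B|^2 for strong fields
\end{verbatim}
In particular, the $\alpha$-effect is essentially unquenched for $|{\bf B}|\ll B_{eq}$ and is suppressed once the mean field reaches the equipartition strength.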
\par
The local density of
Joule heating is given by
$${\bf E} \cdot {\bf J}=\sigma(|\nabla\times {\bf B}|^2-\nabla\times {\bf B}\cdot({\bf U\times B})
-R_\alpha\nabla\times {\bf B}\cdot(\frac{f({\bf x},t){\bf B}}{1+\gamma|{\bf B}|^2})).$$
Thus, from Fourier's law and the conservation of energy \cite{MR07,MR08,MR09}, we see that $\theta({\bf x},t)$ satisfies
\begin{eqnarray}
{\partial_t\theta}-\nabla\cdot(\kappa \nabla\theta)&=&\sigma(\theta)(|\nabla\times {\bf B}|^2-\nabla\times {\bf B}\cdot({\bf U\times B})\nonumber\\
\label{equation:eq-14}
&-&R_\alpha\nabla\times {\bf B}\cdot(\frac{f({\bf x},t){\bf B}}{1+\gamma|{\bf B}|^2})),\ \ \ in\ (0,T]\times\Omega,
\end{eqnarray}
with the initial data and boundary conditions \cite{MR09}
\begin{eqnarray}
\label{equation:eq-15}
&&\theta({\bf x},0)=\theta_0,\ \ in \ \ \Omega,\\
\label{equation;eq-16}
&&\theta=\theta_0, \ \ on\ \ (0,T]\times\Gamma_1,\\
\label{equation:eq-17}
&&-\kappa\frac{\partial\theta}{\partial {\bf n}}=\zeta(\theta^4-\theta_0^4)+
\omega(\theta-\theta_0),\ \ on\ \ (0,T]\times\Gamma_2,
\end{eqnarray}
where $\theta_0\in L^\infty(\Omega\cup\Gamma_1)$ is the background temperature, $\partial\Omega=\Gamma_1\cup\Gamma_2$, $\zeta$ is the heat convection coefficient, $\omega$ is the radiation coefficient, $\kappa$ is the thermal conductivity, and other physical constants such as the
density and the specific heat have been normalized. ${\bf n}$ is the unit outer normal to $\Omega$. It is reasonable to assume that
\begin{equation}
\label{equation:eq-18}
\theta_0\geq \theta_{min}>0, \quad \kappa\geq \kappa_{min}>0,
\end{equation}
where $\theta_{min}$ and $\kappa_{min}$ are positive constants.
For convenience, we define, for the positive temperature function,
$$\Psi(\theta)= \zeta\theta^4+
\omega\theta:=(\zeta|\theta|^3+
\omega)\theta.$$
Letting $\theta=\xi+\theta_0$, we have
$$\Psi(\theta)=\Psi(\xi+\theta_0),\ \ \Psi(\theta)-\Psi(\theta_0)=\zeta(\theta^4-\theta_0^4)+
\omega(\theta-\theta_0).$$
We also define
\begin{eqnarray*}
&&q(\xi):=\sigma(\theta)=\sigma(\xi+\theta_0),\\
&&\mathcal{K}({\bf B})=(|\nabla\times {\bf B}|^2-\nabla\times {\bf B}\cdot({\bf U\times B})
-R_\alpha\nabla\times {\bf B}\cdot(\frac{f({\bf x},t){\bf B}}{1+\gamma|{\bf B}|^2})),
\end{eqnarray*}
and
$$Q_T=(0,T]\times \Omega.$$
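Since $\mathcal{K}({\bf B})$ will appear repeatedly as the heating source, it may help to record its pointwise algebraic structure; the following Python sketch (with arbitrary sample vectors and unit parameters, purely for illustration and not part of the analysis) evaluates $\mathcal{K}$ from local values of $\nabla\times{\bf B}$, ${\bf U}$ and ${\bf B}$.
\begin{verbatim}
import numpy as np

def K_density(curlB, U, B, R_alpha=1.0, f=1.0, gamma=1.0):
    # K(B) = |curl B|^2 - curl B . (U x B)
    #        - R_alpha * curl B . (f B / (1 + gamma |B|^2))
    quench = f * B / (1.0 + gamma * np.dot(B, B))
    return (np.dot(curlB, curlB)
            - np.dot(curlB, np.cross(U, B))
            - R_alpha * np.dot(curlB, quench))

print(K_density(np.array([0.0, 1.0, 0.0]),
                np.array([1.0, 0.0, 0.0]),
                np.array([0.0, 0.0, 1.0])))
\end{verbatim}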
\par
The phenomenon of magneto-heating has been a main point of interest in many studies. In \cite{MR13}, the authors develop a mathematical model for the magnetohydrodynamic flow of
biofluids through a hydrophobic micro-channel with periodically contracting and expanding
walls under the influence of an axially applied electric field, and
different temperature jump factors are
used to investigate the thermomechanical interactions at the fluid-solid interface. In \cite{MR14}, the authors' aim is to investigate the mixed convection flow of an electrically conducting
and viscous incompressible fluid past an isothermal vertical surface with Joule heating in the presence
of a uniform transverse magnetic field fixed relative to the surface.
In \cite{MR15}, the authors study the coupling of the equations of steady-state magnetohydrodynamics (MHD) with
the heat equation when the buoyancy effects due to temperature differences in the flow
as well as the Joule effect and viscous heating are taken into account; existence results for weak solutions are presented under certain conditions on the data and some uniqueness results are derived.
In \cite{MR16}, the authors study a coupled system of Maxwell's equations with a nonlinear heat equation; they employ a time discretization based on
Rothe's method to provide energy estimates for the discretized system and prove the existence
of a weak solution to this coupled system with a controlled Joule heating term.
\par
The most significant differences of our mathematical model compared with the models mentioned above can
be summarized in three points:\\
$\bullet$ The model is coupled with the turbulent convection zone and the flow field.\\
$\bullet$ The nonlinear term includes the $\alpha$-quenching.\\
$\bullet$ The coefficient of magnetic diffusion is temperature dependent and the temperature field is controlled by mixed nonlinear boundary conditions.
\par
The outline of the paper is as follows. Section 2 collects the functional spaces, the weak formulation and the basic properties of the associated nonlinear operators. Section 3 introduces the regularized problem, studies its semi-discretization by Rothe's method and establishes its well-posedness and stability. Section 4 is devoted to the well-posedness of the original problem as the regularization parameter tends to zero.
\section{Preliminaries}
For any $p\geq 1$, let $L^p(\Omega)$ be the Lebesgue space with the norm
$$\|u\|_{L^p(\Omega)}=\Big(\int_\Omega|u({\bf x})|^pd{\bf x}\Big)^{1/p}.$$
For $p=\infty$, $L^\infty(\Omega)$ denotes the space of essentially bounded functions with the norm
$$\|u\|_{L^\infty(\Omega)}=\mathop{\rm ess\,sup}_{{\bf x}\in\Omega}|u({\bf x})|.$$
For $p=2$, $L^2(\Omega)$ denotes the Hilbert space equipped with the inner product and norm
$$(u,v)=\int_\Omega u({\bf x})v({\bf x})d{\bf x},\ \ \ \|u\|_0:=\|u\|_{L^2(\Omega)}=(u,u)^{1/2}. $$
Define $H^m(\Omega)=\{u\in L^2(\Omega): D^{\bf \varsigma} u\in L^2(\Omega), |{\bf \varsigma}|\leq m \}$, which is equipped with the following norm and semi-norm
$$\|u\|_{m,\Omega}=(\sum\limits_{|{\bf \varsigma}|\leq m}\|D^{\bf \varsigma}u\|_0^2)^{1/2},\ \ \ |u|_{m,\Omega}=(\sum\limits_{|{\bf \varsigma}|= m}\|D^{\bf \varsigma}u\|_0^2)^{1/2}.$$
The most frequently used spaces in the subsequent analysis
are the following two Sobolev spaces:
\begin{eqnarray*}
&&H(curl,\Omega)=\{{\bf u}\in L^2(\Omega)^3;\nabla\times{\bf u}\in L^2(\Omega)^3 \},\\
&&H(div,\Omega)=\{{\bf u}\in L^2(\Omega)^3;\nabla\cdot{\bf u}\in L^2(\Omega) \}
\end{eqnarray*}
and their subspaces
\begin{eqnarray*}
&&H_0(curl,\Omega)=\{{\bf u}\in H(curl,\Omega), {\bf u}\times {\bf n}=0, \ on\ \ \partial\Omega \},\\
&&H(div_0,\Omega)=\{{\bf u}\in H(div,\Omega), \nabla\cdot{\bf u}=0\ in\ \Omega \},
\end{eqnarray*}
which are equipped with the inner products
$$({\bf u},{\bf v})_{H(curl, \Omega)}=({\bf u},{\bf v})+(\nabla\times {\bf u},\nabla\times {\bf v}),$$
$$({\bf u},{\bf v})_{H(div, \Omega)}=({\bf u},{\bf v})+(\nabla\cdot {\bf u},\nabla\cdot {\bf v}), $$
and the norm
$$ \|{\bf u}\|_{H(curl,\Omega)}^2=\|{\bf u}\|_0^2+\|\nabla\times {\bf u}\|_0^2, \ \ \|{\bf u}\|_{H(div,\Omega)}^2=\|{\bf u}\|_0^2+\|\nabla\cdot {\bf u}\|_0^2.$$
To treat the constraint equation $\nabla\cdot {\bf B}=0$, we shall need the following subspace
$$\mathcal{V}=H_0(curl,\Omega)\cap H(div_0,\Omega) $$
with the inner product and norm
$$({\bf u},{\bf v})_{\bf V}= ({\bf u},{\bf v})+(\nabla\times {\bf u},\nabla\times {\bf v})+(\nabla\cdot {\bf u},\nabla\cdot {\bf v}),\ \
\|{\bf u}\|_{\bf V}^2=\|{\bf u}\|_0^2+\|\nabla\times {\bf u}\|_0^2 +\|\nabla\cdot {\bf u}\|_0^2.$$
We also need to define the functional spaces for the radiative and conductive heat equation
$$H^1_0(\Omega)=\{v\in H^1(\Omega),v|_{\Gamma_1}=0 \},$$
$$
\mathcal{Y}=H^1_0(\Omega)\cap L^5(\Gamma_2), \qquad \|v\|_{\mathcal{Y}}:=\|v\|_1+\|v\|_{L^5(\Gamma_2)},
$$
$$W^{0,4}(curl,\Omega)=\{{\bf u}\in L^2(\Omega)^3, \nabla\times {\bf u}\in L^4(\Omega)^3 \}.$$
The coupled system (\ref{equation:eq-11})-(\ref{equation:eq-17})
is equivalent to the following variational problem:
Find ${\bf B}\in L^2(0,T;{\mathcal{V}}) $ and $\xi \in L^2(0,T;\mathcal{Y})$
such that for any
$ {\bf\Phi}\in\mathcal{V}, \Upsilon\in\mathcal{Y}\cap L^\infty(\Omega)$
\begin{eqnarray}
(\partial_t{ \bf B},{\Phi})&+&(\lambda(\xi+\theta_0)\nabla\times {\bf B},\nabla\times{\Phi})
+{\Lambda}(\nabla\cdot{\bf B},\nabla\cdot{\Phi})=R_\alpha(\frac{f({\bf x},t){\bf B}}{1+\gamma|{\bf B}|^2}, \nabla\times{\Phi})\nonumber\\
\label{equation:eq-19}
&+&({\bf U\times B},\nabla\times{\Phi}), \ \ \forall {\Phi\in \mathcal{V}},\\
(\partial_t\xi, \Upsilon)&+&(\kappa\nabla\xi, \nabla\Upsilon)+<(\Psi(\xi+\theta_0)-\Psi(\theta_0)),\Upsilon>_{\Gamma_2}\nonumber\\
\label{equation:eq-20}
&=&
(q(\xi)\mathcal{K}({\bf B}),\Upsilon)-(\kappa\nabla\theta_0,\nabla\Upsilon),\forall \Upsilon\in \mathcal{Y}\cap L^\infty(\Omega),
\end{eqnarray}
where $<(\Psi(\xi+\theta_0)-\Psi(\theta_0)),\Upsilon>_{\Gamma_2}
=\int_{\Gamma_2}(\Psi(\xi+\theta_0)-\Psi(\theta_0))\Upsilon ds.$
In this paper, we consider the well-posedness of the coupled system (\ref{equation:eq-19})-(\ref{equation:eq-20}) by means of a regularization technique.
For convenience in the following proofs, we introduce two nonlinear operators defined as follows: for a given constant $\tau>0$, let $\mathcal{P}:\mathcal{V}\longrightarrow \mathcal{V}'$ and $\mathcal{L}:\mathcal{Y}\longrightarrow \mathcal{Y}'$ be such that
\begin{eqnarray}
\label{equation:eq-21}
<\mathcal{P}{\bf A},{ \Phi}>:&=&\frac{1}{\tau}({ \bf A},{ \Phi})
+(\lambda(\xi+\theta_0)\nabla\times {\bf A},\nabla\times{\Phi})
+{\Lambda}(\nabla\cdot{\bf A},\nabla\cdot{\Phi})\nonumber\\
&-&R_\alpha(\frac{f({\bf x},t){\bf A}}{1+\gamma|{\bf A}|^2}, \nabla\times{\Phi})
-({\bf U\times A},\nabla\times{\Phi}),\forall {\bf A},\Phi\in \mathcal{V},\\
<\mathcal{L}\omega,\Upsilon>:&=&\frac{1}{\tau}(\omega,\Upsilon)+(\kappa\nabla\omega, \nabla\Upsilon)\nonumber\\
\label{equation:eq-22}
&+&<\Psi(\omega+\theta_0)-\Psi(\theta_0),\Upsilon>_{\Gamma_2},
\forall \omega,\Upsilon\in \mathcal{Y}.
\end{eqnarray}
\begin{lemma}
\label{lemma:Lem-1}
There exist constants $C_1$, depending on $R_\alpha,\lambda_M,\Lambda, \|f\|_{L^\infty(0,T;L^\infty(\Omega))}$, $\|{\bf U}\|_{L^\infty(0,T;L^\infty(\Omega))}$, $C_2$, depending on $ \zeta,\omega, \Gamma_2$, $C_3$, depending on $\kappa$,
and on the parameter $\tau$, such that
\begin{eqnarray}
\label{equation:eq-23}
&&\|\mathcal{P}{\bf B}\|_{\mathcal{V}'}\leq C_1\|{\bf B}\|_{\mathcal{V}},\ \
\|\mathcal{L}\xi\|_{\mathcal{Y}'} \leq C_3\|\xi\|_1
+C_2(\sum_{j=1}^4\|\xi\|_{L^j(\Gamma_2)}^j).
\end{eqnarray}
\end{lemma}
\begin{proof}
Noting that $\frac{f({\bf x},t)}{1+\gamma|{\bf B}|^2}\leq 1$, $\lambda(\xi+\theta_0)\leq \lambda_M$ and
using the Cauchy-Schwarz inequality, we have
\begin{eqnarray}
&&<\mathcal{P}{\bf B},{ \Phi}>:=\frac{1}{\tau}({ \bf B},{ \Phi})
+(\lambda\nabla\times {\bf B},\nabla\times{\Phi})
+{\Lambda}(\nabla\cdot{\bf B},\nabla\cdot{\Phi})\nonumber\\
&-&R_\alpha(\frac{f({\bf x},t){\bf B}}{1+\gamma|{\bf B}|^2}, \nabla\times{\Phi})
-({\bf U\times B},\nabla\times{\Phi}),\nonumber\\
&\leq& \frac{1}{\tau}\|{ \bf B}\|_0\|\Phi\|_0
+\lambda_M\|\nabla\times {\bf B}\|_0\|\nabla\times{\Phi}\|_0
+{\Lambda}\|\nabla\cdot{\bf B}\|_0\|\nabla\cdot{\Phi}\|_0\nonumber\\
&+&R_\alpha\|f({\bf x},t)\|_{{L^\infty(0,T;L^\infty(\Omega))}}
\|{ \bf B}\|_0\|\nabla\times{\Phi}\|_0\nonumber\\
&+&\|U({\bf x},t)\|_{L^\infty(0,T;L^\infty(\Omega))}
\|{ \bf B}\|_0\|\nabla\times{\Phi}\|_0\nonumber\\
\label{equation:eq-24}
&\leq&C_1\|{\bf B}\|_{\mathcal{V}}\|{\bf \Phi}\|_{\mathcal{V}},
\end{eqnarray}
where $C_1=\max\{\frac{1}{\tau}, \lambda_M,\Lambda,R_\alpha\|f({\bf x},t)\|_{L^\infty(0,T;L^\infty(\Omega))},\|{\bf U}({\bf x},t)\|_{L^\infty(0,T;L^\infty(\Omega))}\}.$
\par
For the function $\theta_0>0$, we have
\begin{eqnarray}
&&|\Psi(\xi+\theta_0)-\Psi(\theta_0)|=|
(\zeta|\xi+\theta_0|^3+
\omega)(\xi+\theta_0)-(\zeta|\theta_0|^3+
\omega)\theta_0|\nonumber\\
\label{equation:eq-25}
&&\leq |\xi|(\zeta|\xi+\theta_0|^3+3\zeta\xi^2\theta_0+3\zeta\xi\theta_0^2
+3\zeta\theta_0^3+\omega).
\end{eqnarray}
Then there exists a constant $C_2$ depending
on $ \zeta,\omega, \Gamma_2$ and $\|\theta_0\|_{L^\infty(\Omega)}$
such that
\begin{eqnarray}
&&|\int_{\Gamma_2}(\Psi(\xi+\theta_0)-\Psi(\theta_0))\Upsilon ds|\nonumber\\
&&\leq \|(\Psi(\xi+\theta_0)-\Psi(\theta_0))\|_{L^2(\Gamma_2)}
\|\Upsilon\|_{L^2(\Gamma_2)}\nonumber\\
\label{equation:eq-26}
&&\leq C_2\|\Upsilon\|_{L^2(\Gamma_2)}\sum_{j=1}^4\|\xi\|_{L^j(\Gamma_2)}^j.
\end{eqnarray}
Therefore, there exists a constant $C_3$ depending on $\tau$ and $\kappa$ such that the boundedness of the nonlinear operator $\mathcal{L}$ can be estimated by
\begin{eqnarray}
<\mathcal{L}\xi,\Upsilon>&\leq & \frac{1}{\tau}\|\xi\|_0\|\Upsilon\|_0+\kappa\|\xi\|_1\|\Upsilon\|_1
+C_2(\sum_{j=1}^4\|\xi\|_{L^j(\Gamma_2)}^j)\|\Upsilon\|_{L^2(\Gamma_2)}\nonumber\\
&\leq& \max(\frac{1}{\tau},\kappa)\|\xi\|_1\|\Upsilon\|_1
+C_2(\sum_{j=1}^4\|\xi\|_{L^j(\Gamma_2)}^j)\|\Upsilon\|_{L^2(\Gamma_2)}\nonumber\\
\label{equation:eq-27}
&\leq& C_3\|\xi\|_1\|\Upsilon\|_1
+C_2(\sum_{j=1}^4\|\xi\|_{L^j(\Gamma_2)}^j)\|\Upsilon\|_{L^2(\Gamma_2)}
\end{eqnarray}
\end{proof}
\begin{lemma}
\label{lemma:Lem-2}
There exist positive constants $C_4$, depending on $\tau, \Lambda,\lambda_0, R_\alpha,
\|{ f}\|_{L^\infty(\Omega)},\|{\bf U}\|_{L^\infty(\Omega)}$,
and $C_5$, depending on
$\tau, \kappa$,
such that
\begin{eqnarray}
\label{equation:eq-28}
<\mathcal{P}{\bf B},{\bf B}>\geq C_4\|{\bf B}\|_{\mathcal{V}}^2,
\ \ <\mathcal{L}\xi,\xi>\geq C_5\|\xi\|^2_1
+\frac{\zeta}{8}\|\xi\|^5_{L^5(\Gamma_2)}.
\end{eqnarray}
\end{lemma}
\begin{proof} From Young's inequality and $\lambda(\xi+\theta_0)\geq \lambda_0$, we have
\begin{eqnarray}
<\mathcal{P}{\bf B},{\bf B}>&=&\frac{1}{\tau}({ \bf B},{ \bf B})
+(\lambda\nabla\times {\bf B},\nabla\times{\bf B})
+{\Lambda}(\nabla\cdot{\bf B},\nabla\cdot{\bf B})\nonumber\\
&-&R_\alpha(\frac{f({\bf x},t){\bf B}}{1+\gamma|{\bf B}|^2}, \nabla\times{\bf B})
-({\bf U\times \bf B},\nabla\times{\bf B})\nonumber\\
&\geq& \frac{1}{\tau}\|{\bf B}\|_0^2+\lambda_0\|\nabla\times {\bf B}\|_0^2
+{\Lambda}\|\nabla\cdot{\bf B}\|_0^2\nonumber\\
&-&R_\alpha\|f({\bf x},t)\|_{L^\infty(\Omega)}\|{\bf B}\|_0\|\nabla\times{\bf B}\|_0
-\|{\bf U}\|_{L^\infty(\Omega)}\|{\bf B}\|_0\|\nabla\times{\bf B}\|_0\nonumber\\
&\geq & \frac{1}{\tau}\|{\bf B}\|_0^2+\lambda_0\|\nabla\times {\bf B}\|_0^2
+{\Lambda}\|\nabla\cdot{\bf B}\|_0^2
-\frac{R_\alpha\|f\|_{L^\infty(\Omega)}}{4\epsilon_1}\|{\bf B}\|_0^2\nonumber\\
&-&\epsilon_1R_\alpha\|f\|_{L^\infty(\Omega)}\|\nabla\times{\bf B}\|_0^2
-\frac{\|{\bf U}\|_{L^\infty(\Omega)}}{4\epsilon_2}\|{\bf B}\|_0^2
-\epsilon_2\|{\bf U}\|_{L^\infty(\Omega)}\|\nabla\times{\bf B}\|_0^2\nonumber\\
&=&(\frac{1}{\tau}-\frac{R_\alpha\|f\|_{L^\infty(\Omega)}}{4\epsilon_1}
-\frac{\|{\bf U}\|_{L^\infty(\Omega)}}{4\epsilon_2})\|{\bf B}\|_0^2
+{\Lambda}\|\nabla\cdot{\bf B}\|_0^2\nonumber\\
&+&(\lambda_0-\epsilon_1R_\alpha\|f\|_{L^\infty(\Omega)}
-\epsilon_2\|{\bf U}\|_{L^\infty(\Omega)})\|\nabla\times{\bf B}\|_0^2\nonumber\\
\label{equation:eq-29}
&\geq & C_4\|{\bf B}\|_{\mathcal{V}}^2,
\end{eqnarray}
after choosing $\epsilon_1,\epsilon_2$ and $\tau$ such that the following constant is positive:
\begin{eqnarray*}
C_4=\min (\frac{1}{\tau}-\frac{R_\alpha\|f\|_{L^\infty(\Omega)}}{4\epsilon_1}-\frac{\|{\bf U}\|_{L^\infty(\Omega)}}{4\epsilon_2}, \Lambda,\lambda_0-\epsilon_1R_\alpha\|f\|_{L^\infty(\Omega)}-\epsilon_2\|{\bf U}\|_{L^\infty(\Omega)} ).
\end{eqnarray*}
Now we consider the coercivity of the nonlinear operator $\mathcal{L}$.
For the function
$$\Psi(t)=\zeta |t|^3t+\omega t,\ \ \Psi'(t)=4\zeta|t|^3+\omega>0,$$
we know that
$\Psi(t)$ is a monotone function, and
there holds
$$<\Psi(v)-\Psi(w),v-w>|_{\Gamma_2}\geq \frac{\zeta}{8}\|v-w\|^5_{L^5(\Gamma_2)}
+\omega\|v-w\|^2_{L^2(\Gamma_2)}, $$
then we have
$$<\Psi(\xi+\theta_0)-\Psi(\theta_0),\xi>|_{\Gamma_2}
\geq \frac{\zeta}{8}\|\xi\|^5_{L^5(\Gamma_2)}+\omega \|\xi\|^2_{L^2(\Gamma_2)},$$
and
$$ <\Psi(v+\theta_0)-\Psi(w+\theta_0),v-w>|_{\Gamma_2}
\geq \frac{\zeta}{8}\|v-w\|^5_{L^5(\Gamma_2)}+\omega\|v-w\|^2_{L^2(\Gamma_2)}.$$
Therefore, we have
\begin{eqnarray}
<\mathcal{L}\xi,\xi>&=&\frac{1}{\tau}(\xi,\xi)+(\kappa\nabla\xi, \nabla\xi)
+<\Psi(\xi+\theta_0)-\Psi(\theta_0),\xi>_{\Gamma_2}\nonumber\\
&\geq& \frac{1}{\tau}\|\xi\|_0^2+\kappa\|\nabla\xi\|^2_0
+\frac{\zeta}{8}\|\xi\|^5_{L^5(\Gamma_2)}+\omega \|\xi\|_{L^2(\Gamma_2)}^2\nonumber\\
\label{equation:eq-30}
&\geq& C_5\|\xi\|^2_1+\frac{\zeta}{8}\|\xi\|^5_{L^5(\Gamma_2)},
\end{eqnarray}
where $C_5=\min\{\frac{1}{\tau},\kappa \}$.
\end{proof}
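The boundary estimate above rests on the pointwise inequality $(\Psi(v)-\Psi(w))(v-w)\geq \frac{\zeta}{8}|v-w|^5+\omega|v-w|^2$, with equality attained at $w=-v$. As a quick sanity check (not part of the proof), it can be tested on random samples:
\begin{verbatim}
import numpy as np

def Psi(t, zeta=1.0, omega=1.0):
    return zeta * abs(t)**3 * t + omega * t

rng = np.random.default_rng(0)
zeta, omega = 1.0, 1.0
for _ in range(10**4):
    v, w = rng.uniform(-10.0, 10.0, size=2)
    lhs = (Psi(v, zeta, omega) - Psi(w, zeta, omega)) * (v - w)
    rhs = zeta / 8.0 * abs(v - w)**5 + omega * (v - w)**2
    assert lhs >= rhs - 1e-8 * (1.0 + abs(rhs))
print("pointwise monotonicity inequality holds on all samples")
\end{verbatim}
Since equality holds for $w=-v$, the constant $\frac{\zeta}{8}$ in the boundary term cannot be improved.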
\begin{lemma}
\label{lemma:Lem-31}
For any vectors ${\bf A},{\bf B}$ and any parameter $\gamma>0$, there holds
\begin{eqnarray*}
|\frac{\bf B}{1+\gamma|{\bf B}|^2}
-\frac{\bf A}{1+\gamma|{\bf A}|^2}|\leq \frac{9}{4}|{\bf B}-{\bf A}|.
\end{eqnarray*}
\end{lemma}
\begin{proof}
By a direct calculation, we have
\begin{eqnarray*}
&&|\frac{\bf B}{1+\gamma|{\bf B}|^2}
-\frac{\bf A}{1+\gamma|{\bf A}|^2}|\\
&&\leq \frac{|{\bf B-A}|}{1+\gamma|{\bf B}|^2}+\frac{\gamma|{\bf A}|\,\big||{\bf A}|-|{\bf B}|\big|\,(|{\bf A}|+|{\bf B}|)}{(1+\gamma|{\bf A}|^2)(1+\gamma|{\bf B}|^2)}\\
&&\leq |{\bf B-A}|\frac{(1+2\gamma|{\bf A}|^2+\gamma|{\bf A}||{\bf B}|)}{(1+\gamma|{\bf A}|^2)(1+\gamma|{\bf B}|^2)}.
\end{eqnarray*}
By the symmetry, we have
\begin{eqnarray*}
&&|\frac{\bf A}{1+\gamma|{\bf A}|^2}
-\frac{\bf B}{1+\gamma|{\bf B}|^2}|\\
&&\leq |{\bf B-A}|\frac{(1+2\gamma|{\bf B}|^2+\gamma|{\bf A}||{\bf B}|)}{(1+\gamma|{\bf A}|^2)(1+\gamma|{\bf B}|^2)}.
\end{eqnarray*}
Therefore, we have
\begin{eqnarray*}
&&|\frac{\bf A}{1+\gamma|{\bf A}|^2}
-\frac{\bf B}{1+\gamma|{\bf B}|^2}|\\
&&\leq |{\bf B-A}|\frac{(1+\gamma|{\bf A}|^2+\gamma|{\bf B}|^2+\gamma|{\bf A}||{\bf B}|)}{(1+\gamma|{\bf A}|^2)(1+\gamma|{\bf B}|^2)}\\
&&\leq |{\bf B-A}| \frac{(1+\frac{3}{2}\gamma|{\bf A}|^2)(1+\frac{3}{2}\gamma|{\bf B}|^2)}{(1+\gamma|{\bf A}|^2)(1+\gamma|{\bf B}|^2)}\\
&&\leq \frac{9}{4}|{\bf B-A}|.
\end{eqnarray*}
\end{proof}
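A numerical spot check of Lemma \ref{lemma:Lem-31} can be carried out as follows (a sanity check only; the elementary estimate above yields the convenient constant $\frac{9}{4}$, which is not claimed to be sharp):
\begin{verbatim}
import numpy as np

def T_map(x, gamma):
    return x / (1.0 + gamma * np.dot(x, x))

rng = np.random.default_rng(1)
worst = 0.0
for _ in range(10**4):
    gamma = rng.uniform(0.1, 10.0)
    A, B = rng.normal(size=3), rng.normal(size=3)
    ratio = (np.linalg.norm(T_map(B, gamma) - T_map(A, gamma))
             / (np.linalg.norm(B - A) + 1e-15))
    worst = max(worst, ratio)
print("largest observed ratio:", worst, " (bound 9/4 =", 9.0 / 4.0, ")")
\end{verbatim}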
\begin{lemma}
\label{lemma:Lem-3}
The operators $\mathcal{P}$ and $\mathcal{L}$ are strictly
monotone in the sense that
\begin{eqnarray}\label{equation:eq-31}
<\mathcal{P}{\bf B}-\mathcal{P}{\bf A},{\bf B-A}>
\geq C_6\|{\bf B-A}\|_{\mathcal{V}}^2,
\end{eqnarray}
where $C_6$ is taken as $\min\{(\frac{1}{\tau}-\frac{9R_\alpha\|f\|_{L^\infty(\Omega)}}{16\epsilon_3}
-\frac{\|{\bf U}\|_{L^\infty(\Omega)}}{4\epsilon_4}), {\Lambda},(\lambda_0-\epsilon_3R_\alpha\|f\|_{L^\infty(\Omega)}
-\epsilon_4\|{\bf U}\|_{L^\infty(\Omega)})\}$,
and
\begin{eqnarray}
\label{equation:eq-32}
<\mathcal{L}v-\mathcal{L}w,v-w>
\geq C_7\|v-w\|_1^2
+\frac{\zeta}{8}\|v-w\|^5_{L^5(\Gamma_2)},
\end{eqnarray}
where the constant $C_7$ can be taken as $C_7=\min (\tau^{-1},\kappa)$.
\end{lemma}
\begin{proof}From Young's inequality and Lemma \ref{lemma:Lem-31}, we have
\begin{eqnarray}
&&<\mathcal{P}{\bf B}-\mathcal{P}{\bf A},{\bf B-A}>\nonumber\\
&=&\frac{1}{\tau}({\bf B-A},{\bf B-A})
+\lambda(\nabla\times({\bf B-A}), \nabla\times({\bf B-A}))
+{\Lambda}(\nabla\cdot({\bf B-A}),\nabla\cdot({\bf B-A}))\nonumber\\
&-&R_\alpha(\frac{f({\bf x},t)}{1+\gamma|{\bf B}|^2}{\bf B}
-\frac{f({\bf x},t)}{1+\gamma|{\bf A}|^2}{\bf A},\nabla\times({\bf B-A}))
-({\bf U}\times ({\bf B-A}),\nabla\times({\bf B-A}))\nonumber\\
&\geq &\frac{1}{\tau}\|{\bf B-A}\|_0^2+\lambda_0\|\nabla\times ({\bf B-A})\|_0^2
+{\Lambda}\|\nabla\cdot({\bf B-A})\|_0^2
-\frac{9R_\alpha\|f\|_{L^\infty(\Omega)}}{16\epsilon_3}\|{\bf B-A}\|_0^2\nonumber\\
&-&\epsilon_3R_\alpha\|f\|_{L^\infty(\Omega)}\|\nabla\times({\bf B-A})\|_0^2
-\frac{\|{\bf U}\|_{L^\infty(\Omega)}}{4\epsilon_4}\|{\bf {\bf B-A}}\|_0^2
-\epsilon_4\|{\bf U}\|_{L^\infty(\Omega)}\|\nabla\times({\bf B-A})\|_0^2\nonumber\\
&=&(\frac{1}{\tau}-\frac{R_\alpha\|f\|_{L^\infty(\Omega)}}{4\epsilon_3}
-\frac{\|{\bf U}\|_{L^\infty(\Omega)}}{4\epsilon_4})\|{\bf B-A}\|_0^2
+{\Lambda}\|\nabla\cdot({\bf B-A})\|_0^2\nonumber\\
&+&(\lambda_0-\epsilon_3R_\alpha\|f\|_{L^\infty(\Omega)}
-\epsilon_4\|{\bf U}\|_{L^\infty(\Omega)})\|\nabla\times({\bf B-A})\|_0^2\nonumber\\
\label{equation:eq-33}
&\geq & C_6\|{\bf B-A}\|_{\mathcal{V}}^2,
\end{eqnarray}
where $C_6$ is taken as $\min\{(\frac{1}{\tau}-\frac{9R_\alpha\|f\|_{L^\infty(\Omega)}}{16\epsilon_3}
-\frac{\|{\bf U}\|_{L^\infty(\Omega)}}{4\epsilon_4}), {\Lambda},(\lambda_0-\epsilon_3R_\alpha\|f\|_{L^\infty(\Omega)}
-\epsilon_4\|{\bf U}\|_{L^\infty(\Omega)})\}$.
\par
Since $\Psi(\cdot)$ is a monotone function, we have
\begin{eqnarray}
&&<\mathcal{L}v-\mathcal{L}w,v-w>\nonumber\\
&=&\frac{1}{\tau}\|v-w\|_0^2
+\kappa\|\nabla (v-w)\|_0^2
+\int_{\Gamma_2}(\Psi(v+\theta_0)-\Psi(w+\theta_0))(v-w)ds\nonumber\\
&\geq&\frac{1}{\tau}\|v-w\|_0^2
+\kappa\|\nabla (v-w)\|_0^2
+\frac{\zeta}{8}\|v-w\|^5_{L^5(\Gamma_2)}+\omega\|v-w\|^2_{L^2(\Gamma_2)}\nonumber\\
\label{equation:eq-34}
&\geq & C_7\|v-w\|_1^2
+\frac{\zeta}{8}\|v-w\|^5_{L^5(\Gamma_2)}+\omega\|v-w\|^2_{L^2(\Gamma_2)},
\end{eqnarray}
where $C_7=\min (\tau^{-1},\kappa)$.
\end{proof}
\begin{lemma}
\label{lemma:Lem-4}
The nonlinear operators $\mathcal{P}:\mathcal{V}\longrightarrow \mathcal{V}'$ and
$\mathcal{L}:\mathcal{Y}\longrightarrow \mathcal{Y}'$ are hemi-continuous, that is, the functions
$$\mathcal{S}(s)=<\mathcal{P}({\bf R}+s{\bf Q}), {\bf \Phi}>,\ \
\mathcal{Z}(s)=<\mathcal{L}(v+su), {w}>$$
are continuous in $s\in [0,1]$ for any ${
\bf Q,R,\Phi}\in \mathcal{V}$ and $u,v,w\in \mathcal{Y}$.
\end{lemma}
\begin{proof}
For convenience, we denote ${\bf Q}(s)={\bf R}+s{\bf Q}$.
For any $s,s_0\in [0,1]$, we have
\begin{eqnarray}
|\mathcal{S}(s)-\mathcal{S}(s_0)|
&=&<\mathcal{P}({\bf Q}(s))-\mathcal{P}({\bf Q}(s_0)), {\bf \Phi}>\nonumber\\
&=&\frac{1}{\tau}({\bf Q}(s)-{\bf Q}(s_0),{\bf \Phi})
+\lambda(\nabla\times({\bf Q}(s)-{\bf Q}(s_0)),\nabla\times{\bf \Phi})\nonumber\\
&+&\Lambda(\nabla\cdot({\bf Q}(s)-{\bf Q}(s_0)),\nabla\cdot{\bf \Phi})
-({\bf U}\times({\bf Q}(s)-{\bf Q}(s_0)),\nabla\times{\bf \Phi})\nonumber\\
&-&(R_\alpha\frac{f({\bf x},t)}{1+\gamma|{\bf Q}(s)|^2}{\bf Q}(s)
- R_\alpha\frac{f({\bf x},t)}{1+\gamma|{\bf Q}(s_0)|^2}{\bf Q}(s_0),\nabla\times{\bf \Phi})\nonumber\\
&\leq & \frac{1}{\tau}\|{\bf Q}\|_0\|{\bf \Phi}\|_0|s-s_0|
+\lambda_M\|\nabla\times{\bf Q}\|_0\|\nabla\times{\bf \Phi}\|_0|s-s_0 |\nonumber\\
&+&\Lambda\|\nabla\cdot{\bf Q}\|_0\|\nabla\cdot{\bf \Phi}\|_0|s-s_0 |
+\|{\bf U}\times{\bf Q}\|_0\|\nabla\times{\bf \Phi}\|_0|s-s_0 |\nonumber\\
&+&R_\alpha\|f({\bf x},t)\|_{L^\infty(\Omega)}\|{\bf Q}\|_0
\|\nabla\times{\bf \Phi}\|_0|s-s_0 |,
\end{eqnarray}
where we used $ \frac{1}{1+\gamma|{\bf Q}(s)|^2}\leq 1,\
\frac{1}{1+\gamma|{\bf Q}(s_0)|^2}\leq 1$. This shows that
$\mathcal{S}(s)$ is continuous on $ [0,1]$ for any ${\bf Q},{\bf R},{\bf \Phi}\in \mathcal{V}.$
\par
We also denote $u(s)=v+su$ for any $u,v\in \mathcal{Y}$ and $s\in [0,1]$.
Then for any $s,s_0\in [0,1]$, we have
\begin{eqnarray}
&&|\mathcal{Z}(s)-\mathcal{Z}(s_0)|
=<\mathcal{L}u(s)-\mathcal{L}u(s_0), {w}>\nonumber\\
&=&\frac{1}{\tau}(u(s)-u(s_0),w)+\kappa(\nabla(u(s)-u(s_0)),\nabla w)+\int_{\Gamma_2}
(\Psi(u(s))-\Psi(u(s_0)))wds\nonumber\\
&\leq&|s-s_0|\Big(\frac{1}{\tau}\|u\|_0\|w\|_0+\kappa\|\nabla u\|_0\|\nabla w\|_0
+\int_{\Gamma_2}(4\zeta(|u|+|v|)^3 +\omega)|u||w|ds\Big),
\end{eqnarray}
which means $\mathcal{Z}(s)$ is continuous on $ [0,1]$ for any $u,v\in \mathcal{Y}$.
\end{proof}
\section{The Regularized Problem}
Note that the test function $\Upsilon$ in (\ref{equation:eq-20}) has to belong to $L^\infty(\Omega)$,
which substantially increases the difficulties in analyzing
the well-posedness. In order to deal with this problem, a regularization technique can be
employed: given a small parameter $0<\epsilon<1$, find
${\bf B}\in L^2(0,T;{\mathcal{V}}) $ and $\xi \in L^2(0,T;\mathcal{Y})$
such that
\begin{eqnarray}
(\partial_t{ \bf B},{\Phi})&+&(\lambda(\xi+\theta_0)\nabla\times {\bf B},\nabla\times{\Phi})
+{\Lambda}(\nabla\cdot{\bf B},\nabla\cdot{\Phi})=R_\alpha(\frac{f({\bf x},t){\bf B}}{1+\gamma|{\bf B}|^2}, \nabla\times{\Phi})\nonumber\\
\label{equation:eq-37}
&+&({\bf U\times B},\nabla\times{\Phi}), \ \ \forall {\Phi\in \mathcal{V}},\\
(\partial_t\xi, \Upsilon)&+&(\kappa\nabla\xi, \nabla\Upsilon)
+<(\Psi(\xi+\theta_0)-\Psi(\theta_0)),\Upsilon>_{\Gamma_2}\nonumber\\
\label{equation:eq-38}
&=&
([q(\xi)\mathcal{K}({\bf B})]_\epsilon,\Upsilon)-(\kappa\nabla\theta_0,\nabla\Upsilon),\forall \Upsilon\in
\mathcal{Y},
\end{eqnarray}
where $[\mathcal{D}]_\epsilon$ is the cut-off of $\mathcal{D}$ defined by
\begin{eqnarray*}
[\mathcal{D}]_\epsilon=\frac{\mathcal{D}}{1+\epsilon|\mathcal{D}|}, \ \ \epsilon>0.
\end{eqnarray*}
It is clear that
$[\mathcal{D}]_\epsilon\in L^\infty(\Omega)$. If $\mathcal{D}\in L^p(\Omega)$, then
$$\lim\limits_{\epsilon\longrightarrow 0} \|[\mathcal{D}]_\epsilon
- \mathcal{D} \|_{L^{p/2}(\Omega)}=0.$$
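The two properties of the cut-off used below, namely the pointwise bound $|[\mathcal{D}]_\epsilon|\leq 1/\epsilon$ and the convergence $[\mathcal{D}]_\epsilon\rightarrow\mathcal{D}$ as $\epsilon\rightarrow 0$, are illustrated by the following toy Python sketch (scalar samples only; not part of the analysis).
\begin{verbatim}
import numpy as np

def cutoff(D, eps):
    # [D]_eps = D / (1 + eps |D|); bounded by 1/eps, and -> D as eps -> 0
    return D / (1.0 + eps * np.abs(D))

D = np.array([0.5, -3.0, 100.0, -1.0e6])
for eps in [1.0, 1e-2, 1e-4]:
    D_eps = cutoff(D, eps)
    assert np.all(np.abs(D_eps) <= 1.0 / eps + 1e-12)
    print(eps, np.max(np.abs(D_eps - D)))  # shrinks as eps -> 0 for fixed D
\end{verbatim}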
\subsection{Semi-discrete Approximation}
We will use Rothe's method \cite{MR10} to explore the well-posedness of the
solution of the regularized problem
(\ref{equation:eq-37})-(\ref{equation:eq-38}). Let $ N$ be a positive integer
and let an equidistant partition of $[0,T]$ be given by
$$t_n=n\tau, n=0,1,2,\cdots, N, \ \ \tau=T/N.$$
The semi-discrete approximation to (\ref{equation:eq-37})-(\ref{equation:eq-38})
can be formulated as follows: for
$\forall {\Phi\in \mathcal{V}}, \Upsilon\in
\mathcal{Y},$ find $ {\bf B}^n\in \mathcal {V}$ and
$\xi^n\in \mathcal{Y}, 1\leq n \leq N$ with initial data ${\bf B}^0={\bf B}_0({\bf x}), \xi^0=0$ such that,
\begin{eqnarray}
(\frac{{\bf B}^n-{\bf B}^{n-1}}{\tau},{\Phi})&+&
(\lambda(\xi^{n-1}+\theta_0)\nabla\times {\bf B}^n,\nabla\times{\Phi})
+{\Lambda}(\nabla\cdot{\bf B}^n,\nabla\cdot{\Phi})
\nonumber\\
\label{equation:eq-39}
&=&R_\alpha(\frac{f({\bf x},n\tau){\bf B}^n}{1+\gamma|{\bf B}^{n-1}|^2}, \nabla\times{\Phi})
+({\bf U\times }{\bf B}^{n},\nabla\times{\Phi}), \\
(\frac{\xi^n-\xi^{n-1}}{\tau}, \Upsilon)&+&(\kappa\nabla\xi^n, \nabla\Upsilon)
+<(\Psi(\xi^n+\theta_0)-\Psi(\theta_0)),\Upsilon>_{\Gamma_2}\nonumber\\
\label{equation:eq-40}
&=&
(q(\xi^{n-1})[\mathcal{K}({\bf B}^{n})]_\epsilon,\Upsilon)-(\kappa\nabla\theta_0,\nabla\Upsilon).
\end{eqnarray}
For convenience, we also denote the difference operator
$$\delta_\tau w=\frac{w^n-w^{n-1}}{\tau},\ in\ \ [t_{n-1},t_n].$$
Obviously, (\ref{equation:eq-39})-(\ref{equation:eq-40}) can be solved
sequentially: for given ${\bf B}^{n-1}$ and $\xi^{n-1}$, equation (\ref{equation:eq-39}) is independent of (\ref{equation:eq-40})
and determines ${\bf B}^{n}$; then (\ref{equation:eq-40}) can be solved using
${\bf B}^{n}$ from (\ref{equation:eq-39}) and $\xi^{n-1}$.
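Schematically, one Rothe sweep therefore has the following structure (a Python sketch of the time loop only; \texttt{solve\_magnetic\_step} and \texttt{solve\_heat\_step} are hypothetical placeholders for the nonlinear solvers of (\ref{equation:eq-39}) and (\ref{equation:eq-40}) and are not specified here).
\begin{verbatim}
def rothe_sweep(B0, xi0, T, N, solve_magnetic_step, solve_heat_step):
    # Time marching for (39)-(40): first update B^n from (B^{n-1}, xi^{n-1}),
    # then update xi^n from (B^n, xi^{n-1}).
    tau = T / N
    B, xi = [B0], [xi0]
    for n in range(1, N + 1):
        t_n = n * tau
        B_n = solve_magnetic_step(B[-1], xi[-1], t_n, tau)  # monotone operator P
        xi_n = solve_heat_step(xi[-1], B_n, t_n, tau)        # monotone operator L
        B.append(B_n)
        xi.append(xi_n)
    return B, xi
\end{verbatim}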
\subsection{Well-posedness of the Nonlinear Magnetic Equation}
Let $\tilde{\bf B}_\tau$ and ${\bf B}_\tau$ denote the piecewise constant
and piecewise linear interpolations using the discrete solutions, that is
\begin{eqnarray}
\label{equation:eq-41}
\tilde{\bf B}_\tau(\cdot,t)={\bf B}^n, {\bf B}_\tau(\cdot, t)
=L_n(t){\bf B}^n+(1-L_n(t)){\bf B}^{n-1},
\end{eqnarray}
for any $t\in [t_{n-1},t_n]$ and $1\leq n\leq N$ with $L_n(t)=(t-t_{n-1})/\tau$.
Obviously, we have
$$ \tilde{\bf B}_\tau\in L^2(0,T;\mathcal{V}),
{\bf B}_\tau\in C(0,T;\mathcal{V}).$$
We also denote $$\hat{\bf B}_\tau=\tilde{\bf B}_\tau(\cdot,t-\tau)={\bf B}^{n-1},
\ \ \forall t\in(t_{n-1},t_n].$$
Let $\tilde{\xi}_\tau$ and ${\xi}_\tau$ denote the piecewise constant
and piecewise linear interpolations using the discrete solutions, that is
\begin{eqnarray}
\label{equation:eq-41xi}
\tilde{\xi}_\tau(\cdot,t)={\xi}^n, {\xi}_\tau(\cdot, t)
=L_n(t){\xi}^n+(1-L_n(t)){\xi}^{n-1},
\end{eqnarray}
for any $t\in [t_{n-1},t_n]$ and $1\leq n\leq N$.
We also denote $$\hat{\xi}_\tau=\tilde{\xi}_\tau(\cdot,t-\tau)=\xi^{n-1},
\ \ \forall t\in(t_{n-1},t_n].$$
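For concreteness, all three interpolants can be evaluated from the discrete values as follows (a Python sketch, not part of the analysis; the discrete solutions are represented abstractly by any objects supporting scalar multiplication and addition, e.g. coefficient vectors).
\begin{verbatim}
import math

def rothe_interpolants(values, tau, t):
    # values = [w^0, ..., w^N] on the grid t_n = n*tau; for t in (t_{n-1}, t_n]:
    #   piecewise constant  w~_tau(t) = w^n
    #   piecewise linear    w_tau(t)  = L_n(t) w^n + (1 - L_n(t)) w^{n-1}
    #   shifted constant    w^_tau(t) = w^{n-1}
    n = max(1, math.ceil(t / tau))
    L = (t - (n - 1) * tau) / tau
    return values[n], L * values[n] + (1.0 - L) * values[n - 1], values[n - 1]

print(rothe_interpolants([0.0, 1.0, 4.0], tau=0.5, t=0.75))  # (4.0, 2.5, 1.0)
\end{verbatim}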
\begin{theorem}
\label{theorem:thm-5}
For any $1\leq n\leq N$ and for given ${\bf B}^{n-1}$, the weak formula
(\ref{equation:eq-39}) has a unique solution ${\bf B}^{n}\in \mathcal{V}$.
For a given $ {\bf B}^{n}\in\mathcal{V} $ and $ \xi^{n-1}\in \mathcal{Y}$,
the weak formula (\ref{equation:eq-40}) has a unique solution
$\xi^n\in \mathcal{Y}$.
\end{theorem}
\begin{proof}
We rewrite the weak formula (\ref{equation:eq-39}) as:
find ${\bf B}^{n}\in \mathcal{V}$ such that
\begin{eqnarray}
&&(\frac{{\bf B}^n}{\tau},{\Phi})+
(\lambda(\xi^{n-1}+\theta_0)\nabla\times {\bf B}^n,\nabla\times{\Phi})
+{\Lambda}(\nabla\cdot{\bf B}^n,\nabla\cdot{\Phi})\nonumber\\
&-&
R_\alpha(\frac{f({\bf x},n\tau){\bf B}^n}{1+\gamma|{\bf B}^{n-1}|^2}, \nabla\times{\Phi})
-({\bf U\times }{\bf B}^{n},\nabla\times{\Phi})
\nonumber\\
\label{equation:eq-42}
&=&(\frac{{\bf B}^{n-1}}{\tau},{\Phi}),\ \ \forall \Phi\in \mathcal{V},
\end{eqnarray}
which is equivalent to the nonlinear operator equation
\begin{eqnarray}
\label{equation:eq-43}
\mathcal{P}{\bf B}^n=F_{n-1},
\end{eqnarray}
where $F_{n-1}\in \mathcal{V}'$ is defined by
$ (F_{n-1},\Phi)=(\frac{{\bf B}^{n-1}}{\tau},{\Phi}). $
We also rewrite (\ref{equation:eq-40}) as:
find $\xi^{n}\in \mathcal{Y}$ such that
\begin{eqnarray}
&&(\frac{\xi^n}{\tau}, \Upsilon)+(\kappa\nabla\xi^n, \nabla\Upsilon)
+<(\Psi(\xi^n+\theta_0)-\Psi(\theta_0)),\Upsilon>_{\Gamma_2}\nonumber\\
\label{equation:eq-44}
&&=(\frac{\xi^{n-1}}{\tau}, \Upsilon)+
(q(\xi^{n-1})[\mathcal{K}({\bf B}^{n})]_\epsilon,\Upsilon)-(\kappa\nabla\theta_0,\Upsilon),
\forall \Upsilon\in \mathcal{Y},
\end{eqnarray}
which is equivalent to the nonlinear operator equation
\begin{eqnarray}
\label{equation:eq-45}
\mathcal{L}{\xi}^n=H_{n-1},
\end{eqnarray}
where $H_{n-1}\in \mathcal{Y}'$ is defined by
$$ (H_{n-1},\Upsilon)=(\frac{\xi^{n-1}}{\tau}, \Upsilon)+
(q(\xi^{n-1})[\mathcal{K}({\bf B}^{n})]_\epsilon,\Upsilon)-(\kappa\nabla\theta_0,\Upsilon),
\forall \Upsilon\in \mathcal{Y}. $$
From Lemma \ref{lemma:Lem-1}-Lemma \ref{lemma:Lem-4}, we know that
$\mathcal{P}$ and $\mathcal{L}$ are bounded, coercive, strictly
monotone, and hemi-continuous
operators on $\mathcal{V} $ and $\mathcal{Y}$, respectively.
From \cite{MR11,MR12}, we know that problem (\ref{equation:eq-42})
has a solution ${\bf B}^{n}\in \mathcal{V}$, and (\ref{equation:eq-44})
has a solution $\xi^n\in \mathcal{Y}$.
\par
Now we prove the uniqueness of the solutions.
Let ${\bf B}^{n},\check{\bf B}^{n}$ be two solutions
of (\ref{equation:eq-42}).
From Lemma \ref{lemma:Lem-3}, we have
$$0=<\mathcal{P}{\bf B}^{n}-\mathcal{P}\check{\bf B}^{n},
{\bf B}^{n}-\check{\bf B}^{n}>
\geq C_6\|{\bf B}^{n}-\check{\bf B}^{n}\|_{\mathcal{V}}^2.$$
We can conclude $ {\bf B}^{n}=\check{\bf B}^{n}$ in $\Omega$,
which means the uniqueness of the solution of (\ref{equation:eq-42}).
Let $\xi^n,\check{\xi}^n$ be two solutions of (\ref{equation:eq-44}).
From Lemma \ref{lemma:Lem-3}, we also have
$$ 0=<\mathcal{L}\xi^n-\mathcal{L}\check{\xi}^n,\xi^n-\check{\xi}^n>
\geq C_7\|\xi^n-\check{\xi}^n\|_1^2
+\frac{\zeta}{8}\|\xi^n-\check{\xi}^n\|_{L^5(\Gamma_2)}^5,$$
which means the uniqueness of the solution of (\ref{equation:eq-44}).
\end{proof}
\begin{lemma}
\label{lemma:Lem-6}
There exist two positive constants $C_8$ and $C$ depending
on $ R_\alpha$, $ \|f({\bf x},t)\|_{L^\infty(0,T;L^\infty(\Omega))}$,
$\|{\bf U}\|_{L^\infty(0,T;L^\infty(\Omega))}$ such that
\begin{eqnarray}
\label{equation:eq-47}
&&\|{\bf B}^n\|_0^2
+\sum_{i=1}^n\tau\lambda_0\| \nabla\times {\bf B}^i\|_0^2
+\sum_{i=1}^n\tau\Lambda\| \nabla\cdot {\bf B}^i\|_0^2
\leq C_8\|{\bf B}^0\|_0^2.\\
\label{equation:eq-48}
&& \|{\bf B}_\tau\|_{L^\infty(0,T;L^2(\Omega))}
+\sqrt{\lambda_0}\| \nabla\times {\bf B}_\tau\|_{L^2(0,T;L^2(\Omega))}
+\Lambda\| \nabla\cdot {\bf B}_\tau\|_{L^2(0,T;L^2(\Omega))}
\leq C,\\
\label{equation:eq-49}
&&\|\tilde{\bf B}_\tau\|_{L^\infty(0,T;L^2(\Omega))}
+\sqrt{\lambda_0}\| \nabla\times \tilde{\bf B}_\tau\|_{L^2(0,T;L^2(\Omega))}
+\Lambda\| \nabla\cdot \tilde{\bf B}_\tau\|_{L^2(0,T;L^2(\Omega))}
\leq C.
\end{eqnarray}
\end{lemma}
\begin{proof}
Taking $\Phi={\bf B}^n$ in (\ref{equation:eq-39}), we have
\begin{eqnarray}
&&({\bf B}^n-{\bf B}^{n-1},{\bf B}^n)
+\tau\lambda_0(\nabla\times {\bf B}^n,\nabla\times {\bf B}^n)
+\tau\Lambda(\nabla\cdot {\bf B}^n,\nabla\cdot {\bf B}^n)\nonumber\\
\label{equation:eq-50}
&\leq&\tau R_\alpha(\frac{f({\bf x},t)}{1+\gamma|{\bf B}^{n-1}|^2}{\bf B}^n, \nabla\times {\bf B}^n )
+\tau({\bf U}\times {\bf B}^n, \nabla\times {\bf B}^n).
\end{eqnarray}
Since
$$2({\bf B}^n-{\bf B}^{n-1},{\bf B}^n)
\geq \|{\bf B}^n\|_0^2-\|{\bf B}^{n-1}\|_0^2,$$
summing up (\ref{equation:eq-50}) from $i=1,2,\cdots,n$, we have
\begin{eqnarray}
&&\frac{1}{2}(\|{\bf B}^n\|_0^2-\|{\bf B}^0\|_0^2)
+\sum_{i=1}^n\tau\lambda_0\| \nabla\times {\bf B}^i\|_0^2
+\sum_{i=1}^n\tau\Lambda\| \nabla\cdot {\bf B}^i\|_0^2\nonumber\\
&\leq&\sum_{i=1}^n(\tau R_\alpha\|f({\bf x},t)\|_{L^\infty(\Omega)}\|{\bf B}^i\|_0
\| \nabla\times {\bf B}^i\|_0)\nonumber\\
&+&\sum_{i=1}^n(\tau \|{\bf U}\|_{L^\infty(\Omega)}\|{\bf B}^i\|_0
\| \nabla\times {\bf B}^i\|_0)\nonumber\\
&\leq&\sum_{i=1}^n\tau R_\alpha\|f({\bf x},t)\|_{L^\infty(\Omega)}
(\frac{\|{\bf B}^i\|_0^2}{4\delta_i}+\delta_i
\| \nabla\times {\bf B}^i\|_0^2)\nonumber\\
\label{equation:eq-51}
&+&\sum_{i=1}^n\tau\|{\bf U}\|_{L^\infty(\Omega)}
(\frac{\|{\bf B}^i\|_0^2}{4\bar{\delta}_i}+\bar{\delta}_i
\| \nabla\times {\bf B}^i\|_0^2).
\end{eqnarray}
Taking $ \delta_i, \bar{\delta}_i$ such that
$$ \frac{1}{2\tau}-\frac{R_\alpha\|f({\bf x},t)\|_{L^\infty(\Omega)}}{4\delta_n}
-\frac{\|{\bf U}\|_{L^\infty(\Omega)}}{4\bar{\delta}_n}>0,$$
$$\lambda_0-R_\alpha\|f({\bf x},t)\|_{L^\infty(\Omega)}\delta_i
-\|{\bf U}\|_{L^\infty(\Omega)}\bar{\delta}_i>0,$$
and applying Gronwall's inequality, we have
\begin{eqnarray}
\label{equation:eq-52}
\|{\bf B}^n\|_0^2
+\sum_{i=1}^n\tau\lambda_0\| \nabla\times {\bf B}^i\|_0^2
+\sum_{i=1}^n\tau\Lambda\| \nabla\cdot {\bf B}^i\|_0^2
\leq C_8\|{\bf B}^0\|_0^2,
\end{eqnarray}
where $C_8$ is independent of $ n$, which means
$$\|{\bf B}_\tau\|_{L^\infty(0,T;L^2(\Omega))}
+\sqrt{\lambda_0}\| \nabla\times {\bf B}_\tau\|_{L^2(0,T;L^2(\Omega))}
+\Lambda\| \nabla\cdot {\bf B}_\tau\|_{L^2(0,T;L^2(\Omega))}
\leq C.$$
Similarly, (\ref{equation:eq-49}) comes directly from (\ref{equation:eq-48}).
\end{proof}
\begin{lemma}
\label{lemma:Lem-7}
There exist two positive constants $C_9$ and $C$ depending
on $ \epsilon, \kappa, q_{\max},\zeta,\|\xi^0\|_{0},\|\nabla\theta_0\|_0$ such that
\begin{eqnarray}
\label{equation:eq-53}
&&\|{\xi}^n\|_0^2+\sum_{i=1}^n\tau\|\kappa \nabla \xi^i\|_0^2
+\sum_{i=1}^n\tau\omega\|\xi^i\|_{L^2(\Gamma_2)}^2
\leq C_9(\|{\xi}^{0}\|_0^2+\|\nabla\theta_0\|_0^2).\\
\label{equation:eq-54}
&&\|\xi_\tau\|_{L^\infty(0,T;L^2(\Omega))}
+\|\nabla\xi_\tau\|_{L^2(0,T;L^2(\Omega))}
+\frac{\zeta}{8}\|\xi_\tau\|_{L^2(0,T;L^5(\Gamma_2))}\leq C,\\
\label{equation:eq-55}
&&\|\tilde{\xi}_\tau\|_{L^\infty(0,T;L^2(\Omega))}
+\|\nabla\tilde{\xi}_\tau\|_{L^2(0,T;L^2(\Omega))}
+\frac{\zeta}{8}\|\tilde{\xi}_\tau\|_{L^2(0,T;L^5(\Gamma_2))}\leq C.
\end{eqnarray}
\end{lemma}
\begin{proof}
\par
Taking $\Upsilon=\xi^n$ in (\ref{equation:eq-40}), we have
\begin{eqnarray}
({\xi^n-\xi^{n-1}}, \xi^n)&+&{\tau}(\kappa\nabla\xi^n, \nabla\xi^n)
+{\tau}<(\Psi(\xi^n+\theta_0)-\Psi(\theta_0)),\xi^n>_{\Gamma_2}\nonumber\\
\label{equation:eq-56}
&=&
{\tau}(q(\xi^{n-1})[\mathcal{K}({\bf B}^{n})]_\epsilon,\xi^n)-\tau(\kappa\nabla\theta_0,\nabla\xi^n).
\end{eqnarray}
Firstly, we estimate the right-hand side of (\ref{equation:eq-56}). Note
that
\begin{eqnarray}
\label{equation:eq-57}
|[\mathcal{K}({\bf B}^{n})]_\epsilon|=\frac{|\mathcal{K}({\bf B}^{n})|}{1+\epsilon|\mathcal{K}({\bf B}^{n})|}
\leq \frac{1}{\epsilon},
\end{eqnarray}
which leads to
\begin{eqnarray}
\label{equation:eq-58}
(q(\xi^{n-1})[\mathcal{K}({\bf B}^{n})]_\epsilon,\xi^n)
\leq \frac{1}{\epsilon}\|\xi^n\|_0.
\end{eqnarray}
And we also have
$$ \tau|(\kappa\nabla\theta_0,\nabla\xi^n)|\leq \kappa\tau\|\nabla\theta_0\|_0\|\nabla\xi^n\|_0. $$
Since
$$2({\xi}^n-{\xi}^{n-1},{\xi}^n)
\geq \|{\xi}^n\|_0^2-\|{\xi}^{n-1}\|_0^2,$$
and
$$<\Psi(\xi^n+\theta_0)-\Psi(\theta_0),\xi^n>_{\Gamma_2}
\geq \frac{\zeta}{8}\|\xi^n\|^5_{L^5(\Gamma_2)}
+\omega \|\xi^n\|^2_{L^2(\Gamma_2)}\geq \frac{\zeta}{8} \|\xi^n\|^5_{L^5(\Gamma_2)},$$
summing up (\ref{equation:eq-56}) from $i=1,2,\cdots,n$, we have
\begin{eqnarray}
&&\frac{1}{2}(\|{\xi}^n\|_0^2-\|{\xi}^{0}\|_0^2)
+\sum_{i=1}^n\tau\|\kappa \nabla \xi^i\|_0^2
+\sum_{i=1}^n \frac{\zeta\tau}{8}\|\xi^i\|_{L^5(\Gamma_2)}^5\nonumber\\
\label{equation:eq-59}
&\leq & \sum_{i=1}^n(\frac{\tau }{\epsilon}\|\xi^i\|_0+\kappa\tau\|\nabla\theta_0\|_0\|\nabla\xi^i\|_0)\nonumber\\
&\leq &\sum_{i=1}^n (\frac{\tau^2}{4\epsilon^2\hat{\delta}_i}
+\hat{\delta}_i\|\xi^i\|_0^2+\frac{\kappa\tau}{4\varsigma}\|\nabla\theta_0\|_0^2
+\kappa\tau\varsigma\|\nabla\xi^i\|_0^2)
\end{eqnarray}
Taking $\hat{\delta}_n$ and $ \varsigma$ such that $\hat{\delta}_n+\kappa\tau\varsigma\leq \frac{1}{2}$,
and using Gronwall's inequality, we have
\begin{eqnarray}
\label{equation:eq-60}
\|{\xi}^n\|_0^2+\sum_{i=1}^n\tau\|\kappa \nabla \xi^i\|_0^2
+\sum_{i=1}^n\frac{\zeta\tau}{8}\|\xi^i\|_{L^5(\Gamma_2)}^5
\leq C_9(\|{\xi}^{0}\|_0^2+\|\nabla\theta_0\|_0^2),
\end{eqnarray}
where $C_9$ is independent of $n$, which means
$$ \|\xi_\tau\|_{L^\infty(0,T;L^2(\Omega))}
+\|\nabla\xi_\tau\|_{L^2(0,T;L^2(\Omega))}
+\frac{\zeta}{8}\|\xi_\tau\|_{L^2(0,T;L^5(\Gamma_2))}\leq C.$$
Similarly, (\ref{equation:eq-55}) comes directly from (\ref{equation:eq-54}).
\end{proof}
\subsection{The Existence of the Solution of the Regularized Problem}
From Lemma \ref{lemma:Lem-6}-Lemma \ref{lemma:Lem-7}, we see that
the discrete solutions of the regularized problem are uniformly bounded in
$L^2(0,T;\mathcal{V})$ and $L^2(0,T;\mathcal{Y})$, respectively. Since
both $\mathcal{V}$ and $\mathcal{Y}$ are reflexive, there exist a subsequence of
$\tilde{\bf B}_\tau$ and a subsequence of ${\bf B}_\tau$, which have
common subscripts and are denoted by the same notations, such that
$$\tilde{\bf B}_\tau\rightharpoonup {\bf B},\ \
{\bf B}_\tau\rightharpoonup {\bf B},\ in \ \ L^2(0,T;\mathcal{V}),$$
where $ {\bf B}\in L^2(0,T;\mathcal{V})$ and "$\rightharpoonup $" denotes weak
convergence of the sequences.
Similarly, there exist a subsequence of
$\tilde{\xi}_\tau$ and a subsequence of ${\xi}_\tau$ with the same
subscripts such that
$$\tilde{\xi}_\tau \rightharpoonup \xi,\ \
{\xi}_\tau\rightharpoonup\xi , \ in \ \ L^2(0,T;\mathcal{Y}).$$
Furthermore, since $L^2(Q_T)$ is embedded compactly into $L^1(Q_T)$, we
have
\begin{eqnarray}
\label{equation:eq-61}
&&\tilde{\bf B}_\tau\longrightarrow {\bf B},
\nabla\times \tilde{\bf B}_\tau\longrightarrow \nabla\times{\bf B},
\ \ \tilde{\xi}_\tau \longrightarrow \xi,\ \ in\ \ L^1(Q_T),\\
\label{equation:eq-62}
&&{\bf B}_\tau\longrightarrow {\bf B},
\nabla\times {\bf B}_\tau\longrightarrow \nabla\times{\bf B},
\ \ \ {\xi}_\tau \longrightarrow \xi,\ \ in\ \ L^1(Q_T),
\end{eqnarray}
where "$ \longrightarrow $" means strong convergence of the sequences.
In this subsection, we shall prove that the limits $ {\bf B}, \xi$
solve the regularized problem (\ref{equation:eq-37})-(\ref{equation:eq-38}).
Without causing confusion, we always use $\{ \tilde{\bf B}_\tau\},
\{ {\bf B}_\tau\},\{ \tilde{\xi}_\tau\},\{ \xi_\tau\}$ to denote their convergent subsequences
in the rest of this section.
\begin{theorem}
The limit function ${\bf B}$ is a weak solution of problem
(\ref{equation:eq-37}) with initial data
${\bf B}({\bf x},0)={\bf B}_0({\bf x})$.
\end{theorem}
\begin{proof}
\par
Recall that $C_0^\infty(\Omega)\subset \mathcal{V}$. For any
${\bf v}({\bf x},t)={ \Phi}({\bf x}) \phi(t)$ with ${ \Phi}({\bf x})\in C_0^\infty(\Omega)$
and $\phi(t)\in C_0^\infty(0,T)$, from (\ref{equation:eq-39}),
after a direct calculation, we have
\begin{eqnarray}
\label{equation:eq-63}
\int_{Q_T}\frac{ \partial {\bf B}_\tau}{\partial t} {\bf v}
&+&\int_{Q_T}\lambda(\hat{\xi}_\tau+\theta_0) \nabla\times \tilde{\bf B}_\tau \nabla\times {\bf v}
+\int_{Q_T}\Lambda \nabla\cdot \tilde{\bf B}_\tau \cdot\nabla\cdot{\bf v}\nonumber\\
&=&R_\alpha \int_{Q_T}\frac{f({\bf x},t)}{1+\gamma|\hat{\bf B}_\tau|^2}
\tilde{\bf B}_\tau \cdot\nabla\times {\bf v}
+\int_{Q_T}({\bf U}\times \tilde{\bf B}_\tau)\cdot \nabla\times {\bf v}.
\end{eqnarray}
For the first term, we have
\begin{eqnarray}
\label{equation:eq-64}
\lim\limits_{\tau\rightarrow 0}\int_{Q_T}\frac{ \partial {\bf B}_\tau}{\partial t} {\bf v}
=-\lim\limits_{\tau\rightarrow 0}\int_{Q_T}{\bf B}_\tau
\frac{ \partial {\bf v}}{\partial t}
=-\int_{Q_T}{\bf B}\cdot
\frac{ \partial {\bf v}}{\partial t}=\int_{Q_T}
\frac{ \partial {\bf B}}{\partial t}\cdot {\bf v}.
\end{eqnarray}
For the second and the third terms, since
$\nabla\times \tilde{\bf B}_\tau $ and $\nabla\cdot \tilde{\bf B}_\tau$
converge to $\nabla\times {\bf B}$ and $\nabla\cdot {\bf B}$, respectively,
we have
\begin{eqnarray*}
\lim\limits_{\tau\longrightarrow 0}
\int_{Q_T}\lambda(\hat{\xi}_\tau+\theta_0) \nabla\times \tilde{\bf B}_\tau \cdot\nabla\times {\bf v}
& = &\int_{Q_T}\lambda({\xi}+\theta_0) \nabla\times{\bf B}\cdot \nabla\times {\bf v},\\
\lim\limits_{\tau\longrightarrow 0}
\int_{Q_T}\Lambda \nabla\cdot\tilde{\bf B}_\tau \cdot\nabla\cdot {\bf v}
&=& \int_{Q_T}\Lambda \nabla\cdot{\bf B}\cdot \nabla\cdot {\bf v}.\ \
\end{eqnarray*}
Since both $f({\bf x},t)$ and $ {
\bf U}$ are Lipschitz continuous and
$\frac{1}{1+\gamma|\hat{\bf B}_\tau|^2}\longrightarrow
\frac{1}{1+\gamma|{\bf B}|^2}$ and
${\bf U}\times \tilde{\bf B}_\tau \longrightarrow {\bf U}\times {\bf B} $
strongly in $L^1(Q_T)$, we have
\begin{eqnarray}
\label{equation:eq-65}
&&\lim\limits_{\tau\longrightarrow 0}\int_{Q_T}\frac{1}{1+\gamma|\hat{\bf B}_\tau|^2}
\tilde{\bf B}_\tau\cdot \nabla\times {\bf v}=
\int_{Q_T}\frac{1}{1+\gamma|{\bf B}|^2}
{\bf B}\cdot \nabla\times {\bf v},\\
\label{equation:eq-66}
&&\lim\limits_{\tau\longrightarrow 0}\int_{Q_T}
{\bf U}\times \tilde{\bf B}_\tau \cdot \nabla\times{\bf v}
= \int_{Q_T}{\bf U}\times {\bf B} \cdot \nabla\times{\bf v}.
\end{eqnarray}
From (\ref{equation:eq-64})-(\ref{equation:eq-66}), we have
\begin{eqnarray}
\label{equation:eq-67}
\int_{Q_T}\frac{ \partial {\bf B}}{\partial t} {\bf v}
&+&\int_{Q_T}\lambda ({\xi}+\theta_0)\nabla\times {\bf B} \nabla\times {\bf v}
+\int_{Q_T}\Lambda \nabla\cdot {\bf B} \,\nabla\cdot {\bf v}\nonumber\\
&=&R_\alpha \int_{Q_T}\frac{f({\bf x},t)}{1+\gamma|{\bf B}|^2}
{\bf B} \cdot\nabla\times {\bf v}
+\int_{Q_T}({\bf U}\times {\bf B})\cdot \nabla\times {\bf v}.
\end{eqnarray}
Since $\phi(t)\in C_0^\infty(0,T)$ is arbitrary, for any ${ \Phi}\in C_0^\infty(\Omega)$,
we have
\begin{eqnarray}
\label{equation:eq-68}
\int_{\Omega}\frac{ \partial {\bf B}}{\partial t} { \Phi}
&+&\int_{\Omega}\lambda({\xi}+\theta_0) \nabla\times {\bf B}\cdot \nabla\times { \Phi}
+\int_{\Omega}\Lambda \nabla\cdot {\bf B} \,\nabla\cdot { \Phi}\nonumber\\
&=&R_\alpha\int_{\Omega} \frac{f({\bf x},t)}{1+\gamma|{\bf B}|^2}
{\bf B} \cdot\nabla\times { \Phi}
+\int_{\Omega}({\bf U}\times {\bf B})\cdot \nabla\times { \Phi}.
\end{eqnarray}
By the density of $C_0^\infty $ in $\mathcal{V}$, the equation
(\ref{equation:eq-68}) holds
for any ${ \Phi}\in \mathcal{V}$, too.
\end{proof}
\begin{theorem}
The limit function $\xi$ is a weak solution of problem (\ref{equation:eq-38})
with the initial condition $\xi({\bf x},0)=0$.
\end{theorem}
\begin{proof}
For any $\eta({\bf x},t)=\Upsilon({\bf x})\phi(t)$ with
$\Upsilon({\bf x})\in C^\infty_0(\Omega)$ and $\phi(t)\in C_0^\infty(0,T), $
from (\ref{equation:eq-40}) we know that
\begin{eqnarray}
\int_{Q_T}\frac{\partial \xi_\tau}{\partial t} \eta
&+&\int_{Q_T}\kappa\nabla \tilde{\xi}_\tau\cdot \nabla\eta
+\int_0^T\int_{\Gamma_2}(\Psi(\tilde{\xi}_\tau+\theta_0)-\Psi(\theta_0))\eta\nonumber\\
\label{equation:eq-69}
&=&
\int_{Q_T}q(\hat{\xi}_\tau)[\mathcal{K}(\tilde{\bf B}_\tau)]_\epsilon\eta-\int_{Q_T}\kappa\nabla\theta_0\cdot \nabla\eta.
\end{eqnarray}
Similarly to (\ref{equation:eq-64}), using the weak convergence
in $L^2(0,T;\mathcal{Y})$,
we have
\begin{eqnarray}
\label{equation:eq-70}
\lim\limits_{\tau\longrightarrow 0}\int_{Q_T}\frac{\partial \xi_\tau}{\partial t}
\eta=\int_{Q_T}\frac{\partial \xi}{\partial t}\eta, \ \
\lim\limits_{\tau\longrightarrow 0}\int_{Q_T}
\kappa\nabla \tilde{\xi}_\tau\cdot \nabla\eta =
\int_{Q_T}
\kappa\nabla {\xi}\cdot \nabla\eta.
\end{eqnarray}
Since $ L^5(\Gamma_2)$ is embedded compactly into $ L^4(\Gamma_2)$ and
$\tilde{\xi}_\tau$ converges strongly to $\xi$ in $ L^4(\Gamma_2)$, we have
\begin{eqnarray}
\label{equation:eq-71}
\lim\limits_{\tau\longrightarrow 0}\int_0^T\int_{\Gamma_2}
(\Psi(\tilde{\xi}_\tau+\theta_0)-\Psi(\theta_0))\eta
=
\int_0^T\int_{\Gamma_2}
(\Psi({\xi}+\theta_0)-\Psi(\theta_0))\eta.
\end{eqnarray}
For the right-hand side of equation (\ref{equation:eq-69}), using the uniform boundedness and the strong convergence of $\hat{\xi}_\tau$ in
$L^2(0,T;\mathcal{Y})$ and the strong convergence of
$ \nabla\times \tilde{\bf B}_\tau \rightarrow \nabla\times {\bf B}$,
$ {\bf U}\times \tilde{\bf B}_\tau \rightarrow {\bf U}\times {\bf B}$,
$ \frac{f({\bf x},t)\tilde{\bf B}_\tau}{1+\gamma|\hat{\bf B}_\tau|^2}
\rightarrow \frac{f({\bf x},t){\bf B}}{1+\gamma|{\bf B}|^2}$
in $L^1(Q_T)$, we have
\begin{eqnarray}
\label{equation:eq-72}
\lim\limits_{\tau\rightarrow 0}
\int_{Q_T}q(\hat{\xi}_\tau)[\mathcal{K}(\tilde{\bf B}_\tau)]_\epsilon\eta
=
\int_{Q_T}q({\xi})[\mathcal{K}({\bf B})]_\epsilon\eta.
\end{eqnarray}
From (\ref{equation:eq-69})-(\ref{equation:eq-72}), we can get
\begin{eqnarray}
\label{equation:eq-73}
\int_{Q_T}\frac{\partial \xi}{\partial t}\eta&+&
\int_{Q_T}\kappa\nabla {\xi}\cdot \nabla\eta+\int_0^T\int_{\Gamma_2}
(\Psi({\xi}+\theta_0)-\Psi(\theta_0))\eta\nonumber\\
&=&
\int_{Q_T}q({\xi})[\mathcal{K}({\bf B})]_\epsilon\eta-\int_{Q_T}\kappa\nabla\theta_0\cdot \nabla\eta.
\end{eqnarray}
By the arbitrariness of $\phi(t)$, it follows that
\begin{eqnarray}
\label{equation:eq-74}
\int_{\Omega}\frac{\partial \xi}{\partial t}\Upsilon&+&
\int_{\Omega}\kappa\nabla {\xi}\cdot \nabla\Upsilon+\int_{\Gamma_2}
(\Psi({\xi}+\theta_0)-\Psi(\theta_0))\Upsilon\nonumber\\
&=&
\int_{\Omega}q({\xi})[\mathcal{K}({\bf B})]_\epsilon\Upsilon-\int_{\Omega}\kappa\nabla\theta_0\cdot \nabla\Upsilon,\ \ \forall \Upsilon \in
\mathcal{Y}\cap C^\infty(\Omega).
\end{eqnarray}
By the density of $\mathcal{Y}\cap C^\infty(\Omega)$ in $\mathcal{Y}$,
the above equation (\ref{equation:eq-74}) holds for any
$\Upsilon \in \mathcal{Y}.$
\par
Taking any $\Upsilon\in C_0^\infty(\Omega)$ and letting $\eta(t)=(T-t)\Upsilon$, we
have $\eta(0)=T\Upsilon$ and $\eta(T)=0.$ Using integration by parts, we have
\begin{eqnarray*}
&&T\int_\Omega \xi(0)\cdot \Upsilon({\bf x})
=-\int_0^T\int_\Omega\frac{\partial}{\partial t}(\xi \cdot \eta)
=\int_0^T\int_\Omega \xi\cdot \Upsilon-\int_0^T\int_\Omega
\frac{\partial \xi}{\partial t}\cdot \eta\\
&=&\lim\limits_{\tau\rightarrow 0}\int_0^T\int_\Omega {\xi}_\tau\cdot \Upsilon
+\int_0^T\int_{\Gamma_2}(\Psi(\xi+\theta_0)-\Psi(\theta_0))\eta
+\int_0^T\int_\Omega
[\kappa \nabla\xi \cdot \nabla \eta- q(\xi)\mathcal{K}({
\bf B})\cdot\eta]\\
&&=\lim\limits_{\tau\rightarrow 0}\int_0^T\int_\Omega [{\xi}_\tau\cdot \Upsilon
+[\kappa \nabla\tilde{\xi}_\tau \cdot \nabla \eta- q(\hat{\xi}_\tau)
\mathcal{K}(\tilde{
\bf B}_\tau)\cdot\eta]
+\lim\limits_{\tau\rightarrow 0}\int_0^T\int_{\Gamma_2}
(\Psi(\tilde{\xi}_\tau+\theta_0)-\Psi(\theta_0))\cdot\eta\\
&&=\lim\limits_{\tau\rightarrow 0}[\int_0^T\int_\Omega
\xi_\tau\cdot \Upsilon-\int_0^T\int_\Omega
\frac{\partial \xi_\tau}{\partial t}\cdot \eta]=\lim\limits_{\tau\rightarrow 0}
T\int_\Omega\xi_\tau(0)\cdot\Upsilon({\bf x})=0, \forall \Upsilon({\bf x})
\in C_0^\infty(\Omega).
\end{eqnarray*}
Therefore, $\xi({\bf x},0)=0,$ which finishes the proof.
\end{proof}
\subsection{Stability of the Regularized Problem}
Now we present the stability estimate of the regularized problem
to ensure the well-posedness of the equations
(\ref{equation:eq-19})- (\ref{equation:eq-20}).
\begin{lemma}
\label{lemma:Lem-71}
There exists a constant $C_{10}$ depending on $\Omega, T,
\lambda_0, \Lambda, R_\alpha,
\|f({\bf x},t)\|_{L^\infty(0,T,L^\infty(\Omega))}$ and
$ \|{\bf U}\|_{L^\infty(0,T,L^\infty(\Omega))} $ such that
\begin{eqnarray*}
\|{\bf B}\|_{L^\infty(0,T;L^2(\Omega))}+\sqrt{\lambda_0}\|\nabla\times{\bf B}\|_{L^2(0,T;L^2(\Omega))}
+\sqrt{\Lambda}\|\nabla\cdot{\bf B}\|_{L^2(0,T;L^2(\Omega))}\leq C_{10}\|{\bf B}_0\|_0.
\end{eqnarray*}
\end{lemma}
\begin{proof}
Taking $ \Phi={\bf B}$ in (\ref{equation:eq-37}) and using $\lambda(\xi+\theta_0)\geq \lambda_0>0$, we have
\begin{eqnarray}
\frac{1}{2}\frac{\partial}{\partial t}\|{\bf B}\|_0^2&+&
\lambda_0\|\nabla\times{\bf B}\|_0^2
+{\Lambda}\|\nabla\cdot{\bf B}\|_0^2\nonumber\\
&\leq&
R_\alpha(\frac{f({\bf x},t){\bf B}}{1+\gamma|{\bf B}|^2}, \nabla\times{\bf B})
\label{equation:eq-75}
+({\bf U\times B},\nabla\times{\bf B}).
\end{eqnarray}
Integrating the above with respect to $t$ over $[0,s]$ and using the Cauchy-Schwarz
and Young inequalities, we have
\begin{eqnarray}
\|{\bf B}\|^2_0&+&\int_0^s(\lambda_0\|\nabla\times{\bf B}\|_0^2+{\Lambda}\|\nabla\cdot{\bf B}\|_0^2)\nonumber\\
&\leq& \frac{R_\alpha\|f({\bf x},t)\|_{L^\infty(0,T,L^\infty(\Omega))}}{4a_1}
\int_0^s\|{\bf B}\|_0^2+\int_0^sa_1\|\nabla\times{\bf B}\|_0^2\nonumber\\
\label{equation:eq-76}
&+&\frac{\|{\bf U}\|_{L^\infty(0,T,L^\infty(\Omega))}}{4a_2}\int_0^s\|{\bf B}\|_0^2
+\int_0^sa_2\|\nabla\times{\bf B}\|_0^2+\|{\bf B}_0\|^2_0
\end{eqnarray}
Taking $a_1,a_2$ such that $\lambda_0-a_1-a_2>0$ and employing
Gronwall's inequality, we have
\begin{eqnarray}
\label{equation:eq-77}
\|{\bf B}\|_{L^\infty(0,T;L^2(\Omega))}+\sqrt{\lambda_0}\|\nabla\times{\bf B}\|_{L^2(0,T;L^2(\Omega))}
+\sqrt{\Lambda}\|\nabla\cdot{\bf B}\|_{L^2(0,T;L^2(\Omega))}\leq C_{10}\|{\bf B}_0\|_0.
\end{eqnarray}
The proof is completed.
\end{proof}
\begin{lemma}
\label{lemma:lem-8}
There exists a constant $C_{11}$ depending on $\Omega, T,\kappa,
\lambda, \Lambda, R_\alpha,
\|f({\bf x},t)\|_{L^\infty(0,T,L^\infty(\Omega))}$ and
$ \|{\bf U}\|_{L^\infty(0,T,L^\infty(\Omega))} $ such that
\begin{eqnarray}
\label{equation:eq-78}
\|\xi\|_{L^\infty(0,T;L^1(\Omega))}+\|\xi\|_{L^\infty(0,T;L^4(\Gamma_2))}^4
\leq C_{11}\|{\bf B}_0\|_0.
\end{eqnarray}
\end{lemma}
\begin{proof}
First we need define a function
$$h_\rho(s)=\frac{1}{\rho}sign(s)\min (|s|, \rho).$$
Obviously, $ h_\rho(s)$ is a bounded, absolutely continuous and increasing
function
on $ \mathbf{R}$. For any $\eta \in \mathcal{Y},$ one can verify that
$ h_\rho(\eta)\in \mathcal{Y}$ and
\begin{eqnarray}
\label{equation:eq-79} \nabla h_\rho(\eta)=\chi_{\{ {\bf x}\in \Omega: |\eta|<\rho \} }
\frac{1}{\rho}\nabla \eta,
\ \ h_\rho(\eta)\rightarrow sign(\eta)\ as\ \rho\rightarrow 0,
\ a.e.\ in\ \Omega,
\end{eqnarray}
where $\chi_{\{ {\bf x}\in \Omega: |\eta|<\rho \} } $
is the characteristic function.
\par
Taking $\Upsilon=h_\rho(\xi)$ in (\ref{equation:eq-38}), we have
\begin{eqnarray}
(\partial_t\xi, h_\rho(\xi))&+&(\kappa\nabla\xi, \nabla h_\rho(\xi))
+<(\Psi(\xi+\theta_0)-\Psi(\theta_0)),h_\rho(\xi)>_{\Gamma_2}\nonumber\\
\label{equation:eq-80}
&=&
([q(\xi)\mathcal{K}({\bf B})]_\epsilon,h_\rho(\xi))-(\kappa\nabla\theta_0,\nabla h_\rho(\xi)).
\end{eqnarray}
We now analyze (\ref{equation:eq-80}) term by term. From the convergence property
in (\ref{equation:eq-79}), we have
\begin{eqnarray}
\label{equation:eq-81}
&&\lim\limits_{\rho\rightarrow 0}\int_\Omega\frac{\partial\xi}{\partial t}
h_\rho(\xi)
=\int_\Omega\frac{\partial\xi}{\partial t} sign(\xi)
=\frac{\partial}{\partial t}\int_\Omega|\xi|,\\
\label{equation:eq-82}
&&\int_\Omega\kappa \nabla\xi\cdot \nabla h_\rho(\xi) =\frac{1}{\rho}
\int_\Omega \chi_{\{ {\bf x}\in \Omega: |\xi|<\rho \} }
\kappa|\nabla\xi|^2 \geq 0,\\
\label{equation:eq-83}
&&\lim\limits_{\rho\rightarrow 0}\int_{\Gamma_2}
(\Psi(\xi+\theta_0)-\Psi(\theta_0))h_\rho(\xi)
\geq \frac{\zeta}{8}\|\xi\|_{L^4(\Gamma_2)}^4
+\omega\|\xi\|_{L^1(\Gamma_2)},
\end{eqnarray}
and
\begin{eqnarray}
&&\Big|\int_\Omega[q(\xi)\mathcal{K}({\bf B})]_\epsilon h_\rho(\xi)\Big|
\leq \int_\Omega |q(\xi)\mathcal{K}({\bf B})|
\leq C\Big(\|\nabla\times{\bf B}\|_0^2\nonumber\\
&+&\frac{R_\alpha\|f({\bf x},t)\|_{L^\infty(0,T;L^\infty(\Omega))}}{4a_3}\|{\bf B}\|_0^2
+a_3\|\nabla\times{\bf B}\|_0^2\nonumber\\
\label{equation:eq-84}
&+&\frac{\|{\bf U}\|_{L^\infty(0,T;L^\infty(\Omega))}}{4a_4}\|{\bf B}\|_0^2
+a_4\|\nabla\times{\bf B}\|_0^2\Big).
\end{eqnarray}
From (\ref{equation:eq-81})- (\ref{equation:eq-84}) and Lemma \ref{lemma:Lem-71}, we have
\begin{eqnarray}
\label{equation:eq-85}
\frac{\partial}{\partial t}\int_\Omega|\xi|+
\frac{\zeta}{8}\|\xi\|_{L^4(\Gamma_2)}^4
\leq C(\|{\bf B}_0\|_0+\|\nabla\theta_0\|_0).
\end{eqnarray}
By integrating the inequality with
respect to $t$ over $[0,s]$ for any $s\in [0,T]$ and using Gronwall's inequality, we finish the proof.
\end{proof}
\begin{lemma}
\label{lemma:Lem-10}
There exists a constant $C_{12}$ depending only on $ \Omega,T,\lambda,\Lambda,R_\alpha,
\|f({\bf x},t)\|_{L^\infty(0,T,L^\infty(\Omega))}$ and
$ \|{\bf U}\|_{L^\infty(0,T,L^\infty(\Omega))} $ such that
\begin{eqnarray}
\label{equation:eq-911}
\int_{Q_T}\frac{\kappa|\nabla \xi|^2}{(1+|\xi|)^{\frac{3}{2}}}\leq C_{12}(\|{\bf B}_0\|_0+\kappa\|\nabla \theta_0\|_0^2).
\end{eqnarray}
\end{lemma}
\begin{proof}
Defining $ h(s)=sign(s)[1-(1+|s|)^{-\frac{1}{2}}]$, we have $|h(s)|\leq 1.$
For any $\eta\in \mathcal{Y}$, there holds $ h(\eta)\in \mathcal{Y}$ and
$\nabla h(\eta)=\frac{\nabla\eta}{2(1+|\eta|)^{\frac{3}{2}}}$.
Let $H(s)$ be the primitive function of $h(s)$ defined by
$$ H(s)=\int_0^s h(s')ds'=2+|s|-2\sqrt{1+|s|}\geq 0,$$
which implies
\begin{eqnarray}
\label{equation:eq-87}
\int_0^T\int_\Omega \frac{\partial \xi}{\partial t}h(\xi)=\int_0^T\frac{\partial}{\partial t}\int_\Omega H(\xi)=\int_\Omega H(\xi(T))\geq 0.
\end{eqnarray}
Taking $\Upsilon=h(\xi)$ in (\ref{equation:eq-38}), we have
\begin{eqnarray}
(\partial_t\xi, h(\xi))&+&(\kappa\nabla\xi, \nabla h(\xi))
+<(\Psi(\xi+\theta_0)-\Psi(\theta_0)),h(\xi)>_{\Gamma_2}\nonumber\\
\label{equation:eq-88}
&=&
([q(\xi)\mathcal{K}({\bf B})]_\epsilon,h(\xi))-(\kappa\nabla\theta_0,\nabla h(\xi)),
\end{eqnarray}
From (\ref{equation:eq-87}), we have
\begin{eqnarray}
&&\int_{0}^T\int_\Omega\kappa \nabla\xi\cdot \nabla h(\xi)
+\int_0^T\int_{\Gamma_2}(\Psi(\xi+\theta_0)-\Psi(\theta_0))h(\xi)\nonumber\\
\label{equation:eq-89}
&\leq &\int_{0}^T\int_\Omega[q(\xi)\mathcal{K}({\bf B})]_\epsilon h(\xi)
-\int_{0}^T\int_\Omega\kappa\nabla\theta_0\cdot\nabla h(\xi).
\end{eqnarray}
Now we estimate (\ref{equation:eq-89}) term by term.
\begin{eqnarray}
\label{equation:eq-90}
&& \int_{0}^T\int_\Omega\kappa \nabla\xi\cdot \nabla h(\xi)=\frac{1}{2}
\int_{0}^T\int_\Omega \frac{\kappa|\nabla \xi|^2}{(1+|\xi|)^{\frac{3}{2}}},\\
\label{equation:eq-91}
&&(\Psi(\xi+\theta_0)-\Psi(\theta_0))h(\xi)\geq [1-(1+|\xi|)^{-\frac{1}{2}}](\frac{\zeta}{8}|\xi|^4+\omega|\xi|)\geq 0.
\end{eqnarray}
From (\ref{equation:eq-84}) and Lemma \ref{lemma:Lem-71},
we have
\begin{eqnarray}
\label{equation:eq-92}
&&\int_0^T\int_\Omega[q(\xi)\mathcal{K}({\bf B})]_\epsilon h(\xi)\leq
\int_0^T\int_\Omega q(\xi)\mathcal{K}({\bf B})
\leq C_{13}\|{\bf B}_0\|_0,\\
\label{equation:eq-93}
&& \int_0^T\int_\Omega \kappa\nabla\theta_0\nabla h(\xi)
\leq \|\kappa\nabla\theta_0\|_{L^2(\Omega)}[ \int_{Q_T}\frac{|\nabla\xi|^2}{(1+|\xi|)^3}]^{\frac{1}{2}}.
\end{eqnarray}
By Young's inequality in (\ref{equation:eq-93}), we have
\begin{eqnarray}
\frac{1}{2}
\int_{0}^T\int_\Omega \frac{\kappa|\nabla \xi|^2}{(1+|\xi|)^{\frac{3}{2}}}
\leq C_{13}\|{\bf B}_0\|_0+C_{14}\|\kappa\nabla\theta_0\|_{L^2(\Omega)}^2,
\end{eqnarray}
which implies the estimate of this lemma.
\end{proof}
\begin{lemma}
\label{lemma:Lem-11}
Assume that $1\leq q\leq \frac{5}{4}$. Then there exists a constant $C>0$ such that
\begin{eqnarray}
\label{equation:eq-95}
\|\xi\|_{L^{\frac{4q}{3}}(Q_T)}+\|\nabla\xi\|_{L^q(Q_T)}\leq C.
\end{eqnarray}
\end{lemma}
\begin{proof}
Taking $p=\frac{4q}{3}$ and $ q_1=\frac{3q}{3-q}$, by H\"{o}lder's inequality we have
\begin{eqnarray}
\label{equation:eq-96}
\int_\Omega|\xi(t)|^p&=&\int_\Omega|\xi(t)|^{\frac{q}{3}}|\xi(t)|^q
\leq [\int_\Omega|\xi(t)|^{\frac{q}{3}\cdot \frac{3}{q}}]^{\frac{q}{3}}
[\int_\Omega|\xi(t)|^{q\cdot \frac{3}{3-q}}]^{1-\frac{q}{3}}\nonumber\\
&=&\|\xi(t)\|_{L^1(\Omega)}^{\frac{q}{3}}[\int_\Omega|\xi(t)|^{q_1}]^{1-\frac{q}{3}}
\end{eqnarray}
By the embedding $W^{1,q}(\Omega)\hookrightarrow L^{q_1}(\Omega)$ (note that $q_1(1-\frac{q}{3})=q$) and Poincar\'{e}'s inequality, we have
\begin{eqnarray}
\label{equation:eq-97}
\int_{Q_T}|\xi|^p\leq C\|\xi\|_{L^\infty(0,T;L^1(\Omega))}^{\frac{q}{3}}\|\nabla\xi\|_{L^q(Q_T)}^q
\leq C\|\nabla\xi\|_{L^q(Q_T)}^q.
\end{eqnarray}
Taking $ r=\frac{5-4q}{3}$, we have $ p=\frac{(1+r)q}{2-q}$ (indeed, $1+r=\frac{4(2-q)}{3}$). By Lemma \ref{lemma:Lem-10} and H\"{o}lder's inequality, we have
\begin{eqnarray}
&&\int_{Q_T}|\nabla\xi|^q=\int_{Q_T}
\frac{|\nabla\xi|^q}{(1+|\xi|)^{\frac{q(1+r)}{2}}}{(1+|\xi|)^{\frac{q(1+r)}{2}}}\nonumber\\
&&\leq [\int_{Q_T}\frac{|\nabla\xi|^2}{(1+|\xi|)^{1+r}} ]^{\frac{q}{2}}[\int_{Q_T} {(1+|\xi|)^{\frac{q(1+r)}{2-q}}}]^{1-\frac{q}{2}}\nonumber\\
\label{equation:eq-98}
&&\leq C[1+\|\xi \|^p_{L^p(Q_T)}]^{1-\frac{q}{2}}\leq C(1+\|\nabla\xi\|_{L^q(Q_T)}^{q(1-\frac{q}{2})}),
\end{eqnarray}
which implies $ \|\nabla\xi\|_{L^q(Q_T)}\leq C$, and (\ref{equation:eq-97})
implies $\|\xi\|_{L^p(Q_T)}\leq C.$
\end{proof}
\section{Well-posedness of the Source Problem}
We will prove the well-posedness of the problem (\ref{equation:eq-19})-
(\ref{equation:eq-20}), that is, we will investigate the limit of the solution of
the regularized problem (\ref{equation:eq-37})-
(\ref{equation:eq-38}) as the regularization parameter $\epsilon\rightarrow 0$.
For convenience, we denote the solutions of (\ref{equation:eq-37})-
(\ref{equation:eq-38}) by $({\bf B}_\epsilon,\xi_\epsilon).$
Then the regularized problem (\ref{equation:eq-37})-
(\ref{equation:eq-38}) can be represented as follows: find
${\bf B}_\epsilon\in L^2(0,T;{\mathcal{V}}) $ and $\xi_\epsilon \in L^2(0,T;\mathcal{Y})$
such that
\begin{eqnarray}
(\partial_t{ \bf B}_\epsilon,{\Phi})&+&(\lambda(\xi_\epsilon+\theta_0)\nabla\times {\bf B}_\epsilon,\nabla\times{\Phi})
+{\Lambda}(\nabla\cdot{\bf B}_\epsilon,\nabla\cdot{\Phi})=R_\alpha(\frac{f({\bf x},t){\bf B}_\epsilon}{1+\gamma|{\bf B}_\epsilon|^2}, \nabla\times{\Phi})\nonumber\\
+{\Lambda}(\nabla\cdot{\bf B}_\epsilon,\nabla\cdot{\Phi})=R_\alpha(\frac{f({\bf x},t){\bf B}_\epsilon}{1+\gamma|{\bf B}_\epsilon|^2}, \nabla\times{\Phi})\nonumber\\
\label{equation:eq-99}
&+&({\bf U}\times{\bf B}_\epsilon,\nabla\times{\Phi}), \ \ \forall {\Phi\in \mathcal{V}},\\
(\partial_t\xi_\epsilon, \Upsilon)&+&(\kappa\nabla\xi_\epsilon, \nabla\Upsilon)
+<(\Psi(\xi_\epsilon+\theta_0)-\Psi(\theta_0)),\Upsilon>_{\Gamma_2}\nonumber\\
\label{equation:eq-100}
&=&
([q(\xi_\epsilon)\mathcal{K}({\bf B}_\epsilon)]_\epsilon,\Upsilon)-(\kappa\nabla\theta_0,\nabla\Upsilon),\forall \Upsilon\in
\mathcal{Y}.
\end{eqnarray}
From Lemma \ref{lemma:Lem-71}, there exists ${\bf B}\in L^2(0,T;\mathcal{V})$ and a subsequence of ${\bf B}_\epsilon$ (not relabeled) such that
$$ {\bf B}_\epsilon\rightarrow {\bf B}, \ in\ L^2(0,T;\mathcal{V}),\ \ \
{\bf B}_\epsilon\rightarrow {\bf B}, \nabla\times {\bf B}_\epsilon \rightarrow \nabla\times{\bf B},\ in\ L^1(Q_T).$$
From Lemma \ref{lemma:Lem-11}, there exists $\xi\in W^{1,q}(Q_T)$ and a subsequence of $\xi_\epsilon$ (not relabeled) such that
$$\xi_\epsilon\rightarrow \xi\ in W^{1,q}(Q_T),\forall q\in [1,\frac{5}{4}). $$
Since $q(\xi)$ is bounded and Lipschitz continuous, we know that
$$q(\xi_\epsilon)\rightarrow q(\xi), \ \ a.e.\ \ in\ \ Q_T.$$
\begin{theorem}
Let ${\bf B}$ be the limit of the approximate solutions ${\bf B}_\epsilon$ as $\epsilon\rightarrow 0$. Then ${\bf B}$ satisfies the weak formulation (\ref{equation:eq-19}) together with the initial condition ${\bf B}(0)={\bf B}_0(x)$.
\end{theorem}
\begin{proof}
The proof is parallel to that of Theorem 2 and we omit the details here.
\end{proof}
\begin{lemma}
\label{lemma:lem-12}
There exists a subsequence of ${\bf B}_\epsilon$ denoted still by the same notation such that
\begin{eqnarray}
\label{equation:eq-101}
\lim\limits_{\epsilon\rightarrow 0}\|q(\xi_\epsilon)\mathcal{K}({\bf B}_\epsilon)-
q(\xi)\mathcal{K}({\bf B})\|_{L^1(Q_T)}=0.
\end{eqnarray}
\end{lemma}
\begin{proof}
Firstly, we have
\begin{eqnarray}
\label{equation:eq-102}
&&\lim\limits_{\epsilon\rightarrow 0}\int_{Q_T}\big|q(\xi_\epsilon)| \nabla\times {\bf B}_\epsilon|^2-
q(\xi)| \nabla\times {\bf B}|^2\big|\nonumber\\
&\leq&\lim\limits_{\epsilon\rightarrow 0}\int_{Q_T}
|q(\xi_\epsilon)|\,\big| | \nabla\times {\bf B}_\epsilon|^2-| \nabla\times {\bf B}|^2 \big|
+|q(\xi_\epsilon)-q(\xi)|\, | \nabla\times {\bf B}|^2\nonumber\\
&=&\lim\limits_{\epsilon\rightarrow 0}\int_{Q_T}\big[|q(\xi_\epsilon)|\,\big|| \nabla\times {\bf B}_\epsilon|-| \nabla\times {\bf B}|\big|\,(| \nabla\times {\bf B}_\epsilon|+| \nabla\times {\bf B}|)\nonumber\\
&&\hspace{0.5cm}+|q(\xi_\epsilon)-q(\xi)|\, | \nabla\times {\bf B}|^2\big]=0.
\end{eqnarray}
Secondly, we have
\begin{eqnarray}
\label{equation:eq-103}
&&\lim\limits_{\epsilon\rightarrow 0}\int_{Q_T}|q(\xi_\epsilon)\nabla\times{\bf B}_\epsilon\cdot ({\bf U}\times{\bf B}_\epsilon ) -q(\xi)\nabla\times{\bf B}\cdot ({\bf U}\times{\bf B} )|\nonumber\\
&&\leq\lim\limits_{\epsilon\rightarrow 0}\int_{Q_T}| (q(\xi_\epsilon)-q(\xi))\nabla\times{\bf B}_\epsilon\cdot ({\bf U}\times{\bf B}_\epsilon )|\nonumber\\
&&+\lim\limits_{\epsilon\rightarrow 0}\int_{Q_T}|q(\xi)\nabla\times{\bf B}_\epsilon\cdot({\bf U}\times ({\bf B}_\epsilon-{\bf B}) )|\nonumber\\
&&+\lim\limits_{\epsilon\rightarrow 0}\int_{Q_T}|q(\xi)(\nabla\times{\bf B}_\epsilon-\nabla\times{\bf B})\cdot ({\bf U}\times{\bf B} )|=0.
\end{eqnarray}
Thirdly, we have
\begin{eqnarray}
\label{equation:eq-104}
&&\lim\limits_{\epsilon\rightarrow 0}
R_\alpha\int_{Q_T}|q(\xi_\epsilon)\nabla\times{\bf B}_\epsilon\cdot\frac{f({\bf x},t){\bf B}_\epsilon}{1+\gamma|{\bf B}_\epsilon|^2}-q(\xi)\nabla\times{\bf B}\cdot\frac{f({\bf x},t){\bf B}}{1+\gamma|{\bf B}|^2}|\nonumber\\
&&\leq \lim\limits_{\epsilon\rightarrow 0}R_\alpha\int_{Q_T}
|(q(\xi_\epsilon)-q(\xi))\nabla\times{\bf B}_\epsilon\cdot\frac{f({\bf x},t){\bf B}_\epsilon}{1+\gamma|{\bf B}_\epsilon|^2}|\nonumber\\
&&+\lim\limits_{\epsilon\rightarrow 0}R_\alpha\int_{Q_T}|q(\xi)\nabla\times{\bf B}_\epsilon\cdot f({\bf x},t)\cdot (\frac{{\bf B}_\epsilon}{1+\gamma|{\bf B}_\epsilon|^2}-\frac{{\bf B}}{1+\gamma|{\bf B}|^2})|\nonumber\\
&&+\lim\limits_{\epsilon\rightarrow 0}R_\alpha\int_{Q_T}
|q(\xi)\frac{f({\bf x},t){\bf B}}{1+\gamma|{\bf B}|^2}\cdot(\nabla\times{\bf B}_\epsilon-\nabla\times{\bf B})|=0.
\end{eqnarray}
From (\ref{equation:eq-102})-(\ref{equation:eq-104}), by using the triangle inequality, (\ref{equation:eq-101}) can be proved.
\end{proof}
\begin{theorem}
Let $\xi$ be the limit of the approximate solutions $\xi_\epsilon$ as $\epsilon \rightarrow 0.$ Then $\xi $ satisfies the
weak formulation (\ref{equation:eq-20}) together with the initial condition $\xi(0)=0.$
\end{theorem}
\begin{proof}
Define, for any $r>0$, the functions
$$g_r(s)=\frac{1}{1+rs^4}, G_r(s)=\int_0^sg_r(s')ds'.$$
It is easy to see that $ G_r$ is a primitive function of $g_r$ and that $|g_r(s)|\leq 1$, $|G_r(s)|\leq |s|$. Since $\xi_\epsilon\rightarrow \xi$ in $W^{1,6/5}(Q_T)$ as $\epsilon\rightarrow 0$,
and since $g_r(\xi_\epsilon)$ and $G_r(\xi_\epsilon)$
are uniformly bounded with respect to
$\epsilon$, we infer that
$$G_r(\xi_\epsilon)\rightarrow G_r(\xi),\quad g_r(\xi_\epsilon)\rightarrow g_r(\xi) \ \ \ in\ W^{1,6/5}(Q_T).$$
For any $v\in C_0^\infty(0,T;C^\infty(\Omega))$, let $ \Upsilon_\epsilon=vg_r(\xi_\epsilon),\ \ \Upsilon=vg_r(\xi).$
Clearly, we have $\Upsilon_\epsilon\rightarrow \Upsilon$ a.e.\ in $Q_T$. The proof consists of two steps.
From (\ref{equation:eq-100}), we have
\begin{eqnarray}
(\partial_t\xi_\epsilon, \Upsilon_\epsilon)&+&(\kappa\nabla\xi_\epsilon, \nabla\Upsilon_\epsilon)
+<(\Psi(\xi_\epsilon+\theta_0)-\Psi(\theta_0)),\Upsilon_\epsilon>_{\Gamma_2}\nonumber\\
\label{equation:eq-105}
&=&
([q(\xi_\epsilon)\mathcal{K}({\bf B}_\epsilon)]_\epsilon,\Upsilon_\epsilon)-(\kappa\nabla\theta_0,\nabla\Upsilon_\epsilon),
\end{eqnarray}
It is easy to see that
\begin{eqnarray}
&&\lim\limits_{\epsilon\rightarrow 0}\int_{Q_T}\partial_t\xi_\epsilon\Upsilon_\epsilon
=\lim\limits_{\epsilon\rightarrow 0}\int_{Q_T}\frac{\partial G_r(\xi_\epsilon)}{\partial t}v\nonumber\\
\label{equation:eq-106}
&&=-\lim\limits_{\epsilon\rightarrow 0}\int_{Q_T}
G_r(\xi_\epsilon)\frac{\partial v}{\partial t}
=-\int_{Q_T}
G_r(\xi)\frac{\partial v}{\partial t}
=\int_{Q_T}\frac{\partial \xi}{\partial t}\Upsilon.
\end{eqnarray}
At the same time, since $ \xi_\epsilon\rightarrow \xi$ in $ W^{1,q}(Q_T),\forall q\in[1,\frac{5}{4})$, there holds
\begin{eqnarray}
&&\lim\limits_{\epsilon\rightarrow 0}\int_{Q_T}\kappa\nabla\xi_\epsilon \cdot \nabla\Upsilon_\epsilon\nonumber\\
&=&\lim\limits_{\epsilon\rightarrow 0}\int_{Q_T}
\kappa g_r(\xi_\epsilon)\nabla\xi_\epsilon \cdot \nabla v-
\lim\limits_{\epsilon\rightarrow 0}\int_{Q_T}\frac{4r\kappa \xi_\epsilon^3 v}{(1+r\xi_\epsilon^4)^2}|\nabla\xi_\epsilon|^2\nonumber\\
&=&\int_{Q_T}\kappa g_r(\xi)\nabla\xi \cdot \nabla v
-\int_{Q_T}\frac{4r\kappa \xi^3 v}{(1+r\xi^4)^2}|\nabla\xi|^2\nonumber\\
\label{equation:eq-107}
&=&\int_{Q_T}\kappa\nabla\xi \cdot \nabla\Upsilon.
\end{eqnarray}
From Lemma \ref{lemma:lem-8}
there exists a subsequence denoted by the same notation such that
$ \xi_\epsilon\rightarrow \xi$ in $L^3(0,T;\Gamma_2)$. This implies that
$$ (\zeta|\xi_\epsilon+\theta_0|^3+\omega)g_r(\xi_\epsilon)\rightarrow
(\zeta|\xi+\theta_0|^3+\omega)g_r(\xi), \ \ a.e.\ in \ (0,T)\times \Gamma_2. $$
The third term of (\ref{equation:eq-105}) satisfies
\begin{eqnarray}
\label{equation:eq-108}
&&\lim\limits_{\epsilon\rightarrow 0}\int_{0}^T\int_{\Gamma_2}\Psi(\xi_\epsilon+\theta_0)\Upsilon_\epsilon=
\int_{0}^T\int_{\Gamma_2}\Psi(\xi+\theta_0)\Upsilon.
\end{eqnarray}
For the right-hand side of (\ref{equation:eq-105}), by Lemma \ref{lemma:lem-12}, we have
\begin{eqnarray}
&&\lim\limits_{\epsilon\rightarrow 0}[\int_{Q_T}
[q(\xi_\epsilon)\mathcal{K}({\bf B}_\epsilon)]_\epsilon\Upsilon_\epsilon+\int_0^T\int_{\Gamma_2}\Psi(\theta_0)
\Upsilon_\epsilon-
\int_{Q_T}\kappa\nabla\theta_0\nabla\Upsilon_\epsilon]\nonumber\\
\label{equation:eq-109}
&&=\int_{Q_T}
q(\xi)\mathcal{K}({\bf B})\Upsilon+\int_0^T\int_{\Gamma_2}\Psi(\theta_0)\Upsilon-
\int_{Q_T}\kappa\nabla\theta_0\nabla\Upsilon
\end{eqnarray}
From (\ref{equation:eq-106})-(\ref{equation:eq-109}) and (\ref{equation:eq-105}), we can get
\begin{eqnarray}
&&\int_{Q_T}\frac{\partial\xi}{\partial t}\Upsilon+
\int_{Q_T}\kappa\nabla\xi\cdot \nabla\Upsilon
+\int_0^T\int_{\Gamma_2}\Psi(\xi+\theta_0)\Upsilon\nonumber\\
\label{equation:eq-110}
&&=\int_{Q_T}
q(\xi)\mathcal{K}({\bf B})\Upsilon+\int_0^T\int_{\Gamma_2}\Psi(\theta_0)\Upsilon-
\int_{Q_T}\kappa\nabla\theta_0\nabla\Upsilon.
\end{eqnarray}
The initial condition $\xi(0)=0 $ can be proved similarly as in the proof of Theorem 3. We
do not elaborate on the details here.
For the function $g_r(\xi)$, we know that $ g_r(\xi)\rightarrow 1 \ a.e.\ in \ Q_T$ as $r\rightarrow 0$. Then we have
$$ \lim\limits_{r\rightarrow 0}\int_{Q_T}\kappa\nabla\xi\cdot \nabla g_r(\xi)= \lim\limits_{r\rightarrow 0}\int_{Q_T}
-\frac{4r\kappa\xi^3}{(1+r\xi^4)^2}|\nabla\xi|^2=0.$$
Hence we obtain
$$\lim\limits_{r\rightarrow 0}\int_{Q_T}[\frac{\partial \xi}{\partial t}\Upsilon+\kappa\nabla\xi\cdot\nabla\Upsilon]
=\int_{Q_T}[\frac{\partial \xi}{\partial t} v+\kappa\nabla\xi\cdot\nabla v]. $$
Since $\|\xi_\epsilon\|_{L^4(0,T;\Gamma_2)}\leq C$, there exists a subsequence such that $ \|\xi\|_{L^4(0,T;\Gamma_2)}\leq \lim\limits_{\epsilon\rightarrow 0}\|\xi_\epsilon\|_{L^4(0,T;\Gamma_2)}\leq C.$
Then we have
$$ \lim\limits_{r\rightarrow 0}\int_{0}^T\int_{\Gamma_2}\Psi(\xi+\theta_0)\Upsilon
=\int_{0}^T\int_{\Gamma_2}\Psi(\xi+\theta_0)v.$$
Similarly, the right-hand side converges to
$$ \int_{Q_T}
q(\xi)\mathcal{K}({\bf B})v +\int_0^T\int_{\Gamma_2}\Psi(\theta_0)v-
\int_{Q_T}\kappa\nabla\theta_0\nabla v.$$
Collecting all the above equalities and using (\ref{equation:eq-110}), we finally get: for every $v\in C_0^\infty(0,T;\mathcal{Y} \cap C^\infty(\Omega))$,
\begin{eqnarray}
&&\int_{Q_T}\frac{\partial\xi}{\partial t}v+
\int_{Q_T}\kappa\nabla\xi\cdot \nabla v
+\int_0^T\int_{\Gamma_2}\Psi(\xi+\theta_0)v\nonumber\\
\label{equation:eq-111}
&&=\int_{Q_T}
q(\xi)\mathcal{K}({\bf B})v+\int_0^T\int_{\Gamma_2}\Psi(\theta_0)v-
\int_{Q_T}\kappa\nabla\theta_0\nabla v.
\end{eqnarray}
By the arbitrariness of $v$, we conclude (\ref{equation:eq-20}).
\end{proof}
In the following, we state the stability of the solutions of the problem
(\ref{equation:eq-19})-(\ref{equation:eq-20}), which follows directly from Lemma 8-Lemma 11.
\begin{theorem}
Let ${\bf B}, \xi$ be the limits of ${\bf B}_\epsilon, \xi_\epsilon $ given by (\ref{equation:eq-99}) and (\ref{equation:eq-100}), respectively. Then $ ({\bf B},\xi)$
solves the weak problem (\ref{equation:eq-19})-(\ref{equation:eq-20}). Furthermore,
\begin{eqnarray}
\label{equation:eq-112}
\|{\bf B}\|_{L^2(0,T;\mathcal{V})}+\|\xi\|_{L^{\frac{4q}{3}}(Q_T)}
+\|\nabla\xi\|_{L^q(Q_T)}\leq C, \ \ \forall q\in [1, \frac{5}{4}),
\end{eqnarray}
where $C$ depends only on $ \Omega,T,\lambda,\Lambda,R_\alpha,
\|f({\bf x},t)\|_{L^\infty(0,T,L^\infty(\Omega))}$ and
$ \|{\bf U}\|_{L^\infty(0,T,L^\infty(\Omega))}$.
\end{theorem}
Finally, we give the uniqueness analysis of the solutions of the problem (\ref{equation:eq-19})-(\ref{equation:eq-20}).
\begin{theorem}
Assume that ${\bf B}\in L^\infty (0,T;W^{1,4}({curl},\Omega))$, $\xi\in L^2(0,T;H^1(\Omega))\cap L^\infty (0,T;L^\infty(\Omega))$, that $\lambda$ and $\sigma$ are Lipschitz continuous, and that ${\bf U}, f \in L^\infty(0,T;L^\infty(\Omega))$. Then the equations (\ref{equation:eq-19})-(\ref{equation:eq-20}) have a unique solution pair $({\bf B}, \xi)$.
\end{theorem}
\begin{proof}
Assume $({\bf B}_1, \xi_1)$ and $({\bf B}_2, \xi_2)$ are two solutions of (\ref{equation:eq-19})-(\ref{equation:eq-20}), where ${\bf B}_i$ stays bounded in $L^\infty (0,T;W^{1,4}({curl},\Omega))$ and $\xi_i$ stays bounded in $L^2(0,T;H^1(\Omega))\cap L^\infty (0,T;L^\infty(\Omega))$ for $i=1, 2$. Denoting $\tilde{\bf B}={\bf B}_1-{\bf B}_2$, $\tilde{\xi}=\xi_1-\xi_2$ and taking $\Phi=\tilde{\bf B}$, $\Upsilon=\tilde{\xi}$ in the difference of the two weak formulations, we get
\begin{eqnarray}
\label{equation:uni-001}
&&\frac{1}{2}\frac{d}{dt}\|\tilde{\bf B}\|_0^2+(\lambda(\theta_1)\nabla\times\tilde{\bf B},\nabla\times\tilde{\bf B})+((\lambda(\theta_1)-\lambda(\theta_2))\nabla\times{\bf B}_2,\nabla\times\tilde{\bf B})\nonumber\\
&&=R_\alpha(\frac{f({\bf x},t){\bf B}_1}{1+\gamma|{\bf B}_1|^2}-\frac{f({\bf x},t){\bf B}_2}{1+\gamma|{\bf B}_2|^2},\nabla\times\tilde{\bf B})+({\bf U}\times\tilde{\bf B},\nabla\times\tilde{\bf B}),\\
\label{equation:uni-002}
&&\frac{1}{2}\frac{d}{dt}\|\tilde{\xi}\|_0^2+(\kappa\nabla\tilde{\xi},\nabla\tilde{\xi})+\langle\Psi(\xi_1+\theta_0)
-\Psi(\xi_2+\theta_0),\tilde{\xi}\rangle_{\Gamma_2}\nonumber\\
&&=(q(\xi_1)\mathcal{K}({\bf B}_1)-q(\xi_2)\mathcal{K}({\bf B}_2),\tilde{\xi}).
\end{eqnarray}
For the first error equation, based on the equality $\lambda(\theta_1)-\lambda(\theta_2) = \lambda' (\eta) \tilde{\xi}$, where $\theta_i=\xi_i+\theta_0$ ($i=1,2$) and $\eta$ lies between $\theta_1$ and $\theta_2$, we have
\begin{eqnarray}
&&
- ((\lambda(\theta_1)-\lambda(\theta_2))\nabla\times{\bf B}_2,\nabla\times\tilde{\bf B})
= - ( \lambda' (\eta) \tilde{\xi} \, \nabla\times{\bf B}_2,\nabla\times\tilde{\bf B}) \nonumber
\\
&\le&
\| \lambda' (\eta) \|_{L^\infty(\Omega)} \| \tilde{\xi} \|_{L^4 (\Omega)} \| \nabla\times{\bf B}_2 \|_{L^4 (\Omega)} \| \nabla\times\tilde{\bf B} \|_0 \nonumber
\\
&\le&
C \| \tilde{\xi} \|_{L^4 (\Omega)} \| \nabla\times{\bf B}_2 \|_{L^4 (\Omega)} \| \nabla\times\tilde{\bf B} \|_0 \le C \| \tilde{\xi} \|_{L^4 (\Omega)} \| \nabla\times\tilde{\bf B} \|_0 , \label{uniqueness-1}
\end{eqnarray}
in which the last two steps come from the fact that both $\theta_1$ and $\theta_2$ stay bounded in $L^\infty (0,T; L^\infty (\Omega))$, and ${\bf B}_2$ stays bounded in $L^\infty (0,T; W^{1,4}({curl},\Omega))$. The right hand side of (\ref{equation:uni-001}) could be bounded in a more straightforward way:
\begin{eqnarray}
&&
R_\alpha(\frac{f({\bf x},t){\bf B}_1}{1+\gamma|{\bf B}_1|^2}-\frac{f({\bf x},t){\bf B}_2}{1+\gamma|{\bf B}_2|^2},\nabla\times\tilde{\bf B})
\le 2 R_\alpha \|f\|_{L^\infty(\Omega)} \|\tilde{\bf B}\|_{0} \|\nabla\times\tilde{\bf B}\|_0 ,
\label{uniqueness-2}
\\
&& \mbox{since} \, \,
\left| \frac{{\bf B}_1}{1+\gamma|{\bf B}_1|^2}-\frac{{\bf B}_2}{1+\gamma|{\bf B}_2|^2} \right|
\le 2 | {\bf B}_1 - {\bf B}_2 | , \nonumber
\\
&&
({\bf U}\times\tilde{\bf B},\nabla\times\tilde{\bf B}) \le \|{\bf U}\|_{L^\infty(\Omega)}\|\tilde{\bf B}\|_{0}\|\nabla\times\tilde{\bf B}\|_{0} . \label{uniqueness-3}
\end{eqnarray}
Therefore, a substitution of (\ref{uniqueness-1})-(\ref{uniqueness-3}) into (\ref{equation:uni-001}) yields
\begin{eqnarray}
\label{equation:uni-003}
\frac{1}{2}\frac{d}{dt}\|\tilde{\bf B}\|_0^2 + \lambda_{min} \|\nabla\times\tilde{\bf B}\|_0^2 \leq C ( \|\tilde{\xi}\|_{L^4(\Omega)} + \|\tilde{\bf B}\|_{0} ) \| \nabla \times \tilde{\bf B}\|_{0} .
\end{eqnarray}
For the second error equation (\ref{equation:uni-002}), a direct calculation shows that
\begin{eqnarray}
&&\frac{1}{2}\frac{d}{dt}\|\tilde{\xi}\|_0^2+\kappa_{min}\|\nabla\tilde{\xi}\|_0^2+\langle\Psi(\xi_1+\theta_0)
-\Psi(\xi_2+\theta_0),\tilde{\xi}\rangle_{\Gamma_2}\nonumber\\
&&\leq \left( (q(\xi_1)-q(\xi_2)) \mathcal{K}({\bf B}_2) , \tilde{\xi} \right) \nonumber\\
&&+ \Bigl( q(\xi_1)(|\nabla\times{\bf B}_1|^2-|\nabla\times{\bf B}_2|^2-(\nabla\times{\bf B}_1\cdot({\bf U}\times{\bf B}_1)-\nabla\times{\bf B}_2\cdot({\bf U}\times{\bf B}_2))\nonumber\\
&&-(R_\alpha\nabla\times{\bf B}_1\frac{f{\bf B}_1}{1+\gamma|{{\bf B}_1}|^2}-R_\alpha\nabla\times{\bf B}_2\frac{f{\bf B}_2}{1+\gamma|{{\bf B}_2}|^2}) ) , \tilde{\xi} \Bigr) . \label{uniqueness-4}
\end{eqnarray}
The assumption that ${\bf B}_2$ stays bounded in $L^\infty (0,T; W^{1,4}({curl},\Omega))$ implies that
\begin{eqnarray}
\| \mathcal{K}({\bf B}_2) \|_{L^\infty (0,T; L^2 (\Omega))} \le C . \label{uniqueness-5}
\end{eqnarray}
This in turn indicates that
\begin{eqnarray}
&&
\left( (q(\xi_1)-q(\xi_2)) \mathcal{K}({\bf B}_2) , \tilde{\xi} \right)
= \left( q' (\eta) \tilde{\xi}\, \mathcal{K}({\bf B}_2) , \tilde{\xi} \right) \nonumber
\\
&\le& C \| \tilde{\xi} \|_{L^4 (\Omega)} \| \mathcal{K}({\bf B}_2) \|_{L^2 (\Omega)} \| \tilde{\xi} \|_{L^4 (\Omega)}
\le C \| \tilde{\xi} \|_{L^4 (\Omega)}^2 . \label{uniqueness-6}
\end{eqnarray}
Again, the fact that both $\xi_1$ and $\xi_2$ stay bounded in $L^\infty (0,T; L^\infty (\Omega))$ has been used in the derivation. For the second expansion term on the right hand side of (\ref{uniqueness-4}), we see that
\begin{eqnarray}
&&
\left( q(\xi_1)(|\nabla\times{\bf B}_1|^2-|\nabla\times{\bf B}_2|^2 ) , \tilde{\xi} \right)
= \left( q(\xi_1) ( \nabla\times ( {\bf B}_1 + {\bf B}_2 ) ) \cdot ( \nabla \times \tilde{\bf B} ) , \tilde{\xi} \right) \nonumber
\\
&\le&
C ( \| \nabla\times {\bf B}_1 \|_{L^4 (\Omega)} + \| \nabla \times {\bf B}_2 \|_{L^4 (\Omega)} ) \| \nabla \times \tilde{\bf B} \|_0 \| \tilde{\xi} \|_{L^4 (\Omega)} \nonumber
\\
&\le&
C \|\tilde{\xi}\|_{L^4(\Omega)} \| \nabla \times \tilde{\bf B}\|_0 . \label{uniqueness-7}
\end{eqnarray}
The other terms on the right hand side of (\ref{uniqueness-4}) could be analyzed in a similar way:
\begin{eqnarray}
&&
- \Bigl( q(\xi_1) (\nabla\times{\bf B}_1\cdot({\bf U}\times{\bf B}_1)-\nabla\times{\bf B}_2\cdot({\bf U}\times{\bf B}_2)) , \tilde{\xi} \Bigr) \nonumber
\\
&\le&
C \|\tilde{\xi}\|_{L^4(\Omega)} ( \| \tilde{\bf B} \|_0 + \| \nabla \times \tilde{\bf B}\|_0 ) , \label{uniqueness-8}
\\
&&
- R_\alpha \Bigl( q(\xi_1) ( \nabla\times{\bf B}_1\frac{f{\bf B}_1}{1+\gamma|{{\bf B}_1}|^2}-\nabla\times{\bf B}_2\frac{f{\bf B}_2}{1+\gamma|{{\bf B}_2}|^2}) , \tilde{\xi} \Bigr) \nonumber
\\
&\le&
C \|\tilde{\xi}\|_0 \| \tilde{\bf B} \|_0 . \label{uniqueness-9}
\end{eqnarray}
Moreover, the estimate for the boundary integral term on the left-hand side of (\ref{uniqueness-4}) is trivial:
\begin{eqnarray}
\langle\Psi(\xi_1+\theta_0)
-\Psi(\xi_2+\theta_0),\tilde{\xi}\rangle_{\Gamma_2} \ge 0 . \label{uniqueness-10}
\end{eqnarray}
Subsequently, a substitution of (\ref{uniqueness-6})-(\ref{uniqueness-10}) into (\ref{uniqueness-4}) results in
\begin{eqnarray}
\frac{1}{2}\frac{d}{dt}\|\tilde{\xi}\|_0^2+\kappa_{min}\|\nabla\tilde{\xi}\|_0^2
\le C \|\tilde{\xi}\|_{L^4(\Omega)} ( \| \tilde{\bf B} \|_0 + \| \nabla \times \tilde{\bf B}\|_0 )
+ C \|\tilde{\xi}\|_{L^4(\Omega)}^2 .
\label{equation:uni-004}
\end{eqnarray}
As a result, a combination of (\ref{equation:uni-003}) and (\ref{equation:uni-004}) yields
\begin{eqnarray}
&&
\frac{1}{2}\frac{d}{dt} ( \|\tilde{\bf B}\|_0^2 + \|\tilde{\xi}\|_0^2 ) + \lambda_{min} \|\nabla\times\tilde{\bf B}\|_0^2 +\kappa_{min}\|\nabla\tilde{\xi}\|_0^2 \nonumber
\\
&\le&
C_1 \|\tilde{\bf B}\|_{0} \| \nabla \times \tilde{\bf B}\|_{0}
+ C_2 \|\tilde{\xi}\|_{L^4(\Omega)} ( \| \tilde{\bf B} \|_0 + \| \nabla \times \tilde{\bf B}\|_0 )
+ C_3 \|\tilde{\xi}\|_{L^4(\Omega)}^2 . \label{equation:uni-005}
\end{eqnarray}
Furthermore, the following Sobolev inequality (in 3-D) is applied:
\begin{eqnarray}
\| \tilde{\xi} \|_{L^4} \le C \| \tilde{\xi} \|_{H^{\frac34}} \le C \| \tilde{\xi} \|_0^{\frac14} \cdot \| \tilde{\xi} \|_1^{\frac34} \le C ( \| \tilde{\xi} \|_0 + \| \tilde{\xi} \|_0^{\frac14} \cdot \| \nabla\tilde{\xi} \|_0^{\frac34} ) , \label{equation:uni-006}
\end{eqnarray}
so that the following estimates become available:
\begin{eqnarray}
&&
C_1 \|\tilde{\bf B}\|_{0} \| \nabla \times \tilde{\bf B}\|_{0}
\le \frac{C_1^2}{\lambda_{min}} \|\tilde{\bf B}\|_{0}^2 + \frac14 \lambda_{min} \| \nabla \times \tilde{\bf B}\|_0^2 , \label{equation:uni-007-1}
\\
&&
C_2 \|\tilde{\xi}\|_{L^4(\Omega)} \| \tilde{\bf B} \|_0
\le C_4 ( \| \tilde{\xi} \|_0 + \| \tilde{\xi} \|_0^{\frac14} \cdot \| \nabla\tilde{\xi} \|_0^{\frac34} ) \| \tilde{\bf B} \|_0 \nonumber
\\
&\le&
C_5 ( \| \tilde{\xi} \|_0^2 + \| \tilde{\bf B} \|_0^2 ) + \frac14 \kappa_{min} \| \nabla \tilde{\xi} \|_0^2 , \label{equation:uni-007-2}
\\
&&
C_2 \|\tilde{\xi}\|_{L^4(\Omega)} \| \nabla \times \tilde{\bf B} \|_0
\le C_4 ( \| \tilde{\xi} \|_0 + \| \tilde{\xi} \|_0^{\frac14} \cdot \| \nabla\tilde{\xi} \|_0^{\frac34} ) \| \nabla \times \tilde{\bf B} \|_0 \nonumber
\\
&\le&
C_6 ( \| \tilde{\xi} \|_0^2 + \| \tilde{\bf B} \|_0^2 ) + \frac14 \lambda_{min} \| \nabla \times \tilde{\bf B}\|_0^2 + \frac14 \kappa_{min} \| \nabla \tilde{\xi} \|_0^2 , \label{equation:uni-007-3}
\\
&&
C_3 \|\tilde{\xi}\|_{L^4(\Omega)}^2
\le C_7 ( \| \tilde{\xi} \|_0 + \| \tilde{\xi} \|_0^{\frac14} \cdot \| \nabla\tilde{\xi} \|_0^{\frac34} )^2 \nonumber
\\
&\le&
C_8 \| \tilde{\xi} \|_0^2 + \frac14 \kappa_{min} \| \nabla \tilde{\xi} \|_0^2 , \label{equation:uni-007-4}
\end{eqnarray}
in which Young's inequality has been extensively applied. Going back to (\ref{equation:uni-005}), we arrive at
\begin{eqnarray}
&&
\frac{1}{2}\frac{d}{dt} ( \|\tilde{\bf B}\|_0^2 + \|\tilde{\xi}\|_0^2 ) + \frac12 \lambda_{min} \|\nabla\times\tilde{\bf B}\|_0^2 + \frac14 \kappa_{min}\|\nabla\tilde{\xi}\|_0^2 \nonumber
\\
&\le&
( \frac{C_1^2}{\lambda_{min}} + C_5 + C_6 ) \| \tilde{\bf B} \|_0^2
+ (C_5 + C_6 + C_8) \| \tilde{\xi} \|_0^2 . \label{equation:uni-008}
\end{eqnarray}
Consequently, with an application of Gronwall inequality, and making use of the fact that $ \| \tilde{\bf B} ( \cdot, t=0) \|_0 =0$, $ \| \tilde{\xi} ( \cdot, t=0) \|_0 =0$, we arrive at
\begin{eqnarray}
\| \tilde{\bf B} ( \cdot, t) \|_0 = 0 , \, \, \, \| \tilde{\xi} ( \cdot, t) \|_0 = 0 , \quad \forall t > 0. \label{equation:uni-009}
\end{eqnarray}
This completes the uniqueness proof.
\end{proof}
\end{document}
\begin{document}
\thispagestyle{empty}
\title{Cost Sharing over Combinatorial Domains:\\ Complement-Free Cost Functions and Beyond\thanks{A conference version appears in the proceedings of the 27th Annual European Symposium on Algorithms, ESA 2019.}}
\sloppy
\begin{abstract}
We study mechanism design for combinatorial cost sharing models. Imagine that multiple items or services are available to be shared among a set of interested agents. The outcome of a mechanism in this setting consists of an assignment, determining for each item the set of players who are granted service, together with respective payments. Although there are several works studying specialized versions of such problems, there has been almost no progress for general combinatorial cost sharing domains until recently \cite{DobzinskiO17}. Still, many questions about the interplay between strategyproofness, cost recovery and economic efficiency remain unanswered.
The main goal of our work is to further understand this interplay in terms of budget balance and social cost approximation.
Towards this, we provide a refinement of cross-monotonicity (which we term \emph{trace-monotonicity}) that is applicable to iterative mechanisms. The trace here refers to the order in which players become finalized. On top of this, we also provide two parameterizations (complementary to a certain extent) of cost functions which capture the behavior of their average cost-shares.
Based on our trace-monotonicity property, we design a scheme of ascending cost sharing mechanisms which is applicable to the combinatorial cost sharing setting with symmetric submodular valuations.
Using our first cost function parameterization, we identify conditions under which our mechanism is weakly group-strategyproof, $O(1)$-budget-balanced and $O(H_n)$-approximate with respect to the social cost. Further, we show that our mechanism is budget-balanced and $H_n$-approximate if both the valuations and the cost functions are symmetric submodular; given existing impossibility results, this is best possible.
Finally, we consider general valuation functions and exploit our second parameterization to derive a more fine-grained analysis of the Sequential Mechanism introduced by Moulin. This mechanism is budget balanced by construction, but in general only guarantees a poor social cost approximation of $n$. We identify conditions under which the mechanism achieves improved social cost approximation guarantees. In particular, we derive improved mechanisms for fundamental cost sharing problems, including Vertex Cover and Set Cover.
\end{abstract}
\fussy
\pagestyle{plain}
\section{Introduction}
\label{sec:intro}
How to share the cost of a common service (or public good) among a set of interested agents constitutes a fundamental problem in mechanism design that has been studied intensively for at least two decades. Several deep and significant advancements have been achieved throughout this period, notably also combining classical mechanism design objectives (such as incentive compatibility, economic efficiency, etc.) with theoretical computer science objectives (such as approximability and computational efficiency).
However, in the vast majority of the cost sharing models that have been proposed and analyzed in the literature, it is assumed that the mechanism designer is offering a single service and that each agent has a private value describing the willingness to pay for the service. At the same time, there is also a publicly known cost function which describes the total cost for offering the service to each possible subset of agents. Said differently, this results in a single-parameter mechanism design problem, where the goal is to select a subset of the players that will be granted service, subject to covering the cost and achieving an economically efficient outcome.
Although significant progress has been made for such single-parameter domains, moving towards more general \emph{combinatorial domains} has been almost elusive so far. Imagine that there are multiple goods to be shared among the agents who now have more complex valuation functions, expressing their willingness to pay for different subsets (or bundles) of goods. The cost function now depends on the subsets of agents sharing each of the items. An outcome of a mechanism under this setting consists of an allocation, which specifies for each agent the goods for which she is granted service, together with a payment scheme.
The desired properties in designing a cost-sharing mechanism (be it combinatorial or not) are three-fold:
(i) \emph{group-strategyproofness}: we would like resistance to misreporting preferences by individual agents or coalitions,
(ii) \emph{budget-balance}: the payments of the players should cover the incurred cost,
(iii) \emph{economic efficiency}: the allocation should maximize a measure of social efficiency.
The fundamental results in \cite{GKL76,Roberts79} rule out the possibility that all three properties can be achieved. As a result, if we insist on any variant of strategyproofness, we are forced to settle for approximate notions of at least one of the other two criteria. In this context, approximate budget balance means that the mechanism may overcharge the agents, but not by too much. In terms of efficiency, considering a social cost objective instead of the classical social welfare objective (definitions are given in Section \ref{sec:defs}) seems more amenable to multiplicative approximation guarantees.
These adapted objectives have been investigated thoroughly for single-parameter problems, especially for cost-sharing variants of well-known optimization problems.
In the context of more general combinatorial cost-sharing mechanisms, a restricted model with multiple levels of service was first studied in \cite{MehtaRS09}. Ever since, for almost a decade, there was no additional progress along these lines. It was only recently that a step forward was made by Dobzinski and Ovadia \cite{DobzinskiO17}. In their work, they introduce a combinatorial cost-sharing model and derive the first mechanisms guaranteeing good budget balance and social cost approximation guarantees for different classes of valuation and cost functions. As already pointed out in \cite{DobzinskiO17}, however, several important questions concerning our understanding of the approximability of these objectives remain open and deserve further study. This constitutes the starting point of our investigations reported in this work.
\myheader{Our Contributions}
We make further advancements on the design and analysis of mechanisms for combinatorial cost-sharing models.
To begin with, the analysis of the mechanisms we study asks for new conceptual ideas (which might be interesting on their own):
\begin{itemize}
\item We first provide a refinement of the well-known notion of \emph{cross-monotonic} cost sharing functions, which is key in the intensively studied class of Moulin-Shenker mechanisms \cite{MS01} for the single-parameter domain. We introduce the notion of \emph{trace-monotonic} cost sharing functions which is applicable for mechanisms that proceed iteratively and evict agents one-by-one. Trace-monotonicity formalizes the fact that the cost-shares observed by a player for an item do not decrease throughout the \emph{course} of the mechanism. That is, these cost shares may depend on the specific order (or \emph{trace} as we will call it) in which the mechanism considers the agents.
\item We identify two different and (to some extent) complementary parameterizations of the cost functions. Intuitively, these parameters measure the ``variance'' of the average cost-share $c(S)/|S|$, over all agent sets $S$. We introduce two such notions, which we term \emph{$\alpha$-average decreasing} and \emph{$\alpha$-average min-bounded} (see Definition \ref{def:avg-dec} and Definition \ref{def:cmin}, respectively). We note that for every cost function, there exist respective values of $\alpha$ (possibly different for each definition) for which these properties are satisfied. These definitions provide an alternative way to classify cost functions and their respective approximation guarantees in terms of budget balance and social cost.
\end{itemize}
Using the above ideas, in Section \ref{sec:mech}, we derive a scheme for ascending cost sharing mechanisms, which can be seen as a (non-trivial) adaptation of the Moulin-Shenker mechanisms from the binary accept/reject setting to combinatorial cost sharing. Our notion of trace-monotonic cost shares plays a crucial role here. We show that our proposed mechanism is applicable for any non-decreasing cost function and for symmetric submodular valuations (i.e., submodular functions whose value depends only on the cardinality of the set).
By exploiting the first parameterization of $\alpha$-average decreasing cost functions, our main result of Section \ref{sec:mech} is that for $\alpha = O(1)$, our mechanism is polynomial-time, weakly group-strategyproof, $O(1)$-budget-balanced and $O(H_n)$-approximate with respect to social cost, where $n$ is the number of agents \footnote{We use $H_n$ to denote the \emph{$n$-th Harmonic number} defined as $H_n = 1 + \frac{1}{2} + \dots + \frac{1}{n}$.}. As a consequence, if both the valuation and cost functions are symmetric submodular ($\alpha=1$), the mechanism is budget-balanced and $H_n$-approximate. This is best possible even for a single item, as there exist corresponding inapproximability results by Dobzinski et al.~\cite{DobzinskiMRS18}.
Prior to our work, the best known mechanism for symmetric submodular valuation and cost functions is $H_n$-budget balanced and $H_n$-approximate \cite{DobzinskiO17}. We anticipate that further extensions and generalizations might be feasible through our framework and this type of ascending mechanisms.
In Section \ref{sec:FPM}, we exploit our second parameterization of $\alpha$-average min-bounded cost functions, and provide results for general valuation functions. As it turns out, our parameterization enables us to obtain a more fine-grained analysis of the Sequential Mechanism introduced by Moulin \cite{Moulin99}.
This mechanism is budget-balanced by construction, but in general only guarantees a poor social cost approximation of factor $n$.
We show that for $\alpha$-average min-bounded cost functions with $\alpha = O(1)$, the Sequential Mechanism is budget balanced and $H_n$-approximate with respect to social cost.
Interestingly, this result does not even require monotonicity of the valuation functions. In addition, we can push our results even a bit further by introducing a refinement of this class of cost functions (see Definition~\ref{def:cmax}) for which we show that the Sequential Mechanism is $O(1)$-approximate.
The refinement allows us to obtain improved mechanisms for several cost functions originating from combinatorial optimization problems. For example, our result implies that the Sequential Mechanism is $d$-approximate for certain cost-sharing variants of Vertex Cover and Set Cover, where $d$ is the maximum degree of a node or the maximum size of a set, respectively; this improves upon existing results, even in the well-studied single-item case, when $d$ is constant.
In general, the two parameterizations of the cost functions introduced in this work seem to be suitable means to accurately capture the approximation guarantees of both the ascending cost-sharing mechanism of Section \ref{sec:mech} and the Sequential Mechanism of Section \ref{sec:FPM}. In fact, we have not managed to construct natural examples of cost functions which do not admit an $O(H_n)$-approximation by either of the mechanisms studied here. See also the discussion in Section \ref{sec:dis}, where some examples are constructed but they are rather artificial (Proposition \ref{prop:intersec}).
As such, these parameterizations help us to narrow down the class of cost functions which are not yet known to admit a good social cost approximation and enhance our understanding towards further progress in combinatorial cost sharing.
\myheader{Related Work} For the single-item setting and with submodular cost functions, the best known group-strategyproof and budget balanced cost-sharing mechanism is arguably the Shapley value mechanism, introduced by Moulin and Shenker \cite{Moulin99,MS01}.
This was also the first work that tried to quantify the efficiency loss of budget balanced cost-sharing mechanisms.
Later, Feigenbaum et al.~\cite{FeigenbaumKSS03} showed that if one insists on truthfulness, there is no mechanism that achieves a finite approximation of the social welfare objective, even if one relaxes the budget balance property to cost recovery.
To overcome this impossibility result, Roughgarden et al.~\cite{RoughgardenS09} introduced the notion of \emph{social cost} as an alternative means to quantify the efficiency of a mechanism. In the same work, they showed that the Shapley value mechanism is $H_n$-approximate with respect to this objective.
Dobzinski et al.~\cite{DobzinskiMRS18} established another impossibility result for the social cost objective, and showed that every mechanism satisfying truthfulness and cost recovery cannot achieve a social cost approximation guarantee better than $\Omega(\log{n})$.
The problem of deriving mechanisms with the best possible budget balance and social cost approximation guarantees for different cost functions arising from combinatorial optimization problems has been extensively studied in various works, see e.g., \cite{BleischwitzS08,BrennerS07,ChawlaRS06,GuptaKLRS15}.
Moving beyond the single-item case, Mehta et al.~\cite{MehtaRS09} introduced a new family of truthful mechanisms (called \emph{acyclic mechanisms}) which apply to general demand settings of multiple identical items when players have symmetric submodular valuations. For additional works that consider the general demand setting, the reader is referred to~ \cite{BleischwitzS08,BrennerS07,DevanurMV05,Moulin99}.
Birmpas et al.~\cite{BirmpasC0M15} also studied families of valuation and cost functions for the multiple item setting, under cost sharing models that are motivated by applications in participatory sensing environments.
Most related to our work is the recent work by Dobzinski and Ovadia~\cite{DobzinskiO17}. To the best of our knowledge, this is the only prior work that considers a more general approach for combinatorial cost sharing.
They studied a multi-parameter setting and proposed a new VCG-based mechanism. Basically, the idea is to run a VCG mechanism~\cite{Clarke71,Groves73,Vickrey61} with respect to a modified objective function which is defined as the sum of the player valuations minus a potential.
Intuitively, the latter ensures that the payments computed by the mechanism cover the actual cost.
They showed that this mechanism is strategyproof and $H_n$-approximate with respect to social cost. They also identified several classes of valuation and cost functions for which the mechanism is $H_n$-budget balanced. In particular, this is the case if the valuation and cost functions are symmetric.\footnote{We note that their definition of symmetry for the cost function differs from the one we use here.}
Additionally, they established that their mechanism is optimal with respect to the social cost approximation among all symmetric VCG-based mechanisms that always cover the cost.
\section{Definitions and Notation}
\label{sec:defs}
We assume there is a set $N = \{1, 2, \dots, n\}$ of players and a set $M=\{1,2,\dots,m\}$ of items. Each item can be viewed as a public good
or some service that can be shared by the players. Each player $i$ has a \emph{private valuation function $v_i:2^{M}\to \mathbb{R}_{\geq 0}$} specifying the value that she derives from each subset of items.
A \emph{cost-sharing mechanism} takes as input the declared (possibly false) valuation functions $\vec{b} = (b_i)_{i \in N}$ of the players and outputs (i) an allocation that determines which players share each item and (ii) a payment $p_i$ for each player $i$.
An allocation is denoted by a tuple $A = (A_1, \dots, A_n)$, where $A_i \subseteq M$ is the set of items provided to player $i$. For notational convenience, we also represent an allocation $A = (A_i)_{i \in N}$ as a tuple over the items space $(T_1, \dots, T_m)$ such that for every item $j \in M$, $T_j \subseteq N$ is the subset of players sharing item $j$, i.e., $T_j = \sset{i\in N}{j\in A_i}$.
In this paper, we consider mostly \emph{separable} cost functions. In the \emph{separable setting}, we assume that the overall cost of an allocation decomposes into the cost for providing each item separately. Hence, every item $j$ is associated to a known cost function $c_j:2^{N}\to \mathbb{R}_{\geq 0}$, which specifies for each set of players $T \subseteq N$, the cost $c_j(T)$ of providing item $j$ to the players in $T$.
Thus, the total cost of an allocation $A$ is defined as
\begin{equation}
\label{eq:cost-def}
C(A) = \sum_{j\in M} c_j(T_j)
\end{equation}
In Section \ref{subsec:nonsep}, we also consider the \emph{non-separable setting}, where we are given a more general cost function $C: (2^M)^n \rightarrow \mathbb{R}_{\ge 0}$, specifying for every allocation $A = (A_i)_{i \in N}$ the corresponding cost $C(A)$. Non-separable functions can capture dependencies among different items.
We assume that the utility functions of the players are \emph{quasilinear}, i.e., given an allocation $A = (A_i)_{i \in N}$ and payments $(p_i)_{i \in N}$ determined by the mechanism for valuation functions $\vec{\ensuremath{v}} = (\ensuremath{v}_i)_{i \in N}$, the utility of player $i$ is defined as $u_i(\vec{v}) = v_i(A_i) - p_i$.
All our mechanisms have \emph{no positive transfers} (NPT), i.e., $p_i\geq 0$, and satisfy \emph{individual rationality} (IR), i.e., $p_i \le \ensuremath{v}_i(A_i)$.
In addition to the above, we are also interested in the following properties:
\begin{itemize}
\item \textbf{Weak Group-Strategyproofness (WGSP):}
We insist on a stronger notion of resistance to manipulation than truthfulness: A mechanism is \emph{weakly group-strategyproof} if there is no deviation by a coalition of players that makes all its members strictly better off. More formally, we require that for every coalition $Q \subseteq N$ of players, every profile $\vec{v}_{-Q}$ of the other players, there is no deviation $\vec{b}_Q$ of the players in $Q$ such that $u_i(\vec{b}_Q, \vec{v}_{-Q}) > u_i(\vec{v}_Q, \vec{v}_{-Q})$ for every $i \in Q$, where $\vec{v}_Q$ is the profile of the actual valuation functions of $Q$.
\item \textbf{Budget Balance:}
We are interested in mechanisms whose payments cover the allocation cost, ideally exactly. However, the latter is not always possible as it may be incompatible with the other objectives. We therefore consider an approximate budget balance notion: A mechanism is \emph{$\beta$-budget-balanced} ($\beta\geq 1$) if for every valuation profile $\vec{\ensuremath{v}} = (\ensuremath{v}_i)_{i \in N}$, the outcome $(A, p)$ computed by the mechanism satisfies
\begin{equation}
C(A) \leq \sum_{i\in N} p_i \leq \beta\cdot C(A).
\end{equation}
Clearly, we want $\beta$ to be as small as possible to not overcharge players too much for covering the cost. We say that the mechanism is \emph{budget balanced} if $\beta = 1$.
\item \textbf{Economic Efficiency:}
Our goal is to compute outcomes that are (approximately) efficient. To this aim, we use the \emph{social cost objective}, originally defined in \cite{RoughgardenS09}. Adapted to our combinatorial setting, the \emph{social cost} of an allocation $A = (A_i)_{i \in N}$ is defined as the actual cost of the outcome plus the value missed by not serving all items to all players, i.e.,
\begin{equation}
\pi(A) = \sum_{j\in M} c_j(T_j) + \sum_{i\in N}[ \ensuremath{v}_i(M)-\ensuremath{v}_i(A_i)].\footnote{Note that this adaptation was proposed in \cite{DobzinskiO17}. }
\end{equation}
A mechanism is said to be \emph{$\alpha$-approximate} with respect to the social cost objective if for every valuation profile $\vec{\ensuremath{v}} = (\ensuremath{v}_i)_{i \in N}$, the allocation $A$ output by the mechanism satisfies
\begin{equation}
\pi(A) \leq \alpha \cdot \pi(A^*),
\end{equation}
where $A^*$ is an allocation of minimum social cost.
\end{itemize}
We assume that both the valuation functions $(v_i)_{i \in N}$ and the cost functions $(c_j)_{j \in M}$ are non-decreasing (see below for formal definitions).
Further, we focus on certain classes of valuation and cost functions: More specifically, we consider \emph{submodular} and \emph{subadditive} cost functions, both naturally modeling economies of scale. As to the valuation functions, we consider \emph{submodular} valuation functions in Section~\ref{sec:mech} and general valuation functions in Section~\ref{sec:FPM}.
Further, the class of \emph{symmetric XOS} functions plays a prominent role in Section~\ref{sec:mech}.\footnote{It is not hard to verify that these functions can equivalently be defined as stated in Definition~\ref{def:sym-XOS} (see also \cite{EzraFRS18}).}
Below we summarize all relevant definitions (see also Lehman et al. \cite{LehmanLN06}).
\begin{definition}\label{def:sym-XOS}
Let $f: 2^U \rightarrow \mathbb{R}_{\ge 0}$ be a function defined over subsets of a universe $U$.
\begin{compactenum}
\item {$f$ is \emph{non-decreasing} if $f(S) \le f(T)$ for every $S \subseteq T \subseteq U$.}
\item $f$ is \emph{symmetric} if $f(S)=f(T)$ for every $S, T \subseteq U$ with $|S| = |T|$.
\item $f$ is \emph{submodular} if $f(S \cup \{i\}) - f(S) \geq f(T\cup \{i\}) - f(T)$ for every $S\subseteq T \subseteq U$ and $i\not\in S$.
\item {$f$ is \emph{XOS} if there are additive functions $f^1, \dots, f^k$ such that $f(S) = \max_{i \in [k]} f^i(S)$ for all $S \subseteq U$.}
\item $f$ is \emph{subadditive} if $f(S \cup T) \leq f(S) + f(T)$ for every $S, T \subseteq U$.
\item $f$ is \emph{symmetric XOS} if it is symmetric and $f(S)/|S| \ge f(T)/|T|$ for every $S \subseteq T \subseteq U$.
\end{compactenum}
\end{definition}
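For instance, for any fixed integer $k \geq 1$, the function $f(S) = \min\{|S|, k\}$ is symmetric XOS, since $f(S)/|S| = \min\{1, k/|S|\}$ is non-increasing in $|S|$.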
Some of our mechanisms make use of cross-monotonic cost-sharing functions defined as follows:
\begin{definition}
Let $c: 2^N \rightarrow \mathbb{R}_{\ge 0}$ be a cost function. A \emph{cost-sharing function}\footnote{We stress here that it is also possible for cost-sharing methods to overcharge, something that leads to approximate budget balanced mechanisms.} $\chi: N \times 2^N \rightarrow \mathbb{R}_{\ge 0}$ with respect to $c$ specifies for each subset $S \subseteq N$ and every player $i \in S$ a non-negative cost share $\chi(i, S)$ such that $\sum_{i\in S} \chi(i, S) = c(S)$.\footnote{For notational convenience, we define $\chi(i, S) = \infty$ for $i \notin S$.}
$\chi$ is \emph{cross-monotonic} if for all $S \subseteq T \subseteq N$ and every $i \in S$, we have $\chi(i, S) \ge \chi(i, T)$.
\end{definition}
\section{An Iterative Ascending Cost Sharing Mechanism}
\label{sec:mech}
In this section, we present our \emph{Iterative Ascending Cost Sharing Mechanism ($\text{IACSM}$)} for the combinatorial cost sharing setting with symmetric submodular valuations and general cost functions. We first provide a generic description of our mechanism and identify two properties which are sufficient for our main result to go through. We then show that these properties are satisfied if the valuations are symmetric submodular.
\subsection{Definition of $\text{IACSM}$}
\label{subsec:def-iacsm}
\begin{algorithm}[t]
\small
\DontPrintSemicolon
\caption{Iterative Ascending Cost Sharing Mechanism ($\text{IACSM}$)} \label{fig:alg-1}
\KwIn{Declared valuation functions $(b_i)_{i \in N}$.}
\KwOut{Allocation $A = (A_i)_{i \in N}$ and payments $p = (p_i)_{i \in N}$.}
\textbf{Initialization:} Let $X = N$ be the set of active players and define $T_j = N$ for every item $j \in M$.
\label{alg:s0}
\While{$X \neq \emptyset$}{
Compute an \emph{optimal bundle} $A_i$ for every player $i \in X$:
\label{alg:s1}
\begin{align}\label{eq:opt-choice}
A_i & \in \arg\max_{S \subseteq M} \{b_i(S) - p_i(S)\}, \quad \text{where} \quad p_i(S) = \sum_{j \in S} \chi_j(T_j) \\[-5ex] \notag
\end{align}
(If there are several optimal bundles, resolve ties as described within Section \ref{subsec:def-iacsm}.) \;
Let $i^* \in X$ be a player such that $|A_{i^*}| \le |A_i|$ for every $i \in X$.\;
\label{alg:s2}
Assign the items in $A_{i^*}$ to player $i^*$ and remove player $i^*$ from $X$. \;
\label{alg:s3}
For every item $j \in M \setminus A_{i^*}$, set $T_j = T_j \setminus \set{i^*}$, and update the cost shares $\chi_j(T_j)$.\;
\label{alg:s4}
}
\Return $A = (A_i)_{i \in N}$ and $p = (p_i)_{i \in N}$, where $p_i = \sum_{j \in A_i} \chi_j(T_j)$.
\label{alg:s5}
\end{algorithm}
Mechanism $\text{IACSM}$ can be viewed as a generalization of the \emph{Moulin-Shenker} mechanism \cite{MS01} to the combinatorial setting in the sense that it simulates in parallel an ascending iterative auction for each item. To our knowledge, this is the first ascending price mechanism for the combinatorial setting which is not VCG-based, and, as we will describe below, this adaptation is not straightforward, since there are several obstacles we need to overcome. A description of our mechanism $\text{IACSM}$ is given in Algorithm~\ref{fig:alg-1}.
The mechanism maintains a set of \emph{active} players $X$ and for each item $j \in M$ a set of players $T_j$ who are \emph{tentatively} assigned to $j$. Initially, each player is active and tentatively assigned to all the items, i.e., $X = N$ and $T_j = N$ for all $j \in M$.
The mechanism then proceeds in iterations.
In each iteration, each item $j$ is offered to each active player $i \in X$ at a price that only depends on the set of tentatively assigned players $T_j$. For this, we use a player-independent \emph{cost sharing function} $\chi_j(\cdot, T_j)$ for every item $j$, and since we require that $\chi_j(i, T_j) = \chi_j(k, T_j)$ for every $i, k\in T_j$, we will simply denote by $\chi_j(T_j)$ the cost share that each player $i \in T_j$ tentatively assigned to $j$ has to pay.
Based on these cost shares, every active player $i \in X$ computes an \emph{optimal bundle} $A_i$ with respect to the payments $p_i(\cdot)$, as defined in Equation \eqref{eq:opt-choice}. If there are ties, we resolve them according to the following \emph{tie-breaking rule}: if there are several optimal bundles, then player $i$ chooses one of maximum size.
If there are multiple optimal bundles of maximum size $k$, then she chooses the bundle consisting of the $k$ cheapest items (where ties between equal cost share items are resolved consistently, say by index of the items).
After determining the optimal bundle for each active player, the mechanism then chooses an active player $i^*$ whose optimal bundle has minimum size. Again, we break ties consistently, say by index of the players.
The items in $A_{i^*}$ are assigned to player $i^*$ and $i^*$ becomes inactive. Finally, for every item $j$ which is not part of the optimal bundle $A_{i^*}$, $i^*$ is removed from the tentative set $T_j$. The mechanism terminates when all players are inactive.
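To make the above description concrete, the following Python sketch mirrors the main loop of Algorithm~\ref{fig:alg-1}. It is illustrative only: it enumerates bundles by brute force, uses a simplified tie-breaking rule, and treats the declared valuations and the cost shares as assumed oracles (the latter may be stateful, e.g., the running-maximum rule defined later in this section).
\begin{verbatim}
from itertools import combinations

def iacsm(players, items, b, cost_share):
    # b[i](S): declared valuation of player i for a frozenset S of items (oracle)
    # cost_share(j, T): cost share of item j for tentative player set T (oracle)
    X = set(players)                        # active players
    T = {j: set(players) for j in items}    # tentative player sets, one per item
    A = {}                                  # final bundles

    def price(S):                           # current total price of bundle S
        return sum(cost_share(j, frozenset(T[j])) for j in S)

    def optimal_bundle(i):                  # ties: larger, then cheaper bundles
        bundles = [frozenset(S) for k in range(len(items) + 1)
                   for S in combinations(sorted(items), k)]
        return max(bundles, key=lambda S: (b[i](S) - price(S), len(S), -price(S)))

    while X:
        best = {i: optimal_bundle(i) for i in X}
        i_star = min(X, key=lambda i: (len(best[i]), i))   # smallest optimal bundle
        A[i_star] = best[i_star]
        X.remove(i_star)
        for j in items:
            if j not in A[i_star]:
                T[j].discard(i_star)        # i_star withdraws from item j
    # payments as in the Return step: cost shares of the assigned items
    p = {i: sum(cost_share(j, frozenset(T[j])) for j in A[i]) for i in players}
    return A, p
\end{verbatim}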
\subsection{Two Crucial Properties}
\label{subsec:twoprop}
In this section we identify two crucial properties that our mechanism has to satisfy for our main result to go through. To formalize these properties, we introduce first some more notation.
\myheader{Trace of $\text{IACSM}$}
Note that the execution of our mechanism $\text{IACSM}$ on an instance of the problem induces an order $\tau = (\tau_1, \dots, \tau_n)$ on the players.
Without loss of generality, we may assume that the players are renamed such that $\tau = (1, \dots, n)$, i.e., player $i$ becomes inactive in iteration $i$; however, we emphasize that this order is determined by the run of our mechanism.
The order $\tau = (1, \dots, n)$ together with the final bundle $A_i$ assigned to each player $i$ at the end of iteration $i$ induces an order of player withdrawals for each item $j$. More precisely, for every $j\in M$ we let $\tau_j$ be the subsequence of $\tau$ consisting only of the players who withdrew from item $j$ (at the end of the iteration when they became inactive). We refer to $\tau_j$ as the \emph{trace of item $j$}. Recall that initially $T_j = N$ and hence all players are tentatively assigned to $j$. The length of $\tau_j$ can vary from $0$, when nobody withdraws from item $j$ and $\tau_j$ is the null sequence, all the way to $n$, when everybody withdraws from $j$ and $\tau_j = \tau$.
Given a trace $\tau_j$ in the form $\tau_j= ({i_1}, {i_2}, \dots, {i_\ell})$ and $k\in \{0, 1, \dots, |\tau_j|\}$, let $R_j^k = N\setminus \{{i_1}, {i_2}, \dots , {i_k}\}$; define $R_j^0 = N$. Note that the set $R_j^k$ is precisely the set of players tentatively assigned to $j$ after $k$ players have withdrawn from $j$ during the execution of the mechanism.
We note that the notion of trace is valid also for any other iterative mechanism where the assignment of one player becomes finalized at each iteration, e.g., \cite{MehtaRS09}.
\myheader{Trace-monotonic cost sharing functions}
We introduce a new property of cost sharing functions which will turn out to be crucial below. Intuitively, it is a refinement of the standard cross-monotonicity property which has to hold only for \emph{certain} subsets of players encountered by the mechanism, namely for the sets $\set{R^k_j}$. More precisely,
given a trace $\tau_j$ for an item $j\in M$, we say that the cost sharing function $\chi_j$ is \emph{cross-monotonic with respect to $\tau_j$} (or, \emph{trace-monotonic} for short), if
\[\forall k \in \set{0, \dots, |\tau_j|-1}: \qquad \chi_j(R^k_{j}) \le \chi_j(R^{k+1}_{j}).
\]
Note that this ensures that the cost share of item $j$ (weakly) increases during the execution of the mechanism, as we consider the sequence of sets
\[
R_j^0 \supset R_j^1 \supset \dots \supset R_j^{|\tau_j|}.
\]
A subtle point here is that the definition of the cost share $\chi_j(R_j^k)$ may not only depend on the set of players $R^k_j$, but also on the trace $\tau_j$ specifying how the set $R^k_j$ has been reached by the mechanism.\footnote{Notationally, we would have to write here $\chi^{\tau_j}_j$ to indicate the dependency on $\tau_j$. However, in the analysis we focus on a fixed trace produced by an execution of the mechanism and omit the explicit reference to it for notational convenience.}
It will become clear below that this additional flexibility enables us to implement our mechanism for \emph{arbitrary} cost functions.
\myheader{Properties (P1) and (P2)}
Our first property is rather intuitive: An item $j$ needs to be offered to all active players at the same price and this price can only increase in subsequent iterations. In particular, this ensures that if at the end of iteration $i$, player $i$ withdraws from an item $j \in M \setminus A_i$, then the price of $j$ for the remaining players in $T_j \setminus \set{i}$ does not decrease. This is crucial to achieve strategyproofness, and it is captured precisely by trace-monotonic cost sharing functions.\footnote{Note that we have to require trace-monotonicity with respect to an \emph{arbitrary} trace of item $j$ here, because we cannot control the trace $\tau_j$ that will be realized by $\text{IACSM}$.
}
\begin{enumerate}[\textbf{(P1)}]
\item For each item $j \in M$ the cost sharing function $\chi_j$ is trace-monotonic with respect to every trace $\tau_j$.
\end{enumerate}
The first property alone is not sufficient to ensure that our mechanism $\text{IACSM}$ is weakly group-strategyproof (or even strategyproof).
Additionally, we need to enforce the following refinement property on the final bundles assigned to the players. We prove below that Property (P2) is satisfied for symmetric submodular valuation functions.
\begin{enumerate}[\textbf{(P2)}]\setcounter{enumi}{1}
\item The final bundles $(A_i)_{i \in N}$ assigned to the players satisfy the following \emph{refinement property}: $A_i \subseteq A_{i+1}$ for every $i \in \set{1, \dots, n-1}$.
\end{enumerate}
\subsection{Feasibility of (P1) and (P2)}
We next define the cost sharing function that we use. The intuition is as follows: Suppose that $S = T_j$ is the set of players who are tentatively allocated to item $j$ at the beginning of iteration $i$ for some $i\in [n]$. Ideally, we would like to charge the average cost $c_j(S)/|S|$ to each player in $S$, but we cannot simply do this because the average cost might decrease compared to the previous iteration, which would destroy Property (P1). Given our new notion of trace-monotonicity, we resolve this by defining the cost share of item $j$ as the maximum average cost over all player sets that have been tentatively allocated to $j$ so far.
More formally, let $\tau_j$ be the trace of item $j$ induced by $\text{IACSM}$ when executed on a given instance. Let $S$ be the set of players tentatively assigned to item $j$ at the beginning of iteration $i$, and fix $k$ such that $R_j^k = S$ (by the definition of our mechanism, such a $k$ must exist and $k \le i-1$).
We define
\begin{equation}\label{eq:max-avg-csr}
\chi_j(S) = \max_{\ell \in \set{0, \dots, k}}
\frac{c_j(R_j^\ell)}{n-\ell}.
\end{equation}
Note that by using this definition we may end up overcharging the actual cost $c_j(S)$ of item $j$ in the sense that $|S| \cdot \chi_j(S) > c_j(S)$. As we show in Section \ref{subsec:main}, the budget balance and social cost approximation guarantees depend on the magnitude by which we might overcharge.
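For illustration only, the following Python sketch mirrors the definition in \eqref{eq:max-avg-csr}; the function names and the representation of $c_j$ as a callable on player sets are our own and not part of the formal model.
\begin{verbatim}
# Minimal sketch of the cost share chi_j(S) defined above (illustrative only).
def cost_share(c_j, trace_j, k, n):
    """Maximum average cost over the sets R_j^0, ..., R_j^k.

    c_j     -- cost function of item j (maps a frozenset of players to a cost)
    trace_j -- players that withdrew from item j, in withdrawal order
    k       -- number of withdrawals so far (0 <= k <= len(trace_j))
    n       -- number of players; players are labelled 1, ..., n
    """
    R = set(range(1, n + 1))       # R_j^0 = N
    best = 0.0
    for ell in range(k + 1):
        best = max(best, c_j(frozenset(R)) / (n - ell))  # |R_j^ell| = n - ell
        if ell < k:
            R.discard(trace_j[ell])  # the next withdrawal yields R_j^{ell+1}
    return best

# Example: for a digital good, c_j(S) = 1 for every non-empty S, so the cost
# share equals 1/(n - k) and grows with every withdrawal.
digital_good = lambda S: 1.0 if S else 0.0
\end{verbatim}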
It is now trivial to show that Property (P1) holds.
\begin{lemma}\label{lem:average-cs}
Consider some item $j \in M$ and let $c_j: 2^N \rightarrow \mathbb{R}_{\ge 0}$ be an arbitrary cost function. Let $\tau_j$ be an arbitrary trace of $j$.
The cost sharing function $\chi_j$ defined in \eqref{eq:max-avg-csr} is trace-monotonic.
\end{lemma}
\begin{proof}
By definition \eqref{eq:max-avg-csr}, we have for every $k \in \set{0, \dots, |\tau_j|-1}$
$$
\chi_j(R^k_j)
= \max_{\ell \in \set{0, \dots, k}} \frac{c_j(R_j^\ell)}{n-\ell}
\le \max_{\ell \in \set{0, \dots, k+1}} \frac{c_j(R^\ell_j)}{n-\ell}
= \chi_j(R_j^{k+1}).
$$
\end{proof}
We turn to Property (P2). In general, it seems difficult to guarantee (P2), but it is not hard to see that it holds if the valuation functions are symmetric submodular.
\begin{lemma}\label{lem:refine}
Suppose the valuation functions are symmetric submodular. Then $A_i \subseteq A_{i+1}$ for every $i \in \set{1, \dots, n-1}$.
\end{lemma}
\begin{proof}
Fix some $i \in \set{1, \dots, n-1}$ and consider players $i$ and $i+1$.
Note that both $i$ and $i+1$ are active at the beginning of iteration $i$.
Let $A_i$ and $A'_{i+1}$ be the optimal bundles chosen by $i$ and $i+1$ in iteration $i$, respectively.
Because $i$ is chosen, we have $|A_i| \le |A'_{i+1}|$.
Further, because the valuation functions are symmetric, $A_i$ consists of the $|A_i|$ smallest cost share items (by our tie-breaking rule). Similarly, $A'_{i+1}$ consists of the $|A'_{i+1}|$ smallest cost share items. We conclude that $A_i \subseteq A'_{i+1}$. (Note that here we exploit that if there are several optimal bundles for player $i+1$, then the one of maximum size is chosen.)
Note that at the end of iteration $i$, player $i$ becomes inactive and withdraws from the items in $A'_{i+1} \setminus A_i$. By trace-monotonicity (Property (P1)), the cost shares of these items do not decrease in iteration $i+1$. Also, the cost shares of all items in $A_i$ remain the same.
Consider now player $i+1$ in iteration $i+1$. By using similar arguments, it follows that the optimal bundle $A_{i+1}$ consists of the $|A_{i+1}|$ lowest cost share items.
But note that in iteration $i$, the optimal bundle $A'_{i+1}$ of player $i+1$ contained all the items of $A_i$ and possibly a few more. Since the items of $A_i$ continue to be the ones of smallest cost share, the optimal bundle of $i+1$ in iteration $i+1$ must still contain all the items in $A_i$. (Note that here we again exploit that the optimal bundle of maximum size is chosen if there are ties.) Thus it must hold that $A_{i+1} \supseteq A_i$.
\end{proof}
\subsection{Main result for IACSM}
\label{subsec:main}
In order to state our main result of this section, we need to introduce a crucial parameter that determines the budget balance and social cost approximation guarantees of our mechanism.
\begin{definition} \label{def:avg-dec}
A cost function $c: 2^N \rightarrow \mathbb{R}_{\ge 0}$ is \emph{$\alpha$-average-decreasing} for some $\alpha \ge 1$ if for every non-empty $S \subseteq T \subseteq N$ we have $\alpha \cdot \frac{c(S)}{|S|} \ge \frac{c(T)}{|T|}$.
\end{definition}
Note that for every cost function $c$ there exists some $\alpha \ge 1$ such that $c$ is $\alpha$-average decreasing. However, here we are particularly interested in $\alpha$-average decreasing cost functions for which the parameter $\alpha$ is small, as can be seen by Theorem \ref{thm:main} below.
Average decreasing functions with small values of $\alpha$ arise naturally in the domains of digital goods and public goods. For digital goods, the cost of serving a non-empty set of customers is typically assumed to be constant: there is a cost for producing the good, which can then be shared at no additional cost (hence the definition is satisfied with $\alpha=1$). The same applies to some public good models. Note also that \emph{symmetric XOS cost functions} (see Definition~\ref{def:sym-XOS}) are average-decreasing (i.e., $\alpha = 1$).
The following is the main result of this section.
\begin{theorem}\label{thm:main}
Suppose the valuation functions are symmetric submodular and the cost functions are $\alpha$-average decreasing. Then the mechanism $\text{IACSM}$ runs in polynomial time, satisfies IR, NPT, WGSP and is $\alpha$-budget balanced and $2 \alpha^3 H_n$-approximate.
\end{theorem}
Symmetric submodular cost functions are average decreasing (i.e., $\alpha = 1$) since they are a subclass of symmetric XOS functions. As a consequence, we obtain the following corollary from Theorem~\ref{thm:main} (with an additional improvement on the social cost approximation).
\begin{corollary}\label{col:adm}
Suppose the valuation functions and the cost functions are symmetric submodular. Then the mechanism $\text{IACSM}$ runs in polynomial time, satisfies IR, NPT, WGSP and is budget balanced and $H_n$-approximate.
\end{corollary}
\begin{proof}
Applying Theorem \ref{thm:main} with $\alpha=1$, we get immediately all the claimed properties except for the social cost approximation which is $2H_n$.
However, we note that for symmetric submodular cost functions the factor $2\alpha$ in Lemma~\ref{lem:adiffer-2} (see Section \ref{subsec:sum}) can simply be omitted, because in this case $c(S) \leq c(T)$ for any two sets $S, T$ with $|S|\leq |T|$.
By exploiting this in the remaining proof of the social cost approximation guarantee, we obtain an $H_n$-approximation.
\end{proof}
Note that the approximation factor of $H_n$ for symmetric submodular functions is tight: The impossibility result of Dobzinski et al.~\cite{DobzinskiMRS18} for a single public good implies that achieving a better approximation ratio is impossible, even in the single-item case ($m = 1$).
Finally we point out that \emph{$\alpha$-average-decreasing} functions are subadditive when $\alpha=1$, while this is not necessarily true for $\alpha>1$.
\begin{lemma}\label{lem:subadditivity}
Let $c(\cdot)$ be a $1$-average-decreasing cost function. Then $c(\cdot)$ is subadditive; however, it is not necessarily symmetric or submodular. If $c(\cdot)$ is $\alpha$-average-decreasing only for some $\alpha>1$ (but not for $\alpha=1$), then $c(\cdot)$ is not necessarily subadditive.
\end{lemma}
\begin{proof}
Let $S, T \subseteq N$ and assume without loss of generality that
$\frac{c(S)}{|S|} \leq \frac{c(T)}{|T|}$.
Using that the average cost of $c$ is non-increasing, we obtain
\begin{align*}
c(S\cup T)
&
\leq |S\cup T|\cdot \frac{c(S)}{|S|}
\leq c(S) + |T| \frac{c(S)}{|S|} \leq c(S)+c(T).
\end{align*}
Hence $c$ is subadditive.
We construct now an example of a non-symmetric and non-submodular cost function that is average-decreasing. Let $N = \set{1, 2, 3}$ and consider the function $c$ defined as follows: $c(\{1\})=5$, $c(\{2\})=7$, $c(\{3\})=8$, $c(\{1,2\})=10$, $c(\{2,3\}) = c(\{1,3\})=9$, and $c(\{1,2, 3\}) = 11$. It is easy to verify that $c$ has decreasing average cost, but obviously, it is not a symmetric function; in fact, it is not even submodular because $c(\{1,2, 3 \})-c(\{1, 3\})=11-9=2>1=9-8=c(\{2, 3\})-c(\{3\})$.
Finally consider the following example:
\[ c(S) =
\begin{cases}
0 & \text{if $S=\emptyset$} \\
1 & \text{if $|S|=1,2$} \\
3 & \text{if $|S|\geq 3$}
\end{cases}
\]
It is easy to see that this function is \emph{2-average-decreasing} (consider a set of cardinality $2$ and a superset of cardinality $3$; this case yields the maximum possible $\alpha$). Regarding non-subadditivity, consider disjoint sets $S, T \subseteq N$ with $|S|=1$ and $|T|=2$ (so $n\geq 3$). We have $c(S\cup T)=3>2=1+1=c(S)+c(T)$.
\end{proof}
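The two example functions in the proof above are small enough to verify exhaustively. The following Python snippet (an illustration only; all function names are ours) checks the claimed properties by brute force.
\begin{verbatim}
# Brute-force check of the two example cost functions above (illustrative only).
from itertools import combinations

def subsets(universe):
    return [frozenset(c) for r in range(len(universe) + 1)
            for c in combinations(universe, r)]

def is_alpha_average_decreasing(c, N, alpha):
    sets = [S for S in subsets(N) if S]
    return all(alpha * c(S) / len(S) >= c(T) / len(T)
               for S in sets for T in sets if S <= T)

def is_submodular(c, N):
    sets = subsets(N)
    return all(c(S | {x}) - c(S) >= c(T | {x}) - c(T)
               for S in sets for T in sets if S <= T
               for x in N if x not in T)

def is_subadditive(c, N):
    sets = [S for S in subsets(N) if S]
    return all(c(S | T) <= c(S) + c(T) for S in sets for T in sets)

# First example: 1-average-decreasing, but not submodular (and not symmetric).
vals = {frozenset(): 0, frozenset({1}): 5, frozenset({2}): 7, frozenset({3}): 8,
        frozenset({1, 2}): 10, frozenset({1, 3}): 9, frozenset({2, 3}): 9,
        frozenset({1, 2, 3}): 11}
c1 = lambda S: vals[frozenset(S)]
print(is_alpha_average_decreasing(c1, {1, 2, 3}, 1), is_submodular(c1, {1, 2, 3}))

# Second example: 2-average-decreasing, but not subadditive.
c2 = lambda S: 0 if not S else (1 if len(S) <= 2 else 3)
print(is_alpha_average_decreasing(c2, {1, 2, 3}, 2), is_subadditive(c2, {1, 2, 3}))
\end{verbatim}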
The remainder of this section is devoted to the proof of Theorem~\ref{thm:main}.
Unless stated otherwise, we assume below that the valuation functions are symmetric submodular and the cost functions are $\alpha$-average decreasing.
\subsection{Computational Efficiency}
We argue that $\text{IACSM}$ can be implemented in polynomial time. Clearly, the mechanism terminates after $n$ iterations.
In each iteration, the only non-trivial operations are (i) to compute the optimal bundles for all active players (Line 3) and (ii) to update the cost shares of the items (Line 6). All other operations can be implemented to run in time $O(n+m)$. We prove below that (i) and (ii) can be done in polynomial time.
We first show that optimal bundles can be computed efficiently if the valuation functions are symmetric submodular.
\begin{lemma}\label{cl:utility-max}
If the valuation functions are symmetric submodular, then an optimal bundle for player $i$ can be computed in polynomial time.
\end{lemma}
\begin{proof}
Fix an arbitrary iteration and let $i \in X$ be an active player. We need to show that we can efficiently compute an optimal bundle as defined in \eqref{eq:opt-choice}.
Recall that the value that player $i$ derives from a bundle of items only depends on its cardinality (because the valuation functions are symmetric submodular). Thus, to build an optimal bundle, we can start with the empty set and iteratively add an item of lowest price as long as this price is at most the added marginal value. As a result, the value of the constructed bundle (weakly) increases whenever we add a new item. At the same time, the marginal value of the added items (weakly) decreases because of submodularity. Thus, the first time we consider an item whose marginal value is (strictly) less than its price, we have to stop as the utility can only decrease if we add any of the remaining items. Note that the optimal bundle constructed in this way satisfies the tie-breaking rule described above. Clearly, this procedure stops after at most $m$ iterations.
\end{proof}
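For concreteness, the greedy procedure in the proof can be sketched as follows in Python; the interface (marginal values given as a list, prices as a dictionary) is our own simplification and not part of the mechanism's formal description.
\begin{verbatim}
# Sketch of the greedy computation of an optimal bundle for a symmetric
# submodular valuation (illustrative only; interface and names are ours).
def optimal_bundle(marginals, prices):
    """marginals -- non-increasing marginal values g(t) - g(t-1), t = 1, ..., m
       prices    -- dict mapping each item to its current cost share
       Ties (marginal value equal to price) are resolved towards larger bundles."""
    order = sorted(prices, key=lambda j: prices[j])  # cheapest items first
    bundle = set()
    for t, j in enumerate(order):
        if marginals[t] >= prices[j]:
            bundle.add(j)        # the marginal value still covers the price
        else:
            break                # any further item can only decrease the utility
    return bundle

# Example: marginal values 5, 3, 1 and cost shares 2, 2.5, 4 yield {'a', 'b'}.
print(optimal_bundle([5, 3, 1], {'a': 2.0, 'b': 2.5, 'c': 4.0}))
\end{verbatim}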
We note that for non-symmetric valuation functions, the complexity of $\text{IACSM}$ depends on the time needed to compute an optimal bundle. For example, if one has access to demand queries, we can still have an efficient implementation with polynomially many queries.
We next turn to the computation of the cost shares. Note that in general it is not clear whether the cost shares as defined in \eqref{eq:max-avg-csr} can be computed efficiently (as there might be exponentially many supersets that need to be considered). However, as we show below, the cost shares of the items which are constructed throughout the execution of our mechanism $\text{IACSM}$ can be computed efficiently.
\begin{lemma}\label{lem:cost-share-comp}
The cost shares as defined in \eqref{eq:max-avg-csr} can be computed efficiently throughout the execution of mechanism $\text{IACSM}$.
\end{lemma}
\begin{proof}
Let $\tau = (1, 2, \dots, n)$ be the player order induced by $\text{IACSM}$.
Fix an arbitrary item $j \in M$. After the initialization, we have $T_j = N$ and the cost share of $j$ is thus $\chi_j(R_j^0) = c_j(N)/|N|$. Clearly, as long as the set of players $T_j$ who are tentatively assigned to item $j$ does not change, the cost share of $j$ remains the same. Suppose that $T_j$ changes at the end of iteration $i$ because player $i$ withdraws from item $j$, resulting in a new set $T'_j = T_j \setminus \set{i}$. The new cost share $\chi_j(T'_j)$ of item $j$ can then be determined simply by taking the maximum of the current cost share $\chi_j(T_j)$ and $c_j(T'_j)/|T'_j|$. Note that this update ensures that the cost share definition in \eqref{eq:max-avg-csr} is met.
\end{proof}
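The incremental update described in the proof can be written in a few lines; the following sketch (names and bookkeeping are ours) maintains the cost shares of all items while players withdraw.
\begin{verbatim}
# Incremental maintenance of the cost shares (illustrative only).
def run_share_updates(costs, withdrawals, n):
    """costs       -- dict: item -> cost function (frozenset of players -> cost)
       withdrawals -- list of (player, item) pairs in the order they occur
       n           -- number of players; players are labelled 1, ..., n"""
    assigned = {j: set(range(1, n + 1)) for j in costs}               # T_j = N
    share = {j: costs[j](frozenset(assigned[j])) / n for j in costs}  # chi_j(R_j^0)
    for player, j in withdrawals:
        assigned[j].discard(player)
        if assigned[j]:
            # one max() per withdrawal: the earlier prefix is already folded in
            share[j] = max(share[j],
                           costs[j](frozenset(assigned[j])) / len(assigned[j]))
    return share
\end{verbatim}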
\subsection{IR, NPT and WGSP}
\label{subsec:prop}
The individual rationality and the no positive transfers properties follow directly from the definition of the mechanism. The WGSP property is established by the following lemma.
\begin{lemma}\label{lem:tr}
Mechanism $\text{IACSM}$ is weakly group-strategyproof.
\end{lemma}
\begin{proof}
Fix a coalition $Q \subseteq N$. Let $I$ be the instance in which all players in $Q$ report their valuations truthfully and let $\hat{I}$ be an instance in which all players in $Q$ misreport their valuations. We need to prove that not every player $i \in Q$ strictly improves her utility by misreporting.
Consider the runs of $\text{IACSM}$ on instances $I$ and $\hat{I}$, respectively.
Let $\tau$ and $\hat{\tau}$ be the player orders induced by $\text{IACSM}$ for $I$ and $\hat{I}$, respectively.
We assume without loss of generality that in the run on $I$, player $i$ is considered in iteration $i$, i.e., ${\tau} = (1, \dots, n)$.
\footnote{Note that the mechanism might terminate before iteration $n$, but for the analysis it will be convenient to assume that it uses exactly $n$ iterations. Conceptually, simply assume that the players who become inactive all at once in the final iteration are removed one-by-one (using an arbitrary but consistent tie breaking rule).}
Let $\hat{\tau} = (\hat{\tau}_1, \dots, \hat{\tau}_n)$ be the order in which the players are considered in the run on $\hat{I}$.
Let $i$ be the first iteration in which either (a) the considered players $i$ and $\hat{\tau}_i$ differ, i.e., $i \neq \hat{\tau}_i$, or (b) the same player $i = \hat{\tau}_i$ is considered, but the bundles allocated in $I$ and $\hat{I}$ differ.
Note that such an $i$ must exist as otherwise both runs return the same allocation and we are done. By the choice of $i$, at the beginning of iteration $i$ the cost share of each item is the same in $I$ and $\hat{I}$, and can only increase subsequently. We distinguish two cases:
\emph{Case 1: $i \in Q$.} The bundle $A_i$ allocated to player $i$ in $I$ is chosen such that
\begin{equation}\label{eq:max-bundle}
\ensuremath{v}_i(A_i) - p_i(A_i) \ge \ensuremath{v}_i(S) - p_i(S) \qquad \forall S \subseteq M,
\end{equation}
where $p_i(S)$ is the sum of the cost shares of the items in $S$ in iteration $i$ in $I$ (which are the same as in $\hat{I}$).
Suppose player $i$ is considered in iteration $k$ in the run on $\hat{I}$, i.e., $\hat{\tau}_k = i$. By the choice of $i$, we have $k \ge i$. Thus, in iteration $k$ in $\hat{I}$ the price $\hat{p}_i(S)$ of $i$ for each bundle $S \subseteq M$ must satisfy $\hat{p}_i(S) \ge p_i(S)$. In particular, for the bundle $\hat{A}_i$ allocated to player $i$ in iteration $k$ in $\hat{I}$ we have
$$
\ensuremath{v}_i(\hat{A}_i) - \hat{p}_i(\hat{A}_i) \le
\ensuremath{v}_i(\hat{A}_i) - p_i(\hat{A}_i) \le
\ensuremath{v}_i(A_i) - p_i(A_i),
$$
where the last inequality follows from \eqref{eq:max-bundle}.
The claim follows because $i$ is part of the deviating coalition $Q$.
\emph{Case 2: $i \notin Q$.}
We first argue that player $k = \hat{\tau}_i \in Q$. Assume that $k \notin Q$. Then both $i$ and $k$ bid truthfully in iteration $i$ in $I$ and $\hat{I}$. Because in iteration $i$ the cost shares are the same in both runs, the bundles chosen by $i$ in $I$ and $\hat{I}$ are the same. The same holds for player $k$. If $i \neq k$ then this is a contradiction to the assumption that $\text{IACSM}$ uses a consistent tie breaking rule (as $i$ is chosen in $I$ but $k$ in $\hat{I}$). If $i = k$ then this is a contradiction to our choice of iteration $i$ (as $i$ chooses the same bundle in $I$ and $\hat{I}$). We conclude that $k \in Q$.
We now compare the utility obtained by player $k = \hat{\tau}_i \in Q$ in $I$ and $\hat{I}$.
Note that $k \neq i$ because $k \in Q$ and $i \notin Q$. Observe that in iteration $i$ in instance $\hat{I}$, player $i$ reports truthfully and thus opts for the same bundle $A_i$ as in iteration $i$ in $I$. Given that player $k$ is chosen in iteration $i$ in $\hat{I}$ (and not player $i$), the bundle $\hat{A}_k$ allocated to $k$ satisfies $\hat{A}_k \subseteq A_i$.
Now, consider the run on $I$ and let $k > i$ be the iteration in which player $k = \hat{\tau}_i$ is considered. By Property (P2), we have $A_i \subseteq A_k$. We conclude that $\hat{A}_k \subseteq A_k$.
Because $k$ reports truthfully in $I$, the choice of $A_k$ implies that
$$
\ensuremath{v}_k(A_k) - p_k(A_k) \ge \ensuremath{v}_k(S) - p_k(S) \qquad \forall S \subseteq M.
$$
In particular, for the bundle $\hat{A}_k$ this implies that
\begin{equation}\label{eq:pos-add}
\ensuremath{v}_k(A_k) - \ensuremath{v}_k(\hat{A}_k) - p_k(A_k \setminus \hat{A}_k) \ge 0.
\end{equation}
Further, note that the cost shares of all items in $A_i$ and $\hat{A}_k$ remain the same as in iteration $i$ in the runs on $I$ and $\hat{I}$, respectively. Exploiting that $\hat{A}_k \subseteq A_i$ and that the cost shares in iteration $i$ are the same in both runs, we conclude that $p_k(\hat{A}_k) = \hat{p}_k(\hat{A}_k)$.
We obtain
\begin{align*}
\ensuremath{v}_k(A_k) - p_k(A_k)
& =\ensuremath{v}_k(\hat{A}_k) - p_k(\hat{A}_k) + [\ensuremath{v}_k(A_k) - \ensuremath{v}_k(\hat{A}_k) - p_k(A_k \setminus \hat{A}_k)] \\
& \ge \ensuremath{v}_k(\hat{A}_k) - {p}_k(\hat{A}_k) =\ensuremath{v}_k(\hat{A}_k) - \hat{p}_k(\hat{A}_k),
\end{align*}
where the first equality holds because $p_k$ is additive and the inequality follows from \eqref{eq:pos-add}.
The claim now follows because $k \in Q$.
\end{proof}
\subsection{Budget Balance and Social Cost Approximation}
\label{subsec:sum}
We start by providing the budget balance performance of the mechanism.
\begin{lemma}\label{lem:bb}
Mechanism $\text{IACSM}$ is $\alpha$-budget balanced.
\end{lemma}
\begin{proof}
Let the player order induced by $\text{IACSM}$ be $\tau = (1, \dots, n)$. Fix an arbitrary item $j$ and let $\tau_j$ be the trace of item $j$. Let $T_j$ be the final set of players allocated to item $j$ and recall that $R_j^k = T_j$ for $k = |\tau_j|$.
By the definition of the cost sharing function $\chi_j$ in \eqref{eq:max-avg-csr}, there is some set $S = R^{\ell}_j$ with $\ell \in \set{0, \dots, k}$ such that the cost share of item $j$ is
\begin{equation}\label{eq:alpha-bb}
\chi_j(T_j) = \frac{c_j(R^\ell_{j})}{n-\ell} \ge \frac{c_j(R^k_{j})}{n-k} = \frac{c_j(T_j)}{|T_j|}.
\end{equation}
Summing over all players in $T_j$, we obtain
\begin{equation}\label{eq:alpha-p}
c_j(T_j)
\leq \sum_{i\in T_j}\chi_j(T_j)=|T_j|\cdot \frac{c_j(S)}{|S|} \leq |T_j|\cdot \alpha \frac{c_j(T_j)}{|T_j|}={\alpha} \cdot c_j(T_j),
\end{equation}
where the second inequality holds because $c_j$ is $\alpha$-average-decreasing and $T_j \subseteq S$.
Finally, summing inequality \eqref{eq:alpha-p} over all items $j \in M$ we obtain
\begin{equation}\label{eq:alpha-tbb}
\sum_{j \in M}c_j(T_j)
\leq \sum_{j \in M}\sum_{i\in T_j}\chi_j(T_j)
\leq \alpha\sum_{j \in M}c_j(T_j),
\end{equation}
which proves the claim.
\end{proof}
\noindent
We now show that our mechanism $\text{IACSM}$ is $2\alpha^3 H_n$-approximate with respect to the social cost objective for symmetric submodular valuation functions.
Let $\ensuremath{A} = (\ensuremath{A}_i)_{i \in N}$ be the allocation computed by the mechanism, where $\ensuremath{A}_i \subseteq M$ is the subset of items that player $i$ receives. As before, without loss of generality we assume that the player order induced by $\text{IACSM}$ is $\tau = (1, \dots, n)$.
Recall that for every item $j \in M$, $\ensuremath{T}_j = \sset{i \in N}{j \in \ensuremath{A}_i}$ is the final set of players that receive item $j$.
We also use $\ensuremath{T}^i_j$ to refer to the subset of players who are allocated to item $j$ at the beginning of iteration $i$. Clearly, $\ensuremath{T}^i_j \supseteq \ensuremath{T}_j$ for every player $i$ and item $j$.
We first prove some lemmas which will be helpful later on.
\begin{lemma}\label{lem:final}
Fix an item $j \in M$ and let $i$ be the first player in $\tau$ such that $j \in \ensuremath{A}_i$. Then $\ensuremath{T}_j = \set{i, \dots, n}$.
\end{lemma}
\begin{proof}
By the choice of $i$, we have that $j \in \ensuremath{A}_i$ and $j \notin \ensuremath{A}_k$ for every player $k < i$.
From Property (P2) it follows that for every $k$ with $i \le k < n$, $\ensuremath{A}_{k} \subseteq \ensuremath{A}_{k+1}$. Thus $j \in \ensuremath{A}_{k}$ for every $i \le k \le n$, which concludes the proof.
\end{proof}
\begin{lemma}\label{lem:opt-bundle}
Consider player $i$ who becomes inactive in iteration $i$.
We have
$$
\ensuremath{v}_i(\ensuremath{A}_i) - \sum_{j \in \ensuremath{A}_i} \chi_j(T_j) \ge
\ensuremath{v}_i(S) - \sum_{j \in S} \chi_j(T_j) \quad \forall S \subseteq M.
$$
\end{lemma}
\begin{proof}
In iteration $i$, the final bundle $\ensuremath{A}_i$ is chosen as the set of items maximizing the utility of player $i$ with respect to the current cost shares, i.e.,
\begin{equation}\label{eq:local-shares}
\ensuremath{v}_i(\ensuremath{A}_i) - \sum_{j \in \ensuremath{A}_i} \chi_j(\ensuremath{T}^i_j) \ge
\ensuremath{v}_i(S) - \sum_{j \in S} \chi_j(\ensuremath{T}^i_j) \quad \forall S \subseteq M.
\end{equation}
Recall that $\ensuremath{T}^i_j$ is the set of players that are allocated to item $j$ in iteration $i$.
Note that by Lemma~\ref{lem:final}, $\ensuremath{T}^i_j = \ensuremath{T}_j$ for every $j \in \ensuremath{A}_i$. Further, $\ensuremath{T}^i_j \supseteq \ensuremath{T}_j$ for every $j \in M \setminus \ensuremath{A}_i$ as additional players might withdraw from $j$ in subsequent iterations. Note that the final set $T_j$ is reached from $T_j^i$ by following the trace $\tau_j$ of item $j$. The claim now follows from the trace-monotonicity of $\chi_j$ (Property (P1)).
\end{proof}
\begin{lemma}\label{lem:acost-share-bound}
Consider player $i$ who becomes inactive in iteration $i$.
For every item $j \in M$,
$$
\chi_j(\ensuremath{T}^i_j) \le {\alpha}\frac{c_j(\set{i, \dots, n})}{n-i+1}.
$$
\end{lemma}
\begin{proof}
Fix some $j \in M$.
In iteration $i$, we have $\ensuremath{T}^i_j \supseteq \set{i, \dots, n}$.
Let $k$ be such that $\ensuremath{T}^i_j = R^k_j$ and recall that $R_j^\ell \supseteq R_j^k$ for every $\ell \in \set{0, \dots, k}$.
We obtain
\begin{equation}\label{eq:acost-share-bound}
\chi_j(\ensuremath{T}^i_j) = \max_{\ell \in \set{0, \dots, k}} \frac{c_j(R^\ell_j)}{n-\ell} \le {\alpha}\frac{c_j(\set{i, \dots, n})}{n-i+1},
\end{equation}
where the inequality holds because $c_j$ is $\alpha$-average decreasing.
\end{proof}
\begin{lemma}\label{lem:adiffer-2}
Let $c$ be an $\alpha$-average decreasing cost function.
Let $S, T \subseteq N$ be arbitrary subsets with $|S| \le |T|$.
Then $c(S) \le {2}{\alpha}c(T)$.
\end{lemma}
\begin{proof}
Assume for the sake of a contradiction that $c(S) > {2}{\alpha}c(T)$. Consider the set $S \cup T$ and note that $|S \cup T| \le 2|T|$. Because $c$ is non-decreasing, we have $c(S \cup T) \ge c(S) > {2}{\alpha} c(T)$. Using that $c$ is $\alpha$-average-decreasing, we obtain $\frac{c(S \cup T)}{|S \cup T|} \le {\alpha}\frac{c(T)}{|T|}$, which implies that $c(S \cup T) \le {\alpha}\frac{|S \cup T|}{|T|} c(T) \le {2}{\alpha} c(T)$, a contradiction.
\end{proof}
We are now ready to prove the approximation guarantee.
\begin{lemma}
Mechanism $\text{IACSM}$ is $2\alpha^3 H_n$-approximate.
\end{lemma}
\begin{proof}
Let $\ensuremath{A}^* = (\ensuremath{A}^*_1, \dots, \ensuremath{A}^*_n)$ be an optimal allocation and let $\ensuremath{T}^*_j$ be the respective set of players that receive item $j$ in $\ensuremath{A}^*$.
We have
\begin{align*}
\pi(\ensuremath{A})
& = \sum_{i \in N} \big(\ensuremath{v}_i(M) - \ensuremath{v}_i(\ensuremath{A}_i)\big) + \sum_{j \in M} c_j(\ensuremath{T}_j) \\
& \leq \sum_{i \in N} \ensuremath{v}_i(M) - \sum_{i \in N} \bigg(\ensuremath{v}_i(\ensuremath{A}_i) - \sum_{j \in \ensuremath{A}_i} \chi_j(\ensuremath{T}_j) \bigg) \\
& \le \sum_{i \in N} \ensuremath{v}_i(M) - \sum_{i \in N} \bigg(\ensuremath{v}_i(\ensuremath{A}^*_i) - \sum_{j \in \ensuremath{A}^*_i} \chi_j(\ensuremath{T}^i_j) \bigg) \\
& = \sum_{i \in N} \big(\ensuremath{v}_i(M) - \ensuremath{v}_i(\ensuremath{A}^*_i)\big) + \sum_{i \in N} \sum_{j \in \ensuremath{A}^*_i} \chi_j(\ensuremath{T}^i_j),
\end{align*}
where the first inequality holds because the cost shares cover the cost of each item, i.e., $c_j(\ensuremath{T}_j) \le |\ensuremath{T}_j| \cdot \chi_j(\ensuremath{T}_j)$ by \eqref{eq:alpha-bb}, and the second inequality follows from \eqref{eq:local-shares} in the proof of Lemma~\ref{lem:opt-bundle}, together with the fact that $\chi_j(\ensuremath{T}^i_j) = \chi_j(\ensuremath{T}_j)$ for every $j \in \ensuremath{A}_i$ (see Lemma~\ref{lem:final}).
The proof follows if we can show that
\begin{equation}\label{eq:cs-bound}
\sum_{i \in N} \sum_{j \in \ensuremath{A}^*_i} \chi_j(\ensuremath{T}^i_j) \le {2}{\alpha^3}H_n \sum_{j \in M} c_j(\ensuremath{T}^*_j).
\end{equation}
We use a charging argument to prove \eqref{eq:cs-bound}. Fix some item $j \in M$ and order the players in $\ensuremath{T}^*_j$ according to the player order $\tau = (1, \dots, n)$ induced by $\text{IACSM}$; let $\ensuremath{T}^*_j = \set{i_1, \dots, i_{k^*_j}}$ be the ordered set with $k^*_j := |\ensuremath{T}^*_j|$. We now ``tag'' each player $i$ in $\ensuremath{T}^*_j$ with a fraction of the cost $c_j(\ensuremath{T}^*_j)$ for item $j$ as follows: For the $l$th player $i = i_l$ in $\ensuremath{T}^*_j$ with $1 \le l \le k^*_j$, define
\begin{equation}\label{eq:tag}
\ensuremath{\textsf{tag}}_i(j) := \frac{c_j(\ensuremath{T}^*_j)}{k^*_j-l+1}.
\end{equation}
That is, the first player $i_1$ in $\ensuremath{T}^*_j$ is tagged with $c_j(\ensuremath{T}^*_j)/k^*_j$, the second player $i_2$ with $c_j(\ensuremath{T}^*_j)/(k^*_j-1)$ and so forth, and the last player $i_{k_j^*}$ is tagged with $c_j(\ensuremath{T}^*_j)$.
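The tagging scheme is easy to make concrete; the following sketch (the dictionary-based representation is ours) computes the tags of the players in $\ensuremath{T}^*_j$ and illustrates that they sum to $c_j(\ensuremath{T}^*_j)\cdot H_{|\ensuremath{T}^*_j|}$.
\begin{verbatim}
# Sketch of the tagging scheme (illustrative only; names are ours).
def tags_for_item(c_star, players_in_order):
    """c_star           -- the cost c_j(T*_j)
       players_in_order -- the players of T*_j, listed in the order induced by
                           the mechanism (the l-th player gets c_star/(k-l+1))."""
    k = len(players_in_order)
    return {i: c_star / (k - l) for l, i in enumerate(players_in_order)}

tags = tags_for_item(6.0, ['a', 'b', 'c'])  # tags 2.0, 3.0, 6.0; sum = 6.0 * H_3
print(tags, sum(tags.values()))
\end{verbatim}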
We first derive two lower bounds on the tagged cost:
\begin{claim}\label{claim:tag-bound}
For every player $i \in N$ and for every item $j \in \ensuremath{A}^*_i$:
$$
\ensuremath{\textsf{tag}}_i(j) \ge \frac{c_j(\ensuremath{T}^*_j)}{n-i+1} \qquad\text{and}\qquad \ensuremath{\textsf{tag}}_i(j) \ge \frac{c_j(\ensuremath{T}^*_j)}{|T^*_j|}.
$$
\end{claim}
\begin{proof}
The latter bound holds by definition \eqref{eq:tag}.
To see that the former bound holds, observe that the $k$th last player ($1 \le k \le k^*_j$) in the ordered set $\ensuremath{T}^*_j$ is tagged by $c_j(\ensuremath{T}^*_j)/k$. The claim now follows because there are at most $n-i$ players succeeding $i$ in $\ensuremath{T}^*_j$ according to the order.
\end{proof}
Note that the total tagged cost of item $j$ satisfies
\begin{equation}\label{eq:total-tag}
\sum_{i \in \ensuremath{T}^*_j} \ensuremath{\textsf{tag}}_i(j) = \sum_{l = 1}^{k^*_j} \frac{c_j(\ensuremath{T}^*_j)}{k^*_j-l+1} \le H_n c_j(\ensuremath{T}^*_j).
\end{equation}
Thus, to prove \eqref{eq:cs-bound} it suffices to show that the total cost share sum is upper bounded by the total tagged cost, i.e.,
\begin{equation}\label{eq:acs-bound2}
\sum_{i \in N} \sum_{j \in \ensuremath{A}^*_i} \chi_j(\ensuremath{T}^i_j) \le {2}{\alpha^3}\sum_{j \in M} \sum_{i \in \ensuremath{T}^*_j} \ensuremath{\textsf{tag}}_i(j).
\end{equation}
We show that for every $i$ and every $j \in \ensuremath{A}^*_i$, $\chi_j(\ensuremath{T}^i_j) \le {2}{\alpha^3}\, \ensuremath{\textsf{tag}}_i(j)$.
Summing over all $i \in N$ and $j \in \ensuremath{A}^*_i$ then proves \eqref{eq:acs-bound2}, which together with \eqref{eq:total-tag} implies \eqref{eq:cs-bound}.
We distinguish two cases:
Case 1: $|\ensuremath{T}^*_j| \ge n-i+1$: Let $S \subseteq \ensuremath{T}^*_j$ be a set such that $|S| = n-i+1$. We have
\begin{equation}\label{eq:asc-c1}
\chi_j(\ensuremath{T}^i_j) \leq {\alpha} \frac{c_j(\set{i, \dots, n})}{n-i+1}
\leq {2}{\alpha^2}\frac{c_j(S)}{|S|}
\leq {2}{\alpha^2}\frac{c_j(\ensuremath{T}^*_j)}{n-i+1}
\leq {2}{\alpha^2}\ensuremath{\textsf{tag}}_i(j) ,
\end{equation}
where the first inequality follows from Lemma~\ref{lem:acost-share-bound}, the second inequality follows from Lemma~\ref{lem:adiffer-2}, the third inequality holds because $c_j$ is non-decreasing and the last inequality follows from Claim~\ref{claim:tag-bound}.
Case 2: $|\ensuremath{T}^*_j| < n-i+1$:
Let $S \supset \ensuremath{T}^*_j$ be a set such that $|S| = n-i+1$.
We have
\begin{equation}\label{eq:asc-c2}
\chi_j(\ensuremath{T}^i_j)
\leq {\alpha} \frac{c_j(\set{i, \dots, n})}{n-i+1}
\leq {2}{\alpha^2}\frac{c_j(S)}{|S|}
\leq {2}{\alpha^3}\frac{c_j(\ensuremath{T}^*_j)}{|\ensuremath{T}^*_j|}
\leq {2}{\alpha^3}\ensuremath{\textsf{tag}}_i(j) ,
\end{equation}
where the first inequality follows from Lemma~\ref{lem:acost-share-bound}, the second inequality follows from Lemma~\ref{lem:adiffer-2}, the third inequality holds because $c_j$ is $\alpha$-average-decreasing and the last inequality follows from Claim~\ref{claim:tag-bound}.
This concludes the proof.
\end{proof}
\section{Mechanisms for General Valuations and Subadditive Cost Functions}\label{sec:FPM}
In this section, we move away from symmetric submodular valuation functions and derive results for more general functions. In particular, we investigate the performance of the {\em Sequential Mechanism} \cite{Moulin99} for general valuations and subadditive cost functions. Although for arbitrary subadditive cost functions this mechanism does not provide favorable approximation guarantees, we identify conditions on the cost functions under which it achieves significantly better approximation factors. This is based on considering a different parameterization of cost functions with regard to their average cost shares.
\subsection{The Sequential Mechanism}\label{subsec:Seq}
The \emph{Sequential Mechanism (SM)} was introduced by Moulin \cite{Moulin99} and was also studied in \cite{DobzinskiO17}.
A description of the mechanism SM is given in Algorithm~\ref{fig:alg-3}. We note that this mechanism is applicable both to separable and non-separable cost functions. Here, we first focus on separable cost functions. In Section \ref{subsec:nonsep}, we consider generalizations to the non-separable setting.
\begin{algorithm}[t]
\small
\DontPrintSemicolon
\caption{Sequential Mechanism (SM)} \label{fig:alg-3}
\KwIn{Declared valuation functions $(b_i)_{i \in N}$.}
\KwOut{Allocation $A = (A_i)_{i \in N}$ and payments $p = (p_i)_{i \in N}$.}
\textbf{Initialization:} Fix an order on the set of players $N = \set{1, \dots, n}$.
\For{$i = 1, \dots, n$}{
Compute an \emph{optimal bundle} $A_i$ for player $i$:
\vspace*{-1ex}
\begin{align*}\label{eq:opt-choice}
A_i & \in \arg\max_{S \subseteq M} \{b_i(S) - p_i(S)\}, \quad \text{where} \\
& \qquad p_i(S) = C(A_1, \dots, A_{i-1}, S, \emptyset, \dots, \emptyset) - C(A_1, \dots, A_{i-1}, \emptyset, \dots, \emptyset).
\end{align*}
(If there are multiple optimal bundles, choose the lexicographically smallest one.) \;
}
\Return $A = (A_i)_{i \in N}$ and $p = (p_i)_{i \in N}$, where $p_i = p_i(A_i)$.
\end{algorithm}
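For small instances with separable costs, the Sequential Mechanism can be prototyped directly; the following brute-force Python sketch (exponential in $|M|$, and with a simplified tie-breaking rule) is only meant to illustrate Algorithm~\ref{fig:alg-3}, not to serve as an efficient implementation.
\begin{verbatim}
# Brute-force sketch of the Sequential Mechanism for separable costs
# (illustrative only; exponential in the number of items).
from itertools import combinations

def sequential_mechanism(valuations, item_costs, players, items):
    """valuations -- dict: player -> (frozenset of items -> value)
       item_costs -- dict: item -> (frozenset of players -> cost)"""
    taken = {j: set() for j in items}        # players already committed to item j
    bundles = [frozenset(c) for r in range(len(items) + 1)
               for c in combinations(items, r)]
    allocation, payments = {}, {}
    for i in players:                        # fixed player order
        def price(S):                        # incremental (separable) cost of S for i
            return sum(item_costs[j](frozenset(taken[j] | {i}))
                       - item_costs[j](frozenset(taken[j])) for j in S)
        best = max(bundles, key=lambda S: valuations[i](S) - price(S))
        allocation[i], payments[i] = best, price(best)
        for j in best:
            taken[j].add(i)
    return allocation, payments
\end{verbatim}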
It is trivial to see that SM is budget-balanced and it is also known that it is WGSP \cite{DobzinskiO17}. However, for arbitrary monotone subadditive cost functions, the mechanism achieves a (poor) social cost approximation guarantee of $n$ only (see \cite{DobzinskiO17}).
\begin{theorem}\cite{DobzinskiO17}
Suppose we have general valuation functions and non-decreasing subadditive cost functions. Then the Sequential Mechanism satisfies IR, NPT, WGSP, and is budget balanced and $n$-approximate.
\end{theorem}
Despite this, we show that SM has better guarantees under certain conditions. Namely, we identify a crucial parameter of each cost function $c_j$ with $j \in M$ that allows us to quantify this improvement. The parameterization introduced here is different from the one used in Section \ref{sec:mech} and it compares the average cost of a set $T \subseteq N$ with the minimum standalone cost of a player in $T$. More specifically, we define the following property:
\begin{definition}
\label{def:cmin}
A cost function $c: 2^N \rightarrow \mathbb{R}_{\ge 0}$ is \emph{$\alpha$-average min-bounded} for some $\alpha \ge 1$ if for every non-empty set $T \subseteq N$ we have $\alpha\cdot \frac{c(T)}{|T|} \geq c_{\min}$, where $c_{\min} = \min_{j\in T}c(\{j\})$.
\end{definition}
Definition \ref{def:cmin} may look somewhat contrived at first glance and we thus provide some more intuition on how we arrived at this parameterization.
Given that IACSM performs well for $\alpha$-average decreasing functions with small values of $\alpha$ (see Section~\ref{sec:mech}), it is natural to focus on the complement of this class. For example, fix $\alpha = 1$ for now. The exact complement is not easy to characterize because it involves two existential quantifiers. We therefore consider a subset of this complement (with only one existential quantifier) by demanding that for every $T$, there exists $S \subseteq T$ such that $c(S)/|S| < c(T)/|T|$. It is not hard to verify that this definition is equivalent to the class of 1-average min-bounded functions. For larger values of $\alpha$, the $\alpha$-average min-bounded functions again capture a chunk of the complement of the $\alpha$-average-decreasing functions. Thus, a positive result for $\alpha$-average min-bounded functions narrows down the class of cost functions that are not yet known to admit good approximation guarantees.
Note that for every cost function we can find an $\alpha \ge 1$ such that it is $\alpha$-average min-bounded. As the next theorem reveals, the Sequential Mechanism attains a favorable performance for small values of $\alpha$.
\begin{theorem}\label{thm:ultra}
Suppose we have general valuation functions and for each item $j\in M$, the cost function $c_j:2^N \rightarrow\mathbb{R}_{\geq 0}$ is non-decreasing, subadditive, and $\alpha$-average min-bounded for some $\alpha\geq 1$. Then the Sequential Mechanism satisfies IR, NPT, WGSP, and is budget balanced and $\alpha\cdot H_n$-approximate.
\end{theorem}
For the proof of Theorem \ref{thm:ultra}, we use the following proposition:
\begin{proposition}\label{prop:betterg}
If $c: 2^N \rightarrow \mathbb{R}_{\ge 0}$ is non-decreasing and $\alpha$-average min-bounded, then $\sum_{i \in T}c(\{i\}) \leq \alpha H_{|T|} \cdot c(T)$ for every $T \subseteq N$.
\end{proposition}
\begin{proof}
Fix $T\subseteq N$ and rename the players of $T$ so that they are in decreasing order with respect to the standalone cost, i.e., for any $i,j \in T$ with $ i<j$, it holds $c(\{i\}) \geq c(\{j\})$. For convenience, we may assume that $T = \{1, 2, \dots, |T|\}$. Fix some $i \in T$ and consider the set $\{1,2,\dots,i\} \subseteq T$. Note that in this set player $i$ has the minimum standalone cost $c(\{i\})$. Hence, by Definition \ref{def:cmin}, the cost function satisfies:
$$
c(\{i\})\leq \alpha\cdot \frac{c(\{1,\dots,i\})}{i} \leq \alpha\cdot \frac{c(T)}{i},
$$
where the last inequality holds because $c$ is non-decreasing.
The above inequality holds for any $i \in T$. Summing over all players $i\in T$ proves the claim.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:ultra}]
We only need to prove that SM is $\alpha H_n$-approximate. All the other properties have been established in \cite{DobzinskiO17,Moulin99}.
Let $\ensuremath{A}=(\ensuremath{A}_i)_{i \in N}$ be the allocation output by the mechanism and let $\ensuremath{A}^* = (\ensuremath{A}^*_i)_{i \in N}$ be an optimal allocation. Further, let $\ensuremath{T}^*_j$ be the respective set of players that receive item $j$ in $\ensuremath{A}^*$.
To simplify notation in the analysis, we also let $A_{<i}$ denote the tuple $(A_1, \dots, A_{i-1}, \emptyset, \dots, \emptyset)$. Define now the incremental cost of a player $i$ for a bundle $S\subseteq M$, with respect to the allocation constructed by the Sequential Mechanism before $i$'s turn as $\Delta_i(A_{<i}, S) = C(A_1, \dots, A_{i-1}, S, \emptyset, \dots, \emptyset) - C(A_1, \dots, A_{i-1}, \emptyset, \dots, \emptyset)$.
We have
\begin{align*}
\pi(\ensuremath{A})
& = \sum_{i \in N} \big[\ensuremath{v}_i(M) - \ensuremath{v}_i(\ensuremath{A}_i)\big] + C(A)\\
&= \sum_{i \in N} \ensuremath{v}_i(M) - \sum_{i \in N}\big[ \ensuremath{v}_i(\ensuremath{A}_i) - \Delta_i(A_{<i}, A_i) \big] \\
& \le \sum_{i \in N} \ensuremath{v}_i(M) - \sum_{i \in N}\big[ \ensuremath{v}_i(\ensuremath{A}^*_i) - \Delta_i(A_{<i}, A^*_i) \big] \\
&= \sum_{i \in N} \big[\ensuremath{v}_i(M) - \ensuremath{v}_i(\ensuremath{A}^*_i)\big] + \sum_{i \in N} \Delta_i(A_{<i}, A^*_i).
\end{align*}
Note that the inequality holds because $A_i$ was chosen as the optimal bundle for $i$. The next step is to prove a bound on the incremental costs in the form
\begin{equation}
\label{eq:icb}
\sum_{i \in N} \Delta_i(A_{<i}, A^*_i) \le \beta \cdot C(A^*).
\end{equation}
The proof follows if we can show that \eqref{eq:icb} holds for $\beta = \alpha H_n$ because we then have
$$ \pi(\ensuremath{A}) \le \sum_{i \in N} \big[\ensuremath{v}_i(M) - \ensuremath{v}_i(\ensuremath{A}^*_i)\big] + \alpha\cdot H_n C(A^*) \le \alpha H_n\cdot \pi(A^*). $$
By exploiting the subadditivity of the cost functions $c_j$, we obtain
\begin{align*}
\Delta_i(A_{<i}, A^*_i)
& = C(A_{<i}, A^*_{i}) - C(A_{<i})
\le C(A_{<i}) + C(A^*_{i}, \emptyset_{-i}) - C(A_{<i})
= \sum_{j\in A^*_i} c_j(\set{i}).
\end{align*}
Summing over all $i\in N$, and using Proposition \ref{prop:betterg}, we get:
$$\sum_{i \in N} \Delta_i(A_{<i}, A^*_i) \le \sum_{i\in N} \sum_{j\in A^*_i} c_j(\set{i}) = \sum_{j\in M} \sum_{i\in T^*_j} c_j(\set{i}) \le \sum_{j\in M} \alpha H_{|T_j^*|} c_j(T_j^*) \le \alpha H_n C(A^*)$$
\end{proof}
By going through the proof of Theorem \ref{thm:ultra} more carefully, we realize the following:
\begin{remark}
\label{rem:gen}
For any subclass of non-decreasing, subadditive cost functions, it suffices to establish inequality \eqref{eq:icb} to prove that the Sequential Mechanism has a social cost approximation guarantee of $\beta$.
\end{remark}
We next prove that for $\alpha = 1$ the approximation factor is tight.
\begin{proposition}\label{prop:tight}
Even for the single item setting, there exists a $1$-average min-bounded cost function for which the Sequential Mechanism does not achieve an approximation factor better than $H_n$.
\end{proposition}
\begin{proof}
Consider a set $N=\{p_1, p_2, \dots, p_n\}$ of players and the following function $c:2^N \rightarrow\mathbb{R}_{\geq 0}$:
\begin{align}
c(S) = \begin{cases}
0 & \text{if $S=\emptyset$} \\
\frac{k}{j} & \text{if $S=\{p_j\}$} \\
\min\{k, \sum_{p_j \in S}c(\{p_j\})\} & \text{if $|S|\geq 2$}
\end{cases}
\end{align}
where $k > 0$ and $S \subseteq N$. We have to show that the above function is subadditive, non-decreasing, and $1$-average min-bounded, and that on this instance the \emph{Sequential Mechanism} cannot do better than an $H_n$-approximation.
\noindent
\textbf{Subadditivity:} Initially notice that if $p_1 \in S \subseteq N$, then $c(S)=k$. Now consider the non-empty sets $A,B \subseteq N$.
\begin{itemize}\itemsep0pt
\item If $p_1 \in A \cup B$, then $p_1$ is in at least one of $A,B$; say $p_1 \in A$. Then $c(A\cup B) = k = c(A) \leq c(A)+c(B)$.
\item If $p_1 \notin A \cup B$, then
\begin{align*}
c(A\cup B)
&=\min\{k, \sum_{p_j \in A\cup B}c(\{p_j\})\}\\
&\leq
\min\{k, \sum_{p_j \in A}c(\{p_j\})+ \sum_{p_j \in B}c(\{p_j\})\}\\
&\leq \min\{k, \sum_{p_j \in A}c(\{p_j\})\}+ \min\{k, \sum_{p_j \in B}c(\{p_j\})\}\\
&=c(A)+c(B)
\end{align*}
\end{itemize}
\noindent
\textbf{Non-decreasingness:} Consider the sets $A\subseteq B \subseteq N$.
\begin{itemize}\itemsep0pt
\item If $p_1 \in A \Rightarrow p_1 \in B \Rightarrow c(A)=k=c(B)$.
\item If $p_1 \notin A$ and $p_1 \in B$, then $c(A)=\min\{k, \sum_{p_j \in A}c(\{p_j\})\} \leq k=c(B)$.
\item If $p_1 \notin A$ and $p_1 \notin B$, then $c(A)=\min\{k, \sum_{p_j \in A}c(\{p_j\})\} \leq \min\{k, \sum_{p_j \in B}c(\{p_j\})\}=c(B)$.
\end{itemize}
\noindent
\sloppy
\textbf{Min-bounded average costs:}
Consider a non-empty set of players $A \subseteq N$ and the set $B=\{p_1, p_2, \dots, p_{|A|}\}$.
\begin{itemize}\itemsep0pt
\item If $p_1 \in A$, then $c(A)=k$. Now notice that $\frac{c(A)}{|A|}=\frac{k}{|A|}=\min_{p_j \in B}c(\{p_j\}) \geq \min_{p_j \in A}c(\{p_j\})$.
\item If $p_1 \notin A$, then either $c(A)=k$, and thus $\frac{c(A)}{|A|} \geq \min_{p_j \in A}c(\{p_j\})$ as before, or $c(A)=\sum_{p_j \in A}c(\{p_j\})$, and thus $\frac{c(A)}{|A|}=\frac{\sum_{p_j \in A}c(\{p_j\})}{|A|} \geq \min_{p_j \in A}c(\{p_j\})$.
\end{itemize}
\fussy
\noindent
\textbf{Approximation of the Sequential Mechanism:} Consider an instance where the value each player $p_j$ has for the item is $v_{p_j}=\frac{k}{j}-\epsilon$, for arbitrarily small $\epsilon>0$. Suppose now that $A^*$ is the optimal allocation, $A'$ is the allocation where every player is served, and $A$ is the output of the Sequential Mechanism. It is easy to see that in $A$ no player is served: when player $p_j$ is considered, no earlier player has been served, so $p_j$ faces a price of $c(\{p_j\})=\frac{k}{j}>v_{p_j}$ and declines. We have that $\pi(A^*) \leq \pi(A')=k$, while
\begin{align*}
\pi(A)=\ 0+\sum_{p_j \in N}\frac{k}{j}-n\epsilon=kH_n-n\epsilon.
\end{align*}
Since $\epsilon$ is arbitrarily small, the approximation factor cannot be better than $H_n$.
\end{proof}
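The gap exhibited in the proof above is easy to reproduce numerically; the following snippet (the parameter choices are ours) compares the social cost of the empty allocation returned by the Sequential Mechanism with the cost of serving everybody.
\begin{verbatim}
# Numerical illustration of the instance from the proposition above
# (illustrative only; parameter choices are ours).
def harmonic(n):
    return sum(1.0 / j for j in range(1, n + 1))

k, n, eps = 1.0, 1000, 1e-9
# Under SM no player is served: player p_j faces the price c({p_j}) = k/j,
# which exceeds her value k/j - eps.
social_cost_SM = sum(k / j - eps for j in range(1, n + 1))  # = k*H_n - n*eps
social_cost_serve_all = k                                   # upper-bounds the optimum
print(social_cost_SM / social_cost_serve_all, harmonic(n))
\end{verbatim}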
\subsection{Improved Approximation Guarantees and Applications}\label{subsec:App}
We continue with a natural refinement of Definition \ref{def:cmin} which turns out to provide even better approximation factors of the Sequential Mechanism.
\begin{definition} \label{def:cmax}
A cost function $c: 2^N \rightarrow \mathbb{R}_{\ge 0}$ is \emph{$\alpha$-average max-bounded} for some $\alpha \ge 1$ if for every non-empty set $T \subseteq N$ we have $\alpha \cdot \frac{c(T)}{|T|} \geq c_{\max}$, where $c_{\max} = \max_{j\in T}c(\{j\})$.
\end{definition}
Clearly, any function that is $\alpha$-average max-bounded is also $\alpha$-average min-bounded.
Thus, we already have an $\alpha H_n$-approximation for non-decreasing, subadditive and $\alpha$-average max-bounded cost functions. Below we show that we can achieve a much better guarantee.
\begin{theorem}\label{thm:gmax}
Suppose we have general valuation functions and for each item $j\in M$, the cost function $c_j:2^N \rightarrow\mathbb{R}_{\geq 0}$ is non-decreasing, subadditive, and $\alpha$-average max-bounded for some $\alpha \geq 1$. Then the Sequential Mechanism satisfies IR, NPT, WGSP, and is budget-balanced and $\alpha$-approximate.
\end{theorem}
\begin{proof}
We only need to prove that the mechanism is $\alpha$-approximate.
By following exactly the same reasoning as in the proof of Theorem \ref{thm:ultra} and using the observation made in Remark \ref{rem:gen} (note that $c_j$ is subadditive by assumption), we only need to prove that \eqref{eq:icb} holds for $\beta = \alpha$.
Exploiting the properties of the cost functions, we obtain
$$\sum_{i \in N} \Delta_i(A_{<i}, A^*_i) \le \sum_{i\in N} \sum_{j\in A^*_i} c_j(\set{i}) \le \sum_{j\in M} \sum_{i\in T^*_j} \alpha \frac{c_j(T_j^*)}{|T_j^*|} = \alpha \sum_{j\in M} c_j(T_j^*) \le \alpha C(A^*).
$$
\end{proof}
\myheader{Example applications of combinatorial cost functions}
We give some examples of combinatorial cost functions below and show that they are $\alpha$-average max-bounded (possibly depending on some parameters of the combinatorial problem). In particular, by applying Theorem~\ref{thm:gmax} we obtain attractive social cost approximation guarantees for these problems.
For simplicity, all examples consider a single item only; but clearly, we can consider more general multiple item settings (e.g., when for each item $j \in M$, $c_j$ captures one of the problems below).
\begin{enumerate}
\item \textbf{Set Cover.} We are given a universe of elements $U$ and a family $\mathcal{F} \subseteq 2^U$ of subsets of $U$. The players correspond to the elements of $U$ and the cost $c(S)$ for serving a set of players $S \subseteq U$ is defined as the size of a minimum cardinality set cover for $S$.
\item \textbf{Vertex Cover.} This is a special case of Set Cover. We are given an undirected and unweighted graph $G = (V, E)$ and the players are the edges of the graph. The cost $c(S)$ for serving a set $S \subseteq E$ of players is defined as the size of a minimum vertex cover in the subgraph induced by $S$.
\item \textbf{Matching.} We are given an undirected and unweighted graph $G = (V, E)$ and the players correspond to the edges. The cost $c(S)$ for serving a set $S$ of players is defined as the size of a maximum cardinality matching in the subgraph induced by $S$.
\end{enumerate}
Using our $\alpha$-average max-bounded notion, it is now easy to prove that these problems admit constant social cost approximation guarantees (under certain restrictions).
\begin{theorem}\label{thm:appl}
The Sequential Mechanism is $\alpha$-approximate for the above problems, where
\begin{compactenum}
\item $\alpha = d$ for the Set Cover problem, where $d$ is the maximum cardinality of the sets in $\mathcal{F}$;
\item $\alpha = k$ for the Vertex Cover problem in graphs of maximum degree $k$;
\item $\alpha = k$ for the Matching problem in bipartite graphs of maximum degree $k$;
\item $\alpha = (5k+3)/4$ for the Matching problem in general graphs of maximum degree $k$.
\end{compactenum}
\end{theorem}
\begin{proof} We have the following:
\begin{enumerate}
\item For Set Cover, it is trivial to check that the cost function is subadditive. Consider a subset $T$ of the players. Note that for a single player $i\in T$, $c(\{i\}) = 1$, since each element can be covered by a single subset. Given that the maximum cardinality of a subset is $d$, any set cover for the set of players $T$ has size at least $|T|/d$. We conclude that $c$ is $d$-average max-bounded.
\item Vertex Cover is a special case of Set Cover. Consider a subset $T\subseteq E$ of the edges. Given that the maximum degree is $k$, any vertex cover for the set of edges $T$ has size at least $|T|/k$. We conclude that this cost function is $k$-average max-bounded.
\item The cost function in the Maximum Matching problem is also subadditive. In bipartite graphs, by K\H{o}nig's theorem, the cardinality of a maximum matching equals the cardinality of a minimum vertex cover; hence, as in the Vertex Cover case, the cost function is $k$-average max-bounded, which immediately implies a $k$-approximation.
\item For general graphs, note again that $c(\{i\}) = 1$ for a single player $i\in E$. By the work of \cite{Han08}, we know that in a graph with $m$ edges and maximum degree $k$ there always exists a matching of size at least $4m/(5k+3)$. Hence for a set of players $T\subseteq E$, we have that $c(T)/|T| \geq 4/(5k+3)$. We conclude that the cost function $c$ is $(5k+3)/4$-average max-bounded.
\end{enumerate}
\end{proof}
We compare these bounds with the existing results in the literature:
For Vertex Cover, there is a mechanism that is $2$-budget-balanced and $O(\log{n})$-approximate \cite{MehtaRS09}. Thus, for graphs with maximum degree less than $\log{n}$, we obtain a better guarantee.
For Set Cover, there is a mechanism that is $O(\log{n})$-budget-balanced and $O(\log{n})$-approximate \cite{MehtaRS09}. Hence, we obtain an improvement if the sets in $\mathcal{F}$ have small size.
Finally, we note that our results do not apply to the weighted versions of these problems.
\subsection{Guarantees of the Sequential Mechanism for Non-Separable Cost Functions}\label{subsec:nonsep}
We extend our results to non-separable cost functions.
Recall that in this setting, the cost $C(A)$ of an allocation $A = (A_i)_{i \in N}$ is given by some general (not necessarily separable) cost function $C:(2^M)^n \rightarrow \mathbb{R}_{\geq 0}$. In particular, $C$ may encode dependencies among different items.
We introduce some more notation. Given two allocations $S = (S_i)_{i \in N}$ and $T = (T_i)_{i \in N}$, we define $S \cup T$ as the componentwise union of $S$ and $T$, i.e., $S \cup T = (S_1 \cup T_1, \dots, S_n \cup T_n)$. Similarly, we write $S \subseteq T$ if this relation holds componentwise, i.e., $S_i \subseteq T_i$ for every $i \in N$.
Given an allocation $A = (A_i)_{i \in N}$ and a set of players $S \subseteq N$, we define $A|_{S} = (A_S, \emptyset_{-S})$ as the allocation in which each player $i \in S$ receives the items in $A_i$ and all other players receive nothing.
If $S = \set{i}$ is a singleton set, we also write $A|_i$ instead of $A|_{\set{i}}$.
Throughout this section, we remain in the domain of non-decreasing and subadditive cost functions. In the non-separable case, a cost function $C:(2^M)^n \rightarrow \mathbb{R}_{\geq 0}$ is non-decreasing if $C(S) \le C(T)$ for every pair of allocations $S, T$, with $S \subseteq T$. Also, it is subadditive if for every two allocations $S = (S_i)_{i \in N}$ and $T = (T_i)_{i \in N}$, we have $C(S \cup T) \leq C(S)+C(T)$.
We now adapt Definitions \ref{def:cmin} and \ref{def:cmax} to non-separable cost functions.
\begin{definition}
Let $C:(2^M)^n \rightarrow \mathbb{R}_{\geq 0}$ be a non-separable cost function.
\begin{compactitem}\itemsep0pt
\item $C$ is \emph{$\alpha$-average min-bounded}, if there exists some $\alpha\geq 1$ such that for every allocation $A$ and every subset $T \subseteq N$ with $|T| \ge 2$, it holds $\alpha \frac{C(A|_T)}{|T|} \geq C_{\min}$, where $C_{\min} = \min_{j\in T} C(A|_j)$.
\item $C$ is \emph{$\alpha$-average max-bounded}, if there exists some $\alpha\geq 1$ such that for every allocation $A$ and every subset $T \subseteq N$ with $|T| \ge 2$, it holds $\alpha \frac{C(A|_T)}{|T|} \geq C_{\max}$, where $C_{\max} = \max_{j\in T} C(A|_j)$.
\end{compactitem}
\end{definition}
As before, if a non-separable function is $\alpha$-average max-bounded, then it is also $\alpha$-average min-bounded.
We remark that it has been shown in \cite{Moulin99,DobzinskiO17} that the Sequential Mechanism is weakly group-strategyproof and budget balanced for the non-separable setting. We proceed by adapting Proposition~\ref{prop:betterg} for the non-separable setting.
\begin{proposition}\label{prop:genbetterg}
If $C:(2^M)^n \rightarrow \mathbb{R}_{\geq 0}$ is a non-separable cost function, which is non-decreasing and $\alpha$-average min-bounded, then for every allocation $A$, $\sum_{i \in T} C(A|_i) \leq \alpha H_{|T|} \cdot C(A|_T)$ for every $T \subseteq N$.
\end{proposition}
\begin{proof}
Let $A$ be an allocation and fix $T\subseteq N$.
Rename the players in $T$ such that for any $i, j \in T$ with $i < j$, it holds that $C(A|_i) \ge C(A|_j)$. For convenience, we may assume that $T = \{1, 2, \dots, |T|\}$. Fix some $i \in T$ and consider the set $\set{1, 2, \dots, i} = [i]\subseteq T$. By exploiting that $C$ is $\alpha$-average min-bounded and non-decreasing, we have
$$
C(A|_i) \leq \alpha \frac{C(A|_{[i]})}{i} \leq \alpha\,\frac{C(A|_T)}{i}. $$
Note that the above inequality holds for any $i \in T$.
Summing over all players $i\in T$ proves the claim.
\end{proof}
Now using the same reasoning as in the proof of Theorem \ref{thm:ultra}, we obtain the same approximation guarantee of $\alpha H_n$ as in the separable setting. Further, the improvement we obtained in Theorem \ref{thm:gmax} also goes through in this setting. We summarize these observations in the following corollary.
\begin{corollary}\label{col:seqgen}
Suppose we have general valuation functions and a non-decreasing, subadditive, and $\alpha$-average min-bounded cost function $C:(2^M)^n \rightarrow\mathbb{R}_{\geq 0}$. Then the Sequential Mechanism satisfies IR, NPT, WGSP, and is budget balanced and $\alpha \cdot H_n$-approximate. Furthermore, if $C$ is also $\alpha$-average max-bounded, then the Sequential Mechanism is $\alpha$-approximate.
\end{corollary}
\section{Discussion}\label{sec:dis}
In Section \ref{sec:mech}, we proposed the mechanism IACSM, which is weakly group-strategyproof under general cost functions and symmetric submodular valuations. Moreover it is $\alpha$-budget balanced and $2\alpha^3 H_n$-approximate when we restrict the cost functions to the {$\alpha$-average-decreasing} class. The social cost approximation guarantee further improves to $H_n$ if the cost functions are symmetric submodular and this is best possible (due to the known lower bound for public-excludable goods \cite{DobzinskiMRS18}). It would be very interesting to explore mechanisms that go beyond symmetric submodular valuation functions. It seems that entirely new ideas are needed for this setting. It would also be interesting to extend our mechanism to non-separable cost functions. We note that separability of the costs in Section \ref{sec:mech} is needed for IACSM only to argue that the cost share per item increases as players withdraw (with respect to the trace). One would need to investigate how to adapt the mechanism and enforce this property in the non-separable setting. Technically, this seems far from obvious and we leave a proper treatment of this issue for future work.
In Section \ref{sec:FPM}, we studied the (partially) complementary class of {$\alpha$-average min bounded} cost functions. We showed that the well-known {Sequential Mechanism} is budget balanced and $\alpha H_n$-approximate even for general valuation functions. These results also extend to non-separable cost functions. A very natural question is whether SM is optimal in this setting and we note that the answer is not yet clear: The impossibility result of \cite{DobzinskiMRS18} holds for the public-excludable good cost function which is symmetric submodular and thus {1-average-decreasing}. However, it is not hard to see that this does not fall within the {$\alpha$-average min bounded} class for any constant $\alpha$. This leads to the question of whether there exists a WGSP mechanism that breaks the $\Omega(\log(n))$-approximation in terms of social cost for {$\alpha$-average-min bounded} functions with small values of $\alpha$.
Finally, what we also find very interesting is to identify the class of cost functions for which neither of the two mechanisms studied here performs well. Recall that, for any constant value of $\alpha$, if a cost function is either $\alpha$-average decreasing or $\alpha$-average min-bounded, then a good performance is guaranteed. Thus, we need to look at the complements of the set of $\alpha$-average decreasing functions and of the set of $\alpha$-average min-bounded functions for small values of $\alpha$ and examine whether these complements have a non-empty intersection. The following proposition shows that this intersection is indeed non-empty.
\begin{proposition}\label{prop:intersec}
Given $\alpha\geq 1$, the intersection of the complements of $\alpha$-average-decreasing and $\alpha$-average min-bounded functions is non-empty.
\end{proposition}
\begin{proof}
We begin by defining the complements of the two sets:
\begin{itemize}
\item \emph{Complement of $\alpha$-average-decreasing}: there exist sets $S\subseteq T \subseteq N$ such that $\alpha \frac{c(S)}{|S|}< \frac{c(T)}{|T|}$.
\item \emph{Complement of $\alpha$-average min-bounded}: there exists a set $T \subseteq N$ such that $\alpha \frac{c(T)}{|T|}< \min_{j\in T}c(\{j\})$.
\end{itemize}
We will prove this for the case where there exists only a single item. Consider a set $N=\{p_1, p_2, \dots, p_n\}$ of players and the following function $c:2^N \rightarrow\mathbb{R}_{\geq 0}$:
\begin{align}
c(S) = \begin{cases}
0 & \text{if $S=\emptyset$} \\
\sqrt{j} & \text{if $S=\{p_j\}$} \\
\max_{j \in S}c(\{p_j\}) & \text{if $|S|\geq 2$}
\end{cases}
\end{align}
where $S \subseteq N$. Before we proceed with the proof, notice that this function is also subadditive and by definition non-decreasing.
\begin{itemize}
\item \emph{Complement of $\alpha$-average-decreasing}: Set $S=\{p_2\}$ and $T=\{p_2, p_n\}$. Now notice that $\alpha< \frac{\sqrt{n}}{2} \Rightarrow 2\alpha< \sqrt{n}$, which holds since for any $\alpha \geq 1$ that we choose, we can always find a large enough $n$ for the inequality to be true.
\item \emph{Complement of $\alpha$-average min-bounded}: Set $T=N$, so that $i=p_1$ and $c(\{i\})=1$. Then the required inequality $\alpha \frac{c(T)}{|T|}=\alpha\frac{\sqrt{n}}{n}<1$ holds for every fixed $\alpha\geq 1$ once $n$ is large enough (namely $n>\alpha^2$).
\end{itemize}
Therefore, the intersection of the complements is non-empty and the proof is complete.
\end{proof}
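For concreteness, the construction in the proof is also easy to check numerically. The following short Python snippet is purely illustrative (the function names and the particular choice of $\alpha$ and $n$ are ours and play no role in the argument); it verifies both complement conditions for the cost function $c$ defined above.
\begin{verbatim}
import math

def c(S):
    # Cost of a coalition S (a set of player indices 1..n) in the construction
    # above: c({j}) = sqrt(j), and c(S) is the maximum over the singletons in S.
    if not S:
        return 0.0
    return max(math.sqrt(j) for j in S)

alpha, n = 4, 200  # illustrative values; any fixed alpha works once n is large

# Complement of alpha-average-decreasing: S subset of T with
# alpha*c(S)/|S| < c(T)/|T|.
S, T = {2}, {2, n}
assert alpha * c(S) / len(S) < c(T) / len(T)

# Complement of alpha-average min-bounded: T with alpha*c(T)/|T| < c({i}),
# where i minimises c({j}) over j in T.
T = set(range(1, n + 1))
i = min(T, key=lambda j: c({j}))
assert alpha * c(T) / len(T) < c({i})

print("both complement conditions hold for alpha =", alpha, "and n =", n)
\end{verbatim}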
Notice that the proof of this proposition follows by constructing a cost function that requires non-constant values of $\alpha$ to be captured by either of our parameterizations. Although the intersection turns out to be non-empty, the constructed cost function is rather artificial and more natural examples are elusive so far. In fact, for most of the known cost functions that have been studied in the literature, at least one of our mechanisms achieves an $O(H_n)$-approximation. To make further progress, we believe it is important to understand better the class of functions defined by the intersection of the two complements, as it would help us to identify the missing elements for deriving mechanisms for a wider class of cost functions.
\end{document}
\begin{document}
\makeatletter\@addtoreset{equation}{section}
\makeatother\def\theequation{\thesection.\arabic{equation}}
\renewcommand{\labelenumi}{{\rm (\roman{enumi})}}
\renewcommand{\theenumi}{\roman{enumi}}
\title{A phase transition between endogeny and nonendogeny}
\author{Bal\'azs R\'ath\footnote{Department of Stochastics, Institute of Mathematics,
Budapest University of Technology and Economics, MTA-BME Stochastics Research Group,
M\H{u}egyetem rkp. 3., H-1111 Budapest, Hungary. Alfr\'ed R\'enyi Institute of Mathematics, Re\'altanoda utca 13-15, 1053 Budapest, Hungary.
[email protected]},
Jan~M.~Swart\footnote{The Czech Academy of Sciences,
Institute of Information Theory and Automation,
Pod vod\'arenskou v\v e\v z\' i 4,
18200 Praha 8,
Czech Republic.
[email protected]},
and M\'arton Sz\H{o}ke\footnote{Department of Stochastics, Institute of Mathematics,
Budapest University of Technology and Economics,
M\H{u}egyetem rkp. 3., H-1111 Budapest, Hungary. Alfr\'ed R\'enyi Institute of Mathematics, Re\'altanoda utca 13-15, 1053 Budapest, Hungary.
[email protected] }}
\date{\today}
\begin{abstract}\noindent
The Marked Binary Branching Tree (MBBT) is the family tree of a rate one
binary branching process, on which points have been generated according to a
rate one Poisson point process, with i.i.d.\ uniformly distributed activation
times assigned to the points. In frozen percolation on the MBBT, initially,
all points are closed, but as time progresses points can become either frozen
or open. Points become open at their activation times provided they have not
become frozen before. An open point connects the parts of the tree below and
above it, and one says that a point percolates if the tree above it is
infinite. We consider a version of frozen percolation on the MBBT in which at
times of the form $\theta^n$, all points that percolate are frozen. The limiting
model for $\theta\to 1$, in which points freeze as soon as they percolate, has
been studied before by R\'ath, Swart, and Terpai. We extend their results by
showing that there exists a $0<\theta^\ast<1$ such that the model is endogenous
for $\theta\leq\theta^\ast$ but not for $\theta>\theta^\ast$. This means that for
$\theta\leq\theta^\ast$, frozen percolation is a.s.\ determined by the MBBT but
for $\theta>\theta^\ast$ one needs additional randomness to describe it.
\end{abstract}
\noindent
{\it MSC 2010.} Primary: 82C27; Secondary: 60K35, 82C26, 60J80. \\
{\it Keywords:}
frozen percolation,
recursive distributional equation,
recursive tree process,
endogeny.
\\[10pt]
{\it Acknowledgements:} The authors would like to thank both anonymous referees for their numerous constructive comments that improved the
quality of this paper.
The work of B.~R\'ath was partially supported by grant NKFI-FK-123962 of NKFI (National Research, Development and Innovation Office), the Bolyai Research Scholarship of the Hungarian Academy of Sciences, the \'UNKP-20-5-BME-5 New National Excellence Program of the Ministry for Innovation and Technology, and the ERC Synergy under Grant No. 810115 - DYNASNET. J.M.~Swart is supported by grant 20-08468S of the Czech Science Foundation (GA CR). The work of M\'arton Sz\H{o}ke is partially supported by the ERC Consolidator Grant 772466 ``NOISE''.
{\setlength{\parskip}{-2pt}\tableofcontents}
\section{Introduction and main results}
\subsection{Introduction}
The concept of frozen percolation was introduced by Aldous \cite{Ald00}. In
it, i.i.d.\ activation times that are uniformly distributed on $[0,1]$ are
assigned to the edges of an infinite, unoriented graph. Initially, all edges
are closed. At its activation time, an edge opens, provided it is not frozen.
Here, by definition, an edge freezes as soon as one of its endvertices becomes
part of an infinite open cluster. For general graphs, the existence of a
process satisfying this description is not obvious. Indeed, Benjamini and
Schramm observed that on the square lattice, frozen percolation does not exist
(see \cite[Section~3]{BT01} for an account of the argument).
On the other hand, Aldous \cite{Ald00} showed that frozen percolation on the
infinite 3-regular tree does exist. Under natural additional assumptions, such
a process is even unique in law. This was partially already observed in
\cite{Ald00} and made more precise in \cite[Thm~2]{RST19}. The problem of
almost sure uniqueness stayed open for 19 years, but has recently been solved
negatively in \cite[Thm~3]{RST19}, where it is shown that the question
whether a given edge freezes cannot be decided only by looking at the activation
times of all edges.
The proof of \cite[Thm~3]{RST19} depends on detailed calculations that are
specific to the details of the model. As a result, the question of almost sure
uniqueness is still open for frozen percolation on $n$-regular trees with
$n>3$. This raises the question whether model specific calculations are
necessary, or whether the absence of almost sure uniqueness can alternatively
be demonstrated by more general, ``soft'' arguments that have so far been
overlooked.
The results in the present paper suggest that this is not the case and model
specific calculations are, to some degree, unavoidable. We look at a modified
model in which edges can freeze only at a certain countable set of times. For
the resulting model, which depends on a parameter $0<\theta<1$, we show that
under the same natural additional assumptions that guarantee uniqueness in law,
there exists a nontrivial critical value $\theta^\ast$ such that almost sure
uniqueness holds for $\theta\leq\theta^\ast$ but not for $\theta>\theta^\ast$.
It turns out that it is mathematically simpler to formulate our results for
frozen percolation on a certain oriented tree,
the Marked Binary Branching Tree (MBBT), a random oriented continuum tree
introduced in \cite{RST19}. Using methods of Section~3 of that paper, our
results can also be translated into results for the unoriented 3-regular
tree. For brevity, we omit the details of the latter step and stick for the
remainder of the paper to the oriented (rather than the unoriented) setting on the MBBT (rather than the 3-regular tree).
\subsection{Frozen percolation on the MBBT}
Let ${\mathbb T}$ be the set of all finite words $\mathbf{i}=i_1\cdots i_n$ $(n\geq 0)$ made
up from the alphabet $\{1,2\}$. We call $|\mathbf{i}|:=n$ the length of the word
$\mathbf{i}$ and denote the word of length zero by $\varnothing$, which we distinguish
notationally from the empty set $\emptyset$. The concatenation of two words
$\mathbf{i}=i_1\cdots i_n$ and $\mathbf{j}=j_1\cdots j_m$ is denoted by
$\mathbf{i}\mathbf{j}:=i_1\cdots i_nj_1\cdots j_m$. We view ${\mathbb T}$ as an oriented tree with
root $\varnothing$, in which each point $\mathbf{i}$ has two offspring $\mathbf{i} 1$ and
$\mathbf{i} 2$, and each point $\mathbf{i}=i_1\cdots i_n$ except for the root has one
parent $\lvec\mathbf{i}:=i_1\cdots i_{n-1}$. In pictures, we draw the root at
the bottom and we draw the descendants of a point above their predecessor.
By definition, a \emph{rooted subtree} of ${\mathbb T}$ is a subset ${\mathbb U}\subset{\mathbb T}$ such
that $\lvec\mathbf{i}\in{\mathbb U}$ for all $\mathbf{i}\in{\mathbb U}\setminus\{\varnothing\}$. We call
$\partial{\mathbb U}:=\{\mathbf{i}\in{\mathbb T}\setminus{\mathbb U}:\lvec\mathbf{i}\in{\mathbb U}\}$ the \emph{boundary} of ${\mathbb U}$,
and we use the convention that $\partial{\mathbb U}=\{\varnothing\}$ if ${\mathbb U}=\emptyset$.
Let $(\tau_\mathbf{i},\kappa_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ be i.i.d.\ uniformly distributed on
$[0,1]\times\{1,2\}$. We interpret $\tau_\mathbf{i}$ as the \emph{activation time}
of $\mathbf{i}$ and $\kappa_\mathbf{i}$ as its \emph{number of legal offspring}. If
$\kappa_\mathbf{i}=1$, then we call $\mathbf{i} 1$ and $\mathbf{i} 2$ the legal and illegal
offspring of $\mathbf{i}$, respectively. Points $\mathbf{i}\in{\mathbb T}$ with $\kappa_\mathbf{i}=1$
or $=2$ are called \emph{internal points} and \emph{branching points},
respectively. We denote the corresponding sets as
${\mathbb I}:=\{\mathbf{i}\in{\mathbb T}:\kappa_\mathbf{i}=1\}$ and ${\mathbb B}:=\{\mathbf{i}\in{\mathbb T}:\kappa_\mathbf{i}=2\}$. Only
activation times of internal points matter; activation times of branching
points will not be used. For any $\mathbf{i}\in{\mathbb T}$ and $A\subset{\mathbb T}$, we write
$\mathbf{i}\xrightarrow{A}\infty$ if there exist $(j_k)_{k\geq 1}$ such that
\begin{equation}\label{percol}
{\rm(i)}\quad j_{k+1}\leq\kappa_{\mathbf{i} j_1\cdots j_k}
\quad\mbox{and}\quad
{\rm(ii)}\quad \mathbf{i} j_1\cdots j_k\in A\quad\mbox{for all }k\geq 0.
\end{equation}
In words, this says that there is an infinite open upwards path through $A$
starting at $\mathbf{i}$ such that each next point is a legal offspring of its
parent.
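As a purely illustrative aside (not used in any argument below), the finite-depth analogue of this relation is straightforward to encode. In the following Python sketch the maps \texttt{kappa} and \texttt{in\_A}, describing the offspring numbers and membership of $A$, are hypothetical inputs supplied by the caller.
\begin{verbatim}
# Depth-limited surrogate for the relation "i --A--> infinity": we only check
# that a legal path through A of the prescribed length exists above the word i.
# Words are tuples over {1,2}; kappa(word) in {1,2} and in_A(word) in
# {True, False} are caller-supplied and purely hypothetical here.

def has_legal_open_path(word, depth, kappa, in_A):
    if not in_A(word):          # condition (ii) for the current prefix
        return False
    if depth == 0:
        return True
    return any(has_legal_open_path(word + (j,), depth - 1, kappa, in_A)
               for j in range(1, kappa(word) + 1))   # condition (i)

# Example: if every point lies in A and has two legal offspring, paths of any
# finite length exist.
print(has_legal_open_path((), 10, kappa=lambda w: 2, in_A=lambda w: True))
\end{verbatim}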
We will be interested in frozen percolation on ${\mathbb T}$ with the following
informal description. At any time, points can be \emph{closed}, \emph{frozen},
or \emph{open}. Once a point is frozen or open, it stays that way. Initially,
all branching points are open and all internal points are closed. Branching
points stay open for all time. An internal
point $\mathbf{i}$ becomes open at its activation time $\tau_\mathbf{i}$ provided that, by this
time, it has not yet become frozen. The rules for freezing points are as
follows. We fix a set $\Xi\subset(0,1]$ that is closed w.r.t.\ the relative
topology of $(0,1]$. Letting ${\mathbb O}^t$ denote the set of
open points at time $t$, we decree that up to and including its
activation time, a closed internal point $\mathbf{i}$ becomes frozen at the
first time in $\Xi$ when its legal offspring percolates, i.e., when
$\mathbf{i} 1\xrightarrow{{\mathbb O}^t}\infty$.
Let
\begin{equation}\label{Tt}
{\mathbb T}^t:=\big\{\mathbf{i}\in{\mathbb I}:\tau_\mathbf{i}\leq t\big\}\cup{\mathbb B}
\qquad(0\leq t\leq 1)
\end{equation}
denote the set of all points at time $t$ that are either an internal point that has already been activated
or a branching point. Let
${\mathbb F}$ denote the set of internal points that eventually become frozen.
Since once a point opens or freezes, it stays open or frozen for the remaining time,
the set of open points at time $t$ is given by ${\mathbb O}^t={\mathbb T}^t\setminus{\mathbb F}$. In view of this,
we make our informal description precise
by saying that a random subset ${\mathbb F}$ of ${\mathbb T}$ \emph{solves the frozen percolation
equation} for the \emph{set of possible freezing times} $\Xi$ if
\begin{equation}\label{frozdef}
\mbox{$\mathbf{i}\in{\mathbb F}$ if and only if $\kappa_\mathbf{i}=1$ and $\mathbf{i}
1\xrightarrow{{\mathbb T}^t\setminus{\mathbb F}}\infty$ for some $t\in\Xi\cap(0,\tau_\mathbf{i}]$,}
\end{equation}
which says that the points that eventually become frozen are those internal
points $\mathbf{i}$ for which $\mathbf{i} 1$ percolates at some time in $\Xi$ before or
at the activation time of $\mathbf{i}$.
It turns out that solutions to (\ref{frozdef}) always exist, but the question
of uniqueness is more subtle. To get at least uniqueness in law, we impose
additional conditions. We write $\omega_\mathbf{i}:=(\tau_\mathbf{i},\kappa_\mathbf{i})$
$(\mathbf{i}\in{\mathbb T})$ and for any $\mathbf{j}\in{\mathbb T}$, we let
\begin{equation}\label{Om}
\Omega_\mathbf{j}:=\big(\omega_{\mathbf{j}\mathbf{i}}\big)_{\mathbf{i}\in{\mathbb T}}
\end{equation}
denote the i.i.d.\ randomness that resides in the subtree of ${\mathbb T}$ rooted at
$\mathbf{j}$. In particular, we write $\Omega:=\Omega_\varnothing$. If ${\mathbb F}$ is a solution to the
frozen percolation equation, then for each $\mathbf{j}\in{\mathbb T}$, we define a random
subset ${\mathbb F}_\mathbf{j}$ of ${\mathbb T}$ by
\begin{equation}
{\mathbb F}_\mathbf{j}:=\{\mathbf{i}\in{\mathbb T}:\mathbf{j}\mathbf{i}\in{\mathbb F}\}.
\end{equation}
We say that a solution ${\mathbb F}$ to the frozen percolation equation (\ref{frozdef})
is \emph{stationary} if the law of $(\Omega_\mathbf{j},{\mathbb F}_\mathbf{j})$ does not depend on
$\mathbf{j}\in{\mathbb T}$. We say that ${\mathbb F}$ is \emph{adapted} if for each finite rooted
subtree ${\mathbb U}\subset{\mathbb T}$, the collection of random variables
$(\Omega_\mathbf{j},{\mathbb F}_\mathbf{j})_{\mathbf{j}\in\partial{\mathbb U}}$ is independent of
$(\omega_\mathbf{i})_{\mathbf{i}\in{\mathbb U}}$. Finally, we say that ${\mathbb F}$ \emph{respects the tree
structure} if $(\Omega_\mathbf{j},{\mathbb F}_\mathbf{j})_{\mathbf{j}\in\partial{\mathbb U}}$ is a collection of
independent random variables for each finite rooted subtree ${\mathbb U}\subset{\mathbb T}$.
With these definitions, we can formulate our first result about existence and
uniqueness in law of solutions to the frozen percolation equation
(\ref{frozdef}). In the special case that $\Xi=(0,1]$, the following theorem
has been proved before (in a somewhat different guise) in
\cite[Thm~2]{RST19}.
\begin{theorem}[Uniqueness in law of frozen percolation]
Let\label{T:frozen} $\Xi$ be a closed subset of $(0,1]$ (w.r.t.\ the relative
topology). Then there exists a solution ${\mathbb F}$ of the frozen percolation
equation (\ref{frozdef}). This solution can be chosen so that it is
stationary, adapted, and respects the tree structure. Subject to these
additional conditions, the joint law of $\Omega$ and ${\mathbb F}$ is uniquely determined.
\end{theorem}
We will prove Theorem~\ref{T:frozen} in Subsection~\ref{S:uni}.
As we will see in the coming subsections, the question of almost sure
uniqueness of solutions to the frozen percolation equation is subtle and the
answer depends on the choice of the closed set~$\Xi$.
In the remainder of the present subsection, which can be skipped at a first
reading, we explain how our set-up relates to the definition of the Marked
Binary Branching Tree (MBBT) introduced in \cite{RST19}. Let
\begin{equation}
{\mathbb S}:=\big\{i_1\cdots i_n\in{\mathbb T}:i_m\leq\kappa_{i_1\cdots i_{m-1}}
\ \forall 1\leq m\leq n\big\}
\end{equation}
denote the random rooted subtree of ${\mathbb T}$ consisting of all legal descendants
of the root. Then ${\mathbb S}$ is the family tree of a branching process in which each
individual has one or two offspring, with equal probabilities. For any rooted
subtree ${\mathbb U}\subset{\mathbb S}$, we call
\begin{equation}
\nabla{\mathbb U}:=\mathbb{P}a{\mathbb U}\cap{\mathbb S}
\end{equation}
the \emph{boundary} of ${\mathbb U}$ \emph{relative to} ${\mathbb S}$.
Let $(\ell_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ be i.i.d.\ exponentially distributed random
variables with mean $1/2$, independent of $\Omega$. We interpret $\ell_\mathbf{i}$ as
the lifetime of the individual $\mathbf{i}$ and let
\begin{equation}\label{birthdeath}
b_{i_1\cdots i_n}:=\sum_{k=0}^{n-1}\ell_{i_1\cdots i_k}
\quad\mbox{and}\quad
d_{i_1\cdots i_n}:=\sum_{k=0}^{n}\ell_{i_1\cdots i_k}
\end{equation}
with $b_\varnothing:=0$ and $d_\varnothing:=\ell_\varnothing$ denote the birth and death
times of $i_1\cdots i_n\in{\mathbb T}$. For $h\geq 0$, we let
\begin{equation}\begin{array}{l@{\qquad}l}\label{nabS}
\displaystyle{\mathbb T}_h:=\big\{\mathbf{i}\in{\mathbb T}:d_\mathbf{i}\leq h\big\},
&\displaystyle\partial{\mathbb T}_h=\big\{\mathbf{i}\in{\mathbb T}:b_\mathbf{i}\leq h<d_\mathbf{i}\big\},\\[5pt]
\displaystyle{\mathbb S}_h:={\mathbb T}_h\cap{\mathbb S},
&\displaystyle\nabla{\mathbb S}_h=\partial{\mathbb T}_h\cap{\mathbb S}
\end{array}\end{equation}
denote the sets of individuals in ${\mathbb T}$ or ${\mathbb S}$ that have died by time $h$ and
those that are alive at time $h$, respectively. Note that the former are
a.s.\ finite rooted subtrees of ${\mathbb T}$ and ${\mathbb S}$, respectively, and the latter
are their boundaries relative to ${\mathbb T}$ or ${\mathbb S}$. Now
\begin{equation}\label{branch}
(\nabla{\mathbb S}_h)_{h\geq 0}
\end{equation}
is a continuous-time branching process subject to the following dynamics:
\begin{itemize}
\item each individual $\mathbf{i}$ is with rate 1 replaced by two new individuals
$\mathbf{i} 1$ and $\mathbf{i} 2$,
\item each individual $\mathbf{i}$ is with rate 1 replaced by one new individual
$\mathbf{i} 1$.
\end{itemize}
Let $(\nabla{\mathbb S}_{h-})_{h\geq 0}$ denote the left-continuous modification of the
branching process in (\ref{branch}) and let
\begin{equation}
{\cal T}:=\big\{(\mathbf{i},h):\mathbf{i}\in\nabla{\mathbb S}_{h-}, h\geq 0\big\}.
\end{equation}
As in \cite[Subsection~1.5]{RST19}, we equip ${\cal T}$ with a metric $d$
by setting $d\big((\mathbf{i},h),(\mathbf{j},g)\big):=h+g-2\tau$, where $\tau$ is
the last time before $h\wedge g$ when there existed a common ancestor
of $\mathbf{i}$ and $\mathbf{j}$. Then ${\cal T}$ is a random continuum tree. We can
think of ${\cal T}$ as the family tree of a rate one binary branching
process. Recall that ${\mathbb I}=\{\mathbf{i}\in{\mathbb T}:\kappa_\mathbf{i}=1\}$ denotes the set
of internal points of ${\mathbb T}$. Let
\begin{equation}
\Pi_0:=\big\{(\mathbf{i},d_\mathbf{i}):\mathbf{i}\in{\mathbb I}\cap{\mathbb S}\big\}
\quad\mbox{and}\quad
\Pi:=\big\{(\mathbf{i},d_\mathbf{i},\tau_\mathbf{i}):\mathbf{i}\in{\mathbb I}\cap{\mathbb S}\big\}.
\end{equation}
In words, $\Pi_0$ consists of all points $z=(\mathbf{i},d_\mathbf{i})$ in the continuum tree ${\cal T}$ at which an individual $\mathbf{i}$ dies and is replaced by a single new individual $\mathbf{i} 1$, and $\Pi$ consists of all pairs $(z,\tau_z)$ where $z\in\Pi_0$ and $\tau_z$ is the activation time of the individual that dies at this point. Then the pair $({\cal T},\Pi)$ is a \emph{Marked Binary Branching Tree} (MBBT) as defined in \cite[Subsection~1.5]{RST19}. As explained in \cite[Subsection~1.7]{RST19}, the MBBT naturally arises as the near-critical scaling limit of percolation on a wide class of oriented trees.
If we forget about the specific labeling of elements of ${\cal T}$, i.e., if we are only interested in ${\cal T}$ as a metric space where we view two metric spaces as equal if they are isometric, then we can no longer recognise from ${\cal T}$ at which points a single individual is replaced by a single individual with a different label. In such a setting one can check that conditional on ${\cal T}$, the set $\Pi$ is a Poisson point process of intensity one on ${\cal T}\times[0,1]$. In particular, $\Pi_0$ is a Poisson point process of intensity one on ${\cal T}$ and conditionally on $({\cal T},\Pi_0)$, there is an independent, uniformly distributed activation time $\tau_z$ attached to each point $z\in\Pi_0$.
Frozen percolation on the MBBT has been introduced in
\cite[Subsection~1.6]{RST19}. Our earlier definitions, translated into the
language of the MBBT, result in a process with the following informal
description. Initially, all points $z\in\Pi_0$ are closed. Such
points open at their activation time $\tau_z$, provided that by this time
they have not yet become frozen. A point $z\in\Pi_0$ freezes at
the first time in $\Xi$ before or at its activation time when the open
component of ${\cal T}$ that sits just above the point has infinite size.
\subsection{Burning times}\label{S:burn}
Let $\Xi\subset(0,1]$ be a relatively closed set of possible freezing times and let ${\mathbb F}$ be
a solution to the frozen percolation equation (\ref{frozdef}). We define the
\emph{burning time} of a point $\mathbf{i}\in{\mathbb T}$ as
\begin{equation}\label{YF}
Y_\mathbf{i}:=\inf\big\{t\in\Xi:
\mathbf{i}\xrightarrow{{\mathbb T}^t\setminus{\mathbb F}}\infty\big\}\qquad(\mathbf{i}\in{\mathbb T}),
\end{equation}
with the convention that $\inf\emptyset:=\infty$. The choice of the term
``burning time'' is motivated by a certain analogy with forest fire models.
The following lemma implies that if $Y_\mathbf{i}\leq 1$, then the infimum in
(\ref{YF}) is in fact a minimum.
\begin{lemma}[Percolation times]
For\label{L:perctime} any random subset ${\mathbb A}\subset{\mathbb T}$ and $\mathbf{i}\in{\mathbb T}$, the set
$\{t\in[0,1]:\mathbf{i}\xrightarrow{{\mathbb T}^t\setminus{\mathbb A}}\infty\}$ is a.s.\ closed.
\end{lemma}
We will prove Lemma~\ref{L:perctime} and Lemma~\ref{L:FY} below in
Section~\ref{S:uni}. By formula (\ref{YF}), the burning times
$(Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ are a.s.\ uniquely determined by the set ${\mathbb F}$ and the
i.i.d.\ randomness $\Omega$. The following lemma shows that conversely, given
$\Omega$ and $(Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$, one can recover ${\mathbb F}$.
\begin{lemma}[Frozen points]
Let\label{L:FY} ${\mathbb F}$ be a solution to the frozen percolation equation
(\ref{frozdef}) and let $(Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ be defined by (\ref{YF}). Then
\begin{equation}\label{FY}
{\mathbb F}=\big\{\mathbf{i}\in{\mathbb I}:Y_{\mathbf{i} 1}\leq\tau_\mathbf{i}\big\}.
\end{equation}
\end{lemma}
\begin{remark}
If ${\mathbb F}$ is adapted, then $Y_{\mathbf{i} 1}$ is independent of $\tau_\mathbf{i}$ and hence ${\mathbb P}[Y_{\mathbf{i} 1}=\tau_\mathbf{i}]=0$ for each $\mathbf{i}\in{\mathbb T}$. According to our definitions, the point $\mathbf{i}$ freezes when $Y_{\mathbf{i} 1}=\tau_\mathbf{i}$, but as long as we only discuss adapted solutions, it in fact does not matter how things are defined in this case.
\end{remark}
Let $I:=[0,1]\cup\{\infty\}$. If ${\mathbb F}$ is a solution to the frozen percolation
equation (\ref{frozdef}), then it is not hard to see that the burning times
$(Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ satisfy the inductive relation
\begin{equation}\label{Yind}
Y_\mathbf{i}=\chi[\tau_\mathbf{i},\kappa_\mathbf{i}](Y_{\mathbf{i} 1},Y_{\mathbf{i} 2}),
\end{equation}
where $\chi:[0,1]\times\{1,2\}\times I^2\to I$ is the function
\begin{equation}\label{chi_def}
\chi[\tau,\kappa](x,y):=\left\{\begin{array}{ll}
x\quad&\mbox{if }\kappa=1,\ x>\tau,\\[5pt]
\infty\quad&\mbox{if }\kappa=1,\ x\leq\tau,\\[5pt]
x\wedge y\quad&\mbox{if }\kappa=2.\end{array}\right.
\end{equation}
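Purely as an illustration (the code and names below are ours and are not used elsewhere), the map $\chi$ of (\ref{chi_def}) can be written in a few lines of Python, with \texttt{float('inf')} playing the role of the point $\infty$ of $I$.
\begin{verbatim}
INF = float('inf')   # plays the role of the point "infinity" in I = [0,1] u {inf}

def chi(tau, kappa, x, y):
    # chi[tau,kappa](x,y) from the recursion for burning times: an internal
    # point (kappa=1) inherits the burning time x of its legal child if x > tau
    # and otherwise never burns; a branching point (kappa=2) burns as soon as
    # one of its two children does.
    if kappa == 1:
        return x if x > tau else INF
    return min(x, y)

# For example, chi(0.3, 1, 0.5, INF) == 0.5 while chi(0.7, 1, 0.5, INF) == INF.
\end{verbatim}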
Assume that ${\mathbb F}$ is stationary, adapted, and respects the tree
structure. Then the law of $Y_\varnothing$ satisfies the Recursive Distributional
Equation (RDE)
\begin{equation}\label{uni_RDE}
Y_\varnothing\isd\chi[\omega](Y_1,Y_2),
\end{equation}
where $\isd$ denotes equality in distribution, $Y_1,Y_2$ are i.i.d.\ copies of
$Y_\varnothing$, and $\omega$ is an independent uniformly distributed random variable
on $[0,1]\times\{1,2\}$. Proposition~37 of \cite{RST19} classifies all
solutions of the RDE (\ref{uni_RDE}). Expanding on that result, we can prove the
following lemma, which is the basis of our proof of Theorem~\ranglef{T:frozen}.
\begin{lemma}[Law of burning times]
For\label{L:burnlaw} each set $\Xi\subset(0,1]$ that is closed w.r.t.\ the
relative topology of $(0,1]$, there exists a unique
probability measure $\rho_\Xi$ on $I$ such that
\begin{enumerate}
\item $\rho_\Xi$ solves the RDE (\ref{uni_RDE}),
\item $\rho_\Xi$ is concentrated on $\Xi\cup\{\infty\}$,
\item $\rho_\Xi\big([0,t]\big)\geq\ha t$ for all $t\in\Xi$.
\end{enumerate}
Assume that ${\mathbb F}$ solves the frozen percolation equation (\ref{frozdef}) for the
set of possible freezing times $\Xi$ and that ${\mathbb F}$ is stationary, adapted, and
respects the tree structure. Then the burning time of the root $Y_\varnothing$,
defined in (\ref{YF}), has law $\rho_\Xi$.
\end{lemma}
We will prove Lemma~\ref{L:burnlaw} together with Lemma~\ref{L:genRDE} below
in Section~\ref{S:uni}. The following lemma shows that every solution of the
RDE (\ref{uni_RDE}) is of the form $\rho_\Xi$ for some closed set
$\Xi\subset(0,1]$. Below, ${\rm supp}(\mu)$ denotes the support of a measure~$\mu$.
\begin{lemma}[General solutions to the RDE]
If\label{L:genRDE} $\rho$ solves the RDE (\ref{uni_RDE}), then $\rho=\rho_\Xi$
with $\Xi:=(0,1]\cap{\rm supp}(\rho)$.
\end{lemma}
By condition~(ii) of Lemma~\ref{L:burnlaw}, for a general closed subset $\Xi\subset(0,1]$, we have $(0,1]\cap{\rm supp}(\rho_\Xi)\subset\Xi$. This inclusion may be strict,\footnote{For example, if $\Xi=\{s,t\}$ with $0<s<t\leq 1$ and $t\leq 2s$, then using Lemma~\ref{L:RDEint} below it is easy to check that $\rho_\Xi=s\delta_s+(1-s)\delta_\infty$.} however, so the correspondence between solutions of the RDE (\ref{uni_RDE}) and sets of possible freezing times is not one-to-one.
\subsection{Almost sure uniqueness}\label{S:as}
Recall from (\ref{Om}) that $\Omega=(\omega_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ with
$\omega_\mathbf{i}=(\tau_\mathbf{i},\kappa_\mathbf{i})$. For a given set $\Xi\subset(0,1]$ of possible
freezing times, we say that solutions to the frozen percolation equation
(\ref{frozdef}) are \emph{almost surely unique} if, whenever ${\mathbb F}$ and ${\mathbb F}'$
solve (\ref{frozdef}) relative to the same $\Omega$, one has ${\mathbb F}={\mathbb F}'$ a.s.
Let us first note that it is easy to show that if $\Xi$ is a finite subset of $(0,1]$ then the solutions of (\ref{frozdef}) are almost surely unique. Indeed, if $\Xi=\{t_1,\ldots,t_n\}$ with $0<t_1<\dots<t_n \leq 1$ then one proves by induction on $k=1,\dots,n$ that the set of vertices that burn at time $t_k$ is determined by $\Omega$. This implies that the burning time $Y_\mathbf{i}$ of each vertex $\mathbf{i}\in{\mathbb T}$ is determined by $\Omega$, hence the set ${\mathbb F}$ is also determined by $\Omega$ using Lemma~\ref{L:FY}.
For the remainder of the paper, we will mostly focus our attention on a
one-parameter family of sets of possible burning times. For $0<\theta<1$, we
define $\Xi_\theta:=\{\theta^n:n\in{\mathbb N}\}$ (with ${\mathbb N}:=\{0,1,2,\ldots\}$) and we set
$\Xi_1:=(0,1]$, which can naturally be viewed as the limit of $\Xi_\theta$ as
$\theta\to 1$. As a straightforward application of \cite[Prop~37]{RST19},
one can check that for these sets, the probability laws $\rho_\Xi$ from
Lemma~\ref{L:burnlaw} are given by
\begin{equation}\begin{array}{r@{\,}c@{\,}l}\label{muXi}
\displaystyle\rho_{\Xi_\theta}(\mathrm{d} t)
&=&\displaystyle\frac{1-\theta}{1+\theta}\sum_{k=0}^\infty\theta^k\delta_{\theta^k}(\mathrm{d} t)
+\frac{\theta}{1+\theta}\delta_\infty(\mathrm{d} t)\qquad(0<\theta<1),\\[15pt]
\displaystyle\rho_{\Xi_1}(\mathrm{d} t)&=&\displaystyle\ha\mathrm{d} t+\ha\delta_\infty(\mathrm{d} t).
\end{array}\end{equation}
It is not hard to see that $\rho_{\Xi_\theta}$ converges weakly to $\rho_{\Xi_1}$
as $\theta\to 1$.
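As an illustrative consistency check (not needed in what follows), formula (\ref{muXi}) can be verified directly against the integral characterisation of the RDE given in Lemma~\ref{L:RDEint} below. For $t=\theta^m$ with $m\in{\mathbb N}$ one has
\[
\rho_{\Xi_\theta}\big([0,\theta^m]\big)=\frac{1-\theta}{1+\theta}\sum_{k=m}^\infty\theta^k=\frac{\theta^m}{1+\theta}
\quad\mbox{and}\quad
\int_{[0,\theta^m]}\rho_{\Xi_\theta}(\mathrm{d} s)s=\frac{1-\theta}{1+\theta}\sum_{k=m}^\infty\theta^{2k}=\frac{\theta^{2m}}{(1+\theta)^2},
\]
so the two sides of (\ref{RDEint}) agree at every atom $\theta^m$, and hence for all $t\in[0,1]$, since both sides are constant between consecutive atoms. Moreover $\rho_{\Xi_\theta}([0,\theta^m])=\theta^m/(1+\theta)\geq\ha\theta^m$, in accordance with condition~(iii) of Lemma~\ref{L:burnlaw}.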
We conjecture that for the sets $\Xi_\theta$ with $0<\theta < \tfrac{1}{2}$, solutions to the frozen percolation equation are almost surely unique. We have not been able to prove this, but we can prove that there exists a $\tfrac{1}{2}<\theta^\ast<1$ such that almost sure uniqueness does not hold for $\theta>\theta^\ast$ and almost sure uniqueness holds under additional assumptions for $\theta\leq\theta^\ast$.
To explain this in more detail, fix $\Omega=(\omega_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$, let ${\mathbb F}$ be a solution to the frozen percolation equation (\ref{frozdef}) that is stationary, adapted, and respects the tree structure, and let $(Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ be the burning times defined in (\ref{YF}). Then
\begin{enumerate}
\item For each finite rooted subtree ${\mathbb U}\subset{\mathbb T}$, the r.v.'s $(Y_\mathbf{i})_{\mathbf{i}\in\partial{\mathbb U}}$ are i.i.d.\ and independent of $(\omega_\mathbf{i})_{\mathbf{i}\in{\mathbb U}}$.
\item $\displaystyle Y_\mathbf{i}=\chi[\omega_\mathbf{i}](Y_{\mathbf{i} 1},Y_{\mathbf{i} 2})\qquad(\mathbf{i}\in{\mathbb T})$.
\end{enumerate}
This means that $(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ is a \emph{Recursive Tree Process} (RTP) as defined in \cite{AB05}. Note that since by Theorem~\ref{T:frozen}, the joint law of $(\Omega,{\mathbb F})$ is uniquely determined, the same is true for the law of the RTP $(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$. Following a definition from \cite{AB05}, one says that such an RTP is \emph{endogenous} if $Y_\varnothing$ is measurable w.r.t.\ the \ensuremath{\sigma}-field generated by the collection of random variables $\Omega=(\omega_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$. We make the following observation.
\begin{lemma}[Endogeny and almost sure uniqueness]
Let $(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ be the RTP defined above. Then the\label{L:endas} following claims are equivalent:
\begin{enumerate}
\item The RTP $(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ is endogenous.
\item If ${\mathbb F}$ and ${\mathbb F}'$ solve (\ref{frozdef}) relative to the same $\Omega$, and
moreover ${\mathbb F}$ and ${\mathbb F}'$ are stationary, adapted, and respect the tree
structure, then ${\mathbb F}={\mathbb F}'$ a.s.
\end{enumerate}
\end{lemma}
\begin{proof}
Fix $\Omega=(\omega_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ and let ${\mathbb F}$ be a solution to (\ref{frozdef}) relative to $\Omega$ that is stationary, adapted, and respects the tree structure. By Theorem~\ref{T:frozen}, such a solution exists (perhaps on an extended probability space) and the joint law of $(\Omega,{\mathbb F})$ is uniquely determined. Let $(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ be the corresponding RTP defined by (\ref{YF}). Endogeny says that $Y_\varnothing$ is measurable w.r.t.\ the \ensuremath{\sigma}-field generated by $\Omega$. Since $(\omega_{\mathbf{j}\mathbf{i}},Y_{\mathbf{j}\mathbf{i}})_{\mathbf{i}\in{\mathbb T}}$ is equally distributed with $(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$, endogeny implies that $Y_\mathbf{j}$ is measurable w.r.t.\ the \ensuremath{\sigma}-field generated by $\Omega$ for each $\mathbf{j}\in{\mathbb T}$. This shows that endogeny is equivalent to the statement that $(Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ is measurable w.r.t.\ the \ensuremath{\sigma}-field generated by $\Omega$. Since by (\ref{YF}) and Lemma~\ref{L:FY}, given $\Omega$, the set ${\mathbb F}$ and collection of random variables $(Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ determine each other a.s.\ uniquely, this is in turn equivalent to the statement that ${\mathbb F}$ is measurable w.r.t.\ the \ensuremath{\sigma}-field generated by $\Omega$. Equivalently, this says that the conditional law of ${\mathbb F}$ given $\Omega$ is a delta-measure. If now ${\mathbb F}'$ is a second solution with the stated properties, then by Theorem~\ref{T:frozen} the joint law of $(\Omega,{\mathbb F}')$ coincides with that of $(\Omega,{\mathbb F})$, so the conditional law of ${\mathbb F}'$ given $\Omega$ is the same delta-measure and ${\mathbb F}={\mathbb F}'$ a.s. This shows that (i) implies (ii). Conversely, if (i) does not hold, let us construct a random variable ${\mathbb F}'$ such that ${\mathbb F}'$ is conditionally independent of ${\mathbb F}$
given $\Omega$, moreover the conditional distributions of ${\mathbb F}$ and ${\mathbb F}'$ are the same if we condition on $\Omega$. In particular, ${\mathbb F}'$ then also solves (\ref{frozdef}) relative to $\Omega$ and is stationary, adapted, and respects the tree structure. Since the conditional law of ${\mathbb F}$ given $\Omega$ is not a delta-measure, we then have ${\mathbb F}\neq{\mathbb F}'$ with positive probability, showing that (ii) does not hold.
\end{proof}
It follows from Lemma~\ref{L:endas} that if the RTP
$(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ is nonendogenous, then solutions to the frozen
percolation equation (\ref{frozdef}) are not almost surely unique.
We pose the converse implication as an open problem:
\begin{quote}
\textbf{Question~1} Does endogeny of the RTP $(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$
imply almost sure uniqueness of solutions to the frozen percolation equation
(\ref{frozdef})?
\end{quote}
In other words, Question~1 asks whether in part~(ii) of
Lemma~\ref{L:endas}, one can remove the conditions that ${\mathbb F}$ and ${\mathbb F}'$
are stationary, adapted, and respect the tree structure.
We now address the question of endogeny. To state our main result, we need one
technical lemma, which introduces a parameter $\theta^\ast$. Numerically, we
find that $\theta^* \approx 0.636$.
\begin{lemma}[The critical parameter]
Let\label{L:tetdef} $g:(0,1)\to{\mathbb R}$ be defined as \begin{equation}
g(\theta):=2(1+\theta)
-\sum_{\ell=0}^{\infty}\frac{\theta^{2\ell}(1-\theta^2)}{2/(1+\theta)-\theta^{\ell}}.
\end{equation}
Then $g$ is a strictly decreasing continuous function that changes sign at a
point $\theta^\ast \in (\tfrac{1}{2},1)$.
\end{lemma}
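As a purely numerical illustration of how the value $\theta^\ast\approx 0.636$ quoted above can be reproduced (the truncation level, the bracketing interval, and all names in the following Python sketch are ad hoc choices of ours), one may truncate the series defining $g$ and bisect for its sign change:
\begin{verbatim}
def g(theta, terms=2000):
    # Truncation of the series defining g(theta); the denominators are
    # strictly positive for 0 < theta < 1, so the truncated sum is well defined.
    s = 2.0 * (1.0 + theta)
    for l in range(terms):
        s -= theta**(2*l) * (1.0 - theta**2) / (2.0/(1.0 + theta) - theta**l)
    return s

# g is strictly decreasing and changes sign in (1/2, 1); bisect for the root.
lo, hi = 0.5, 0.99
for _ in range(60):
    mid = 0.5 * (lo + hi)
    lo, hi = (mid, hi) if g(mid) > 0 else (lo, mid)
print(lo)   # approximately 0.636
\end{verbatim}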
The following theorem is the main result of our paper. For $\theta=1$,
the result has been proved in \cite[Thm~12]{RST19}, but it is
new in the regime $0<\theta<1$.
\begin{theorem}[Endogeny]
Let\label{T:endog} $\theta^\ast$ be as in Lemma~\ref{L:tetdef}.
Let $0<\theta\leq 1$, and for the set of possible freezing times
$\Xi_\theta$, let $(\Omega,{\mathbb F})$ be defined as in Theorem~\ref{T:frozen}. Let
$(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ be the corresponding RTP of burning times
defined in (\ref{YF}). This RTP is endogenous for $0<\theta\leq\theta^\ast$ but
not for $\theta^\ast<\theta\leq 1$.
\end{theorem}
In Subsections \ref{S:scale} and \ref{S:scRD} below, we elaborate
a bit on our methods for proving Theorem~\ref{T:endog}. We use the
remainder of the present subsection to make a few additional comments
on Question~1 posed above.
Set ${\mathbb F}_0:=\emptyset$ and define inductively for $k\geq 1$
\begin{equation}\label{Fk}
Y^k_\mathbf{i}:=\inf\big\{t\in\Xi:\mathbf{i}\xrightarrow{{\mathbb T}^t\setminus{\mathbb F}_{k-1}}\infty\big\}
\quad(\mathbf{i}\in{\mathbb T})\quad\mbox{and}\quad
{\mathbb F}_k:=\big\{\mathbf{i}\in{\mathbb I}:Y^k_{\mathbf{i} 1}\leq\tau_\mathbf{i}\big\},
\end{equation}
with the usual convention that $\inf\emptyset:=\infty$. Then it is not hard to see that
\begin{equation}\begin{array}{r@{\ }l@{\quad}r@{\ }l}\label{incdec}
{\rm(i)}&\displaystyle{\mathbb F}_{2n}\subset{\mathbb F}_{2n+1}\quad
&{\rm(ii)}&\displaystyle{\mathbb F}_{2n+1}\supset{\mathbb F}_{2n+2},\\[5pt]
{\rm(iii)}&\displaystyle{\mathbb F}_{2n}\subset{\mathbb F}_{2n+2}\quad
&{\rm(iv)}&\displaystyle{\mathbb F}_{2n+1}\supset{\mathbb F}_{2n+3},
\end{array}\qquad(n\in{\mathbb N}).
\end{equation}
Moreover, if ${\mathbb F}$ solves the frozen percolation equation (\ref{frozdef}), then
\begin{equation}\label{Fpm2}
{\mathbb F}_{2n}\subset{\mathbb F}\subset{\mathbb F}_{2n+1}\qquad(n\in{\mathbb N}).
\end{equation}
For the sets of possible burning times $\Xi_\theta$ with $0<\theta\leq 1$, it is possible to verify by calculation that ${\mathbb F}_2=\emptyset$ a.s.\ if and only if $\theta\geq 1/2$. In particular, if $\theta<1/2$, then there are points that must freeze in any solution to the frozen percolation equation (\ref{frozdef}). We conjecture that in fact, for any $\theta<1/2$, the sets $\bigcup_{n\in{\mathbb N}}{\mathbb F}_{2n}$ and $\bigcap_{n\in{\mathbb N}}{\mathbb F}_{2n+1}$ are a.s.\ equal and as a result, solutions to the frozen percolation equation (\ref{frozdef}) are a.s.\ unique for all $\theta<1/2$. Note that even if this conjecture is correct, it does not fully settle Question~1, since the parameter $\theta^\ast$ from Lemma~\ref{L:tetdef} is strictly larger than $1/2$.
\subsection{Scale invariance}\label{S:scale}
We fix a set of possible burning
times $\Xi$, construct a frozen percolation process $(\Omega,{\mathbb F})$ as in
Theorem~\ref{T:frozen} and let $(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ be the
corresponding RTP of burning times defined in (\ref{YF}). Conditional on
$\Omega=(\omega_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$, let $(Y'_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ be an independent
copy of $(Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$. Then endogeny is equivalent to the statement
that $Y_\varnothing=Y'_\varnothing$ a.s. An easy argument, which can be found in
\cite[Appendix~B]{MSS18}, shows that the joint law of
$(Y_\varnothing,Y'_\varnothing)$ solves the \emph{bivariate RDE}
\begin{equation}\label{bivar_RDE}
(Y_\varnothing,Y'_\varnothing)\isd\big(\chi[\omega](Y_1,Y_2),\chi[\omega](Y'_1,Y'_2)\big),
\end{equation}
where $(Y_1,Y'_1)$ and $(Y_2,Y'_2)$ are independent copies of
$(Y_\varnothing,Y'_\varnothing)$ and $\omega$ is an independent uniformly distributed random
variable on $[0,1]\times\{1,2\}$. We define probability laws on $I^2$ by
\begin{equation}\label{unnu}
\underline\rho^{(2)}_\Xi:={\mathbb P}\big[(Y_\varnothing,Y'_\varnothing)\in\,\cdot\,\big]
\quad\mbox{and}\quad
\overline\rho^{(2)}_\Xi:={\mathbb P}\big[(Y_\varnothing,Y_\varnothing)\in\,\cdot\,\big].
\end{equation}
The marginals of these measures are given by the measure $\rho_\Xi$ defined in Lemma~\ref{L:burnlaw}. General theory for RTPs yields the following:
\begin{proposition}[Bivariate uniqueness]
The\label{P:bivar} following statements are equivalent:
\begin{enumerate}
\item The RTP $(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ is endogenous.
\item $\underline\rho^{(2)}_\Xi=\overline\rho^{(2)}_\Xi$.
\item The measure $\overline\rho^{(2)}_\Xi$ is the only solution of the bivariate RDE
(\ref{bivar_RDE}) in the space of symmetric probability measures on $I^2$ with
marginals given by $\rho_\Xi$.
\end{enumerate}
\end{proposition}
\begin{proof}
The equivalence of (i) and (ii) follows immediately from the definitions in (\ref{unnu}), the equivalence of (i) and (iii) is proved in \cite[Thm~11]{AB05} (see also \cite[Thm~1]{MSS18}), and the implication (iii)$\ensuremath{\Rightarrow}$(ii) is trivial.
\end{proof}
Proposition~\ref{P:bivar} is our main tool for proving
Theorem~\ref{T:endog}, but in order to be able to successfully apply
Proposition~\ref{P:bivar}, we need one more idea. For a general set of
possible burning times $\Xi$, it is difficult to find all solutions of
the bivariate RDE (\ref{bivar_RDE}) in the space of symmetric
probability measures with marginals given by $\rho_\Xi$. For the
special sets $\Xi_\theta$ with $0<\theta\leq 1$, however, it turns out to
be sufficient to look only at scale invariant solutions of the
bivariate RDE. As we will explain below, this leads to a significant
simplification of the problem, which allows us to prove
Theorem~\ref{T:endog} for the sets $\Xi_\theta$, but not for general
$\Xi$.
It has been proved in \cite[Prop~9]{RST19} that the law of the MBBT is
invariant under a certain scaling relation. This is ultimately the consequence
of the fact that the MBBT is itself the scaling limit of near-critical
percolation on trees of finite degree. We will not repeat the scaling property
of the MBBT here but instead formulate scaling properties of solutions to the
RDE (\ref{uni_RDE}) and bivariate RDE (\ref{bivar_RDE}) that are consequences
of the scaling of the MBBT.
For each $t>0$, we define a scaling map $\psi_t:I\to I$ by
\begin{equation}\label{psit}
\psi_t(y):=\left\{\begin{array}{ll}
\displaystyle t^{-1}y\quad&\mbox{ if }y\leq t,\\
\displaystyle\infty\quad&\mbox{otherwise.}\end{array}\right.
\end{equation}
We let ${\cal M}^{(1)}$ denote the space of all probability measures $\rho$ on
$I=[0,1]\cup\{\infty\}$ that satisfy $\rho\big([0,t]\big)\leq t$ for all $0\leq
t\leq 1$, and we define scaling maps $\Gamma_t$ by
\begin{equation}\label{Ga1}
\Gamma_t\rho:=t^{-1}\rho\circ\psi_t^{-1}+(1-t^{-1})\delta_\infty
\qquad(\rho\in{\cal M}^{(1)},\ t>0),
\end{equation}
where $\delta_\infty$ denotes the delta-measure at $\infty$. It is not hard to
see that $\Gamma_t$ maps the space ${\cal M}^{(1)}$ into itself. In particular, for
$0<t<1$, the assumption $\rho\big([0,t]\big)\leq t$ guarantees that
$\Gamma_t\rho$ puts nonnegative mass at $\infty$. The following lemma
says that the set of solutions to the RDE (\ref{uni_RDE}) is invariant under
the scaling maps $\Gamma_t$. Below, $\rho_\Xi$ denotes the measure defined in
Lemma~\ref{L:burnlaw}.
\begin{lemma}[Scale invariance of the RDE]
Let\label{L:scaleRDE} $\rho$ be a solution to the RDE (\ref{uni_RDE}). Then
$\rho\in{\cal M}^{(1)}$, and for each $t>0$, the measure $\Gamma_t\rho$ is also a
solution to the RDE (\ref{uni_RDE}). In particular, if $\Xi$ is a relatively
closed subset of $(0,1]$, then
\begin{equation}\label{scaleRDE}
\Gamma_t\rho_\Xi=\rho_{\Xi'}\quad\mbox{with}\quad
\Xi':=\{t^{-1}y:y\in\Xi\}\cap[0,1]\qquad(t>0).
\end{equation}
\end{lemma}
For the bivariate RDE, a result similar to Lemma \ref{L:scaleRDE} holds, which
we formulate now. We say that a probability measure on $I^2$ is
\emph{symmetric} if it is invariant under the map $(y_1,y_2)\mapsto(y_2,y_1)$.
Let ${\cal M}^{(2)}$ denote the space of all symmetric probability measures
$\rho^{(2)}$ on $I^2$ that satisfy
\begin{equation}\label{Mi2def}
\rho^{(2)}\big(
[0,t]\times I\cup I\times[0,t]\big)\leq t
\qquad\forall 0\leq t\leq 1.
\end{equation}
We define $\psi^{(2)}_t:I^2\to I^2$ by
$\psi^{(2)}_t(y,y'):=\big(\psi_t(y),\psi_t(y')\big)$ and
we define $\Gamma^{(2)}_t:{\cal M}^{(2)}\to{\cal M}^{(2)}$ by
\begin{equation}\label{Ga2}
\Gamma^{(2)}_t\rho:=t^{-1}\rho\circ(\psi^{(2)}_t)^{-1}+(1-t^{-1})\delta_{(\infty,\infty)}
\qquad(\rho\in{\cal M}^{(2)},\ t>0).
\end{equation}
We will prove that $\rho \in {\cal M}^{(2)} $ indeed implies $\Gamma^{(2)}_t\rho \in {\cal M}^{(2)}$ in Section \ref{S:scaleprf}.
With the above definitions, we have the following lemmas, which are analogous to
Lemma~\ref{L:scaleRDE}. The measures $\underline\rho^{(2)}_\Xi$ and $\overline\rho^{(2)}_\Xi$
that occur in Lemma~\ref{L:scalenu} are defined in (\ref{unnu}).
\begin{lemma}[Scale invariance of bivariate RDE]
Let\label{L:scalebivRDE} $\rho^{(2)}$ be a symmetric solution to the bivariate
RDE (\ref{bivar_RDE}). Then $\rho^{(2)}\in{\cal M}^{(2)}$, and for each $t>0$,
the measure $\Gamma^{(2)}_t\rho^{(2)}$ is also a solution to (\ref{bivar_RDE}).
\end{lemma}
\begin{lemma}[Scale invariance of special solutions]
Let\label{L:scalenu} $\Xi\subset(0,1]$ be relatively closed. Then, for each $t>0$,
\begin{equation}
\Gamma^{(2)}_t\underline\rho^{(2)}_\Xi=\underline\rho^{(2)}_{\Xi'}
\quad\mbox{and}\quad
\Gamma^{(2)}_t\overline\rho^{(2)}_\Xi=\overline\rho^{(2)}_{\Xi'}
\quad\mbox{with}\quad
\Xi':=\{t^{-1}y:y\in\Xi\}\cap[0,1].
\end{equation}
\end{lemma}
\subsection{Scale invariant solutions to the bivariate RDE}\label{S:scRD}
In the present subsection, we explain how scale invariance helps us prove our
main result Theorem~\ref{T:endog}. It follows from Lemma~\ref{L:scaleRDE},
and can also easily be checked by direct calculation using
formula (\ref{muXi}), that the measures $\rho_{\Xi_\theta}$ are invariant under
scaling by $\theta$, and hence also by $\theta^n$ for each $n\geq 0$. Likewise,
$\rho_{\Xi_1}$ is invariant under scaling by any $0<t\leq 1$, so we have
\begin{equation}\label{rhosca}
\Gamma_t\rho_{\Xi_\theta}=\rho_{\Xi_\theta}\qquad\big(0<\theta\leq 1,\ t\in\Xi_\theta).
\end{equation}
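For the reader's convenience, here is the direct calculation alluded to above in the case $0<\theta<1$ and $t=\theta$ (an added illustration, not needed later). The map $\psi_\theta$ sends the atom of $\rho_{\Xi_\theta}$ at $\theta^k$ to $\theta^{k-1}$ for $k\geq 1$, and sends the atom at $\theta^0=1$, as well as $\infty$, to $\infty$, so by (\ref{Ga1})
\[
\Gamma_\theta\rho_{\Xi_\theta}\big(\{\theta^k\}\big)
=\theta^{-1}\,\frac{1-\theta}{1+\theta}\,\theta^{k+1}
=\frac{1-\theta}{1+\theta}\,\theta^k\qquad(k\in{\mathbb N}),
\]
while the mass at $\infty$ equals $\theta^{-1}\big(\tfrac{1-\theta}{1+\theta}+\tfrac{\theta}{1+\theta}\big)+(1-\theta^{-1})=\tfrac{\theta}{1+\theta}$, which indeed reproduces (\ref{muXi}).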
Motivated by this, for $0<\theta\leq 1$, we let ${\cal M}^{(2)}_\theta$ denote the
space of probability measures $\rho^{(2)}$ on $I^2$ such that:
\begin{enumerate}
\item $\rho^{(2)}\in{\cal M}^{(2)}$,
\item the marginals of $\rho^{(2)}$ are given by $\rho_{\Xi_\theta}$,
\item $\Gamma^{(2)}_t\rho^{(2)}=\rho^{(2)}$ for all $t\in\Xi_\theta$.
\end{enumerate}
Let $0<\theta\leq 1$, and for the set of possible freezing times $\Xi_\theta$, let
$(\Omega,{\mathbb F})$ be defined as in Theorem~\ref{T:frozen}. Let
$(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ be the corresponding RTP of burning times
defined in (\ref{YF}). It follows from Proposition~\ref{P:bivar} and
Lemma~\ref{L:scalenu} that the RTP $(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ is
endogenous if and only if $\overline\rho^{(2)}_{\Xi_\theta}$ is the only solution of the
bivariate RDE (\ref{bivar_RDE}) in the space ${\cal M}^{(2)}_\theta$.
In view of this, Theorem~\ref{T:endog} is implied by the following theorem.
\begin{theorem}[Scale invariant solutions of the bivariate RDE]
Let\label{the_theorem} $\theta^\ast$ be as in Lemma~\ref{L:tetdef}
and let $0<\theta\leq 1$. Then:
\begin{enumerate}
\item If $\theta \le \theta^*$ then $\overline\rho^{(2)}_{\Xi_\theta}$ is the only
solution of the bivariate RDE (\ref{bivar_RDE}) in the space
${\cal M}^{(2)}_\theta$.
\item If $\theta^*<\theta$, then there exists a measure $\hat{\rho}^{(2)}
\in {\cal M}^{(2)}_\theta$ with $\hat{\rho}^{(2)}\neq\overline\rho^{(2)}_{\Xi_\theta}$ that
solves (\ref{bivar_RDE}).
\end{enumerate}
\end{theorem}
We call $\overline\rho^{(2)}_{\Xi_\theta}$ the \emph{diagonal} solution of the
bivariate RDE since it is concentrated on $\{(y,y):y\in I\}$ (see
(\ref{unnu})). In the special case $\theta=1$, Theorem~\ref{the_theorem} has
been proved in \cite[Thm~12]{RST19}, where it is moreover shown that the
bivariate RDE (\ref{bivar_RDE}) has precisely two solutions in the
space ${\cal M}^{(2)}_1$. We conjecture that this holds more generally.
In Remark~\ref{remark:conj_unique} below, we present numerical evidence for
the following conjecture.
\begin{conjecture}[Uniqueness of the nondiagonal solution]
For\label{conj:unique} all $\theta^\ast<\theta\leq 1$, the measures
$\overline\rho^{(2)}_{\Xi_\theta}$ and $\underline\rho^{(2)}_{\Xi_\theta}$ defined in
(\ref{unnu}) are the only solutions of the bivariate RDE (\ref{bivar_RDE}) in
the space ${\cal M}^{(2)}_\theta$.
\end{conjecture}
The main advantage of scale invariance is that it reduces the number of
parameters. In general, we can characterise a measure on $[0,1]^2$ by its
distribution function, which is a real function of two variables. However,
using scale invariance, we can characterise a measure
$\rho^{(2)}\in{\cal M}^{(2)}_\theta$ using a real function of one variable only, see
Definition~\ref{def:rho->f} below. This significantly simplifies the
calculations.
We can in fact be a little more general. Generalizing the definition
above (\ref{muXi}), for $0<\theta<1$ and $0<\alpha\leq 1$, let us define
$\Xi_{\theta,\alpha}:=\{\alpha\theta^n:n\in{\mathbb N}\}$. Then Lemma~\ref{L:scaleRDE} implies
that $\rho_{\Xi_{\theta,\alpha}}=\Gamma_{1/\alpha}\rho_{\Xi_\theta}$. Moreover,
Proposition~\ref{P:bivar} and Lemma~\ref{L:scalenu} imply that the RTP
corresponding\footnote{The precise definition of an RTP corresponding to a solution to an RDE can be found below formula (\ref{RDE}).} to $\rho_{\Xi_{\theta,\alpha}}$ is endogenous if and only if the RTP
corresponding to $\rho_{\Xi_\theta}$ is endogenous. Since this does not
conceptually add anything new, for simplicity, we have formulated our main
results only for the set of possible burning times $\Xi_\theta$.
\section{Frozen percolation on the MBBT}
\subsection{Existence and uniqueness in law}\label{S:uni}
In this subsection, we prove Theorem~\ref{T:frozen} and
Lemmas~\ref{L:perctime}, \ref{L:FY}, \ref{L:burnlaw}, and \ref{L:genRDE}.
\begin{proof}[Proof of Lemma~\ref{L:perctime}]
It suffices to prove the claim for $\mathbf{i}=\varnothing$. Let $P:=\{t\in[0,1]:
\varnothing\xrightarrow{{\mathbb T}^t\setminus{\mathbb A}}\infty\}$. Similar to the definition in
(\ref{percol}), for any $\mathbf{i},\mathbf{k}\in{\mathbb T}$ and $A\subset{\mathbb T}$, we write
$\mathbf{i}\xrightarrow{A}\mathbf{k}$ if there exists a $\mathbf{j}=j_1\cdots j_n\in{\mathbb T}$ such
that $\mathbf{k}=\mathbf{i}\mathbf{j}$ and
\begin{equation}\label{percolij}
{\rm(i)}\quad j_{k+1}\leq\kappa_{\mathbf{i} j_1\cdots j_k}\quad(0\leq k<n)
\quad\mbox{and}\quad
{\rm(ii)}\quad \mathbf{i} j_1\cdots j_k\in A\quad(0\leq k\leq n).
\end{equation}
By (\ref{Tt}), the set $T(\mathbf{i}):=\{t\in[0,1]:\mathbf{i}\in{\mathbb T}^t\}$ is closed for each $\mathbf{i}\in{\mathbb T}$, so for each finite $n$, the set
\begin{equation}
P_n:=\big\{t\in[0,1]:\varnothing\xrightarrow{{\mathbb T}^t\setminus{\mathbb A}}\mathbf{j}
\mbox{ for some }\mathbf{j}\in{\mathbb T}\mbox{ with }|\mathbf{j}|=n\big\},
\end{equation}
being a finite intersection and union of sets of the form $T(\mathbf{i})$, is also closed. It follows that the same is true for $P=\bigcap_{n\geq 0}P_n$.
\end{proof}
\begin{proof}[Proof of Lemma~\ref{L:FY}]
By Lemma~\ref{L:perctime}, for each $\mathbf{i}\in{\mathbb T}$, the set
\[
T(\mathbf{i}):=\big\{t\in[0,1]:\mathbf{i}\xrightarrow{{\mathbb T}^t\setminus{\mathbb F}}\infty\big\}
\]
is a random closed subset of $[0,1]$. By Lemma~\ref{L:perc} below, ${\mathbb P}[\mathbf{i}\xrightarrow{{\mathbb T}^t\setminus{\mathbb F}}\infty]\leq{\mathbb P}[\mathbf{i}\xrightarrow{{\mathbb T}^t}\infty]=t$ for all $t\in(0,1]$, so there a.s.\ exists a random $\varepsilon>0$ such that $T(\mathbf{i})\subset[\varepsilon,1]$, i.e., $T(\mathbf{i})$ is a random compact subset of $(0,1]$. Since $\Xi$ is a closed subset of $(0,1]$ this implies that on the event that $Y_\mathbf{i}\leq 1$, the infimum in (\ref{YF}) is in fact a minimum and $\mathbf{i}\xrightarrow{{\mathbb T}^t\setminus{\mathbb F}}\infty$ for $t=Y_\mathbf{i}$.
Let $\mathbf{i}\in{\mathbb I}$. If $Y_{\mathbf{i} 1}>\tau_\mathbf{i}$, then clearly there exists no $t\in(0,\tau_\mathbf{i}]$ such that $\mathbf{i} 1\xrightarrow{{\mathbb T}^t\setminus{\mathbb F}}\infty$, and hence by (\ref{frozdef}) $\mathbf{i}\not\in{\mathbb F}$. On the other hand, if $Y_{\mathbf{i} 1}\leq\tau_\mathbf{i}$, then by what we have just proved, setting $t:=Y_{\mathbf{i} 1}$ we have $t\in\Xi\cap(0,\tau_\mathbf{i}]$ and $\mathbf{i} 1\xrightarrow{{\mathbb T}^t\setminus{\mathbb F}}\infty$, which by (\ref{frozdef}) shows that $\mathbf{i}\in{\mathbb F}$.
\end{proof}
To prepare for the proof of Lemma~\ref{L:burnlaw}, we need a bit of
theory. Let ${\rm BV}$ denote the space of functions $F:{\mathbb R}\to{\mathbb R}$ that are
locally of bounded variation. For each $F\in{\rm BV}$, the right and
left limits $F(t+):=\lim_{s\downarrow t}F(s)$ and $F(t-):=\lim_{s\uparrow t}F(s)$ exist
for each $t\in{\mathbb R}$, and $F$ defines a signed measure $\mathrm{d} F$ on ${\mathbb R}$ by any of
the equivalent formulas
\begin{equation}
\mathrm{d} F\big((s,t]\big)=F(t+)-F(s+)\quad(s<t)\quad\mbox{and}\quad
\mathrm{d} F\big([s,t)\big)=F(t-)-F(s-)\quad(s<t).
\end{equation}
For $G,F\in{\rm BV}$, we let $G\mathrm{d} F$ denote the signed measure obtained by
weighting $\mathrm{d} F$ with the density $G$. For $F\in{\rm BV}$, we define
\begin{equation}
\overline F(t):=\ha\big(F(t-)+F(t+)\big)\qquad(t\in{\mathbb R}).
\end{equation}
It is well-known that a right-continuous function with left limits makes at
most countably many jumps, and hence $\overline F(t)\neq F(t)$ for at most countably
many values of $t$. We will need the following simple fact.
\begin{lemma}[Product rule]
For\label{L:prodrul} $F,G\in{\rm BV}$, one has $FG\in{\rm BV}$ and
$\mathrm{d}(FG)=\overline F\mathrm{d} G+\overline G\mathrm{d} F$.
\end{lemma}
\begin{proof}
The statement is well-known if $F$ and $G$ are continuous. Therefore, since
our formula is linear in $F$ and $G$ and since each measure can be decomposed
into an atomic and nonatomic part, it suffices to prove the statement only
when $\mathrm{d} F$ and $\mathrm{d} G$ are purely atomic. Using again linearity and a simple
limit argument, it suffices to prove the statement only in the case that $\mathrm{d}
F=\delta_s$ and $\mathrm{d} G=\delta_t$ for some $s,t\in{\mathbb R}$. If $s\neq t$, the statement is
trivial. If $s=t$, then the statement follows from the observation that
\begin{equation}\begin{array}{r@{\,}c@{\,}l}
\displaystyle F(t+)G(t+)-F(t-)G(t-)
&=&\displaystyle\ha\big(F(t+)+F(t-)\big)\big(G(t+)-G(t-)\big)\\[5pt]
&&\displaystyle+\ha\big(G(t+)+G(t-)\big)\big(F(t+)-F(t-)\big).
\end{array}\end{equation}
\end{proof}
We cite the following lemma from \cite[Lemma~38]{RST19}.
\begin{lemma}[Integral formulation of RDE]
A\label{L:RDEint} probability measure $\rho$ on $I$ solves the RDE
(\ref{uni_RDE}) if and only if
\begin{equation}\label{RDEint}
\int_{[0,t]}\rho(\mathrm{d} s)s=\rho\big([0,t]\big)^2\qquad\big(t\in[0,1]\big).
\end{equation}
\end{lemma}
The following lemma is just a simple rewrite of the previous one. Below, we
let $\mu\big|_A$ denote the restriction of a (signed) measure $\mu$ to a
measurable set $A$, defined as $\mu\big|_A(B):=\mu(A\cap B)$.
\begin{lemma}[Differential formulation of RDE]
Let\label{L:RDEdif} $T$ denote the identity function $T(t):=t$ $(t\in{\mathbb R})$. Assume that $F\in{\rm BV}$ is right-continuous and nondecreasing and satisfies $F(t)=0$ $(t<0)$, $F(t)=F(1)$ $(t>1)$, and
\begin{equation}\label{RDEdif}
T\mathrm{d} F=2\overline F\mathrm{d} F.
\end{equation}
Then there exists a unique solution $\rho$ to the RDE (\ref{uni_RDE})
such that
\begin{equation}\label{rhoF}
\rho\big([0,t]\big)=F(t)\qquad\big(t\in[0,1]\big),
\end{equation}
and each solution $\rho$ to the RDE (\ref{uni_RDE}) arises in this way.
\end{lemma}
\begin{proof}
Let $\rho$ be a solution of the RDE (\ref{uni_RDE}) and let $F:[0,1]\to{\mathbb R}$ be defined as in (\ref{rhoF}). Extend $F$ to a function in ${\rm BV}$ by setting $F(t):=0$ for $t<0$ and $F(t):=F(1)$ for $t>1$. Then by Lemma~\ref{L:RDEint}, $\int_{(0,t]}T\mathrm{d} F=F(t)^2$ $(t\in[0,1])$, which by Lemma~\ref{L:prodrul} implies that $F$ solves (\ref{RDEdif}).
Assume, conversely, that $F\in{\rm BV}$ is right-continuous and nondecreasing and satisfies $F(t)=0$ $(t<0)$ and (\ref{RDEdif}). Then clearly $F\geq 0$. Formula (\ref{RDEdif}) implies that for a.e.\ $t$ w.r.t.\ $\mathrm{d} F$, we have $\overline F(t)=\ha t$, which by the fact that $F\geq 0$ implies $F(t)\leq t$. It follows that setting $\rho([0,t]):=F(t)$ $(t\in[0,1])$ defines a subprobability measure on $[0,1]$, which can uniquely be extended to a probability measure on $[0,1]\cup\{\infty\}$. Lemma~\ref{L:prodrul} implies that $\int_{(0,t]}T\mathrm{d} F=F(t)^2$ $(t\in[0,1])$, so using the fact that $\rho(\{0\})=F(0)-F(0-)=0$ and Lemma~\ref{L:RDEint}, we conclude that $\rho$ solves the RDE (\ref{uni_RDE}).
\end{proof}
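To illustrate how Lemma~\ref{L:RDEint} is applied in a concrete case (this is an added illustration, which also substantiates the example mentioned in the footnote in Subsection~\ref{S:burn}), let $\Xi=\{s,t\}$ with $0<s<t\leq 1$ and $t\leq 2s$, and set $\rho:=s\delta_s+(1-s)\delta_\infty$. For $u<s$ both sides of (\ref{RDEint}) vanish, while for $s\leq u\leq 1$ we have $\int_{[0,u]}\rho(\mathrm{d} r)r=s\cdot s=\rho([0,u])^2$, so $\rho$ solves the RDE (\ref{uni_RDE}). Since $\rho$ is moreover concentrated on $\Xi\cup\{\infty\}$ and satisfies $\rho([0,s])=s\geq\ha s$ and $\rho([0,t])=s\geq\ha t$ (the latter precisely because $t\leq 2s$), Lemma~\ref{L:burnlaw} yields $\rho_\Xi=\rho$.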
Let $T\in{\rm BV}$ denote the identity function $T(t):=t$ $(t\in{\mathbb R})$. For a given closed set $\Xi\subset{\mathbb R}$, we will be interested in right-continuous functions $F\in{\rm BV}$ that solve the differential equation
\begin{equation}\label{RDEdif2}
{\rm(i)}\ T\mathrm{d} F=2\overline F\mathrm{d} F,\quad{\rm(ii)}\ \mathrm{d} F\big|_\Xi=\mathrm{d} F,
\quad{\rm(iii)}\ F(t)\geq\ha t\quad(t\in\Xi).
\end{equation}
Note that condition~(ii) says that the signed measure $\mathrm{d} F$ is concentrated on $\Xi$. Our first lemma says that the distance between two solutions of (\ranglef{RDEdif2}) is a nonincreasing function of time.
\begin{lemma}[Distance between two solutions]
Let\label{L:Fdist} $\Xi\subset{\mathbb R}$ be closed and for
$i=1,2$, let $F_i\in{\rm BV}$ be right-continuous solutions to
the differential equation (\ref{RDEdif2}). Then
$\big|F_1(t)-F_2(t)\big|\leq\big|F_1(s)-F_2(s)\big|$ $(s\leq t)$.
\end{lemma}
\begin{proof}
We observe that by (i), we have $\overline F_i(t)=\ha t$ for a.e.\ $t$ w.r.t.\ $\mathrm{d}
F_i$. In particular, $\overline F_i(t)=\ha t$ whenever $F_i(t-)\neq F_i(t)$, which
we can combine with condition~(iii) to get
\[
\mbox{(iii)'}\;\ \overline F_i(t)\geq\ha t\quad(t\in\Xi).
\]
We now use Lemma~\ranglef{L:prodrul} to calculate
\begin{equation}\begin{array}{l}\lambdabel{diF}
\ha\mathrm{d}(F_1-F_2)^2=(\overline F_1-\overline F_2)(\mathrm{d} F_1-\mathrm{d} F_2)\\[5pt]
\mathrm{d}splaystyle\quad=\overline F_1\mathrm{d} F_1-\overline F_1\mathrm{d} F_2-\overline F_2\mathrm{d} F_1+\overline F_2\mathrm{d} F_2
=(\ha T-\overline F_2)\mathrm{d} F_1+(\ha T-\overline F_1)\mathrm{d} F_2,
\end{array}\end{equation}
where in the last step we have used (i). Using moreover
(ii) and (iii)', we see that the right-hand side of (\ranglef{diF}) is nonpositive,
so the claim of the lemma follows by integration.
\end{proof}
Let ${\cal F}$ denote the space of all right-continuous, nondecreasing functions $F:{\mathbb R}\to{\mathbb R}$ that satisfy $0\leq F(t)\leq 0\vee t$ $(t\in{\mathbb R})$. In other words, these are the distribution functions of nonnegative measures $\mathrm{d} F$ on ${[0,\infty)}$ that satisfy $\mathrm{d} F([0,t])\leq t$ for all $t\geq 0$. We equip ${\cal F}$ with a topology that corresponds to vague convergence of the measures $\mathrm{d} F$. Then ${\cal F}$ is a compact, metrisable space and $F_n\to F$ in the topology on ${\cal F}$ if and only if $F_n(t)\to F(t)$ for each continuity point $t$ of $F$. Our aim is to prove that for each closed set $\Xi\subset{[0,\infty)}$, there exists a unique $F\in{\cal F}$ that solves (\ref{RDEdif2}). Uniqueness follows from Lemma~\ref{L:Fdist}, so it remains to prove existence. We will use an approximation argument. We start by proving the statement for finite $\Xi$.
\begin{lemma}[Finite sets]
For\label{L:finex} each finite set $\Xi\subset(0,\infty)$, there exists an $F\in{\cal F}$ that solves (\ref{RDEdif2}).
\end{lemma}
\begin{proof}
Let $\Xi=\{t_1,\ldots,t_n\}$ with $0=:t_0<t_1<\cdots<t_n$. We inductively define $F$ so that it is constant on each of the intervals $(-\infty,t_1)$, $[t_1,t_2)$,\ldots $[t_{n-1},t_n)$, and $[t_n,\infty)$, satisfies $F(0)=0$, and
\begin{equation}\lambdabel{Fkind}
F(t_k):=F(t_{k-1})\vee\big(t_k-F(t_{k-1})\big)\qquad(1\leq k\leq n).
\end{equation}
Note that the average of $F(t_{k-1})$ and $t_k-F(t_{k-1})$ is $\ha t_k$, so their maximum is $\geq\ha t_k$. In view of this, $F$ clearly satisfies (\ranglef{RDEdif2})~(ii) and (iii). Moreover, for each $1\leq k\leq n$, we have either $F(t_k)=F(t_{k-1})$ or $F(t_k)=t_k-F(t_{k-1})$. In either case,
\begin{equation}
t_k\big(F(t_k)-F(t_{k-1})\big)=2\cdot\ha\big(F(t_k)+F(t_{k-1})\big)\big(F(t_k)-F(t_{k-1})\big),
\end{equation}
which shows that $F$ satisfies (\ranglef{RDEdif2})~(i). It is clear that $F$ is right-continuous, nonnegative, and nondecreasing, and by induction (\ranglef{Fkind}) also implies that $F(t)\leq t$ for all $t\geq 0$, showing that $F\in{\cal F}$.
\end{proof}
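To illustrate the recursion (\ref{Fkind}) on a small example: if $\Xi=\{t_1,t_2\}$ with $0<t_1<t_2$, then $F(t_1)=0\vee(t_1-0)=t_1$ and $F(t_2)=t_1\vee(t_2-t_1)$, so $F$ makes a second jump at $t_2$ precisely when $t_2>2t_1$; in both cases $F(t_2)\geq\ha t_2$, in accordance with condition~(iii) of (\ref{RDEdif2}).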
Let $d$ be any metric generating the topology on $[0,\infty]$ and let ${\cal K}[0,\infty]$ denote the space of all closed subsets of $[0,\infty]$. For each $A\in{\cal K}[0,\infty]$ and $\varepsilon>0$, we set
\begin{equation}\lambdabel{Hau1}
A_\varepsilon:=\big\{t\in[0,\infty]:d(t,A)<\varepsilon\big\}
\quad\mbox{where}\quad
d(t,A):=\inf_{s\in A}d(t,s).
\end{equation}
We equip ${\cal K}[0,\infty]$ with the \emph{Hausdorff metric}
\begin{equation}\lambdabel{Hau2}
d_{\rm H}(A,B):=\inf\big\{\varepsilon>0:A\subset B_\varepsilon\mbox{ and }B\subset A_\varepsilon\big\}.
\end{equation}
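For instance, on $[0,1]$ with the usual metric $d(x,y):=|x-y|$ one has $d_{\rm H}\big([0,1],\{0,1\}\big)=\tfrac12$, since $\{0,1\}\subset[0,1]$ while the point $\tfrac12\in[0,1]$ lies at distance $\tfrac12$ from $\{0,1\}$; this is the sense in which the finite sets constructed in the proof below approximate a general closed set.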
By \cite[Lemma~B.1]{SSS14}, the topology generated by $d_{\rm H}$ does not depend on the choice of the metric $d$ generating the topology on $[0,\infty]$. The following lemma lists some elementary properties of the space ${\cal K}[0,\infty]$.
\begin{lemma}[Properties of the Hausdorff metric]
The\label{L:Haus} space ${\cal K}[0,\infty]$ is compact and the set of all finite subsets of $(0,\infty)$ is dense in ${\cal K}[0,\infty]$.
\end{lemma}
\begin{proof}
Since $[0,\infty]$ is homeomorphic to $[0,1]$, we may equivalently show that ${\cal K}[0,1]$ is compact and the set of all finite subsets of $(0,1)$ is dense in ${\cal K}[0,1]$, where the Hausdorff metric on ${\cal K}[0,1]$ is defined in the same way as in (\ranglef{Hau1})--(\ranglef{Hau2}), with $d(x,y):=|x-y|$ the usual metric on $[0,1]$. The fact that ${\cal K}(E)$ is compact if $E$ is compact is well-known, see, e.g., \cite[Lemma~B.4]{SSS14}. If $\Xi\subset[0,1]$ is closed, then it is easy to see that the sets $\Xi_n:=\{k/n:1<k<n,\ d(k/n,\Xi)\leq 1/n\}$ converge to $\Xi$ in the Hausdorff metric. This shows that the set of finite subsets of $(0,1)$ is dense in ${\cal K}[0,1]$.
\end{proof}
Our next lemma will allow us to construct solutions to (\ref{RDEdif2}) for general $\Xi$ by approximation with finite $\Xi$.
\begin{lemma}[Limits of solutions]
Let\label{L:limsol} $F,F_n\in{\cal F}$ and $\Xi_n,\Xi\in{\cal K}[0,\infty]$ satisfy $F_n\to F$ and $\Xi_n\to\Xi$. Assume that $F_n$ solves (\ref{RDEdif2}) relative to $\Xi_n\cap{[0,\infty)}$ for each $n$. Then $F$ solves (\ref{RDEdif2}) relative to $\Xi\cap{[0,\infty)}$.
\end{lemma}
\begin{proof}
Recall that $F_n\to F$ means that $\mathrm{d} F_n\to\mathrm{d} F$ vaguely, or equivalently, $F_n(t)\to F(t)$ for each continuity point $t$ of $F$. Since $T$ is a continuous function, the vague convergence $\mathrm{d} F_n\to\mathrm{d} F$ implies that also $T\mathrm{d} F_n\to T\mathrm{d} F$ vaguely. By Lemma~\ref{L:prodrul}, $2\overline F\mathrm{d} F=\mathrm{d} F^2$. Now if $F_n(t)\to F(t)$ for each continuity point $t$ of $F$, then also $F^2_n(t)\to F^2(t)$ for each continuity point $t$ of $F^2$, so taking the vague limit on the left- and right-hand sides of the equation $T\mathrm{d} F_n=\mathrm{d} F_n^2$, we see that $F$ solves (\ref{RDEdif2})~(i). Since $\Xi_n\to\Xi$, we easily obtain that $\mathrm{d} F$ is concentrated on $\Xi_\varepsilon$ for each $\varepsilon>0$, and hence $F$ satisfies (\ref{RDEdif2})~(ii). To see that $F$ also satisfies (\ref{RDEdif2})~(iii), fix $t\in\Xi$. Since $\Xi_n\to\Xi$ we can find $t_n\in\Xi_n$ such that $t_n\to t$. Then for each $s>t$, we have $F_n(s)\geq F_n(t_n)\geq\ha t_n$ for all $n$ large enough. Taking the limit, it follows that $F(s)\geq\ha t$ for each $s\geq t$ that is a continuity point of $F$, and hence $F(t)\geq\ha t$ by right-continuity.
\end{proof}
We can now prove existence of solutions to (\ref{RDEdif2}) for general $\Xi$.
\begin{lemma}[Existence of solutions to the RDE]
For\label{L:exist} each closed set $\Xi\subset{[0,\infty)}$, there exists a function $F\in{\cal F}$ that solves (\ref{RDEdif2}).
\end{lemma}
\begin{proof}
By Lemma~\ranglef{L:Haus}, for each closed $\Xi\subset[0,\infty]$, there exist finite $\Xi_n\subset(0,\infty)$ such that $\Xi_n\to\Xi$. By Lemma~\ranglef{L:finex}, for each $n$ there exists an $F_n\in{\cal F}$ so that $F_n$ solves (\ranglef{RDEdif2}) relative to $\Xi_n$. Since ${\cal F}$ is compact, by going to a subsequence if necessary we can assume that $F_n\to F$ for some $F\in{\cal F}$. Then Lemma~\ranglef{L:limsol} tells us that $F$ solves (\ranglef{RDEdif2}) relative to $\Xi\cap{[0,\infty)}$.
\end{proof}
Before we prove Lemma~\ref{L:burnlaw}, we recall the general definition of an
RTP. Let ${\mathbb T}$ denote the space of all finite words $\mathbf{i}=i_1\cdots i_n$
$(n\geq 0)$ made up from the alphabet $\{1,\ldots,d\}$, where $d\geq 1$ is
some fixed integer. All previous notation involving the binary tree
generalizes in a straightforward manner to the $d$-ary tree ${\mathbb T}$. Let $I$ and
$\Omega$ be Polish spaces, let $\gamma:\Omega\tildemes I^d\to I$ be a measurable function,
and let $(\omega_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ be i.i.d.\ $\Omega$-valued random variables. Let
$\nu$ be a probability law on $I$ that solves the Recursive Distributional
Equation (RDE)
\begin{equation}\lambdabel{RDE}
X_\varnothing\isd\gamma[\omega_\varnothing](X_1,\ldots,X_d),
\end{equation}
where $\isd$ denotes equality in distribution, $X_\varnothing$ has law $\nu$, and
$X_1,\dots, X_d$ are copies of $X_\varnothing$, independent of each other and of
$\omega_\varnothing$. A simple argument based on Kolmogorov's extension theorem (see
\cite[Lemma~1.9]{MSS20}) tells us that the i.i.d.\ random variables
$(\omega_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ can be coupled to $I$-valued random
variables $(X_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ in such a way that:
\begin{enumerate}
\item For each finite rooted subtree ${\mathbb U}\subset{\mathbb T}$, the r.v.'s
$(X_\mathbf{i})_{\mathbf{i}\in\partial{\mathbb U}}$ are i.i.d.\ with common law $\nu$ and independent
of $(\omega_\mathbf{i})_{\mathbf{i}\in{\mathbb U}}$.
\item $\displaystyle X_\mathbf{i}=\gamma[\omega_\mathbf{i}](X_{\mathbf{i} 1},\ldots,X_{\mathbf{i} d})\qquad(\mathbf{i}\in{\mathbb T})$.
\end{enumerate}
Moreover, these conditions uniquely determine the joint law of
$(\omega_\mathbf{i},X_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$. We call the latter the
\emph{Recursive Tree Process} (RTP) corresponding to the maps $\gamma$ and
solution $\nu$ of the RDE (\ranglef{RDE}).
\begin{proof}[Proof of Lemma~\ref{L:burnlaw}]
Let $\Xi\subset(0,1]$ be relatively closed and let $\overline\Xi:=\Xi\cup\{0\}$. By Lemma~ \ranglef{L:exist}, there exists a solution $F\in{\cal F}$ of the differential equation (\ranglef{RDEdif2}) relative to $\overline\Xi$. Set $\rho_\Xi([0,t]):=F(t)$ $(t\in[0,1])$ and $\rho_\Xi(\{\infty\}):=1-F(1)$ (which is $\geq 0$ since $F(1)\leq 1$ by the definition below (\ranglef{diF}) of the class ${\cal F}$) and observe that $\rho_\Xi(\{0\})=0$. Then $\rho_\Xi$ is a probability measure on $I$ that satisfies conditions (ii) and (iii) of Lemma~\ranglef{L:burnlaw}, and by Lemma~\ranglef{L:RDEdif} also condition~(i). Assume, conversely, that $\rho_\Xi$ satisfies conditions (i)--(iii) of Lemma~\ranglef{L:burnlaw}, and set $F(t):=\rho_\Xi([0,t])$ $(t\in[0,1])$, $F(t):=0$ $(t<0)$, $F(t):=F(1)$ $(t>1)$. Then by Lemma~\ranglef{L:RDEdif}, $F$ solves the differential equation (\ranglef{RDEdif2}) subject to the initial condition $F(t):=0$ $(t<0)$. By Lemma~\ranglef{L:Fdist}, these conditions uniquely determine $F$ and hence also $\rho_\Xi$.
Assume that ${\mathbb F}$ solves the frozen percolation equation (\ranglef{frozdef}) for
the set of possible freezing times $\Xi$ and that ${\mathbb F}$ is stationary, adapted,
and respects the tree structure. Generalising (\ref{YF}), for any $\Delta\subset(0,1]$
that is relatively closed, we set
\begin{equation}\label{YFwhole}
Y_\mathbf{i}^\Delta:=\inf\big\{t\in\Delta:
\mathbf{i}\overset{{\mathbb T}^t\setminus{\mathbb F}}{\longleftrightarrow}\infty\big\}\qquad(\mathbf{i}\in{\mathbb T}).
\end{equation}
Then in particular, $Y_\mathbf{i}^{\Xi}$ is the burning time $Y_\mathbf{i}$ defined in (\ref{YF}). As in (\ref{Om}), we write $\omega_\mathbf{i}=(\tau_\mathbf{i},\kappa_\mathbf{i})$ $(\mathbf{i}\in{\mathbb T})$. Since ${\mathbb F}$ is stationary, adapted, and respects the tree structure, the random
variables $(\omega_\mathbf{i},Y^\Xi_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ form an RTP corresponding to the
map $\checki$ in (\ranglef{chi_def}) and some solution $\rho_\Xi$ to the RDE
(\ranglef{uni_RDE}). To complete the proof, we need to show that $\rho_\Xi$
also satisfies conditions (ii) and (iii) of Lemma~\ranglef{L:burnlaw}.
Since $\rho_\Xi$ is the law of $Y^\Xi_\mathbf{i}$ $(\mathbf{i}\in{\mathbb T})$,
it clearly satisfies condition~(ii) of Lemma~\ranglef{L:burnlaw}. To also
prove (iii), we use that by Lemma~\ranglef{L:FY}, we have
${\mathbb F}=\big\{\mathbf{i}\in{\mathbb I}:Y^\Xi_{\mathbf{i} 1}\leq\tau_\mathbf{i}\big\}$,
which allows us to apply \cite[Prop.~39]{RST19}, which tells us that
\begin{equation}
{\mathbb P}\big[Y^{(0,1]}_\mathbf{i}\leq t\big]=F(t)\vee\big(t-F(t)\big)
\qquad\big(\mathbf{i}\in{\mathbb T},\ t\in[0,1]\big),
\end{equation}
where $F(t):=\rho_\Xi\big([0,t]\big)$ $\big(t\in[0,1]\big)$. Since
\begin{equation}\lambdabel{YXiY}
Y^\Xi_\mathbf{i}=\inf\big\{t\in\Xi:t\geq Y^{(0,1]}_\mathbf{i}\big\}\qquad(\mathbf{i}\in{\mathbb T}),
\end{equation}
it follows that
\begin{equation}\lambdabel{FtF}
F(t)=\rho_\Xi\big([0,t]\big)={\mathbb P}\big[Y^\Xi_\mathbf{i}\leq t\big]
={\mathbb P}\big[Y^{(0,1]}_\mathbf{i}\leq t\big]=F(t)\vee\big(t-F(t)\big)
\qquad(t\in\Xi),
\end{equation}
where the two probabilities are equal by (\ranglef{YXiY}) and the fact that $t\in\Xi$. This proves that $\rho_\Xi$ satisfies condition~(iii) of Lemma~\ranglef{L:burnlaw}.
\end{proof}
\begin{proof}[Proof of Lemma~\ref{L:genRDE}]
If $\rho$ solves the RDE (\ranglef{uni_RDE}), then by Lemma~\ranglef{L:RDEdif}, the
function $F\in{\rm BV}$ defined in (\ranglef{rhoF}) is right-continuous and
nondecreasing with $F(0)=0$ and satisfies (\ranglef{RDEdif}). Let
$\Xi:={\rm supp}(\mathrm{d} F)\cap(0,1]$. Then (\ranglef{RDEdif}) implies that
$\overline F(t)=\ha t$ for a.e.\ $t$ w.r.t.\ $\mathrm{d} F$. Since $F$ is right-continuous
with left limits, this implies that $\overline F(t)=\ha t$ for all $t\in\Xi$, and
hence $F(t)\geq\ha t$ for all $t\in\Xi$. It follows that $\rho$ satisfies
conditions (i)--(iii) of Lemma~\ranglef{L:burnlaw} and hence $\rho=\rho_\Xi$.
\end{proof}
The following lemma settles the existence part of Theorem~\ref{T:frozen}.
\begin{lemma}[Frozen points]
Let\label{L:YFY} $\Xi\subset(0,1]$ be closed w.r.t.\ the relative topology of
$(0,1]$, let $(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ be the RTP corresponding to the
solution $\rho_\Xi$ to the RDE (\ranglef{uni_RDE}) defined in Lemma~\ranglef{L:burnlaw},
and let ${\mathbb F}$ be defined by (\ranglef{FY}). Then ${\mathbb F}$ solves the frozen
percolation equation (\ranglef{frozdef}) for the set of possible freezing times
$\Xi$ and ${\mathbb F}$ is stationary, adapted, and respects the tree structure.
Moreover, the $Y_\mathbf{i}$ are given by (\ranglef{YF}).
\end{lemma}
\begin{proof}
It follows from the properties of an RTP that ${\mathbb F}$, defined by (\ranglef{FY}), is
stationary, adapted, and respects the tree structure. The inductive relation
(\ranglef{Yind}) implies that if $Y_\mathbf{i}<\infty$, then there exist
$(j_k)_{k\geq 1}$ such that $\mathbf{i} j_1\cdots j_n$ is a legal descendant of
$\mathbf{i} j_1\cdots j_{n-1}$ and $Y_\mathbf{i}=Y_{\mathbf{i} j_1\cdots j_n}$ for all $n\geq
1$. For all $n\geq 0$ such that $\kappa_{\mathbf{i} j_1\cdots j_n}=1$, the fact that
$Y_{\mathbf{i} j_1\cdots j_n}<\infty$ and (\ranglef{Yind}) moreover imply that
$Y_{\mathbf{i} j_1\cdots j_n1}>\tau_\mathbf{i}$. Therefore, we have that
\begin{equation}
\mathbf{i}\overset{{\mathbb T}^t\setminus{\mathbb F}}{\longleftrightarrow}\infty\quad\mbox{if }t=Y_\mathbf{i}<\infty.
\end{equation}
Since $Y_\mathbf{i}$ takes values in $\Xi\cup\{\infty\}$, it follows that
\begin{equation}\label{Ygeqinf}
Y_\mathbf{i}\geq\inf\big\{t\in\Xi:
\mathbf{i}\overset{{\mathbb T}^t\setminus{\mathbb F}}{\longleftrightarrow}\infty\big\}\qquad(\mathbf{i}\in{\mathbb T}).
\end{equation}
To prove that this is actually an equality, let $Y'_\mathbf{i}$ denote the
right-hand side of (\ranglef{Ygeqinf}). Since ${\mathbb F}$ is
stationary, adapted, and respects the tree structure, as pointed out
in Section~\ranglef{S:burn}, the random variables
$(\omega_\mathbf{i},Y'_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ form an RTP corresponding to the map $\checki$ in
(\ranglef{chi_def}) and some solution $\rho$ to the RDE (\ranglef{uni_RDE}). By
Lemma~\ranglef{L:burnlaw}, $\rho=\rho_\Xi$, so $Y'_\mathbf{i}$ and $Y_\mathbf{i}$ are equal in
law, which by (\ranglef{Ygeqinf}) implies that they are a.s.\ equal.
This proves that the $Y_\mathbf{i}$ are given by (\ranglef{YF}). By assumption,
${\mathbb F}$ is defined by (\ranglef{FY}). Inserting
(\ranglef{YF}) into (\ranglef{FY}), we see that ${\mathbb F}$ solves the
frozen percolation equation (\ranglef{frozdef}).
\end{proof}
\begin{proof}[Proof of Theorem~\ref{T:frozen}]
Lemma~\ranglef{L:YFY} proves existence of a solution ${\mathbb F}$ of the frozen
percolation equation (\ranglef{frozdef}) for the set of possible freezing times
$\Xi$ that is stationary, adapted, and respects the tree structure.
It remains to prove uniqueness in law. Set $\omega_\mathbf{i}:=(\tau_\mathbf{i},\kappa_\mathbf{i})$
$(\mathbf{i}\in{\mathbb T})$ and let $\Omega=(\omega_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$. Let $Y_\mathbf{i}$ $(\mathbf{i}\in{\mathbb T})$
be the burning times defined in (\ranglef{YF}). Since ${\mathbb F}$ is stationary, adapted,
and respects the tree structure, as pointed out
in Section~\ranglef{S:burn}, the random variables
$(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ form an RTP corresponding to the map $\checki$ in
(\ranglef{chi_def}) and some solution $\rho$ to the RDE (\ranglef{uni_RDE}). By
Lemma~\ranglef{L:burnlaw}, $\rho=\rho_\Xi$, and hence by
\cite[Lemma~1.9]{MSS20} the law of $(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ is uniquely
determined. By Lemma~\ranglef{L:FY}, this implies that the joint law of
$(\Omega,{\mathbb F})$ is also uniquely determined.
\end{proof}
\subsection{Scale invariance}\label{S:scaleprf}
In this subsection, we prove Lemmas \ranglef{L:scaleRDE}, \ranglef{L:scalebivRDE}, and \ranglef{L:scalenu} about invariance of solutions of the (bivariate) RDE under the scaling maps ${\mathbb G}amma_t$ and ${\mathbb G}amma^{(2)}_t$. We will generalise a bit and define scaling maps ${\mathbb G}amma^{(n)}_t$ for any $1\leq n\leq\infty$, where the case $n=\infty$ will play an important role in the proof of Lemma~\ranglef{L:scalenu}.
Recall the definition of the scaling maps $\mathbb{P}si_t:I\to I$ $(t>0)$ in (\ranglef{psit}). For $t>0$, we define a cut-off map $c_t:I\to I$ by
\begin{equation}\lambdabel{ct}
c_t(y):=\left\{\begin{array}{ll}
y\quad&\mbox{if }y\leq t,\\[5pt]
\infty\quad&\mbox{otherwise.}
\end{array}\right.
\end{equation}
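In words, $c_t$ leaves $[0,t]$ untouched and maps all of $(t,1]$ to $\infty$, so the cut-off map $C_t$ defined in (\ref{Can}) below simply transfers the mass that a measure gives to $(t,1]$ to the point $\infty$.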
Note that $c_t$ is the identity map when $t\geq 1$. It is easy to check that
\begin{equation}\lambdabel{psic}
\mathbb{P}si_{1/t}\circ\mathbb{P}si_t=c_t\qquad(t>0).
\end{equation}
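Indeed, recalling the form of $\psi_t$ in (\ref{psit}) ($\psi_t(y)=t^{-1}y$ for $y\leq t$ and $\psi_t(y)=\infty$ otherwise, as also used in the calculation leading to (\ref{MiMi}) below), one checks that $\psi_{1/t}(\psi_t(y))=t\cdot t^{-1}y=y$ for $y\leq t$, while $\psi_t(y)=\infty$ and hence $\psi_{1/t}(\psi_t(y))=\infty$ for $y>t$, which is exactly $c_t(y)$.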
For $1\leq n<\infty$, we write $[n]:=\{1,\ldots,n\}$ and we set $[\infty]:={\mathbb N}_+$. We denote a generic element of $I^n$ by $\vec y=(y^k)_{k\in[n]}$ and we define $\mathbb{P}si^{(n)}_t:I^n\to I^n$ and $c^{(n)}_t:I^n\to I^n$ in a coordinatewise way by $\mathbb{P}si^{(n)}_t(\vec y):=\big(\mathbb{P}si_t(y^k)\big)_{k\in[n]}$ and $c^{(n)}_t(\vec y):=\big(c_t(y^k)\big)_{k\in[n]}$.
We say that a probability measure on $I^n$ is \emph{symmetric} if it is invariant under a permutation of the coordinates. Generalising the definitions of ${\cal M}^{(1)}$ and ${\cal M}^{(2)}$ in Subsection~\ref{S:scale}, for each $1\leq n\leq\infty$ we let ${\cal M}^{(n)}$ denote the space of symmetric probability measures $\rho^{(n)}$ on $I^n$ such that
\begin{equation}\lambdabel{infyk}
\rho^{(n)}\big(J^n[t]\big)\leq t\qquad(0<t\leq 1)
\quad\mbox{with}\quad
J^n[t]:=\big\{\vec y\in I^n:\exists k\in[n]\mbox{ s.t.\ }y^k\leq t\big\}.
\end{equation}
Note that $J^n[1]=I^n\setminus\{\vec\infty\}$, where $\vec\infty$ denotes the element $\vec y\in I^n$ with $y^k:=\infty$ for all $k\in[n]$. Generalising the definitions of $\Gamma_t^{(1)}$ and $\Gamma_t^{(2)}$ in Subsection~\ref{S:scale}, for each $1\leq n\leq\infty$ and $t>0$, we define
\begin{equation}\lambdabel{Gan}
{\mathbb G}amma^{(n)}_t\rho^{(n)}:=t^{-1}\rho^{(n)}\circ(\mathbb{P}si^{(n)}_t)^{-1}
+(1-t^{-1})\delta_{\vec\infty}\qquad\big(\rho^{(n)}\in{\cal M}^{(n)}).
\end{equation}
We also define cut-off maps $C^{(n)}_t$ by
\begin{equation}\lambdabel{Can}
C^{(n)}_t\rho^{(n)}:=\rho^{(n)}\circ(c^{(n)}_t)^{-1}\qquad\big(\rho^{(n)}\in{\cal M}^{(n)})
\end{equation}
and in particular set $C_t:=C^{(1)}_t$. Finally, for all $1\leq n\leq\infty$, we define a map $T^{(n)}$ acting on probability measures on $I^n$ by
\begin{equation}\lambdabel{Tndef}
T^{(n)}\rho^{(n)}:=
\mbox{ the law of }\big(\checki[\omega](Y^k_1,Y^k_2)\big)_{k\in[n]},
\end{equation}
where $(Y^k_1)_{k\in[n]}$ and $(Y^k_2)_{k\in[n]}$ are independent random variables with law $\rho^{(n)}$, and $\omega$ is an independent random variable that is uniformly distributed on $[0,1]\tildemes\{1,2\}$. We call the equation
\begin{equation}\lambdabel{nvar}
T^{(n)}\rho^{(n)}=\rho^{(n)}
\end{equation}
the \emph{$n$-variate RDE}. In particular, for $n=1$ this is the RDE (\ref{uni_RDE}) and for $n=2$ this is the bivariate RDE (\ref{bivar_RDE}). The following lemma, which will be proved below, shows that all these maps are well-defined on the space ${\cal M}^{(n)}$.
\begin{lemma}[Maps are well-defined]
For\label{L:mapdef} each $1\leq n\leq\infty$ and $t>0$, the maps $\Gamma^{(n)}_t$, $C^{(n)}_t$, and $T^{(n)}$ map the space ${\cal M}^{(n)}$ into itself.
\end{lemma}
The following lemma says that as long as we are interested in symmetric solutions of the $n$-variate RDE (\ranglef{nvar}), it suffices to look for solutions in the space ${\cal M}^{(n)}$.
\begin{lemma}[Solutions to the RDE are scalable]
If\label{L:scalable} a symmetric probability measure $\rho^{(n)}$ on $I^n$ solves the $n$-variate RDE (\ref{nvar}), then $\rho^{(n)}\in{\cal M}^{(n)}$.
\end{lemma}
The following lemma is the central result of this subsection.
\begin{lemma}[Commutation relation]
For\label{L:commut} each $1\leq n\leq\infty$, one has
\begin{equation}\label{commut}
\Gamma^{(n)}_tT^{(n)}\rho^{(n)}=tT^{(n)}\Gamma^{(n)}_t\rho^{(n)}
+(1-t)\Gamma^{(n)}_t\rho^{(n)}
\qquad(t>0,\ \rho^{(n)}\in{\cal M}^{(n)}).
\end{equation}
\end{lemma}
We first show how Lemmas \ranglef{L:mapdef}--\ranglef{L:commut} imply Lemmas \ranglef{L:scaleRDE} and \ranglef{L:scalebivRDE}, and then prove Lemmas \ranglef{L:mapdef}--\ranglef{L:commut}. We start by proving a more general statement.
\begin{lemma}[Scale invariance of $n$-variate RDE]
Let\label{L:scalenRDE} $1\leq n\leq\infty$ and let $\rho^{(n)}$ be a
symmetric solution to the $n$-variate RDE (\ref{nvar}). Then
$\rho^{(n)}\in{\cal M}^{(n)}$, and for each $t>0$, the measure $\Gamma^{(n)}_t\rho^{(n)}$
is also a solution to (\ref{nvar}).
\end{lemma}
\begin{proof}
Let $1\leq n\leq\infty$ and let $\rho^{(n)}$ be a symmetric solution
to the $n$-variate RDE (\ranglef{nvar}). Then $\rho^{(n)}\in{\cal M}^{(n)}$
by Lemma~\ranglef{L:scalable}. Moreover, for each $t>0$, Lemma~\ranglef{L:commut}
and the fact that $T^{(n)}\rho^{(n)}=\rho^{(n)}$ imply that
\begin{equation}
{\mathbb G}amma^{(n)}_t\rho^{(n)}=tT^{(n)}{\mathbb G}amma^{(n)}_t\rho^{(n)}
+(1-t){\mathbb G}amma^{(n)}_t\rho^{(n)}
\end{equation}
which shows that $T^{(n)}{\mathbb G}amma^{(n)}_t\rho^{(n)}={\mathbb G}amma^{(n)}_t\rho^{(n)}$, i.e.,
the measure ${\mathbb G}amma^{(n)}_t\rho^{(n)}$ solves the $n$-variate RDE (\ranglef{nvar}).
\end{proof}
\begin{proof}[Proof of Lemmas \ref{L:scaleRDE} and \ref{L:scalebivRDE}]
Most of the statements of Lemmas \ranglef{L:scaleRDE} and \ranglef{L:scalebivRDE} follow by specialising Lemma~\ranglef{L:scalenRDE} to the cases $n=1$ and $n=2$, respectively. Apart from this, we only need to prove (\ranglef{scaleRDE}). Let $\Xi\subset(0,1]$ be relatively closed and let $\Xi'$ be as in (\ranglef{scaleRDE}). Then ${\mathbb G}amma_t\rho_\Xi$ solves the RDE (\ranglef{uni_RDE}) by Lemma~\ranglef{L:scalenRDE}. Using the definition of ${\mathbb G}amma_t$, it is easy to see that the fact that $\rho_\Xi$ has properties (ii) and (iii) of Lemma~\ranglef{L:burnlaw} implies that ${\mathbb G}amma_t\rho_\Xi$ has these same properties with $\Xi$ replaced by $\Xi'$, i.e., ${\mathbb G}amma_t\rho_\Xi$ is concentrated on $\Xi'\cup\{\infty\}$, and ${\mathbb G}amma_t\rho_\Xi\big([0,t']\big)\geq\ha t'$ for all $t'\in\Xi'$. Now Lemma~\ranglef{L:burnlaw} allows us to identify ${\mathbb G}amma_t\rho_\Xi$ as $\rho_{\Xi'}$.
\end{proof}
We now provide the proofs of Lemmas \ref{L:mapdef}--\ref{L:commut}.
\begin{proof}[Proof of Lemma~\ref{L:mapdef} (partially)]
Let $\rho^{(n)}\in{\cal M}^{(n)}$. It is clear that the right-hand side of (\ranglef{Gan}) defines a signed measure that is symmetric with respect to a permutation of the coordinates and that satisfies ${\mathbb G}amma^{(n)}_t\rho^{(n)}(I^n)=1$. We observe that by (\ranglef{psit}),
\begin{equation}\begin{array}{l}
\mathrm{d}splaystyle(\mathbb{P}si^{(n)}_t)^{-1}(J^n[s])
=\big\{\vec y\in I^n:\exists k\in[n]\mbox{ s.t.\ }\mathbb{P}si_t(y^k)\leq s\big\}\\[5pt]
\quad\mathrm{d}splaystyle=\big\{\vec y\in I^n:\exists k\in[n]\mbox{ s.t.\ }y^k\leq t
\mbox{ and }t^{-1}y^k\leq s\big\}
=\big\{\vec y\in I^n:\exists k\in[n]\mbox{ s.t.\ }y^k\leq st\wedge t\big\},
\end{array}\end{equation}
and hence
\begin{equation}\lambdabel{MiMi}
{\mathbb G}amma^{(n)}_t\rho^{(n)}(J^n[s])
=t^{-1}\rho^{(n)}\circ(\mathbb{P}si^{(n)}_t)^{-1}(J^n[s])
=t^{-1}\rho^{(n)}(J^n[st\wedge t])\leq t^{-1}(st\wedge t)\leq s
\end{equation}
for each $t>0$ and $0<s\leq 1$. Applying this with $s=1$ and using the fact that $I^n=J^n[1]\cup\{\vec\infty\}$ we see that ${\mathbb G}amma^{(n)}_t\rho^{(n)}$ is a probability measure. More generally, (\ranglef{MiMi}) shows that ${\mathbb G}amma^{(n)}_t$ maps the space ${\cal M}^{(n)}$ into itself.
It is clear that $C^{(n)}_t\rho^{(n)}$, defined in (\ranglef{Can}) , is a (symmetric) probability measure on $I^n$ whenever $\rho^{(n)}$ is. If moreover $\rho^{(n)}\in{\cal M}^{(n)}$, then
\begin{equation}
C^{(n)}_t\rho^{(n)}(J^n[s])
=\rho^{(n)}\circ(c^{(n)}_t)^{-1}(J^n[s])
=\rho^{(n)}(J^n[s\wedge t])\leq s
\end{equation}
for each $t>0$ and $0<s\leq 1$, which shows that $C^{(n)}_t$ maps the space ${\cal M}^{(n)}$ into itself.
This proves the claims for $\Gamma^{(n)}_t$ and $C^{(n)}_t$. We postpone the proof of the claim for $T^{(n)}$ until the proof of Lemma~\ref{L:commut}, where it will follow as a side result of the main argument.
\end{proof}
The proof of Lemma~\ref{L:scalable} uses the following simple lemma, which we cite from \cite[Lemma~8]{RST19}.
\begin{lemma}[Percolation probability]
One\label{L:perc} has
$\displaystyle{\mathbb P}\big[\varnothing\overset{{\mathbb T}^t}{\longleftrightarrow}\infty\big]=t\quad(0\leq t\leq 1)$.
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{L:scalable}]
We will prove the following, somewhat stronger statement. Let $1\leq
n\leq\infty$ and let $\rho^{(n)}$ be a solution to the $n$-variate RDE
(\ranglef{nvar}). Then we will show that $\rho^{(n)}$ satisfies
(\ranglef{infyk}). In particular, if $\rho^{(n)}$ is symmetric, this
implies that $\rho^{(n)}\in{\cal M}^{(n)}$.
Let $\rho^{(n)}$ be a solution to the $n$-variate RDE (\ranglef{nvar}) and for
$k\in[n]$, let $\rho_k$ denote the $k$-th marginal of $\rho^{(n)}$. It is
clear from (\ranglef{nvar}) that $\rho_k$ solves the RDE (\ranglef{uni_RDE}), so
by Lemma~\ranglef{L:genRDE}, for each $k\in[n]$, there exists a relatively closed
set $\Xi_k\subset(0,1]$ such that $\rho_k=\rho_{\Xi_k}$. Since $\rho^{(n)}$
solves the $n$-variate RDE (\ranglef{nvar}), by \cite[Lemma~1.9]{MSS20},
we can construct an $n$-variate RTP
\begin{equation}
\big(\omega_\mathbf{i},\vec Y_\mathbf{i}\big)_{\mathbf{i}\in{\mathbb T}}
\end{equation}
where $\vec Y_\mathbf{i}=(Y^k_\mathbf{i})_{k\in[n]}$ are $I^n$-valued random variables
such that $\vec Y_\mathbf{i}$ is inductively given in terms of $\vec Y_{\mathbf{i} 1}$ and
$\vec Y_{\mathbf{i} 2}$ as in (\ranglef{nvar}). In particular, for each $k\in[n]$,
$(\omega_\mathbf{i},Y^k_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ is an RTP corresponding to $\rho_k=\rho_{\Xi_k}$.
Let
\begin{equation}\lambdabel{FYk}
{\mathbb F}^k=\big\{\mathbf{i}\in{\mathbb I}:Y^k_{\mathbf{i} 1}\leq\tau_\mathbf{i}\big\}\qquad(k\in[n]).
\end{equation}
Then Lemma~\ranglef{L:YFY} tells us that
\begin{equation}\label{YFk}
Y^k_\mathbf{i}=\inf\big\{t\in\Xi_k:
\mathbf{i}\overset{{\mathbb T}^t\setminus{\mathbb F}^k}{\longleftrightarrow}\infty\big\}\qquad(\mathbf{i}\in{\mathbb T},\ k\in[n]),
\end{equation}
with the convention that $\inf\emptyset:=\infty$. Using Lemma~\ranglef{L:perc}, we
can now estimate
\begin{equation}
{\mathbb P}\big[\inf_{k\in[n]}Y^k_\varnothing\leq t\big]
\leq{\mathbb P}\big[\varnothing\overset{{\mathbb T}^t}{\longleftrightarrow}\infty\big]=t\qquad(0<t\leq 1),
\end{equation}
which proves that $\rho^{(n)}$ satisfies (\ranglef{infyk}).
\end{proof}
\begin{proof}[Proof of Lemma~\ref{L:commut}]
We will first prove (\ranglef{commut}) for $0<t\leq 1$, and on the way also
establish that $T^{(n)}$ maps ${\cal M}^{(n)}$ into itself, which is the missing part
of Lemma~\ranglef{L:mapdef} that still remains to be proved.
Fix $1\leq n\leq\infty$, $0<t\leq 1$, and $\rho^{(n)}\in{\cal M}^{(n)}$. Let $\vec
Y=(Y^k)_{k\in[n]}$ be a random variable with law $\rho^{(n)}$. It follows from
(\ranglef{infyk}) that we can couple $\vec Y$ to a Bernoulli random variable $B$
such that ${\mathbb P}[B=1]=t$ and ${\mathbb P}[B=1\,|\,\inf_{k\in[n]}Y^k\leq t]=1$. More
formally, there exists a probability measure $\mu$ on $I^n\tildemes\{0,1\}$
whose first marginal is $\rho^{(n)}$, whose second marginal is the Bernoulli
distribution with parameter $t$, and that is concentrated on $\{(\vec
y,b):b=1\mbox{ if }\inf_{k\in[n]}y_k\leq t\}$. Such a measure $\mu$ is not
unique, but we fix one from now on. Let $(\vec Y_1,B_1)$ and $(\vec
Y_2,B_2)$ be independent random variables with law $\mu$. Then
\begin{equation}\lambdabel{indB}
{\rm(i)}\quad{\mathbb P}\big[B_i=1\,\big|\,\inf_{k\in[n]}Y^k_i\leq t\big]=1,
\qquad
{\rm(ii)}\quad{\mathbb P}[B_i=1]=t\qquad(i=1,2).
\end{equation}
Let $\omega=(\tau,\kappa)$ be an independent random variable that is uniformly
distributed on $[0,1]\tildemes\{1,2\}$. We define
\begin{equation}\begin{array}{lr@{\,}c@{\,}l}\lambdabel{wurzdef}
\mathrm{d}splaystyle{\rm(i)}\quad&
\mathrm{d}splaystyle\vec Y_\varnothing&:=&\mathrm{d}splaystyle\big(\checki[\omega](Y^k_1,Y^k_2)\big)_{k\in[n]},\\[5pt]
\mathrm{d}splaystyle{\rm(ii)}\quad&\mathrm{d}splaystyle B_\varnothing&:=&\mathrm{d}splaystyle1_{\{\kappa=1\}}1_{\{\tau\leq t\}}B_1
+1_{\{\kappa=2\}}(B_1\vee B_2).
\end{array}\end{equation}
The second definition is motivated by the heuristic idea that if $B_i$ is the event that the subtree rooted at $i$ percolates at time $t$ (neglecting the freezing), then $B_\varnothing$ is the event that the whole tree percolates at time $t$. We claim that
\begin{equation}\lambdabel{Bprop}
{\rm(i)}\quad{\mathbb P}\big[B_\varnothing=1\,\big|\,\inf_{k\in[n]}Y^k_\varnothing\leq t\big]=1,
\qquad
{\rm(ii)}\quad{\mathbb P}[B_\varnothing=1]=t.
\end{equation}
Indeed, if $Y^k_\varnothing\leq t$ for some $k$, then by the definition of $\checki$ in
(\ranglef{chi_def}), a.s.\ either $\kappa=1$ and $\tau<Y^k_1\leq t$, or $\kappa=2$
and $Y^k_1\wedge Y^k_2\leq t$. In either case, it follows that $B_\varnothing=1$,
proving part~(i) of (\ranglef{Bprop}). Part~(ii) follows by writing
\begin{equation}
{\mathbb P}[B_\varnothing=1]=\ha t{\mathbb P}[B_1=1]+\ha{\mathbb P}[B_1\vee B_2=1]=\ha t^2+\ha[1-(1-t)^2]=t.
\end{equation}
We next claim that for each measurable subset $A\subset I^n$,
\begin{equation}\begin{array}{lr@{\,}c@{\,}l}\lambdabel{RGarep}
\mathrm{d}splaystyle{\rm(i)}\quad&\mathrm{d}splaystyle T^{(n)}\rho^{(n)}(A)
&=&\mathrm{d}splaystyle{\mathbb P}\big[\vec Y_\varnothing\in A\big],\\[5pt]
\mathrm{d}splaystyle{\rm(ii)}\quad&\mathrm{d}splaystyle{\mathbb G}amma^{(n)}_t\rho^{(n)}(A)
&=&\mathrm{d}splaystyle{\mathbb P}\big[\mathbb{P}si^{(n)}_t(\vec Y_i)\in A\,\big|\,B_i=1\big]\qquad(i=1,2),\\[5pt]
\mathrm{d}splaystyle{\rm(iii)}\quad&\mathrm{d}splaystyle{\mathbb G}amma^{(n)}_tT^{(n)}\rho^{(n)}(A)
&=&\mathrm{d}splaystyle{\mathbb P}\big[\mathbb{P}si^{(n)}_t(\vec Y_\varnothing)\in A\,\big|\,B_\varnothing=1\big].
\end{array}\end{equation}
Part~(i) of (\ref{RGarep}) is immediate from (\ref{wurzdef})~(i). Since both sides of the equation are probability measures, it suffices to prove part~(ii) for $A\subset I^n\setminus\{\vec\infty\}$. Then $\psi^{(n)}_t(\vec Y_i)\in A$ implies $\inf_{k\in[n]}Y^k_i\leq t$, which by (\ref{indB})~(i) in turn implies $B_i=1$. It follows that ${\mathbb P}[\psi^{(n)}_t(\vec Y_i)\in A]={\mathbb P}[\psi^{(n)}_t(\vec Y_i)\in A,\ B_i=1]=t{\mathbb P}[\psi^{(n)}_t(\vec Y_i)\in A\,|\,B_i=1]$ $(i=1,2)$. Comparing with the definition of $\Gamma^{(n)}_t$ in (\ref{Gan}), we see that part~(ii) holds. Formulas (\ref{RGarep})~(i) and (\ref{Bprop}) imply that
\begin{equation}
T^{(n)}\rho^{(n)}(J^n[t])
={\mathbb P}\big[\inf_{k\in[n]}Y^k_\varnothing\leq t\big]\leq{\mathbb P}\big[B_\varnothing=1\big]=t.
\end{equation}
Since this holds for general $0<t\leq 1$, and since $T^{(n)}$ also clearly
preserves the symmetry of $\rho^{(n)}$, we conclude that $T^{(n)}$ maps the
space ${\cal M}^{(n)}$ into itself. In particular, this shows that
${\mathbb G}amma^{(n)}_tT^{(n)}\rho^{(n)}$ is well-defined. Using (\ranglef{Bprop}), part~(iii) of (\ranglef{RGarep}) now follows by the same argument as part~(ii), but applied to $\vec Y_\varnothing$ which by part~(i) has law $T^{(n)}\rho^{(n)}$. We next prove (\ranglef{commut}) for $0<t\leq 1$. We set
\begin{equation}
B_\circ:=1_{\{\kappa=1\}}1_{\{\tau\leq t\}}B_1
+1_{\{\kappa=2\}}(B_1\wedge B_2).
\end{equation}
We claim that for each measurable subset $A\subset I^n$,
\begin{equation}\lambdabel{Bcirc}
{\rm(i)}\quad{\mathbb P}\big[B_\circ=1\big]=t^2,\quad
{\rm(ii)}\quad T^{(n)}{\mathbb G}amma^{(n)}_t\rho^{(n)}(A)
={\mathbb P}\big[\mathbb{P}si^{(n)}_t(\vec Y_\varnothing)\in A\,\big|\,B_\circ=1\big].
\end{equation}
Part~(i) is a consequence of the independence of $\tau,\kappa,B_1$ and $B_2$
which yields ${\mathbb P}[B_\circ=1]=\ha\cdot t\cdot t+\ha\cdot t\cdot t$. To prove
part~(ii), we introduce the function
\begin{equation}\lambdabel{Phidef}
{\mathbb P}hi[s](y):=\left\{\begin{array}{ll}
y\quad&\mbox{if }s<y,\\
\infty\quad&\mbox{if }y\leq s,
\end{array}\right.\qquad\big(s\in[0,1],\ y\in I\big),
\end{equation}
with the help of which we can write the function $\checki$ from (\ranglef{chi_def}) as
\begin{equation}\lambdabel{chiPhi}
\checki[\tau,\kappa](y_1,y_2):=\left\{\begin{array}{ll}
{\mathbb P}hi[\tau](y_1)\quad&\mbox{if }\kappa=1,\\[5pt]
y_1\wedge y_2\quad&\mbox{if }\kappa=2.\end{array}\right.
\end{equation}
We define ${\mathbb P}hi^{(n)}[s](\vec y)$ and $\vec y_1\wedge\vec y_2$ in a
componentwise way, i.e., ${\mathbb P}hi^{(n)}[s](\vec y):=({\mathbb P}hi[s](y^k))_{k\in[n]}$ and
$\vec y_1\wedge\vec y_2:=(y^k_1\wedge y^k_2)_{k\in[n]}$. Using the facts that
\begin{equation}\begin{array}{r@{\ }l}\lambdabel{psiprop}
{\rm(i)}&\mathrm{d}splaystyle\mathbb{P}si_t({\mathbb P}hi[s](y))={\mathbb P}hi[t^{-1}s](\mathbb{P}si_t(y))\qquad(s\leq t),\\[5pt]
{\rm(ii)}&\mathrm{d}splaystyle\mathbb{P}si_t(y_1\wedge y_2)=\mathbb{P}si_t(y_1)\wedge\mathbb{P}si_t(y_2),
\end{array}\end{equation}
we can write
\begin{equation}\begin{array}{l}
\mathrm{d}splaystyle{\mathbb P}\big[\mathbb{P}si^{(n)}_t(\vec Y_\varnothing)\in A\,\big|\,B_\circ=1\big]\\[5pt]
\mathrm{d}splaystyle\quad={\mathbb P}\big[\kappa=1,\ \mathbb{P}si^{(n)}_t({\mathbb P}hi^{(n)}[\tau](\vec Y_1))\in A
\,\big|\,B_\circ=1\big]
+{\mathbb P}\big[\kappa=2,\ \mathbb{P}si^{(n)}_t(\vec Y_1\wedge\vec Y_2)\in A
\,\big|\,B_\circ=1\big]\\[5pt]
\mathrm{d}splaystyle\quad=\ha{\mathbb P}\big[{\mathbb P}hi^{(n)}[t^{-1}\tau](\mathbb{P}si^{(n)}_t(\vec Y_1))\in A
\,\big|\,\tau\leq t,\ B_1=1\big]\\[5pt]
\mathrm{d}splaystyle\quad\mathbb{P}hantom{=}
+\ha{\mathbb P}\big[(\mathbb{P}si^{(n)}_t(\vec Y_1)\wedge\mathbb{P}si^{(n)}_t(\vec Y_2))\in A
\,\big|\,B_1=1=B_2\big].
\end{array}\end{equation}
Using (\ranglef{RGarep})~(ii) and the fact that conditional on $\tau\leq t$, the random variable $t^{-1}\tau$ is uniformly distributed on $[0,1]$, we can rewrite this as
\begin{equation}
\ha{\mathbb P}\big[{\mathbb P}hi[\tilde\tau](\vec Z_1)\in A\big]
+\ha{\mathbb P}\big[(\vec Z_1\wedge\vec Z_2)\in A\big],
\end{equation}
where $\vec Z_1,\vec Z_2$ are independent with law ${\mathbb G}amma^{(n)}_t(\rho^{(n)})$ and $\tilde\tau$ is an independent random variable that is uniformly distributed on $[0,1]$. In view of (\ranglef{chiPhi}) and the definition of $T^{(n)}$ in (\ranglef{Tndef}), we arrive at (\ranglef{Bcirc})~(ii).
Formulas (\ranglef{Bprop})~(ii), (\ranglef{RGarep})~(iii), and (\ranglef{Bcirc}) give,
for any measurable subset $A\subset I^n$,
\begin{equation}\begin{array}{r@{\,}c@{\,}l}
\displaystyle t\Gamma^{(n)}_tT^{(n)}\rho^{(n)}(A)
&=&\displaystyle{\mathbb P}\big[\psi^{(n)}_t(\vec Y_\varnothing)\in A,\ B_\varnothing=1\big],\\[5pt]
\displaystyle t^2T^{(n)}\Gamma^{(n)}_t\rho^{(n)}(A)
&=&\displaystyle{\mathbb P}\big[\psi^{(n)}_t(\vec Y_\varnothing)\in A,\ B_\circ=1\big].
\end{array}\end{equation}
We observe that the event $\{B_\circ=1\}$ is contained in the event
$\{B_\varnothing=1\}$ and the difference of these events is the event that
$\kappa=2$ and precisely one of the random variables $B_1$ and $B_2$ is one.
On the event that $\kappa=2$ by (\ranglef{psiprop})~(ii) we have $\mathbb{P}si^{(n)}_t(\vec Y_\varnothing)=\mathbb{P}si^{(n)}_t(\vec Y_1\wedge\vec Y_2)=\mathbb{P}si^{(n)}_t(\vec Y_1)\wedge\mathbb{P}si^{(n)}_t(\vec Y_2)$. Moreover on the event that $B_i=0$, by (\ranglef{indB})~(i) we have $\inf_{k\in[n]}Y^k_i>t$ and hence $\mathbb{P}si^{(n)}_t(\vec Y_i)=\vec\infty$. In view of this, for any measurable subset $A\subset I^n$,
\begin{equation}\begin{array}{l}
\mathrm{d}splaystyle t{\mathbb G}amma^{(n)}_tT^{(n)}\rho^{(n)}(A)-t^2T^{(n)}{\mathbb G}amma^{(n)}_t\rho^{(n)}(A)\\[5pt]
\mathrm{d}splaystyle\quad={\mathbb P}\big[\mathbb{P}si^{(n)}_t(\vec Y_1)\in A,\ \kappa=2,\ B_1=1,\ B_2=0\big]
+{\mathbb P}\big[\mathbb{P}si^{(n)}_t(\vec Y_2)\in A,\ \kappa=2,\ B_1=0,\ B_2=1\big]\\[5pt]
\mathrm{d}splaystyle\quad=\ha t(1-t){\mathbb P}\big[\mathbb{P}si^{(n)}_t(\vec Y_1)\in A\,\big|\,B_1=1\big]
+\ha t(1-t){\mathbb P}\big[\mathbb{P}si^{(n)}_t(\vec Y_2)\in A\,\big|\,B_2=1\big]\\[5pt]
\mathrm{d}splaystyle\quad= t(1-t){\mathbb G}amma^{(n)}_t\rho^{(n)}(A),
\end{array}\end{equation}
where in the last step we have used (\ranglef{RGarep})~(ii). Dividing by $t$, we see
that (\ranglef{commut}) holds for $0<t\leq 1$.
To derive (\ranglef{commut}) also for $t\geq 1$, replacing $t$ by $1/t$, we may equivalently show that
\begin{equation}\lambdabel{commut2}
{\mathbb G}amma^{(n)}_{1/t}T^{(n)}\rho^{(n)}=t^{-1}T^{(n)}{\mathbb G}amma^{(n)}_{1/t}\rho^{(n)}
+(1-t^{-1}){\mathbb G}amma^{(n)}_{1/t}\rho^{(n)}
\qquad(0<t\leq 1,\ \rho^{(n)}\in{\cal M}^{(n)}).
\end{equation}
For $0<t\leq 1$, we set
\begin{equation}\lambdabel{Mint}
{\cal M}^{(n)}[t]:=\big\{\rho^{(n)}\in{\cal M}^{(n)}:\rho^{(n)}
\mbox{ is concentrated on }I^n_t\big\}
\quad\mbox{with}\quad I_t:=[0,t]\cup\{\infty\}.
\end{equation}
We observe that $\mathbb{P}si_t:I_t\to I$ is a bijection and $\mathbb{P}si_{1/t}$ is its inverse. As a result, ${\mathbb G}amma^{(n)}_t:{\cal M}^{(n)}[t]\to{\cal M}^{(n)}$ is a bijection and ${\mathbb G}amma^{(n)}_{1/t}$ is its inverse. Using this and applying (\ranglef{commut}) for $0<t\leq 1$ to the measure ${\mathbb G}amma^{(n)}_{1/t}\rho^{(n)}$, we conclude that
\begin{equation}\begin{array}{r@{\,}c@{\,}l}\label{invcal}
\displaystyle\Gamma^{(n)}_tT^{(n)}\Gamma^{(n)}_{1/t}\rho^{(n)}
&=&\displaystyle tT^{(n)}\Gamma^{(n)}_t\Gamma^{(n)}_{1/t}\rho^{(n)}
+(1-t)\Gamma^{(n)}_t\Gamma^{(n)}_{1/t}\rho^{(n)}\\[5pt]
&=&\displaystyle tT^{(n)}\rho^{(n)}+(1-t)\rho^{(n)}.
\end{array}\end{equation}
By our earlier remarks, we have ${\mathbb G}amma^{(n)}_{1/t}\rho^{(n)}\in{\cal M}^{(n)}[t]$,
which is easily seen to imply that also
$T^{(n)}{\mathbb G}amma^{(n)}_{1/t}\rho^{(n)}\in{\cal M}^{(n)}[t]$.
Using this, we can apply ${\mathbb G}amma^{(n)}_{1/t}$ from the left to (\ranglef{invcal})
and multiply by $t^{-1}$ to obtain
\begin{equation}
t^{-1}T^{(n)}{\mathbb G}amma^{(n)}_{1/t}\rho^{(n)}
={\mathbb G}amma^{(n)}_{1/t}T^{(n)}\rho^{(n)}+(t^{-1}-1){\mathbb G}amma^{(n)}_{1/t}\rho^{(n)},
\end{equation}
which proves (\ranglef{commut2}).
\end{proof}
The rest of this subsection is devoted to the proof of Lemma~\ranglef{L:scalenu}. The maps ${\mathbb G}amma^{(\infty)}_t$, $C^{(\infty)}_t$, and $T^{(\infty)}$ will play an important role in the proof. Symmetric probability measures on $I^\infty$ are also known as exchangeable probability measures. We will use De Finetti's theorem to associate the space ${\cal M}^{(\infty)}$ with a subspace ${\cal M}^\ast$ of the space of all probability measures on the space of probability measures on $I$. The space ${\cal M}^\ast$ is naturally equipped with a special kind of stochastic order, called the convex order, and we will use a characterisation, proved in \cite{MSS18}, of the measures $\underline\rho^{(2)}_\Xi$ and $\overline\rho^{(2)}_\Xi$ from (\ranglef{unnu}) in terms of the convex order.
We now give the precise definitions. We let ${\cal P}(I^n)$ denote the space of all probability measures on $I^n$ and denote the subspace of symmetric probability measures by ${\cal P}_{\rm sym}(I^n)$. We equip ${\cal P}(I)$ with the topology of weak convergence and the associated Borel-\ensuremath{\sigma}-field and let ${\cal P}({\cal P}(I))$ denote the space of all probability measures on ${\cal P}(I)$. Each $\nu\in{\cal P}({\cal P}(I))$ is the law of a ${\cal P}(I)$-valued random variable, i.e., we can construct a random probability measure $\xi$ on $I$ such that $\nu={\mathbb P}[\xi\in\,\cdot\,]$ is the law of $\xi$. By definition, for $1\leq n\leq\infty$,
\begin{equation}
\nu^{(n)}:={\mathbb E}\big[\underlinederbrace{\xi\otimes\cdots\otimes\xi}_{\mbox{$n$ times}}\big]
\end{equation}
is called the \emph{$n$-th moment measure} of $\nu$. Here
$\xi\otimes\cdots\otimes\xi$ denotes the product measure of $n$ identical
copies of $\xi$ and the expectation of a random measure $\mu$ on a Polish
space $\Omega$ is the deterministic measure $E[\mu]$ defined by
$\int_\Omega\mathbb{P}hi\mathrm{d} E[\mu]:=E\big[\int_\Omega\mathbb{P}hi\,\mathrm{d}\mu\big]$ for all bounded
measurable $\mathbb{P}hi:\Omega\to{\mathbb R}$. Let $\xi$ be a ${\cal P}(I)$-valued
random variable with law $\nu$, and conditional on $\xi$, let $(Y^k)_{k\in[n]}$
be i.i.d.\ with law $\xi$. Then it is easy to see (compare
\cite[formula~(4.1)]{MSS18}) that the unconditional law of $(Y^k)_{k\in[n]}$ is
given by $\nu^{(n)}$, i.e.,
\begin{equation}\lambdabel{momeas}
\nu^{(n)}={\mathbb P}\big[(Y^k)_{k\in[n]}\in\,\cdot\,\big]
\quad\mbox{where}\quad
{\mathbb P}\big[(Y^k)_{k\in[n]}\in\,\cdot\,\big|\,\xi\big]
=\underlinederbrace{\xi\otimes\cdots\otimes\xi}_{\mbox{$n$ times}}.
\end{equation}
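For instance, if $\xi=\delta_Y$ a.s.\ for some $I$-valued random variable $Y$, then conditionally on $\xi$ the variables $(Y^k)_{k\in[n]}$ all coincide with $Y$, so $\nu^{(n)}$ is the law of the constant vector $(Y,\ldots,Y)$; this is precisely how $\overline\rho$ in (\ref{unov}) below gives rise to $\overline\rho^{(2)}$ in (\ref{unnu2}).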
We observe that $\nu^{(n)}\in{\cal P}_{\rm sym}(I^n)$ for all $\nu\in{\cal P}({\cal P}(I))$ and $1\leq n\leq\infty$. In fact, by De Finetti's theorem, the map $\nu\mapsto\nu^{(\infty)}$ is a bijection from ${\cal P}({\cal P}(I))$ to ${\cal P}_{\rm sym}(I^\infty)$. This allows us to identify the space ${\cal M}^{(\infty)}$ with a subspace of ${\cal P}({\cal P}(I))$. We set (compare (\ranglef{infyk}))
\begin{equation}\begin{array}{l}\lambdabel{Mist}
\mathrm{d}splaystyle{\cal M}^\ast:=\big\{\nu\in{\cal P}({\cal P}(I)):
\nu(J^\ast[t])\leq t\ \forall 0<t\leq 1\big\}\\[5pt]
\mathrm{d}splaystyle\quad\mbox{with}\quad
J^\ast[t]:=\big\{\xi\in{\cal P}(I):\xi([0,t])>0\big\}
\qquad(0<t\leq 1).
\end{array}\end{equation}
Note that $J^\ast[1]={\cal P}(I)\setminus\{\delta_\infty\}$, where $\delta_\infty$ denotes the delta-measure at $\infty$. The following lemma identifies ${\cal M}^{(\infty)}$ with ${\cal M}^\ast$.
\begin{lemma}[Probability measures on probability measures]
The\label{L:Miast} map $\nu\mapsto\nu^{(\infty)}$ is a bijection from ${\cal M}^\ast$ to
${\cal M}^{(\infty)}$.
\end{lemma}
In order to expose the main line of the argument, we postpone the proof of this and some of the following lemmas till later. It follows immediately from Lemmas \ranglef{L:mapdef} and \ranglef{L:Miast} that there exist unique maps ${\mathbb G}amma^\ast_t$, $C^\ast_t$, and $T^\ast$, mapping the space ${\cal M}^\ast$ into itself, such that
\begin{equation}\lambdabel{high}
({\mathbb G}amma^\ast_t\nu)^{(\infty)}={\mathbb G}amma^{(\infty)}_t\nu^{(\infty)},\quad
(C^\ast_t\nu)^{(\infty)}=C^{(\infty)}_t\nu^{(\infty)},\quad\mbox{and}\quad
(T^\ast\nu)^{(\infty)}=T^{(\infty)}\nu^{(\infty)}
\end{equation}
for any $t>0$ and $\nu\in{\cal M}^\ast$. The equation $T^\ast\nu=\nu$ has been called the \emph{higher-level RDE} in \cite{MSS18} and we will refer to ${\mathbb G}amma^\ast_t$, $C^\ast_t$, and $T^\ast$ as \emph{higher-level maps}. The following lemma gives a more explicit description of ${\mathbb G}amma^\ast_t$ and $C^\ast_t$.
\begin{lemma}[Higher-level scaling and cut-off maps]
Let\label{L:hiGa} $\nu\in{\cal M}^\ast$ and let $\xi$ be a ${\cal P}(I)$-valued
random variable with law $\nu$. Then for each $t>0$, the maps
$\Gamma^\ast_t$ and $C^\ast_t$ defined in (\ref{high}) are given by
\begin{equation}\begin{array}{lr@{\,}c@{\,}l}\label{hiGa}
{\rm(i)}&\displaystyle\Gamma^\ast_t\nu&=&\displaystyle t^{-1}{\mathbb P}\big[\xi\circ\psi_t^{-1}\in\,\cdot\,\big]
+(1-t^{-1})\delta_{\delta_\infty},\\[5pt]
{\rm(ii)}&\displaystyle C^\ast_t\nu&=&\displaystyle{\mathbb P}\big[\xi\circ c_t^{-1}\in\,\cdot\,\big],
\end{array}\end{equation}
where $\delta_{\delta_\infty}\in{\cal P}({\cal P}(I))$ denotes the delta measure at the point
$\delta_\infty\in{\cal P}(I)$.
\end{lemma}
The following lemma, which we cite from \cite[Lemma~2]{MSS18},
identifies the map $T^\ast$ more explicitly. Recall the definition of
the map $\checki[\omega]:I^2\to I$ in (\ranglef{chi_def}). In line with earlier
notation, in (\ranglef{czT}) below, $\xi_1\otimes\xi_2\circ\checki[\omega]^{-1}$
denotes the image of the random measure $\xi_1\otimes\xi_2$ under the
random map $\checki[\omega]$.
\begin{lemma}[Higher-level RDE map]
Let\label{L:hilev} $\nu\in{\cal M}^\ast$, let $\xi_1,\xi_2$ be independent
${\cal P}(I)$-valued random variables with law $\nu$, and let $\omega$ be an
independent random variable that is uniformly distributed on
$[0,1]\tildemes\{1,2\}$. Then the map $T^\ast$ defined in (\ranglef{high}) is given by
\begin{equation}\lambdabel{czT}
T^\ast(\nu)={\mathbb P}\big[\xi_1\otimes\xi_2\circ\checki[\omega]^{-1}\in\,\cdot\,\big].
\end{equation}
\end{lemma}
We equip the space ${\cal P}({\cal P}(I))$ with the \emph{convex order}, which we denote
by $\leq_{\rm cv}$. Two measures $\nu_1,\nu_2\in{\cal P}({\cal P}(I))$ satisfy
$\nu_1\leq_{\rm cv}\nu_2$ if and only if the following two equivalent
conditions are satisfied, see \cite[Thm~13]{MSS18}:
\begin{enumerate}
\item $\displaystyle\int\phi\,\mathrm{d}\nu_1\leq\int\phi\,\mathrm{d}\nu_2$ for all convex continuous
functions $\phi:{\cal P}(I)\to{\mathbb R}$.
\item There exists an $I$-valued random variable $Y$ defined on a probability
space $(\Omega,{\cal F},{\mathbb P})$ and \ensuremath{\sigma}-fields ${\cal F}_1\subset{\cal F}_2\subset{\cal F}$ such that
$\nu_i={\mathbb P}\big[{\mathbb P}[Y\in\,\cdot\,|{\cal F}_i]\in\,\cdot\,\big]$ $(i=1,2)$.
\end{enumerate}
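For instance (a standard consequence of characterisation~(ii), not needed in what follows), any $\nu\in{\cal P}({\cal P}(I))$ with first moment measure $\nu^{(1)}=\rho$ satisfies $\delta_\rho\leq_{\rm cv}\nu\leq_{\rm cv}\int\rho(\mathrm{d} y)\,\delta_{\delta_y}$, as one sees by comparing the trivial \ensuremath{\sigma}-field, a \ensuremath{\sigma}-field on which the conditional law of $Y$ has law $\nu$, and $\sigma(Y)$; the measure $\overline\rho$ in (\ref{unov}) below is exactly the upper extreme, while $\underline\rho$ lies between the two.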
The convex order is a partial order, in particular,
$\nu_1\leq_{\rm cv}\nu_2\leq_{\rm cv}\nu_1$ implies $\nu_1=\nu_2$, see
\cite[Lemma~15]{MSS18}. The following lemma says that the scaling maps
${\mathbb G}amma^\ast_t$ preserve the convex order.
\begin{lemma}[Monotonicity with respect to the convex order]
Let\label{L:cvmon} $t>0$ and let $\Gamma^\ast_t$ be defined in (\ref{high}).
Then $\nu_1,\nu_2\in{\cal M}^\ast$ and $\nu_1\leq_{\rm cv}\nu_2$
imply $\Gamma^\ast_t\nu_1\leq_{\rm cv}\Gamma^\ast_t\nu_2$.
\end{lemma}
Let $(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ be an RTP corresponding to a solution $\rho$ to the RDE (\ranglef{uni_RDE}) and let $\Omega:=(\omega_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$. We define $\underline\rho,\overline\rho\in{\cal P}({\cal P}(I))$ by
\begin{equation}\lambdabel{unov}
\underline\rho:={\mathbb P}\big[{\mathbb P}[Y_\varnothing\in\,\cdot\,|\,\Omega]\in\,\cdot\,\big]
\quad\mbox{and}\quad
\overline\rho:={\mathbb P}\big[\delta_{Y_\varnothing}\in\,\cdot\,\big].
\end{equation}
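In words, $\underline\rho$ is the law of the conditional law of the root value $Y_\varnothing$ given $\Omega=(\omega_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$, while $\overline\rho$ is the law of the point mass at $Y_\varnothing$ itself.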
We observe that the second moment measures of $\underline\rho$ and $\overline\rho$ are given by
\begin{equation}\lambdabel{unnu2}
\underline\rho^{(2)}={\mathbb P}\big[(Y_\varnothing,Y'_\varnothing)\in\,\cdot\,\big]
\quad\mbox{and}\quad
\overline\rho^{(2)}={\mathbb P}\big[(Y_\varnothing,Y_\varnothing)\in\,\cdot\,\big],
\end{equation}
where $(Y'_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ is conditionally independent of $(Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ given $\Omega$ and conditionally equally distributed with $(Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$. In particular, if $\rho=\rho_\Xi$, then $\underline\rho^{(2)}$ and $\overline\rho^{(2)}$ are the measures defined in (\ranglef{unnu}). The following proposition, which we cite from \cite[Props~3 and 4]{MSS18}, says that $\underline\rho$ and $\overline\rho$ are the minimal and maximal solutions, with respect to the convex order, of the higher-level RDE $T^\ast(\nu)=\nu$.
\begin{proposition}[Minimal and maximal solutions]
Let\label{P:minmax} $\rho$ be a solution to the RDE (\ref{uni_RDE}). Then the set
\begin{equation}\label{Sir}
{\cal S}_\rho:=\big\{\nu\in{\cal P}({\cal P}(I)):T^\ast(\nu)=\nu,\ \nu^{(1)}=\rho\big\}
\end{equation}
has a unique minimal element $\underline\rho$ and maximal element $\overline\rho$ with respect to the convex order, and these are the measures defined in (\ranglef{unov}).
\end{proposition}
We will derive Lemma~\ranglef{L:scalenu} from the following, stronger statement. We will first give the proofs of Lemmas \ranglef{L:CGa} and \ranglef{L:scalenu} and then provide the proofs of the remaining lemmas.
\begin{lemma}[Scaling of minimal and maximal solutions]
Let\label{L:CGa} $\rho$ be a solution to the RDE (\ref{uni_RDE}) and let $t>0$. Then
\begin{equation}\label{CGa}
{\rm(i)}\ C^\ast_t\underline\rho=\underline{C_t\rho},\quad
{\rm(ii)}\ C^\ast_t\overline\rho=\overline{C_t\rho},\quad
{\rm(iii)}\ \Gamma^\ast_t\underline\rho=\underline{\Gamma_t\rho},\quad
{\rm(iv)}\ \Gamma^\ast_t\overline\rho=\overline{\Gamma_t\rho}.
\end{equation}
\end{lemma}
\begin{proof}
We first prove (\ranglef{CGa})~(i) and (ii). Recall from (\ranglef{Can}) that $C_t\rho:=\rho\circ c_t^{-1}$ where $c_t$ is the cut-off map defined in (\ranglef{ct}). Let $(\omega_\mathbf{i},Y_\mathbf{i})_{\mathbf{i}\in{\mathbb T}}$ be the RTP corresponding to $\rho$. Then it is easy to see that $(\omega_\mathbf{i},c_t(Y_\mathbf{i}))_{\mathbf{i}\in{\mathbb T}}$ is the RTP corresponding to $C_t\rho$. Applying the definition in (\ranglef{unov}), it follows that
\begin{equation}
\underline{C_t\rho}={\mathbb P}\big[{\mathbb P}[c_t(Y_\varnothing)\in\,\cdot\,|\,\Omega]\in\,\cdot\,\big]
={\mathbb P}\big[{\mathbb P}[Y_\varnothing\in\,\cdot\,|\,\Omega]\circ c_t^{-1}\in\,\cdot\,\big]=C^\ast_t\underline\rho,
\end{equation}
where in the last equality we have used Lemma~\ref{L:hiGa}. This proves (\ref{CGa})~(i). The proof of (\ref{CGa})~(ii) is similar, using the fact that $\delta_{c_t(Y_\varnothing)}=\delta_{Y_\varnothing}\circ c_t^{-1}$.
We next prove (\ranglef{CGa})~(iii) and (iv). We start by observing that (\ranglef{high}) implies that
\begin{equation}\lambdabel{fimo}
({\mathbb G}amma^\ast_t\nu)^{(1)}={\mathbb G}amma_t\nu^{(1)}\qquad(\nu\in{\cal M}^\ast,\ t>0).
\end{equation}
We moreover claim that
\begin{equation}\lambdabel{GGC}
{\mathbb G}amma^\ast_{1/t}\circ{\mathbb G}amma^\ast_t\nu=C^\ast_t\nu\qquad(\nu\in{\cal M}^\ast,\ t>0).
\end{equation}
To see this, define $\mathbb{P}si^\ast_t:{\cal P}(I)\to{\cal P}(I)$ by $\mathbb{P}si^\ast_t(\xi):=\xi\circ\mathbb{P}si_t^{-1}$. Then (\ranglef{hiGa})~(i) says that ${\mathbb G}amma^\ast_t\nu=t^{-1}\nu\circ(\mathbb{P}si^\ast_t)^{-1}+(1-t^{-1})\delta_{\delta_\infty}$. A simple calculation using the fact that $\mathbb{P}si^\ast_t(\delta_\infty)=\delta_\infty$ then gives
\begin{equation}
\Gamma^\ast_s\Gamma^\ast_t\nu=(st)^{-1}\nu\circ(\psi^\ast_t)^{-1}\circ(\psi^\ast_s)^{-1}+(1-(st)^{-1})\delta_{\delta_\infty}\qquad(s,t>0).
\end{equation}
Applying this with $s=1/t$, using (\ref{psic}), and (\ref{hiGa})~(ii), it follows that if $\xi$ is a ${\cal P}(I)$-valued random variable with law $\nu$, then
\begin{equation}
\Gamma^\ast_{1/t}\circ\Gamma^\ast_t\nu={\mathbb P}\big[\xi\circ\psi_t^{-1}\circ\psi_{1/t}^{-1}\in\,\cdot\,\big]={\mathbb P}\big[\xi\circ c_t^{-1}\in\,\cdot\,\big]=C^\ast_t\nu,
\end{equation}
which proves (\ref{GGC}). Let $\rho$ be a solution to the RDE (\ref{uni_RDE}), let $0<t\leq 1$, and let $\rho':=\Gamma_{1/t}\rho$. Then, by Lemma~\ref{L:scaleRDE}, $\rho'$ also solves the RDE (\ref{uni_RDE}). Moreover, $\rho'$ is concentrated on $[0,t]\cup\{\infty\}$ and hence in view of (\ref{psic}) $\Gamma_t\rho'=\rho$. Let us write
\begin{equation}
{\cal M}^\ast_\rho:=\big\{\nu\in{\cal M}^\ast:\nu^{(1)}=\rho\big\}
\end{equation}
and let ${\cal M}^\ast_{\rho'}$ be defined similarly with $\rho$ replaced by $\rho'$. Since $t\leq 1$, the cut-off map $c_{1/t}$ and hence also $C^\ast_{1/t}$ are the identity maps and hence it follows from (\ref{GGC}) with $1/t$ instead of $t$ and from (\ref{fimo}) that
\begin{equation}\label{tinvt}
\mbox{$\Gamma^\ast_{1/t}$ is a bijection from ${\cal M}^\ast_\rho$ to ${\cal M}^\ast_{\rho'}$ and that $\Gamma^\ast_t$ is its inverse.}
\end{equation}
It follows from Lemma~\ref{L:scalable} and our identification of ${\cal M}^{(\infty)}$ with ${\cal M}^\ast$ in Lemma~\ref{L:Miast} and (\ref{high}) that the sets ${\cal S}_\rho$ and ${\cal S}_{\rho'}$ defined in (\ref{Sir}) are subsets of ${\cal M}^\ast_\rho$ and ${\cal M}^\ast_{\rho'}$, respectively. Using moreover Lemma~\ref{L:scalenRDE}, we see that $\Gamma^\ast_{1/t}$ maps ${\cal S}_\rho$ into ${\cal S}_{\rho'}$ and that $\Gamma^\ast_t$ maps ${\cal S}_{\rho'}$ into ${\cal S}_\rho$. By (\ref{tinvt}), we conclude that $\Gamma^\ast_{1/t}$ is a bijection from ${\cal S}_\rho$ to ${\cal S}_{\rho'}$. By Lemma~\ref{L:cvmon}, the map $\Gamma^\ast_{1/t}$ is monotone with respect to the convex order. By Proposition~\ref{P:minmax}, the set ${\cal S}_\rho$ has unique minimal and maximal elements with respect to the convex order, which are $\underline\rho$ and $\overline\rho$. Likewise, $\underline\rho'$ and $\overline\rho'$ are the unique minimal and maximal elements of ${\cal S}_{\rho'}$. Since $\Gamma^\ast_{1/t}$ is a monotone bijection from ${\cal S}_\rho$ to ${\cal S}_{\rho'}$, it must map $\underline\rho$ and $\overline\rho$ to $\underline\rho'$ and $\overline\rho'$, respectively. Recalling that $\rho'=\Gamma_{1/t}\rho$, this shows that
\begin{equation}
\Gamma^\ast_{1/t}\underline\rho=\underline{\Gamma_{1/t}\rho},\quad
\Gamma^\ast_{1/t}\overline\rho=\overline{\Gamma_{1/t}\rho},
\end{equation}
which proves (\ref{CGa})~(iii) and (iv) in the special case that $t\geq 1$.
To prove (\ref{CGa})~(iii) for $0<t\leq 1$, let $\rho''$ be a solution to the RDE (\ref{uni_RDE}), let $\rho:=\Gamma_t\rho''$, and as before let $\rho'=\Gamma_{1/t}\rho$. Then, by (\ref{psic}), $\rho'=\Gamma_{1/t}\circ\Gamma_t\rho''=C_t\rho''$. Our previous arguments show that $\Gamma^\ast_{1/t}$ maps $\underline\rho$ into $\underline\rho'$ and hence the inverse map $\Gamma^\ast_t$ maps $\underline\rho'$ into $\underline\rho$, i.e.,
\begin{equation}\label{acG}
\Gamma^\ast_t\underline\rho'=\underline\rho.
\end{equation}
Formulas (\ref{GGC}) and (\ref{CGa})~(i) tell us that
\begin{equation}
\Gamma^\ast_{1/t}\circ\Gamma^\ast_t\underline\rho''=C^\ast_t\underline\rho''=\underline{C_t\rho''}=\underline\rho'.
\end{equation}
Applying $\Gamma^\ast_t$ from the left, using (\ref{tinvt}) and (\ref{acG}), we obtain that
\begin{equation}
\Gamma^\ast_t\underline\rho''=\Gamma^\ast_t\underline\rho'=\underline\rho.
\end{equation}
Since $\rho=\Gamma_t\rho''$, this proves (\ref{CGa})~(iii) for $0<t\leq 1$. The proof of (\ref{CGa})~(iv) for $0<t\leq 1$ goes exactly in the same way.
\end{Proof}
\begin{Proof}[of Lemma~\ref{L:scalenu}]
It follows from Lemma~\ref{L:CGa} and (\ref{high}) that $\Gamma^{(2)}_t\underline\rho^{(2)}_\Xi=\underline{\Gamma_t\rho_\Xi}^{(2)}$ and $\Gamma^{(2)}_t\overline\rho^{(2)}_\Xi=\overline{\Gamma_t\rho_\Xi}^{(2)}$, where $\Gamma_t\rho_\Xi=\rho_{\Xi'}$ by Lemma~\ref{L:scaleRDE}.
\end{Proof}
We cited Lemma~\ref{L:hilev} and Proposition~\ref{P:minmax} from \cite{MSS18}, so to complete the proofs of this subsection, it only remains to provide the proofs of Lemmas \ref{L:Miast}, \ref{L:hiGa}, and \ref{L:cvmon}.
\begin{Proof}[of Lemma~\ref{L:Miast}]
By De Finetti's theorem, the map $\nu\mapsto\nu^{(\infty)}$ is a bijection from ${\cal P}({\cal P}(I))$ to ${\cal P}_{\rm sym}(I^\infty)$, so it suffices to show that for $\nu\in{\cal P}({\cal P}(I))$, one has $\nu\in{\cal M}^\ast$ if and only if $\nu^{(\infty)}\in{\cal M}^{(\infty)}$. Let $\xi$ be a ${\cal P}(I)$-valued random variable with law $\nu$ and conditional on $\xi$, let $(Y^k)_{k\in{\mathbb N}_+}$ be i.i.d.\ with law $\xi$. Then the unconditional law of $(Y^k)_{k\in{\mathbb N}_+}$ is $\nu^{(\infty)}$. By the definition in (\ref{Mist}), $\nu\in{\cal M}^\ast$ if and only if ${\mathbb P}[\xi([0,t])>0]\leq t$ for all $0<t\leq 1$. The event $\{\xi([0,t])>0\}$ is a.s.\ equal to the event $\{\exists k\in{\mathbb N}_+\mbox{ s.t.\ }Y^k\leq t\}$, so comparing with the definition in (\ref{infyk}) we see that $\nu\in{\cal M}^\ast$ if and only if $\nu^{(\infty)}\in{\cal M}^{(\infty)}$.
\end{Proof}
\begin{Proof}[of Lemma~\ref{L:hiGa}]
We need to show that $\Gamma^\ast_t$ and $C^\ast_t$ defined as in (\ref{hiGa}) satisfy (\ref{high}). Conditional on $\xi$, let $(Y^k)_{k\in{\mathbb N}_+}$ be i.i.d.\ with law $\xi$. Then the unconditional law of $(Y^k)_{k\in{\mathbb N}_+}$ is $\nu^{(\infty)}$ and by (\ref{Can}) $C^{(\infty)}_t\nu^{(\infty)}$ is the (unconditional) law of $(c_t(Y^k))_{k\in{\mathbb N}_+}$, which is the same as $(C^\ast_t\nu)^{(\infty)}$. The claim for $\Gamma^\ast_t$ follows in the same way, using (\ref{Gan}) and the fact that $\delta_{\delta_\infty}^{(\infty)}=\delta_{\vec\infty}$.
\end{Proof}
\begin{Proof}[of Lemma~\ref{L:cvmon}]
Assume that $\nu_1,\nu_2\in{\cal M}^\ast$ satisfy $\nu_1\leq_{\rm cv}\nu_2$. By
characterisation~(ii) of the convex order, we can find a random variable $Y$
and $\sigma$-fields ${\cal F}_1\subset{\cal F}_2$ such that
$\nu_i={\mathbb P}\big[{\mathbb P}[Y\in\,\cdot\,|{\cal F}_i]\in\,\cdot\,\big]$ $(i=1,2)$.
Then, by (\ref{hiGa})~(i),
\begin{equation}
\Gamma^\ast_t\nu_i=t^{-1}\tilde\nu_i+(1-t^{-1})\delta_{\delta_\infty}
\quad\mbox{with}\quad
\tilde\nu_i:={\mathbb P}\big[{\mathbb P}[\psi_t(Y)\in\,\cdot\,|{\cal F}_i]\in\,\cdot\,\big]
\qquad(i=1,2).
\end{equation}
Since ${\cal F}_1\subset{\cal F}_2$, by characterisation~(ii) of the convex order, we see
that $\tilde\nu_1\leq_{\rm cv}\tilde\nu_2$. Using characterisation~(i) of the
convex order, it follows that $\Gamma^\ast_t\nu_1\leq_{\rm cv}\Gamma^\ast_t\nu_2$.
\end{Proof}
\section{Scale invariant solutions to the bivariate RDE}\label{section_scale_invarint_solutions_of_RDE}
The goal of this section is to prove Theorem \ref{the_theorem}.
Let $\theta \in (0,1)$.
Throughout Section \ref{section_scale_invarint_solutions_of_RDE} we will use the shorthand $\rho$ to denote the probability measure $\rho_{\Xi_\theta}$ defined in \eqref{muXi}.
Let
\begin{equation}\label{x_and_c_def}
x_k := \theta^k \quad \text{and} \quad c_k := \frac{1-\theta}{1+\theta} \cdot \theta^k, \qquad k \in \mathbb{N};
\end{equation}
thus we have
\begin{equation}\label{rho}
\rho = \rho_{\Xi_\theta} =\sum_{k=0}^\infty c_k \delta_{x_k} +\frac{\theta}{1+\theta}\delta_\infty.
\end{equation}
For convenience we also introduce the notation
\begin{equation}\label{x_minus_one}
x_{-1} := \infty \quad \text{and} \quad c_{-1} := \rho(\{\infty\}) = \frac{\theta}{1+\theta}.
\end{equation}
Using this notation we have $\rho(\{x_k\}) = c_k$ for every $k \ge -1$.
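As a quick sanity check, note that \eqref{rho} indeed defines a probability measure: by \eqref{x_and_c_def},
\begin{equation*}
\sum_{k=0}^\infty c_k + \frac{\theta}{1+\theta} = \frac{1-\theta}{1+\theta} \cdot \frac{1}{1-\theta} + \frac{\theta}{1+\theta} = \frac{1}{1+\theta} + \frac{\theta}{1+\theta} = 1.
\end{equation*}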
\begin{definition}\label{biv_with_rho_marginals}
Let $\mathcal{P}^{(2)}_\theta$ denote the space of symmetric probability measures on $I \times I$ whose marginal distributions are both $\rho$.
\end{definition}
\subsection{Main lemmas}\label{subsection_main_lemmas}
\paragraph{}In Section \ref{subsection_main_lemmas} we state the key lemmas of Section \ref{section_scale_invarint_solutions_of_RDE} and prove Theorem \ref{the_theorem} using them.
\begin{definition}[The signature of a scale invariant measure]\label{def:rho->f}
Let $\theta \in (0,1)$. The signature of a scale invariant measure $\rho^{(2)} \in \mathcal{M}^{(2)}_{\theta}$ is the function $f_{\rho^{(2)}}:\mathbb{N}\to\mathbb{R}$
defined by
\begin{equation}\label{rho->f}
f_{\rho^{(2)}}(n) := \rho^{(2)} \big( \{ [0,x_n] \times I\} \cup \{ I \times [0,1] \} \big), \qquad n \in \mathbb{N}.
\end{equation}
\end{definition}
The signature of the diagonal measure $\overline{\rho}^{(2)}$ (c.f.\ \eqref{unnu}) is
\begin{equation}\label{signature_of_diagonal}
f_{\overline{\rho}^{(2)}}(n) = {\mathbb P}\big[Y_\varnothing \leq x_n \text{ or } Y_\varnothing \leq 1 \big]= \sum \limits_{k = 0}^\infty c_k = \frac{1}{1+\theta}, \quad n \in \mathbb{N}.
\end{equation}
\begin{lemma}[Conditions for $f$ to be a signature]\label{lemma:f_conditions}
If $\theta \in (0,1)$ and $f: \mathbb{N} \to \mathbb{R}$, then there exists a (unique) probability measure $\rho^{(2)} \in \mathcal{M}^{(2)}_{\theta}$ such that $f$ is its signature if and only if
\begin{enumerate}
\item $f(0) \le 1$,
\item $\lim \limits_{n \to \infty} f(n) = \frac{1}{1+\theta}$,
\item $f(n)$ is non-increasing,
\item $(1+\theta) \cdot f(0) \le 2f(1)$,
\item $(1+\theta) \cdot f(n) \le \theta \cdot f(n-1) + f(n+1)$ for every $n \ge 1$.
\end{enumerate}
\end{lemma}
\paragraph{}We will prove this lemma in Section \ref{sec:f_conditions}.
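For instance, the constant function $f \equiv \frac{1}{1+\theta}$ (which, by \eqref{signature_of_diagonal}, is the signature of the diagonal measure) satisfies all five conditions: (i) and (iii) are clear, (ii) is trivial, (iv) reads $1 \le \frac{2}{1+\theta}$, which holds since $\theta<1$, and (v) holds with equality, since $(1+\theta)f(n) = \theta f(n-1) + f(n+1)$ for a constant $f$.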
Next we define a function $f_{\theta, c}(n)$, $n \in \mathbb{N}$, that will help us identify the signature of a scale invariant solution of the bivariate RDE.
\begin{lemma}[Implicit equation for $f_{\theta, c}(n)$]\label{lemma:imp_eq_for_f_theta_c}
Let $\theta \in (0,1)$ and $c \ge 0$ be arbitrary. The system of equations
\begin{align}
f_{\theta,c}(0)^2 - \frac{1}{1+\theta}f_{\theta,c}(0) &= 2c, \label{quadratic_eq_for_f_0} \\
f_{\theta,c}(n-1)^2- f_{\theta,c}(n)^2 &= \theta^{n-1}\left( f_{\theta,c}(n-1)-f_{\theta,c}(n) \right)+c \cdot \theta^{2n-2}(1-\theta^2), \quad n \geq 1, \label{quadratic_eq_for_f_n_and_n_minus_1} \\
f_{\theta,c}(0) & >0, \quad f_{\theta,c}(n)> \frac{\theta^{n-1}}{2}, \quad n \geq 1 \label{f_theta_c_n_geq_ugly}
\end{align}
has a unique solution $f_{\theta,c}(n), n \in \mathbb{N}$.
\end{lemma}
\begin{lemma}[Existence and continuity of $f_{\theta, c}(\infty)$]\label{lemma:f_continuous}
If $\theta \in (0,1)$ and $c \ge 0$, then the limit
\begin{equation}\label{def:eq:f_theta_infty}
f_{\theta, c}(\infty):= \lim_{n \to \infty} f_{\theta, c}(n)
\end{equation} exists and
the function $c \mapsto f_{\theta, c}(\infty)$ is continuous.
\end{lemma}
\paragraph{}We will prove Lemmas \ref{lemma:imp_eq_for_f_theta_c} and \ref{lemma:f_continuous} in Section \ref{sec:f_unique}.
Note that if $c=0$ then
\begin{equation}\label{f_theta_null}
f_{\theta, 0}(n) = \frac{1}{1+\theta}, \qquad n \in \mathbb{N}
\end{equation}
is a solution of \eqref{quadratic_eq_for_f_0}-\eqref{f_theta_c_n_geq_ugly} (and it follows from the uniqueness statement of Lemma \ref{lemma:imp_eq_for_f_theta_c} that \eqref{f_theta_null} is the only solution of \eqref{quadratic_eq_for_f_0}-\eqref{f_theta_c_n_geq_ugly} in the $c=0$ case).
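Indeed, for $f \equiv \frac{1}{1+\theta}$ and $c=0$ both sides of \eqref{quadratic_eq_for_f_0} and of \eqref{quadratic_eq_for_f_n_and_n_minus_1} vanish, and \eqref{f_theta_c_n_geq_ugly} holds because $\frac{1}{1+\theta} > \frac{1}{2} \ge \frac{\theta^{n-1}}{2}$ for $\theta \in (0,1)$ and $n \ge 1$.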
\begin{remark}\label{remark:cont_case_system}
Note that if we rearrange \eqref{quadratic_eq_for_f_0} and \eqref{quadratic_eq_for_f_n_and_n_minus_1}, we get
\begin{equation}
f_{\theta, c}(0)^2 - \frac{f_{\theta, c}(0)}{1+\theta} = 2c, \qquad
\frac{f_{\theta, c}(n-1) - f_{\theta, c}(n)}{\theta^{n-1} - \theta^n} = \frac{c \cdot \theta^{n-1} \cdot \frac{1+\theta}{2}}{\frac{f_{\theta, c}(n-1) + f_{\theta, c}(n)}{2} - \frac{\theta^{n-1}}{2}}.
\end{equation}
Now if we non-rigorously define the function $f$ by $f(\theta^n) := f_{\theta, c}(n)$ when $\theta$ is very close to $1$, and moreover denote $r:= \theta^n$, then in the $\theta \to 1$ limit we get
\begin{equation} \label{cont_case}
f(1)^2 - \frac 12 f(1) = 2c, \qquad
\frac{\partial}{\partial r} f(r) = \frac{c \cdot r}{f(r) - r/2},
\end{equation}
i.e., conditions (iii) and (i) of equation (2.2) of \cite{RST19}.
We also note that condition (ii) of equation (2.2) of \cite{RST19}, i.e., $f(0) = \frac 12$, corresponds to the condition $ f_{\theta, c}(\infty) = \frac{1}{1+\theta}$ in our current discrete setting. We will see that the key question is whether there exists $c>0$ for which $ f_{\theta, c}(\infty) = \frac{1}{1+\theta}$ holds.
\end{remark}
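Note also that in the continuum system \eqref{cont_case} the case $c=0$ is again degenerate: the constant function $f \equiv \frac 12$ solves \eqref{cont_case} with $c=0$, since $f(1)^2-\frac 12 f(1)=0$ and, for $0<r<1$, both sides of the differential equation vanish. This mirrors the discrete identity \eqref{f_theta_null}.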
\begin{lemma}[Signature of a scale invariant solution of the bivariate RDE]\label{lemma:equivalent_system}
Let $\rho^{(2)} \in \mathcal{M}^{(2)}_{\theta}$ and let $f_{\rho^{(2)}}$ denote its signature.
\begin{enumerate}
\item $\rho^{(2)}$ is a solution of the bivariate RDE \eqref{bivar_RDE} if and only if there exists $c \ge 0$ such that $f_{\rho^{(2)}}(n) = f_{\theta, c}(n)$ holds for every $n \in \mathbb{N}$.
\item If $\rho^{(2)}$ is a solution of the bivariate RDE and $c$ is the parameter for which $f_{\rho^{(2)}}(n) = f_{\theta, c}(n)$ holds for every $n \in \mathbb{N}$, then
\begin{equation}\label{c_upper_bound}
c \le \max \left( 0, \frac{\theta \cdot (2\theta - 1)}{(1+\theta)^2} \right).
\end{equation}
\end{enumerate}
\end{lemma}
\paragraph{}We will prove this lemma in Section \ref{sec:equivalent_system}.
Note that the diagonal measure $\overline{\rho}^{(2)}$ defined in \eqref{unnu} is a solution of the bivariate RDE \eqref{bivar_RDE}, and indeed $f_{\overline{\rho}^{(2)}}(n) = \frac{1}{1+\theta} = f_{\theta, 0}(n)$ for every $n \in \mathbb{N}$ by \eqref{signature_of_diagonal} and \eqref{f_theta_null},
in accordance with Lemma \ref{lemma:equivalent_system}.
\begin{definition}[Perturbation of the diagonal signature]\label{def:f_tilde}
Let $\theta \in (0,1)$ be arbitrary and let $f_{\theta, c}$, $c \ge 0$, be as in Lemma \ref{lemma:imp_eq_for_f_theta_c}. Let us define
\begin{equation}\label{eq_tilde_f_theta_n}
\tilde{f}_\theta(n) := \left( \frac{\partial}{\partial c} f_{\theta, c}(n) \right) \bigg|_{c=0_+} \quad \text{and} \quad \tilde{f}_\theta(\infty) := \lim \limits_{n \to \infty} \tilde{f}_\theta (n).
\end{equation}
\end{definition}
We will prove in Section \ref{sec:lim_f_c=0} that the limit in \eqref{eq_tilde_f_theta_n} exists. Recall the notion of $\theta^* \in (\tfrac{1}{2},1)$ from Lemma \ref{L:tetdef}.
\begin{lemma}[Critical value]\label{lemma:lim_f_c=0}
We have $\tilde{f}_\theta(\infty) > 0$ for every $\theta \in (0, \theta^*)$, $\tilde{f}_\theta(\infty) < 0$ for every $\theta \in (\theta^*, 1)$ and $\tilde{f}_{\theta^*}(\infty)=0$.
\end{lemma}
We will prove this lemma in Section \ref{sec:lim_f_c=0}.
\begin{lemma}[Solution of the recursion if $\theta \le \theta^*$]\label{lemma:theta<=theta*}
If $\theta \in \left(\frac 12, \theta^*\right]$, then there does not exist $c \in \left( 0, \frac{\theta \cdot (2\theta - 1)}{(1+\theta)^2} \right] $ for which $ f_{\theta, c}(\infty) = \frac{1}{1+\theta}$.
\end{lemma}
\paragraph{}We will prove this lemma in Section \ref{sec:theta<=theta*}.
\begin{lemma}[Solution of the recursion if $\theta > \theta^*$]\label{lemma:theta>theta*}
For any $\theta \in (\theta^*, 1)$ there exists $\hat{c} > 0$ for which $ f_{\theta, \hat{c}}(\infty) = \frac{1}{1+\theta}$;
moreover, $f_{\theta, \hat{c}}$ also satisfies all of the conditions of Lemma \ref{lemma:f_conditions}.
\end{lemma}
\paragraph{}We will prove this lemma in Section \ref{sec:theta>theta*}.
\begin{remark}
Figure \ref{fig:fix_c} shows the values of the parameter $c$ for which we have $ f_{\theta, c}(\infty) = \frac{1}{1+\theta}$ for different values of $\theta \in (0.6,1)$. It shows that if $\theta \le \theta^*$ then the only such value is $c=0$, but if $\theta > \theta^*$ then there also exists a positive value $\hat{c}$. We also note that as $\theta \to 1$ the numerical simulations suggest that $\hat{c} \to 0.01770838$, which coincides with the parameter value of $c$ which gives the non-diagonal solution in the case $\Xi_1=(0,1]$, see \cite[Section 1.6]{RST19}. In other words, $c=0.01770838$ is the unique positive value of $c$ for which the differential equation \eqref{cont_case} together with the boundary condition $f(0)= \frac 12$ has a solution.
\end{remark}
\begin{figure}[!ht]
\centering
\includegraphics[scale=0.33]{fix_c.png}
\caption{The values of $c$ for which $ f_{\theta, c}(\infty) = \frac{1}{1+\theta}$}
\label{fig:fix_c}
\end{figure}
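Figure \ref{fig:fix_c} can be reproduced from the explicit recursion \eqref{f0_with_c}--\eqref{fn_with_c} of Corollary \ref{corr_f_inc_dec} below: one approximates $f_{\theta,c}(\infty)$ by $f_{\theta,c}(n_{\max})$ for a large truncation level $n_{\max}$ (an upper bound by \eqref{f_decreasing}) and records the values of $c$ where it equals $\frac{1}{1+\theta}$. The following minimal Python sketch illustrates this; it is not the code that produced the figure, and the choices $\theta=0.95$, $n_{\max}=4000$ and the grid of eleven $c$-values in $\big[0, \frac{\theta(2\theta-1)}{(1+\theta)^2}\big]$ are arbitrary.
\begin{verbatim}
from math import sqrt

def f_limit(theta, c, n_max=4000):
    # iterate (f0_with_c) and (fn_with_c); by (f_decreasing) the sequence
    # is non-increasing, so f(n_max) approximates f_{theta,c}(infinity)
    # from above
    f = (1 + sqrt(1 + 8 * c * (1 + theta) ** 2)) / (2 * (1 + theta))
    for n in range(1, n_max + 1):
        t = theta ** (n - 1)
        d = (2 * f - t) ** 2 - 4 * c * t * t * (1 - theta ** 2)
        f = (t + sqrt(d)) / 2
    return f

theta = 0.95                                         # any theta in (1/2, 1)
c_max = theta * (2 * theta - 1) / (1 + theta) ** 2   # bound (c_upper_bound)
for i in range(11):
    c = c_max * i / 10
    print(c, f_limit(theta, c) - 1 / (1 + theta))
\end{verbatim}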
\begin{Proof}[of Theorem \ref{the_theorem}]
The diagonal measure $\overline\rho^{(2)}$ defined in \eqref{unnu} is indeed a solution of the bivariate RDE \eqref{bivar_RDE} for every $\theta \in (0,1)$.
If $\theta \le \frac 12$ and $\rho^{(2)} \in \mathcal{M}^{(2)}_{\theta}$ is a solution of the bivariate RDE, then by Lemma \ref{lemma:equivalent_system}(ii) we must have $c=0$, where $c$ is the parameter for which $f_{\rho^{(2)}} \equiv f_{\theta, c}$ (such $c$ exists by Lemma \ref{lemma:equivalent_system}(i)). By \eqref{f_theta_null}
we have $f_{\rho^{(2)}}(n) \equiv \frac{1}{1+\theta}$, thus by \eqref{signature_of_diagonal} and the uniqueness statement of Lemma~\ref{lemma:f_conditions} we obtain that there is no scale invariant solution of the bivariate RDE other than the diagonal solution in the $\theta \le \frac 12$ case.
If $\theta \in \left(\frac 12, \theta^*\right]$ and we assume that $\rho^{(2)} \in \mathcal{M}^{(2)}_{\theta}$ is a solution of the bivariate RDE, then by Lemma \ref{lemma:equivalent_system} we have $c \in \left[ 0, \frac{\theta \cdot (2\theta - 1)}{(1+\theta)^2} \right]$ for the parameter $c$ which gives $f_{\rho^{(2)}} \equiv f_{\theta, c}$. But by Lemma \ref{lemma:theta<=theta*} we know that there is no $c \in \left( 0, \frac{\theta \cdot (2\theta - 1)}{(1+\theta)^2} \right]$ such that $\lim \limits_{n \to \infty} f_{\theta, c}(n) = \frac{1}{1+\theta}$ holds. Therefore by condition (ii) of Lemma \ref{lemma:f_conditions} we see that again only $c=0$ produces a signature of a scale invariant solution of the bivariate RDE.
If $\theta > \theta^*$ then by Lemmas \ref{lemma:f_conditions} and \ref{lemma:theta>theta*} there exists a measure $\hat{\rho}^{(2)} \in \mathcal{M}^{(2)}_{\theta}$ for which $f_{\theta, \hat{c}}(n) = f_{\hat{\rho}^{(2)}}(n)$ for every $n \in \mathbb{N}$. The measure $\hat{\rho}^{(2)}$ is non-diagonal, as we now explain. First note that $\hat{c} \neq 0$ implies $f_{\theta, \hat{c}} \neq f_{\theta, 0}$, thus $f_{\hat{\rho}^{(2)}} \neq f_{\overline{\rho}^{(2)}}$ (since $f_{\overline{\rho}^{(2)}}=f_{\theta, 0}$ by \eqref{signature_of_diagonal} and \eqref{f_theta_null}), and thus we must have $\hat{\rho}^{(2)} \neq \overline{\rho}^{(2)}$, i.e., $\hat{\rho}^{(2)}$ is non-diagonal.
Finally, we observe that $\hat{\rho}^{(2)}$ is a solution of the bivariate RDE \eqref{bivar_RDE} by Lemma \ref{lemma:equivalent_system}(i).
\end{Proof}
\subsection{\texorpdfstring{Conditions for $f$ to be a signature}{Conditions for f to be a signature}}\label{sec:f_conditions}
\paragraph{}In this section we show the necessary and sufficient conditions for a function $f: \mathbb{N} \to \mathbb{R}$ to be the signature of some $\rho^{(2)} \in \mathcal{M}^{(2)}_{\theta}$, i.e., we prove Lemma \ref{lemma:f_conditions}. To do this, we first define the bivariate signature $F_{\rho^{(2)}}$ in Definition \ref{def:rho->F} for each $\rho^{(2)} \in \mathcal{P}^{(2)}_\theta$ (c.f.\ Definition \ref{biv_with_rho_marginals}). In Lemma \ref{lemma:F_characterizes} we prove that this function $F_{\rho^{(2)}}$ characterizes the distribution $\rho^{(2)}$ and in Lemma \ref{lemma:F_conditions} we prove necessary and sufficient conditions for bivariate functions to be the bivariate signature of some $\rho^{(2)} \in \mathcal{P}^{(2)}_\theta$. After analysing the relation between scale invariant measures and scale invariant bivariate signatures in Lemma \ref{lemma:scale_inv} as well as the relation between $F_{\rho^{(2)}}$ and the univariate signature in Lemma \ref{lemma:F_and_f}, we can easily conclude the proof of Lemma \ref{lemma:f_conditions}.
\begin{definition}[Bivariate signature]\label{def:rho->F}
Given $\rho^{(2)} \in \mathcal{P}^{(2)}_\theta$, we define the bivariate function $F_{\rho^{(2)}} : \{x_k\}_{k=0}^\infty \times \{x_k\}_{k=0}^\infty \to [0,1] $ by
\begin{equation}\label{rho->F}
F_{\rho^{(2)}}(x_k, x_j) := \rho^{(2)} \big( \{ [0,x_k] \times I\} \cup \{ I \times [0,x_j] \} \big), \quad j, k \in \mathbb{N}.
\end{equation}
\end{definition}
Recall the notation from the beginning of Section \ref{section_scale_invarint_solutions_of_RDE}.
\begin{lemma}[Bivariate signature characterizes the measure]\label{lemma:F_characterizes}
The measure $\rho^{(2)} \in \mathcal{P}^{(2)}_\theta$ is uniquely characterized by $F_{\rho^{(2)}}$; in particular, for any $ j, k \in \mathbb{N}$ we have
\begin{align}
\label{first_qq}
\rho^{(2)}\big(\{\infty\}\times\{\infty\}\big)&= 1-F_{\rho^{(2)}}(x_0,x_0),\\
\label{second_qq}
\rho^{(2)}\big([0,x_k]\times\{\infty\}\big)&= F_{\rho^{(2)}}(x_k,x_0) - \frac{1}{1+\theta},\\
\label{third_qq}
\rho^{(2)}\big(\{\infty\}\times[0,x_j]\big)&= F_{\rho^{(2)}}(x_j,x_0) - \frac{1}{1+\theta},\\
\label{F_charact_4}
\rho^{(2)}\big([0,x_k]\times[0,x_j]\big)&= \frac{x_k}{1+\theta} + \frac{x_j}{1+\theta} - F_{\rho^{(2)}}(x_k,x_j).
\end{align}
\end{lemma}
\begin{Proof} The proof of \eqref{first_qq} follows from Definition \ref{def:rho->F} using $x_0=1$ and
$\rho^{(2)}( ([0,1]\cup \{\infty\})^2 )=1$.
Since the marginal distribution of $\rho^{(2)}$ is $\rho$, for every $j \in \mathbb{N}$ we have
$\rho^{(2)}\big( I \times [0,x_j] \big)=\rho( [0,x_j] )=\sum_{i=j}^\infty c_i=\frac{x_j}{1+\theta}$.
The equalities \eqref{second_qq}, \eqref{third_qq} and \eqref{F_charact_4} readily follow.
The $\rho^{(2)}$-measure of every atom of $\rho^{(2)}$ can be determined from \eqref{first_qq}-\eqref{F_charact_4} by inclusion-exclusion.
\end{Proof}
\begin{lemma}[Necessary and sufficient conditions on $F$]\label{lemma:F_conditions}
Let $\theta \in (0,1)$. Let us assume that we are given a function $F: \{x_k\}_{k=0}^\infty \times \{x_k\}_{k=0}^\infty \to [0, \infty)$. There exists a unique probability measure $\rho^{(2)} \in \mathcal{P}^{(2)}_\theta$ such that $F \equiv F_{\rho^{(2)}}$ holds (where $F_{\rho^{(2)}}$ is defined in Definition \ref{def:rho->F}) if and only if the following conditions are fulfilled:
\begin{enumerate}[1.]
\item $F(x_0, x_0) \le 1$,
\item $\lim \limits_{k \to \infty} F(x_k, x_j) = \frac{x_j}{1+\theta} \quad \forall \, j \in \mathbb{N}$,
\item $F(x_k, x_0)$ is non-increasing in $k$,
\item $F(x_k, x_j) = F(x_j, x_k) \quad \forall \, j, k \in \mathbb{N}$,
\item $-F(x_k, x_j) + F(x_{k+1}, x_j) + F(x_k, x_{j+1}) - F(x_{k+1}, x_{j+1}) \ge 0 \quad \forall \, j, k \in \mathbb{N}$.
\end{enumerate}
\end{lemma}
\begin{Proof}
If $\rho^{(2)} \in \mathcal{P}^{(2)}_\theta$ and we define $F_{\rho^{(2)}}$ as in \eqref{rho->F}, then conditions 1., 2.\ and 3.\ trivially hold for $F_{\rho^{(2)}}$. Condition 4.\ follows from the symmetry of $\rho^{(2)}$ and finally condition 5.\ also holds, since $-F_{\rho^{(2)}}(x_k, x_j) + F_{\rho^{(2)}}(x_{k+1}, x_j) + F_{\rho^{(2)}}(x_k, x_{j+1}) - F_{\rho^{(2)}}(x_{k+1}, x_{j+1}) = \rho^{(2)}(x_k, x_j)$, where $\rho^{(2)}(x_k, x_j)$ is a shorthand for $\rho^{(2)} \big( \{ (x_k, x_j) \} \big)$.
In the other direction, the uniqueness statement follows from Lemma \ref{lemma:F_characterizes}.
If $F$ is a function such that all of the conditions of the lemma hold, then we will define $\rho^{(2)}$ pointwise on $\{(x_k, x_j)\}_{k,j=-1}^\infty$ (where $x_{-1}=\infty$, c.f.\ \eqref{x_minus_one}) and prove that $\rho^{(2)}$ is a probability measure, that it is in $\mathcal{P}^{(2)}_\theta$, and that $F \equiv F_{\rho^{(2)}}$ holds.
For every $j, k \in \mathbb{N}$ let
\begin{align}
&\rho^{(2)}(x_k, x_j) := -F(x_k, x_j) + F(x_{k+1}, x_j) + F(x_k, x_{j+1}) - F(x_{k+1}, x_{j+1}),\\
&\rho^{(2)}(\infty, x_k) := F(x_k, x_0) - F(x_{k+1}, x_0),\\
&\rho^{(2)}(x_k, \infty) := F(x_k, x_0) - F(x_{k+1}, x_0),\\
&\rho^{(2)}(\infty, \infty) := 1 - F(x_0, x_0).
\end{align}
The non-negativity of $\rho^{(2)}$ follows from conditions 1., 3.\ and 5., moreover $\rho^{(2)}$ is trivially symmetric, since $F$ is also symmetric by 4.
The marginals of $\rho^{(2)}$:
\begin{align}
\bullet &\sum \limits_{i=-1}^\infty \rho^{(2)}(x_k, x_i) = F(x_k, x_0) - F(x_{k+1}, x_0) +\nonumber\\
&+ \sum \limits_{i=0}^\infty (-F(x_k, x_i) + F(x_{k+1}, x_i) + F(x_k, x_{i+1}) - F(x_{k+1}, x_{i+1})) =\nonumber\\
&= F(x_k, x_0) - F(x_{k+1}, x_0) - F(x_k, x_0) + \lim \limits_{i \to \infty} F(x_k, x_i) + F(x_{k+1}, x_0) - \nonumber\\
&- \lim \limits_{i \to \infty} F(x_{k+1}, x_i) \stackrel{2.}{=} \frac{(1-\theta) \cdot x_k}{1+\theta} \stackrel{\eqref{x_and_c_def}}{=} c_k, \qquad k \in \mathbb{N}\\
\bullet &\sum \limits_{i=-1}^\infty \rho^{(2)}(\infty, x_i) = 1 - F(x_0, x_0) + \sum \limits_{k=0}^\infty (F(x_k, x_0) - F(x_{k+1}, x_0)) =\nonumber\\
&= 1 - F(x_0, x_0) + F(x_0, x_0) - \lim \limits_{k \to \infty} F(x_k, x_0) \stackrel{2.}{=}1-\frac{x_0}{1+\theta} = c_{-1}
\end{align}
So the measure $\rho^{(2)}$ has marginal distributions $\rho$ defined as in \eqref{rho}.
In particular, $\rho^{(2)}$ is a probability measure on $I^2$.
We still have to check that $F \equiv F_{\rho^{(2)}}$ holds:
\begin{multline}
F_{\rho^{(2)}}(x_k, x_j) \stackrel{\eqref{rho->F}}{=} \rho^{(2)} \big( \{ [0,x_k] \times I\} \cup \{ I \times [0,x_j] \} \big)
= \sum \limits_{i=k}^\infty c_i + \sum \limits_{l=j}^\infty \sum \limits_{i=-1}^{k-1} \rho^{(2)} (x_i,x_l) = \\
\frac{x_k}{1+\theta} + \sum \limits_{l=j}^\infty (-F(x_0, x_l) + F(x_k, x_l) + F(x_0, x_{l+1}) - F(x_k, x_{l+1}))
+ F(x_0, x_j) - \lim \limits_{l \to \infty} F(x_0, x_l) = \\ \frac{x_k}{1+\theta} - F(x_0, x_j) + \lim \limits_{l \to \infty} F(x_0, x_l)
+ F(x_k, x_j) - \lim \limits_{l \to \infty} F(x_k, x_l) + F(x_0, x_j) - \lim \limits_{l \to \infty} F(x_0, x_l) \stackrel{2.}{=}\\ F(x_k, x_j), \qquad j, k \in \mathbb{N}.
\end{multline}
\end{Proof}
\begin{definition}[Scale invariant bivariate function]
$F: \{x_k\}_{k=0}^\infty \times \{x_k\}_{k=0}^\infty \to [0, \infty)$ is a scale invariant bivariate function if
\begin{equation}\label{F_scale_inv}
F(x_{k+l}, x_{j+l}) = \theta^l F(x_k, x_j), \quad j, k, l \in \mathbb{N}.
\end{equation}
\end{definition}
If $F$ is scale invariant then for every $0 \le j \le k$ we have
\begin{equation}\label{F_scale_inv_remark}
F(x_k, x_j) = \theta^j F(x_{k-j}, x_0).
\end{equation}
Recall the notation of ${\cal M}^{(2)}_\theta$ from below (\ref{rhosca}) as well as that of $\mathcal{P}^{(2)}_\theta$ from Definition \ref{biv_with_rho_marginals}.
\begin{lemma}[Scale invariant measures and functions]\label{lemma:scale_inv} Let $\rho^{(2)} \in \mathcal{P}^{(2)}_\theta$.
Then $\rho^{(2)} \in {\cal M}^{(2)}_\theta$ holds if and only if $F_{\rho^{(2)}}$ defined in Definition \ref{def:rho->F} is a scale invariant function.
\end{lemma}
\begin{Proof} First note that if $F_{\rho^{(2)}}$ is a scale invariant function then
\begin{equation}
\rho^{(2)}\big([0,\theta^n]\times I\cup I\times[0,\theta^n]\big) \stackrel{ \eqref{rho->F} }{=} F_{\rho^{(2)}}(x_n,x_n) \stackrel{ \eqref{F_scale_inv} }{=}
\theta^n F_{\rho^{(2)}}(x_0,x_0) \leq \theta^n, \qquad n \geq 0.
\end{equation}
Together with our assumption that both of the marginals of $\rho^{(2)}$ are $\rho$, this implies that we have $\rho^{(2)}\big([0,t]\times I\cup I\times[0,t]\big) \leq t $
for all $0 \leq t \leq 1$, thus by \eqref{Mi2def} we have $\rho^{(2)} \in {\cal M}^{(2)}$.
It remains to show that if $\rho^{(2)} \in {\cal M}^{(2)} \cap \mathcal{P}^{(2)}_\theta$ then $F_{\rho^{(2)}}$ is scale invariant if and only if $\Gamma_\theta^{(2)} \rho^{(2)}=\rho^{(2)}$.
Let $\hat{\rho}^{(2)}:=\Gamma_\theta^{(2)}\rho^{(2)}$. By the scale invariance of the marginal distribution (c.f.\ \eqref{rhosca}) we have
$\hat{\rho}^{(2)} \in \mathcal{P}^{(2)}_\theta$. Thus by Lemma \ref{lemma:F_characterizes} we only need to prove that $F_{\hat{\rho}^{(2)}} \equiv F_{\rho^{(2)}}$ holds if and only if
$ \theta^{-1} F_{\rho^{(2)}}(x_{k+1}, x_{j+1}) = F_{\rho^{(2)}}(x_k, x_j)$ holds for any $ k,j \in \mathbb{N}$ (i.e., $F_{\rho^{(2)}}$ satisfies the $l=1$ case of \eqref{F_scale_inv}). This equivalence follows as soon as we observe that we have
\begin{multline}
F_{\hat{\rho}^{(2)}}(x_k,x_j)\stackrel{ \eqref{rho->F} }{=} \Gamma_\theta^{(2)}\rho^{(2)} \big( \{ [0,x_k] \times I\} \cup \{ I \times [0,x_j] \} \big)
\stackrel{ \eqref{Ga2} }{=} \\
\theta^{-1} \rho^{(2)} \big( (\psi^{(2)}_\theta)^{-1} \left( \{ [0,x_k] \times I\} \cup \{ I \times [0,x_j] \} \right) \big) \stackrel{(*)}{=}
\theta^{-1} \rho^{(2)}\big( \{ [0,\theta x_k] \times I\} \cup \{ I \times [0, \theta x_j] \} \big)\stackrel{\eqref{x_and_c_def}}{=} \\
\theta^{-1} \rho^{(2)}\big( \{ [0, x_{k+1}] \times I\} \cup \{ I \times [0, x_{j+1}] \} \big)\stackrel{ \eqref{rho->F} }{=}
\theta^{-1}F_{\rho^{(2)}}(x_{k+1},x_{j+1}), \qquad k,j \in \mathbb{N},
\end{multline}
where $(*)$ holds since we defined $\psi^{(2)}_\theta : I^2\to I^2$ by
$\psi^{(2)}_\theta(x,x'):=\big(\psi_\theta(x),\psi_\theta(x')\big)$, where $\psi_\theta(x)= x/\theta$ if $x\leq \theta$ and $\psi_\theta(x)=\infty$ if $x>\theta$, cf.\ \eqref{psit}.
\end{Proof}
\begin{lemma}[Relationship between $F_{\rho^{(2)}}$ and the signature]\label{lemma:F_and_f}
If $\rho^{(2)} \in \mathcal{M}^{(2)}_{\theta}$, $F_{\rho^{(2)}}$ is the function defined in Definition \ref{def:rho->F} and $f_{\rho^{(2)}}$ is the signature of $\rho^{(2)}$ (c.f.\ Definition \ref{def:rho->f}), then
\begin{equation}\label{F_and_f}
f_{\rho^{(2)}}(n) = F_{\rho^{(2)}}(x_n, x_0) = \frac{1}{x_{k \wedge j}} F_{\rho^{(2)}}(x_k, x_j)
\end{equation}
holds for every $j, k \in \mathbb{N}$ for which $n = |k-j|$.
\end{lemma}
\begin{Proof}
The first equality is trivial from the definitions of $f_{\rho^{(2)}}$ and $F_{\rho^{(2)}}$. The second equality follows from the fact that $F_{\rho^{(2)}}$ is a scale invariant function (by Lemma \ref{lemma:scale_inv}), from $x_{k \wedge j } = \theta^{k \wedge j}$ (cf.\ \eqref{x_and_c_def}), from \eqref{F_scale_inv_remark}, and from the symmetry of $F_{\rho^{(2)}}$ (cf.\ Condition 4.\ of Lemma \ref{lemma:F_conditions}).
\end{Proof}
\begin{Proof}[of Lemma \ref{lemma:f_conditions}]
First let $\rho^{(2)} \in \mathcal{M}^{(2)}_{\theta}$ and let $f_{\rho^{(2)}}$ be its signature.
Let us also define $F_{\rho^{(2)}}$ as in \eqref{rho->F}, which means $f_{\rho^{(2)}}(n) = F_{\rho^{(2)}}(x_n, x_0)$ by Lemma \ref{lemma:F_and_f}.
Let us now check that $f_{\rho^{(2)}}$ satisfies the properties (i)-(v) of Lemma \ref{lemma:f_conditions}.
\begin{enumerate}
\item $f_{\rho^{(2)}}(0) = F_{\rho^{(2)}}(x_0, x_0) \le 1$ using condition 1.\ of Lemma \ref{lemma:F_conditions}.
\item $ \lim \limits_{n \to \infty} f_{\rho^{(2)}}(n) = \frac{1}{1+\theta}$ by \eqref{F_and_f} and condition 2.\ of Lemma \ref{lemma:F_conditions}.
\item We have
$ f_{\rho^{(2)}}(n) = F_{\rho^{(2)}}(x_n, x_0) \ge F_{\rho^{(2)}}(x_{n+1}, x_0) = f_{\rho^{(2)}}(n+1)$ for any $n \in \mathbb{N}$ by condition 3.\ of Lemma \ref{lemma:F_conditions}.
\item From condition 5.\ of Lemma \ref{lemma:F_conditions} we know
\begin{equation}\label{F_cond_from_rho_nonneg}
-F_{\rho^{(2)}}(x_k, x_j) + F_{\rho^{(2)}}(x_{k+1}, x_j) + F_{\rho^{(2)}}(x_k, x_{j+1}) - F_{\rho^{(2)}}(x_{k+1}, x_{j+1}) \ge 0.
\end{equation}
Using $F_{\rho^{(2)}}(x_k, x_j) = x_{k \wedge j} \cdot f_{\rho^{(2)}}(|k-j|)$ (c.f.\ \eqref{F_and_f}) and substituting $j:=k$ into \eqref{F_cond_from_rho_nonneg} we obtain
\begin{equation}
- x_k \cdot f_{\rho^{(2)}}(0) + x_k \cdot f_{\rho^{(2)}}(1) + x_k \cdot f_{\rho^{(2)}}(1) - x_{k+1} \cdot f_{\rho^{(2)}}(0) \ge 0 .
\end{equation}
Dividing by $x_k$ and rearranging, we get condition (iv).
\item If we use $F_{\rho^{(2)}}(x_k, x_j) = x_{k \wedge j} \cdot f_{\rho^{(2)}}(|k-j|)$ in \eqref{F_cond_from_rho_nonneg} in the case $k > j$, we get
\begin{equation}\label{four_terms_signiture}
-x_j \cdot f_{\rho^{(2)}}(k-j) + x_j \cdot f_{\rho^{(2)}}(k+1-j) + x_{j+1} \cdot f_{\rho^{(2)}}(k-j-1)
- x_{j+1} \cdot f_{\rho^{(2)}}(k-j) \ge 0 .
\end{equation}
Let $n := k-j$. If we divide \eqref{four_terms_signiture} by $x_j$ and rearrange, we get the required inequality.
\end{enumerate}
In the other direction, assume that $f: \mathbb{N} \to \mathbb{R}$ satisfies conditions (i)-(v) of Lemma \ref{lemma:f_conditions}. Our goal is to show that
there exists a unique probability measure $\rho^{(2)} \in \mathcal{M}^{(2)}_{\theta}$ such that $f$ is its signature. As a first step, we define
\begin{equation}\label{f_F_again}
F(x_k, x_j) := x_{k \wedge j} \cdot f(|k-j|), \qquad j, k \in \mathbb{N}
\end{equation}
and show that the conditions of Lemma \ref{lemma:F_conditions} hold for $F$.
Conditions (i), (ii), (iii) of Lemma \ref{lemma:f_conditions} on $f$ imply respectively conditions 1., 2.\ and 3.\ of Lemma \ref{lemma:F_conditions}.
Condition 4.\ of Lemma \ref{lemma:F_conditions} for $F$ is straightforward from \eqref{f_F_again}.
Condition 5.\ of Lemma \ref{lemma:F_conditions} follows from condition (iv) of Lemma \ref{lemma:f_conditions} (in the $k=j$ case) and condition (v) of Lemma \ref{lemma:f_conditions} (in the $k > j$ case, and, by symmetry, in the $j > k$ case).
We can thus apply Lemma \ref{lemma:F_conditions} to infer that there exists a probability measure $\rho^{(2)} \in \mathcal{P}^{(2)}_\theta$ such that $F \equiv F_{\rho^{(2)}}$ holds.
In fact $\rho^{(2)} \in \mathcal{M}^{(2)}_{\theta}$ by Lemma \ref{lemma:scale_inv} and the scale invariance of $F$ (c.f.\ \eqref{F_scale_inv}), which is straightforward from \eqref{f_F_again}.
Finally, $f \equiv f_{\rho^{(2)}}$ follows from Definition \ref{def:rho->f}, Lemma \ref{lemma:F_and_f} and \eqref{f_F_again}.
Uniqueness is clear since the signature of a bivariate measure in
$\mathcal{M}^{(2)}_{\theta}$ uniquely determines its bivariate signature by Lemma \ref{lemma:F_and_f}, which in turn uniquely determines the bivariate measure by Lemma \ref{lemma:F_characterizes}.
\end{Proof}
\subsection{\texorpdfstring{Basic properties of $f_{\theta, c}(n)$}{Basic properties of f theta c(n)}}\label{sec:f_unique}
The main goal of Section \ref{sec:f_unique} is to prove Lemmas \ref{lemma:imp_eq_for_f_theta_c} and \ref{lemma:f_continuous}, but we will also collect some other useful properties of $f_{\theta, c}(n)$ in Corollary \ref{corr_f_inc_dec}. We will first define an auxiliary function $g_{\theta, c}(n), n \in \mathbb{N}$ and later we will identify $f_{\theta, c}(n)$ as $f_{\theta, c}(n)=\theta^n g_{\theta, c}(n)$. In order to construct $g_{\theta, c}(n)$, we need the following definition.
\begin{definition}[Recursion map $\psi_{\theta,c}$]\label{def_psi}
Given some $\theta \in (0,1)$ and $c \geq 0$, let us define the function $\psi_{\theta,c}: \mathcal{D}_{\theta,c} \to \mathbb{R}$ by
\begin{equation}\label{def_eq__psi_theta_c}
\psi_{\theta,c}(x)=\frac{1+\sqrt{(2x-1)^2-4c \cdot (1-\theta^2)}}{2\theta}, \quad \mathcal{D}_{\theta, c}=(\sqrt{(1-\theta^2)c}+1/2, +\infty).
\end{equation}
\end{definition}
Note that $x \in \mathcal{D}_{\theta,c} $ if and only if $2x-1\geq 0$ and $(2x-1)^2-4c \cdot (1-\theta^2) > 0$.
\begin{lemma}[Recursive definition of $g_{\theta, c}(n)$]\label{lemma_rec_def_of_g}
For any $\theta \in (0,1)$ and $c \geq 0$, the recursion
\begin{equation}\label{g_recursion}
g_{\theta, c}(0)=\frac{1+\sqrt{1+8c(1+\theta)^2}}{2(1+\theta)}, \quad
g_{\theta, c}(n)= \psi_{\theta,c}\big(g_{\theta, c}(n-1)\big), \quad n \geq 1
\end{equation}
has a solution (i.e., $g_{\theta, c}(n) \in \mathcal{D}_{\theta,c}$ holds for all $n \geq 0$). Moreover, the solution satisfies
\begin{equation}
\label{g_increasing}
g_{\theta, c}(n) \geq g_{\theta, c}(n-1), \qquad n \geq 1.
\end{equation}
\end{lemma}
\begin{proof} We first check that $g_{\theta, c}(0) \in \mathcal{D}_{\theta,c}$ holds. In order to do so, let us denote $\gamma=4c(1+\theta)^2$.
After some rearrangements, we only need to check that $\sqrt{1+2\gamma}> \sqrt{(1-\theta^2)\gamma}+\theta$ holds. Taking the square of both sides and rearranging,
we want to show $(1-\theta^2)+(1+\theta^2)\gamma>2\theta\sqrt{(1-\theta^2)\gamma}$. Taking the square of both sides again and rearranging, we need
\begin{equation}
(1-\theta^2)^2 + 2(1-\theta^2)^2\gamma +(1+\theta^2)^2 \gamma^2 >0,
\end{equation}
and this inequality indeed holds, since all of the terms are non-negative for any choice of $\theta \in (0,1)$ and $c \geq 0$. We have thus checked $g_{\theta, c}(0) \in \mathcal{D}_{\theta,c}$.
Next we observe that $x \mapsto \psi_{\theta,c}(x)$ is an increasing and concave function of $x \in \mathcal{D}_{\theta,c}$, moreover $\frac{\mathrm{d}}{\mathrm{d}x}\psi_{\theta,c}(x) > 1/\theta >1$ holds for all $x \in \mathcal{D}_{\theta,c}$. This implies that the equation
$\psi_{\theta,c}(x)=x$ has at most one solution in $\mathcal{D}_{\theta,c}$.
Let $y_0:=\sqrt{(1-\theta^2)c}+1/2$ denote the left endpoint of $\mathcal{D}_{\theta, c}$. One easily checks that $ \psi_{\theta,c}(y_0)\geq y_0$ holds if and only if $c \leq \frac{1-\theta}{4 \theta^2 (1+\theta)}$ holds. We will prove Lemma \ref{lemma_rec_def_of_g} by treating the cases $ \psi_{\theta,c}(y_0) \geq y_0$ and $ \psi_{\theta,c}(y_0)< y_0$ separately.
If $ \psi_{\theta,c}(y_0) \geq y_0$ then $ \psi_{\theta,c}(x)>x$ for every $x \in \mathcal{D}_{\theta,c}$ follows from the above listed properties of $\psi_{\theta,c}$.
Now it follows from \eqref{g_recursion} by induction on $n$ that $g_{\theta, c}(n) \in \mathcal{D}_{\theta,c}$ and \eqref{g_increasing} hold for all $n \geq 0$.
If $ \psi_{\theta,c}(y_0)< y_0$ then the above listed properties of $\psi_{\theta,c}$ imply that there exists a unique $x^*_0 \in \mathcal{D}_{\theta,c}$ for which $\psi_{\theta,c}(x^*_0)=x^*_0$, moreover $x\geq x^*_0$ implies
$\psi_{\theta,c}(x)\geq x$. One checks that $x^*_0=\frac{1+\sqrt{1+4c(1+\theta)^2}}{2(1+\theta)}$, thus $g_{\theta, c}(0) \geq x^*_0$ holds.
It is enough to prove that $g_{\theta, c}(n) \geq x^*_0$ for all $n \geq 0$ to conclude that $g_{\theta, c}(n) \in \mathcal{D}_{\theta,c}$ for all $n \geq 0$.
Now both $g_{\theta, c}(n) \geq x^*_0$ and \eqref{g_increasing} follow by induction on $n$ using the recursive definition \eqref{g_recursion} of $g_{\theta, c}(n)$.
\end{proof}
Now we are ready to prove the existence and uniqueness of $f_{\theta,c}(n), n \in \mathbb{N}$.
\begin{proof}[Proof of Lemma \ref{lemma:imp_eq_for_f_theta_c}] We will show by induction on $n$ that
\begin{equation}\label{def_eq_g}
f_{\theta, c}(n) = g_{\theta, c}(n) \theta^n , \qquad n \in \mathbb{N}
\end{equation}
is the unique solution of the system of equations \eqref{quadratic_eq_for_f_0}-\eqref{f_theta_c_n_geq_ugly}.
The induction hypothesis holds for $n=0$, since \eqref{quadratic_eq_for_f_0} is a quadratic equation for $f_{\theta,c}(0)$, which has two solutions:
one of them is equal to $g_{\theta, c}(0) \theta^0$, while the other solution is less than or equal to zero, therefore only
$g_{\theta, c}(0) \theta^0$ satisfies \eqref{f_theta_c_n_geq_ugly} for $n=0$.
Now assume that $n \geq 1$ and that \eqref{def_eq_g} holds for $n-1$, i.e.,
we have $f_{\theta, c}(n-1) = g_{\theta, c}(n-1) \theta^{n-1}$. We can view \eqref{quadratic_eq_for_f_n_and_n_minus_1} as a quadratic equation for $f_{\theta,c}(n)$ which has two solutions:
\begin{equation}\label{two_solutions-for_f_n}
\widetilde{x}_{1,2}= \frac{\theta^{n-1} \pm \sqrt{(2f_{\theta, c}(n-1) - \theta^{n-1})^2 - 4c \cdot \theta^{2n-2} \cdot (1-\theta^2)}}{2}.
\end{equation}
Now $\widetilde{x}_1=\theta^n \psi_{\theta,c}\left(g_{\theta, c}(n-1)\right)=g_{\theta, c}(n) \theta^n $ follows from $f_{\theta, c}(n-1) = g_{\theta, c}(n-1) \theta^{n-1}$, \eqref{def_eq__psi_theta_c} and \eqref{g_recursion}; moreover $\widetilde{x}_1 > \theta^{n-1}/2$, while $\widetilde{x}_2 < \theta^{n-1}/2$, thus only $\widetilde{x}_1$ satisfies
\eqref{f_theta_c_n_geq_ugly} and therefore \eqref{def_eq_g} holds.
\end{proof}
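As a consistency check, for $c=0$ we have $\mathcal{D}_{\theta,0}=(\tfrac12,+\infty)$ and $\psi_{\theta,0}(x)=x/\theta$, so \eqref{g_recursion} gives $g_{\theta,0}(n)=\frac{\theta^{-n}}{1+\theta}$ and hence $f_{\theta,0}(n)=\theta^n g_{\theta,0}(n)=\frac{1}{1+\theta}$, in agreement with \eqref{f_theta_null}.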
\begin{corollary}[Recursion for $f_{\theta,c}$]\label{corr_f_inc_dec} For any $\theta \in (0,1]$ and $c \geq 0$ we have
\begin{align}
&f_{\theta, c}(0) = \frac{1+\sqrt{1+8c(1+\theta)^2}}{2(1+\theta)}, \label{f0_with_c}\\
&f_{\theta, c}(n) = \frac{\theta^{n-1} + \sqrt{(2f_{\theta, c}(n-1) - \theta^{n-1})^2 - 4c \cdot \theta^{2n-2} \cdot (1-\theta^2)}}{2}, \quad n \ge 1. \label{fn_with_c}
\end{align}
Moreover, the function $f_{\theta, c}(n)$ is non-increasing in $n$:
\begin{equation}
\label{f_decreasing}
f_{\theta, c}(n) \le f_{\theta, c}(n-1), \quad n \geq 1.
\end{equation}
\end{corollary}
\begin{Proof}
The identities \eqref{f0_with_c} and \eqref{fn_with_c} follow from \eqref{g_recursion} and \eqref{def_eq_g}.
In order to prove \eqref{f_decreasing},
we need to show that $g_{\theta, c}(n)\leq g_{\theta, c}(n-1)/\theta $ holds for any $n \geq 1$: this inequality follows from the fact that
$\psi_{\theta,c}(x)\leq \psi_{\theta,0}(x) = x/\theta$ holds for any $x \in \mathcal{D}_{\theta,c}$.
\end{Proof}
\begin{Proof}[of Lemma \ref{lemma:f_continuous}] The limit $ f_{\theta, c}(\infty)= \lim_{n \to \infty} f_{\theta, c}(n)$ exists since $f_{\theta, c}(n) $ is non-increasing in $n$ (c.f.\ \eqref{f_decreasing}) and $f_{\theta, c}(n) \geq 0$. It follows from \eqref{f0_with_c} and \eqref{fn_with_c} by induction on $n$ that for each $n$ the function
$c \mapsto f_{\theta, c}(n)$ is continuous. Thus, in order to prove that $c \mapsto f_{\theta, c}(\infty)$ is continuous, we only need to check that the functions $c \mapsto f_{\theta, c}(n)$ converge uniformly as $n \to \infty$ on $[0,c_0]$ for any $0 \leq c_0 <+\infty$.
In order to achieve this, we will show
\begin{equation}\label{cauchy_ingredient}
f_{\theta, c}(n-1)-f_{\theta, c}(n) \leq \frac{1}{2} \sqrt{4c \theta^{2n-2}(1-\theta^2)}, \qquad n \geq 1, \ c \geq 0.
\end{equation}
By \eqref{def_eq_g} we only need to prove $g_{\theta,c}(n-1)-\theta g_{\theta,c}(n) \leq \frac{1}{2} \sqrt{4c (1-\theta^2)} $. By
\eqref{def_eq__psi_theta_c} and \eqref{g_recursion} it is enough to show that for all $ x \in \mathcal{D}_{\theta, c}$
we have $(2x-1)-\sqrt{(2x-1)^2 -4c(1-\theta^2) } \leq \sqrt{4c(1-\theta^2)}$, but this inequality easily follows using the properties
of $\mathcal{D}_{\theta, c}$ listed below \eqref{def_eq__psi_theta_c}.
It follows from \eqref{cauchy_ingredient} that for any $c_0>0$ the functions $c \mapsto f_{\theta, c}(n)$ form a Cauchy
sequence with respect to the sup-norm on $[0,c_0]$. From this the desired uniform convergence readily follows.
\end{Proof}
\subsection{Signature of a scale invariant solution of the bivariate RDE}\label{sec:equivalent_system}
\paragraph{}Our next goal is to prove Lemma \ref{lemma:equivalent_system}. First we show a formula in Lemma \ref{lemma:F_recursion} which characterizes the distribution of the right-hand side of the bivariate RDE \eqref{bivar_RDE} in terms of the bivariate signature $F_{\rho^{(2)}}$ (c.f.\ Definition \ref{def:rho->F}). Using this we get an equation for the univariate signature $f_{\rho^{(2)}}$ of a scale invariant measure $\rho^{(2)}$ in Lemma \ref{lemma:f_recursion}, which holds if and only if $\rho^{(2)}$ is a solution of the bivariate RDE.
In Lemma \ref{lemma_properties_of_f_theta_c} we show that $f_{\theta,c}$ (defined in Lemma \ref{lemma:imp_eq_for_f_theta_c}) satisfies a very similar equation.
Finally we conclude the proof of Lemma \ref{lemma:equivalent_system}.
Recall the notion of $\mathcal{P}^{(2)}_\theta$ from Definition \ref{biv_with_rho_marginals}.
\begin{definition}[Definition of $\tilde{\rho}^{(2)}$]\label{def:rho_tilde}
Let $\theta \in (0,1)$ and $\rho^{(2)} \in \mathcal{P}^{(2)}_\theta$. Denote by $\tilde{\rho}^{(2)}$ the law of
\begin{equation}
(\chi[\tau, \kappa](Y_1,Y_2), \chi[\tau, \kappa](Y_1^*,Y_2^*)),
\end{equation}
where the function $\chi$ is defined in \eqref{chi_def} and the remaining notation is defined in \eqref{bivar_RDE}, so $(Y_1, Y_1^*) \sim \rho^{(2)}$, $(Y_2, Y_2^*) \sim \rho^{(2)}$, $\tau \sim \text{UNI}[0,1]$, $\kappa$ is a random variable such that $\mathbb{P}(\kappa = 1) = \mathbb{P}(\kappa = 2) = \frac 12$, and $(Y_1, Y_1^*), (Y_2, Y_2^*), \tau$ and $\kappa$ are mutually independent.
\end{definition}
\begin{lemma}[Expressing $F_{\tilde{\rho}^{(2)}}$ in terms of $F_{\rho^{(2)}}$]\label{lemma:F_recursion}
If $\theta \in (0,1)$ and $\rho^{(2)} \in \mathcal{P}^{(2)}_\theta$, then for every $j, k \in \mathbb{N}$ we have
\begin{multline} \label{F_recursion}
F_{\tilde{\rho}^{(2)}}(x_k, x_j)
= F_{\rho^{(2)}}(x_k, x_j) - \frac 12 F_{\rho^{(2)}}(x_k, x_j)^2 + \frac{x_k^2}{2(1+\theta)^2} + \frac{x_j^2}{2(1+\theta)^2} +\\
+ \frac{ 1-\theta}{2\theta} \sum \limits_{t= k \vee j + 1}^\infty \bigg[ \theta^t \Big( F_{\rho^{(2)}}(x_k, x_j) - F_{\rho^{(2)}}(x_t, x_j)
- F_{\rho^{(2)}}(x_k, x_t) + F_{\rho^{(2)}}(x_t, x_t) \Big) \bigg],
\end{multline}
where $F_{\rho^{(2)}}$, $F_{\tilde{\rho}^{(2)}}$ are the bivariate signatures of $\rho^{(2)}$ and $\tilde{\rho}^{(2)}$, respectively (c.f.\ Definition \ref{def:rho->F}).
\end{lemma}
\begin{Proof}
Let us use the notation
\begin{equation}
(\tilde{Y}, \tilde{Y}^*) := (\chi[\tau, \kappa](Y_1,Y_2), \chi[\tau, \kappa](Y_1^*,Y_2^*)), \text{ so that } (\tilde{Y}, \tilde{Y}^*) \sim \tilde{\rho}^{(2)}.
\end{equation}
Let us also use the shorthand $F=F_{\rho^{(2)}}$, $\widetilde{F}=F_{\tilde{\rho}^{(2)}}$ in this proof.
Thus we have
\begin{equation}\label{kappa_1_or_2}
\widetilde{F}(x_k,x_j)= \mathbb{P}(\kappa = 1, \tilde{Y} \leq x_k \text{ or } \, \tilde{Y}^* \leq x_j) + \mathbb{P}(\kappa = 2, \tilde{Y} \leq x_k \text{ or } \, \tilde{Y}^* \leq x_j).
\end{equation}
Here
\begin{multline}\label{incl_excl}
\mathbb{P}(\kappa = 1, \tilde{Y} \leq x_k \text{ or } \, \tilde{Y}^* \leq x_j) = \\
\frac 12 \big[ \mathbb{P}(\tilde{Y} \le x_k \, |\, \kappa = 1) + \mathbb{P}(\tilde{Y}^* \le x_j\, |\, \kappa = 1) - \mathbb{P}(\tilde{Y} \le x_k, \tilde{Y}^* \le x_j \,|\, \kappa = 1)\big].
\end{multline}
Using the definition of $\chi$ in \eqref{chi_def}, we now calculate all three terms on the r.h.s.\ of \eqref{incl_excl}. First,
\begin{align}
&\mathbb{P}(\tilde{Y} \le x_k \, |\, \kappa = 1) = \mathbb{P}(Y_1 \le x_k, Y_1 > \tau) = \sum \limits_{l=k}^\infty \mathbb{P}(Y_1 = x_l, x_l > \tau) \nonumber\\
&= \sum \limits_{l=k}^\infty x_l \cdot c_l = \sum \limits_{l=k}^\infty \frac{1-\theta}{1+\theta} \cdot \theta^{2l} = x_k^2 \cdot \frac{1-\theta}{1+\theta} \cdot \sum \limits_{l=0}^\infty \theta^{2l} = \frac{x_k^2}{(1+\theta)^2}.
\end{align}
Similarly, we have
\begin{align}
&\mathbb{P}(\tilde{Y}^* \le x_j \, | \, \kappa = 1) = \frac{x_j^2}{(1+\theta)^2}.
\end{align}
Finally,
\begin{multline}
\mathbb{P}(\tilde{Y} \le x_k, \tilde{Y}^* \le x_j \, |\, \kappa = 1) = \mathbb{P}(Y_1 \le x_k, Y_1^* \le x_j, \tau < Y_1 \wedge Y_1^*) = \\
\sum \limits_{t = k \vee j + 1}^\infty \mathbb{P}(x_t < Y_1 \le x_k, x_t < Y_1^* \le x_j, \tau \in [x_t, x_{t-1})) = \\
\sum \limits_{t = k \vee j + 1}^\infty \big(F(x_t, x_j) + F(x_k, x_t) - F(x_k, x_j) - F(x_t, x_t) \big) (x_{t-1} - x_t) = \\
\frac{\theta-1}{\theta} \sum \limits_{t= k \vee j + 1}^\infty \bigg[ \theta^t \Big( F(x_k, x_j) - F(x_t, x_j)
- F(x_k, x_t) + F(x_t, x_t) \Big) \bigg].
\end{multline}
Now we calculate the other term of \eqref{kappa_1_or_2}:
\begin{align}
\mathbb{P}(\kappa &= 2, \tilde{Y} \leq x_k \text{ or } \, \tilde{Y}^* \leq x_j) = \frac 12 \cdot \big( 1 - \mathbb{P}(Y_1 \wedge Y_2 > x_k, Y_1^* \wedge Y_2^* > x_j)\big) \nonumber \\
& \stackrel{(*)}{=} \frac 12 \cdot \big( 1 - (1-F(x_k, x_j))^2 \big)= F(x_k, x_j) - \frac 12 \cdot F(x_k, x_j)^2, \label{kappa_2_case}
\end{align}
where $(*)$ holds by the independence of $(Y_1, Y^*_1 )$ and $(Y_2, Y^*_2 )$.
Now \eqref{F_recursion} follows if we substitute \eqref{incl_excl}-\eqref{kappa_2_case} into \eqref{kappa_1_or_2}.
\end{Proof}
Recall the notation of ${\cal M}^{(2)}_\theta$ from below (\ref{rhosca}).
\begin{lemma}[Equation for the signature]\label{lemma:f_recursion}
$\rho^{(2)} \in \mathcal{M}^{(2)}_{\theta}$ is a solution of the bivariate RDE \eqref{bivar_RDE} if and only if its signature $f_{\rho^{(2)}}$ satisfies
\begin{multline}
f_{\rho^{(2)}}(n)^2 =
\frac{1}{(1+\theta)^2} +
\theta^n \cdot f_{\rho^{(2)}}(n) - (1-\theta) \cdot \sum \limits_{t=n}^\infty \theta^{t} f_{\rho^{(2)}}(t+1) +
\\
+\left( \frac{1}{(1+\theta)^2} + \frac{\theta \cdot f_{\rho^{(2)}}(0)}{1+\theta} - (1-\theta) \cdot \sum \limits_{t=0}^\infty \theta^t f_{\rho^{(2)}}(t+1) \right) \cdot \theta^{2n}. \label{f_recursion}
\end{multline}
\end{lemma}
\begin{Proof} By
Lemma \ref{lemma:F_characterizes} the measure $\rho^{(2)}$ is a solution of the bivariate RDE \eqref{bivar_RDE} if and only if $F_{\tilde{\rho}^{(2)}}=F_{\rho^{(2)}}$.
We will prove that if $\rho^{(2)} \in \mathcal{M}^{(2)}_{\theta}$ then \eqref{f_recursion} holds if and only if $F_{\tilde{\rho}^{(2)}}=F_{\rho^{(2)}}$.
We have $F_{\tilde{\rho}^{(2)}}=F_{\rho^{(2)}}$ in \eqref{F_recursion} if and only if
\begin{multline}\label{F_fix}
F_{\rho^{(2)}}(x_k, x_j)^2 = \frac{x_k^2}{(1+\theta)^2} + \frac{x_j^2}{(1+\theta)^2} + \\ \frac{ 1-\theta}{\theta} \sum \limits_{t= k \vee j + 1}^\infty \bigg[ \theta^t \Big( F_{\rho^{(2)}}(x_k, x_j)
- F_{\rho^{(2)}}(x_t, x_j) - F_{\rho^{(2)}}(x_k, x_t) + F_{\rho^{(2)}}(x_t, x_t) \Big) \bigg]
\end{multline}
holds for every $j, k \in \mathbb{N}$.
By the symmetry of $F_{\rho^{(2)}}$ we can assume that $0 \le j \le k$ (so $x_k \le x_j$) and let $n := k-j$. We have $F_{\rho^{(2)}}(x_l, x_i) = x_{l \wedge i} f_{\rho^{(2)}}(|l-i|)$, $l, i \in \mathbb{N}$ (c.f.\ \eqref{F_and_f}). Hence
\begin{align}
F_{\rho^{(2)}}(x_k, x_j) &= x_j \cdot f_{\rho^{(2)}}(n),
\\
\sum \limits_{t= k \vee j + 1}^\infty \theta^t F_{\rho^{(2)}}(x_k, x_j) &= \sum \limits_{t= k+1}^\infty \theta^t \cdot x_j \cdot f_{\rho^{(2)}}(n) =
\frac{x_{k+j+1} \cdot f_{\rho^{(2)}}(n)}{1-\theta},\\
\notag \sum \limits_{t= k \vee j + 1}^\infty \theta^t F_{\rho^{(2)}}(x_t, x_j) &= \sum \limits_{t= k+1}^\infty \theta^t \cdot x_j \cdot f_{\rho^{(2)}}(t-j) =\\
&= x_{k+j+1} \cdot \sum \limits_{t=0}^\infty \theta^t f_{\rho^{(2)}}(t+n+1)
= x_{k+j+1} \cdot \sum \limits_{t=n}^\infty \theta^{t-n} f_{\rho^{(2)}}(t+1),\\
\sum \limits_{t= k \vee j + 1}^\infty \theta^t F_{\rho^{(2)}}(x_k, x_t) &= \sum \limits_{t= k+1}^\infty \theta^t x_k f_{\rho^{(2)}}(t-k) = x_{2k+1} \sum \limits_{t=0}^\infty \theta^t f_{\rho^{(2)}}(t+1),\\
\sum \limits_{t= k \vee j + 1}^\infty \theta^t F_{\rho^{(2)}}(x_t, x_t) &=
\sum \limits_{t= k+1}^\infty \theta^t \cdot x_t \cdot f_{\rho^{(2)}}(0) = \frac{x_{2(k+1)} \cdot f_{\rho^{(2)}}(0)}{1-\theta^2}.
\end{align}
If we substitute all of the above into \eqref{F_fix} and divide by $x_j^2 = \theta^{2j}$, we get
\begin{multline}
f_{\rho^{(2)}}(n)^2 = \frac{\theta^{2n}}{(1+\theta)^2} + \frac{1}{(1+\theta)^2} + \theta^n f_{\rho^{(2)}}(n) - (1-\theta)
\theta^n
\cdot \sum \limits_{t=n}^\infty \theta^{t-n} f_{\rho^{(2)}}(t+1)
- \\
(1-\theta) \cdot \theta^{2n} \cdot \sum \limits_{t=0}^\infty \theta^t f_{\rho^{(2)}}(t+1) + \frac{\theta^{2n+1} f_{\rho^{(2)}}(0)}{1+\theta}.
\end{multline}
We get \eqref{f_recursion} after rearranging the above formula, so $F_{\tilde{\rho}^{(2)}}=F_{\rho^{(2)}}$ holds in \eqref{F_recursion} if and only if $f_{\rho^{(2)}}$ satisfies \eqref{f_recursion}, which proves the lemma.
\end{Proof}
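As a quick sanity check, the constant signature $f_{\rho^{(2)}} \equiv \frac{1}{1+\theta}$ of the diagonal measure satisfies \eqref{f_recursion}: the second and third terms on the right-hand side cancel, since $(1-\theta)\sum_{t=n}^\infty \theta^t \frac{1}{1+\theta} = \frac{\theta^n}{1+\theta}$, and the bracket multiplying $\theta^{2n}$ equals $\frac{1}{(1+\theta)^2}+\frac{\theta}{(1+\theta)^2}-\frac{1}{1+\theta}=0$, so both sides equal $\frac{1}{(1+\theta)^2}$. This is consistent with the diagonal measure being a solution of the bivariate RDE.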
Next we consider the sequence $f_{\theta,c}(n), n\in \mathbb{N}$ (c.f.\ Lemma \ref{lemma:imp_eq_for_f_theta_c}) and derive some formulas analogous to \eqref{f_recursion}.
\begin{lemma}[Properties of $f_{\theta,c}$]\label{lemma_properties_of_f_theta_c}
Given some $\theta \in (0,1)$ and $c \geq 0$, let us assume that $\lim \limits_{n \to \infty} f_{\theta,c}(n) = \frac{1}{1+\theta}$ holds. Under these conditions we have
\begin{equation}\label{f_theta_c_sum_eq}
f_{\theta,c}(n)^2 = \frac{1}{(1+\theta)^2} + \theta^n \cdot f_{\theta,c}(n) - (1-\theta) \cdot \sum \limits_{t=n}^\infty \theta^{t} f_{\theta,c}(t+1) + c \cdot \theta^{2n}, \quad n \in \mathbb{N},
\end{equation}
\begin{equation}\label{c_f_theta_c_formula}
c = \frac{1}{(1+\theta)^2} + \frac{\theta \cdot f_{\theta,c}(0)}{1+\theta} - (1-\theta) \cdot \sum \limits_{t=0}^\infty \theta^t f_{\theta,c}(t+1),
\end{equation}
\begin{equation}\label{f_theta_c_0_in_interval}
\frac{1}{1+\theta} \wedge \frac{2 \theta}{1+\theta} \leq f_{\theta,c}(0) \leq \frac{1}{1+\theta} \vee \frac{2 \theta}{1+\theta},
\end{equation}
\begin{equation}\label{c_leq_bbbound}
c \leq 0 \vee \frac{\theta \cdot (2\theta - 1)}{(1+\theta)^2}.
\end{equation}
\end{lemma}
\begin{Proof}
To prove \eqref{f_theta_c_sum_eq} let us denote by $\beta_n$ the difference of the r.h.s.\ and the l.h.s.\ of \eqref{f_theta_c_sum_eq}. Our goal is to show $\beta_n \equiv 0$.
For every $n \ge 1$ we have
\begin{align}
\beta_n - \beta_{n-1} =& f_{\theta,c}(n-1)^2 - f_{\theta,c}(n)^2 - \theta^{n-1} f_{\theta,c}(n-1) + \theta^n f_{\theta,c}(n) +\nonumber\\
&+ (1-\theta) \theta^{n-1} f_{\theta,c}(n) - c \cdot \theta^{2n-2}(1 - \theta^2).\label{equivalent_system_other_direction}
\end{align}
The right-hand side of \eqref{equivalent_system_other_direction} is $0$ by \eqref{quadratic_eq_for_f_n_and_n_minus_1}.
Therefore the sequence $\beta_n$ is constant, but we also have $\lim \limits_{n \to \infty} \beta_n=0$ by the definition of $\beta_n$ and our assumption $\lim \limits_{n \to \infty} f_{\theta,c}(n) = \frac{1}{1+\theta}$. So $\beta_n \equiv 0$, thus we get \eqref{f_theta_c_sum_eq}.
Next we show \eqref{c_f_theta_c_formula}.
If we take \eqref{f_theta_c_sum_eq} at $n=0$, we obtain
\begin{equation}\label{c_equation_1}
f_{\theta,c}(0)^2 = \frac{1}{(1+\theta)^2} + f_{\theta,c}(0) - (1-\theta) \cdot \sum \limits_{t=0}^\infty \theta^{t} f_{\theta,c}(t+1) + c.
\end{equation}
If we take the difference of \eqref{quadratic_eq_for_f_0} and \eqref{c_equation_1} and rearrange, we get \eqref{c_f_theta_c_formula}.
Next we prove \eqref{f_theta_c_0_in_interval}. From our assumption $\lim \limits_{n \to \infty} f_{\theta,c}(n) = \frac{1}{1+\theta}$ and \eqref{f_decreasing}
we obtain that $f_{\theta,c}(n) \ge \frac{1}{1+\theta}$ for every $n \in \mathbb{N}$, hence
\begin{equation}
c \stackrel{\eqref{c_f_theta_c_formula} }{\le} \frac{1}{(1+\theta)^2} + \frac{\theta f_{\theta,c}(0)}{1+\theta} - (1-\theta) \sum \limits_{t=0}^\infty \frac{\theta^t}{1+\theta} = \frac{\theta ((1+\theta) f_{\theta,c}(0) - 1)}{(1+\theta)^2}. \label{c_inequality}
\end{equation}
Putting together \eqref{quadratic_eq_for_f_0} and \eqref{c_inequality}, we obtain the inequality
\begin{equation}
f_{\theta,c}(0)^2 - \frac{1}{1+\theta}f_{\theta,c}(0) \leq 2 \frac{\theta ((1+\theta) f_{\theta,c}(0) - 1)}{(1+\theta)^2} ,
\end{equation} which implies
\eqref{f_theta_c_0_in_interval}, since the roots of the polynomial $x^2 - \frac{x}{1+\theta}- 2 \frac{\theta ((1+\theta) x - 1)}{(1+\theta)^2} $ are $\frac{1}{1+\theta}$ and $\frac{2 \theta}{1+\theta}$.
Finally, \eqref{c_leq_bbbound} follows by plugging the upper bound of \eqref{f_theta_c_0_in_interval} into \eqref{c_inequality}.
\end{Proof}
\begin{equation}gin{Proof}[of Lemma \ranglef{lemma:equivalent_system}]
First assume that $\rho^{(2)} \in \mathcal{M}^{(2)}_{\theta}$ is a solution of the bivariate RDE \eqref{bivar_RDE} and let us define
\begin{equation}gin{equation}\lambdabel{c_def}
c := \frac{1}{(1+\theta)^2} + \frac{\theta \cdot f_{\rho^{(2)}}(0)}{1+\theta} - (1-\theta) \cdot \sum \langlemits_{t=0}^\infty \theta^t f_{\rho^{(2)}}(t+1).
\end{equation}
We will prove that $f_{\rho^{(2)}}(n) = f_{\theta, c}(n)$ holds for this $c$ for every $n \in \mathbb{N}$. By Lemma \ranglef{lemma:imp_eq_for_f_theta_c}, it is enough
to show that $f_{\rho^{(2)}}(n)$ satisfies \eqref{quadratic_eq_for_f_0}-\eqref{f_theta_c_n_geq_ugly}.
By Lemma \ranglef{lemma:f_recursion} the equation \eqref{f_recursion} holds. If we plug the definition \eqref{c_def} of $c$ into equation \eqref{f_recursion}, we get
\begin{equation}gin{equation}\lambdabel{f_recursion_with_c}
f_{\rho^{(2)}}(n)^2 = \frac{1}{(1+\theta)^2} + \theta^n \cdot f_{\rho^{(2)}}(n) - (1-\theta) \cdot \sum \langlemits_{t=n}^\infty \theta^{t} f_{\rho^{(2)}}(t+1) + c \cdot \theta^{2n}.
\end{equation}
If we take \eqref{f_recursion_with_c} at $n = 0$, subtract $\frac{1}{1+\theta} \cdot f_{\rho^{(2)}}(0)$ from both sides and again use the definition \eqref{c_def} of $c$, we get
$f_{\rho^{(2)}}(0)^2 - \frac{1}{1+\theta}f_{\rho^{(2)}}(0) = 2c$, i.e., that \eqref{quadratic_eq_for_f_0} holds. Now let $n \geq 1$.
If we take the difference of \eqref{f_recursion_with_c} at $n-1$ and at $n$, we obtain
\begin{equation}gin{align}
&f_{\rho^{(2)}}(n-1)^2 - f_{\rho^{(2)}}(n)^2 =\\
&= \theta^{n-1} f_{\rho^{(2)}}(n-1) - \theta^n f_{\rho^{(2)}}(n) - (1-\theta) \theta^{n-1} f_{\rho^{(2)}}(n) + c \cdot \theta^{2n-2}(1 - \theta^2),\nonumber
\end{align}
therefore \eqref{quadratic_eq_for_f_n_and_n_minus_1} holds. Both inequalities required by \eqref{f_theta_c_n_geq_ugly} can be proved using $f_{\rho^{(2)}}(n) \geq \frac{1}{1+\theta}$
(which holds by Lemma \ref{lemma:f_conditions}), also using $\frac{1}{1+\theta} >\frac{1}{2}\geq \frac{\theta^{n-1}}{2}$ in the proof of the $n \geq 1$ case of \eqref{f_theta_c_n_geq_ugly}.
We also need that $c \ge 0$: this follows from $f_{\rho^{(2)}}(0)^2 - \frac{1}{1+\theta}f_{\rho^{(2)}}(0) = 2c$ and $f_{\rho^{(2)}}(n) \geq \frac{1}{1+\theta}$.
In the other direction, we assume that for some $\rho^{(2)} \in \mathcal{M}^{(2)}_{\theta}$ we have $f_{\rho^{(2)}}(n) = f_{\theta, c}(n)$ for every $n \in \mathbb{N}$ for some $c \ge 0$, and we have to show that $\rho^{(2)}$ is a solution of the bivariate RDE. By Lemma \ref{lemma:f_recursion} it is enough to show that \eqref{f_recursion} holds for every $n \in \mathbb{N}$. In order to do so, we use Lemma \ref{lemma_properties_of_f_theta_c} (the conditions of which do hold, since $\lim\limits_{n \to \infty} f_{\rho^{(2)}}(n) = \frac{1}{1+\theta}$ by Lemma \ref{lemma:f_conditions}): the identity \eqref{f_recursion} follows by putting \eqref{f_theta_c_sum_eq} and \eqref{c_f_theta_c_formula} together. This completes the proof of statement (i) of Lemma \ref{lemma:equivalent_system}. Also, \eqref{c_upper_bound} follows from \eqref{c_leq_bbbound}, i.e., statement (ii) of Lemma \ref{lemma:equivalent_system} also holds.
The proof of Lemma \ranglef{lemma:equivalent_system} is complete.
\end{Proof}
\subsection{\texorpdfstring{Definition of $\theta^*$}{Definition of theta*}}\label{sec:lim_f_c=0}
\paragraph{}In this section our goal is to prove Lemmas \ref{L:tetdef} and \ref{lemma:lim_f_c=0}. We will give an explicit formula for $\tilde{f}_\theta(\infty) = \lim\limits_{n \to \infty} \left( \frac{\partial}{\partial c} f_{\theta, c}(n) \right) \big|_{c=0_+}$ in Lemma \ref{lemma:f_tilde} and prove that it is strictly decreasing in $\theta$. As we will see, this fact implies Lemmas \ref{L:tetdef} and \ref{lemma:lim_f_c=0}. This is a key point: we will see in Sections \ref{sec:theta<=theta*} and \ref{sec:theta>theta*} that the sign of $\tilde{f}_\theta(\infty)$ determines whether or not we have a non-diagonal scale invariant solution of the RDE.
\medskip
Recall from Corollary \ref{corr_f_inc_dec} that $f_{\theta,c}$ satisfies \eqref{f0_with_c} and \eqref{fn_with_c}. If we differentiate these equations with respect to $c$, we obtain
\begin{align}
&\frac{\partial}{\partial c} f_{\theta, c}(0) = \frac{2(1+\theta)}{\sqrt{1+8c (1+\theta)^2}},\label{f0_diff_c} \\
&\frac{\partial}{\partial c} f_{\theta, c}(n) = \frac{\frac{\partial}{\partial c} f_{\theta, c}(n-1) \cdot (2f_{\theta, c}(n-1) - \theta^{n-1}) - \theta^{2n-2} \cdot (1-\theta^2)}{\sqrt{\left(2f_{\theta, c}(n-1) - \theta^{n-1} \right)^2 - 4c \cdot \theta^{2n-2} \cdot (1-\theta^2)}}, \quad n \geq 1. \label{fn_diff_c}
\end{align}
For any $\theta \in (0,1)$, let us define
\begin{equation}\label{gamma_n_theta}
\gamma_n(\theta):=\frac{\theta^{2n-2} \cdot (1-\theta^2)}{\frac{2}{1+\theta} - \theta^{n-1}}=\frac{(1+\theta)^2 \theta^{n-1}}{ 1 +2 \sum_{k=1}^{n-1} \theta^{-k} }, \quad
n \geq 1.
\end{equation}
Note that the equality of the two formulas in \eqref{gamma_n_theta} holds for all $\theta \in (0,1)$, but the second formula for $\gamma_n(\theta)$ extends continuously to $\theta=1$ as well.
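For completeness, here is the short manipulation behind this equality: multiplying the numerator and the denominator of the first formula by $\frac{1+\theta}{(1-\theta)\theta^{n-1}}$, the numerator becomes $(1+\theta)^2\theta^{n-1}$, while the denominator becomes
\begin{equation*}
\left(\frac{2}{1+\theta} - \theta^{n-1}\right)\frac{1+\theta}{(1-\theta)\theta^{n-1}}
= \frac{2\theta^{1-n}-(1+\theta)}{1-\theta}
= 1 + 2\,\frac{\theta^{1-n}-1}{1-\theta}
= 1 + 2\sum_{k=1}^{n-1}\theta^{-k}.
\end{equation*}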
Recall the notations $\tilde{f}_\theta(n)$ and $\tilde{f}_\theta(\infty)$ of Definition \ref{def:f_tilde}.
\begin{lemma}[Formulas for $\tilde{f}_\theta$]\label{lemma:f_tilde}
We have
\begin{align}
\tilde{f}_\theta(0) &= 2(1+\theta), \label{f_tilde_0} \\
\tilde{f}_\theta(n) &= \tilde{f}_{\theta}(n-1) - \gamma_n(\theta)=2(1+\theta)-\sum_{k=1}^n \gamma_k(\theta), \quad n \geq 1, \label{f_tilde_n}\\
\tilde{f}_\theta(\infty) &= 2(1+\theta) -\sum_{k=1}^\infty \gamma_k(\theta) \stackrel{ \eqref{gamma_n_theta} }{=} 1-\theta^2 -\sum_{k=2}^\infty \gamma_k(\theta) . \label{lim_f_tilde}
\end{align}
\end{lemma}
\begin{Proof}
Substituting $c=0$ into \eqref{f0_diff_c}, we get \eqref{f_tilde_0}.
Similarly, if we substitute $c=0$ into \eqref{fn_diff_c} using that $f_{\theta, 0}(n) = \frac{1}{1+\theta}$ for every $n \in \mathbb{N}$ (see \eqref{f_theta_null}), we get \eqref{f_tilde_n}.
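Spelled out: at $c=0$ the square root in the denominator of \eqref{fn_diff_c} equals $2f_{\theta,0}(n-1)-\theta^{n-1} = \frac{2}{1+\theta}-\theta^{n-1}$ (a positive quantity, since $\frac{2}{1+\theta} > 1 \geq \theta^{n-1}$), so the substitution yields
\begin{equation*}
\tilde{f}_\theta(n) = \tilde{f}_\theta(n-1) - \frac{\theta^{2n-2}(1-\theta^2)}{\frac{2}{1+\theta}-\theta^{n-1}} = \tilde{f}_\theta(n-1) - \gamma_n(\theta).
\end{equation*}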
From \eqref{f_tilde_n} we get \eqref{lim_f_tilde} by the definition of $\tilde{f}_\theta(\infty)$ (c.f.\ \eqref{eq_tilde_f_theta_n}).
\end{Proof}
\begin{Proof}[of Lemmas \ref{L:tetdef} and \ref{lemma:lim_f_c=0}] Note that the function $ \theta \mapsto \tilde{f}_\theta(\infty)$ defined in \eqref{lim_f_tilde} coincides with the function $\theta \mapsto g(\theta)$ defined in Lemma \ref{L:tetdef}.
First we show that $\tilde{f}_\theta (\infty)$ is a decreasing function of $\theta \in [0,1)$.
We begin by observing that $\gamma_n(\theta)$ is an increasing function of $\theta \in [0,1)$ by the second formula for $\gamma_n(\theta)$ in \eqref{gamma_n_theta}.
Thus by the second formula for $\tilde{f}_\theta (\infty)$ in \eqref{lim_f_tilde} we obtain
that $\tilde{f}_\theta (\infty)$ is a decreasing function of $\theta \in [0,1) $.
The function $\theta \mapsto \tilde{f}_\theta (\infty)$ is also continuous on any compact sub-interval of $[0,1)$, since it
is the uniform limit of continuous functions.
In order to complete the proof of Lemmas \ref{L:tetdef} and \ref{lemma:lim_f_c=0}, we just have to show that $\tilde{f}_{1/2} (\infty)>0$ and $\tilde{f}_{1-\varepsilon} (\infty)<0$ for some $\varepsilon>0$. Indeed, $\tilde{f}_{1/2} (1)=\frac{3}{4}$ and
\begin{equation}
\tilde{f}_{1/2} (\infty) \stackrel{ \eqref{lim_f_tilde} }{=} 1-\left(\frac{1}{2} \right)^2 -\sum_{k=2}^\infty \frac{2^{2-2k} \cdot (1-\frac{1}{4})}{\frac{2}{1+1/2} - 2^{1-k}} \geq
\frac{3}{4}-\sum_{k=2}^\infty \frac{2^{2-2k} \cdot (1-\frac{1}{4})}{\frac{2}{1+1/2} - 1/2}= \frac{9}{20} >0.
\end{equation}
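For completeness, the value $\frac{9}{20}$ comes from summing the geometric series: $2^{2-2k} = 4^{1-k}$ and $\frac{2}{1+1/2} - \frac{1}{2} = \frac{5}{6}$, hence
\begin{equation*}
\sum_{k=2}^\infty \frac{2^{2-2k}\cdot\left(1-\frac14\right)}{\frac{2}{1+1/2} - \frac12}
= \frac{3/4}{5/6}\sum_{k=2}^\infty 4^{1-k}
= \frac{9}{10}\cdot\frac13 = \frac{3}{10},
\qquad \frac34 - \frac{3}{10} = \frac{9}{20}.
\end{equation*}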
On the other hand, $\tilde{f}_1 (2)=-4/3$ by \eqref{gamma_n_theta} and \eqref{f_tilde_n}, moreover $\theta \mapsto \tilde{f}_\theta(2)$ is a continuous function on $[0,1]$, therefore $\tilde{f}_{1-\varepsilon}(2)<0$ for some $\varepsilon>0$, from which $\tilde{f}_{1-\varepsilon} (\infty)<0$ follows,
since $\tilde{f}_\theta(\infty) \leq \tilde{f}_\theta(2)$ by \eqref{f_tilde_n} and \eqref{lim_f_tilde}. The proofs of Lemmas \ref{L:tetdef} and \ref{lemma:lim_f_c=0} are complete.
\end{Proof}
\subsection{\texorpdfstring{The $\theta \leq \theta^* $ case}{The theta leq theta* case}}\label{sec:theta<=theta*}
\paragraph{}The goal of this section is to prove Lemma \ref{lemma:theta<=theta*}.
\begin{lemma}[Lower bound on $f_{\theta,c}(n)$]\label{lemma:theta<theta*}
If $\theta \in \left(\frac 12, \theta^* \right]$ and $c \in \left(0, \frac{\theta \cdot (2\theta - 1)}{(1+\theta)^2}\right]$, then
\begin{equation}\label{f_1_strict_ineq}
f_{\theta,c}(1)- \tilde{f}_\theta(1) c > \frac{1}{1+\theta},
\end{equation}
\begin{equation}\label{induction_f_n_ineq}
f_{\theta,c}(n)-\tilde{f}_\theta(n) c \geq f_{\theta,c}(1)- \tilde{f}_\theta(1) c, \qquad n \geq 1.
\end{equation}
\end{lemma}
Before we prove Lemma \ref{lemma:theta<theta*}, let us deduce Lemma \ref{lemma:theta<=theta*} from it.
\begin{Proof}[of Lemma \ref{lemma:theta<=theta*}]
By Lemma \ref{lemma:lim_f_c=0} we have $\lim_{n \to \infty} \tilde{f}_\theta(n)= \tilde{f}_\theta(\infty) \geq 0$ for any $\theta \leq \theta^*$, where $\tilde{f}_\theta(\infty)$ is defined in Definition \ref{def:f_tilde}. Thus
\begin{equation}
\lim_{n \to \infty} f_{\theta,c}(n) \geq
\lim_{n \to \infty}\left( f_{\theta,c}(n)- \tilde{f}_\theta(n) c \right)
\stackrel{\eqref{induction_f_n_ineq} }{\geq}
f_{\theta,c}(1)- \tilde{f}_\theta(1) c\stackrel{ \eqref{f_1_strict_ineq} }{>}\frac{1}{1+\theta}
\end{equation}
holds for any $\theta \in \left(\frac 12, \theta^* \right]$ and $c \in \left(0, \frac{\theta \cdot (2\theta - 1)}{(1+\theta)^2}\right]$. The proof of Lemma \ref{lemma:theta<=theta*} is complete.
\end{Proof}
\begin{remark} We will prove \eqref{induction_f_n_ineq} by induction on $n$.
We have to start the induction from $n=1$, since it can be easily seen that the analogue of \eqref{f_1_strict_ineq} does not hold in the $n=0$ case, i.e., we have
$f_{\theta,c}(0)- \tilde{f}_\theta(0) c < \frac{1}{1+\theta}$.
\end{remark}
\begin{Proof}[of \eqref{f_1_strict_ineq}.] By \eqref{gamma_n_theta} and \eqref{f_tilde_n} we have $\tilde{f}_\theta(1)=1-\theta^2$, so by \eqref{fn_with_c} we need to show
$\frac{1}{2} \left( 1+\sqrt{(2 f_{\theta,c}(0)-1)^2 - 4c(1-\theta^2) } \right) -(1-\theta^2)c > \frac{1}{1+\theta}$. Applying a series of equivalent transformations, we see that we need
\begin{equation} f_{\theta,c}(0) > \frac{1}{2}\left( 1+ \sqrt{ \frac{(1-\theta)^2}{(1+\theta)^2} +8(1-\theta)c + 4 (1-\theta^2)^2 c^2 } \right).
\end{equation}
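(In more detail, the chain of equivalent transformations is the following: the claimed inequality is equivalent to
\begin{equation*}
\sqrt{(2 f_{\theta,c}(0)-1)^2 - 4c(1-\theta^2)} > \frac{1-\theta}{1+\theta} + 2(1-\theta^2)c ;
\end{equation*}
both sides are nonnegative and the right-hand side is positive, so squaring, expanding the right-hand side and adding $4c(1-\theta^2)$ to both sides gives $(2 f_{\theta,c}(0)-1)^2 > \frac{(1-\theta)^2}{(1+\theta)^2} + 8(1-\theta)c + 4(1-\theta^2)^2c^2$; taking square roots, using $2f_{\theta,c}(0)-1>0$ which follows from \eqref{f0_with_c}, yields the last display.)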
Substituting the formula \eqref{f0_with_c}
for $f_{\theta,c}(0)$ into this, we obtain after some rearrangements that we need to show
\begin{equation}gin{equation*}
\sqrt{1+8(1+\theta)^2c}-\theta > \sqrt{ (1-\theta)^2 +8(1-\theta)(1+\theta)^2c + 4 (1-\theta^2)^2(1+\theta)^2 c^2 }.
\end{equation*}
Taking the square of both sides of this inequality, introducing the notation $\alpha=(1+\theta)^2 c$ and rearranging a bit, we obtain that we need to show that
$8 \theta \alpha > 2\theta (\sqrt{1+8\alpha}-1)+4(1-\theta)^2 \alpha^2$ holds. Introducing the notation $\beta=\sqrt{1+8\alpha}-1$, we may equivalently rewrite this and obtain that we need to show $ \beta < \frac{4 \sqrt{\theta}}{1-\theta}-2$. Using the definition of $\alpha$ and $\beta$, our assumption $c \leq \frac{\theta \cdot (2\theta - 1)}{(1+\theta)^2}$ becomes $\beta \leq \sqrt{ (1-4\theta)^2 }-1$. Using that $\theta > \frac{1}{2}$ we see that we have $\beta \leq 4\theta -2$, so it is enough to show
$4\theta < \frac{4 \sqrt{\theta}}{1-\theta} $ to conclude the desired inequality $ \beta < \frac{4 \sqrt{\theta}}{1-\theta}-2$. Now $\theta < \frac{ \sqrt{\theta}}{1-\theta} $ does hold for all $\theta \in (0,1)$ (therefore it holds for $\theta \in \left(\frac 12, \theta^* \right]$), completing the proof of \eqref{f_1_strict_ineq}.
\end{Proof}
\begin{Proof}[of \eqref{induction_f_n_ineq}.]
We prove \eqref{induction_f_n_ineq} by induction on $n$. The $n=1$ case trivially holds. Let $n \geq 2$. Let us denote $q=\tilde{f}_\theta(n-1) c+ f_{\theta,c}(1)- \tilde{f}_\theta(1) c$. By our induction hypothesis we know that $f_{\theta,c}(n-1)\geq q$ holds, and we want to show that \eqref{induction_f_n_ineq} also holds, or, equivalently, we want
$f_{\theta,c}(n)\geq q -\gamma_n(\theta)c $ to hold
(c.f.\ \eqref{gamma_n_theta}, \eqref{f_tilde_n}). Let us note that we have
\begin{equation}\label{q_gamma_theta_ineq}
q -\gamma_n(\theta)c \stackrel{\eqref{f_tilde_n}, \eqref{f_1_strict_ineq}}{\geq} \tilde{f}_\theta(n) c +\frac{1}{1+\theta} \stackrel{(*)}{\geq} \frac{1}{1+\theta},
\end{equation}
where $(*)$ holds since our assumption $\theta \leq \theta^*$ and Lemma \ref{lemma:lim_f_c=0} together imply $\tilde{f}_\theta(\infty)\geq 0$ and the formulas \eqref{gamma_n_theta}, \eqref{f_tilde_n} and
\eqref{lim_f_tilde} together imply $\tilde{f}_\theta(n)\geq \tilde{f}_\theta(\infty)$.
Using our induction hypothesis and \eqref{fn_with_c}, we see that it is enough to prove
\begin{equation}\label{enough_to_prove_ind_q}
\frac{1}{2} \left( \theta^{n-1} +\sqrt{ (2q-\theta^{n-1})^2-4c\gamma_n(\theta)\left(\frac{2}{1+\theta}-\theta^{n-1} \right) } \right) \geq q-\gamma_n(\theta)c
\end{equation}
in order to arrive at the desired $f_{\theta,c}(n)\geq q -\gamma_n(\theta)c $. We will now show \eqref{enough_to_prove_ind_q}.
We first show that the expression under the square root is non-negative:
\begin{multline}\label{expr_under_sq_root_non-neg}
(2q-\theta^{n-1})^2-4c\gamma_n(\theta)\left(\frac{2}{1+\theta}-\theta^{n-1} \right) \stackrel{ \eqref{q_gamma_theta_ineq} }{\geq}
\left( \left(\frac{2}{1+\theta}-\theta^{n-1}\right) + 2c\gamma_n(\theta) \right)^2- \\ 4c\gamma_n(\theta)\left(\frac{2}{1+\theta}-\theta^{n-1} \right)=
\left(\frac{2}{1+\theta}-\theta^{n-1}\right)^2 +\left( 2c\gamma_n(\theta) \right)^2 \geq 0.
\end{multline}
Using this
we can rearrange \eqref{enough_to_prove_ind_q} and see that it is equivalent to
\begin{equation}
(2q-\theta^{n-1})^2-4c\gamma_n(\theta)\left(\frac{2}{1+\theta}-\theta^{n-1} \right) \geq \left( (2q-\theta^{n-1}) -2 \gamma_n(\theta)c \right)^2,
\end{equation}
which is in turn equivalent to
$2\left(q-\frac{1}{1+\theta}\right) \geq \gamma_n(\theta)c $, and
this inequality indeed holds by \eqref{q_gamma_theta_ineq}. The proof of the induction step is complete.
\end{Proof}
The proof of Lemma \ref{lemma:theta<theta*} is complete.
\begin{remark} Our assumption
$c \in \left(0, \frac{\theta \cdot (2\theta - 1)}{(1+\theta)^2}\right]$ that appears in the statement of Lemma \ref{lemma:theta<theta*} (or something similar to it) seems
indispensable, because numerical simulations suggest that the conclusions of Lemma \ref{lemma:theta<theta*} do not hold for big values of $c$.
\end{remark}
\subsection{\texorpdfstring{The $\theta> \theta^*$ case}{The theta>theta* case}}\label{sec:theta>theta*}
\paragraph{}In this section we prove Lemma \ref{lemma:theta>theta*}. First we show Lemma \ref{lemma:f_lower_bound}, which implies that $f_{\theta, c}(\infty)$ is large if $c$ is large. We will also argue that $f_{\theta, c}(\infty)<\frac{1}{1+\theta}$ if
$\theta > \theta^*$ and $c$ is small.
We then combine these facts to show that there exists a $\hat{c} >0$ for which $ f_{\theta, \hat{c}}(\infty) = \frac{1}{1+\theta}$. After that we will see in Lemmas \ref{lemma:conditions_fulfil} and \ref{lemma:every_alpha} that this $f_{\theta, \hat{c}}$ satisfies the conditions of Lemma \ref{lemma:f_conditions} (and therefore it is the signature of a non-diagonal solution $\hat{\rho}^{(2)} \in \mathcal{M}^{(2)}_{\theta}$ of the bivariate RDE \eqref{bivar_RDE}).
\begin{lemma}[Lower bound on $f_{\theta, c}$]\label{lemma:f_lower_bound}
If $\theta \in (0,1)$ and $c \ge 4$, then
\begin{equation}\label{lower_bound_on_f}
f_{\theta, c}(n) \ge \frac{\theta^n}{2} + \sqrt{ \left( \frac{1}{2} +\theta^{2n} \right) \cdot c}, \qquad n \in \mathbb{N}.
\end{equation}
\end{lemma}
\begin{Proof}
We prove \eqref{lower_bound_on_f} by induction on $n$. The $n=0$ case holds, since
\begin{equation}
f_{\theta, c}(0) \stackrel{\eqref{f0_with_c}}{\geq } \frac{1+\sqrt{8c(1+\theta)^2}}{2(1+\theta)} \geq \frac{1}{4} + \sqrt{2 c}
\stackrel{(*)}{\geq}
\frac{1}{2} + \sqrt{ \frac{3}{2} c} =\frac{\theta^0}{2} + \sqrt{ \left( \frac{1}{2} +\theta^{2\cdot 0} \right) \cdot c} ,
\end{equation}
where $(*)$ holds if $c \geq 4$. Now assume that $n \geq 1$ and \eqref{lower_bound_on_f} holds for $n-1$; we want to deduce that \eqref{lower_bound_on_f}
also holds for $n$:
\begin{multline*}
f_{\theta, c}(n) \stackrel{\eqref{fn_with_c}}{=}
\frac{\theta^{n-1} + \sqrt{(2f_{\theta, c}(n-1) - \theta^{n-1})^2 - 4c \cdot \theta^{2n-2} \cdot (1-\theta^2)}}{2}
\stackrel{(**)}{\geq} \\
\frac{\theta^{n} + \sqrt{ 4 (\frac{1}{2} + \theta^{2(n-1)} ) c - 4c \cdot \theta^{2n-2} \cdot (1-\theta^2)}}{2} =
\frac{\theta^n}{2} + \sqrt{ \left( \frac{1}{2} +\theta^{2n} \right) \cdot c},
\end{multline*}
where in $(**)$ we used the induction hypothesis and also that $\theta^{n-1}\geq \theta^n$.
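Explicitly, the estimate under the square root in $(**)$ is the following (using the induction hypothesis in the first step):
\begin{equation*}
\left(2f_{\theta, c}(n-1) - \theta^{n-1}\right)^2 - 4c \cdot \theta^{2n-2} \cdot (1-\theta^2)
\geq 4\left(\tfrac{1}{2} + \theta^{2n-2}\right)c - 4c\,\theta^{2n-2}(1-\theta^2)
= 4\left(\tfrac{1}{2} + \theta^{2n}\right)c .
\end{equation*}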
\end{Proof}
\begin{lemma}[$f_{\theta, c}$ satisfies necessary conditions]\label{lemma:conditions_fulfil}
If $\theta \in (0,1)$ and $c \ge 0$ are arbitrary, then $f_{\theta, c}$ satisfies conditions (iii), (iv) and (v) of Lemma \ref{lemma:f_conditions}, i.e.
\begin{enumerate}[1.]
\item $f_{\theta, c}(n)$ is non-increasing in $n$,
\item $(1+\theta) \cdot f_{\theta, c}(0) \le 2 f_{\theta, c}(1)$,
\item $(1+\theta) \cdot f_{\theta, c}(n) \le \theta \cdot f_{\theta, c}(n-1) + f_{\theta, c}(n+1)$ for every $n \geq 1$.
\end{enumerate}
\end{lemma}
\begin{Proof}
$ $
\begin{enumerate}[1.]
\item We have already seen this in Corollary \ref{corr_f_inc_dec}.
\item Recalling the notation introduced at the beginning of Section \ref{sec:f_unique} (see in particular \eqref{g_recursion} and \eqref{def_eq_g}), we want to show
\begin{equation}\label{g_measure_requirement}
(1+\theta)g_{\theta,c}(0) \leq 2 \theta \Psi_{\theta,c}\big(g_{\theta,c}(0)\big).
\end{equation}
Using the definition \eqref{def_eq__psi_theta_c} of $\Psi_{\theta,c}$ and $\mathcal{D}_{\theta,c}$ one deduces that
\begin{align}
\label{psi_incr_concave} \text{the function $x \mapsto 2 \theta \Psi_{\theta,c}(x)$ is increasing and concave on $\mathcal{D}_{\theta,c}$,}\\
\label{psi_slope_big} \frac{\mathrm{d}}{\mathrm{d}x} 2 \theta \Psi_{\theta,c}(x) = 2 \frac{2x-1}{\sqrt{(2x-1)^2 -4c\cdot(1-\theta^2) }} > 2 >1+\theta , \qquad x \in \mathcal{D}_{\theta,c},\\
\label{psi_bigger_than_that_line_for_large_x} \lim_{x \to \infty} 2 \theta \Psi_{\theta,c}(x) - (1+\theta)x = +\infty.
\end{align}
It follows from \eqref{psi_incr_concave} and \eqref{psi_slope_big} that
\begin{equation}\label{atmost_one_solutionn}
\text{ the equation $2 \theta \Psi_{\theta,c}(x)=(1+\theta)x$ has at most one solution in $\mathcal{D}_{\theta,c}$.}
\end{equation}
Let $y_0:=\sqrt{(1-\theta^2)c}+1/2$ denote the left endpoint of $\mathcal{D}_{\theta, c}$.
One easily checks that $ 2\theta \Psi_{\theta,c}(y_0)\geq (1+\theta) y_0$ holds if and only if $c \leq \frac{1-\theta}{4(1+\theta)^3}$ holds. We will prove \eqref{g_measure_requirement} by treating the cases $ 2 \theta \Psi_{\theta,c}(y_0) \geq (1+\theta) y_0$ and $ 2 \theta \Psi_{\theta,c}(y_0)< (1+\theta)y_0$ separately.
If $ 2 \theta \Psi_{\theta,c}(y_0) \geq (1+\theta) y_0$ then $ 2 \theta \Psi_{\theta,c}(x) \geq (1+\theta)x $ for every $x \in \mathcal{D}_{\theta,c}$ follows from \eqref{psi_slope_big}, and in particular \eqref{g_measure_requirement} holds.
If $ 2 \theta \Psi_{\theta,c}(y_0)< (1+\theta)y_0$ then this inequality, \eqref{psi_bigger_than_that_line_for_large_x} and \eqref{atmost_one_solutionn} together imply that
there exists a unique $\tilde{x} \in \mathcal{D}_{\theta,c}$ such that
$2\theta \Psi_{\theta,c}(\tilde{x})=(1+\theta)\tilde{x}$, moreover we obtain using \eqref{psi_slope_big} that $ \tilde{x} \leq x$ implies $ 2\theta \Psi_{\theta,c}(x) \geq (1+\theta)x $.
One easily finds that $\tilde{x}=\frac{1+\sqrt{4(\theta+3)(\theta+1)c+1 }}{\theta+3}$, thus we only need to check $\tilde{x} \leq g_{\theta,c}(0)$, i.e., by the definition \eqref{g_recursion} of $g_{\theta,c}(0)$ we need to check that $\alpha_\theta(c)\leq \beta_\theta(c)$ holds for all $c \geq 0$, where
\begin{equation}
\alpha_\theta(c):= \frac{1+\sqrt{4(\theta+3)(\theta+1)c+1 }}{\theta+3}, \qquad \beta_\theta(c):= \frac{1+\sqrt{1+8(1+\theta)^2 c} }{2(\theta+1)}.
\end{equation}
The inverse functions of both $ c \mapsto \alpha_\theta(c)$ and $ c \mapsto \beta_\theta(c)$ are quadratic polynomials:
\begin{equation} \alpha^{-1}_\theta(y)=\frac{((\theta+3)y-1 )^2-1}{4(\theta+3)(\theta+1)}, \qquad \beta^{-1}_\theta(y)=\frac{ (2(\theta+1)y-1)^2-1 }{8(\theta+1)^2}.
\end{equation}
It is enough to check that $\alpha^{-1}_\theta(y)\geq \beta^{-1}_\theta(y)$ holds for all $y \in \mathbb{R}$, and indeed we have $\alpha^{-1}_\theta(y)-\beta^{-1}_\theta(y)=\frac{(1-\theta)y^2}{4(\theta+1)}$, which is nonnegative for all $\theta \in (0,1], y \in \mathbb{R}$.
\item We have to show $f_{\theta, c}(n) - f_{\theta, c}(n+1) \le \theta \cdot (f_{\theta, c}(n-1) - f_{\theta, c}(n))$
for every $n \ge 1$. Rewriting this using the notation introduced in Section \ref{sec:f_unique} as well as \eqref{def_eq_g}, we need to show that the inequality
$g_{\theta,c}(n)-\theta g_{\theta,c}(n+1)\leq g_{\theta,c}(n-1)-\theta g_{\theta,c}(n)$ holds. Since $g_{\theta,c}(n+1)=\Psi_{\theta,c}(g_{\theta,c}(n))$ and
$g_{\theta,c}(n)=\Psi_{\theta,c}(g_{\theta,c}(n-1))$ by \eqref{g_recursion}, moreover we know $g_{\theta,c}(n)\geq g_{\theta,c}(n-1)$ (c.f.\ \eqref{g_increasing}), it is enough
to show that $\varphi_{\theta,c}(x)$ is a decreasing function of $x$, where $\varphi_{\theta,c}(x):=x-\theta \Psi_{\theta,c}(x)$. This is indeed the case, since
we have $\varphi'_{\theta,c}(x)=1-\frac{2x-1}{\sqrt{(2x-1)^2 -4c(1-\theta^2) }}<0$ for every $x $ in the domain $\mathcal{D}_{\theta,c}$ of $\varphi_{\theta,c}(\cdot)$.
\end{enumerate}
\end{Proof}
\begin{lemma}[Upper bound on $f_{\theta, \hat{c}}(0)$]\label{lemma:every_alpha}
If $\theta \in (0,1)$ and $f_{\theta, \hat{c}}(\infty) = \frac{1}{1+\theta}$, then $f_{\theta, \hat{c}}(0) \le 1$.
\end{lemma}
\begin{Proof} The conditions of Lemma \ref{lemma_properties_of_f_theta_c} are fulfilled for $f_{\theta, \hat{c}}$, thus we may use
\eqref{f_theta_c_0_in_interval} to conclude $f_{\theta, \hat{c}}(0) \leq \frac{1}{1+\theta} \vee \frac{2 \theta}{1+\theta} \leq 1$.
\end{Proof}
\begin{Proof}[of Lemma \ref{lemma:theta>theta*}]
We will show that the function $c \mapsto f_{\theta, c}(\infty) -\frac{1}{1+\theta} $ takes both positive and negative values. This is enough to conclude the proof of the first statement of Lemma \ref{lemma:theta>theta*}, since this function is continuous by Lemma \ref{lemma:f_continuous}.
We know from \eqref{f_theta_null} that $ f_{\theta, 0}(n) = \frac{1}{1+\theta}$ for all $n \in \mathbb{N}$.
By the $\theta > \theta^*$ case of Lemma \ref{lemma:lim_f_c=0} we have $\tilde{f}_\theta(\infty) = \lim\limits_{n \to \infty} \tilde{f}_\theta (n) < 0$, therefore we can fix an
$n \in \mathbb{N}$ such that $\tilde{f}_\theta (n) < 0$.
Recall from Definition \ref{def:f_tilde} that $\tilde{f}_\theta (n)$ denotes $ \frac{\partial}{\partial c} f_{\theta, c}(n) \big|_{c=0_+}$.
We can thus fix a small but positive value of $c$ such that $f_{\theta, c}(n) < f_{\theta, 0}(n) =\frac{1}{1+\theta}$. Now $f_{\theta, c}(\infty) < \frac{1}{1+\theta} $ follows from the fact that $f_{\theta, c}(n)$ decreases as $n$ increases (c.f.\ \eqref{f_decreasing}).
Next we show that there exists a $c > 0$ for which $f_{\theta, c}(\infty) > \frac{1}{1+\theta}$. This follows from Lemma \ref{lemma:f_lower_bound}, since for $c \ge 4$ we have
\begin{equation}
\lim\limits_{n \to \infty} f_{\theta, c}(n) \stackrel{\eqref{lower_bound_on_f}}{\ge} \lim\limits_{n \to \infty} \left(
\frac{\theta^n}{2} + \sqrt{ \left( \frac{1}{2} +\theta^{2n} \right) \cdot c} \right) = \sqrt{\frac{1}{2}c} > \frac{1}{1+\theta}.
\end{equation}
Therefore there exists $\hat{c} > 0$ such that $f_{\theta, \hat{c}}(\infty) = \frac{1}{1+\theta}$.
Now we prove the second statement of Lemma \ref{lemma:theta>theta*}. Since $ f_{\theta, \hat{c}}(\infty) = \frac{1}{1+\theta}$, condition (ii) of Lemma \ref{lemma:f_conditions} holds and condition (i) also holds by Lemma \ref{lemma:every_alpha}. By Lemma \ref{lemma:conditions_fulfil} we also know that conditions (iii), (iv) and (v) of Lemma \ref{lemma:f_conditions} are true. So we can conclude that $f_{\theta, \hat{c}}$ satisfies all of the conditions of Lemma \ref{lemma:f_conditions}.
\end{Proof}
\begin{remark}\label{remark:conj_unique}
In Figure \ref{fig:85} we can see $ f_{0.85, c}(\infty)$ as a function of $c$, where $c$ ranges over the interval $\left[0, \frac{0.85 \cdot (2 \cdot 0.85 - 1)}{(1+0.85)^2}\right]$. The horizontal red line is the constant $\frac{1}{1+\theta} \stackrel{\theta = 0.85}{=} \frac{20}{37}$. We see that the function is first decreasing, then increasing, and goes to infinity; thus there exists $\hat{c} > 0$ for which $ f_{0.85, \hat{c}}(\infty) = \frac{20}{37}$. We get a similar picture for every $\theta \in \left(\theta^*, 1\right)$.
We also note that Figure \ref{fig:85} suggests that Conjecture \ref{conj:unique} holds, since this conjecture is equivalent to the fact that there exists exactly one $\hat{c}>0$ for which $ f_{\theta, \hat{c}}(\infty) = \frac{1}{1+\theta}$.
\end{remark}
\begin{figure}[!ht]
\centering
\includegraphics[scale=0.31]{lim_f_in_c_theta=85.png}
\caption{$ f_{0.85, c}(\infty)$}
\label{fig:85}
\end{figure}
\begin{thebibliography}{RST19}
\bibitem[AB05]{AB05}
D.J.~Aldous and A.~Bandyopadhyay.
A survey of max-type recursive distributional equations.
\emph{Ann.\ Appl.\ Probab.}~15(2) (2005), 1047--1110.
\bibitem[Ald00]{Ald00}
D.J.~Aldous.
The percolation process on a tree where infinite clusters are frozen.
\emph{Math.\ Proc.\ Cambridge Philos.\ Soc.}~128 (2000), 465--477.
\bibitem[BT01]{BT01}
J.~van den Berg, B.~T\'oth.
A signal-recovery system: asymptotic properties, and construction of an
infinite-volume process.
\emph{Stochastic Process.\ Appl.}~96(2) (2001), 177--190.
\bibitem[MSS18]{MSS18}
T.~Mach, A.~Sturm, and J.M.~Swart.
A new characterization of endogeny.
\emph{Math.\ Phys.\ Anal.\ Geom.}~21(4) (2018), no.~30.
\bibitem[MSS20]{MSS20}
T.~Mach, A.~Sturm, and J.M.~Swart.
Recursive tree processes and the mean-field limit of stochastic flows.
\emph{Electron.\ J.\ Probab.}~25 (2020) paper No.~61, 1--63.
\bibitem[RST19]{RST19}
B.~R\'ath, J.M.~Swart, and T.~Terpai.
Frozen percolation on the binary tree is nonendogenous.
\emph{Ann.\ Probab.}~49(5) (2021), 2272--2316.
\bibitem[SSS14]{SSS14}
E.~Schertzer, R.~Sun and J.M.~Swart.
Stochastic flows in the Brownian web and net.
\emph{Mem.\ Am.\ Math.\ Soc.} Vol.~227 (2014), Nr.~1065.
\end{thebibliography}
\end{document}
\begin{document}
\title[Endomorphism ring of the trivial module]
{The endomorphism ring of the trivial module in a localized category}
\author[Jon F. Carlson]{Jon F. Carlson}
\address{Department of Mathematics, University of Georgia,
Athens, GA 30602, USA}
\email{[email protected]}
\thanks{Research partially supported by
Simons Foundation grant 054813-01}
\keywords{finite group representations, stable module category, idempotent
modules, Verdier localization}
\begin{abstract}
Suppose that $G$ is a finite group and $k$ is a field of characteristic
$p >0$. Let ${\mathcal{M}}$ be the thick tensor ideal of finitely generated
modules whose support variety is in a fixed subvariety $V$ of the projectivized
prime ideal spectrum $\operatorname{Proj}\nolimits \operatorname{H}\nolimits^*(G,k)$. Let ${\mathcal{C}}$ denote the Verdier
localization of the stable module category $\operatorname{{\bf stmod}}\nolimits(kG)$ at ${\mathcal{M}}$. We
show that if $V$ is a finite collection of closed points and if
the $p$-rank of every maximal elementary abelian $p$-subgroup of $G$
is at least 3, then the endomorphism ring of the trivial
module in ${\mathcal{C}}$ is a local ring whose unique maximal ideal is infinitely
generated and nilpotent. In addition, we show an example where the
endomorphism ring in ${\mathcal{C}}$ of a compact object is not finitely presented as a
module over the endomorphism ring of the trivial module.
\end{abstract}
\maketitle
\section{Introduction}
Suppose that $G$ is a finite group and that $k$ is a field of characteristic
$p > 0$. The stable category $\operatorname{{\bf stmod}(\text{$kG$})}\nolimits$ of finitely generated $kG$-modules
is a tensor triangulated category. A thick tensor ideal in $\operatorname{{\bf stmod}(\text{$kG$})}\nolimits$
is determined by the support variety of its objects. Hence, for any
closed subvariety $V$ in $V_G(k) = \operatorname{Proj}\nolimits \HH{*}{G}{k}$, the full subcategory
${\mathcal{M}}_V$ of all
finitely generated $kG$-modules $M$ with $V_G(M) \subset V$, is a
thick subcategory that is closed under tensor product with any
finitely generated $kG$-module. Moreover, every thick tensor ideal can
be defined in this or a similar way.
Associated to ${\mathcal{M}}_V$ is a distinguished triangle
\[
\xymatrix{
{} \ar[r] & {\mathcal{E}}_V \ar[r] & k \ar[r] & {\mathcal{F}}_V \ar[r] & {}
}
\]
where ${\mathcal{E}}_V$ and ${\mathcal{F}}_V$ are idempotent $kG$-modules that are almost
always infinitely generated.
In addition, ${\mathcal{E}}_V \otimes M \cong M$ in the stable category if and
only if $V_G(M) \subseteq V$. Tensoring with
${\mathcal{F}}_V$ is the localizing functor
to the Verdier localization ${\mathcal{C}}_V$ of $\operatorname{{\bf stmod}(\text{$kG$})}\nolimits$ at ${\mathcal{M}}_V$. Thus the
localized category ${\mathcal{C}}_V$ is embedded in the stable category
$\operatorname{{\bf StMod}(\text{$kG$})}\nolimits$ of all $kG$-modules.
In the localized category ${\mathcal{C}}_V$, the trivial
module $k$ is identified with ${\mathcal{F}}_V$ and the ring $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(k)$ is
isomorphic to $\operatorname{End}\nolimits_{\operatorname{{\bf StMod}(\text{$kG$})}\nolimits}({\mathcal{F}}_V)$. For $kG$-modules $M$ and $N$,
the group $\operatorname{Hom}\nolimits_{{\mathcal{C}}_V}(M,N)$ is a module over $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(k)$.
This suggests that modules in the category ${\mathcal{C}}_V$ can be distinguished
by invariants such as the annihilators of their endomorphism rings or
cohomology rings. Such is the essence of the support variety theory that
classifies the thick tensor ideals in $\operatorname{{\bf stmod}}\nolimits(kG)$. In subsequent work
\cite{Clocvar}, we show that there is such a theory that is nontrivial in
the case of the colocalized category generated by ${\mathcal{E}}_V$. However,
in the localized category, there are additional complications, as we
see in this paper.
In this paper, we complete the task of characterizing $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(k)$
in the case that the maximal elementary abelian subgroups of
$G$ have sufficiently large $p$-rank and
the variety $V$ is a finite collection of closed points.
We prove in such cases that the
ring $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(k)$ is a local ring whose maximal ideal is
infinitely generated and nilpotent.
Our study relies on earlier results in \cite{Ctriv} that prove the
special case in which $V$ is a single closed point in $V_G(k)$ and
$G$ is elementary abelian. More generally, that paper shows that
the nonpositive Tate cohomology ring of any finite group $H$ can be
realized as the endomorphism ring of the trivial
module in the Verdier localization ${\mathcal{C}}_V$ where $V$ is a single point
in the spectrum of the cohomology ring of $G = C \times H$ for $C$
a cyclic group of order $p$. The proof of our main theorem also
requires the fact that nilpotence
in cohomology can be detected on restrictions to
elementary abelian $p$-subgroups of a finite group \cite{CQ}. This
theorem does not hold for $G$ a general finite group scheme, and hence
the proof of our main theorem does not extend to that realm.
The next section presents an introduction and references to the
categories and recalls some theorems on support varieties.
In the three sections that follow, we review the main theorem of \cite{Ctriv}
and extend the result to the case in which the variety $V$ is a
finite collection of more than one closed point. In section 5,
we prove the main theorem for any finite group whose maximal
elementary abelian $p$-subgroups have $p$-rank at least three.
In section 6, we look at the restriction of the module ${\mathcal{F}}_V$ from
an elementary abelian group to one of its proper subgroups. This result
is used in the final section to show by example that even for a
compact object $M$ in $\operatorname{{\bf stmod}(\text{$kG$})}\nolimits$, it is possible that $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(M)$
is not finitely generated over $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(k)$. In the other direction,
we show also in Section 7 that if $V$ is the subvariety of all
homogeneous prime ideals that contain a single non-nilpotent element
of cohomology, then for any compact object $M$, $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(M)$
is finitely generated over $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(k)$. In the final section
we present an example that shows more of the structure of the
idempotent module ${\mathcal{F}}_V$ in the case of groups that are not elementary
abelian $p$-groups.
We would like to thank Paul Balmer for helpful conversations
and information.
\section{Background}
In this section, we review some background. As references, we refer the
reader to \cite{CTVZ} or \cite{Bbook}
for information on the cohomology of finite groups
and support varieties in this context. For information on triangulated
categories see \cite{Neem}. A lot of the background material is
summarized very well in the paper \cite{BF} of Balmer and Favi.
Throughout the paper, we let $G$ be a finite group and $k$ a field of
characteristic $p > 0$. For convenience, we assume that $k$ is algebraically
closed. Recall that $kG$ is a Hopf algebra so that if $M$ and $N$ are
$kG$-modules, then so is $M \otimes_k N$. In general, we write
$\otimes$ for $\otimes_k$.
Let $\operatorname{{\bf mod}(\text{$kG$})}\nolimits$ denote the category of finitely generated $kG$-modules
and $\operatorname{{\bf Mod}(\text{$kG$})}\nolimits$ the category of all $kG$-modules. Let $\operatorname{{\bf stmod}(\text{$kG$})}\nolimits$ be the stable
category of finitely generated $kG$-modules modulo projectives. The objects in
$\operatorname{{\bf stmod}(\text{$kG$})}\nolimits$ are the same as those in $\operatorname{{\bf mod}(\text{$kG$})}\nolimits$. If $M$ and $N$ are finitely
generated $kG$-modules, then the group of morphisms
from $M$ to $N$ in the stable category is
the quotient $\operatorname{Hom}\nolimitsul_{kG}(M,N) = \operatorname{Hom}\nolimits_{kG}(M,N)/\operatorname{PHom}\nolimits_{kG}(M,N)$ where
$\operatorname{PHom}\nolimits_{kG}(M,N)$ is the set of homomorphisms that factor through projective
modules. The definition of the stable category of all modules
$\operatorname{{\bf StMod}(\text{$kG$})}\nolimits$ is similar.
The stable categories $\operatorname{{\bf stmod}(\text{$kG$})}\nolimits$ and $\operatorname{{\bf StMod}(\text{$kG$})}\nolimits$ are tensor triangulated
categories. The tensor is the one given by the Hopf algebra structure on
$kG$ as mentioned above. Triangles correspond roughly to exact sequences
in the module categories. The translation functor for both is $\Omega^{-1}$,
so that a triangle looks like
\[
\xymatrix{
A \ar[r] & B \ar[r] & C \ar[r] & \Omega^{-1}(A)
} \]
where for some projective module $P$ there is an exact sequence
$0 \to A \to B \oplus P \to C \to 0$. Here, $\Omega^{-1}(M)$
is the cokernel of an injective hull $M \hookrightarrow I$ for $I$ injective.
The cohomology ring $\operatorname{H}\nolimits^*(G,k)$ is a finitely generated,
graded-commutative algebra over $k$. Let $V_G(k) = \operatorname{Proj}\nolimits(\operatorname{H}\nolimits^*(G,k))$
be its projectivized prime ideal spectrum, the collection of all
homogeneous prime ideals with the Zariski topology. The support
variety of a finitely generated $kG$-module $M$ is the closed subvariety
consisting of all homogeneous prime ideals that contain the
annihilator of $\operatorname{Ext}\nolimits^*_{kG}(M,M)$ in $\operatorname{H}\nolimits^*(G,k)$. The support variety
of an infinitely generated $kG$-module is a subset of $V_G(k)$,
not necessarily closed (see \cite{BCR2}).
If $H$ is a subgroup of $G$, the restriction functor $\operatorname{{\bf mod}(\text{$kG$})}\nolimits \to \operatorname{{\bf mod}(\text{$kH$})}\nolimits$
induces a map on cohomology rings $\operatorname{res}\nolimits_{G,H}:\operatorname{H}\nolimits^*(G,k) \to
\operatorname{H}\nolimits^*(H,k)$ and also a map on spectra $\operatorname{res}\nolimits^*_{G,H}:V_H(k)
\to V_G(k)$.
For much of the next few sections, we assume
that $G = \langle g_1, \dots, g_r \rangle$ is an
elementary abelian $p$-group of order $p^r$.
In this case, we set $X_i = g_i -1 \in kG$ for $i = 1, \dots, r.$
Then $X_i^p = 0$ and $kG \cong k[X_1, \dots, X_r]/(X_1^p, \dots, X_r^p)$.
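For completeness, the vanishing $X_i^p = 0$ is the usual characteristic-$p$ computation: since $kG$ is commutative (as $G$ is abelian) and $k$ has characteristic $p$, the $p$-th power map is additive, so $X_i^p = (g_i - 1)^p = g_i^p - 1 = 1 - 1 = 0$.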
With this structure in mind, we make the following definition.
\begin{defi} \label{def:flatalg}
Suppose that $kG$ is the group algebra of an elementary abelian $p$-group.
A flat subalgebra of $kG$ is the image in $kG$ of a flat map
$\alpha: k[t_1, \dots, t_s]/(t_1^p, \dots, t_s^p) \to kG$. We say
a flat subalgebra is maximal if $s= r-1$ where $r$ is the $p$-rank
of $G$.
\end{defi}
By definition, a map $\alpha$, as above, is flat if $kG$ is a
projective module over the image of the ring
$k[t_1, \dots, t_s]/(t_1^p, \dots, t_s^p)$.
This happens if and only if the images $\alpha(t_1), \dots, \alpha(t_s)$,
in $\operatorname{Rad}\nolimits(kG)/\operatorname{Rad}\nolimits^2(kG)$ are $k$-linearly independent. In particular,
we have the following.
\begin{lemma} \label{lem:complement}
Suppose that $G$ is an elementary abelian $p$-group of $p$-rank $r$.
Let $\alpha: k[t_1, \dots, t_s]/(t_1^p, \dots, t_s^p) \to kG$ be a
flat map. Then there exists another flat map
$\beta: k[t_1, \dots, t_{r-s}]/(t_1^p, \dots, t_{r-s}^p) \to kG$
such that $kG$ is the internal tensor product $kG = A \otimes B$,
where $A$ and $B$ are the images of $\alpha$ and $\beta$, respectively.
\end{lemma}
\begin{proof}
Choose elements $m_1, \dots, m_{r-s}$ in $kG$, such that the classes
modulo $\operatorname{Rad}\nolimits^2(kG)$ of
$\alpha(t_1), \dots, \alpha(t_s), m_1, \dots, m_{r-s}$
form a basis for $\operatorname{Rad}\nolimits(kG)/\operatorname{Rad}\nolimits^2(kG)$. Then let $\beta$ be defined
by $\beta(t_i) = m_i$ for $i = 1, \dots, r-s$. Then $AB = kG$, by
Nakayama's Lemma and a dimension argument.
\end{proof}
If $\alpha$, as above, is a flat map, then
the multiplicative subgroup generated
by the images $\alpha(1+t_i)$ is called a shifted subgroup of $kG$ in
other papers. It is an elementary abelian $p$-subgroup of the group
of units of $kG$. In the case that $s = 1$, we have an
example of a $\pi$-point.
\begin{defi} \label{defi:pipoint} \cite{FP}
A $\pi$-point is a flat map $\alpha_K: K[t]/(t^p) \to KG_K$
where $K$ is an extension of the field $k$. If $G$
is a finite group scheme that is not
elementary abelian, then we assume also that $\alpha_K$ factors
by flat maps through the group algebra of a unipotent abelian subgroup scheme of $G_K$.
Two $\pi$-points $\alpha_K: K[t]/(t^p) \to KG_K$ and
$\beta_L: L[t]/(t^p) \to LG_L$ are equivalent if for any finitely generated
$kG$-module $M$, the restriction $\alpha_K^*(K \otimes M)$ is
projective if and only if $\beta^*_L(L \otimes M)$ is projective.
\end{defi}
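For instance (a standard example in the elementary abelian case, recorded here for concreteness; the scalars $\lambda_i$ are our own notation): if $G$ is elementary abelian of $p$-rank $r$ and $\lambda_1, \dots, \lambda_r \in k$ are not all zero, then the map $\alpha: k[t]/(t^p) \to kG$ with $\alpha(t) = \lambda_1 X_1 + \dots + \lambda_r X_r$ is well defined, since $(\sum_i \lambda_i X_i)^p = \sum_i \lambda_i^p X_i^p = 0$, and it is flat because its image is nonzero in $\operatorname{Rad}\nolimits(kG)/\operatorname{Rad}\nolimits^2(kG)$; hence it is a $\pi$-point defined over $k$.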
The set of equivalence classes of $\pi$-points has a partial order
coming from specializations, and that ordering gives the set a
topology. With this in mind we have the following, which holds for
any finite group scheme $G$.
\begin{thm} \label{thm:rankvar} \cite{FP}
The space of equivalence classes of $\pi$-points is homeomorphic
to $V_G(k) = \operatorname{Proj}\nolimits \HH{*}{G}{k}.$
\end{thm}
The point is that if $A = K[t]/(t^p)$, then $\operatorname{H}\nolimits^*(A,K)/\operatorname{Rad}\nolimits(\operatorname{H}\nolimits^*(A,K))$
is a polynomial ring in one variable. So, if $\alpha: A \to KG$ is a
$\pi$-point, then the kernel of the composition
\[
\xymatrix{
\HH{*}{G}{k} \ar[r]^{\alpha^*} & \operatorname{H}\nolimits^*(A,K) \ar[r] & \operatorname{H}\nolimits^*(A,K)/\operatorname{Rad}\nolimits(\operatorname{H}\nolimits^*(A,K))
}
\]
is a prime ideal. Equivalent $\pi$-points determine the same prime ideal.
With the identification given by the theorem, we can define the support
variety ${\mathcal{V}}_G(M)$ of any $kG$-module $M$
to be the set of all equivalence classes
of $\pi$-points $\alpha_K:K[t]/(t^p) \to KG_K$ such that the restriction
$\alpha_K^*(K \otimes M)$ is not a free $KG$-module. In the case that
$M$ is finitely generated, ${\mathcal{V}}_G(M) \simeq V_G(M)$ is a closed set.
\begin{rem}\label{rem:quillen}
If $G$ is a finite group that is not elementary abelian, then
the Quillen Dimension Theorem (see \cite{Quil} or
\cite[Theorem 8.4.6]{CTVZ}) says
that $V_G(k) = \operatorname{Proj}\nolimits \operatorname{H}\nolimits^*(G,k) = \cup \operatorname{res}\nolimits^*_{G,E}(V_E(k))$,
where the union is over the elementary abelian $p$-subgroups $E$ of $G$.
This assures us that every $\pi$-point is equivalent to one that
factors through the inclusion of the group algebra of some
elementary abelian $p$-subgroup $E$ of $G$ into $kG$. Or, stated another
way, every homogeneous prime ideal in $\HH{*}{G}{k}$ contains the kernel
of the restriction $\operatorname{res}\nolimits_{G,E}:\HH{*}{G}{k} \to \HH{*}{E}{k},$ for some elementary
abelian $p$-subgroup $E.$
\end{rem}
\section{Point varieties}
A subcategory ${\mathcal{M}}$ of a triangulated category ${\mathcal{C}}$
is thick if it is triangulated
and closed under taking direct summands. It is a thick tensor ideal if it
is thick and if, for any $X \in {\mathcal{C}}$ and $Y \in {\mathcal{M}}$, $X \otimes Y$ is in
${\mathcal{M}}$. For $V$ a
closed subset of $V_G(k)$, let ${\mathcal{M}}_V$ be the thick tensor ideal
in $\operatorname{{\bf stmod}(\text{$kG$})}\nolimits$ consisting of all finitely generated $kG$-modules $M$
with $V_G(M) \subseteq V$. More generally, let ${\mathcal{V}}$ be a collection
of closed subsets of $V_G(k)$ that is closed under taking finite unions
and specializations (meaning that if $U \subseteq V \in {\mathcal{V}}$ then
$U \in {\mathcal{V}}$). Then the subcategory ${\mathcal{M}}_{{\mathcal{V}}}$ of all finitely generated
modules $M$ with $V_G(M) \in {\mathcal{V}}$ is a thick tensor ideal. Indeed, this is
the story.
\begin{thm} \label{thm:bcr} \cite{BCR}
If ${\mathcal{M}}$ is a thick tensor ideal in $\operatorname{{\bf stmod}(\text{$kG$})}\nolimits$, then ${\mathcal{M}} = {\mathcal{M}}_{{\mathcal{V}}}$
for some collection ${\mathcal{V}}$ of closed subsets of $V_G(k)$ that is closed
under finite unions and specializations.
\end{thm}
Corresponding to a thick tensor ideal ${\mathcal{M}}_{\mathcal{V}}$ in $\operatorname{{\bf stmod}(\text{$kG$})}\nolimits$ is a triangle of
idempotent modules in $\operatorname{{\bf StMod}(\text{$kG$})}\nolimits$ having the form
\[
\xymatrix{
{\mathcal{S}}_{\mathcal{V}}:& {} \ar[r] & {\mathcal{E}}_{\mathcal{V}} \ar[r]^{\sigma_{\mathcal{V}}} & k \ar[r]^{\tau_{\mathcal{V}}} &
{\mathcal{F}}_{\mathcal{V}} \ar[r] & {}.
}
\]
See \cite{R} for proofs and details.
The modules ${\mathcal{E}}_{\mathcal{V}}$ and ${\mathcal{F}}_{\mathcal{V}}$ are idempotent in the stable category,
meaning that ${\mathcal{E}}_{\mathcal{V}} \otimes {\mathcal{E}}_{\mathcal{V}} \cong {\mathcal{E}}_{\mathcal{V}}$ and
${\mathcal{F}}_{\mathcal{V}} \otimes {\mathcal{F}}_{\mathcal{V}} \cong {\mathcal{F}}_{\mathcal{V}}$ in $\operatorname{{\bf StMod}(\text{$kG$})}\nolimits$, {\it i. e.}
ignoring projective summands. In addition, ${\mathcal{E}}_{\mathcal{V}} \otimes {\mathcal{F}}_{\mathcal{V}} \cong 0$
in the stable category. The support variety ${\mathcal{V}}_G({\mathcal{E}}_{{\mathcal{V}}})$ is the set
of all equivalence classes of $\pi$-points
corresponding to irreducible closed subsets in ${\mathcal{V}}$,
and ${\mathcal{V}}_G({\mathcal{F}}_{{\mathcal{V}}}) = V_G(k) \setminus {\mathcal{V}}_G({\mathcal{E}}_{{\mathcal{V}}})$.
For any finitely generated $kG$-module $X$,
the triangle
\[
\xymatrix{
X \otimes {\mathcal{S}}_{\mathcal{V}}: & {\mathcal{E}}_{\mathcal{V}}(X) \ar[r]^{\quad \mu_X} & X
\ar[r]^{\nu_X \quad} & {\mathcal{F}}_{\mathcal{V}}(X) \ar[r] & {}
}
\]
has a couple of universal properties \cite{R}.
Let ${\mathcal{M}}^{\oplus}$ denote the closure of ${\mathcal{M}}$ in $\operatorname{{\bf StMod}(\text{$kG$})}\nolimits$ under
taking arbitrary direct sums. The map $\mu_X$ is universal for maps
from objects in ${\mathcal{M}}_{\mathcal{V}}^\oplus$ to $X$, meaning that if $Y$ is in
${\mathcal{M}}^{\oplus}_{\mathcal{V}}$, then any map $Y \to X$ factors through $\mu_X$. The map
$\nu_X$ is universal for maps from $X$ to ${\mathcal{M}}_{\mathcal{V}}$-local objects, meaning
objects $Y$ such that $\operatorname{Hom}\nolimitsul_{kG}(M, Y) = \{0 \}$ for all $M$ in
${\mathcal{M}}_{\mathcal{V}}$. The universal property says that for an ${\mathcal{M}}_{{\mathcal{V}}}$-local module $Y$,
any map $X \to Y$ factors through $\nu_X$.
In the event that $V$ is a closed subset of $V_G(k)$, let ${\mathcal{E}}_V = {\mathcal{E}}_{\mathcal{V}}$
and ${\mathcal{F}}_V = {\mathcal{F}}_{{\mathcal{V}}}$ where ${\mathcal{V}}$ is the collection of all closed subsets
of $V$.
\begin{lemma}\label{lem:Elocal}
Suppose that $V$ is a closed subvariety of $V_G(k)$. Suppose that $L$ is
a $kG$-module such that $U \cap V = \emptyset$ for all $U \in {\mathcal{V}}_G(L)$. Then
$\operatorname{Hom}\nolimitsul_{kG}({\mathcal{E}}_V, L) = \{0\}$.
\end{lemma}
\begin{proof}
The point is that ${\mathcal{E}}_V$ can be constructed as the direct limit of
finitely generated modules having variety equal to $V$. So $L$ is
${\mathcal{M}}_V$-local.
\end{proof}
Suppose that ${\mathcal{M}} = {\mathcal{M}}_{{\mathcal{V}}}$ is a thick tensor ideal of $\operatorname{{\bf stmod}(\text{$kG$})}\nolimits$ for an
appropriate collection ${\mathcal{V}}$. The Verdier localization
${\mathcal{C}} = {\mathcal{C}}_{{\mathcal{V}}}$ of $\operatorname{{\bf stmod}(\text{$kG$})}\nolimits$
with respect to ${\mathcal{M}}$ is the category whose objects are the same as
those of $\operatorname{{\bf stmod}(\text{$kG$})}\nolimits$. The collection of
morphisms from an object $M$ to an object $N$ is obtained by inverting
any morphism with the property that the third object in the triangle of
that morphism is in ${\mathcal{M}}$. Thus, objects in ${\mathcal{M}}$ are equal to the zero
object in ${\mathcal{C}}$. One of the motivations for this work is that
$\operatorname{End}\nolimitsul({\mathcal{F}}_V)$ is isomorphic to the ring of endomorphisms of the
trivial module $k$ in the localized category ${\mathcal{C}}$.
\begin{prop} \label{prop:nointersect}
Suppose that $V = V_1 \cup V_2$ where $V_1$ and $V_2$ are closed
subvarieties such that $V_1 \cap V_2 = \emptyset$. Then ${\mathcal{F}}_V$
is the pushout of the diagram
\[
\xymatrix{
k \ar[r]^{\tau_{V_1}} \ar[d]^{\tau_{V_2}} & {\mathcal{F}}_{V_1} \ar[d] \\
{\mathcal{F}}_{V_2} \ar[r] & {\mathcal{F}}_{V}
}
\]
That is, ${\mathcal{F}}_V \cong ({\mathcal{F}}_{V_1} \oplus {\mathcal{F}}_{V_2})/ N$ where
$N = \{ (\tau_{V_1}(a), -\tau_{V_2}(a)) \ \vert \ a \in k \}.$
\end{prop}
\begin{proof}
The thing to note is that ${\mathcal{E}}_{V} \cong {\mathcal{E}}_{V_1} \oplus {\mathcal{E}}_{V_2}.$
That is, because $V_1 \cap V_2 = \emptyset$,
if $M \in {\mathcal{M}}_V$, then $M \cong M_1 \oplus M_2$ where
$M_i \in {\mathcal{M}}_{V_i}$ for $i = 1,2$. So in particular, the map
${\mathcal{E}}_{V_1} \oplus {\mathcal{E}}_{V_2} \to k$ sending $(u, v)$ to
$\sigma_{V_1}(u) + \sigma_{V_2}(v)$ has the desired universal
property. The third object in the triangle of the map is the pushout,
and it also satisfies the desired universal property. Moreover,
we know that ${\mathcal{E}}_{V_1} \otimes {\mathcal{E}}_{V_2}$ is projective because the
varieties of the two modules are disjoint \cite{BCR2}.
So ${\mathcal{E}}_{V_1} \oplus {\mathcal{E}}_{V_2}$
is an idempotent module. This is sufficient to prove the proposition.
\end{proof}
\begin{lemma} \label{lem:restr-idem}
Let $G$ be an elementary abelian group of order $p^r$.
Suppose that $H$ is a subgroup of $G$ or that $kH$ is the image of a
flat map $\gamma:k[t_1, \dots, t_s]/(t_1^p, \dots, t_s^p) \to kG$.
Let $V$ be a closed subvariety of $V_G(k)$.
Then the restriction of the exact triangle ${\mathcal{S}}_V$ to $kH$ is the triangle
\[
\xymatrix{
{\mathcal{S}}_{V^\prime}:& {} \ar[r] & {\mathcal{E}}_{V^\prime} \ar[r]^{\sigma_{V^\prime}}
& k \ar[r]^{\tau_{V^\prime}} & {\mathcal{F}}_{V^\prime} \ar[r] & {},
}
\]
where $V^\prime = (\operatorname{res}\nolimits_{G,H}^*)^{-1}(V)$, the inverse image of $V$
under the restriction map.
\end{lemma}
\begin{proof}
The proof is a straightforward matter of checking that the varieties
are correct.
\end{proof}
In the case that $G$ is an elementary abelian group,
one of the main theorem in \cite{Ctriv} is the following.
\begin{thm} \label{thm:decomp}
Suppose that $G$ is an elementary abelian $p$-group having $p$-rank
at least 3. Suppose that $V$ is a subvariety of $V_G(k)$ consisting
of a single closed point. Let $kH$ be the image of a
flat map $\gamma:k[t_1, \dots, t_{r-1}]/(t_1^p, \dots, t_{r-1}^p) \to kG$
with the property that $V$ is not in $\operatorname{res}\nolimits_{G,H}^*(V_H(k))$.
Suppose that $Z = \alpha(t)$ where $\alpha:k[t]/(t^p) \to kG$
is a $\pi$-point whose equivalence class is the point in $V$.
Then, the idempotent module ${\mathcal{F}}_V$ has a
decomposition (as a direct sum of $kH$-modules)
\[
{\mathcal{F}}_V = k \oplus P_0^{p-1} \oplus P_1
\oplus P_2^{p-1} \oplus P_3 \oplus \dots
\]
where
\[
\xymatrix{
\dots \ar[r] & P_2 \ar[r]^\partial & P_1 \ar[r]^\partial &
P_0 \ar[r]^\varepsilon & k \ar[r] &0
}
\]
is a projective $kH$-resolution of the trivial $kH$-module.
Multiplication by the element $Z$ is zero on the summand $k$.
For \ $m \in P_{2i-1}$, $i >0$,
\[
Zm \ = \ -(\partial(m), 0, \dots, 0) \in P_{2i-2}^{p-1}.
\]
For $m = (m_1, \dots, m_{p-1}) \in P_{2i}^{p-1}$,
\[
Zm = \begin{cases} - \varepsilon(m_{p-1}) + (0, m_1, \dots, m_{p-2})
\in k \oplus P_0^{p-1} & \text{ if } i = 0, \\
-\partial(m_{p-1}) + (0, m_1, \dots, m_{p-2}) \in
P_{2i-1} \oplus P_{2i}^{p-1} &
\text{ if } i > 0. \end{cases}
\]
The map $\tau_V: k \to {\mathcal{F}}_V$ has image the summand $k$ in the
decomposition. Moreover, $\operatorname{Hom}\nolimits_{kG}(k, {\mathcal{F}}_V) = \sum_{i\geq 0}
H_i$ is a graded ring with
\[
H_i \cong \begin{cases} k\tau_V(1) & \text{ for } i = 0, \\
\operatorname{Hom}\nolimits_{kH}(k, P_i) & \text{ for } i >0 \end{cases}
\]
A homogeneous element $\theta: k \to {\mathcal{F}}_V$ lifts to a homomorphism
$\hat{\theta}: {\mathcal{F}}_V \to {\mathcal{F}}_V$ that is induced by a $kH$-chain map
$\theta_*:(P_*, \varepsilon) \to (P_*, \varepsilon)$ of the augmented
projective resolution to itself, that lifts $\theta$.
\end{thm}
\begin{proof}
Let $kC$ denote the image of $\alpha$. Because $\gamma$ is a flat
map, the classes modulo
$\operatorname{Rad}\nolimits^2(kG)$ of $\gamma(t_1), \dots, \gamma(t_{r-1})$ span a subspace
of $\operatorname{Rad}\nolimits(kG)/\operatorname{Rad}\nolimits^2(kG)$ of dimension $r-1$. The fact that $V$ is not in
$\operatorname{res}\nolimits_{G,H}^*(V_H(k))$ implies that the class modulo $\operatorname{Rad}\nolimits^2(kG)$ of
$Z$ is not in that subspace. That is, otherwise there would be a
$\pi$-point equivalent to $\alpha$ that factored through $\gamma$,
violating the assumption on the varieties. Thus we have that
$kG \cong kC \otimes kH$ is the group algebra of the direct
product $C\times H$. Now we apply \cite[Theorem 6.2]{Ctriv},
where $V$ is the point $[1, 0, \dots, 0]$ in $V_G(k)$ corresponding to $Z$.
This gives the stated result.
That is, for this specific choice of $V$ and generators
$Z, \gamma(t_1), \dots, \gamma(t_{r-1}),$
the module ${\mathcal{F}}_V$ has a decomposition as
described in \cite{Ctriv}.
\end{proof}
We remark that changing the generators of $kG$, as we have done above,
does not preserve the Hopf algebra structure. However, as noted in
\cite[Remark 7.5]{Ctriv}, the structure of the idempotent modules does
not depend on the coalgebra structure.
\begin{thm} \label{thm:deeprad}
Assume the hypothesis of the previous theorem (Thm. \ref{thm:decomp}).
Suppose that $X = \alpha(t)$ where $\alpha:k[t]/(t^p) \to kG$ is a
$\pi$-point not corresponding to the point $V$.
Assume that $\theta: k \to {\mathcal{F}}_V$ is a homomorphism with
the property that $\theta(1) \in X^{p-1}{\mathcal{F}}_V$. Then,
the image of $\hat{\theta}: {\mathcal{F}}_V \to {\mathcal{F}}_V$ is contained in
$X^{p-1}{\mathcal{F}}_V$.
\end{thm}
\begin{proof}
In the statement of Theorem \ref{thm:decomp}, the generators
$Y_i = \gamma(t_i)$ of $kH$
can be chosen so that $Y_1 = X$ and $Y_2, \dots, Y_{r-1}$ are any elements
so that the classes modulo $\operatorname{Rad}\nolimits^2(kG)$ of $Z, X, Y_2, \dots, Y_{r-1}$
form a basis for $\operatorname{Rad}\nolimits(kG)/\operatorname{Rad}\nolimits^2(kG)$.
Thus, $kH \cong kC \otimes kJ$ where
$kC \cong k[X]/(X^p)$ is the flat subalgebra of $kG$ generated by
$X$ and $kJ$ is the flat subalgebra generated by $Y_2, \dots, Y_{r-1}.$
Let $(R_*, \varepsilon_1)$ be a minimal projective $kC$-resolution of $k_C$ and
$(Q_*, \varepsilon_2)$, minimal projective $kJ$-resolution of $k_J$.
Then $R_i \cong kC$ for all $i \geq 0$. The minimal $kH$-resolution
of $k$ can be taken to be the tensor product of these two, so that
\[
P_n = \sum_{i= 0}^n R_i \otimes Q_{n-i}
\]
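Here we are using the standard tensor product of complexes; under the usual sign convention (which we record here as an assumption, since the text does not spell it out) the differential is
\[
\partial(x \otimes y) = \partial(x) \otimes y + (-1)^i\, x \otimes \partial(y), \qquad x \in R_i, \ y \in Q_{n-i}.
\]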
In the decomposition of ${\mathcal{F}}_V$ given in Theorem \ref{thm:decomp}, the
element $\theta(1) \in X^{p-1}{\mathcal{F}}_V \subseteq \sum_{n \geq 0} P_n$. Because
an assignment of chain maps to elements of $\operatorname{Hom}\nolimits_{kG}(k, {\mathcal{F}}_V)$ is
additive, it is sufficient to prove the theorem assuming that
$\theta(1) \in X^{p-1}P_n$ for some $n$. Indeed, we may assume that
$\theta(1) \in X^{p-1}(R_m \otimes Q_{n-m}) =
(X^{p-1}R_m) \otimes Q_{n-m}$ for some $m$ and $n$.
Because $R_m \cong kC$, we have that $\theta(1) = X^{p-1} \otimes u$
for some $u \in Q_{n-m}$.
Let $\varphi: k \to Q_{n-m}$ be given by $\varphi(1) = u$. Then
$\varphi$ lifts to a chain map
\[
\xymatrix{
\dots \ar[r] & Q_1 \ar[r] \ar[d]^{\varphi_1} &
Q_0 \ar[r] \ar[d]^{\varphi_0} &
k \ar[d]^{\varphi} \ar[r] & 0\\
\dots \ar[r] & \quad Q_{n-m+2} \ar[r] & \quad Q_{n-m+1} \ar[r] &
\quad Q_{n-m} \ar[r] & \dots
}
\]
Let $\mu: R_0 \to X^{p-1}R_m$ be given by $\mu(1) = X^{p-1}$.
Now define $\theta_i: P_i \to P_{n+i+1}$ as the composition
\[
\xymatrix{
P_i \ar[r] & R_0 \otimes Q_i \ar[r] &
R_0/(X^{p-1}R_0) \otimes Q_i \ar[r]^{ \ \ \mu \otimes \varphi_i} &
X^{p-1}R_m \otimes Q_{n-m+i+1} \ar[r] & P_{n+i+1}
}
\]
The first map is projection onto the direct summand $R_0 \otimes Q_i$.
The second is the natural quotient. Then comes the chain map,
and the fourth is the inclusion.
The task to finish the proof amounts to two straightforward exercises which
we leave to the reader. The first is to show that $\{\theta_i\}$ is
a chain map, and the second is to show that it lifts the map $\theta$.
\end{proof}
\begin{cor} \label{cor:restrict}
Assume the hypotheses and notation
of Theorems \ref{thm:decomp} and \ref{thm:deeprad}.
Let ${\mathcal{I}}$ be the collection of all $\theta:k \to {\mathcal{F}}_V$ such that
$\theta(1) \in X^{p-1}{\mathcal{F}}_V$. Then under the correspondence
$\operatorname{Hom}\nolimitsul_{kG}(k, {\mathcal{F}}_V) \cong \operatorname{Hom}\nolimitsul_{kG}({\mathcal{F}}_V, {\mathcal{F}}_V)$, ${\mathcal{I}}$ is
the kernel of the restriction map
$\operatorname{Hom}\nolimitsul_{kG}({\mathcal{F}}_V, {\mathcal{F}}_V) \to \operatorname{Hom}\nolimitsul_{kH}({\mathcal{F}}_V, {\mathcal{F}}_V)$.
In particular, ${\mathcal{I}}$ is an ideal.
\end{cor}
\begin{proof}
The point is, in the notation of the last proof, that $X^{p-1}{\mathcal{F}}_V
\subset P_*$. Thus, $\theta(1)$ and $\hat{\theta}({\mathcal{F}}_V)$ are in
$P_*$ which is free as a $kH$-module. Hence, the map $\theta$
factors through a $kH$-projective object and is zero on restriction
to $kH$.
\end{proof}
\begin{rem} \label{rem:indep}
We emphasize that in Theorem \ref{thm:decomp}, the choices of the flat map
$\gamma$ and also of the generators for $kH$ are arbitrary except that
$kH$ should have rank $r-1$ and the condition on the varieties must be
satisfied. Similarly, in Theorem \ref{thm:deeprad},
any $\pi$-point $\alpha$ satisfying the
desired conditions can be chosen.
\end{rem}
\section{Endomorphisms of ${\mathcal{F}}_V$}
Throughout this section, assume that $G = \langle g_1, \dots, g_r \rangle$
is an elementary abelian group of order $p^r$ for $r \geq 3$. We show
that if $V \subset V_G(k)$ is a closed subvariety of dimension $0$, then
the endomorphism ring of the idempotent module ${\mathcal{F}}_V$ in the stable category
has a unique maximal ideal that is nilpotent and has codimension one.
We assume the notation of the previous section.
We prove the following result in more generality than is actually needed
in the later development.
\begin{prop} \label{prop:disjointV}
Suppose that $V_1$ and $V_2$ are disjoint subvarieties of $V_G(k)$.
Let $kH$ be the image of a flat map
$\gamma:k[t_1, \dots, t_s]/(t_1^p, \dots, t_s^p) \to kG$, for $s \geq 2$.
Let $X = \gamma(t_1)$ and let $kJ$ be the flat subalgebra of
$kG$ generated by $\gamma(t_2), \dots, \gamma(t_s)$.
Assume that we have the following two conditions.
\begin{enumerate}
\item $V_1 \subseteq \operatorname{res}\nolimits_{G,J}^*(V_J(k)).$
\item $V_2 \cap \operatorname{res}\nolimits_{G,H}^*(V_H(k)) = \emptyset$.
\end{enumerate}
Suppose that $\varphi: k \to {\mathcal{F}}_2 = {\mathcal{F}}_{V_2}$ is a homomorphism
such that $\varphi(1) \in X^{p-1}{\mathcal{F}}_2$. Then $\varphi$ extends to a
homomorphism $\psi: {\mathcal{F}}_1 = {\mathcal{F}}_{V_1} \to X^{p-1}{\mathcal{F}}_2$. That is, we have a
commutative diagram
\[
\xymatrix{
k \ar[r]^{\tau_1} \ar[d]_{\varphi} & {\mathcal{F}}_1 \ar[dl]_\psi \ar[d] \\
X^{p-1}{\mathcal{F}}_2 \ar[r]^{ \quad \iota} & {\mathcal{F}}_2
}
\]
where $\iota$ is the inclusion.
\end{prop}
\begin{proof}
By Condition (2), $({\mathcal{E}}_2)_{\downarrow H}$ is free as a $kH$-module. This
implies that $({\mathcal{F}}_2)_{\downarrow H} \cong k \oplus P$, where $P$ is a
free $kH$-module. Thus, $\operatorname{Hom}\nolimitsul_{kH}(k, ({\mathcal{F}}_2)_{\downarrow H})$ has
dimension one, and the fact that $\varphi(1) \in X^{p-1}{\mathcal{F}}_2$ means that
$\varphi$ factors through a projective $kH$-module, namely $P$. This
follows because $\varphi(1) \in \operatorname{Rad}\nolimits(kH){\mathcal{F}}_2 \cap \operatorname{Soc}\nolimits({\mathcal{F}}_2) \subset P$.
Note that $X \not\in kJ$, and hence the restriction of
${\mathcal{E}}_1$ to the subalgebra generated by $X$ is a free module. Moreover,
$(X^{p-1}{\mathcal{F}}_2)_{\downarrow H} = X^{p-1}P$ is free as a $kJ$-module.
Consequently, ${\mathcal{V}}_G({\mathcal{E}}_1) \cap {\mathcal{V}}_G(X^{p-1}{\mathcal{F}}_2) = \emptyset$, and
the composition
\[
\xymatrix{
{\mathcal{E}}_1 \ar[r]^{\sigma_1} & k \ar[r]^{\varphi \qquad} & X^{p-1}{\mathcal{F}}_2
}
\]
is the zero map in the stable category
by Lemma \ref{lem:Elocal}. The existence of the map $\psi$ now follows
from the distinguished triangle.
\end{proof}
\begin{cor} \label{cor:extend}
Suppose that $G$ is an elementary abelian $p$-group having rank
at least 3. Suppose that $V_1, V_2 \subset V_G(k)$ are closed subvarieties
each consisting of a single point. Let
$\beta: k[t_1,t_2]/(t_1^p,t_2^p) \to kG$ be a flat map such that the
following hold. For notation, let $kH$ be the image of $\beta$.
\begin{enumerate}
\item The class of the $\pi$-point $\alpha:k[t]/(t^p) \to kG$
with $\alpha(t) = \beta(t_1)$ is in $V_1$.
\item $V_2 \not\subset \operatorname{res}\nolimits_{G,H}^*(V_H(k))$.
\end{enumerate}
Let $X = \beta(t_2)$. Suppose that $\varphi: k \to {\mathcal{F}}_2
= {\mathcal{F}}_{V_2}$ is a map such that $\varphi(1)\in X^{p-1}{\mathcal{F}}_2$.
Then $\varphi$ extends to a map $\theta: {\mathcal{F}}_1 \to {\mathcal{F}}_2$ such that
$\theta({\mathcal{F}}_1) \subseteq X^{p-1}{\mathcal{F}}_2$.
\end{cor}
\begin{proof}
Let $kJ$ be the image of $\alpha$. Then the conditions of
Proposition \ref{prop:disjointV} are satisfied and the corollary follows.
\end{proof}
We can now prove the main theorem of the section.
\begin{thm} \label{thm:elemain}
Suppose that $G$ is an elementary abelian $p$-group having rank $r \geq 3$.
Let $V \subset V_G(k)$ be a closed subset consisting of a finite
number of closed points. Let $kH$ be the image of a flat map
$\gamma:k[t_1, \dots, t_s]/(t_1^p, \dots, t_s^p) \to kG$
such that $s \geq 2$ and $\operatorname{res}\nolimits_{G, H}^*(V_H(k)) \cap V = \emptyset$. Let
${\mathcal{I}}$ be the kernel of the restriction $\operatorname{End}\nolimitsul_{kG}({\mathcal{F}}_V) \to
\operatorname{End}\nolimitsul_{kH}({\mathcal{F}}_V)$. Then ${\mathcal{I}}$ is an ideal of codimension one in
$\operatorname{End}\nolimitsul_{kG}({\mathcal{F}}_V)$, and ${\mathcal{I}}^2 = \{0\}$. Thus ${\mathcal{I}}$ is the unique
maximal ideal in $\operatorname{End}\nolimitsul_{kG}({\mathcal{F}}_V)$.
\end{thm}
\begin{proof}
We write $V = \cup_{i=1}^{n} V_i$ where each $V_i$ is a closed point in
$V_G(k)$. For each $i$, let $\alpha_i:k[t]/(t^p) \to kG$ be a $\pi$-point
corresponding to the closed point in $V_i$. For $i \neq j$ let $kH_{i,j}$
be the image of the flat map $\beta_{i,j}: k[t_1,t_2]/(t_1^p,t_2^p) \to kG$
with $\beta_{i,j}(t_1) = \alpha_i(t)$ and $\beta_{i,j}(t_2) = \alpha_j(t)$.
We note that for all $i , j$, the intersection
\[
\operatorname{res}\nolimits_{G, H_{i,j}}^*(V_{H_{i,j}}(k)) \cap \operatorname{res}\nolimits_{G, H}^*(V_{H}(k))
\]
either contains only one point or is empty.
Let $\alpha:k[t]/(t^p) \to kG$ be a $\pi$-point
that factors through $\gamma$, but is not in
$\operatorname{res}\nolimits_{G, H_{i,j}}^*(V_{H_{i,j}}(k))$ for any pair $i,j$ with
$1 \leq i < j \leq n$.
By Proposition \ref{prop:nointersect}, the idempotent module ${\mathcal{F}}_V$
is the pushout of the system $\{\tau_i: k \to {\mathcal{F}}_i\}$, where ${\mathcal{F}}_i =
{\mathcal{F}}_{V_i}$. For each $i$, there is a homomorphism $\nu_i: {\mathcal{F}}_i \to {\mathcal{F}}_V$
such that the compositions $\nu_i\tau_i$ coincide.
From the conditions on the choice of $kH$, we see that
for each $i$, the restriction of ${\mathcal{F}}_i$ to $kH$
has the form $k \oplus P_i$ where $P_i$ is a projective $kH$-module.
In addition, the component isomorphic to $k$ is generated by $\tau_i(1)$.
Thus, the restriction $({\mathcal{F}}_V)_{\downarrow H} \cong k \oplus \sum P_i$.
We see that ${\mathcal{I}}$ is the subspace of $\operatorname{Hom}\nolimitsul_{kG}(k, {\mathcal{F}}_V)$
spanned by all $\varphi: k \to {\mathcal{F}}_V$ with
$\varphi(1) \in X^{p-1} {\mathcal{F}}_V$.
For any $j = 1, \dots, n$, let ${\mathcal{I}}_j$ be the set of all $\varphi \in {\mathcal{I}}$
such that $\varphi(1) \in X^{p-1}P_j$. Thus ${\mathcal{I}}$ is the direct sum of
the subspaces ${\mathcal{I}}_j$.
Throughout the proof, we make the identification
$\operatorname{Hom}\nolimitsul_{kG}(k, {\mathcal{F}}_V) \cong \operatorname{Hom}\nolimitsul_{kG}({\mathcal{F}}_V, {\mathcal{F}}_V)$. If we are
given two elements $\varphi_1$ and $\varphi_2$ in $\operatorname{Hom}\nolimitsul_{kG}(k, {\mathcal{F}}_V)$,
their product is obtained by first finding lifts
$\hat{\varphi_i}: {\mathcal{F}}_V \to {\mathcal{F}}_V$, for $i = 1,2$, taking the composition and
composing with the map $\tau: k \to {\mathcal{F}}_V$. Note that
any lift will serve the purpose. Our aim is to show that if
$\varphi_1, \varphi_2 \in {\mathcal{I}}$, then the product is zero. Without loss
of generality we may assume that $\varphi_i \in {\mathcal{I}}_{j_i}$ for some
$1 \leq j_i \leq n$.
Letting $j = j_1$, there is a $\phi_1: k \to X^{p-1}P_j \subset P_j$
such that $\nu_j\phi_1 = \varphi_1: k \to {\mathcal{F}}_V$.
By Corollary \ref{cor:extend}, for any $i \neq j$ there is an extension
$\theta_i: {\mathcal{F}}_i \to \nu_j(X^{p-1}{\mathcal{F}}_j)$ extending $\phi_1$. Likewise,
by Theorem \ref{thm:deeprad}, there is such an extension also in the
case that $i = j$. Thus, for every $i = 1, \dots, n$, there is an
extension $\hat{\theta}_i: {\mathcal{F}}_i \to {\mathcal{F}}_V$ of
$\varphi_1$ with the property that
$\hat{\theta}_i({\mathcal{F}}_i) \subseteq X^{p-1}{\mathcal{F}}_V$.
The universal property of pushouts now guarantees that there is a map
$\hat{\varphi_1}: {\mathcal{F}}_V \to {\mathcal{F}}_V$ such that, for every $i$, the diagram
\[
\xymatrix{
k \ar[r]^{\phi_1} \ar[d]_{\tau} \ar[dr]^{\varphi_1} &
{\mathcal{F}}_j \ar[d]^{\nu_j} \\
{\mathcal{F}}_V \ar[r]_{\hat{\varphi_1}} & {\mathcal{F}}_V
}
\]
commutes and $\hat{\varphi_1}({\mathcal{F}}_V) \subseteq X^{p-1}{\mathcal{F}}_V$.
There is a similar extension $\hat{\varphi_2}: {\mathcal{F}}_V \to {\mathcal{F}}_V$ with the
same property.
Now, we see that $\hat{\varphi_2}(\hat{\varphi_1}({\mathcal{F}}_V)) \subseteq
\hat{\varphi_2}(X^{p-1}{\mathcal{F}}_V) \subseteq X^{2(p-1)}{\mathcal{F}}_V = \{0\}$.
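The last equality uses only the fact that $X^p = 0$ in $kG$:
\[
X^{2(p-1)} \;=\; X^{p-2}\, X^{p} \;=\; 0,
\]
since $2(p-1) \geq p$ whenever $p \geq 2$.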
Hence, the product of any two elements in ${\mathcal{I}}$ is zero.
We notice that $\operatorname{End}\nolimitsul_{kH}({\mathcal{F}}_V) \cong k$, implying
that ${\mathcal{I}}$ is a maximal ideal. Because ${\mathcal{I}}^2 = \{0\}$,
any element of $\operatorname{End}\nolimitsul_{kG}({\mathcal{F}}_V)$ that is not in ${\mathcal{I}}$ is invertible.
\end{proof}
\section{The general case}
The aim of this section is to extend the conclusion of Theorem
\ref{thm:elemain} to a more general finite group $G$.
The arguments in the proofs depend on the fact (see Remark \ref{rem:quillen})
that any prime
ideal in $\HH{*}{G}{k}$ contains the kernel of a restriction to an elementary
abelian $p$-subgroup of $G$. For this reason, the main results of the section
do not extend to general finite group schemes. Throughout the section
we assume the following.
\begin{hyp} \label{hyp}
Suppose that $G$ is a finite group whose maximal
elementary abelian $p$-subgroups all have $p$-rank at least three.
Let $V$ be a closed subvariety of $V_G(k)$ that
is a union of a finite collection of closed points.
\end{hyp}
We make the identification
$\operatorname{Hom}\nolimitsul_{kG}(k, {\mathcal{F}}_V) \cong \operatorname{Hom}\nolimitsul_{kG}({\mathcal{F}}_V, {\mathcal{F}}_V)
= \operatorname{End}\nolimitsul_{kG}({\mathcal{F}}_V)$, as before.
The ideal that we are interested in is the following.
\begin{defi} \label{def:maxideal}
Assume that Hypothesis \ref{hyp} holds.
Suppose that $E$ is an elementary abelian $p$-subgroup of $G$
with order $p^r$ for $r \geq 3$.
Let $kH$ be the image of a flat map
$\gamma: k[t_1, \dots, t_{s}]/(t_1^p, \dots, t_{s}^p) \to kE$,
such that $1 \leq s < r$ and
\[
\operatorname{res}\nolimits^*_{G,H}(V_H(k)) \cap V = \emptyset.
\]
Let ${\mathcal{I}} \subset \operatorname{End}\nolimitsul_{kG}({\mathcal{F}}_V)$ be the kernel of the restriction
map $\operatorname{End}\nolimitsul_{kG}({\mathcal{F}}_V) \to \operatorname{End}\nolimitsul_{kH}({\mathcal{F}}_V)$.
\end{defi}
Given $E$, the existence of $H$ follows easily from the geometry. Note that
${\mathcal{I}}$ is an ideal, because if a $kG$-homomorphism factors through a
$kH$-projective module, then so does its composition with any other
homomorphism.
\begin{prop} \label{prop:notdepend}
Assume that Hypothesis \ref{hyp} holds.
The ideal ${\mathcal{I}}$ does not depend on the choice of
$E$ or $H$, as long as the above conditions are satisfied.
\end{prop}
\begin{proof}
The first thing to notice is that the restriction of the module ${\mathcal{E}}_V$ to
$kH$ is projective, and hence, $({\mathcal{F}}_V)_{\downarrow H} \cong k \oplus P$
where $P$ is a projective module by Lemma \ref{lem:restr-idem}.
Then, the independence of the choice of $kH$ in $kE$ follows from
Theorem \ref{thm:elemain}.
Now suppose that $E_1$ and $E_2$ are elementary abelian $p$-subgroups of
$G$ such that $F = E_1 \cap E_2$ has $p$-rank at least $2$. Let ${\mathcal{I}}_j$ be
the ideal defined by $E_j$ as above for $j = 1,2$. We claim that
${\mathcal{I}}_1 = {\mathcal{I}}_2$. The reason is that there must be a $\pi$-point
$\alpha: k[t]/(t^p) \to kF \subset kG$ with the property that
the equivalence class of $\alpha$ does not correspond to any point
of $(\operatorname{res}\nolimits_{G,F}^*)^{-1}(V)$, the inverse image of $V$ under the restriction
map $\operatorname{res}\nolimits_{G,F}^*: V_F(k) \to V_G(k)$. Let $H$ be the image of $\alpha$.
Then $kH$ is a flat subalgebra of both $kE_1$ and $kE_2$, which
satisfies the condition of Definition \ref{def:maxideal}.
So both ${\mathcal{I}}_1$ and ${\mathcal{I}}_2$ are the kernel of the restriction to $kH$.
Now suppose $G$ is a $p$-group and that $E$, $E^{\prime}$ are any two
elementary abelian $p$-subgroups having
$p$-rank at least 3. By the argument of Alperin (see the bottom of page
8 to top of page 9 of \cite{Alp}), there is a chain $E = F_1, F_2,
\dots, F_m = E^\prime$ of elementary abelian $p$-subgroups of $G$
such that $F_i \cap F_{i+1}$ has $p$-rank at least 2. Thus by an
easy induction and the previous paragraph, the ideal ${\mathcal{I}}$ is independent
of the choice of $E$.
If $G$ is not a $p$-group we need only notice that if $E_1$ and $E_2$
are conjugate elementary abelian $p$-subgroups, then the ideals
${\mathcal{I}}_1$ and ${\mathcal{I}}_2$ must be the same. Thus, we may assume that any
two elementary abelian $p$-subgroups are in the same Sylow $p$-subgroup.
In such a case the same proof as above works.
\end{proof}
\begin{thm} \label{thm:general}
Assume that the conditions of Hypothesis \ref{hyp} hold.
Let ${\mathcal{F}}_V$ be the idempotent ${\mathcal{F}}$-module corresponding to
$V$. Then $\operatorname{End}\nolimitsul({\mathcal{F}}_V)$ is a local ring whose unique maximal ideal ${\mathcal{I}}$ is
nilpotent. Moreover, there is a number $B$ depending only on $G$ and $p$
such that the nilpotence degree of ${\mathcal{I}}$ is at most $B$.
\end{thm}
\begin{proof}
The proof is an easy consequence of the above Proposition,
Theorem \ref{thm:elemain} and Theorem 2.5
of \cite{CQ}. The last mentioned
theorem can be interpreted as saying that there
is a number $N$, depending only on $G$ and $p$, such that for any
sequence $M_0, \dots, M_n$ of $kG$-modules and maps
$\theta_i \in \operatorname{Hom}\nolimitsul_{kG}(M_{i-1}, M_i)$,
$1 \leq i \leq n$, such that $n \ge N$ and
$\operatorname{res}\nolimits_{G, E}(\theta_i) = 0$ for every
elementary abelian subgroup $E$ of $G$, the composition
$\theta_n \cdots \theta_1= 0$. Set $B = 2N$, take $M_i = {\mathcal{F}}_V$ for all $i$,
and choose elements $\theta_i \in {\mathcal{I}}$. Then,
for every $i$, the restriction of the product
$\theta_{2i}\theta_{2i-1}$ to
every elementary abelian $p$-subgroup of $G$ vanishes, by Theorem
\ref{thm:elemain}. It follows that
$\theta_n \cdots \theta_1= 0$ if $n \geq B$.
\end{proof}
\section{Restrictions} \label{sec:restrict}
The aim of this section is to prove a few facts about the restrictions of the
endomorphism rings. The first result is known, but perhaps has not been
written down.
\begin{lemma} \label{lem:rest01}
Suppose that $G$ is an elementary abelian $p$-group of order $p^r >1$.
Let $kH \neq kG$ be a flat subalgebra of $kG$. Then for all
$n < 0$, we have that
\[
\operatorname{res}\nolimits_{G,H}: \operatorname{H}\nolimitsH^n(G,k) \to \operatorname{H}\nolimitsH^n(H,k)
\]
is the zero map.
\end{lemma}
\begin{proof} Let $\gamma: k[t_1, \dots, t_s]/(t_1^p, \dots, t_s^p) \to kG$
be a flat map whose image is $kH$. Then the classes modulo $\operatorname{Rad}\nolimits^2(kG)$ of
$\gamma(t_1), \dots, \gamma(t_s)$ are $k$-linearly independent in
$\operatorname{Rad}\nolimits(kG)/\operatorname{Rad}\nolimits^2(kG)$. Let $b_{s+1}, \dots, b_r$ be elements that
are chosen so that the classes of $\gamma(t_1), \dots, \gamma(t_s),
b_{s+1}, \dots, b_r$ form a basis for $\operatorname{Rad}\nolimits(kG)/\operatorname{Rad}\nolimits^2(kG)$.
Let $kJ$ be the flat subalgebra generated by $b_{s+1}, \dots, b_r$
so that $kG \cong kH \otimes kJ$ (see Lemma \ref{lem:complement}).
The Tate cohomology group $\operatorname{H}\nolimitsH^n(G,k)$ is isomorphic to
$\operatorname{Hom}\nolimits_{kG}(k,P_{n-1})$, where $P_*$ is a minimal projective $kG$-resolution
of $k$. The restriction is the map
$\psi_*: \operatorname{Hom}\nolimits_{kG}(k,P_{n-1}) \to \operatorname{Hom}\nolimits_{kG}(k,Q_{n-1})$, where $Q_*$ is
a minimal $kH$-projective resolution of $k$, and $\psi: P_* \to Q_*$ is a
$kH$-chain map. Because of the decomposition $kG \cong kH \otimes kJ$, we
may assume that $Q_*$ is a complex of $kG$-modules on which $kJ$ acts
trivially and that $\psi$ is a $kG$-chain map. However, $P_{n-1}$ is a
free $kG$-module, implying that any map $\zeta:k \to P_{n-1}$ has its
image in $\operatorname{Rad}\nolimits(kJ)P_{n-1}$. Because $kJ$ acts trivially on $Q_{n-1}$,
the image of $\zeta$ is in the kernel of $\psi$.
\end{proof}
\begin{thm} \label{thm:rest02}
Suppose that $G$ is an elementary abelian $p$-group of
$p$-rank $r \geq 3.$
Let $V$ be a subvariety of $V_G(k)$ that
is a union of a finite collection of closed points.
Suppose that $kH \neq kG$ is a flat subalgebra of $kG$.
Then the maximal ideal ${\mathcal{I}} \subseteq \operatorname{End}\nolimitsul_{kG}({\mathcal{F}}_V)$ is
the kernel of the restriction map
$\operatorname{res}\nolimits_{G,H}: \operatorname{End}\nolimitsul_{kG}({\mathcal{F}}_V) \to \operatorname{End}\nolimitsul_{kH}(({\mathcal{F}}_V)_{\downarrow H})$.
\end{thm}
\begin{proof}
We write $V = \cup_{i=1}^{n} V_i$ where each $V_i$ is a closed point in
$V_G(k)$. Recall that by Proposition
\ref{prop:nointersect}, the idempotent module ${\mathcal{F}}_V$
is the pushout of the system $\{\tau_i: k \to {\mathcal{F}}_i\}$. Here, ${\mathcal{F}}_i =
{\mathcal{F}}_{V_i}$ and for each $i$, there is a homomorphism $\nu_i: {\mathcal{F}}_i \to {\mathcal{F}}$,
such that the compositions $\nu_i\tau_i$ coincide.
Let $\gamma:k[t_1, \dots, t_{r-1}]/(t_1^p, \dots, t_{r-1}^p) \to kG$
be a flat map with image $kJ$ such that
\[
\operatorname{res}\nolimits_{G,J}^*(V_J(k)) \cap V = \emptyset
\]
For each $i$, we have that the restriction of ${\mathcal{F}}_i = {\mathcal{F}}_{V_i}$ to $kJ$
has the form $({\mathcal{F}}_i)_{\downarrow J} \cong k \oplus Q_i$ where $Q_i$ is a projective
$kJ$-module. It follows that
\[
{\mathcal{F}}_{\downarrow J} \ \cong \ k \oplus \nu_1(Q_1) \oplus \dots \oplus \nu_n(Q_n).
\]
If $\zeta: k \to {\mathcal{F}}_V$ is in ${\mathcal{I}}$, then
$\zeta(1) \in \sum \nu_i(Q_i)$ by Theorem
\ref{thm:elemain} and Corollary \ref{cor:restrict} (see also Remark
\ref{rem:indep}).
Hence, for the remainder of the proof we fix an
element $\zeta \in {\mathcal{I}}$, and without loss of generality, we may assume
that $\zeta(1) \in \nu_i(Q_i)$ for some fixed $i$. Our object is to show that
$\zeta$ factors through a $kH$-projective module. Notice that $\zeta$
must factor through $\nu_i: {\mathcal{F}}_i \to {\mathcal{F}}_V$. Consequently, it is
sufficient to prove the theorem in the case that ${\mathcal{F}}_V = {\mathcal{F}}_i$.
That is, we may assume that $n=1$, $V = V_i$.
There are two cases to consider. First assume that $\operatorname{res}\nolimits_{G,H}^*(V_H(k))$
does not contain the point $V$. In this case $({\mathcal{E}}_V)_{\downarrow H}$
is projective, and hence $({\mathcal{F}}_V)_{\downarrow H} \cong k$ in the stable
category. In this case, the restriction of ${\mathcal{I}}$ to $kH$ is zero and
we are done.
Next, we assume that $V \subset \operatorname{res}\nolimits_{G,H}^*(V_H(k))$. There is a
$\pi$-point $\alpha: k[t]/(t^p) \to kH \subset kG$ whose equivalence
class is the one closed point in $V$. Let $kC$ be the
image of $\alpha$. The flat subalgebra $kH$ has a maximal flat subalgebra
$kL$ such that $kH \cong kC \otimes kL$. There is a flat subalgebra
$kD$ such that $kG \cong kH \otimes kD$
(see Lemma \ref{lem:complement}). We may assume that the subalgebra
$kJ$ has the form $kJ = kL \cdot kD \cong kL \otimes kD$,
because $V$ is not contained in $\operatorname{res}\nolimits_{G,J}^*(V_J(k))$. Therefore,
by Theorem \ref{thm:elemain}, $({\mathcal{F}}_V)_{\downarrow J} \cong k \oplus P$,
where $P$ is the sum of the terms (with multiplicities)
of a minimal augmented $kJ$-projective
resolution of $k$ such that the element $\alpha(t)$ acts
as the boundary homomorphism on
the augmented complex. Likewise, the restriction of ${\mathcal{F}}_V$ to $kL$
has the form $({\mathcal{F}}_V)_{\downarrow L} \cong k \oplus Q$ is the sum of the
terms (with multiplicities) of an augmented
minimal $kL$-projective resolution of $k$.
As in the proof of Lemma \ref{lem:rest01}, the restriction map in the
stable category is given by a chain map of augmented complexes which
can be shown to be a $kG$-homomorphism. Because $\zeta \in {\mathcal{I}}$, we have that
$\zeta(1) \in P$, and as in the proof of Lemma \ref{lem:rest01}, the
chain map takes $\zeta(1)$ to zero.
This proves that ${\mathcal{I}}$ is in the kernel of the restriction to
$\operatorname{End}\nolimitsul_{kH}({\mathcal{F}}_V)$. The fact that it is the kernel is a consequence
of its maximality.
\end{proof}
\section{Finite generation}
In this final section we address the issue of
the finite generation of the endomorphism
rings. Suppose that $V$ is a closed subvariety of $V_G(k)$. Let ${\mathcal{M}}_V$
be the subcategory of all $kG$-modules whose support variety is contained
in $V$, and let ${\mathcal{C}}_V$ be the localization of $\operatorname{{\bf StMod}(\text{$kG$})}\nolimits$ at ${\mathcal{M}}_V$.
Tensoring with ${\mathcal{F}}_V$ is the localization functor, and
for $kG$-modules $M$ and $N$, we have that
\[
\operatorname{Hom}\nolimits_{{\mathcal{C}}_V}(M,N) \cong \operatorname{Hom}\nolimitsul_{kG}(M \otimes {\mathcal{F}}_V, N \otimes {\mathcal{F}}_V)
\]
is a module over $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(k) \cong \operatorname{End}\nolimitsul_{kG}({\mathcal{F}}_V)$. The question is
whether it is a finitely generated module.
The question is most relevant
in the case that $M$ and $N$ are finitely generated modules (compact objects
in $\operatorname{{\bf StMod}(\text{$kG$})}\nolimits$). Of course, if the support varieties of the finitely generated
objects $M$ and $N$ are both disjoint from $V$ then the finite generation
is obvious. This is because, in such a case, ${\mathcal{E}}_V \otimes M$ and
${\mathcal{E}}_V \otimes N$ are projective modules, and hence ${\mathcal{F}}_V \otimes M \cong M$
and ${\mathcal{F}}_V \otimes N \cong N$ in the stable category. Also, if
the support variety of either $M$ or $N$ is contained in $V$, then the
finite generation is also clear. In other situations
some proof is required. We show that the answer is not unlike the
answer to the question of the finite generation of $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(k)$.
The proof of the following is a straightforward generalization of a
well-used argument.
\begin{thm} \label{thm:codim1}
Suppose that $\zeta \in \operatorname{H}\nolimits^d(G,k)$ is a non-nilpotent element. Let
$V = V_G(\zeta)$, the collection of all homogeneous prime ideals that
contain $\zeta$. If $M$ and $N$ are finitely generated $kG$-modules, then
$\operatorname{Hom}\nolimits_{{\mathcal{C}}_V}(M,N)$ is finitely generated as a module over
$\operatorname{End}\nolimits_{{\mathcal{C}}_V}(k)$.
\end{thm}
In fact, what we want to show is the following.
\begin{lemma} \label{lem:codim1}
Assume the hypothesis of Theorem \ref{thm:codim1}. Then
\[
\operatorname{Hom}\nolimits_{{\mathcal{C}}_V}(M, N) \cong (\operatorname{Ext}\nolimits_{kG}^*(M,N)[\zeta^{-1}])_0.
\]
That is, viewing $\operatorname{Ext}\nolimits_{kG}^*(M,N)$ as a module over $\HH{*}{G}{k}$, we
invert the action of $\zeta$ and take the zero grading.
\end{lemma}
\begin{proof}
Suppose that in ${\mathcal{C}}_V$, we have a morphism
\[
\xymatrix{
\phi = \nu\mu^{-1}: & M & L \ar[l]_\mu \ar[r]^\nu & N
}
\]
for some $kG$-module $L$ such that the third object $X$ in the triangle
of $\mu$ is in ${\mathcal{M}}_V$. This implies that for $n$ sufficiently large,
the element $\zeta^n$ annihilates the cohomology of $X$. Consequently,
we have as in the diagram
\[
\xymatrix{
& \Omega^{dn}(M) \ar[d]^{\zeta^n} \ar@{-->}[dl]_{\theta} \\
L \ar[r]^{\mu} & M \ar[r]^\gamma & X
}
\]
that the composition $\gamma\zeta^n = 0$. This implies the existence of the
map $\theta$, and we have that $\phi = \nu\mu^{-1} = (\nu\theta)\zeta^{-n}$,
where $\nu\theta$ represents an element in $\operatorname{Ext}\nolimits_{kG}^{nd}(M,N)$ and any
other representative defines the same element of $\operatorname{Hom}\nolimits_{{\mathcal{C}}_V}(M, N)$.
Likewise, the class of $\theta$ in $\operatorname{Ext}\nolimits_{kG}^{nd}(M,M)$ is the class of
the cocycle $\theta \otimes 1: \Omega^{nd}(k) \otimes M \to k \otimes M$,
and all representatives of this class define the same element of
$\operatorname{Ext}\nolimits_{kG}^{nd}(M,M)$.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:codim1}]
We now use the fact that $\operatorname{Ext}\nolimits_{kG}^*(M,N)$ is finitely generated as a
module over $\HH{*}{G}{k} \cong \operatorname{Ext}\nolimits_{kG}^*(k,k)$. Let $\gamma_1, \dots,
\gamma_s$ be a set of homogeneous generators.
If $A = \sum_{n\geq 0} \operatorname{Ext}\nolimits_{kG}^{nd}(k,k)$, then by elementary commutative
algebra $\HH{*}{G}{k}$ is finitely generated over $A$.
Suppose that $\beta_1, \dots, \beta_r$ is a set of homogeneous generators.
Let $B = \sum_{n\geq 0} \operatorname{Ext}\nolimits_{kG}^{nd}(M,N)$. If $\theta \in B$ is a
homogeneous element then $\theta = \sum_{i=1}^s \gamma_i\mu_i$ for
$\mu_i \in \HH{*}{G}{k}$. But then, for each $i$,
$\mu_i = \sum_{j=1}^r \beta_j\alpha_{ij}$, for $\alpha_{ij} \in A$.
Hence,
\[
\theta = \sum_{i=1}^{s}\sum_{j=1}^{r} \gamma_i\beta_j\alpha_{ij}.
\]
That is, we see that the products $\gamma_i\beta_j$ generate $B$ as a module
over $A$. Note that we really need only consider those whose
degree is a multiple of $d = \text{Degree}(\zeta)$.
Now we have that $\operatorname{Hom}\nolimits_{{\mathcal{C}}_V}(M, N)$ is generated by elements having
the form $\gamma_i\beta_j\zeta^{-r}$, where
$rd = \text{Degree}(\gamma_i\beta_j)$.
\end{proof}
In the other direction we have the following. The example is far
from general, but perhaps the reader can see how other examples can
be constructed.
\begin{thm} \label{thm:nofingen}
Suppose that $V \subset V_G(k)$ is a closed subvariety such that there
is an elementary abelian $p$-subgroup $E$ with
\[
\operatorname{res}\nolimits_{G,E}^*(V_E(k)) \cap V
\]
a nonempty finite set of closed points. Assume that $\vert E \vert \geq p^3$
and that $E$ has a subgroup $F$ with $\vert F \vert = p^2$ and
$\operatorname{res}\nolimits_{G,F}^*(V_F(k)) \cap V \neq \emptyset$. Let $M = k_F^{\uparrow G}
= kG \otimes_{kF} k_F$ be the induced module.
Then $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(M)$ is not finitely generated as a module
over $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(k)$.
\end{thm}
\begin{proof}
First we note that, in the category ${\mathcal{C}}_V$, $M \cong {\mathcal{F}}_V \otimes M$.
By Frobenius Reciprocity,
\[
{\mathcal{F}}_V \otimes M \cong {\mathcal{F}}_V \otimes k_F^{\uparrow G} \cong
(({\mathcal{F}}_V)_{\downarrow F})^{\uparrow G}
\]
By Lemma \ref{lem:restr-idem}, $({\mathcal{F}}_V)_{\downarrow F} \cong
{\mathcal{F}}_{V^\prime}$ in the stable category where
$V^\prime = (\operatorname{res}\nolimits_{G,F}^*)^{-1}(V)$. From the hypothesis, we know
that $V^\prime$ consists of a finite set of closed points and is
not equal to $V_F(k)$.
By the Eckmann-Shapiro Lemma, we have the usual adjointness:
\[
\operatorname{Hom}\nolimitsul_{kG}({\mathcal{F}}_V \otimes k_F^{\uparrow G}, {\mathcal{F}}_V \otimes k_F^{\uparrow G})
\cong
\operatorname{Hom}\nolimitsul_{kF}(({\mathcal{F}}_V)_{\downarrow F},
({\mathcal{F}}_V \otimes k_F^{\uparrow G})_{\downarrow F})
\]
\[
\cong
\operatorname{Hom}\nolimitsul_{kF}(({\mathcal{F}}_{V^\prime}),
(({\mathcal{F}}_{V^\prime})^{\uparrow G})_{\downarrow F})
\]
Now notice that $(({\mathcal{F}}_{V^\prime})^{\uparrow G})_{\downarrow F}$
has a direct summand isomorphic to ${\mathcal{F}}_{V^\prime}$. That is,
\[
({\mathcal{F}}_{V^\prime})^{\uparrow G} \cong \sum_{g \in G/F} g \otimes {\mathcal{F}}_{V^\prime}
\]
as $k$-vector spaces. Here, the sum is over a complete set of left
coset representatives of $F$ in $G$.
The subspace $1 \otimes {\mathcal{F}}_{V^\prime}$
is a $kF$-submodule and a direct summand.
This also follows from the Mackey Theorem. The
implication is that $\operatorname{Hom}\nolimitsul_{kF}(({\mathcal{F}}_{V^\prime}),
(({\mathcal{F}}_{V^\prime})^{\uparrow G})_{\downarrow F})$ has a direct summand
isomorphic to $\operatorname{End}\nolimitsul_{kF}({\mathcal{F}}_{V^\prime})$. We have seen in earlier
sections of this paper that this has infinite $k$-dimension.
The action of $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(k)$ on $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(M)$ is given by
\[
\xymatrix{
\operatorname{Hom}\nolimitsul_{kG}({\mathcal{F}}_V, {\mathcal{F}}_V) \otimes
\operatorname{Hom}\nolimitsul_{kG}({\mathcal{F}}_V \otimes k_F^{\uparrow G}, {\mathcal{F}}_V \otimes k_F^{\uparrow G})
\ar[d] \\
\operatorname{Hom}\nolimitsul_{kF}(({\mathcal{F}}_V)_{\downarrow F}, ({\mathcal{F}}_V)_{\downarrow F}) \otimes
\operatorname{Hom}\nolimitsul_{kF}(({\mathcal{F}}_V)_{\downarrow F},
({\mathcal{F}}_V \otimes k_F^{\uparrow G})_{\downarrow F})
\ar[d] \\
\operatorname{Hom}\nolimitsul_{kF}(({\mathcal{F}}_V)_{\downarrow F},
({\mathcal{F}}_V \otimes k_F^{\uparrow G})_{\downarrow F})
}
\]
where the first arrow is the isomorphism given by the Eckmann-Shapiro
Lemma and the second is composition. The Eckmann-Shapiro Lemma is easily
seen to hold in the stable category.
The main point of the proof is that
in applying the Lemma, the action of $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(k)$ factors through
the restriction map to $kF$. However, the restriction map is transitive,
and hence must factor through the restriction to $kE$. By Theorem
\ref{thm:rest02}, the restriction of the maximal ideal ${\mathcal{I}}$ in
$\operatorname{End}\nolimits_{{\mathcal{C}}_V}(k)$ is zero. That is, the image of the restriction
of $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(k) = \operatorname{Hom}\nolimitsul_{kG}({\mathcal{F}}_V, {\mathcal{F}}_V)$ to
$\operatorname{Hom}\nolimitsul_{kF}(({\mathcal{F}}_V)_{\downarrow F}, ({\mathcal{F}}_V)_{\downarrow F})$
is the identity subring $k$.
It follows from the above that in order for
$\operatorname{End}\nolimits_{{\mathcal{C}}_V}(M)$ to be finitely generated over $\operatorname{End}\nolimits_{{\mathcal{C}}_V}(k)$,
it must be finite dimensional. However, we have already noted that
this is not the case.
\end{proof}
\section{Examples} \label{sec:exm}
We end the paper with a couple of examples and a theorem on the structure
of the idempotent modules. For the first example and most of the section
suppose that $G = SL_2(p^n)$ for $n > 2$, and let $k$ be an algebraically
closed field of characteristic $p$.
Let $a$ be a generator for the multiplicative group ${\mathbb F}_{p^n}^\times$.
The Borel subgroup $B$ of $G$ is generated by the elements
\[
t = \begin{bmatrix} a & 0 \\ 0 & a^{-1} \end{bmatrix} \quad \text{and} \quad
x_i = \begin{bmatrix} 1 & a^i \\ 0 & 1 \end{bmatrix} \quad \text{for} \quad
i = 0, \dots, n-1.
\]
Then $S = \langle x_0, \dots, x_{n-1} \rangle$ is a Sylow $p$-subgroup and
$B$ is its normalizer in $G$.
The variety $V_S(k) \cong {\mathbb P}^{n-1}$, projective $(n-1)$-space. The group
$B$ acts on $S$ by conjugation and hence also on $V_S(k)$. The action of
$T = \langle t \rangle$ is given by the relation
\[
\begin{bmatrix} b & 0 \\ 0 & b^{-1} \end{bmatrix}
\begin{bmatrix} 1 & u \\ 0 & 1 \end{bmatrix}
\begin{bmatrix} b^{-1} & 0 \\ 0 & b \end{bmatrix} \quad
= \quad \begin{bmatrix} 1 & ub^2 \\ 0 & 1 \end{bmatrix}
\]
for $u$ in ${\mathbb F}_{p^n}$ and $b$ in ${\mathbb F}_{p^n}^\times$. The thing to notice
is that if $b^2$ is in the prime field ${\mathbb F}_p$, then this element of $T$
operates on $S$ (viewed as an ${\mathbb F}_p$-vector space $S \cong ({\mathbb Z}/(p))^n$)
by a scalar matrix with diagonal entries equal to $b^2$. This implies that
the element of $T$ acts trivially on the projective space $V_S(k)$. With
this in mind, let
\[
d \ = \ \begin{cases} p-1 & \text{if $p=2$ or $n$ is odd}, \\
2(p-1) & \text{otherwise}. \end{cases}
\]
It is easily checked that $d$ is the order of the subgroup of $T$ that
acts trivially on $V_S(k)$. Let $m = (p^n-1)/d$, and let $D$ be the
subgroup of $B$ generated by $c = t^{m}$ and $S$.
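To verify the displayed formula for $d$: the element $t^j$, corresponding to
$b = a^j$, acts trivially on $V_S(k)$ precisely when $b^2 \in {\mathbb F}_p$
(the computation above gives one direction; conversely, an ${\mathbb F}_p$-linear
map of $S$ that fixes every point of the projective space is a scalar).
If $p = 2$, the squaring map on ${\mathbb F}_{p^n}^\times$ is a bijection and
there are exactly $p-1$ such elements $b$. If $p$ is odd, the squaring map is
two-to-one onto the subgroup of squares of ${\mathbb F}_{p^n}^\times$, and
${\mathbb F}_p^\times$ is contained in that subgroup exactly when
$1 + p + \cdots + p^{n-1}$ is even, that is, when $n$ is even. This gives
$2(p-1)$ such elements when $p$ is odd and $n$ is even, and $p-1$ otherwise.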
Choose $W$ to be any subvariety of $V_S(k) = V_D(k)$
that consists of a single point whose
stabilizer in $T = \langle t \rangle$ is generated by $c$.
Let $V = \operatorname{res}\nolimits_{B,S}^*(W)$. Then the inverse image of $V$ under the
restriction map $\operatorname{res}\nolimits_{B, S}^*$ is the union of the points in the orbit
of $W$ under the action of $T$. Let $V = V_0, \dots, V_{m-1}$ be the
images of $W$ under this action. Let ${\mathcal{F}}_V$ be the idempotent
$kD$-module corresponding to $V$.
The induced module ${\mathcal{F}}_V^{\uparrow B} = kB \otimes_{kD} {\mathcal{F}}_V$
has the form $\sum_{j=0}^{m-1} {\mathcal{F}}_{V_j}$ on restriction to $D$. We may
assume that ${\mathcal{F}}_{V_j} = t^{j} \otimes {\mathcal{F}}_V$ in this context. Thus we
have maps $t^j \otimes \tau_V: t^j \otimes k \to t^j \otimes {\mathcal{F}}_V$.
That is, $T$ acts on this system and also acts on the pushout that is
obtained by identifying the images of the maps $t^j \otimes \tau_V$.
Explicitly, let $N$ be the submodule of
$k_D^{\uparrow B} \cong kB \otimes_{kD} k$ generated by
$1 \otimes 1 - t \otimes 1$. This is a $kB$-submodule of dimension
$m-1$. Let $N^\prime$ be its image in ${\mathcal{F}}_V^{\uparrow B}$, that is
the submodule generated by $1 \otimes \tau_V(1) - t \otimes \tau_V(1)$.
Then we have a triangle
\[
\xymatrix{
{} \ar[r] & k_D^{\uparrow B}/N \ar[r] & {\mathcal{F}}_V^{\uparrow B}/N^\prime \ar[r] &
{\mathcal{F}}_V^{\uparrow B}/k_D^{\uparrow B} \ar[r] & {}
}
\]
Next, a check of the varieties can be done at the level of the Sylow
$p$-subgroup $S$. In particular, the variety of ${\mathcal{E}}_V$ is $\{V\}$, while
that of ${\mathcal{F}}_V$ is ${\mathcal{V}}_G(k) \setminus \{V\}$. Thus, by the tensor
product theorem ${\mathcal{E}}_V \otimes {\mathcal{F}}_V$ is projective, and zero in the stable
category. Tensoring with the triangle, we see that both
${\mathcal{E}}_V$ and ${\mathcal{F}}_V$ are idempotent
modules. It can be seen that the relevant universal properties of the
triangle also hold.
The endomorphism ring $\operatorname{End}\nolimitsul_{kB}({\mathcal{F}}_V)$ is the set of all elements of
$\operatorname{End}\nolimitsul_{kS}(({\mathcal{F}}_V)_{\downarrow S})$ that are stable under the action
of $T$. The identity element is certainly $T$-stable.
Let ${\mathcal{I}}$ be the maximal ideal in $\operatorname{End}\nolimitsul_{kS}(({\mathcal{F}}_V)_{\downarrow S})$.
Because, by Theorem
\ref{thm:elemain}, ${\mathcal{I}}^2 = \{0\}$, any element of ${\mathcal{I}}$ that is
invariant under $T$ is an orbit sum of the $T$-action. These elements form
a maximal ideal of codimension one, and the product of any two elements
in this ideal is zero.
The idempotent modules for $G$ can be obtained by using the fact that
$S$ is a TI-subgroup (trivial intersection). That is, for any element
$g \in G$ we have that $S \cap gSg^{-1}$ is $S$ if $g \in N_G(S) = B$ and
is $\{1\}$ otherwise. Then by the Mackey Theorem, we have that
\[
(({\mathcal{F}}_V)^{\uparrow G})_{\downarrow B} \cong {\mathcal{F}}_V \oplus P
\]
where $P$ is projective. Consequently, the induced module
$({\mathcal{F}}_V)^{\uparrow G}$ has a single nonprojective direct summand
that is ${\mathcal{F}}_{V^\prime}$ where $V^\prime = \operatorname{res}\nolimits_{G,B}^*(V)$.
It follows that $\operatorname{End}\nolimitsul_{kG}({\mathcal{F}}_{V^\prime})$ is isomorphic to
$\operatorname{End}\nolimitsul_{kB}({\mathcal{F}}_{V})$.
All of this has a sweeping generalization that is reminiscent of the work
in \cite{Bneuc} and \cite{Cind}.
For notation we say that if $S$ is an elementary
abelian $p$-subgroup of a group $G$, its diagonalizer $D = D_G(S)$ is
the subgroup of $N_G(S)$ consisting of all elements whose conjugation
action is by a scalar matrix on $S$, viewed as an ${\mathbb F}_p$-vector space
via its additive group structure. As in the above example, it is the subgroup
of elements of $N_G(S)$ that act trivially on $V_S(k)$.
\begin{thm} \label{thm:genex}
Suppose that $S$ is a normal elementary abelian $p$-subgroup of $G$ and
that $D = D_G(S)$. Let $U$ be a subvariety of $V_S(k)$ consisting of
a single point and assume that $U$ is not contained in $\operatorname{res}\nolimits_{S, R}^*(V_R(k))$
for any proper subgroup $R$ of $S$. Let $W = \operatorname{res}\nolimits_{D,S}^*(U)$.
Let $V = \operatorname{res}\nolimits_{G, D}^*(W)$. Let $N$ be the kernel of the natural homomorphism
$\varphi: k_D^{\uparrow G} \to k$ given by $g \otimes 1 \mapsto 1$ for any
$g \in G$. Then we have a triangle
\[
\xymatrix{
{} \ar[r] & k_D^{\uparrow G}/N \ar[r]^{\tau} & {\mathcal{F}}_W^{\uparrow G}/\tau(N)
\ar[r] & \Omega^{-1}({\mathcal{E}}_W)^{\uparrow G} \ar[r] & {}
}
\]
where $\tau$ is the map induced on quotients by $1 \otimes \tau_W:
k_D^{\uparrow G} \to {\mathcal{F}}_W^{\uparrow G}$. In particular, we have that
\[
{\mathcal{F}}_V \cong {\mathcal{F}}_W^{\uparrow G}/\tau(N) \ \text{ and } \
{\mathcal{E}}_V \cong {\mathcal{E}}_W^{\uparrow G},
\]
and the triangle is the triangle of idempotent modules associated to $V$.
\end{thm}
\begin{proof}
This follows by an argument very similar to the one in the above example.
Note that, by an eigenvalue argument,
$D$ is precisely the subgroup of $G$ that fixes the point
$U$ in $V_S(k)$. The fact
that ${\mathcal{E}}_V$ is induced from a $kD$-module follows also from Theorem 1.5
of \cite{Bneuc}, which is proved in even greater generality.
\end{proof}
\begin{rem} \label{rem:exm}
If the group $G$ in the theorem satisfies the Hypothesis \ref{hyp}, then
Theorem \ref{thm:general} assures us that $\operatorname{End}\nolimitsul_{kG}({\mathcal{F}}_V)$ has a unique
maximal ideal ${\mathcal{I}}$ having codimension one and that ${\mathcal{I}}$ is nilpotent.
Unlike in the example, we may not assume that ${\mathcal{I}}^2 = \{0\}$. For an
example, let $p=2$, and $G = H \times S$ where $H$
is a semidihedral group and $S$ has order $2$. So if
$V = \operatorname{res}\nolimits_{G, S}^*(V_S(k))$,
then by Theorem 7.4 of \cite{Ctriv}, $\operatorname{End}\nolimitsul_{kG}({\mathcal{F}}_V)$ is the nonpositive
Tate cohomology ring of $H$ which by \cite{BC2} has nonzero products in its
maximal ideal. Note that in this particular case $D_G(S) = G$, so that
the above theorem says nothing new.
\end{rem}
\end{document}
\begin{document}
\title[Onsager's Conjecture for Conservation of Energy and Entropy]{
Onsager's conjecture in bounded domains for the conservation of entropy and other companion laws
}
\author{C. Bardos}
\address{\textit{Claude Bardos: }Laboratoire J.-L. Lions, BP187, 75252 Paris Cedex 05, France}
\email{[email protected]}
\author{P. Gwiazda}
\address{\textit{Piotr Gwiazda:} Institute of Mathematics, Polish Academy of Sciences, \'Sniadeckich 8, 00-656 Warszawa, Poland}
\email{[email protected]}
\author{A. \'Swierczewska-Gwiazda}
\address{\textit{Agnieszka \'Swierczewska-Gwiazda:} Institute of Applied Mathematics and Mechanics, University of Warsaw, Banacha 2, 02-097 Warszawa, Poland}
\email{[email protected]}
\author{E.S. Titi}
\address{\textit{Edriss S. Titi:} Department of Mathematics,
Texas A\&M University, 3368 TAMU,
College Station, TX 77843-3368, USA. Department of Applied Mathematics and Theoretical Physics, University of Cambridge,
Wilberforce Road, Cambridge CB3 0WA, UK. Department of Computer Science and Applied Mathematics, The Weizmann Institute of Science, Rehovot 76100, Israel.}
\email{[email protected] \, and \, [email protected]}
\author{E. Wiedemann}
\address{\textit{Emil Wiedemann:} Institute of Applied Analysis, Universit\"at Ulm, Helmholtzstr.\ 18, 89081 Ulm, Germany}
\email{[email protected]}
\begin{abstract}
We show that weak solutions of general conservation laws in bounded domains conserve their generalized entropy, and other respective companion laws, if they possess a certain fractional differentiability of order 1/3 in the interior of the domain, and if the normal component of the corresponding fluxes tends to zero as one approaches the boundary. This extends various recent results of the authors.
\end{abstract}
\noindent\textsc{Date:} February 12, 2019
\maketitle
{\bf Keywords:} {Onsager's conjecture, conservation laws, conservation of entropy.} \\
{\bf MSC Subject Classifications:} {35L65 (primary), 35D30, 35Q35 (secondary).}
\tableofcontents
\section{Introduction}
We consider very general systems of conservation laws of the form
\begin{equation}\label{conslawintro}
\operatorname{div}_X(G(U(X)))=0\quad\text{for $X\in\mathcal{X}$},
\end{equation}
where $\mathcal{X}\subset \mathbb{R}^{k+1}$ is open, $U:\mathcal{X}\to\mathcal O$ for some subset $\mathcal{O}\subset \mathbb{R}^n$, and $G:\mathcal{O}\to\mathbb{R}^{n\times(k+1)}$. It is shown in Section~\ref{applications} below that many important evolution equations of hyperbolic character can be written in this {general} form, including the incompressible and compressible Euler systems, the equations of inviscid magnetohydrodynamics, and the equations of elastodynamics.
{Many systems of the form (\ref{conslawintro}) come} with so-called \emph{companion laws} (see~\eqref{complaw} below), according to which sufficiently regular solutions satisfy one or several (sometimes infinitely many) additional conservation laws. Oftentimes, these companion laws can be interpreted as the conservation of energy or entropy. In particular, in the context of hyperbolic conservation laws, the notion of {\emph{ generalized entropy solution}} refers to these additional formally conserved quantities.
Of course a physical entropy can not, in general, be viewed as a conserved quantity; quite the opposite, it is (with the mathematical sign convention) typically \emph{dissipated}, i.e.\ it decreases in time.
In all examples of dissipation of energy or entropy, a certain degree of irregularity is required to violate the corresponding companion law. This is true for Scheffer's solutions and subsequent refinements in the case of the incompressible Euler equations, but also for hyperbolic conservation laws, where the mechanism of entropy dissipation by shock formation is classically known. Mathematically, the formal conservation of energy/entropy relies on the chain rule, which may not be valid for non-Lipschitz functions.
The question thus arises: what is the threshold regularity of the solutions above which companion laws are guaranteed to hold?
{In 1949 Onsager \cite{ON} related this issue to the Kolmogorov statistical theory of turbulence and proposed (what then became known as the Onsager conjecture) that in $3d$ for the solutions of the incompressible Euler equations this threshold should be H\"older regularity with critical exponent $\alpha=\frac1 3 $. These recent years have seen definite progress toward the resolution of this conjecture.}
{ On the one hand, after the forerunner contributions of Scheffer \cite{Scheffer} (1993) and Shnirelman \cite{shnirel1} (2000), and with the introduction by C.\ De Lellis and L.\ Sz\'ekelyhidi of the tools of {\it convex integration}, constant progress has been made, in particular with the contributions of Isett, and of Buckmaster, De Lellis, Sz\'ekelyhidi, and Vicol \cite{isett16, buckmasteretal17}. What has been shown there is the following: given any energy profile $e(t)$ and any $\alpha<\frac13\,,$ there exist space periodic solutions of the $3d$ incompressible Euler equations which belong to $C^\alpha(\mathbb{T}^3\times (0,T))$ and which satisfy the non-conservative energy relation:
$$
\int_{\mathbb{T}^3} |u(x,t)|^2\,dx = e(t)\,.
$$}
{On the other hand the first proof of a sufficient $\alpha > \frac13 $ regularity condition for the conservation of energy of weak solutions to the $3d$ Euler equations in the full space or subject to periodic boundary conditions goes back to 1994, cf.\ \cite{CET} (and \cite{GEY}, for the case when $\alpha > \frac12$). New refinements and extensions of these results to other systems are the object of the present contribution (see also \cite{BTWP218}).
First, for problems defined in an open set $\mathcal{X} \subset \mathbb{R}^{k+1}$, a refined Besov-VMO type space, introduced in \cite{FjWi} and denoted here by $\underline{B}_{3,\textit{VMO}}^{1/3}(\mathcal{X})$ (cf.~\eqref{VMOcondition-1}), is used, for which, with $\alpha>\frac13$, one has the following inclusions:}
{\begin{equation}
C^\alpha\subset B_{3,\infty}^{\alpha}\subset B_{3,c_0}^{1/3}\subset \underline{B}_{3,\textit{VMO}}^{1/3}
\subset B_{3,\infty}^{1/3} \,.\label{cetshvydkoy}
\end{equation} }
{ For the $3d$ Euler equations, conservation of energy was proven for solutions belonging to $B_{3,\infty}^{\alpha}$ by Constantin, E, and Titi \cite{CET}. Then this was extended to solutions belonging to $B_{3,c_0}^{1/3}$ by \cite{CCFS08},
where it was also shown that this result is almost optimal because one can construct divergence free vector fields $U\in B_{3,\infty}^{\frac 1 3}$ with a non zero energy flux. The conservation result of \cite{CCFS08} was recently improved to $\underline{B}_{3,\textit{VMO}}^{1/3}$ in~\cite{FjWi}. Hence $ \underline{B}_{3,\textit{VMO}}^{1/3}$ appears to be an almost optimal regularity class for the conservation of energy.
Moreover, the functions $ U\in \underline{B}_{3,\textit{VMO}}^{1/3}$ are characterized by a simple property in the physical space, which makes this space well adapted to a localized formulation of an extra conservation law and a good tool to deal with the case of domains with boundary, extending the results of \cite{BTW18, BTWP218, DN18, GwMiSw} and in particular relaxing the H\"older $\alpha>\frac13$ regularity hypothesis. At the end of the day the use of the Besov-$VMO$ space leads to a very concise proof of our main theorem (see formulas \eqref{p2} and \eqref{p1} in the proof of Theorem \ref{localthm}). }
{We show in Section~\ref{localtoglobal} that similar conditions as discovered in~\cite{BTW18, DN18} also guarantee validity of companion laws for general global conservation laws of the type~\eqref{conslawintro} (see Theorem~\ref{globalthm} below). That is, if the $ \underline{B}_{3,\textit{VMO}}^{1/3}$ condition~\eqref{VMOcondition-1} is satisfied in the interior of the domain (but not necessarily uniformly up to the boundary), and if the normal component of the flux tends to zero suitably as the boundary is approached, then the corresponding global companion law is satisfied.} The study of such more general nonlinearities was initiated in~\cite{FGSW}, where the isentropic compressible Euler equations (for which the density appears in a non-quadratic way) were studied, but only in the absence of physical boundaries. The general framework for conservation laws of the type~\eqref{conslawintro} was introduced in~\cite{GwMiSw}, and considered for bounded domains (but not in the optimal functional setting) in~\cite{BTWP218}.
{
In the final Section~\ref{applications}, we show how our general results can be applied to various important physical systems from fluid and solid mechanics. In particular, we demonstrate that our boundary assumption~\eqref{boundaryass} relates to the natural boundary conditions usually imposed on the respective equations, e.g.\ the impermeability (or slip) condition for inviscid fluids or the zero traction boundary condition for elastic solids. It is noteworthy that in these examples, the boundary conditions shown to ensure entropy/energy conservation are those that render the respective equations locally well-posed in classes of initial data with sufficient smoothness. }
The framework of this paper is, as explained, very general, but the $C^2$ assumption in Theorem~\ref{localthm} excludes some interesting degenerate situations, like the compressible Euler system with possible vacuum. Such degeneracies were dealt with in~\cite{AkWi, AkDeSkWi}, where it turned out that the analysis beyond $C^2$ nonlinearities is very delicate and can not be expected to be carried out in the generality of~\eqref{conslawintro}.
As a final remark, as emphasized above the space $\underline{B}_{3,\textit{VMO}}^{1/3}$ gives a universal frame for a sufficient condition for the validity of the extra conservation law. For incompressible models, however, it is only in the case of the $3d$ incompressible Euler equations that such a condition has been shown to be (almost) necessary. For genuinely nonlinear hyperbolic conservation laws, on the other hand, the optimality of the exponent $1/3$ is easily obtained from shock solutions, at least on the scale of $L^3$-based spaces: As observed in~\cite{CCFS08, FGSW}, the space $BV\cap L^\infty$, which contains shocks, embeds into the Besov space $B_{3,\infty}^{1/3}$, which is critical in our results.
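A one-dimensional computation illustrates this: for the shock profile $u(x)=\operatorname{sign}(x)$ on an interval around the origin, and for small $|z|$, the difference $|u(x+z)-u(x)|$ equals $2$ on a set of measure $|z|$ and vanishes elsewhere, so that
\begin{equation*}
\int |u(x+z)-u(x)|^3\,dx \;=\; 8\,|z| .
\end{equation*}
Hence $\sup_{z\neq 0}|z|^{-1}\int |u(x+z)-u(x)|^3\,dx$ is finite while the corresponding limit as $z\to0$ is not zero; such a profile belongs to $B_{3,\infty}^{1/3}$ but not to $B_{3,c_0}^{1/3}$.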
\section{Extension and Adapted Function Spaces}
{
We formulate a Besov-type condition stated in~\cite{FjWi} in a local version. For all $ {\mathcal{X}}'\subset\subset {\mathcal{X}}\subset \mathbb{R}^{k+1}$ and $0<\varepsilon<\frac{d({\mathcal{X}}',\partial {\mathcal{X}})}{2}$ let
\begin{equation}\label{VMOcondition-1}
\int_{\mathcal{X}'}\Xint-_{B_\varepsilon(X)}|U(X)-U(Y)|^3dYdX\le\omega_{{\mathcal{X}}'}(\varepsilon)\varepsilon
\end{equation}
where $\omega_{{\mathcal{X}}'}(\varepsilon)$ is a nonnegative function of $\varepsilon$ which tends to zero as $\varepsilon$ tends to zero. We write $U\in \underline{B}_{3,\textit{VMO}}^{1/3}(\mathcal{X})$ if $U \in L^3(\mathcal X)$ and it satisfies condition \eqref{VMOcondition-1}.
}
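As a simple illustration, consistent with the inclusions~\eqref{cetshvydkoy}: if $U\in C^\alpha(\mathcal{X})$ with $\alpha>\frac13$, then for $X\in\mathcal{X}'$ and $Y\in B_\varepsilon(X)$ one has $|U(X)-U(Y)|\le [U]_{C^\alpha}\,\varepsilon^\alpha$, where $[U]_{C^\alpha}$ denotes the H\"older seminorm, and therefore
\begin{equation*}
\int_{\mathcal{X}'}\Xint-_{B_\varepsilon(X)}|U(X)-U(Y)|^3dYdX
\le |\mathcal{X}'|\,[U]_{C^\alpha}^3\,\varepsilon^{3\alpha}
= \Big(|\mathcal{X}'|\,[U]_{C^\alpha}^3\,\varepsilon^{3\alpha-1}\Big)\,\varepsilon ,
\end{equation*}
so that condition \eqref{VMOcondition-1} holds with $\omega_{\mathcal{X}'}(\varepsilon)=|\mathcal{X}'|\,[U]_{C^\alpha}^3\,\varepsilon^{3\alpha-1}\to0$ as $\varepsilon\to0$.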
As explained in~\cite{FjWi}, this condition is more general than the critical Besov condition $U\in B_{3,c_0}^{1/3}(\mathcal{X})$ from~\cite{CCFS08}, as the latter reads
\begin{equation}\label{aver-cond}
\lim_{Z\to 0}\frac{1}{|Z|}\int_{\mathcal{X}}
|U(X)-U(X+Z)|^3dX=0,
\end{equation}
and it is easy to see that this implies~\eqref{VMOcondition-1}.
{
Indeed, rewrite~\eqref{aver-cond} as follows:
\begin{equation*}
\frac{1}{|Z|}\int_{\mathcal{X}}
|U(X)-U(X+Z)|^3dX\le \omega(Z)
\end{equation*}
with $\omega(Z)$ converging to zero as $Z\to0$. Fix $0<\varepsilon<1$; then obviously for all $|Z|\le \varepsilon$ also
\begin{equation*}
\frac{1}{\varepsilon}\int_{\mathcal{X}}
|U(X)-U(X+Z)|^3dX\le \omega(Z)
\end{equation*}
holds and we can integrate
\begin{equation*}
\frac{1}{\varepsilon}\Xint-_{B_\varepsilon(0)}\int_{\mathcal{X}}
|U(X)-U(X+Z)|^3dXdZ\le \Xint-_{B_\varepsilon(0)}\omega(Z)dZ.
\end{equation*}
Let us now define $\overline\omega(\varepsilon):= \Xint-_{B_\varepsilon(0)}\omega(Z)dZ$.
It is easy to verify that $\overline\omega(\varepsilon)$ vanishes as $\varepsilon\to0$: given $\delta>0$, choose $\rho>0$ such that $\omega(Z)\le\delta$ for $|Z|\le\rho$; then $\overline\omega(\varepsilon)\le\delta$ for all $\varepsilon\le\rho$.
Finally, using Fubini's theorem and a change of variables, we arrive at condition \eqref{VMOcondition-1}.
}
{Localization proofs, as they are used in this paper, involve the action of $U$, or of a function of $U$, on a given test function $\psi\in \mathcal D(\mathcal X)$. If the support of $\psi$ is strictly contained in an open set $\mathcal X'\subset \subset \mathcal X$, and if $0<\varepsilon <\varepsilon_0$ is chosen small enough, one may choose additional open sets $\mathcal X_1$, $\mathcal X_2$ satisfying the inclusions
\begin{equation}
\operatorname{supp}(\psi) \subset\subset \mathcal X'\subset \subset \mathcal X_2\subset \subset\mathcal X_1\subset \subset\mathcal X \label{multisupport}
\end{equation}
with $\mathcal X_2$ containing an $\varepsilon$-neighborhood $\mathcal X'_\varepsilon$ of $\mathcal X'$ and $\mathcal X_1$ containing an $\varepsilon$-neighborhood $\mathcal X_2^\varepsilon $ of $\mathcal X_2\,.$ Then, proceeding as in \cite{BTWP218} Section 2.1 (see also~\cite{BTW18}), introducing a function $I\in \mathcal D(\mathbb{R}^{k+1})$ with support in $\mathcal X_1$, equal to $1$ in $\mathcal X_2$, and with gradient supported in $ \mathcal X_1\backslash \overline{\mathcal X_2^\varepsilon}$, we define (and denote by $[T]$) the extension to $\mathcal D'(\mathbb{R}^{k+1} )$ of any distribution $T\in\mathcal D'(\mathcal X)$ by the formula:
\begin{equation}
\la [T], \psi\ra=\la T, I\psi\ra\,.\label{extension}
\end{equation}
This extension allows us, in particular, to make sense of mollifications of a given function on $\mathcal X$, when tested against $\psi\in \mathcal D(\mathcal X)$.
As in previous contributions a sequence of mollifiers will be used. They are defined as follows: }
Starting from a positive function $s\mapsto \eta(s) \in \mathcal D(\mathbb{R}^{k+1})$ with support in $|s|<1$ and total mass $\int\eta(s)ds=1$, we denote by $\eta_\varepsilon(X)$ the function
\begin{equation}
\eta_\varepsilon(X)= \frac1{\varepsilon^{k+1}}\eta\left(\frac{X}{\varepsilon}\right)
\end{equation}
and use the notation $S_\varepsilon$ for the mollification $\eta_\varepsilon\star S $ of a distribution $S
\in{\mathcal D}'(\mathbb{R}^{k+1})\,.$
General results proven below under the hypothesis (\ref {VMOcondition-1}) are almost direct consequences of the following lemmas:
\begin{lemma}\label{VBMOlemma}
Let $\mathcal{X}'\subset\subset\mathcal{X}$ and $\varepsilon>0$ so small that $\mathcal{X}$ contains an $\varepsilon$-neighbourhood of $\mathcal{X}'$ leading to the construction (\ref{multisupport}) and (\ref{extension}). Let $U\in \underline{B}_{3,\textit{VMO}}^{1/3}(\mathcal{X})$. Then, for some $\omega_{(U,\mathcal X')}:(0,1)\to\mathbb{R}^+$ such that $\liminf_{\varepsilon\to0}\omega_{(U,\mathcal X')}(\varepsilon)=0$, one has
\begin{equation}
\norm{D_X[U]_\varepsilon}_{L^3(\mathcal{X}')}\leq ( \omega _{(U,\mathcal X')} (\varepsilon))^{\frac 13}\varepsilon^{-2/3}.
\end{equation}
\end{lemma}
\begin{proof}
By Jensen's inequality, with
\begin{equation}
D_X\eta_\varepsilon(X)=
\frac 1\varepsilon
\frac1{\varepsilon^{k+1}}
(D_X\eta)\left(\frac X \varepsilon\right) \quad \hbox{and} \quad C= \left(\int_{\mathbb{R}^{k+1}}|D_X\eta (X)|\,dX\right)^2
\end{equation}
and one standard integration by parts we have:
\begin{equation}
\begin{aligned}
\norm{D_X[U]_\varepsilon}_{L^3(\mathcal{X}')}&=\left(\int_{\mathcal{X}'}\left|\int_{B_\varepsilon(X)}U(Y)D_X\eta_\varepsilon(X-Y)dY\right|^3dX\right)^{1/3}\\
&=\left(\int_{\mathcal{X}'}\left|\int_{B_\varepsilon(X)}(U(Y)-U(X))D_Y\eta_\varepsilon(X-Y)dY\right|^3dX\right)^{1/3}\\
&\leq C\varepsilon^{1/3}\varepsilon^{-1}\left(\frac{1}{\varepsilon}\int_{\mathcal{X}'}\Xint-_{B_\varepsilon(0)}\left|U(X)-U(X-Y)\right|^3dYdX\right)^{1/3}\\
&=( \omega _{(U,\mathcal X')} (\varepsilon))^{\frac 13} \varepsilon^{-2/3}.
\end{aligned}
\end{equation}
\end{proof}
\begin{corollary} Under the above hypothesis, with $B \in W^{1,\infty} (\mathcal O; \mathbb{R}^n)$ and $\psi \in \mathcal D(\mathcal X) $ one has:
\begin{equation}
\|D_X\big( B([U]_\varepsilon )(.)^T \psi(.)\big)\|_{L^3}\le C(B,\psi) ( \omega _{(U,\mathcal X')} (\varepsilon))^{\frac 13} \varepsilon^{-2/3}. \label{basic1}
\end{equation}
\end{corollary}
\begin{proof}
Write
$$D_X\big( B([U]_\varepsilon )(.)^T \psi(.)\big)= D_U B([U]_\varepsilon )D_X[U]_\varepsilon \,\psi + B([U]_\varepsilon )(.)^T D_X\psi $$
and apply the above estimates.
\end{proof}
\begin{lemma}\label{commestimate}
Let $\mathcal{X}'\subset\subset\mathcal{X}$ and $\varepsilon>0$ so small that $\mathcal{X}$ contains an $\varepsilon$-neighbourhood of $\mathcal{X}'$. Assume that $\mathcal{O}\subset\mathbb{R}^n$ is convex and $G\in C^2(\overline{\mathcal{O}};\mathbb{R}^{n\times(k+1)})$. Let $U\in \underline{B}_{3,\textit{VMO}}^{1/3}(\mathcal{X})$. Then
\begin{equation}
\norm{[G(U)]_\varepsilon-G([U]_\varepsilon)}_{L^{3/2}(\mathcal{X}')}\leq C \left(\int_{\mathcal{X}'}\Xint-_{B_\varepsilon(X)}|U(X)-U(Y)|^3dYdX\right)^{2/3}, \label{basic2}
\end{equation}
where $C$ depends only on $\eta$, the dimension of $\mathcal{O}$, and $ \|D_U^2G\|_{L^\infty(\mathcal O)}$ (but not on $U$ or $\varepsilon$).
\end{lemma}
\begin{proof}
With minor improvements we follow the proofs of Lemma 3.1 of \cite{GwMiSw} and of Lemma 2.3 of \cite{BTWP218}. Starting from the pointwise estimate (16) in Section 2 of \cite {BTWP218}, we immediately obtain for $X\in \mathcal X'$
\begin{equation}
\begin{aligned}
&\big|[G(U)]_\varepsilon(X)-G([U]_\varepsilon)(X)\big|^{\frac 32}\\
&\quad \quad \quad \quad \leq C \Bigg(\int_{\mathbb{R}^{k+1}} |U(X-Y) -U (X)|^2\eta_\varepsilon(Y)dY\Bigg)^{\frac 32}
\end{aligned}
\end{equation}
for a constant as stated. Then with the H\"older inequality one has
\begin{equation}
\begin{aligned}
&\int_{\mathcal X'} dX\Bigg(\int_{\mathbb{R}^{k+1}} |U(X-Y) -U (X)|^2\eta_\varepsilon(Y)dY\Bigg)^{\frac 32} \\
&\le \int_{\mathcal{X}'}dX \int_{\mathbb{R}^{k+1}} |U(X-Y) -U (X)|^3\eta_\varepsilon(Y)dY
\end{aligned}
\end{equation}
and therefore:
\begin{equation}
\begin{aligned}
& \Bigg( \int_{\mathcal X'} \big|[G(U)]_\varepsilon(X)-G([U]_\varepsilon)(X)\big|^{\frac 32}dX\Bigg)^{\frac 2 3}\\
&\le C\Bigg( \int_{\mathcal{X}'}dX \int_{\mathbb{R}^{k+1}} |U(X-Y) -U (X)|^3\eta_\varepsilon(Y)dY \Bigg)^{\frac 23}\\
&\le C\left(\int_{\mathcal{X}'}\Xint-_{B_\varepsilon(X)}|U(X)-U(Y)|^3dYdX\right)^{2/3}\,.\label{basic3}
\end{aligned}
\end{equation}
\end{proof}
\begin{remark} \label{r42} Since in the formulas (\ref{basic2}) and then (\ref{basic3}) only the second derivative of $U\mapsto G(U)$ appears, such formulas are trivial when this function is affine. Therefore the
Corollaries 4.1 -- 4.3 of~\cite{GwMiSw} transfer directly to the present situation, giving the following results which will be used in Section~\ref{applications}.
\begin{itemize}
\item If $G=(G_1,\ldots,G_s,G_{s+1},\ldots,G_k)$ for \emph{affine} functions $G_1,\ldots,G_s$, and $\mathcal{X}=\mathcal{Y}\times\mathcal{Z}$ for some $\mathcal{Y}\subset\mathbb{R}^s$ and $\mathcal{Z}\subset\mathbb{R}^{k+1-s}$, then in Theorem~\ref{localthm} below it suffices to assume
\begin{equation*}
\liminf_{\varepsilon\to0}\frac{1}{\varepsilon}\int_{\mathcal{Y}_1}\int_{\mathcal{Z}_1}\Xint-_{B_\varepsilon(Z)\cap\mathcal{Z}_1}|U(X,Y)-U(X,Z)|^3dYdZdX=0
\end{equation*}
for all $\mathcal{Y}_1\subset\subset\mathcal{Y}$, $\mathcal{Z}_1\subset\subset\mathcal{Z}$. One should keep in mind the situation $\mathcal{Y}=(0,T)$, $\mathcal{Z}=\Omega$, and $A=\operatorname{id}$ in the terminology of the next section.
\item If $U=(V_1,V_2)$ (with $V_1=(U_1,\ldots,U_s)$, $V_2=(U_{s+1},\ldots,U_n)$), if $B$ is independent of $V_1$, if $G=G_1(V_1)+G_2(V_2)$, and if $G_1$ is linear, then for $U_1,\ldots,U_s$ it suffices to assume $U_1,\ldots,U_s\in L^3_{loc}(\mathcal{X})$ in Theorem~\ref{localthm}.
\item If the $j$-th row of $G$ is affine, then Theorem~\ref{localthm} remains true even if $B_j$ is only locally Lipschitz in $\mathcal{O}$.
\end{itemize}
\end{remark}
\section{Companion Laws at Critical Regularity}\label{companion}
To apply the previous lemma to systems of (not necessarily hyperbolic) conservation laws in the full generality of \cite{GwMiSw}, we consider the problem:
\begin{equation}\label{conslaw}
\Div_X(G(U(X)))=0\quad\text{for $X\in\mathcal{X}$.}
\end{equation}
As in \cite{GwMiSw}, we assume $\mathcal{X}\subset \mathbb{R}^{k+1}$ is open, $U:\mathcal{X}\to\mathcal O$ for some subset $\mathcal{O}\subset \mathbb{R}^n$, and $G:\mathcal{O}\to\mathbb{R}^{n\times(k+1)}$. In all our applications in Section~\ref{applications} below, $X=(x,t)$ will be interpreted as a point of space-time.
Following \cite{Daf}, we will consider \emph{companion laws} of the form
\begin{equation}\label{complaw}
\Div_X(Q(U(X)))=0\quad\text{for $X\in\mathcal{X}$,}
\end{equation}
where $Q:\mathcal{O}\to\mathbb{R}^{k+1}$ is a smooth function such that there exists another smooth function $B:\mathcal{O}\to\mathbb{R}^{n}$ satisfying the relation
\begin{equation}\label{Qdef}
D_UQ_j(U)=B(U)D_UG_j(U)\quad\text{for all $U\in\mathcal{O}$, $j\in\{1,\ldots,k+1\}$.}
\end{equation}
Note that in the context of hyperbolic conservation laws, the definition of $Q$ corresponds to the well-known notion of \emph{entropy--entropy-flux pairs}, whereas the companion law \eqref{complaw} can be interpreted as the usual \emph{entropy equality}. The companion law is seen to be true by virtue of the chain rule as long as the latter is applicable, e.g.\ for a solution $U$ of~\eqref{conslaw} which is in $C^1$ (or Lipschitz). However, the companion law may fail to be true for \emph{weak solutions}, i.e.\ vector fields $U$ that satisfy
\begin{equation}
\int_{\mathcal{X}}G(U(X)):D_X\psi(X)dX=0
\end{equation}
for every $\psi\in C_c^1(\mathcal{X};\mathbb{R}^n)$. Note carefully that the definition of weak solution is purely \emph{local} in the sense that $U$ is a weak solution on $\mathcal{X}$ if and only if it is a weak solution on every subset $\mathcal{X}'\subset\subset\mathcal{X}$. In particular, no boundary condition is included in this formulation. A weak solution of the companion law \eqref{complaw} is defined analogously.
Then we have the following improvements of Theorem 1.1 in \cite{GwMiSw} and Theorem 2.1 in \cite{BTWP218}:
{\begin{theorem}\label{localthm}
Assume that $\mathcal{O}\subset\mathbb{R}^n$ is convex, $G\in C^2(\overline{\mathcal{O}};\mathbb{R}^{n\times(k+1)})$, $Q\in C^1(\mathcal{O};\mathbb{R}^{k+1})$ and $B\in C^{1}(\mathcal{O};\mathbb{R}^{ n})$ and the following conditions hold:
\begin{equation*}
\label{eq:assumpt_convex}
\begin{aligned}
D_U{B}\in L^{\infty}(\mathcal{O};\mathbb{R}^{ n}), \quad|B(V)|\le C(1+|V|),
\\
|Q(V)|\leq C(1+|V|^3)\ \mbox{for all $V\in\mathcal{O}$},
\\
\sup_{i,j \in\{1,\dots,n\}}\|\partial_{U_i}\partial _{U_j} G(U)\|_{L^\infty(\mathcal{O};\,\mathbb{M}^{n\times (k+1)})}<+\infty
\end{aligned}
\end{equation*}
for some constant $C$ independent of $V$, and \eqref{Qdef} holds.
If $U$ is a weak solution of \eqref{conslaw} such that $U\in \underline{B}_{3,\textit{VMO}}^{1/3}(\mathcal{X}_1)$ for every $\mathcal{X}_1\subset\subset \mathcal{X}$, then $U$ is also a weak solution of the companion law~\eqref{complaw}.
\end{theorem}}
{\begin{proof}
Following the definition of the derivative in the sense of distributions, we consider a test function $\psi\in \mathcal D (\mathcal{X})$ supported in $\mathcal{X}'\subset\subset\mathcal{X}$. Then, using the construction described by the formulas (\ref{multisupport}) and (\ref{extension}) and Lebesgue's dominated convergence theorem, we first write:
\begin{equation}
-\la \Div_X Q(U), \psi \ra = \lim_{\varepsilon \rightarrow 0} \int_{\mathbb{R}^{k+1}} Q([U]_\varepsilon)\cdot D_X\psi dX. \label{p1}
\end{equation}
Then since $[U]_\varepsilon \in \mathcal D(\mathbb{R}^{k+1})$ one has:
\begin{equation}
\begin{aligned}
& \int_{\mathbb{R}^{k+1}} Q([U]_\varepsilon)\cdot D_X\psi dX = -\int_{\mathbb{R}^{k+1}}\Div_X Q([U]_\varepsilon) \cdot \psi(X) dX \\
&= -\int_{\mathbb{R}^{k+1}}B([U]_\varepsilon ) D_UG([U]_\varepsilon) D_X ([U]_\varepsilon) \cdot \psi(X) dX\\
&=-\int_{\mathbb{R}^{k+1}} D_UG([U]_\varepsilon)D_X ([U]_\varepsilon) \cdot \big(B([U]_\varepsilon )(X)^T \psi(X)\big) dX\\
&=- \int_{\mathbb{R}^{k+1}} D_X(G([U]_\varepsilon)) \cdot \big(B([U]_\varepsilon )(X)^T \psi(X)\big) dX\\
&=\int_{\mathbb{R}^{k+1}} (G([U]_\varepsilon) -G(U)) \cdot D_X\big( B([U]_\varepsilon )(X)^T \psi(X)\big) dX\\
&+ \la G(U), D_X\big( B([U]_\varepsilon )(X)^T \psi(X)\big) \ra.
\label{p2}
\end{aligned}
\end{equation}
Since $\Div_X G(U) =0$ in $\mathcal D'(\mathcal X)$, the last term of (\ref{p2}) is equal to $0\,.$ Then from (\ref{p1}) and (\ref{p2}) with (\ref{basic1}) and (\ref{basic2}) we have
\begin{equation}
\begin{aligned}
&|\la \Div_X Q(U), \psi \ra| \le \lim_{\varepsilon\rightarrow 0} \left|\int_{\mathbb{R}^{k+1}} (G([U]_\varepsilon) -G(U)) \cdot D_X\big( B([U]_\varepsilon )(X)^T \psi(X)\big) dX\right|\\
&\le \|G([U]_\varepsilon) -G(U)\|_{L^{\frac32}(\mathcal{X}')}\|D_X\big( B([U]_\varepsilon ) ^T \psi \big)\|_{L^{3}} \le C\,\omega_{(U,\mathcal X')}(\varepsilon)\,. \end{aligned}
\end{equation}
Hence in $\mathcal D'(\mathcal X)$ one has
\begin{equation}
\Div_X (Q(U) ) =0\,.
\end{equation}
\end{proof}}
\section{From Local to Global Companion Laws}\label{localtoglobal}
We now specialise to the case where $\mathcal{X}=\Omega\times(0,T)$ for some domain $\Omega\subset\mathbb{R}^k$, and we write $X=(x,t)$. Then $G$ can be written in the form
\begin{equation}\label{FA}
G(U)=(F(U), A(U))
\end{equation}
for some $A:\mathcal{O}\to \mathbb{R}^n$ and $F:\mathcal{O}\to\mathbb{R}^{n\times k}$, so that the conservation law \eqref{conslaw} reads as
\begin{equation}
\partial_t (A(U(x,t)))+\Div_x F(U(x,t))=0,
\end{equation}
or, in weak formulation,
\begin{equation}\label{timeweak}
\int_0^T\int_\Omega \big(\partial_t\psi(x,t) \cdot A(U(x,t))+\nabla_x\psi(x,t): F(U(x,t))\big)\,dx\,dt=0
\end{equation}
for any $\psi\in C_c^1(\Omega\times(0,T);\mathbb{R}^n)$.
Setting $Q(U)=(q(U),\eta(U))$ for $q:\mathcal{O}\to \mathbb{R}^{k}$ and $\eta:\mathcal{O}\to \mathbb{R}$, we accordingly consider companion laws of the form
\begin{equation}\label{timecompanion}
\partial_t (\eta(U(x,t))) +\Div_x q(U(x,t))=0,
\end{equation}
where $\eta$ and $q$ satisfy
\begin{equation}\label{compat}
\begin{aligned}
D_U\eta(U)&=B(U)D_U A(U),\\
D_U q_j(U)&=B(U)D_U F_j(U)\quad\text{for $j=1,\ldots,k$}
\end{aligned}
\end{equation}
for some smooth map $B:\mathcal{O}\to \mathbb{R}^{n}$.
{In the following, we assume that $\Omega\subset \mathbb{R}^k$ is an open set with a bounded Lipschitz boundary $\partial \Omega$, and therefore the exterior normal to the boundary denoted by $n(x)$ is defined almost everywhere. We denote by $d(x,\partial\Omega)$ the distance of a point $x\in \Omega$ to $\partial \Omega$.
Then we observe the existence of a (small enough) $\varepsilon_0$ with the following properties:
For $ d(x,\partial \Omega) \le \varepsilon_0$ the function $x\mapsto d(x,\partial\Omega)$ belongs to $W^{1,\infty}(\Omega)$ and there exists, for almost every such $x$, a unique point $\hat x =\sigma(x) \in \partial\Omega $ such that:
\begin{equation}
d(x,\partial \Omega)<\varepsilon_0 \ \Rightarrow\ d(x,\partial\Omega) = |x-\sigma (x)| \quad \hbox{and} \quad \nabla_x d(x,\partial\Omega) =-n(\sigma (x))\,.
\end{equation}}
Choosing a test function of the form $\psi(x,t)=\chi(t)\varphi(x)$ for a generic $\chi$ and an approximation $\varphi$ of the indicator function of $\Omega$, we can then impose similar conditions on the boundary behavior of the fluxes and use similar arguments to those developed in~\cite{BTW18}, to pass from the local statement of Theorem~\ref{localthm} to a global one:
\begin{theorem}\label{globalthm}
Let $\mathcal{O}\subset \mathbb{R}^n$ convex, $A\in C^2(\overline{\mathcal{O}};\mathbb{R}^n)$, and $F\in C^2(\overline{\mathcal{O}};\mathbb{R}^{n\times k})$. Assume there exist $\eta\in C^1(\mathcal{O};\mathbb{R})$, $q\in C^1(\mathcal{O};\mathbb{R}^{ k})$, and $B\in C^{1}(\mathcal{O};\mathbb{R}^{ n})$ such that
\begin{equation*}
\begin{aligned}
D_U{B}\in L^{\infty}(\mathcal{O};\mathbb{R}^{ n}), \quad|B(V)|\le C(1+|V|),
\\
\sup_{i,j \in\{1,\dots,n\}}\|\partial_{U_i}\partial _{U_j} A(U)\|_{C(\mathcal{O};\,\mathbb{M}^{n\times (k+1)})}<+\infty, \\\sup_{i,j \in\{1,\dots,n\}}\|\partial_{U_i}\partial _{U_j} F(U)\|_{C(\mathcal{O};\,\mathbb{M}^{n\times (k+1)})}<+\infty
\end{aligned}
\end{equation*}
and
\begin{equation}
|\eta(V)|+|q(V)|\leq C(1+|V|^3)
\label{locest}
\end{equation}
for all $V\in\mathcal{O}$ and for some constant $C$ independent of $V$, and such that~\eqref{compat} holds.
If $U$ is a weak solution of~\eqref{timeweak} such that $U\in \underline{B}^{1/3}_{3,VMO}(\Omega_1\times(0,T))$ for every $\Omega_1\subset\subset\Omega$,
{ i.e.\ if
\begin{equation}
\begin{aligned}
& U \in L^3( \Omega_1\times(0,T))\,, \hbox{ and, with}\\
&\quad \omega(U, \varepsilon,\Omega_1,T)= \frac{1}{\varepsilon}\int_0^T\int_{{\Omega_1}}\Xint-_{B_\varepsilon(X)\cap{\Omega_1}}|U(X)-U(Y)|^3dYdX\,,\\
&\hbox{it satisfies}\quad \liminf_{\varepsilon\to0} \omega(U, \varepsilon,\Omega_1,T )=0\,,
\end{aligned}
\end{equation}}
and if
\begin{equation}\label{boundaryass}
\liminf_{\varepsilon\to0} \int_0^T\frac{1}{\varepsilon}\int_{\frac{\varepsilon}{4} \le d(x,\partial\Omega) \le \frac{\varepsilon}{2}} \big| q(U(x,t))\cdot n(\sigma(x)) \big |dxdt=0,
\end{equation}
then
\begin{equation}
\frac{d}{dt}\int_\Omega \eta(U(x,t))dx=0
\end{equation}
in the sense of distributions.
\end{theorem}
\begin{remark}\label{flutangent}
Assumption~\eqref{boundaryass} is satisfied, in particular, if $U$ is continuous near the boundary and satisfies the boundary condition $q(U(x,t))\cdot n(x)=0$ on $\partial\Omega$.
\end{remark}
{\begin{proof}
Let $\chi\in C_c^1(0,T)$ and, for $\varepsilon<\varepsilon_0$, set
\begin{equation}
\varphi^\varepsilon(x)=\varphi\left(\frac{d(x,\partial\Omega)}{\varepsilon}\right)
\end{equation}
for some nonnegative function $\varphi\in C^1((0,\infty))$ such that $\varphi\equiv0$ on $(0,\frac{1}{4}]$ and $\varphi\equiv1$ on $(\frac{1}{2},\infty)$. Then, by Theorem~\ref{localthm}, the companion law~\eqref{timecompanion} holds in the sense of distributions, so that in particular
\begin{equation}
\int_0^T\int_\Omega \varphi^\varepsilon(x)\chi'(t)\eta(U(x,t))dxdt+\int_0^T\int_\Omega \chi(t)\nabla_x\varphi^\varepsilon(x)\cdot q(U(x,t))dxdt=0.
\end{equation}
For the first integral, notice that $\varphi^\varepsilon\to1$ as $\varepsilon\to0$ pointwise in $\Omega$, so that the integral converges to
\begin{equation}
\int_0^T\int_\Omega \chi'(t)\eta(U(x,t))dxdt
\end{equation}
as $\varepsilon\to0$. For the second integral, we observe that
\begin{equation}
\begin{aligned}
&\hbox{for every}\,\, x \in \Omega\,\, \hbox{such that}\,\, d(x,\partial\Omega)\le \frac{\varepsilon}{4}\,\, \hbox{or}\,\, d(x,\partial\Omega)\ge \frac{\varepsilon}{2}, \,\, \hbox{one has}\,\, \nabla_x\varphi^\varepsilon(x)=0; \\
&\hbox{and for} \quad \frac{\varepsilon}{4} \le d(x,\partial\Omega) \le \frac{\varepsilon}{2}\quad \hbox{ one has} \quad \nabla_x\varphi^\varepsilon(x)=-\frac{1}{\varepsilon}\varphi'\left(\frac{d(x,\partial\Omega)}{\varepsilon}\right)
n(\sigma( x)).
\end{aligned}
\end{equation}
Therefore we can estimate
\begin{equation}
\begin{aligned}
\left|\int_0^T\int_\Omega \chi(t)\nabla_x\varphi^\varepsilon(x)\cdot q(U(x,t))dxdt\right|&\leq C\int_0^T|\chi(t)|\frac{1}{\varepsilon}\int_{\frac{\varepsilon}{4} \le d(x,\partial\Omega) \le \frac{\varepsilon}{2}}|q(U(x,t))\cdot n(\sigma(x))|dxdt\to 0\label{ldt}
\end{aligned}
\end{equation}
along a subsequence $\varepsilon_l\to0$, by virtue of assumption~\eqref{boundaryass}. In total, with the Lebesgue dominated convergence theorem applied to the right-hand side of (\ref{ldt}), we obtain
\begin{equation}
\int_0^T\int_\Omega \chi'(t)\eta(U(x,t))dxdt=0,
\end{equation}
as claimed.
\end{proof}}
\section{Applications to Conservation of Energy/Entropy}\label{applications}
\subsection{Incompressible Euler system}\label{incompEul}
Consider the system
\begin{align}
\label{eq:E1}
\partial_t v + \Div_x(v\otimes v) + \nabla_x p &= 0,\\ \label{eq:E2}
\Div_x v &= 0,
\end{align}
for an unknown vector field $v\colon \Omega\times[0,T] \to \mathbb{R}^n$
and scalar $p\colon \Omega\times[0,T] \to \mathbb{R}$.
For the variable $U=(v,p)$ we have $A(U)=(v,0)$, $F(U)=(v\otimes v+p\mathbb{I},v)$.
The entropy (which here is the kinetic energy density) is given by $\eta(U) = \frac 12 |v|^2$, and the flux by $q(U)=\left(\frac{|v|^2}{2}+p\right)v$.
Hence, assuming the usual slip boundary condition ${v}\cdot n=0$ on $\partial\Omega$, we have $q(U)\cdot n=0$ on $\partial\Omega$.
{
The function $B$ has the form $B(U)=(v, p-\frac{1}{2}|v|^2)$. It obviously does not have linear growth, so Theorems~\ref{localthm} and \ref{globalthm} cannot be applied directly. However, this problem concerns only the last component of the vector $B(U)=(B_1(U), B_2(U))$, with $B_2(U)=p-\frac{1}{2}|v|^2$. Notice that the last row of the flux $G(U)$ is linear (and even zero for $A(U)$), so no error terms are produced there when the mollification is applied.
For this reason we can prove the same statement without assuming linear growth of all the components of $B$.
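For the reader's convenience we record the elementary verification of the compatibility relations~\eqref{compat} in this case (a check added here; it is implicit in the cited works). With $U=(v,p)$, $A(U)=(v,0)$ and $B(U)=(v,\,p-\tfrac12|v|^2)$, one has $D_U\eta=(v^T,0)=B\,D_UA$, since $D_UA$ is the identity on the $v$-components and annihilates the $p$-direction; moreover, writing $F_j(U)=(v\,v_j+p\,e_j,\;v_j)$ for the $j$-th spatial column of $F$,
\begin{equation*}
D_Uq_j=\big(v_j\,v^T+(\tfrac12|v|^2+p)\,e_j^T,\;v_j\big)=B(U)\,D_UF_j .
\end{equation*}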
Observe also that Remark~\ref{r42} allows us to relax the condition on the pressure. Indeed, since $G(U)=(F(U),A(U))$ with $A(U)$ an affine function and ${\mathcal X}=\Omega\times[0,T]$, it is enough to require that
\begin{equation*}
\liminf_{\varepsilon\to0}\frac{1}{\varepsilon}\int_{I}\int_{\Omega'}\Xint-_{B_\varepsilon(x)}|U(t,x)-U(t,y)|^3dydx dt=0
\end{equation*}
for all $I\subset\subset [0,T]$, $\Omega'\subset\subset \Omega$, which corresponds to
$U\in L^3(0,T;\underline B^{1/3}_{3, VMO}(\Omega))$.
Moreover, we can write $U=(v,p)$ and notice that $B$ is independent of $p$. In addition
\begin{equation*}
G=\left(\begin{array}{cc}v^T&v\otimes v\\0&v\end{array}\right)+\left(\begin{array}{cc}0
&p{\mathbb I}\\0&0\end{array}\right).
\end{equation*}
Thus it is enough to assume that $p\in L^3_{loc}({\mathcal X}).$
Taking this into account, note that our Theorem~\ref{globalthm} yields a result similar to the one in~\cite{BT18} or in~\cite{BTW18}.
{However in both articles the elliptic equation
\begin{equation}
-\Delta p=\Div\Div(v\otimes v) \label{laplace}
\end{equation}
was used to relax even further the integrability assumption on the pressure. In \cite{BT18}, the global H\"older regularity of the pressure was deduced from the global H\"older regularity of the velocity $v$, while in \cite{BTW18} a local result was first proven under a much weaker assumption on the pressure, $p\in L^{3/2}_tH^{-\beta}_x$ for some $\beta>0$, which guarantees the local regularity of the pressure, and then the global energy conservation was derived as above.}
{
For completeness, let us recall the corresponding result from~\cite{BTW18}:
\begin{theorem}[Th. 4.1 from~\cite{BTW18}]\label{CETloc}
Let $(v,p) \in L^q((0,T); L^2(\Omega ))\times \mathcal D'( \Omega\times(0,T) )$, for some $q\in [1,\infty]$, be a weak solution of the Euler equations satisfying the following hypotheses:
\begin{enumerate}
\item For some $\varepsilon_0>0$, small enough,
\begin{subequations}
\begin{equation}
p\in L^{3/2} ((0,T); H^{-\beta}(V_{\varepsilon_0}))\,, \quad \hbox{with}\quad \beta <\infty \,,\label{forlebesque1}
\end{equation}
where $V_{\varepsilon_0}=\{ x\in \Omega\,: d(x,\partial \Omega)<\varepsilon_0\}$\,;
\item
\begin{equation}
\lim_{\varepsilon \rightarrow 0} \int_0^T\frac{1}{\varepsilon}\int_{\frac{\varepsilon}{4} \le d(x,\partial\Omega) \le \frac{\varepsilon}{2}}\left|\left(\frac {|v|^2}2 + p\right) v(t,x)\cdot n(\sigma( x))\right|\, dxdt =0\,; \label{forlebesgue2}
\end{equation}
\end{subequations}
\item For every open set $\tilde Q = \tilde\Omega \times(t_1,t_2)\subset\subset \Omega\times(0,T) $ there exists $\alpha(\tilde Q)>1/3$ such that $v$~satisfies:
\begin{equation}
\int_{t_1}^{t_2} \|v(.,t)\|^3_{ C^{\alpha(\tilde Q)} (\overline{\tilde\Omega})}dt \le M(\tilde Q) <\infty \,. \label{local2}
\end{equation}
\end{enumerate}
Then, $(v,p)$ globally conserves the energy, i.e., for any $0 < t_1<t_2< T$ it satisfies the relation:
\begin{equation*}
\|v(t_2)\|_{L^2(\Omega)}=\|v(t_1)\|_{L^2(\Omega)}.
\end{equation*}
Moreover, $v \in L^\infty((0,T); L^2(\Omega)) \cap C((0,T); L^2(\Omega))$.
\end{theorem}
As observed in~\cite{BTW18}, the hypotheses and conclusion of the above theorem, Theorem \ref{CETloc}, are consistent with the situation where the behavior of the fluid in the vanishing viscosity limit is described by the Prandtl ansatz. This is also consistent with the $1/3$-Kolmogorov law, because in such a situation the $\alpha>1/3$ regularity together with condition (\ref{forlebesgue2}) implies the absence of anomalous energy dissipation. The results of~\cite{BTW18} have already been expanded in several directions in~\cite{DN18}. The authors of~\cite{DN18} use the
$B^{1/3}_{3,c_0}$ regularity, cf.\ (\ref{cetshvydkoy}), instead of the H\"older regularity. Moreover, they provide several avatars of the boundary condition (\ref{forlebesgue2}) which may be useful for the connection with the interpretation of this hypothesis in terms of the absence of a ``turbulent boundary layer''.
}
Finally, let us remark that the theory presented here can also be applied to the \emph{inhomogeneous} incompressible Euler equations, where the density is no longer constant,
\begin{align}
\label{eq:NE1}
\begin{aligned}
\partial_t \rho + \Div_x(\rho v) &=0, \\
\partial_t (\rho v) + \Div_x(\rho v \otimes v) + \nabla_x p &= 0,\\
\Div_x v &= 0,
\end{aligned}
\end{align}
for an unknown vector field $v \colon \Omega\times[0,T] \to \mathbb{R}^n$ and scalar fields $\rho\colon \Omega\times[0,T] \to \mathbb{R}_+$ and $p\colon \Omega\times[0,T] \to \mathbb{R}$.
In this case, for the variable $U=(\rho,v, p)$, we have $A(U)=(\rho,\rho v,0)$ and $F(U)=(\rho v,\rho v\otimes v+p\mathbb{I}, v)$.
The entropy $\eta(U) = \frac 12 \rho|v|^2$ and the entropy flux is $q(U)=\left(\frac{\rho|v|^2}{2}+p\right)v$.
Thus, assuming the usual slip boundary condition ${v}\cdot n=0$ on $\partial\Omega$, we have $q(U)\cdot n=0$ on $\partial\Omega$.
The function $B$ has the form $B(U)=(-\frac{1}{2}|v|^2, v, p)$. Again, as in the case of the incompressible Euler system, $B$ does not have linear growth. However, we cannot repeat the same reasoning as for the incompressible Euler system, since $G$ is not linear in the first row.
One could add boundedness assumptions on appropriate quantities; however,
we proceed differently. We rewrite the system in different variables so that the row of $G$ corresponding to the first component of $B$ is linear. Thus we choose $U=(\rho, m, p)$, where $m=\rho v$. If $\rho\ge\underline\rho>0$, then the system can be rewritten in the new variables as follows
\begin{align}
\label{eq:inho_eul_div_2}
\begin{aligned}
\partial_t \rho+ \Div_{x}m &= 0,\\
\partial_t m + \Div_x\left(\frac{m\otimes m}{\rho}+p\mathbb{I}\right) &= 0,\\
\Div_x v&=0.
\end{aligned}
\end{align}
Here $A(U)=(\rho,m,0)$ and $F(U)=(m, \frac{m\otimes m}{\rho}+p\mathbb{I},v)$. Moreover, $\eta(U)= \frac{|m|^2}{2\rho}$ and the entropy flux is $q(U)=\left(
\frac{|m|^2}{2\rho}
+p
\right)
\frac{m}{\rho}$.
Then the function $B$ in these variables has the form $B(U)= \left( -\frac{|m|^2}{2\rho^2},\frac{m}{\rho},p\right)$; even though $B$ does not have linear growth in its first component, $G$ produces no error terms in the mollification procedure in the corresponding row. Since $\rho$ is bounded away from zero, the second component of $B$ has linear growth.
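As a quick check (added for convenience): in the variables $U=(\rho,m,p)$ one has $A(U)=(\rho,m,0)$ and $\eta(U)=\frac{|m|^2}{2\rho}$, so
\begin{equation*}
D_U\eta(U)=\Big(-\frac{|m|^2}{2\rho^2},\ \frac{m}{\rho},\ 0\Big)=B(U)\,D_UA(U),
\end{equation*}
since $D_UA$ acts as the identity on the $(\rho,m)$-components and annihilates the $p$-component; this is the first relation in~\eqref{compat}.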
A corresponding Onsager-type statement for the inhomogeneous incompressible Euler equations on the torus, i.e.\ $\Omega=\mathbb{T}^d$, was given in~\cite{FGSW}; see also an analogous result for the inhomogeneous incompressible Navier--Stokes equations~\cite{LSh}. As the result for the inhomogeneous incompressible Euler equations in~\cite{FGSW} is stated in a way that allows one to trade Besov regularity between the velocity field and the density/momentum, we recall it here:
\begin{theorem}[Th. 3.1 from \cite{FGSW}] \label{inhomonsager}
Let $(\rho, v, p)$ be a solution of~\eqref{eq:NE1} in the sense of distributions. Assume
\begin{equation}\label{besovhypo}
v\in B_p^{\alpha,\infty}(\Omega\times(0,T)),\hspace{0.3cm}\rho, \rho v\in B_q^{\beta,\infty}(\Omega\times(0,T)),\hspace{0.3cm}p\in L^{p^*}_{loc}(\Omega\times(0,T))
\end{equation}
for some $1\leq p,q\leq\infty$ and $0\leq\alpha,\beta\leq1$ such that
\begin{equation}\label{exponenthypo}
\frac{2}{p}+\frac{1}{q}=1,\hspace{0.3cm}\frac{1}{p}+\frac{1}{p^*}=1,\hspace{0.3cm}2\alpha+\beta>1.
\end{equation}
Then the energy is locally conserved, i.e.
\begin{equation}\label{localenergy}
\partial_t\left(\frac{1}{2}\rho|v|^2\right)+\Div\left[\left(\frac{1}{2}\rho|v|^2+p\right)v\right]=0
\end{equation}
in the sense of distributions on $\Omega\times(0,T)$.
\end{theorem}
Note that although this result is stated in the variables $(\rho, v,p)$, there is the additional requirement that the momentum $\rho v$ belongs to a Besov space.
Observe that this theorem can be extended to the case where the problem is considered in an arbitrary open set $\Omega \subset \mathbb{R}^d$. Moreover, under the condition
$$
\lim_{\varepsilon \rightarrow 0} \int_0^T\frac{1}{\varepsilon}\int_{\frac{\varepsilon}{4} \le d(x,\partial\Omega) \le \frac{\varepsilon}{2}}\left|\left(\rho \frac {|v|^2}2 + p\right) v(t,x)\cdot n(\sigma( x))\right|\, dxdt =0
$$ one has the global energy conservation for $0<t<T$.
To conclude, taking into account the above discussion and Remark~\ref{r42}, we formulate the theorem, which follows from the general result presented in Section~\ref{localtoglobal}.
\begin{theorem}
Let $(\rho,m,p)\in \underline{B}_{3,\textit{VMO}}^{1/3}(\Omega\times [0,T])\times \underline{B}_{3,\textit{VMO}}^{1/3}(\Omega\times [0,T])\times L^3_{loc}(\Omega\times [0,T])$ be a solution to~\eqref{eq:inho_eul_div_2}.
Moreover, let
$$
\lim_{\varepsilon \rightarrow 0} \int_0^T\frac{1}{\varepsilon}\int_{\frac{\varepsilon}{4} \le d(x,\partial\Omega) \le \frac{\varepsilon}{2}}\left|\left(
\frac{|m|^2}{2\rho}
+p
\right)
\frac{m}{\rho} \cdot n(\sigma( x))\right|\, dxdt =0.
$$
Then the energy is globally conserved, i.e.,
\begin{equation}
\frac{d}{dt}\int_\Omega \frac{|m|^2}{2\rho}dx=0
\end{equation}
in the sense of distributions.
\end{theorem}
\subsection{Compressible Euler system}\label{con}
We consider the compressible Euler equations in the following form
\begin{align}
\label{eq:comp_eul}
\begin{aligned}
\partial_t \rho+ \Div_{x}(\rho v) &= 0, \\
\partial_t (\rho v) + \Div_x(\rho v\otimes v +p(\rho)\mathbb{I}) &= 0,
\end{aligned}
\end{align}
for an unknown vector field $v\colon \Omega\times[0,T] \to \mathbb{R}^n$
and scalar $\rho\colon \Omega\times[0,T] \to \mathbb{R}$. The function $p\colon [0,\infty)\to\mathbb{R}$ is given. Let $P$ be the so-called pressure potential given by
\begin{equation}
P(\rho)=\rho\int_1^\rho\frac{p(z)}{z^2} dz.
\end{equation}
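For later use we note the elementary identities (added here for convenience; they follow directly from the definition of $P$)
\begin{equation*}
\rho\,P'(\rho)-P(\rho)=p(\rho), \qquad P''(\rho)=\frac{p'(\rho)}{\rho}\,.
\end{equation*}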
Let $(\rho,v)\in \underline{B}_{3,\textit{VMO}}^{1/3}(\Omega\times [0,T])\times \underline{B}_{3,\textit{VMO}}^{1/3}(\Omega\times [0,T])$
be a weak solution to \eqref{eq:comp_eul}.
To get the conservation of the energy, we multiply \eqref{eq:comp_eul} with
\begin{equation*}
{ B}(\rho,v)=\left( P'(\rho)-\frac{1}{2}|v|^2, v\right)
\end{equation*}
and obtain
\begin{equation}
\label{eq:comp_cons}
\partial_t\left(
\frac{1}{2}\rho |v|^2 + P(\rho)
\right)
+\Div_x\left[
\left(
\frac{1}{2}\rho |v|^2
+P(\rho)+p(\rho)
\right)
v\right]
=0.
\end{equation}
In the variables $U=(\rho,v)$, in correspondence to the notation \eqref{FA}, we have
\begin{equation}
A(U)=(\rho, \rho v), \quad F(U)=(\rho v, \ \rho v\otimes v + p(\rho) \mathbb{I})
\end{equation}
and
\begin{equation}
\eta(U)=\frac{1}{2}\rho|v|^2+P(\rho), \quad q(U)= \left(
\frac{1}{2}\rho |v|^2
+P(\rho)+p(\rho)
\right)
v.
\end{equation}
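For completeness we verify the first relation in~\eqref{compat} (an elementary check added here). With $A(U)=(\rho,\rho v)$, $\eta(U)=\frac12\rho|v|^2+P(\rho)$ and $B(\rho,v)=\big(P'(\rho)-\frac12|v|^2,\,v\big)$ as above,
\begin{equation*}
B\,D_UA=\Big(P'(\rho)-\tfrac12|v|^2+|v|^2,\ \rho v^T\Big)=\Big(P'(\rho)+\tfrac12|v|^2,\ \rho v^T\Big)=D_U\eta\,,
\end{equation*}
where we used $D_UA=\begin{pmatrix}1&0\\ v&\rho\,\mathbb I\end{pmatrix}$; the relation for $q$ is checked similarly, using the identity $\rho P'(\rho)-P(\rho)=p(\rho)$.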
The entropy flux function $q(U)$ has the form of a product of a scalar function and $v$, say $q(\rho, v)=\tilde q(\rho,v)\, v$.
Thus the boundary condition $q(U)\cdot n=0$ follows from the slip condition $v\cdot n=0$ on the boundary.
If $\rho\ge\underline\rho>0$ the compressible Euler system can be rewritten with respect to the quantities $\rho$ and momentum $m = \rho v$ as follows
\begin{align}
\label{eq:comp_eul_div_2}
\begin{aligned}
\partial_t \rho+ \Div_{x}m &= 0,\\
\partial_t m + \Div_x\left(\frac{m\otimes m}{\rho}+p(\rho)\mathbb{I}\right) &= 0.
\end{aligned}
\end{align}
A suitable choice of ${ B}$ is then \begin{equation}
{ B}(\rho,m) = \left( P'(\rho)-\frac{|m|^2}{2\rho^2},\frac{m}{\rho}\right),
\end{equation}
which leads to the companion law
\begin{equation}
\label{eq:comp_cons_2}
\partial_t\left(
\frac{|m|^2}{2\rho} + P(\rho)
\right)
+\Div_x\left[
\left(
\frac{|m|^2}{2\rho}
+ P(\rho)+p(\rho)
\right)
\frac{m}{\rho}\right]
=0.
\end{equation}
The flux function $q(U)$ has the form of a product of a scalar function and $m/\rho$, namely $q(\rho, m)=\tilde q(\rho,m)\, \frac{m}{\rho}.$
Thus the boundary condition $q(U)\cdot n=0$ follows from the condition $m\cdot n=0$ on the boundary.
To conclude, taking into account the above discussion and Remark~\ref{r42}, we formulate the result in a bounded domain $\Omega$, which follows from the general result presented in Section~\ref{localtoglobal}.
\begin{theorem}
Let $(\rho,m)\in L^3(0,T;\underline{B}_{3,\textit{VMO}}^{1/3}(\Omega))\times L^3(0,T;\underline{B}_{3,\textit{VMO}}^{1/3}(\Omega))$ be a solution to~\eqref{eq:comp_eul_div_2}.
Moreover, let
$$
\lim_{\varepsilon \rightarrow 0} \int_0^T\frac{1}{\varepsilon}\int_{\frac{\varepsilon}{4} \le d(x,\partial\Omega) \le \frac{\varepsilon}{2}}\left|\left(
\frac{|m|^2}{2\rho}
+ P(\rho)+p(\rho)
\right)
\frac{m}{\rho} \cdot n(\sigma( x))\right|\, dxdt =0.
$$
Then the energy is globally conserved, i.e.,
\begin{equation}
\frac{d}{dt}\int_\Omega \left(
\frac{|m|^2}{2\rho} + P(\rho)
\right)dx=0
\end{equation}
in the sense of distributions.
\end{theorem}
We recall in detail the result from~\cite{FGSW}, since, similarly to the case of the inhomogeneous incompressible Euler system, the particular form of the function $A$ allows for an interplay between the Besov regularities of the particular terms, i.e., the exponents $\alpha$ and $\beta$. This result, too, was only stated for the system on the torus, i.e.\ $\Omega=\mathbb{T}^d$.
\begin{theorem}[Th. 4.1 from \cite{FGSW}]\label{compressibleonsager}
Let $\rho$, $v$ be a solution of~\eqref{eq:comp_eul} in the sense of distributions. Assume
\begin{equation*}
v\in B^{\alpha}_{3,\infty}(\Omega\times(0,T)),\hspace{0.3cm}\rho, \rho v\in B^{\beta}_{3,\infty}(\Omega\times(0,T)),\hspace{0.3cm}
0 \leq \underline{\varrho} \leq \rho \leq \overline{\varrho} \ \mbox{a.a. in } \Omega\times(0,T),
\end{equation*}
for some constants $\underline{\varrho}$, $\overline{\varrho}$, and
$0\leq\alpha,\beta\leq1$ such that
\begin{equation}\label{alphabeta}
\beta > \max \left\{ 1 - 2 \alpha; \frac{1 - \alpha}{2} \right\}.
\end{equation}
Assume further that $p \in C^2[\underline{\varrho}, \overline{\varrho}]$, and, in addition
\begin{equation}\label{pressure}
p'(0) = 0 \ \mbox{as soon as}\ \underline{\varrho} = 0.
\end{equation}
Then the energy is locally conserved, i.e.
\begin{equation*}
\partial_t\left(\frac{1}{2}\rho|v|^2+P(\rho)\right)+\Div\left[\left(\frac{1}{2}\rho|v|^2+p(\rho)+P(\rho)\right)v\right]=0
\end{equation*}
in the sense of distributions on $\Omega\times(0,T)$.
\end{theorem}
If $\alpha<\frac{1}{3}$, then in the case of the incompressible Euler system it is known that there exist $C^\alpha$ solutions that do not conserve energy.
The above theorem indicates that, in the case of the compressible model, if in addition to $\alpha<\frac{1}{3}$ we know that the density $\rho$ is regular enough to ensure that the product $\rho v$ lies in a better space (i.e.\ with a sufficiently high exponent $\beta$), then the energy is conserved even for velocity fields of such low regularity.
Similarly to the incompressible case elaborated in~\cite{BTW18}, a possible application concerns the Navier-Stokes-to-Euler limit, as viscosity tends to zero: A vanishing viscosity sequence of solutions to the compressible Navier-Stokes equations that \emph{uniformly} satisfies the interior Besov condition and the normal condition~\eqref{boundaryass} near the boundary will converge to a solution of the compressible Euler equations. This is particularly important as these requirements are consistent with the possible formation of a boundary layer, which affects only the regularity of the \emph{tangential} velocity component.
\subsection{Polyconvex elasticity}\label{polycon}
In this section we first consider a quasi-linear wave equation that may be interpreted as a model of nonlinear elastodynamics, where we understand $y\colon \Omega\times{\mathbb{R}}^+ \to{\mathbb{R}}^3$ as a displacement vector:
\begin{equation}
\label{mainI}
\frac{\partial^2 y}{\partial t^2}=\Div_x S(\nabla y).
\end{equation}
In the above equation $S$ is a gradient of some function ${\mathcal G}:{\mathbb M}^{3\times 3} \to [0,\infty)$.
We rewrite the equation as a system, introducing the notation $v_i=\partial_ty_i$ and $\mathbb{F}_{i j}=\frac{\partial y_i}{\partial x_j}$. Then
$U=(v,\mathbb{F})$ solves the system
\begin{equation}\label{poly}
\begin{aligned}
\frac{\partial v_i}{\partial t}&=\frac{\partial}{\partial x_j}\left(\frac{\partial {\mathcal G}}{\partial \mathbb{F}_{i j}}\right),\\
\frac{\partial \mathbb{F}_{i j}}{\partial t}&=\frac{\partial v_i}{\partial x_j}.
\end{aligned}
\end{equation}
With $A(U)\equiv id$ and $F(U)=\left(\frac{\partial {\mathcal G}}{\partial \mathbb{F}_{i j}}, v\right)$ we have an entropy
$\eta(U)=\frac{1}{2}|v|^2+{\mathcal G}(\mathbb{F})$ and an entropy flux $q_j(U)=v_i\frac{\partial {\mathcal G}(\mathbb{F})}{\partial \mathbb{F}_{i j}}$.
Then a suitable choice of the function $B$ is $B=(v, \frac{\partial {\mathcal G}(\mathbb{F})}{\partial \mathbb{F}_{i j}})$. Typical conditions assumed on ${\mathcal G}$ are the following (see e.g.\ Section 2.2 in \cite{DST}):
\begin{equation}
{\mathcal G}\in C^3, \ |D^3{\mathcal G}(F)|\le M \mbox{ for some } M>0,
\end{equation}
\begin{equation}
{\mathcal G}(F)=g_0(F)+\frac{1}{2}|F|^2\ \mbox{where }\lim_{|F|\to\infty}\frac{g_0(F)}{1+|F|^2}=0
\end{equation}
and
\begin{equation}
\lim_{|F|\to\infty}\frac{\frac{\partial {\mathcal G}(F)}{\partial\mathbb{F}_{i j}}}{1+|F|^2}=0.
\end{equation}
Again, $B$ may not satisfy the requirement of linear growth; however, the problem does not arise here, as the corresponding row of the flux is linear. Under these assumptions the entropy and the entropy flux satisfy condition~\eqref{locest}.
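Let us also record (a brief observation added for orientation) that, since $A(U)=\mathrm{id}$, the first relation in~\eqref{compat} reduces to
\begin{equation*}
B(U)=D_U\eta(U)=\Big(v,\ \frac{\partial {\mathcal G}(\mathbb{F})}{\partial \mathbb{F}_{i j}}\Big),
\end{equation*}
which is exactly the choice made above; the second relation, $D_Uq_j=B\,D_UF_j$, is then verified by a direct computation.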
One natural boundary condition, $S n=0$ on $\partial \Omega$ (i.e.\ $\frac{\partial {\mathcal G}}{\partial \mathbb{F}_{i j}}$ vanishes in the normal direction), the so-called zero-traction boundary condition, implies that $q(U)\cdot n=0$ on~$\partial \Omega$. Another boundary condition that is often considered is the Dirichlet boundary condition $v=0$ on $\partial \Omega$; in that case again $q(U)\cdot n=0$ on~$\partial \Omega$.
Taking into account Remark~\ref{extension} and the above discussion we are ready to state the result in a bounded domain $\Omega$, which can be proved using the general result of Section~\ref{localtoglobal}.
\begin{theorem}
Let $(v, \mathbb{F}) \in
L^3(0,T;\underline{B}_{3,\textit{VMO}}^{1/3}(\Omega))\times L^3(0,T;\underline{B}_{3,\textit{VMO}}^{1/3}(\Omega))$ be a solution to \eqref{poly}. Moreover, let
$$
\lim_{\varepsilon \rightarrow 0} \int_0^T\frac{1}{\varepsilon}\int_{\frac{\varepsilon}{4} \le d(x,\partial\Omega) \le \frac{\varepsilon}{2}}\left|
v_i\frac{\partial {\mathcal G}(\mathbb{F})}{\partial \mathbb{F}_{i j}} \cdot n(\sigma( x))\right|\, dxdt =0.
$$
Then the energy is globally conserved, i.e.,
\begin{equation}
\frac{d}{dt}\int_\Omega \left(\frac{1}{2}|v|^2+{\mathcal G}(\mathbb{F}) \right)dx=0
\end{equation}
in the sense of distributions.
\end{theorem}
\begin{remark}
For system~\eqref{poly}, in the spirit of the earlier examples, one could formulate conditions allowing one to distinguish among different regularity requirements for $v$ and $\mathbb{F}$. Since the nonlinearity only appears in $\mathbb{F}$,
to guarantee the conservation of the entropy we could assume that $(v, \mathbb{F}) \in
L^3(0,T;\underline{B}_{3,\textit{VMO}}^{\alpha}(\Omega))\times L^3(0,T;\underline{B}_{3,\textit{VMO}}^{\beta}(\Omega))$ with $\beta=\frac{1-\alpha}{2}$.
\end{remark}
In the case of elastodynamics we regard $S$ as the Piola--Kirchhoff stress tensor
obtained as the gradient of a stored energy function,
$S = \frac{\partial W}{\partial {\mathbb F}}$. A natural assumption is that
$W$ is polyconvex, that is $W({\mathbb F}) = {\mathcal G} ( \Phi({\mathbb F}))$ where
${\mathcal G}:{\mathbb M}^{3\times 3}\times{\mathbb M}^{3\times 3}\times \mathbb{R} \to [0,\infty)$
is a strictly convex function and $\Phi({\mathbb F}) = ({\mathbb F} ,\cof {\mathbb F}, \det {\mathbb F})\in
{\mathbb M}^{3\times 3}\times{\mathbb M}^{3\times 3}\times \mathbb{R}$
stands for the vector of null-Lagrangians: ${\mathbb F}$, the cofactor matrix $\cof {\mathbb F}$
and the determinant $\det {\mathbb F}$.
The system can be embedded into the following symmetrizable hyperbolic system in a new dependent variable $\Xi=({\mathbb F},Z,w)$, see e.g.~\cite{Dafermos1985} and~\cite{DST}, taking values in
${\mathbb M}^{3\times 3}\times {\mathbb M}^{3\times 3}\times\mathbb{R}$:
\begin{equation}
\begin{aligned}
\frac{\partial v_i}{\partial t}&=\frac{\partial}{\partial x_j}\left(\frac{\partial {\mathcal G}}{\partial\Xi^A}(\Xi)\frac{\partial\Phi^A}{\partial
{\mathbb F}_{i j}}({\mathbb F})\right),\\
\frac{\partial\Xi^A}{\partial t}&=\frac{\partial}{\partial
x_j}\left(\frac{\partial\Phi^A}{\partial
{\mathbb F}_{i j}}({\mathbb F})v_i\right),
\end{aligned}
\end{equation}
and hence for $U=(v,{\mathbb F},Z,w)$ we have
\begin{equation}
A(U)=id, \quad F(U)=\left(\frac{\partial {\mathcal G}}{\partial\Xi^A}(\Xi)\frac{\partial\Phi^A}{\partial
{\mathbb F}_{i j}}({\mathbb F}), \frac{\partial\Phi^A}{\partial
{\mathbb F}_{i j}}({\mathbb F})v_i\right).
\end{equation}
This system admits the following entropy-entropy flux pair
\begin{equation}
\begin{aligned}
\eta(v,{\mathbb F},Z,w)&=\frac{1}{2}|v|^2+{\mathcal G}({\mathbb F},Z,w),\\
q_j(v,{\mathbb F},Z,w)&=v_i\,\frac{\partial {\mathcal G}}{\partial\Xi^A}(\Xi)\frac{\partial\Phi^A}{\partial {\mathbb F}_{i j}}({\mathbb F}).
\end{aligned}
\end{equation}
Of course, in this situation the conclusions follow from those stated for the general case described at the beginning of this subsection.
\subsection{Incompressible magnetohydrodynamics}
Let us consider the system
\begin{align}
\label{eq:MHD}
\left.
\begin{aligned}
\partial_t v + \Div_x(v\otimes v - h\otimes h) + \nabla_x (p+\frac{1}{2}|h|^2) &= 0,
\\
\partial_t h + \Div_x(v\otimes h-h \otimes v) &= 0,\\
\Div_xv &= 0,
\\
\Div_xh&= 0,
\end{aligned}
\right.
\end{align}
for unknown vector functions $v\colon \Omega\times[0,T] \to \mathbb{R}^n$ and $h\colon \Omega\times[0,T] \to \mathbb{R}^n$ and an unknown scalar function $p\colon \Omega\times[0,T]\to \mathbb{R}$. It is sufficient to require that $\Div_x h$ is equal to zero at the initial time, as this information is then transported and thus we may reduce the system to $2n+1$ equations. The system describes the motion of an ideal electrically conducting fluid, see e.g.~\cite[Chapter VIII]{landau}.
Here $U=(v,h,p)$, $A(U)=(v,h,0)$,
and
$$F(v,h) = \left(v\otimes v-h\otimes h+(p+\frac{1}{2}|h|^2)\mathbb{I},v\otimes h-h \otimes v,v\right).$$
The entropy is given as $\eta = \frac12 (|v|^2 + |h|^2)$
and the entropy flux is $q= \frac 12(|v|^2 + |h|^2)v - (v\cdot h) h$. A possible choice of
the function $B$ is $B=(v,h,\, p-\frac 12|v|^2)$.
Assuming $v\cdot n=0$ and $h\cdot n=0$ on the boundary (see e.g.\ \cite{FNS2014}) ensures that $q(U)\cdot n=0$ on the boundary.
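Indeed (a one-line check added here),
\begin{equation*}
q(U)\cdot n=\tfrac 12\big(|v|^2 + |h|^2\big)\,(v\cdot n)-(v\cdot h)\,(h\cdot n)=0
\qquad\text{on } \partial\Omega\,.
\end{equation*}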
Using again Remark~\ref{extension} we can state the result in a bounded domain $\Omega$.
\begin{theorem}
Let $(v,h,p)\in L^3(0,T;\underline{B}_{3,\textit{VMO}}^{1/3}(\Omega))\times L^3(0,T;\underline{B}_{3,\textit{VMO}}^{1/3}(\Omega))\times L^3((0,T)\times\Omega)$ be a solution to~\eqref{eq:MHD}.
Moreover, let
$$
\lim_{\varepsilon \rightarrow 0} \int_0^T\frac{1}{\varepsilon}\int_{\frac{\varepsilon}{4} \le d(x,\partial\Omega) \le \frac{\varepsilon}{2}}\left|\left(
\frac 12(|v|^2 + |h|^2)v - (v\cdot h) h
\right)
\cdot n(\sigma( x))\right|\, dxdt =0.
$$
Then the energy is globally conserved, i.e.,
\begin{equation}
\frac{d}{dt}\int_\Omega \frac12 (|v|^2 + |h|^2)dx=0
\end{equation}
in the sense of distributions.
\end{theorem}
In fact, the integrability requirement on $p$ can be relaxed owing to the elliptic arguments of~\cite{BTW18}, cf.\ the remark at the end of subsection~\ref{incompEul}.
An extension of the error estimates from~\cite{CET} was proposed by Caflisch et al.\ \cite{Cafetal} to handle
the global energy conservation for incompressible magnetohydrodynamics in the case without a boundary, i.e., $\Omega=\mathbb{T}^d$.
We recall this result below:
\begin{theorem}[Th. 4.1 from~ \cite{Cafetal}] \label{caflisch}
Let $d = 2,3$ and let $(v,h)$ be a weak solution of ~\eqref{eq:MHD}. Suppose that
\[v\in C([0,T],B_{3,\infty}^{\alpha}(\Omega)),\;\; h\in C([0,T],B_{3,\infty}^{\beta}(\Omega))
\]
with
\[\alpha > \frac{1}{3},\;\;\alpha+2\beta > 1.
\]
Then the following energy identity holds for any $t\in[0,T]$:
\begin{equation}\label{energyMHD}
\int_{\Omega}|v(x,t)|^2 + |h(x,t)|^2\ dx = \int_{\Omega}|v(x,0)|^2 + |h(x,0)|^2\ dx.
\end{equation}
\end{theorem}
The same system was studied by Kang and Lee \cite{KangLee}, who
formulated the result in the spirit of the framework of
Cheskidov et al.~\cite{CCFS08}, with $\Omega=\mathbb{R}^3$:
\begin{theorem}[Th. 6 from~\cite{KangLee}]
Let $(v,h)$ be a weak solution of~\eqref{eq:MHD}. Suppose that
\[v\in L^3([0,T],B^{\alpha}_{3,c_0}(\Omega)),\;\; h\in L^3([0,T],B^{\beta}_{3,c_0}(\Omega))
\]
with
\[\alpha \geq \frac{1}{3},\;\;\alpha+2\beta \geq 1.
\]
Then ~\eqref{energyMHD} holds.
\end{theorem}
\subsection{Compressible magnetohydrodynamics}
We consider the system
\begin{align}
\label{eq:MHD_diver}
\begin{aligned}
\rho_t+\Div_{x}(\rho v) &= 0,\\
\partial_t(\rho v) + \Div_x\left(\rho v\otimes v + p(\rho)\mathbb{I} +\frac{1}{2}|h|^2\mathbb{I}-h\otimes h\right)&= 0,\\
\partial_t h + \Div_x(h \otimes v-v\otimes h) &= 0,\\
\Div_{x}h &= 0,
\end{aligned}
\end{align}
where $v \colon\Omega\times[0,T]\to\mathbb{R}^n$ is the velocity field, $\rho \colon\Omega\times[0,T]\to\mathbb{R}$ the density of the fluid and $h\colon\Omega\times[0,T]\to\mathbb{R}^3$ is the magnetic field.
With ${ B}(\rho,v,h)=(P'(\rho)-1\slash 2 |v|^2,v,h,-h\cdot v)$, the conservation of the total energy reads:
\begin{align}
\label{eq:MHD_en}
&\partial_t\left(
\frac{1}{2}\rho|v|^2 + P(\rho)+ \frac{1}{2}|h|^2
\right)+\Div_x\left[
\left(\frac{1}{2} \rho|v|^2+P(\rho)+p(\rho)+|h|^2\right) v -(v\cdot h)h
\right]=0.
\end{align}
Assuming again $v\cdot n=0$ and $h\cdot n=0$ on the boundary ensures that $q(U)\cdot n=0$ on the boundary.
In the case of compressible magnetohydrodynamics, we can act in a similar fashion as in the case of the compressible Euler system and formulate the equations in different variables $\rho,m,h$, where again $m$ is the momentum, i.e., $m=\rho v$:
\begin{align}
\label{eq:MHD_diver-m}
\begin{aligned}
\rho_t+\Div_{x}m &= 0,\\
\partial_t m + \Div_x\left(\frac{m\otimes m}{\rho} + p(\rho)\mathbb{I} +\frac{1}{2}|h|^2\mathbb{I}-h\otimes h\right)&= 0,\\
\partial_t h + \Div_x\left(\frac{h \otimes m}{\rho}-\frac{m\otimes h}{\rho}\right) &= 0,\\
\Div_{x}h &= 0.
\end{aligned}
\end{align}
Similarly, if $\rho>\underline\rho>0$, we can state the result in a bounded domain $\Omega$.
\begin{theorem}
Let $(\rho,m,h)\in L^3(0,T;\underline{B}_{3,\textit{VMO}}^{1/3}(\Omega))\times L^3(0,T;\underline{B}_{3,\textit{VMO}}^{1/3}(\Omega))\times L^3(0,T;\underline{B}_{3,\textit{VMO}}^{1/3}(\Omega))$ be a solution to~\eqref{eq:MHD_diver-m}.
Moreover, let
$$
\lim_{\varepsilon \rightarrow 0} \int_0^T\frac{1}{\varepsilon}\int_{\frac{\varepsilon}{4} \le d(x,\partial\Omega) \le \frac{\varepsilon}{2}}\left|\left(\left(
\frac{|m|^2}{2\rho}
+ P(\rho)+p(\rho)+|h|^2
\right)
\frac{m}{\rho} -\left(\frac{m}{\rho}\cdot h\right) h
\right)
\cdot n(\sigma( x))\right|\, dxdt =0.
$$
Then the energy is globally conserved, i.e.,
\begin{equation}
\frac{d}{dt}\int_\Omega \left(
\frac{|m|^2}{2\rho} + P(\rho)+\frac 12|h|^2
\right)dx=0
\end{equation}
in the sense of distributions.
\end{theorem}
\end{document}
\begin{document}
\title{Bivariate Chromatic Polynomials of Mixed Graphs}
\author{Matthias Beck}
\address{Department of Mathematics, San Francisco State University, San Francisco, CA 94132, U.S.A.}
\email{[email protected]}
\author{Sampada Kolhatkar}
\address{Institut f\"ur Mathematik, Freie Universit\"at Berlin, 14195 Berlin, Germany}
\email{[email protected]}
\begin{abstract}
The bivariate chromatic polynomial $\chi_G(x,y)$ of a graph $G = (V, E)$,
introduced by Dohmen--P\"{o}nitz--Tittmann (2003), counts all $x$-colorings of $G$ such that
adjacent vertices get different colors if they are $\le y$. We extend this notion to mixed
graphs, which have both directed and undirected edges. Our main result is a decomposition
formula which expresses $\chi_G(x,y)$ as a sum of bivariate order polynomials
(Beck--Farahmand--Karunaratne--Zuniga Ruiz 2020), and a combinatorial reciprocity theorem
for $\chi_G(x,y)$.
\end{abstract}
\keywords{Mixed graph, bivariate chromatic polynomial, bivariate order polynomial, poset, acyclic orientation, order preserving map, combinatorial reciprocity theorem}
\makeatletter
\@namedef{subjclassname@2020}{
\textup{2020} Mathematics Subject Classification}
\makeatother
\subjclass[2020]{Primary 05C15; Secondary: 05A15, 06A07, 05C31}
\date{01 November 2022}
\maketitle
\section{Introduction}
Graph coloring problems are ubiquitous in many areas within and outside of mathematics. Our interest is in enumerating proper colorings for graphs, directed graphs, and mixed graphs (and in the latter two instances, there are two definitions of the notion of a coloring being proper).
The motivation of our study is the \Def{bivariate chromatic polynomial} $\chi_G(x,y)$
of a graph $G = (V, E)$, first introduced in \cite{dohmenponitztittmann} and
defined as the counting function of colorings $c : V \to [x] := \left\{ 1, 2, \dots, x \right\}$ that satisfy for any edge $vw \in E$
\[
c(v) \ne c(w) \quad \text{ or } \quad c(v) = c(w) > y \, .
\]
The usual univariate chromatic polynomial of $G$ can be recovered as the special evaluation $\chi_G(x,x)$.
Dohmen, P\"onitz, and Tittmann provided basic properties of $\chi_G(x,y)$
in~\cite{dohmenponitztittmann}, including polynomiality and special evaluations which yield the matching and independence polynomials of $G$.
Subsequent results include a deletion--contraction formula and applications to Fibonacci-sequence identities~\cite{hillarwindfeldt}, common generalizations of $\chi_G(x,y)$ and the Tutte polynomial~\cite{averbouchgodlinmakowsky}, and closed formulas for paths and cycles~\cite{dohmenbivariatepathsandcycles}.
We initiate the study of a directed/mixed version of this bivariate chromatic polynomial. Since directed graphs form a subset of mixed graphs, we may restrict our definitions to a \Def{mixed graph} $G = (V, E, A)$ consisting, as usual, of a set $V$ of vertices, a set $E$ of (undirected) edges, and a set $A$ of arcs (directed edges).
Coloring problems in mixed graphs have various applications, for example in scheduling problems in which one has both disjunctive and precedence constraints (see, e.g., \cite{furmanczykkosowkiries,HaKuWe,SoTaWe}).
\begin{defi}For a mixed graph $G = \left(V, E, A\right)$, the \Def{bivariate
chromatic polynomial} $\chi_G(x,y)$, where $1 \leq y \leq x$, is defined as the counting function of colorings $c : V \longrightarrow \left[x\right]$ that satisfy
for every edge $uv \in E$
\begin{align}
c(u) \neq c(v) \quad &\text{ or } \quad c(u)>y \label{cond:bcpomg1}
\end{align}
and for every arc $\overrightarrow{uv} \in A$
\begin{align}
c(u) < c(v) \quad &\text{ or } \quad c(u) > y \, . \label{cond:bcpomg2}
\end{align}
\end{defi}
It is not obvious that this counting function is a polynomial in $x$ and $y$; we will prove
this as a by-product of Theorem~\ref{mainthm2} below.
Naturally, for a mixed graph with $A = \emptyset$, we recover the Dohmen--P\"{o}nitz--Tittmann chromatic polynomial above.
On the other hand, $\chi_G(x,x)$ is the univariate chromatic polynomial of the mixed graph
$G$;
Sotskov--Tanaev \cite{sotskovtanaev} showed that this function (if not identically zero) is indeed a polynomial in $x$ of degree $|V|$ and computed the two leading coefficients.
We note that $\chi_G(x,x)$ is sometimes called the \emph{strong} chromatic polynomial of
$G$, because there is an alternative notion of a proper coloring of $G$ in which the
inequality in~\eqref{cond:bcpomg2} is replaced by $\le$ (see, e.g.,~\cite{kotekmakowskyzilber}).
\begin{exam} Consider the directed graph $G$ with $V = \left\{ u,v\right\}$, $E = \emptyset$, $A = \left\{\overrightarrow{uv}\right\}$. A quick case analysis yields the bivariate chromatic polynomial
\begin{align*}
\chi_{G}(x,y) &= {x \choose 2} + y(x-y) + {x-y+1 \choose 2} = \frac{1}{2} \left( 2x^2 - y^2 -y \right).
\end{align*}
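For the reader's convenience we spell out the count behind the three terms (a short argument added here): the proper colorings split into the disjoint cases $c(u)<c(v)$, counted by ${x \choose 2}$; $c(v)\le y<c(u)$, counted by $y(x-y)$; and $y<c(v)\le c(u)$, counted by ${x-y+1 \choose 2}$.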
\end{exam}
\begin{figure}
\caption{A mixed graph $G$.}
\label{fig:eg2fig1}
\end{figure}
\begin{exam}
The mixed graph in Figure~\ref{fig:eg2fig1} has bivariate chromatic polynomial
\[
\chi_G(x,y) = x^3 - \frac{1}{2}xy^2 - \frac{5}{2}xy+y^2+y,
\]
as we will compute in Section~\ref{sec:decomp}.
\end{exam}
After providing some background in Section~\ref{sec:background}, we provide in
Section~\ref{sec:delcontr} deletion--contrac\-tion formulas for $\chi_{G}(x,y)$. Our main
results are in Section~\ref{sec:decomp}, where we decompose $\chi_{G}(x,y)$ into bivariate
order polynomials (originally introduced in~\cite{Beckbop} and loosely connected with the marked poset concepts of~\cite{ardilabliemsalazar}), and Section~\ref{sec:bcpmgReciprocity}, where we give a combinatorial reciprocity theorem interpreting $\chi_{G}(-x,-y)$.
Our results recover known theorems for undirected graphs (the case $A =
\emptyset$)~\cite{Beckbop}. Bivariate order polynomials are the natural counterparts of bivariate chromatic polynomials in the theory of posets. Our work reveals that bivariate order polynomials are as helpful in the setting of mixed graphs as they are for undirected graphs.
\section{Chromatic and (Bicolored) Order Polynomials}\label{sec:background}
For a finite poset $(P, \preceq)$, Stanley~\cite{stanleychromaticlike} (see
also~\cite[Chapter 3]{stanleyec1}) famously introduced the ``chromatic-like'' \Def{order
polynomial} $\Omega_P(x)$ counting all \Def{order preserving maps} $\varphi : P \to [x]$, that is,
\[
a \preceq b \qquad \Longrightarrow \qquad \varphi(a) \le \varphi(b) \, .
\]
Here we think of $[x]$ as a chain with $x$ elements, and so $\le$ denotes the usual order in $\RR$.
The connection to chromatic polynomials is best exhibited through a variant of
$\Omega_P(x)$, namely the number $\Omega_P^\circ(x)$ of all \Def{strictly order preserving
maps} $\varphi : P \to [x]$:
\[
a \prec b \qquad \Longrightarrow \qquad \varphi(a) < \varphi(b) \, .
\]
When thinking of $P$ as an acyclic directed graph, it is a short step to interpret $\Omega_P^\circ(x)$ as a directed version of the chromatic polynomial.
Along the same lines, one can write the chromatic polynomial of a given graph $G$ as
\begin{equation}\label{eq:chiintoomegas}
\chi_G(x) \ = \sum_{ \sigma \text{ acyclic orientation of } G } \Omega_\sigma^\circ(x) \, .
\end{equation}
Stanley's two main initial results on order polynomials were
\begin{itemize}
\item decomposition formulas for $\Omega_P(x)$ and $\Omega_P^\circ(x)$ in terms of certain permutation statistics for linear extensions of~$P$, from which polynomiality of
$\Omega_P(x)$ and $\Omega_P^\circ(x)$ also follows;
\item the combinatorial reciprocity theorem
$
(-1)^{ |P| } \, \Omega_P(-x) \ = \ \Omega_P^\circ(x) \, .
$
\end{itemize}
The latter, combined with~\eqref{eq:chiintoomegas}, gives in turn rise to
\begin{itemize}
\item Stanley's reciprocity theorem for chromatic polynomials: $(-1)^{ |V| } \, \chi_G(-x)$ equals the number of pairs of an $x$-coloring and a compatible acyclic
orientation~\cite{stanleyacyclic}.
\end{itemize}
Reciprocity theorems for the two versions of univariate chromatic polynomials of mixed
graphs were proved in~\cite{Golomb,weakmixed}.
It is natural to extend order polynomials and the three bullet points above to
the bivariate chromatic setting, and this was done for (undirected) graphs in~\cite{Beckbop}.
As we will need it below, we recall the setup here.
The finite poset $\left(P, \preceq\right)$ is called a \Def{bicolored poset} if $P$ can be viewed as the disjoint union of sets $C$ and $S$,
whose elements are called \Def{celeste} and \Def{silver}, respectively.
This color labeling of the elements of the bicolored poset is captured in the order
preserving maps by introducing another variable, as follows.
A map $\varphi : P \longrightarrow \left[x\right]$ is called an \Def{order preserving $(x,y)$-map} if
\[
a \preceq b \ \Longrightarrow \ \varphi(a) \le \varphi(b) \ \ \text{ for all } a, b \in P
\qquad \text{ and } \qquad
\varphi(c) \ge y \ \ \text{ for all } c \in C \, .
\]
The function $\Omega_{ P, \, C } (x, y)$ counts the number of order preserving $(x, y)$-maps.
The map $\varphi : P \longrightarrow \left[x\right]$ is a \Def{strictly order preserving $(x,y)$-map} if
\[
a \prec b \ \Longrightarrow \ \varphi(a) < \varphi(b) \ \ \text{ for all } a, b \in P
\qquad \text{ and } \qquad
\varphi(c) > y \ \ \text{ for all } c \in C \, .
\]
The function $\Omega^{\circ}_{ P, \, C } (x, y)$ counts the number of strictly order preserving $(x, y)$-maps.
The functions $\Omega^{\circ}_{ P, \, C } (x, y)$ and $\Omega_{ P, \, C } (x, y)$ are called \Def{bivariate order polynomial} and \Def{weak bivariate order polynomial}, respectively.
They are indeed polynomials, which can be computed via certain descent statistics, and which
are related via the combinatorial reciprocity~\cite{Beckbop}
\begin{equation}\label{eq:reciprocityBOP}
(-1)^{ |P| } \, \Omega^{\circ}_{ P, \, C } (-x,-y) \ = \ \Omega_{ P, \, C } (x,y+1) \, .
\end{equation}
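As a small illustration of~\eqref{eq:reciprocityBOP} (an elementary check added here), take the two-element chain $P=\{a\prec b\}$ with the single celeste element $C=\{b\}$. Then
\[
\Omega^{\circ}_{P,\,C}(x,y)=\sum_{k=y+1}^{x}(k-1)=\frac{x(x-1)-y(y-1)}{2},
\qquad
\Omega_{P,\,C}(x,y)=\sum_{k=y}^{x}k=\frac{x(x+1)-y(y-1)}{2},
\]
and indeed $(-1)^{|P|}\,\Omega^{\circ}_{P,\,C}(-x,-y)=\frac{x(x+1)-y(y+1)}{2}=\Omega_{P,\,C}(x,y+1)$.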
As we mentioned in the introduction, bivariate order polynomials exhibit a connection to the theory of marked posets introduced in \cite{ardilabliemsalazar}. Briefly, one marks here the celeste elements, with a lower bound of $y$, and demands the lower bound 0 and the upper bound $x$ throughout the poset.
\section{Deletion--Contraction}\label{sec:delcontr}
We start developing the properties of $\chi_G(x,y)$ by providing deletion--contraction
formulas.
For a mixed graph $G= \left(V,E,A\right)$, let $G-e$ denote edge deletion and $G/e$ denote edge contraction for an edge $e$ of $G$; let $v_e$ denote the vertex obtained after the contraction of edge $e$. We use a similar terminology for deleting/contracting an arc. For an arc $a = \overrightarrow{uv}$ of the mixed graph $G$, let $G_a \coloneqq \left(V,E, \left(A \setminus \left\{\overrightarrow{uv} \right\}\right) \cup \left\{ \overrightarrow{vu} \right\}
\right)$, that is, $G_a$ is the graph obtained by reversing the direction of the arc~$a$.
\begin{prop} \label{prop:delcontredgebcpmg}
If $G= \left(V,E,A\right)$ is a mixed graph and $e \in E$ is an edge, then
\begin{equation}\label{eq:delcontredge}
\chi_G(x,y) \ = \ \chi_{G-e}(x,y) - \chi_{G/e}(x,y) + (x-y) \chi_{(G/e) -v_e}(x,y) \, .
\end{equation}
If $a = \overrightarrow{uv} \in A$ is an arc, then
\begin{equation}\label{eq:delcontrarc}
\begin{split}
\chi_G(x,y) + \chi_{G_a}(x,y) \ &= \ \chi_{G-a} (x,y) - \chi_{G/a} (x,y) + (x-y)(1-x+y)\chi_{(G/a) -v_a}(x,y) \\
& \hspace{15pt} + (x-y) \left( \chi_{G-a-v}(x,y) + \chi_{G-a-u}(x,y) \right) .
\end{split}
\end{equation}
\end{prop}
We remark that \eqref{eq:delcontredge} is equivalent to
\cite[Proposition~1]{averbouchgodlinmakowskyelimination}.
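As a quick sanity check of~\eqref{eq:delcontredge} (added here; it is not part of the original argument), let $G$ consist of the single undirected edge $e=uv$. Then $\chi_G(x,y)=x^2-y$, $\chi_{G-e}(x,y)=x^2$, $\chi_{G/e}(x,y)=x$, and $\chi_{(G/e)-v_e}(x,y)=1$, so the right-hand side equals $x^2-x+(x-y)=x^2-y$, as expected.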
\begin{proof}[Proof of~\eqref{eq:delcontrarc}]
Given $a =\overrightarrow{uv} \in A$, let $C$ be the set of all bivariate colorings of $G$ and $C_a$ the set of all bivariate colorings of $G_a$.
By inclusion--exclusion,
\begin{align}
\chi_G(x,y) + \chi_{G_a}(x,y) & = \lvert C \cup C_a \rvert + \lvert C \cap C_a \rvert. \label{eq:G+Ga}
\end{align}
For a coloring $c \in C \cup C_a$, we count the number of ways the following coloring conditions are satisfied: $c(u) < c(v)$ or $c(v) < c(u)$ or $c(u)> y$ or $c(v) > y$. This means we have to count the number of ways of coloring the vertices $u$ and $v$ such that they may receive any color labels from the set $\left\{1,2,\ldots,x\right\}$, except that they cannot receive equal colors with labels in the set $\left\{1,2,\ldots,y\right\}$. This is exactly counted by
\begin{align} \chi_{G-a} (x,y) - \chi_{G/a} (x,y) + (x-y) \, \chi_{(G/a)-v_a} (x,y) \ = \
\lvert C \cup C_a \rvert \, . \label{eq:CunionCa}
\end{align}
\noindent For a coloring $c \in C \cap C_a$ we distinguish between the following
cases.
\begin{itemize}[leftmargin=5pt]
\item[]Case 1: $c(u) < c(v)$ and $c(u) > c(v) $.
\newline There does not exist a feasible coloring in $C \cap C_a$ that satisfies these conditions simultaneously.
\item[]Case 2: $ y < c(v)$ with $ c(u) \leq c(v)$ and $ y < c(u)$ with $ c(v) \leq
c(u)$.
\newline This implies the coloring condition $y < c(u) = c(v)$, which is counted in
$(x-y) \, \chi_{(G/a)-v_a}(x,y) $ ways.
\item[]Case 3: $c(u) < c(v)$ and $ y < c(v)$ with $ c(u) \leq c(v)$
\newline This implies that the coloring $c$ must satisfy $ y < c(v)$ with $ c(u) < c(v)$. There are two possibilities:
\begin{itemize}
\item[{$\bullet$}] $y <c(u) < c(v) \leq x$.
The colors for $u$ and $v$ can be chosen in ${x-y \choose 2}$ ways. Thus the
number of possible colorings is ${x-y \choose 2}\chi_{(G/a) -v_a}(x,y)$.
\item[{$\bullet$}] $1 \leq c(u) \leq y < c(v) \leq x$.
There are $(x-y)$ ways to color $v$. To color $u$, the condition $1 \leq c(u) \leq y$
needs to be satisfied. This is equivalent to counting colorings where $c(u) \leq x$
and removing the possible colorings with $c(u) > y$, giving $(x-y) \left( \chi_{G-a-u}(x,y) - (x-y) \chi_{(G/a) -v_a}(x,y)\right)$ colorings.
\end{itemize}
In total there are ${x-y \choose 2}\chi_{(G/a) -v_a}(x,y) + (x-y) \big(
\chi_{G-a-u}(x,y) - (x-y) \, \chi_{(G/a) -v_a}(x,y)\big)$ colorings.
\item[]Case 4: $c(v) < c(u)$ and $ y < c(u)$ with $ c(v) \leq c(u)$
\newline This implies that the coloring $c$ must satisfy $ y < c(u)$ with $ c(v) < c(u)$. There are two possibilities:
\begin{itemize}
\item[{$\bullet$}] $y <c(v) < c(u) \leq x$.
The colors for $u$ and $v$ can be chosen in ${x-y \choose 2}$ ways. Thus the possible colorings are counted by ${x-y \choose 2}\chi_{(G/a) -v_a}(x,y)$.
\item[{$\bullet$}] $1 \leq c(v) \leq y < c(u) \leq x$.
There are $(x-y)$ ways to color $u$. For coloring $v$, the condition $1 \leq c(v) \leq
y$ needs to be satisfied. This is equivalent to counting colorings where $c(v) \leq x$
and removing the possible colorings with $c(v) > y$, yielding $(x-y) \left(
\chi_{G-a-v}(x,y) - (x-y) \chi_{(G/a) -v_a} (x,y)\right)$ colorings.
\end{itemize}
In total there are \[{x-y \choose 2}\chi_{(G/a) -v_a}(x,y) + (x-y) \big(
\chi_{G-a-v}(x,y) - (x-y) \, \chi_{(G/a) -v_a}(x,y)\big)\] colorings.
\end{itemize}
Thus
\begin{align}
\vert C \cap C_a \vert \ &= \ (x-y) \, \chi_{(G/a) -v_a}(x,y)+ 2 {x-y \choose 2} \chi_{(G/a) - v_a} (x,y) \nonumber \\
&\qquad + (x-y) \left[ \chi_{G-a-v}(x,y) - (x-y) \chi_{(G/a) -v_a}(x,y)\right] \nonumber \\ &\qquad + (x-y) \left[ \chi_{G-a-u}(x,y) -(x-y) \chi_{(G/a) -v_a}(x,y)\right]. \label{eq:CintersectionCa}
\end{align}
From Equations \eqref{eq:G+Ga}, \eqref{eq:CunionCa} and \eqref{eq:CintersectionCa}
we finally obtain
\begin{align}
\chi_G(x,y) + \chi_{G_a}(x,y) \
& = \ \chi_{G-a} (x,y) - \chi_{G/a} (x,y) + (x-y)(1-x+y)\chi_{(G/a) -v_a}(x,y) \nonumber \\
& \hspace{15pt} + (x-y) \left[ \chi_{G-a-v}(x,y) + \chi_{G-a-u}(x,y) \right].
\nonumber \qedhere
\end{align}
\end{proof}
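The algebra that combines \eqref{eq:G+Ga}, \eqref{eq:CunionCa} and \eqref{eq:CintersectionCa} into the identity above can also be checked symbolically; the following is a minimal sympy sketch in which the five chromatic polynomials are treated as independent symbols.
\begin{verbatim}
from sympy import symbols, expand

x, y = symbols('x y')
# chi_{G-a}, chi_{G/a}, chi_{(G/a)-v_a}, chi_{G-a-u}, chi_{G-a-v},
# treated as independent symbols
A, B, P, U, V = symbols('A B P U V')

binom2 = (x - y)*(x - y - 1)/2                 # binomial(x-y, 2)
union = A - B + (x - y)*P                      # |C u C_a|
inter = ((x - y)*P + 2*binom2*P                # |C n C_a|
         + (x - y)*(U - (x - y)*P)
         + (x - y)*(V - (x - y)*P))
target = (A - B + (x - y)*(1 - x + y)*P        # claimed value of chi_G + chi_{G_a}
          + (x - y)*(V + U))

assert expand(union + inter - target) == 0
\end{verbatim}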
\section{Decomposition into Order Polynomials}\label{sec:decomp}
For a mixed graph $G= \left(V,E,A\right)$, we recall that a \Def{flat} of $G$ is a
mixed graph $H$ that can be constructed from $G$ by a series of contractions of edges
and arcs. We denote the sets of vertices, edges and arcs of the flat $H$ by $V(H), E(H)$ and
$ A(H)$, respectively. The subset of vertices of $H$ that results from contractions of
$G$ is denoted by $C(H)$. An example is depicted in Figure~\ref{fig:3graphs}, where we
obtain the flat $H$ by contracting the edge $v_1v_4$. For this flat, the set of
contracted vertices is $C(H) = \{v_1v_4\}$.
For a mixed graph $G$, let $G^{u}$ denote the underlying undirected graph, that is, the graph obtained from $G$ by replacing its arcs with undirected edges.
For an acyclic orientation $\sigma$ of the underlying undirected graph $H^{u}$ of a flat $H$ of $G$, let $T(\sigma)$ be the set of all tail vertices of arcs $a \in A(H)$ for which the orientation of the corresponding edge in $\sigma$ is opposite to the direction of~$a$.
\begin{figure}
\caption{A mixed graph $G$}
\label{fig:fig4a}
\caption{The flat $H$ obtained by contracting the edge $v_1v_4$.}
\label{fig:fig4b}
\caption{The underlying undirected graph.}
\label{fig:fig4c}
\caption{A mixed graph, one of its flats, and the associated underlying undirected graph.}
\label{fig:3graphs}
\end{figure}
\begin{thm} \label{mainthm2}
For a mixed graph $G$,
\[
\chi_G(x,y) \ = \sum_{H \text{ \rmfamily flat of } G}
\sum_{ \substack{ \sigma \text{ \rmfamily acyclic} \\ \text{\rmfamily orientation of } H^{u} } }\!\!\!\!
\Omega^{\circ}_{\sigma, \, C(H) \cup T(\sigma)} (x,y) \, .
\]
\end{thm}
Note that this implies that $\chi_G(x,y)$ is a polynomial (because $\Omega^{\circ}_{\sigma,
\, C(H) \cup T(\sigma)} (x,y)$ is).
\begin{proof}
Let $c: V \longrightarrow [x]$ be a coloring of the mixed graph $G$ that satisfies the coloring conditions~\eqref{cond:bcpomg1} and~\eqref{cond:bcpomg2}.
Note that the colors of the end-points of edges and arcs can be equal only if they are $>y$.
Let $H$ be a flat of $G$ obtained by contracting all edges and arcs whose end-points have the same color. Thus the vertices in $C(H)$ have color labels $>y$.
Consider $H^{u}$, the underlying undirected graph of the flat $H$. We orient the edges of $H^{u}$ along the color gradient, that is, for the edge $uv$, we introduce the orientation $u \longrightarrow v$ if and only if $c(u) < c(v)$. Let $\sigma$ be such an orientation.
Since no two vertices in $H^{u}$ that are joined by an edge have identical color labels, the orientation $\sigma$ is acyclic.
Let
\[
T(\sigma) \ \coloneqq \ \left\{ v \in V(H) : \, \overrightarrow{vw} \in A(H) \ \text{ and }
\ v \longleftarrow w \ \text{ in } \ \sigma \right\} .
\]
As the color gradient is decreasing along the arcs with tail vertices in the set $T(\sigma)$, we have $c(u) >y$ for each $u \in T(\sigma)$ from the coloring constraints.
Now we regard the acyclic orientation $\sigma$ as a binary relation on the set $V(H^{u})$ defined by $u \preceq v$ if $u \longrightarrow v$. This gives us a bicolored poset $P$ where the vertices in the set $C(H) \cup T(\sigma)$ are celeste elements. The coloring $c$ is an order preserving $(x,y)$\textendash map on $P$. The bivariate order polynomial $\Omega^{\circ}_{\sigma, \, C(H) \cup T(\sigma)} (x,y)$ counts all such order preserving maps.
Conversely, given a flat $H$ of $G$ and an acyclic orientation $\sigma$ of $H^{u}$,
an order preserving $(x,y)$\textendash map counted by $\Omega^{\circ}_{\sigma, \, C(H) \cup T(\sigma)} (x,y)$ can be extended to a coloring of $G$ as follows.
All the vertices of $H$ get colors such that the color gradient follows $\sigma$.
The celeste elements of the bicolored poset induced by the orientation $\sigma$ are given by the set $C(H) \cup T(\sigma)$. Hence the vertices in the set $T(\sigma)$ get colors $>y$. The coloring is then extended to the graph $G$ by giving equal colors $>y$ to the vertices of $G$ that were contracted to form the flat $H$.
This gives a coloring of the mixed graph $G$.
Consider two distinct colorings $c_1$ and $c_2$ of $G$. We need to show that the corresponding order preserving maps $\phi_1$ and $\phi_2$ are distinct.
Construct the flats $H_1$ and $H_2$ of the graph by contracting those edges and arcs that
have end-vertices with equal color labels with respect to the colorings $c_1$ and $c_2$ respectively.
If $H_1 \neq H_2$, then the posets on the vertices of the underlying undirected graphs
$H_1^{u}$ and $H_2^{u}$ will be different for each coloring. This will give us distinct
order preserving $(x,y)$-maps.
Suppose $H_1 = H_2$, that is, both flats are identical. Then the underlying undirected graphs $H_1^{u}$ and $H_2^{u}$ are also identical. Let $\sigma_1$ and $\sigma_2$ be the acyclic orientations of $H_1^{u}$ and $H_2^{u}$ induced by the colorings $c_1$ and $c_2$, respectively.
Now define $T_i (\sigma_i) \coloneqq \left\{ v \in V(H_i) \mid \overrightarrow{vw} \in
A(H_i) \text{ and } v \longleftarrow w \text{ in } \sigma_i \text{ of } H_i^{u} \right\}$
for $i=1,2$. If these sets are distinct, then the celeste elements in the corresponding
bicolored posets will be distinct, resulting in different vertex orderings which will give distinct order preserving $(x,y)$\textendash maps for corresponding colorings.
If for the vertex sets, $T_1(\sigma_1) = T_2(\sigma_2)$ but the acyclic orientations $\sigma_1$ and $\sigma_2$ are distinct, then the posets induced by these acyclic orientations will be distinct resulting in distinct order preserving $(x,y)$\textendash maps for corresponding graph colorings.
If the flats, the acyclic orientation and the celeste sets are identical, then the
bicolored posets corresponding to both colorings are the same. The bivariate order
polynomial $\Omega^{\circ}_{\sigma, \, C(H) \cup T(\sigma)} (x,y)$ counts all possible
order preserving $(x,y)$\textendash maps on this bicolored poset exactly once.
\end{proof}
\begin{figure}
\caption{Acyclic orientations of contractions of $G$. }
\label{fig:eg2}
\end{figure}
Naturally, an undirected graph is a special case of the above with $A = \emptyset$, and
Theorem~\ref{mainthm2} specializes to one of the results of~\cite{Beckbop}:
\begin{cor}\label{cor:undirpoly}
For an undirected graph $G=\left(V,E\right)$,
\begin{align*}
\chi_G(x,y) &= \sum_{H \text{ \rmfamily flat of } G \hspace{3pt}}
\sum_{ \substack{ \sigma \text{ \rmfamily acyclic} \\ \text{\rmfamily orientation of } H } }\!\!\!\! \Omega^{\circ}_{\sigma, \, C(H)} (x,y).
\end{align*}
\end{cor}
\begin{exam}\label{ex:k3}
For the mixed graph $G$ shown in Figure~\ref{fig:eg2fig1}, our proof of Theorem~\ref{mainthm2} is illustrated by Figure~\ref{fig:eg2}.
For $H=G$, there are six acyclic orientations of $H^{u}$ as shown in Figure~\ref{fig:eg2}.
There are three flats obtained by contracting one edge or one arc in $G$. For the underlying undirected graph of each such flat, there are two acyclic orientations. Figure~\ref{fig:eg2} also shows these orientations. There is one flat obtained by contracting two edges or an edge and an arc of the graph, resulting in a single vertex $v_1v_2v_3$.
Computing the bivariate order polynomial for each of these orientations yields
\begin{align*}
\chi_G(x,y) &= 3 {x \choose 3} + 2(x-y) {y \choose 2} + (3y+6) {x-y \choose 2} + 3 {x-y \choose 3} + (x-y)(3y+1) \\
&= x^3 - \frac{1}{2}xy^2 - \frac{5}{2}xy+y^2+y \, .
\end{align*}
\end{exam}
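The polynomial in Example~\ref{ex:k3} can also be checked by brute-force enumeration. The following Python sketch assumes a particular reading of Figure~\ref{fig:eg2fig1} (not reproduced here), namely a triangle with two undirected edges and one arc, which is consistent with the flats described above.
\begin{verbatim}
from itertools import product

# Hypothetical reading of Figure fig:eg2fig1:
# vertices {0,1,2}, undirected edges {0,1}, {0,2}, and one arc 1 -> 2.
edges = [(0, 1), (0, 2)]
arcs = [(1, 2)]

def chi(x, y):
    """Count colorings satisfying conditions (bcpomg1) and (bcpomg2)."""
    total = 0
    for c in product(range(1, x + 1), repeat=3):
        ok = all(c[u] != c[v] or c[u] > y for u, v in edges)
        ok = ok and all(c[u] < c[v] or c[u] > y for u, v in arcs)
        total += ok
    return total

for x in range(1, 7):
    for y in range(0, x + 1):
        # compare with 2 * (x^3 - x y^2/2 - 5 x y/2 + y^2 + y)
        assert 2*chi(x, y) == 2*x**3 - x*y**2 - 5*x*y + 2*y**2 + 2*y, (x, y)
\end{verbatim}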
\section{Reciprocity} \label{sec:bcpmgReciprocity}
An orientation $\sigma$ and a coloring $c: V \longrightarrow [x]$ of the mixed graph $G$
satisfying~\eqref{cond:bcpomg1} and~\eqref{cond:bcpomg2}
\iffalse
\begin{itemize}
\item for every undirected edge $uv \in E$, $$c(u) \neq c(v) \text{ or } c(u)>y ;$$
\item for every directed arc $\overrightarrow{uv} \in A$
$$c(u) < c(v), \text{ or } c(u) > y,$$
\end{itemize}
\fi
are \Def{compatible} if $c(u) \leq c(v)$ for any edge/arc directed from $u$ to $v$ in~$\sigma$.
We define $m_{H}(x,y)$ to be the number of compatible pairs $(\sigma,c)$ consisting of an
acyclic orientation $\sigma$ of $H^u$ and a coloring $c$ with $c(v) > y$ if $v \in C(H) \cup T(\sigma)$.
\begin{thm}\label{thm:chirec}
For a mixed graph $G$,
\begin{align*}
\chi_G(-x,-y) \ &= \sum_{H \text{ \rmfamily flat of } G \hspace{3pt}} (-1)^{\vert V(H) \vert}
\, m_{H} (x,y) \, .
\end{align*}
\end{thm}
\begin{proof} By the reciprocity result of bivariate order polynomials~\eqref{eq:reciprocityBOP},
\begin{align*}
\chi_G(-x,-y) \ &= \sum_{H \text{ \rmfamily flat of } G \hspace{3pt}} \sum_{ \substack{\sigma \text{ \rmfamily acyclic} \\ \text{ \rmfamily orientation of } H^u \\
}} \!\!\!\!
(-1)^{\vert V(H) \vert} \, \Omega_{\sigma, \, C(H) \cup T(\sigma)} (x,y+1)\\
&= \sum_{H \text{ \rmfamily flat of } G \hspace{3pt}} (-1)^{\vert V(H) \vert} \, m_{H}
(x,y) \, .
\end{align*}
The last equation holds because $\Omega_{\sigma, \, C(H) \cup T(\sigma)} (x,y+1)$ counts the number of order preserving maps $\varphi : \sigma \longrightarrow [x]$ subject to the following conditions:
\begin{itemize}
\item[$\bullet$] for $u \in C(H) \cup T(\sigma) $, we have $\varphi(u) \geq y+1$;
\item[$\bullet$] the map $\varphi$ is compatible with $\sigma$. \qedhere
\end{itemize}
\end{proof}
Once more, undirected graphs are mixed graphs with $A = \emptyset$, and so
Theorem~\ref{thm:chirec} specializes to one of the main results of~\cite{Beckbop}:
\begin{cor}
For an undirected graph $G=\left(V,E\right)$,
\[
\chi_G(-x,-y) \ = \sum_{H \text{ \rmfamily flat of } G} (-1)^{\vert V(H) \vert} \, m_H(x,y) \, ,
\]
where $m_H(x,y)$ is the number of pairs $(\sigma,c)$ consisting of an acyclic orientation $\sigma$ of $H$ and a compatible coloring $c: V(H) \longrightarrow [x]$ such that $c(v) >y$ if $v \in C(H)$.
\end{cor}
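As a small illustration of the corollary, let $G$ consist of a single undirected edge $uv$. Then $\chi_G(x,y)=x^2-y$, since the endpoints may share a color only if that color is $>y$, and hence $\chi_G(-x,-y)=x^2+y$. On the right-hand side, the flat $H=G$ contributes $(-1)^{2}\, m_G(x,y)=x(x+1)$, coming from the two acyclic orientations of the edge with ${x+1 \choose 2}$ weakly monotone colorings each, while the flat obtained by contracting $uv$ consists of a single contracted vertex and contributes $(-1)^{1}\, m_H(x,y)=-(x-y)$. Indeed $x(x+1)-(x-y)=x^2+y$.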
\section*{Acknowledgments}
We are grateful to two anonymous referees for helpful comments. SK thanks Sophia Elia and Sophie Rehberg for encouraging discussions.
\setlength{\parskip}{0cm}
\end{document}
\begin{document}
\title{Quantum Communication between Multiplexed Atomic Quantum Memories}
\author{C. Li$^{}$}
\affiliation{Center for Quantum Information, IIIS, Tsinghua University, Beijing 100084, PR China}
\author{ N. Jiang$^{\footnotemark[1]}$}
\affiliation{Center for Quantum Information, IIIS, Tsinghua University, Beijing 100084, PR China}
\author{Y.-K. Wu$^{}$}
\affiliation{Center for Quantum Information, IIIS, Tsinghua University, Beijing 100084, PR China}
\author{W. Chang$^{}$}
\affiliation{Center for Quantum Information, IIIS, Tsinghua University, Beijing 100084, PR China}
\author{Y.-F. Pu$^{\footnotemark[2]}$}
\affiliation{Center for Quantum Information, IIIS, Tsinghua University, Beijing 100084, PR China}
\author{S. Zhang$^{}$}
\affiliation{Center for Quantum Information, IIIS, Tsinghua University, Beijing 100084, PR China}
\author{L.-M. Duan$^{\footnotemark[3]}$}
\affiliation{Center for Quantum Information, IIIS, Tsinghua University, Beijing 100084, PR China}
\renewcommand{\thefootnote}{\fnsymbol{footnote}}
\footnotetext[1]{Present address: Department of Physics, Beijing Normal University, Beijing 100875, China}
\footnotetext[2]{Present address: Institute for Experimental Physics, University of Innsbruck, A-6020 Innsbruck, Austria.}
\begin{abstract}
The use of multiplexed atomic quantum memories (MAQM) can significantly enhance the efficiency of establishing entanglement in a quantum network. In previous experiments, individual elements of a quantum network, such as the generation, storage and transmission of quantum entanglement, have been demonstrated separately. Here we report an experiment showing the compatibility of these basic operations. Specifically, we generate photon-atom entanglement in a $6\times 5$ MAQM, convert the spin wave to a time-bin photonic excitation after a controllable storage time, and then store and retrieve the photon in a second MAQM for another controllable storage time. The preservation of quantum information in this process is verified by measuring the state fidelity. We also show that our scheme supports quantum systems of higher dimension than a qubit.
\end{abstract}
\maketitle
The quantum network is one of the central targets of quantum information science \cite{kimble2008quantum}, with wide applications in quantum communication \cite{PhysRevLett.67.661,PhysRevLett.70.1895} and distributed quantum computing \cite{PhysRevA.59.4249}. To generate and distribute quantum entanglement among distant nodes of a quantum network, the quantum repeater \cite{briegel1998quantum} has been proposed, and it has been shown that high-fidelity quantum memories are necessary for its efficient implementation \cite{duan2001long,sangouard2011quantum}.
The atomic ensemble has been one of the most popular candidates for realizing quantum repeaters since the proposal of the DLCZ protocol \cite{duan2001long}: simply using linear optics and photon counting technology, quantum information can be stored as atomic excitations in the ensemble with a long lifetime, and can be efficiently retrieved as flying photons through the collective effect of the atoms \cite{hammerer2010quantum}. Tremendous progress has been made in this field: the basic elements of a quantum network, namely the generation, transmission, storage and retrieval of quantum information, have been achieved in atomic ensembles \cite{van_der_Wal196,Matsukevich663,chaneliere2005storage}, and entanglement distribution between remote ensembles has been demonstrated \cite{chou2005measurement,Chou1316}.
To further improve the efficiency of entanglement distribution, many variants of the DLCZ protocol have been proposed \cite{sangouard2011quantum}. Among them is the use of multiplexed quantum memories \cite{collins2007multiplexed}: with the ability to entangle arbitrary pairs of memory cells in distant nodes, multiplexing can outperform simple parallelization and significantly reduce the communication time, especially when the coherence time of the memory is limited. Pioneering experiments have demonstrated multiplexing in the atomic ensemble and its solid-state variant using the
spatial \cite{lan2009multiplexed, pu2017experimental}, angular \cite{Chrapkiewicz2017highcapacity} or temporal modes
\cite{saglamyurek2011broadband,Usmani2010mapping,Tang2015storage,
Laplane2016Multiplexed}. For the spatial multiplexing, fundamental elements of the quantum network have been realized separately, such as the heralded generation of atomic excitation \cite{pu2017experimental} and entanglement \cite{pu2018experimental}, their conversion to photonic qubits, and the storage and retrieval of the photonic qubits in arbitrary memory cells \cite{Jiang2019Experimental}.
With these individual elements at hand, it is still important to show their compatibility in a single setup \cite{chaneliere2005storage}. Therefore, in this experiment, we first combine these operations and demonstrate the generation of photon-ensemble entanglement in a multiplexed quantum memory, the quantum state transfer from atomic qubits to time-bin photonic qubits, and further the storage and retrieval of the photonic qubits in a second multiplexed memory. Then we show that our experimental setup natively supports high-dimensional quantum systems (qudits), which can enhance the efficiency of various tasks of the quantum network such as quantum communication \cite{PhysRevA.61.062308,PhysRevA.64.012306,PhysRevLett.88.127902,PhysRevLett.96.090501,Islame1701491} and quantum computing \cite{1707.08834,1905.10481}.
\begin{figure}
\caption{\textbf{Generation, transmission and storage of entanglement in multiplexed atomic quantum memory (MAQM).}}
\end{figure}
\section{Results}
\subsection{Generation of photon-atom qubit entanglement and quantum state transfer}
The experimental setup is shown in FIG.~1, which consists of two cold $^{87}$Rb ensembles in two magneto-optical traps (MOT). All the atoms are initially prepared in the ground state $|g\rangle \equiv |5S_{1/2},F=1\rangle$. A weak write beam is applied to generate the quantum correlation between a signal photon and a spin-wave excitation in one atomic ensemble (MAQM1) through the DLCZ scheme. The signal photon is collected by a single photon detector (SPD1); while after a controllable storage time, the spin wave is retrieved by a strong read beam to an idler photon,
which is directed to the second ensemble (MAQM2) by a $7\,$m fiber, and is further stored there as a collective spin wave via electromagnetically induced transparency (EIT).
After a second controllable storage time, this spin wave is retrieved and finally collected by SPD2.
Crossed acousto-optic deflectors (AODs) and lens under $4f$ configuration are used for 2D multiplexing and de-multiplexing \cite{pu2017experimental}. All the beams and single photon modes can be directed to or collected from a particular site of the ensembles by programming the RF signals in the crossed AODs.
In this way, the two atomic ensembles are divided into two $6\times5$ arrays of micro-ensembles, which form our two multiplexed atomic quantum memories (MAQMs). As we show in the Supplementary Materials, each memory cell can be addressed individually with low crosstalk errors.
In the following experiment, three pairs and a $2\times2$ sub-array of memory cells in each MAQM are chosen to demonstrate the entanglement generation, transmission and storage (see FIG.~2(a), 2(b)).
\begin{figure}
\caption{\textbf{Retrieval efficiency of MAQMs and control pulse sequences.}}
\end{figure}
The pulse sequence for the qubit state generation, transmission and storage is shown in FIG.~2(c).
When the write beam is equally split into two micro-ensembles located at $(x_1,\,y_1)$ and $(x_1,\,y_2)$, conditioned on a signal photon being detected, the state of the atomic ensemble and the signal photon prior to the detection can be described by
\begin{equation}
|\Psi\rangle = \frac{1}{\sqrt{2}} \left(|x_1,\,y_1\rangle_{s}|x_1,\,y_1\rangle_{a_1} + e^{i\phi}|x_1,\,y_2\rangle_{s}|x_1,\,y_2\rangle_{a_1}\right),
\end{equation}
where subscripts $s$ and $a_1$ denote the signal photon and the spin-wave excitation in MAQM1, and the relative phase $\phi$, which we set as zero in this experiment, can be precisely controlled by the phase of the RF signal in the AODs.
After a controllable storage time $t_1=15.6\,\mu$s in MAQM1, we retrieve the spin-wave excitation into a photonic qubit. Note that the spin waves in the two memory cells should not be retrieved at the same time; otherwise the two idler modes would interfere at the demultiplexing AODs that combine them. Instead, we convert the spin-wave qubit into a time-bin qubit
and transfer it to MAQM2 for storage.
First, we switch all the AODs to address the $(x_1,\,y_1)$ cell in MAQM1 and the $(x'_1,\,y'_1)$ cell in MAQM2. The idler photon is retrieved from the $(x_1,\,y_1)$ cell of MAQM1 by a strong read beam, and is further stored into the $(x'_1,\,y'_1)$ cell of MAQM2 by adiabatically shutting off the coupling beam. Then the state is described by
\begin{equation}
|\Psi'\rangle = \frac{1}{\sqrt{2}} \left[e^{i(\alpha_1-\beta_1)}|x_1,\,y_1\rangle_{s}|x'_1,\,y'_1\rangle_{a_2} + |x_1,\,y_2\rangle_{s}|x_1,\,y_2\rangle_{a_1}\right],
\end{equation}
where $\alpha_1$ and $\beta_1$ are the phases introduced by the read and the coupling beams respectively. The additional phase due to the transmission of the photon is fixed for the given memory cells and thus can be absorbed into the definition of the basis states. Followed by the same operations on the $(x_1,\,y_2)$ cell in MAQM1 and the $(x'_1,\,y'_2)$ cell in MAQM2 after an interval of $\tau_1=7.8\,\mu$s between the time bins, the state becomes
\begin{equation}
|\Psi''\rangle = \frac{1}{\sqrt{2}} \left[e^{i(\alpha_1-\beta_1)}|x_1,\,y_1\rangle_{s} |x'_1,\,y'_1\rangle_{a_2} + e^{i(\alpha_2-\beta_2)}|x_1,\,y_2\rangle_{s} |x'_1,\,y'_2\rangle_{a_2}\right].
\end{equation}
In the experiment, the read and the coupling beams are produced from the same laser, with the coupling beam being guided to MAQM2 by an additional $7\,$m fiber (not shown in FIG.~1). The relative phase fluctuation between the two paths is small during the experimental cycle even without any active phase-locking technique, and the wavefronts of the two beams are within the coherence time as the spin-wave excitation is transferred from MAQM1 to MAQM2. Consequently, the phase factors in the above equation cancel and we get
\begin{equation}
|\Psi''\rangle = \frac{1}{\sqrt{2}} \left(|x_1,\,y_1\rangle_{s} |x'_1,\,y'_1\rangle_{a_2} + |x_1,\,y_2\rangle_{s} |x'_1,\,y'_2\rangle_{a_2}\right).
\end{equation}
Finally, by turning on the coupling beam again after a controllable storage time of $t_2=7.8\,\mu$s, we retrieve the spin-wave excitation in MAQM2 for single-photon measurements.
The qubit entanglement is verified by quantum state tomography \cite{James2001On}. In FIG.~3(a) we show the reconstructed density matrix of the signal photon and the atomic qubit in pair A of MAQM1 by blocking the coupling, cooling and repumping beams of MAQM2; and the corresponding reconstructed density matrix after EIT storage in pair \uppercase\expandafter{\romannumeral1} of MAQM2 is shown in FIG.~3(b). Entanglement fidelity $F=\langle \Psi_0|\rho |\Psi_0\rangle$ can be used to quantify the entanglement when the atomic
qubit is stored in MAQM1 or MAQM2, where $|\Psi_0\rangle$ is the maximally entangled two-qubit state and $\rho$ the reconstructed experimental density matrix. In Table 1 we report these values for quantum state transfer between several typical pairs of memory cells. All the measured entanglement fidelities are above 0.86 in MAQM1, and they decay only slightly after the transmission to MAQM2. The decay of fidelity is mainly caused by the small difference in the EIT storage-retrieval efficiency of the two micro-ensembles. We also compute the transmission fidelity, i.e., the similarity between the entangled states when the atomic qubit is stored in MAQM1 and in MAQM2, for these cases; the values are all above 0.85.
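As an illustration of how such a fidelity is evaluated from a reconstructed density matrix, the following is a minimal numpy sketch; the density matrix below is a hypothetical stand-in rather than measured data.
\begin{verbatim}
import numpy as np

# Maximally entangled two-qubit target state (|UU> + |DD>)/sqrt(2)
psi0 = np.array([1, 0, 0, 1]) / np.sqrt(2)

# Stand-in for a reconstructed density matrix: a slightly depolarized
# version of the ideal state (real data would come from tomography).
w = 0.9
rho = w * np.outer(psi0, psi0) + (1 - w) * np.eye(4) / 4

# F = <Psi0| rho |Psi0>
fidelity = np.real(psi0.conj() @ rho @ psi0)
print(fidelity)   # 0.9 + 0.1/4 = 0.925
\end{verbatim}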
\begin{figure}
\caption{\textbf{Entanglement verification.}}
\end{figure}
\begin{table}[ptb]
\centering
\includegraphics[width=18cm]{figure4.pdf}\\
\caption{\textbf{Fidelity of quantum states.} The entanglement fidelity of the qubit state is calculated by comparing the reconstructed density matrix with the ideal maximally entangled state. In the third row we directly compute the fidelity between the two reconstructed density matrices when the atomic qubit is in MAQM1 and in MAQM2; the high fidelity shows a faithful transmission of the quantum state. The qudit state transmission is characterized by the W-state fidelity \cite{pu2018experimental}; here the transmission fidelity is not computed because we do not reconstruct the complete density matrices. The error bars are calculated by Monte Carlo simulation assuming a Poisson distribution of the photon counts.}
\end{table}
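A simplified sketch of this Monte Carlo procedure, illustrated for a single count-ratio quantity rather than for the full tomographic reconstruction (the counts below are hypothetical), is the following.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n_good, n_bad = 450, 50        # hypothetical coincidence counts

samples = []
for _ in range(10000):
    g = rng.poisson(n_good)    # resample each count from a
    b = rng.poisson(n_bad)     # Poisson distribution
    samples.append(g / (g + b))

estimate = n_good / (n_good + n_bad)
print(estimate, np.std(samples))
\end{verbatim}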
\subsection{Generalization to high-dimensional qudits}
The qudit entanglement can be generated and transferred in the same way, with the pulse sequence given by FIG.~2(d). Here, the write beam is divided equally into four paths. Prior to the detection of a signal photon, the entangled state of the photon and MAQM1 is
\begin{equation}
|\Phi\rangle = \frac{1}{2} \sum_{i,j=1,2} |x_i,\,y_j\rangle_{s} |x_i,\,y_j\rangle_{a_1},
\end{equation}
where the relative phases between different memory cells are again set to zero. Subsequently, the spin wave in different cells can be retrieved in arbitrary order, and can be transferred to the corresponding memory cells of MAQM2. This leads to an entangled state
\begin{equation}
|\Phi'\rangle = \frac{1}{2} \sum_{i,j=1,2} |x_i,\,y_j\rangle_{s} |x'_i,\,y'_j\rangle_{a_2}.
\end{equation}
However, the verification of this qudit entanglement is more challenging: direct quantum state tomography would require many more measurements. Since our main purpose is to demonstrate the transfer of quantum states, here we collect the signal photon in a fixed state to simplify the measurements. Specifically, we set the signal AODs such that the signal photon modes from the four memory cells are combined with equal weight. Upon a photon detection in the generation stage, we project MAQM1 into a W state
\begin{equation}
|\tilde{\Phi}\rangle = \frac{1}{2} \sum_{i,j=1,2} |x_i,\,y_j\rangle_{a_1},
\end{equation}
and the state of MAQM2 after the transmission is
\begin{equation}
|\tilde{\Phi}'\rangle = \frac{1}{2} \sum_{i,j=1,2} |x'_i,\,y'_j\rangle_{a_2}.
\end{equation}
Following the steps of Ref.~\cite{pu2018experimental}, we measure the W state fidelity before and after the qudit state transmission in Table~1. The fidelity only decays slightly from $(94.4\pm 1.6)\%$ to $(87.7\pm 2.9)\%$, which suggests that the quantum information is well preserved during the state transfer process.
The spin wave components in each memory cell when stored in MAQM1 and in MAQM2 are shown in FIG.~3(c) and 3(d). Note that they are not the density matrices of the atomic qudits because we do not perform complete quantum state tomography.
\section{Discussion}
In this letter, we demonstrate the generation, transmission, storage and retrieval of quantum states between two MAQMs using time-bin qubits and qudits. The experimental results confirm that the quantum information is preserved during the process.
Higher dimensional qudit entanglement and state transmission can be achieved by the same method.
However, the retrieval efficiency is mainly limited by the time needed to transfer the time-bin qudits, which is proportional to the dimension, compared with the memory time of the atomic ensembles. On the one hand, the interval between adjacent time bins is lower-bounded by the $2\,\mu$s switching time of the AODs, which is governed by the acoustic speed in the AO crystal and the waist of the laser beam on the AODs. To shorten the switching time, we may use materials with a higher acoustic speed and reduce the laser beam waist. On the other hand, the memory times of the spin waves in both of our atomic ensembles are only tens of microseconds. They can be extended to the sub-millisecond range by optically pumping all the atoms to a magnetic-field insensitive state \cite{zhao2009long}.
The reduction in the state fidelity is primarily caused by the nonuniformity of the optical depth in different memory cells, and thus by their different retrieval efficiencies. The MAQM in our experiment requires a large cross section of the ensemble to accommodate the large number of memory cells. Therefore, the cigar-shaped ensemble cannot be used even though it has an almost uniform EIT retrieval efficiency \cite{PhysRevLett.120.183602,vernaz2018highly}. Nevertheless, improvement is still possible if we individually trap several ensembles in the same vacuum chamber \cite{chisholm2018three}. Besides, the number of micro-ensembles can be increased by squeezing the waists of the laser beams and photon modes, as well as by loading larger atomic ensembles.
\section{Methods}
\subsection{Initialization of atomic ensembles}
The $^{87}$Rb atomic cloud of MAQM1 is first cooled by a 2D$^{+}$ MOT and then by a 3D MOT. The three pairs of strong cooling beams are red detuned to the D2 cyclic transition $|5S_{1/2},\,F=2\rangle\leftrightarrow|5P_{3/2},\,F=3\rangle$, and the repumping beams are on resonance with the $|5S_{1/2},\,F=1\rangle\leftrightarrow|5P_{3/2},\,F=2\rangle$ transition. The angle between the two pairs of horizontal cooling beams is set to $60^{\circ}$ to produce an ellipsoidal ensemble with about two billion atoms, such that a larger cross section can be achieved to support more micro-ensembles. Then we apply a compressed MOT for $10\,$ms by increasing the detuning of the cooling beams and the trap coil current to twice their original values. After this stage, the ensemble stays at the center of the trap, and is large enough for the experiment. The atoms are further cooled by polarization gradient cooling (PGC) for $7\,$ms, reaching a final temperature of about $25\,\mu$K, and an optical depth of about 10 for the D1 transition $|5S_{1/2},\,F=2\rangle \leftrightarrow |5P_{1/2},\,F=2\rangle$. The details about the preparation of MAQM2 are described in Ref.~\cite{Jiang2019Experimental}. The memory time of MAQM1 is about $65\,\mu$s, and that of MAQM2 is about $27.8\,\mu$s.
Before the experiment, we apply $700\,$ns pulses on the targeted micro-ensembles of MAQMs to optically pump the atoms to the ground state $|g\rangle\equiv|5S_{1/2},\,F=1\rangle$.
\subsection{Multiplexing and de-multiplexing RF signals}
The RF signals for AODs (AA DTSXY-400) are generated by arbitrary waveform generators (AWG, Tektronix 5014C). The relative phases between different optical paths are intrinsically stable for multiplexing and de-multiplexing, hence can be adjusted by varying the phases of different RF frequency components on the AODs. To form the memory cell array in MAQM1, the RF frequency of its crossed AODs is swept from $95.5\,$MHz to $103\,$MHz in the Y direction, and from $97\,$MHz to $103\,$MHz in the X direction, both with a step size of $1.5\,$MHz. As for MAQM2, the RF frequency is scanned from $99\,$MHz to $105\,$MHz in the Y direction, and from $101.1\,$MHz to $105.9\,$MHz in the X direction, both with a step size of $1.2\,$MHz.
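As a quick consistency check of these numbers, a minimal numpy sketch generating the two frequency grids:
\begin{verbatim}
import numpy as np

# RF frequency grids of the crossed AODs (values from the text, in MHz)
maqm1_y = np.arange(95.5, 103.0 + 1e-9, 1.5)    # 6 frequencies
maqm1_x = np.arange(97.0, 103.0 + 1e-9, 1.5)    # 5 frequencies
maqm2_y = np.arange(99.0, 105.0 + 1e-9, 1.2)    # 6 frequencies
maqm2_x = np.arange(101.1, 105.9 + 1e-9, 1.2)   # 5 frequencies

# each pair of grids addresses the 6 x 5 = 30 micro-ensembles of one MAQM
assert maqm1_x.size * maqm1_y.size == 30
assert maqm2_x.size * maqm2_y.size == 30
\end{verbatim}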
\subsection{Laser beams for state generation, storage and retrieval}
To generate the photon-atom entanglement in MAQM1, a $100\,$ns write pulse is applied, which is blue detuned by $18\,$MHz to the D1 transition $|g\rangle\leftrightarrow|e\rangle\equiv|5P_{1/2},\,F=2\rangle$. If no signal photon is detected, a $500\,$ns clean pulse resonant to the $|e\rangle\leftrightarrow|s\rangle\equiv|5S_{1/2},\,F=2\rangle$ transition is applied to pump the atoms back to $|g\rangle$, and the process is repeated. If a signal photon is detected, we further apply the sequences in FIG.~2.
The optimal power to retrieve the spin wave from a single micro-ensemble is about $64\,\mu$W for the read and the coupling beams. It ensures the high retrieval efficiency and the bandwidth matching between the idler photon and the MAQM2. When addressing multiple cells in the cell pairs and the $2\times 2$ sub-arrays, the optimal total powers are $140\,\mu$W and $300\,\mu$W respectively. During different stages of the experiment, the powers of the read and the coupling beams on each optical path are adjusted to the appropriate values by controlling the amplitudes of RF signals in the AODs. The conditional control of the write, the read and the coupling pulses is achieved by a home-made field-programmable gate array (FPGA). It also registers the detection of the signal and the idler photons and their coincidence from the SPDs. Furthermore, it produces the event trigger for AWGs to output the next pre-programmed RF signal to the AODs.
\subsection{Timing of control pulses}
The retrieval efficiency of the collective spin wave excitation is modulated by Larmor precession of the atoms, because the background magnetic field is not completely suppressed. Therefore, we set the storage time in MAQM1 and MAQM2, and the interval between time bins, to the periods of Larmor precession for the highest efficiency. The Larmor periods of the MAQMs can be controlled by the magnetic field parallel to the write/read beams in MAQM1 and the coupling beam in MAQM2; the magnetic fields are adjusted by two pairs of bias coils for each MAQM. The ambient magnetic field can be roughly compensated, so the Larmor period of MAQM1 can be widely tuned from $2\,\mu$s to $16\,\mu$s. Despite the change in the magnetic field, the ensemble remains at the center of the MOT after the compression stage, so the optical circuits need no considerable modification. The Larmor period of MAQM2 can be precisely adjusted from $1.2\,\mu$s to $1.4\,\mu$s. In the experiment of qubit (qudit) transfer, the Larmor period of MAQM1 is set to $7.8\,\mu$s ($3.9\,\mu$s), and that of MAQM2 is optimized accordingly.
\subsection{Measurement of photon-atom qubit entanglement}
To measure the signal photon and the atomic qubit (after the retrieval to a photon) in arbitrary basis $a|U\rangle + b|D\rangle$, we adjust the relative amplitude and phase of the two frequency components on the AODs that correspond to the memory cells $U$ and $D$ \cite{pu2017experimental}.
This enables us to perform the quantum state tomography using the measurement basis described in Refs.~\cite{pu2017experimental,Jiang2019Experimental}.
\begin{thebibliography}{35}
\providecommand{\natexlab}[1]{#1}
\providecommand{\url}[1]{\texttt{#1}}
\expandafter\ifx\csname urlstyle\endcsname\relax
\providecommand{\doi}[1]{doi: #1}\else
\providecommand{\doi}{doi: \begingroup \urlstyle{rm}\Url}\fi
\bibitem[Kimble(2008)]{kimble2008quantum}
H~Jeff Kimble.
\newblock The quantum internet.
\newblock \emph{Nature}, 453\penalty0 (7198):\penalty0 1023, 2008.
\bibitem[Ekert(1991)]{PhysRevLett.67.661}
Artur~K. Ekert.
\newblock Quantum cryptography based on bell's theorem.
\newblock \emph{Phys. Rev. Lett.}, 67:\penalty0 661--663, Aug 1991.
\newblock \doi{10.1103/PhysRevLett.67.661}.
\newblock URL \url{https://link.aps.org/doi/10.1103/PhysRevLett.67.661}.
\bibitem[Bennett et~al.(1993)Bennett, Brassard, Cr\'epeau, Jozsa, Peres, and
Wootters]{PhysRevLett.70.1895}
Charles~H. Bennett, Gilles Brassard, Claude Cr\'epeau, Richard Jozsa, Asher
Peres, and William~K. Wootters.
\newblock Teleporting an unknown quantum state via dual classical and
einstein-podolsky-rosen channels.
\newblock \emph{Phys. Rev. Lett.}, 70:\penalty0 1895--1899, Mar 1993.
\newblock \doi{10.1103/PhysRevLett.70.1895}.
\newblock URL \url{https://link.aps.org/doi/10.1103/PhysRevLett.70.1895}.
\bibitem[Cirac et~al.(1999)Cirac, Ekert, Huelga, and
Macchiavello]{PhysRevA.59.4249}
J.~I. Cirac, A.~K. Ekert, S.~F. Huelga, and C.~Macchiavello.
\newblock Distributed quantum computation over noisy channels.
\newblock \emph{Phys. Rev. A}, 59:\penalty0 4249--4254, Jun 1999.
\newblock \doi{10.1103/PhysRevA.59.4249}.
\newblock URL \url{https://link.aps.org/doi/10.1103/PhysRevA.59.4249}.
\bibitem[Briegel et~al.(1998)Briegel, D\"ur, Cirac, and
Zoller]{briegel1998quantum}
H.-J. Briegel, W.~D\"ur, J.~I. Cirac, and P.~Zoller.
\newblock Quantum repeaters: The role of imperfect local operations in quantum
communication.
\newblock \emph{Phys. Rev. Lett.}, 81:\penalty0 5932--5935, Dec 1998.
\newblock \doi{10.1103/PhysRevLett.81.5932}.
\newblock URL \url{https://link.aps.org/doi/10.1103/PhysRevLett.81.5932}.
\bibitem[Duan et~al.(2001)Duan, Lukin, Cirac, and Zoller]{duan2001long}
L-M Duan, MD~Lukin, J~Ignacio Cirac, and Peter Zoller.
\newblock Long-distance quantum communication with atomic ensembles and linear
optics.
\newblock \emph{Nature}, 414\penalty0 (6862):\penalty0 413, 2001.
\bibitem[Sangouard et~al.(2011)Sangouard, Simon, de~Riedmatten, and
Gisin]{sangouard2011quantum}
Nicolas Sangouard, Christoph Simon, Hugues de~Riedmatten, and Nicolas Gisin.
\newblock Quantum repeaters based on atomic ensembles and linear optics.
\newblock \emph{Rev. Mod. Phys.}, 83:\penalty0 33--80, Mar 2011.
\newblock \doi{10.1103/RevModPhys.83.33}.
\newblock URL \url{https://link.aps.org/doi/10.1103/RevModPhys.83.33}.
\bibitem[Hammerer et~al.(2010)Hammerer, S\o{}rensen, and
Polzik]{hammerer2010quantum}
Klemens Hammerer, Anders~S. S\o{}rensen, and Eugene~S. Polzik.
\newblock Quantum interface between light and atomic ensembles.
\newblock \emph{Rev. Mod. Phys.}, 82:\penalty0 1041--1093, Apr 2010.
\newblock \doi{10.1103/RevModPhys.82.1041}.
\newblock URL \url{https://link.aps.org/doi/10.1103/RevModPhys.82.1041}.
\bibitem[van~der Wal et~al.(2003)van~der Wal, Eisaman, Andr{\'e}, Walsworth,
Phillips, Zibrov, and Lukin]{van_der_Wal196}
C.~H. van~der Wal, M.~D. Eisaman, A.~Andr{\'e}, R.~L. Walsworth, D.~F.
Phillips, A.~S. Zibrov, and M.~D. Lukin.
\newblock Atomic memory for correlated photon states.
\newblock \emph{Science}, 301\penalty0 (5630):\penalty0 196--200, 2003.
\newblock ISSN 0036-8075.
\newblock \doi{10.1126/science.1085946}.
\newblock URL \url{https://science.sciencemag.org/content/301/5630/196}.
\bibitem[Matsukevich and Kuzmich(2004)]{Matsukevich663}
D.~N. Matsukevich and A.~Kuzmich.
\newblock Quantum state transfer between matter and light.
\newblock \emph{Science}, 306\penalty0 (5696):\penalty0 663--666, 2004.
\newblock ISSN 0036-8075.
\newblock \doi{10.1126/science.1103346}.
\newblock URL \url{https://science.sciencemag.org/content/306/5696/663}.
\bibitem[Chaneliere et~al.(2005)Chaneliere, Matsukevich, Jenkins, Lan, Kennedy,
and Kuzmich]{chaneliere2005storage}
T~Chaneliere, DN~Matsukevich, SD~Jenkins, S-Y Lan, TAB Kennedy, and Alex
Kuzmich.
\newblock Storage and retrieval of single photons transmitted between remote
quantum memories.
\newblock \emph{Nature}, 438\penalty0 (7069):\penalty0 833, 2005.
\bibitem[Chou et~al.(2005)Chou, De~Riedmatten, Felinto, Polyakov, Van~Enk, and
Kimble]{chou2005measurement}
Chin-Wen Chou, H~De~Riedmatten, D~Felinto, SV~Polyakov, SJ~Van~Enk, and H~Jeff
Kimble.
\newblock Measurement-induced entanglement for excitation stored in remote
atomic ensembles.
\newblock \emph{Nature}, 438\penalty0 (7069):\penalty0 828, 2005.
\bibitem[Chou et~al.(2007)Chou, Laurat, Deng, Choi, de~Riedmatten, Felinto, and
Kimble]{Chou1316}
Chin-Wen Chou, Julien Laurat, Hui Deng, Kyung~Soo Choi, Hugues de~Riedmatten,
Daniel Felinto, and H.~Jeff Kimble.
\newblock Functional quantum nodes for entanglement distribution over scalable
quantum networks.
\newblock \emph{Science}, 316\penalty0 (5829):\penalty0 1316--1320, 2007.
\newblock ISSN 0036-8075.
\newblock \doi{10.1126/science.1140300}.
\newblock URL \url{https://science.sciencemag.org/content/316/5829/1316}.
\bibitem[Collins et~al.(2007)Collins, Jenkins, Kuzmich, and
Kennedy]{collins2007multiplexed}
O.~A. Collins, S.~D. Jenkins, A.~Kuzmich, and T.~A.~B. Kennedy.
\newblock Multiplexed memory-insensitive quantum repeaters.
\newblock \emph{Phys. Rev. Lett.}, 98:\penalty0 060502, Feb 2007.
\newblock \doi{10.1103/PhysRevLett.98.060502}.
\newblock URL \url{https://link.aps.org/doi/10.1103/PhysRevLett.98.060502}.
\bibitem[Lan et~al.(2009)Lan, Radnaev, Collins, Matsukevich, Kennedy, and
Kuzmich]{lan2009multiplexed}
S.-Y. Lan, A.~G. Radnaev, O.~A. Collins, D.~N. Matsukevich, T.~A.~B. Kennedy,
and A.~Kuzmich.
\newblock A multiplexed quantum memory.
\newblock \emph{Opt. Express}, 17\penalty0 (16):\penalty0 13639--13645, Aug
2009.
\newblock \doi{10.1364/OE.17.013639}.
\newblock URL
\url{http://www.opticsexpress.org/abstract.cfm?URI=oe-17-16-13639}.
\bibitem[Pu et~al.(2017)Pu, Jiang, Chang, Yang, Li, and
Duan]{pu2017experimental}
YF~Pu, N~Jiang, W~Chang, HX~Yang, C~Li, and LM~Duan.
\newblock Experimental realization of a multiplexed quantum memory with 225
individually accessible memory cells.
\newblock \emph{Nature communications}, 8:\penalty0 15359, 2017.
\bibitem[Chrapkiewicz et~al.(2017)Chrapkiewicz, Dabrowski, and
Wasilewski]{Chrapkiewicz2017highcapacity}
Rados{\l}aw Chrapkiewicz, Micha{\l} Dabrowski, and Wojciech Wasilewski.
\newblock High-capacity angularly multiplexed holographic memory operating at
the single-photon level.
\newblock \emph{Phys. Rev. Lett.}, 118:\penalty0 063603, Feb 2017.
\newblock \doi{10.1103/PhysRevLett.118.063603}.
\newblock URL \url{https://link.aps.org/doi/10.1103/PhysRevLett.118.063603}.
\bibitem[Saglamyurek et~al.(2011)Saglamyurek, Sinclair, Jin, Slater, Oblak,
Bussieres, George, Ricken, Sohler, and Tittel]{saglamyurek2011broadband}
Erhan Saglamyurek, Neil Sinclair, Jeongwan Jin, Joshua~A Slater, Daniel Oblak,
F{\'e}lix Bussieres, Mathew George, Raimund Ricken, Wolfgang Sohler, and
Wolfgang Tittel.
\newblock Broadband waveguide quantum memory for entangled photons.
\newblock \emph{Nature}, 469\penalty0 (7331):\penalty0 512, 2011.
\bibitem[Usmani et~al.(2010)Usmani, Afzelius, De~Riedmatten, and
Gisin]{Usmani2010mapping}
Imam Usmani, Mikael Afzelius, Hugues De~Riedmatten, and Nicolas Gisin.
\newblock Mapping multiple photonic qubits into and out of one solid-state
atomic ensemble.
\newblock \emph{Nature Communications}, 1:\penalty0 12, 2010.
\bibitem[Tang et~al.(2015)Tang, Zhou, Wang, Li, Liu, Hua, Zou, Wang, He, Chen,
et~al.]{Tang2015storage}
Jian-Shun Tang, Zong-Quan Zhou, Yi-Tao Wang, Yu-Long Li, Xiao Liu, Yi-Lin Hua,
Yang Zou, Shuang Wang, De-Yong He, Geng Chen, et~al.
\newblock Storage of multiple single-photon pulses emitted from a quantum dot
in a solid-state quantum memory.
\newblock \emph{Nature communications}, 6:\penalty0 8652, 2015.
\bibitem[Laplane et~al.(2015)Laplane, Jobez, Etesse, Timoney, Gisin, and
Afzelius]{Laplane2016Multiplexed}
Cyril Laplane, Pierre Jobez, Jean Etesse, Nuala Timoney, Nicolas Gisin, and
Mikael Afzelius.
\newblock Multiplexed on-demand storage of polarization qubits in a crystal.
\newblock \emph{New Journal of Physics}, 18\penalty0 (1):\penalty0 013006, dec
2015.
\newblock \doi{10.1088/1367-2630/18/1/013006}.
\newblock URL \url{https://doi.org/10.1088\%2F1367-2630\%2F18\%2F1\%2F013006}.
\bibitem[Pu et~al.(2018)Pu, Wu, Jiang, Chang, Li, Zhang, and
Duan]{pu2018experimental}
Yunfei Pu, Yukai Wu, Nan Jiang, Wei Chang, Chang Li, Sheng Zhang, and Luming
Duan.
\newblock Experimental entanglement of 25 individually accessible atomic
quantum interfaces.
\newblock \emph{Science advances}, 4\penalty0 (4):\penalty0 eaar3931, 2018.
\bibitem[Jiang et~al.(2019)Jiang, Pu, Chang, Li, Zhang, and
Duan]{Jiang2019Experimental}
N.~Jiang, Y.-F. Pu, W.~Chang, C.~Li, S.~Zhang, and L.-M. Duan.
\newblock Experimental realization of 105-qubit random access quantum memory.
\newblock \emph{npj Quantum Information}, 5\penalty0 (1):\penalty0 28, 2019.
\newblock ISSN 2056-6387.
\newblock \doi{10.1038/s41534-019-0144-0}.
\newblock URL \url{https://doi.org/10.1038/s41534-019-0144-0}.
\bibitem[Bechmann-Pasquinucci and Tittel(2000)]{PhysRevA.61.062308}
H.~Bechmann-Pasquinucci and W.~Tittel.
\newblock Quantum cryptography using larger alphabets.
\newblock \emph{Phys. Rev. A}, 61:\penalty0 062308, May 2000.
\newblock \doi{10.1103/PhysRevA.61.062308}.
\newblock URL \url{https://link.aps.org/doi/10.1103/PhysRevA.61.062308}.
\bibitem[Bourennane et~al.(2001)Bourennane, Karlsson, and
Bj\"ork]{PhysRevA.64.012306}
Mohamed Bourennane, Anders Karlsson, and Gunnar Bj\"ork.
\newblock Quantum key distribution using multilevel encoding.
\newblock \emph{Phys. Rev. A}, 64:\penalty0 012306, Jun 2001.
\newblock \doi{10.1103/PhysRevA.64.012306}.
\newblock URL \url{https://link.aps.org/doi/10.1103/PhysRevA.64.012306}.
\bibitem[Cerf et~al.(2002)Cerf, Bourennane, Karlsson, and
Gisin]{PhysRevLett.88.127902}
Nicolas~J. Cerf, Mohamed Bourennane, Anders Karlsson, and Nicolas Gisin.
\newblock Security of quantum key distribution using $\mathit{d}$-level
systems.
\newblock \emph{Phys. Rev. Lett.}, 88:\penalty0 127902, Mar 2002.
\newblock \doi{10.1103/PhysRevLett.88.127902}.
\newblock URL \url{https://link.aps.org/doi/10.1103/PhysRevLett.88.127902}.
\bibitem[Walborn et~al.(2006)Walborn, Lemelle, Almeida, and
Ribeiro]{PhysRevLett.96.090501}
S.~P. Walborn, D.~S. Lemelle, M.~P. Almeida, and P.~H.~Souto Ribeiro.
\newblock Quantum key distribution with higher-order alphabets using spatially
encoded qudits.
\newblock \emph{Phys. Rev. Lett.}, 96:\penalty0 090501, Mar 2006.
\newblock \doi{10.1103/PhysRevLett.96.090501}.
\newblock URL \url{https://link.aps.org/doi/10.1103/PhysRevLett.96.090501}.
\bibitem[Islam et~al.(2017)Islam, Lim, Cahall, Kim, and
Gauthier]{Islame1701491}
Nurul~T. Islam, Charles Ci~Wen Lim, Clinton Cahall, Jungsang Kim, and Daniel~J.
Gauthier.
\newblock Provably secure and high-rate quantum key distribution with time-bin
qudits.
\newblock \emph{Science Advances}, 3\penalty0 (11), 2017.
\newblock \doi{10.1126/sciadv.1701491}.
\newblock URL \url{https://advances.sciencemag.org/content/3/11/e1701491}.
\bibitem[Pavlidis and Floratos(2017)]{1707.08834}
Archimedes Pavlidis and Emmanuel Floratos.
\newblock Arithmetic circuits for multilevel qudits based on quantum fourier
transform, 2017.
\newblock arXiv:1707.08834.
\bibitem[Gokhale et~al.(2019)Gokhale, Baker, Duckering, Brown, Brown, and
Chong]{1905.10481}
Pranav Gokhale, Jonathan~M. Baker, Casey Duckering, Natalie~C. Brown,
Kenneth~R. Brown, and Frederic~T. Chong.
\newblock Asymptotic improvements to quantum circuits via qutrits, 2019.
\newblock arXiv:1905.10481.
\bibitem[James et~al.(2001)James, Kwiat, Munro, and White]{James2001On}
Daniel F.~V. James, Paul~G. Kwiat, William~J. Munro, and Andrew~G. White.
\newblock Measurement of qubits.
\newblock \emph{Phys. Rev. A}, 64:\penalty0 052312, Oct 2001.
\newblock \doi{10.1103/PhysRevA.64.052312}.
\newblock URL \url{https://link.aps.org/doi/10.1103/PhysRevA.64.052312}.
\bibitem[Zhao et~al.(2009)Zhao, Dudin, Jenkins, Campbell, Matsukevich, Kennedy,
and Kuzmich]{zhao2009long}
R~Zhao, YO~Dudin, SD~Jenkins, CJ~Campbell, DN~Matsukevich, TAB Kennedy, and
A~Kuzmich.
\newblock Long-lived quantum memory.
\newblock \emph{Nature Physics}, 5\penalty0 (2):\penalty0 100, 2009.
\bibitem[Hsiao et~al.(2018)Hsiao, Tsai, Chen, Lin, Hung, Lee, Chen, Chen, Yu,
and Chen]{PhysRevLett.120.183602}
Ya-Fen Hsiao, Pin-Ju Tsai, Hung-Shiue Chen, Sheng-Xiang Lin, Chih-Chiao Hung,
Chih-Hsi Lee, Yi-Hsin Chen, Yong-Fan Chen, Ite~A. Yu, and Ying-Cheng Chen.
\newblock Highly efficient coherent optical memory based on electromagnetically
induced transparency.
\newblock \emph{Phys. Rev. Lett.}, 120:\penalty0 183602, May 2018.
\newblock \doi{10.1103/PhysRevLett.120.183602}.
\newblock URL \url{https://link.aps.org/doi/10.1103/PhysRevLett.120.183602}.
\bibitem[Vernaz-Gris et~al.(2018)Vernaz-Gris, Huang, Cao, Sheremet, and
Laurat]{vernaz2018highly}
Pierre Vernaz-Gris, Kun Huang, Mingtao Cao, Alexandra~S Sheremet, and Julien
Laurat.
\newblock Highly-efficient quantum memory for polarization qubits in a
spatially-multiplexed cold atomic ensemble.
\newblock \emph{Nature communications}, 9\penalty0 (1):\penalty0 363, 2018.
\bibitem[Chisholm et~al.(2018)Chisholm, Thomas, Deb, and
Kj{\ae}rgaard]{chisholm2018three}
CS~Chisholm, R~Thomas, AB~Deb, and N~Kj{\ae}rgaard.
\newblock A three-dimensional steerable optical tweezer system for ultracold
atoms.
\newblock \emph{Review of Scientific Instruments}, 89\penalty0 (10):\penalty0
103105, 2018.
\end{thebibliography}
\textbf{Acknowledgements:} This work was supported by the Ministry of Education of China, Tsinghua University, and the National Key Research and Development Program of China (2016YFA0301902). Y.K.W. acknowledges support from the Shuimu Tsinghua Scholar Program and the International Postdoctoral Exchange Fellowship Program (Talent-Introduction Program).
\textbf{Author Contributions:} C.L., N.J., W.C., Y.F.P., and S.Z. performed the experiment under the supervision of L.M.D. Y.K.W. performed the theoretical analysis. C.L., Y.K.W., and L.M.D. wrote the manuscript.
\textbf{Competing interests:} The authors declare no competing interests.
\textbf{Author Information:} Correspondence and requests for materials should be addressed to L.M.D.
([email protected]).
\textbf{Data Availability} The data that support the findings of this study are available
from the corresponding author upon request.
\end{document}
\begin{document}
\begin{asciiabstract}
Let p be an odd prime. Let G be a p-local finite group
over the extraspecial p-group p_+^{1+2}.
In this paper we study
the cohomology and the stable splitting of their p-complete
classifying space BG.
\end{asciiabstract}
\begin{htmlabstract}
Let p be an odd prime. Let G be a p–local finite group
over the extraspecial p–group p<sub>+</sub><sup>1+2</sup>.
In this paper we study
the cohomology and the stable splitting of their p–complete
classifying space BG.
\end{htmlabstract}
\begin{abstract}
Let $p$ be an odd prime. Let $G$ be a $p$--local finite group
over the extraspecial $p$--group $p_+^{1+2}$.
In this paper we study
the cohomology and the stable splitting of their $p$--complete
classifying space $BG$.
\end{abstract}
\maketitle
\section{Introduction}
\label{sec:sec1}
Let us denote by $E$ the extraspecial $p$--group $p_+^{1+2}$
of order $p^3$ and exponent $p$ for an odd prime $p$. Let $G$ be a
finite group
having $E$ as a $p$--Sylow subgroup, and $BG$ ($=BG_p^{\wedge}$) the
$p$--completed classifying space of $G$. In papers by Tezuka and Yagita
\cite{T-Y} and Yagita \cite{Y1,Y2}, the
cohomology and
stable splitting for such groups are studied.
In many cases non-isomorphic groups have homotopy equivalent
$p$--completed
classifying spaces, showing that there are not too many homotopy
types of $BG$, as
was first suggested by C\,B Thomas \cite{Th} and D Green \cite{G}.
Recently, Ruiz and Viruel \cite{R-V} classified all $p$--local finite groups
for the $p$--group $E$.
Their results show that each classifying space $BG$ is homotopy equivalent to
one of the classifying spaces studied in \cite{T-Y} or to the classifying
space of one of three exotic $7$--local finite groups.
(However, the descriptions in \cite{T-Y} of $H^*(^2F_4(2)')_{(3)}$,
$H^*(Fi_{24}')_{(7)}$ and $H^*({\mathbb M})_{(13)}$ contained some errors.)
In \fullref{sec:sec2}, we recall the results of Ruiz and Viruel.
In \fullref{sec:sec3}, we also recall the cohomology $H^*(BE;{\mathbb Z})/(p,\surd 0)$.
In this paper, we simply write
\[H^*(BG)=H^*(BG;{\mathbb Z})/(p,\surd 0)\]
and mainly study these rings. The cohomology $H^{\mathrm{odd}}(BG;{\mathbb Z}_{(p)})$
and the nilpotent
parts in $H^{\mathrm{even}}(BG;{\mathbb Z}_{(p)})$ are given in \fullref{sec:sec11}.
\fullref{sec:sec4} is devoted to the explanation of the stable splitting of $BG$
according to Dietz, Martino and Priddy. In \fullref{sec:sec5} and
\fullref{sec:sec6}, we study the
cohomology and stable splitting of $BG$ for a finite group $G$
having a $3$--Sylow subgroup $({\mathbb Z}/3)^2$ or $E=3_+^{1+2}$, respectively.
In \fullref{sec:sec7} and
\fullref{sec:sec8}, we study cohomology of $BG$ for groups $G$ having
a $7$--Sylow subgroup $E=7_+^{1+2}$, and the three exotic $7$--local
finite groups. In \fullref{sec:sec9}, we study their stable splitting.
In \fullref{sec:sec10} we study the cohomology and stable splitting of
the Monster group ${\mathbb M}$ for $p=13$.
\section[p--local finite groups over E]{$p$--local finite groups over $E$}
\label{sec:sec2}
Recall that the extraspecial $p$--group $p_+^{1+2}$ has a presentation
as
\[ p_+^{1+2}={\langle}a,b,c|a^p=b^p=c^p=1, [a,b]=c,\ c\in
\mathrm{Center}{\rangle}\]
and denote it simply by $E$ in this paper. We consider $p$--local
finite groups
over $E$, which are generalizations of finite groups whose
$p$--Sylow subgroups are isomorphic to $E$.
The concept of the $p$--local finite groups arose in the work of
Broto, Levi and Oliver \cite{B-L-O} as a generalization of a classical concept
of finite groups. The $p$--local finite group is stated as a triple
${\langle}S,F,L{\rangle}$ where $S$ is a $p$--group,
$F$ is a saturated fusion system over a centric linking system $L$
over $S$
(for a detailed definition, see \cite{B-L-O}). Given a $p$--local finite
group, we can
construct its classifying space $B{\langle}S,F,L{\rangle}$ by the
realization
$|L|_p^{\wedge}$. Of course if ${\langle}S,F,L{\rangle}$ is induced
from a finite
group $G$ having $S$ as a
$p$--Sylow subgroup,
then $B{\langle}S,F,L{\rangle}\cong BG$. However note that in
general, there exist
$p$--local finite groups which are not induced from finite groups
(exotic cases).
Ruiz and Viruel recently determined
${\langle}p_+^{1+2},F,L{\rangle}$ for
all odd primes $p$.
To check whether a $p$--local finite group is induced from a finite group,
it suffices to consider simple groups and their extensions. In this way they found
new exotic $7$--local finite groups.
The $p$--local finite groups ${\langle}E,F,L{\rangle}$ are classified
by $\Out_F(E)$,
the number of $F^{\mathrm{ec}}$--radical $p$--subgroups $A$ (where $A\cong ({\mathbb Z}/p)^2$),
and $\Aut_F(A)$ (for details see \cite{R-V}). When a $p$--local finite
group
is induced from a finite group $G$, then we see easily that
$\Out_F(E)\cong W_G(E) (=N_G(E)/E.C_G(E))$ and $\Aut_F(A)\cong W_G(A)$.
Moreover $A$ is $F^{\mathrm{ec}}$--radical
if and only if $\Aut_F(A)\supset SL_2({\mathbb F}_p)$ by \cite[Lemma 4.1]{R-V}.
When $G$ is a sporadic simple group, $F^{\mathrm{ec}}$--radical follows
$p$--pure.
\begin{thm}[Ruiz and Viruel \cite{R-V}]\label{thm:thm2.1}
If $p\not =3,5,7,13$, then a $p$--local finite group
${\langle}E,F,L{\rangle}$ is
isomorphic
to one of the following types.
\begin{enumerate}
\item $E \colon W$ for $W\subset \Out(E)$ and $(|W|,p)=1$,
\item $ p^2 \colon SL_2({\mathbb F}_p).r$ for $r|(p-1)$,
\item $SL_3({\mathbb F}_p) \colon H$ for $H\cong {\mathbb Z}/2,{\mathbb Z}/3$ or $S_3.$
\end{enumerate}
When $p=3,5,7$ or $13$, it is either one of the previous types or
one of the following types.
\begin{enumerate}\setcounter{enumi}{3}
\item $^2F_4(2)'$, $J_4$ for $p=3$,
\item $Th$ for $p=5$,
\item $He$, $He \colon 2$, ${Fi'}_{24}$, $Fi_{24}$, $O'N$, $O'N \colon 2$,
and three exotic $7$--local finite groups for $p=7$,
\item ${\mathbb M}$ for $p=13$.
\end{enumerate}
\end{thm}
For case (1), we know that
$H^*(E{\colon}W)\cong H^*(E)^W$. Except for these extensions and
exotic cases,
all $H^{\mathrm{even}}(G;{\mathbb Z})_{(p)}$ were studied by Tezuka and Yagita \cite{T-Y}. In
\cite{Y1}, the author studied
how to determine
$H^{\mathrm{odd}}(G;{\mathbb Z})_{(p)}$ and $H^*(G;{\mathbb Z}/p)$ from
$H^{\mathrm{even}}(G;{\mathbb Z})_{(p)}$.
The stable splittings for such $BG$ are studied in \cite{Y2}.
However there were some errors in the cohomology of
$^2F_4(2)',{Fi'}_{24},{\mathbb M}$.
In this paper, we mainly study the cohomology and stable splitting of $BG$
for $p=3$, $7$ and $13$.
\section{Cohomology}
\label{sec:sec3}
In this paper we mainly consider the cohomology
$H^*(BG;{\mathbb Z})/(p,\surd 0)$
where $\surd 0$ is the ideal generated by nilpotent elements. So we
write it simply
\[H^*(BG)=H^*(BG;{\mathbb Z})/(p,\surd 0).\]
Hence we have
\[H^*(B{\mathbb Z}/p)\cong {\mathbb Z}/p[y],\qua H^*(B({\mathbb Z}/p)^2)\cong {\mathbb Z}/p[y_1,y_2]
\text{ with }|y|=|y_i|=2.\]
Let us write $({\mathbb Z}/p)^2$ as $A$
and let an $A$--subgroup of $G$ mean a subgroup
isomorphic to $({\mathbb Z}/p)^2$.
The cohomology of the extraspecial $p$--group $E=p_+^{1+2}$ is well
known.
In particular recall (Leary \cite{L2} and Tezuka--Yagita \cite{T-Y})
\begin{equation}\label{eqn:eqn3.1}
H^*(BE)\cong \left({\mathbb Z}/p[y_1,y_2]/(y_1^py_2-y_1y_2^p)\oplus
{\mathbb Z}/p\{C\}\right)\otimes {\mathbb Z}/p[v],
\end{equation}
where $|y_i|=2,|v|=2p,|C|=2p-2$ and $Cy_i=y_i^{p}$,
$C^2=y_1^{2p-2}+y_2^{2p-2}-y_1^{p-1}y_2^{p-1}$.
In this paper we write $y_i^{p-1}$ by $Y_i$, and $v^{p-1}$ by $V$,
eg $C^2=Y_1^2+Y_2^2-Y_1Y_2$.
The Poincare series of the subalgebra generated by the $y_i$ and $C$ is
computed as
\[\frac{1-t^{p+1}}{(1-t)(1-t)}+t^{p-1}=
\frac{(1+\cdots+t^{p-1})+t^{p-1}}{(1-t)}
=\frac{(1+\cdots+t^{p-1})^2-t^{2p-2}}{(1-t^{p-1})}.\]
From this Poincare series and \eqref{eqn:eqn3.1}, we get another
expression of $H^*(BE)$
\begin{equation}\label{eqn:eqn3.2}
H^*(BE)\cong {\mathbb Z}/p[C,v]\left\{y_1^iy_2^j|0\le i,j\le p-1,
(i,j)\not =(p-1,p-1)\right\}.
\end{equation}
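The series manipulations above can be checked mechanically for small primes; a minimal sympy sketch:
\begin{verbatim}
from sympy import symbols, cancel

t = symbols('t')
for p in [3, 5, 7, 13]:
    S = sum(t**i for i in range(p))          # 1 + t + ... + t^(p-1)
    lhs = (1 - t**(p + 1)) / (1 - t)**2 + t**(p - 1)
    mid = (S + t**(p - 1)) / (1 - t)
    rhs = (S**2 - t**(2*p - 2)) / (1 - t**(p - 1))
    assert cancel(lhs - mid) == 0
    assert cancel(rhs - mid) == 0
\end{verbatim}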
The $E$--conjugacy classes of $A$--subgroups are represented by
\begin{align*}
A_i &={\langle}c,ab^i{\rangle} \text{ for }0\le i\le p-1 \\
A_{\infty} &={\langle}c,b{\rangle}.
\end{align*}
Letting $H^*(BA_i)\cong {\mathbb Z}/p[y,u]$ and writing $i_{A_i}^*(x)=x|A_i$
for the inclusion $i_{A_i}{\co}A_i\subset E$, the restriction
images are given by
\begin{align}\label{eqn:eqn3.3}
y_1|A_i=y \text{ for } i\in {\mathbb F}_p, y_1|A_{\infty}=0,
\qua & y_2|A_i=iy \text{ for } i\in {\mathbb F}_p, y_2|A_{\infty}=y,\\ \nonumber
C|A_i=y^{p-1},\qua & v|A_i=u^p-y^{p-1}u\text{ for all }i.
\end{align}
For an element $g=\bigl(\begin{smallmatrix} \alpha&\beta\\ \gamma&\delta
\end{smallmatrix}\bigr)
\in GL_2({\mathbb F}_p)$, we can identify $GL_2({\mathbb F}_p)\cong \Out(E)$ by
\[g(a)=a^{\alpha}b^{\gamma},
g(b)=a^{\beta}b^{\delta},\ g(c)=c^{\det(g)}.\]
Then the action of $g$ on the cohomology is given (see Leary \cite{L2}
and Tezuka--Yagita \cite[page 491]{T-Y}) by
\begin{equation}\label{eqn:eqn3.4}
g^*C=C,\ g^*y_1=\alpha y_1+\beta y_2,
\ g^*y_2=\gamma y_1+\delta y_2,\ g^*v=(\det(g))v.
\end{equation}
Recall that $A$ is $F^{\mathrm{ec}}$--radical if and only if $SL_2({\mathbb F}_p)
\subset W_G(A)$ (see Ruiz--Viruel \cite[Lemma 4.1]{R-V}).
\begin{thm}[Tezuka--Yagita {{\cite[Theorem 4.3]{T-Y}}},
Broto--Levi--Oliver \cite{B-L-O}]
\label{thm:thm3.1}
Let $G$ have the $p$--Sylow subgroup $E$, then we have the isomorphism
\[H^*(BG)\cong H^*(BE)^{W_G(E)}\cap
\bigcap_{A{\colon}F^{\mathrm{ec}}-\operatorname{radical}}i_A^{*-1}H^*(BA)
^{W_G(A)}.\]
\end{thm}
In \cite{B-L-O} and \cite{T-Y}, proofs of the above theorem are given
only for $H^*(BG;{\mathbb Z}_{(p)})$. A proof for $H^*(BG)$ is explained in
\fullref{sec:sec11}.
\section{Stable splitting}
\label{sec:sec4}
Martino and Priddy proved the following theorem on complete stable
splitting.
\begin{thm}[Martino--Priddy \cite{M-P}]
\label{thm:thm4.1}
Let $G$ be a finite group with a $p$--Sylow subgroup $P$.
The complete stable splitting of $BG$ is given by
\[BG\sim \vee \rank A(Q,M)X_M\]
where indecomposable summands $X_M$ range over isomorphic classes of
simple \linebreak ${\mathbb F}_p[\Out(Q)]$--modules $M$ and over isomorphism classes of
subgroups
$Q\subset P$.
\end{thm}
\begin{rem}This theorem also holds for $p$--local finite groups
over $P$,
because all arguments for the proofs are done about the induced maps
from some fusion systems of $P$ on stable homotopy types of
related classifying spaces.
\end{rem}
For the definition of $\rank A(Q,M)$ see Martino and Priddy \cite{M-P}. In particular,
when $Q$ is not a subretract
(that is not a proper retract of a subgroup) of $P$
(see \cite[Definition 2]{M-P}) and when
$W_G(Q)\subset \Out(Q)\cong GL_n({\mathbb F}_p)$
(see \cite[Corollary 4.4 and the proof of Corollary 4.6]{M-P}),
the rank of $A(Q,M)$ is computed by
\[\rank A(Q,M)=\sum_i \dim_{{\mathbb F}_p}(\wbar W_G(Q_i) M),\]
where $\wbar W_G(Q_i)=\sum_{x\in W_G(Q_i)}x$ in ${\mathbb F}_p[GL_n({\mathbb F}_p)]$ and
$Q_i$ ranges
over representatives of $G$--conjugacy classes of
subgroups isomorphic to $Q$.
Recall that $\Out(E)\cong \Out(A)\cong GL_2({\mathbb F}_p)$. The simple
modules of $G=GL_2({\mathbb F}_p)$ are well known. Let us think of
$A$ as the natural two-dimensional representation, and $\det$ the
determinant
representation of $G$. Then there are $p(p-1)$ simple
${\mathbb F}_p[G]$--modules given by
$M_{q,k}=S(A)^q\otimes (\det)^k$ for $0\le q\le p-1,0\le k\le p-2$.
Harris and Kuhn \cite{H-K} determined the stable splitting
of abelian $p$--groups. In particular, they showed
\begin{thm}[Harris--Kuhn \cite{H-K}]
\label{thm:thm4.2}
Let $\tilde X_{q,k}=X_{M_{q,k}}$ (resp. $L(1,k)$)
identifying $M_{q,k}$ as an ${\mathbb F}_p[\Out(A)]$--module (resp. $M_{0,k}$ as
an
${\mathbb F}_p[\Out({\mathbb Z}/p)]$--module).
There is the complete stable splitting
\[BA\sim \vee_{q,k}(q+1)\tilde X_{q,k}\vee _{q\not =0}(q+1)L(1,q),
\]
where $0\le q\le p-1$, $0\le k\le p-2$.
\end{thm}
The summand $L(1,p-1)$ is usually written as $L(1,0)$.
It is also known that $H^+(L(1,q))\cong {\mathbb Z}/p[y^{p-1}]\{y^q\}$.
Since we have the isomorphism
\[H^{2q}(BA)\cong ({\mathbb Z}/p)^{q+1} \cong H^{2q}((q+1)L(1,q)),
\text{ for }1\le q\le p-1,\]
we get $H^*(\tilde X_{q,k})\cong 0$ for $*\le 2(p-1)$.
\begin{lemma}\label{lem:lem4.3}
Let $H$ be a finite solvable group with $(p,|H|)=1$ and
$M$ be an ${\mathbb F}_p[H]$--module.
Then we have $\wbar H(M)=(\sum_{x\in H}x)M\cong M^H\cong H^0(H;M)$.
\end{lemma}
\begin{proof}
First assume $H={\mathbb Z}/s$ and let $x\in {\mathbb Z}/s$ be its generator. Then
\[\wbar H(M)=(1+x+\cdots+x^{s-1})M.\]
Since $(1-x)(1+x+\cdots+x^{s-1})=1-x^s=0$, we see $\Ker(1-x)\supset \mathrm{Image}(\wbar H)$.
The facts that $M$ is a ${\mathbb Z}/p$--module and $(|H|,p)=1$ imply
$H^*(H;M)=0$
for $*>0$. Hence
\[\Ker(1-x)/\mathrm{Image}(1+\cdots+x^{s-1})\cong H^1(H;M)=0.\]
Thus we have $\wbar H(M)=\Ker(1-x)=M^H$.
Suppose that $H$ is a group such that
\[0\to H'\to H\stackrel{\pi}{\to} H''\to 0\]
and that $\wbar H'(M')=(M')^{H'}$ (resp. $\wbar H''(M'')=(M'')^{H''}$)
for each
${\mathbb Z}/p[H']$--module $M'$ (resp. ${\mathbb Z}/p[H'']$--module $M''$).
Let $\sigma$ be a (set theoretical) section of $\pi$ and denote
$\sigma(\wbar H'')=\sum_{x\in H''}\sigma(x)\in {\mathbb F}_p[H]$.
Then
\[\wbar H(M)=\sigma(\wbar H'') \wbar H'(M)=
\sigma(\wbar H'')(M^{H'})=\wbar H''(M^{H'})=(M^{H'})^{H''}=M^H,\]
where the third equality follows since we can identify
$M^{H'}$ as an ${\mathbb F}_p[H'']$--module.
Thus the lemma is proved.\end{proof}
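As a simple illustration (with a toy module of our own choosing): if $H={\mathbb Z}/2=\{1,x\}$ acts on
$M={\mathbb F}_3\{m_1,m_2\}$ by exchanging $m_1$ and $m_2$, then
$\wbar H(\alpha m_1+\beta m_2)=(\alpha+\beta)(m_1+m_2)$, so
$\wbar H(M)={\mathbb F}_3\{m_1+m_2\}=M^H$, as the lemma asserts.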
It is known from a result of Suzuki \cite[Chapter 3 Theorem 6.17]{S}
that any subgroup of
$SL_2({\mathbb F}_{p^n})$ whose order is prime to $p$
is isomorphic to a subgroup of
${\mathbb Z}/s$, $4S_4$, $SL_2({\mathbb F}_3)$, $SL_2({\mathbb F}_5)$ or
\[Q_{4n}={\langle}x,y|x^n=y^2,y^{-1}xy=x^{-1}{\rangle}.\]
\begin{cor}\label{cor:cor4.4}
Let $H\subset GL_2({\mathbb F}_p)$ with $(|H|,p)=1$, and suppose that $H$ does not
have a
subgroup isomorphic to
$SL_2({\mathbb F}_3)$ or $SL_2({\mathbb F}_5)$. Let $G=A{\co}H$ and let us
write
$BG\sim
\vee _{q,k}\tilde n(H)_{q,k}\tilde X_{q,k}\vee _{q'}\tilde
m(H)_{q'}L(1,q').$
Then
\begin{align*}
\tilde n(H)_{q,k} &=\rank_pH^0(H;M_{q,k}), \\
\tilde m(H)_{q'} &=\rank_pH^{2q'}(BG).
\end{align*}
In particular $\tilde n(H)_{q,0}=\rank_pH^{2q}(BG)$.
\end{cor}
\begin{proof}
Since $H^*(\tilde X_{q,k})\cong 0$ for $*\le 2(p-1)$,
it is immediate that $\tilde m(H)_{q'}=\rank_pH^{2q'}(G)$.
Since $ GL_2({\mathbb F}_p)\cong SL_2({\mathbb F}_p).{\mathbb F}_p^*$ and ${\mathbb F}_p^*\cong
{\mathbb Z}/(p-1)$,
each subgroup $H$
in the above satisfies the condition in \fullref{lem:lem4.3}.
The first equation is immediate from the lemma.
\end{proof}
Next consider the stable splitting for the extraspecial $p$--group $E$.
Dietz and Priddy proved the following theorem.
\begin{thm}[Dietz--Priddy \cite{D-P}]
\label{thm:thm4.5}
Let $X_{q,k}=X_{M_{q,k}}$ (resp. $L(2,k)$, $L(1,k)$)
identifying $M_{q,k}$ as an ${\mathbb F}_p[\Out(E)]$--module (resp. $M_{p-1,k}$
as an
${\mathbb F}_p[\Out(A)]$--module, ${\mathbb F}_p[\Out({\mathbb Z}/p)]$--module).
There is the complete stable splitting
\[BE\sim \vee_{q,k}(q+1)X_{q,k}\vee _{k}(p+1)L(2,k)\vee_{q\not
=0}(q+1)L(1,q)\vee
L(1,p-1)\]
where $0\le q\le p-1$, $0\le k\le p-2$.
\end{thm}
\begin{rem} Of course $\tilde X_{q,k}$ is different from $X_{q,k}$
but
$\tilde X_{p-1,k}=L(2,k)$.
\end{rem}
The number of $L(1,q)$ for $1\le q{<}p-1$ is given by the following.
Let us consider the decomposition $E/{\langle}c{\rangle}\cong \wbar
A_i\oplus \wbar
A_{-i}$
where $\wbar A_i={\langle}ab^i{\rangle}$ and $\wbar A_{-0}=\wbar
A_{\infty}$.
We consider the projection
$\pr_i \co E\to \wbar A_i.$
Let $x\in H^1(B\wbar A_i;{\mathbb Z}/p)=\Hom(\wbar A_i,{\mathbb Z}/p)$ be the dual of
$ab^i$.
Then
\[\pr_i^*x(a)=x(\pr_{i}(a))=x(\pr_i(ab^iab^{-i})^{1/2})=x((ab^i)^{1/2})=1/2,\]
\[\pr_i^*x(b)=x(\pr_{i}(ab^i(ab^{-i})^{-1})^{1/(2i)})=1/(2i).\]
Hence for $\beta(x)=y$, we have $\pr_i^*(y)=1/2y_1+1/(2i)y_2$.
Therefore the $k+1$ elements
$(1/2y_1+1/(2i)y_2)^k,\
i=0,\ldots,k$ form a base of $H^{2k}(E/{\langle}c{\rangle};{\mathbb Z}/p)\cong
({\mathbb Z}/p)^{k+1}$
for $k{<}p-1$.
Thus we know the number of $L(1,k)$ is $k+1$ for $0{<}k{<}p-1$.
Recall that
\[H^{2q}(BE)\cong
\begin{cases}({\mathbb Z}/p)^{q+1}\cong H^{2q}((q+1)L(1,q))\text{ for }0\le
q\le p-2 \\
({\mathbb Z}/p)^{q+2}\cong H^{2p-2}((p+1)L(1,0))\text{ for }q=p-1.
\end{cases}\]
This shows $H^*(X_{q,k})\cong 0$ for $*\le 2p-2$, since the same holds for
$L(2,k)$.
The number $n(G)_{q,k}$ of $X_{q,k}$ depends only on $W_G(E)=H$.
Hence we have the following corollary.
\begin{cor}\label{cor:cor4.6}
Let $G$ have the $p$--Sylow subgroup $E$ and $W_G(E)=H$. Let
\[ BG \sim \vee n(G)_{q,k} X_{q,k} \vee m(G,2)_k L(2,k)\vee
m(G,1)_k L(1,k). \]
Then $n(G)_{q,k}=\tilde n(H)_{q,k}$ and $m(G,1)_k=\rank_pH^{2k}(G)$.
\end{cor}
Let $W_G(E)=H$. We also compute the dominant summand by the
cohomology $H^*(BE)^H\cong H^*(B(E{\co}H))$.
Let us write the ${\mathbb Z}/p$--module
\[X_{q,k}(H)= \bigl(S(A)^q\otimes v^k\bigr) \cap H^*(B(E{\co}H))\quad
\text{with}\quad S(A)^q={\mathbb Z}/p\{y_1^q,y_1^{q-1}y_2,\ldots,y_2^q\}.\]
Since the module ${\mathbb Z}/p\{v^k\}$ is isomorphic to the $H$--module
$\det^k$,
we have the following lemma.
\begin{lemma} \label{lem:lem4.7} The number $n_{q,k}(G)$ of $X_{q,k}$
in $BG$ is given by $\rank_p(X_{q,k}(W_G(E)))$.
\end{lemma}
The next problem is to determine $m(G,2)_k$.
The number $p+1$ for the summand $L(2,k)$ in $BE$ is given as follows.
For each $E$--conjugacy class of $A$--subgroup
$A_i={\langle}c,ab^i{\rangle},i\in
{\mathbb F}_p\cup
\infty$, we see
\[W_E(A_i)=N_E(A_i)/A_i=E/A_i\cong {\mathbb Z}/p\{b\}\quad
b^*{\co}ab^i\mapsto ab^ic.\]
Let $u=\left(\begin{smallmatrix}1&1 \\ 0&1 \end{smallmatrix}\right)$ in
$GL_2({\mathbb F}_p)$ and $U={\langle}u{\rangle}$ the maximal unipotent
subgroup.
Then we can identify $W_E(A_i)\cong U$ by $b\mapsto u$.
For $y_1^sy_2^l \in
M_{q,k}$ (identifying $H^*(BA)\cong S^*(A)={\mathbb Z}/p[y_1,y_2]$), we
can compute
\begin{align*}
\wbar{W}_E(A)y_1^sy_2^l &=(1+u+\cdots+u^{p-1})y_1^sy_2^l
= \sum_{i=0}^{p-1}(y_1+iy_2)^sy_2^l \\
&= \sum_i\sum_t \tbinom{s}{t} i^ty_1^{s-t}y_2^ty_2^l
= \sum_t \tbinom{s}{t} \Bigl(\sum_i i^t\Bigr) y_1^{s-t}y_2^{t+l}.
\end{align*}
Here $\sum_{i=0}^{p-1}i^t=0$ for $1\le t\le p-2$, and $=-1$ for
$t=p-1$.
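(For instance, when $p=5$ we have $\sum_{i=0}^{4}i^2=30\equiv 0$ and
$\sum_{i=0}^{4}i^4=354\equiv -1 \,\operatorname{mod}(5)$.)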
Hence we know
\[\dim_p \wbar{W}_E(A_i)M_{q,k}=
\begin{cases}0 & \text{ for }1\le q \le p-2 \\
1 & \text{ for }q=p-1.
\end{cases}
\]
Thus we know that $BE$ has just one $L(2,k)$
for each $E$--conjugacy class of $A$--subgroups $A_i$.
\begin{lemma} \label{lem:lem4.8}
Let $A$ be an $F^{\mathrm{ec}}$--radical subgroup, ie $W_G(A)\supset
SL_2({\mathbb F}_p)$.
Then $\wbar W_G(A)(M_{q,k})=0$ for all $k$ and $1\le q \le p-1$.
\end{lemma}
\begin{proof}
The group $SL_2({\mathbb F}_p)$ is generated by $u=\left(\begin{smallmatrix}
1&1\\ 0&1 \end{smallmatrix}\right)$ and $u'=\left(\begin{smallmatrix}
1&0\\ 1&1 \end{smallmatrix}\right)$. We know
$\Ker(1-u)\cong {\mathbb Z}/p[y_1^p-y_2^{p-1}y_1,y_2]$ and
$\Ker(1-u')\cong {\mathbb Z}/p[y_2^p-y_1^{p-1}y_2,y_1]$.
Hence we get
$(\Ker(1-u)\cap \Ker(1-u'))^*\cong 0$ for $0{<}*\le p-1$.
\end{proof}
\begin{prop} \label{prop:prop4.9}
Let $G$ have the $p$--Sylow subgroup $E$. The number of $L(2,0)$ in
$BG$
is given by
\[m(G,2)_0= \sharp_G(A)-\sharp_G(F^{\mathrm{ec}}A)\]
where $\sharp_G(A)$(resp.$\sharp_G(F^{\mathrm{ec}}A)$) is
the number of $G$--conjugacy classes
of $A$--subgroups (resp. $F^{\mathrm{ec}}$--radical subgroups).
\end{prop}
\begin{proof}
Let us write $K=E{\co}W_G(E)$ and $H^*(BE)^{W_G(E)}=H^*(BK)$.
From \fullref{thm:thm3.1}, we have
\begin{equation} \label{eqn:star}
\quad H^*(BG)\cong H^*(BK)\cap
_{A{\co}F^{\mathrm{ec}}-\operatorname{radical}}i_A^{*-1}H^*(BA)
^{W_G(A)}.
\end{equation}
Let $A$ be an $A$--subgroup of $K$ and $x\in W_{K}(A)$.
Recall $A={\langle}c,ab^i{\rangle}$ for some $i$.
Identifying $x$ with an element of $N_G(A)\subset E{\co}\Out(E)$,
we see $x{\langle}c{\rangle}={\langle}c{\rangle}$ from \eqref{eqn:eqn3.4}
and since
${\langle}c{\rangle}$ is the center of $E$.
Hence
\[W_{K}(A)\subset B=U{\co}({\mathbb F}_p^*)^2 \text{ the Borel subgroup}.\]
So we easily see that $\wbar W_{K}(y_1^{p-1})=\lambda y_2^{p-1}$
for some $\lambda\not =0$; this follows from $b^*y_i^{p-1}=y_i^{p-1}$ for
diagonal $b \in ({\mathbb F}_p^*)^{2}$ and the arguments just before
\fullref{lem:lem4.8}.
We also see $\wbar W_K(y_1^{p-1-i}y_2^i)=0$
for $i{>}0$.
Hence we have $m(K,2)_0=\sharp _K(A)$.
From the isomorphism \eqref{eqn:star}, we have
$m(G,2)_0=\sharp_K(A)-\sharp_G(F^{\mathrm{ec}}A)$.
On the other hand $m(G,2)_0\le \sharp_G(A)-\sharp_G(F^{\mathrm{ec}}A)$
from the above lemma.
Since $\sharp_K(A)\ge \sharp_G(A)$, we see that
$\sharp_K(A)=\sharp_G(A)$
and get the proposition.
\end{proof}
\begin{cor} \label{cor:cor4.10}
Let $G$ have the $p$--Sylow subgroup $E$. The number of $L(1,0)$ in
$BG$
is given by
\[m(G,1)_{p-1}=\rank_pH^{2(p-1)}(G)= \sharp_G(A)-\sharp_G(F^{\mathrm{ec}}A).\]
\end{cor}
\begin{proof}
Since $L(1,0)=L(1,p-1)$ is linked to $L(2,0)$, we know
$m(G,1)_{p-1}=m(G,2)_0$.
\end{proof}
\begin{lemma} \label{lem:lem4.11}
Let $\xi\in {\mathbb F}_p^*$ be a primitive $(p-1)$th root of $1$
and $G\supset E{\colon}{\langle}\diag(\xi,\xi){\rangle}$. If
$\xi^{3k}\not =1$,
then $BG$ does not contain the summand $L(2,k)$, ie $m(G,2)_k=0$.
\end{lemma}
\begin{proof}
It is sufficient to prove the lemma for the case
$G=E{\colon}{\langle}\diag(\xi,\xi){\rangle}$,
so let $G$ be this group. Recall
$A_i={\langle}c,ab^i{\rangle}$ and
\[ \diag(\xi,\xi)\co ab^i \mapsto (ab^i)^{\xi},\qua c\mapsto
c^{\xi^2}.\]
So the Weyl group is
$W_G(A_i)=U{\co}{\langle}\diag(\xi^2,\xi){\rangle}$.
For $v=\lambda y_1^{p-1}+\cdots \in M_{q,k}$, we have
\[\wbar W_G(A_i)v=\sum_{i=0}^{p-2}(\xi^{3i})^k
\diag(\xi^{2i},\xi^{i})(1+\cdots+u^{p-1})v=\sum_{i=0}^{p-2}\xi^{3ik}
\lambda y_2^{p-1}.\]
Thus we get the lemma from
$\sum_{i=0}^{p-2}\xi^{3ik}=0$ for $3k\not =0 \mod(p-1)$ and $=-1$
otherwise.
\end{proof}
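For instance, when $p=7$ and $\xi=3$ (a primitive $6$th root of $1$ in
${\mathbb F}_7$), we have $\xi^3=27=-1$, so $\xi^{3k}\not=1$ exactly for odd $k$;
hence for such $G$ the summands $L(2,1)$, $L(2,3)$, $L(2,5)$ do not appear,
while the lemma gives no information on even $k$.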
\section[Cohomology and splitting of B(Z/3)2]
{Cohomology and splitting of $B({\mathbb Z}/3)^2$}
\label{sec:sec5}
In this section, we study the cohomology and stable splitting
of $BG$ for $G$ having a $3$--Sylow subgroup $({\mathbb Z}/3)^2=A$.
In this and next sections, $p$ always means $3$.
Recall $\Out(A)\cong GL_2({\mathbb F}_3)$, whose $2$--Sylow subgroup is the
semidihedral group
\[SD_{16}={\langle}x,y|x^8=y^2=1,yxy^{-1}=x^3{\rangle}.\]
Every $3$--local finite group $G$ over $A$ is of type
$A{\co}W,\ W\subset SD_{16}$. The $SD_{16}$--conjugacy
classes of
subgroups are as follows (here $B\longleftarrow C$ means $B\supset C$):
\[SD_{16} \begin{cases}
\longleftarrow Q_8\longleftarrow {\mathbb Z}/4\\
\longleftarrow {\mathbb Z}/8 \longleftarrow {\mathbb Z}/4 \longleftarrow
{\mathbb Z}/2 \longleftarrow 0\\
\longleftarrow D_8 \longleftarrow {\mathbb Z}/2\oplus {\mathbb Z}/2 \longleftarrow
{\mathbb Z}/2
\end{cases}
\]
We can take generators of subgroups in $GL_2({\mathbb F}_3)$
by the matrices
\begin{align*}
{\mathbb Z}/8={\langle}l{\rangle}, Q_8={\langle}w,k{\rangle},
D_8={\langle}w',k{\rangle}, {\mathbb Z}/4={\langle}w{\rangle}, \\
{\mathbb Z}/4={\langle}k{\rangle}, {\mathbb Z}/2\oplus
{\mathbb Z}/2={\langle}w',m{\rangle},
{\mathbb Z}/2={\langle}m{\rangle}, {\mathbb Z}/2={\langle}w'{\rangle},
\end{align*}
where $l=\left(\begin{smallmatrix} 0&1\\ 1&-1\end{smallmatrix}\right)$,
$w=\left(\begin{smallmatrix} 0&1\\ -1&0 \end{smallmatrix}\right)$,
$k=l^2=\left(\begin{smallmatrix} 1&-1\\ -1&-1\end{smallmatrix}\right)$,
$w'=wl=\left(\begin{smallmatrix} 1&-1\\ 0&-1\end{smallmatrix}\right)$ and
$m=w^2=k^2=\left(\begin{smallmatrix} -1&0\\ 0&-1\end{smallmatrix}\right)$.
Here we note that $k$ and $w$ are $GL_2({\mathbb F}_3)$--conjugate,
in fact $uku^{-1}=w$. Hence we note that
\[H^*(B(A{\co}{\langle}k{\rangle}))\cong
H^*(B(A{\co}{\langle}w{\rangle})).\]
The cohomology of $A$ is given by $H^*(BA)\cong {\mathbb Z}/3[y_1,y_2]$,
and the following
are immediate:
\[H^*(BA)^{{\langle}m{\rangle}}\cong {\mathbb Z}/3[y_1^2,y_2^2]\{1,y_1y_2\}
\qua
H^*(BA)^{{\langle}w'{\rangle}}
\cong {\mathbb Z}/3[y_1+y_2,y_2^2].
\]
Let us write $Y_i=y_i^2$ and $t=y_1y_2$. The $k$--action is given by
$Y_1\mapsto Y_1+Y_2+t$, $Y_2\mapsto Y_1+Y_2-t,$ $t\mapsto -Y_1+Y_2.$
So the following are invariant
\[a=-Y_1+Y_2+t,\ a_1=Y_1(Y_1+Y_2+t),\ a_2=Y_2(Y_1+Y_2-t),\
b=t(Y_1-Y_2).\]
Here we note that $a^2=a_1+a_2$ and $b^2=a_1a_2.$
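Indeed, using $t^2=Y_1Y_2$ (all computations being in ${\mathbb Z}/3[y_1,y_2]$),
a direct check gives
\begin{align*}
a^2 &=(-Y_1+Y_2+t)^2=Y_1^2+Y_2^2+2Y_1Y_2+(Y_1-Y_2)t=a_1+a_2,\\
b^2 &=t^2(Y_1-Y_2)^2=Y_1Y_2(Y_1^2+Y_1Y_2+Y_2^2)=a_1a_2 .
\end{align*}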
We can prove the invariant ring is
\[H^*(BA)^{{\langle}k{\rangle}}\cong {\mathbb Z}/3[a_1,a_2]\{1,a,b,ab\}.\]
Next consider the invariant under $Q_8={\langle}w,k{\rangle}$. The
action for $w$ is
$a\mapsto -a,\ a_1\leftrightarrow a_2,\ b\mapsto b$. Hence we get
\[H^*(BA)^{Q_8}\cong {\mathbb Z}/3[a_1+a_2,a_1a_2]\{1,b\}\{1,(a_1-a_2)a\}.\]
Let us write $S={\mathbb Z}/3[a_1+a_2,a_1a_2]$ and $a'=(a_2-a_1)a$.
The action of $l$ is given by
$l \co Y_1\mapsto Y_2\mapsto Y_1+Y_2+t \mapsto Y_1+Y_2-t \mapsto
Y_1.$
Hence $l \co a\mapsto -a,\ a_1\leftrightarrow a_2,\ b\mapsto -b$.
Therefore
we get
$H^*(BA)^{{\langle}l{\rangle}}\cong S\{1,a',ab,(a_1-a_2)b\}.$
The action for $w' \co Y_1\mapsto Y_1+Y_2+t,\ Y_2\mapsto Y_2$,
implies that
$w' \co a\mapsto a,\ a_i\mapsto a_i,\ b\mapsto -b$. Then we can
see
\[H^*(BA)^{D_8}=H^*(BA)^{{\langle}k,w'{\rangle}}\cong
{\mathbb Z}/3[a_1,a_2]\{1,a\}
\cong S\{1,a,a_1,a'\}.\]
We also have
\[H^*(BA)^{SD_{16}}\cong H^*(BA)^{Q_8}\cap H^*(BA)^{{\mathbb Z}/8}
\cong S\{1,a'\}.\]
Recall the Dickson algebra $DA={\mathbb Z}/3[\tilde D_1,\tilde D_2]\cong
H^*(BA)^{GL_2({\mathbb F}_3)}$ where
$\tilde D_1=Y_1^3+Y_1^2Y_2+Y_1Y_2^2+Y_2^3=(a_2-a_1)a=a'$ and
$\tilde D_2=(y_1^3y_2-y_1y_2^3)^2=a_1a_2$.
Using $a^2=(a_1+a_2)$ and $\tilde D_1^2=a^6-a_1a_2a^2$, we can write
\[H^*(BA)^{SD_{16}}\cong {\mathbb Z}/3[a^2,\tilde D_2]\{1,\tilde D_1\}
\cong DA\{1,a^2,a^4\}.\]
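Here the second relation is a direct check:
$\tilde D_1^2=(a_2-a_1)^2a^2=\bigl((a_1+a_2)^2-4a_1a_2\bigr)a^2
=a^6-a_1a_2a^2$, since $a^2=a_1+a_2$ and $4=1$ in ${\mathbb Z}/3$.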
\begin{thm} \label{thm:thm5.1} Let $G=({\mathbb Z}/3)^2{\co}H$ for $H\subset SD_{16}$.
Then $BG$ has the stable splitting given by
\begin{footnotesize}
\[\stackrel{\tilde X_{0,0}}{\gets} SD_{16} \begin{cases}
\stackrel{\tilde X_{0,1}}{\longleftarrow}
Q_8\\
\quad \\
\stackrel{\tilde X_{2,1}}{\longleftarrow} {\mathbb Z}/8
\stackrel{\tilde X_{2,0}\vee \tilde X_{0,1}\vee L(1,0)}
{\longleftarrow}
{\mathbb Z}/4 \stackrel{2\tilde X_{2,0}\vee 2\tilde X_{2,1}
\vee 2L(1,0)}{\longleftarrow}
{\mathbb Z}/2 \stackrel{2\tilde X_{1,0}\vee 2\tilde X_{1,1}
\vee 2L(1,1)}{\longleftarrow} 0\\ \quad \\
\stackrel{\tilde X_{2,0}\vee L(1,0)}{\longleftarrow} D_8
\stackrel{\tilde X_{2,0}\vee \tilde X_{2,1}\vee L(1,0)}{
\longleftarrow}
{\mathbb Z}/2\oplus {\mathbb Z}/2 \stackrel{\tilde X_{1,0}
\vee \tilde X_{1,1}\vee L(1,1)}{\longleftarrow}
{\mathbb Z}/2
\end{cases}
\]
\end{footnotesize}
where
$\stackrel{\tilde X_1}{\gets} \cdots \stackrel{\tilde X_s}{\gets}H$
means
$B(({\mathbb Z}/3)^2{\colon}H)\sim \tilde X_1\vee \cdots \vee \tilde X_s$.
\end{thm}
For example
\[B(A{\co}SD_{16})\sim \tilde X_{0,0}, \qua B(A{\co}Q_8)\sim
\tilde X_{0,0}\vee
\tilde X_{0,1}, \qua B(A{\co}{\mathbb Z}/8)\sim \tilde X_{0,0}\vee \tilde
X_{2,1}.\]
Main parts of the above splittings are given by the author in
\cite[(6)]{Y2} by direct computations of $\wbar W_G(A)$ (see
\cite[page 149]{Y2}). However we get the theorem more easily
by using cohomology here. For example, let us consider the case
$G=A{\co}{\langle}k{\rangle}$. The cohomology
\[H^0(BG)\cong {\mathbb Z}/3, \qua H^2(BG)\cong 0,\qua H^4(BG)\cong {\mathbb Z}/3\]
implies that $BG$ contains just one copy each of $\tilde X_{0,0}$, $\tilde
X_{2,0}$ and $L(1,0)$, but
does not contain $\tilde X_{1,0}$, $L(1,1)$. Since $\det(k)=1$, we also know that
$\tilde X_{0,1},\tilde X_{2,1}$ are contained. So we can see
\[B(A{\colon}{\mathbb Z}/4)\sim \tilde X_{0,0}\vee \tilde X_{0,1}\vee \tilde
X_{2,0}
\vee \tilde X_{2,1} \vee L(1,0).\]
Next consider the case $G'=A \colon {\langle}l{\rangle}$. The fact
$H^4(BG')\cong 0$
implies that $BG'$ does not contain $\tilde X_{2,0},L(1,0)$. The
determinant $\det(l)=-1$
together with $l \co a\mapsto -a$ shows that $BG'$ contains $\tilde X_{2,1}$
but does not contain
$\tilde X_{0,1}$. Hence we know $BG'\sim \tilde X_{0,0} \vee \tilde
X_{2,1}$.
Moreover we know $B(A \colon SD_{16})\sim \tilde X_{0,0}$ since
$w \co a\mapsto -a$ but
$\det(w)=1$. Thus we have the graph
\[ \stackrel{\tilde X_{0,0}}{\gets} SD_{16}
\stackrel{\tilde X_{2,1}}{\longleftarrow} {\mathbb Z}/8
\stackrel{\tilde X_{2,0}\vee \tilde X_{0,1}\vee L(1,0)}
{\longleftarrow}
{\mathbb Z}/4 .\]
Similarly we get the other parts of the above graph.
\begin{cor} \label{cor:cor5.2}
Let $S={\mathbb Z}/3[a_1+a_2,a_1a_2]$. Then we have the isomorphisms
\begin{align*}
H^*(\tilde X_{0,0})& \cong S\{1,\tilde D_1\} \\
H^*(\tilde X_{0,1})& \cong S\{b,\tilde D_1b\} \\
H^*(\tilde X_{2,1})& \cong S\{ab,(a_1-a_2)b\} \\
H^*(\tilde X_{2,0}\vee L(1,0))& \cong S\{a,a_1-a_2\}\cong
DA\{a,a^2,a^3\}.
\end{align*}
\end{cor}
Here we write down the decomposition of cohomology for a typical case
\begin{align*}
H^*(BA)^{{\langle}k{\rangle}} & \cong S\{1,a_1-a_2\}\{1,a\}\{1,b\} \\
& \cong S\{1,a(a_1-a_2), b,ba(a_1-a_2), ab,(a_1-a_2)b, a,(a_1-a_2)\} \\
& \cong H^*(\tilde X_{0,0})\oplus H^*(\tilde X_{0,1})
\oplus H^*(\tilde X_{2,1})\oplus H^*(\tilde X_{2,0}\vee L(1,0)).
\end{align*}
\section[Cohomology and splitting of B3(1+2)]
{Cohomology and splitting of $B3_+^{1+2}$.}
\label{sec:sec6}
In this section we study the cohomology and stable splitting of $BG$
for
$G$ having a $3$--Sylow subgroup $E=3_+^{1+2}$.
In the splitting for $BE$, the summands $X_{q,k}$ are called
dominant summands. Moreover the summand $L(2,0)\vee L(1,0)$ is
usually written as $M(2)$.
\begin{lemma} \label{lem:lem6.1}
If $G\supset E{\co}{\langle}\diag(-1,-1){\rangle}$ identifying
$\Out(E)\cong
GL_2({\mathbb F}_3)$ and
$G$ has $E$ as a $3$--Sylow subgroup, then
\[BG\sim (\text{dominant summands})\vee (\sharp_G(A)-\sharp_G(F^{\mathrm{ec}}A))M(2).\]
\end{lemma}
\begin{proof}
From \fullref{lem:lem4.11},
we know $m(G,2)_1=0$ ie $L(2,1)$ is not
contained.
The summand $L(1,1)$ is also not contained, since
$H^2(BE)^{{\langle}\diag(-1,-1){\rangle}}\cong 0$.
The lemma is almost immediate from \fullref{prop:prop4.9}
and \fullref{cor:cor4.10}.
\end{proof}
\begin{thm} \label{thm:thm6.2}
If $G$ has a $3$--Sylow subgroup $E$, then $BG$ is homotopic to the
classifying space
of one of the following groups.
Moreover the stable splitting is given by the graph so that
$\stackrel{X_1}{\gets}\cdots\stackrel{X_s}{\gets}G$
means $BG\sim X_1\vee\cdots\vee X_s$ and $EH=E \colon H$ for $H\subset
SD_{16}$
\begin{footnotesize}
\[\stackrel{X_{0,0}}{\gets}
J_4 \begin{cases}\stackrel{M(2)}{\gets}
ESD_{16} \begin{cases}
\stackrel{X_{0,1}}{\gets}
EQ_8\\
\quad \\
\stackrel{X_{2,1}}{\gets} E{\mathbb Z}/8
\stackrel{\stackrel{X_{2,0}\vee X_{0,1}}{\vee M(2)}} {\gets}
E{\mathbb Z}/4 \stackrel{\stackrel{2X_{2,0}\vee 2X_{2,1}}{\vee
2M(2)}}{\gets}
E{\mathbb Z}/2 \stackrel{\stackrel{2X_{1,0}\vee 2X_{1,1}\vee}{
4L(2,1)\vee 2L(1,1)}}{\gets}
E\\
\stackrel{X_{2,0}\vee M(2)}{\gets} ED_8
\ \stackrel{X_{2,0}\vee X_{2,1}\vee M(2)}{
\gets}
\ E({\mathbb Z}/2)^2
\stackrel{\stackrel{X_{1,0}\vee X_{1,1}\vee}{2L(2,1)\vee L(1,1)}}
{\gets}
E{\mathbb Z}/2
\end{cases}\\
\quad \\
\stackrel{X_{2,0}}{\gets}\ ^2F_4(2)'
\stackrel{M(2)}{\gets} M_{24}
\stackrel{X_{2,0}\vee X_{2,1}}{\gets} M_{12}
\stackrel{M(2)}{\gets} {\mathbb F}_3^2{\co}GL_2({\mathbb F}_3)
\stackrel{\stackrel{X_{1,0}\vee X_{1,1}\vee}{L(2,1)\vee L(1,1)}}
{\gets} {\mathbb F}_3^2{\co}SL_2({\mathbb F}_3)
\end{cases}\]
\end{footnotesize}
\end{thm}
\begin{proof}
All groups except for $E$,$E{\co}{\langle}w'{\rangle}$ and
${\mathbb F}_3^2{\co}SL_2({\mathbb F}_3)$
contain
$E{\co}{\langle}\diag(-1,-1){\rangle}$. Hence we get the theorem
from \fullref{cor:cor4.4}, \fullref{thm:thm5.1} and \fullref{lem:lem6.1},
except for the cases of
$H^*(B(E{\co}{\langle}w'{\rangle}))$
and $H^*({\mathbb F}_3^2{\co}SL_2({\mathbb F}_3))$.
Let $G=E{\co}{\langle}w'{\rangle}$. Note $w'{\co}y_1 \mapsto
y_1-y_2,
y_2\mapsto -y_2,
v\mapsto -v$. Hence $H^2(G)\cong {\mathbb Z}/3\{y_1+y_2\}$. So $BG$ contains
one
$L(1,1)$. Next consider the number of $L(2,0)$, $L(2,1)$.
The $G$--conjugacy classes of $A$--subgroups are $A_0,A_2,A_1\sim
A_{\infty}$. The Weyl groups are
\[W_G(A_{\infty})\cong U,\qua W_G(A_2)\cong
U{\colon}{\langle}\diag(-1,-1){\rangle},
\qua W_G(A_{0})\cong U{\colon}{\langle}\diag(-1,1){\rangle},\]
eg $N_G(A_0)/A_0$ is generated by
$b,w'$, which are represented by
$u,\diag(-1,1)$ respectively.
By the arguments similar to the proof of \fullref{lem:lem4.11},
we have that
\[\begin{cases} \dim(\wbar W_G(A_i)M_{2,0})=1\text{ for all }i\\
\dim(\wbar W_G(A_i)M_{2,1})=1,1,0\text{ for }i=\infty,2,0
\text{ respectively}.
\end{cases}\]
Thus we show $BG\supset 3L(2,0)\vee 2L(2,1)$
and we get the graph for $G=E{\colon}{\langle}w'{\rangle}$.
For the place $G={\mathbb F}_3^2{\co}SL_2({\mathbb F}_3)$, we see
$W_G(A_{\infty})\cong SL_2({\mathbb F}_3)$. We also have
\[\begin{cases} \dim(\wbar W_G(A_i)M_{2,0})=0,1,1
\text{ for }i=\infty,2,0
\text{ respectively} \\
\dim(\wbar W_G(A_i)M_{2,1})=0,1,0\text{ for } i=\infty,2,0
\text{ respectively}.
\end{cases}\]
Thus we can see the graph for the place
$H^*({\mathbb F}_3^2{\co}SL_2({\mathbb F}_3))$.
\end{proof}
\begin{rem} From Tezuka--Yagita \cite{T-Y}, Yagita \cite{Y1} and
\fullref{thm:thm2.1}, we have the following
homotopy equivalences (localized at $3$).
\[BJ_4\cong BRu,\quad BM_{24}\cong BHe,
\qua BM_{12}\cong BGL_3({\mathbb F}_3)\]
\[ B(E{\co}SD_{16})\cong BG_2(2)\cong BG_2(4),
\qua B(E{\co}D_8)\cong BHJ\cong BU_3(3).\]
\end{rem}
We write down the cohomologies explicitly (see also Tezuka--Yagita
\cite{T-Y} and Yagita \cite{Y2}). First we compute $H^*(B(E{\co}H))$.
The following cohomologies are easily computed
\begin{align*}
H^*(BE)^{{\langle}m{\rangle}} &\cong
{\mathbb Z}/3[C,v]\{1,y_1y_2,Y_1,Y_2\},\qua
H^*(BE)^{{\langle}w{\rangle}}\cong {\mathbb Z}/3[C,v]\{1,Y_1+Y_2\}.\\
H^*(BE)^{{\langle}k{\rangle}} & \cong {\mathbb Z}/3[C,v]\{1,a\}
\text{ where }a=-Y_1+Y_2+y_1y_2,\ C^2=a^2.
\end{align*}
Recall that $V=v^{p-1}$ and $C$ multiplicatively generate
$H^*(BE)^{\Out(E)}$.
Let us write
\[CA={\mathbb Z}/p[C,V]\cong H^*(BE)^{\Out(E)}.\]
Then we have
\begin{align*}
H^*(BE)^{{\langle}w'{\rangle}} & \cong
CA\{1,y_1',Y_1',Y_2,Y_2y_1',y_2v,y_1'y_2v,Y_1'y_2v\}
\text{ with }y_1'=y_1+y_2,\ Y_1'=(y_1')^2 \\
H^*(BE)^{{\langle}w',m{\rangle}} & \cong CA\{1,a,a',Y_2\}\text{ where }
a'=(t+Y_2)v=y_1'y_2v.
\end{align*}
We can compute
\begin{align*}
H^*(BE)^{Q_8} & \cong H^*(BE)^{{\langle}k{\rangle}}\cap
H^*(BE)^{{\langle}w{\rangle}}
\cong {\mathbb Z}/3[C,v]\cong CA\{1,v\},\\
H^*(BE)^{D_8}& \cong CA\{1,a\},\qua
H^*(BE)^{{\langle}l{\rangle}}\cong CA\{1,av\}.
\end{align*}
Hence we have $H^*(BE)^{SD_{16}}\cong CA.$
Let $D_1=C^p+V$ and $D_2=CV$. Then it is known that
\[D_1|A_i=\tilde D_1, \ D_2|A_i=\tilde D_2\text{ for all }
i\in {\mathbb F}_p\cup
\infty.\]
So we also write $DA\cong{\mathbb Z}/p[D_1,D_2]$. Since $CD_1-D_2=C^{p+1}$,
we can write
$CA\cong DA\{1,C,C^2,\ldots,C^p\}.$
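(The relation used here is immediate from the definitions:
$CD_1-D_2=C(C^p+V)-CV=C^{p+1}$, and inductively every power $C^m$ with $m>p$
lies in $DA\{1,C,\ldots,C^p\}$.)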
Now return to the case $p=3$ and we get (see \cite{T-Y})
\[H^*(BJ_4)\cong H^*(BE)^{SD_{16}}\cap
i_0^{*-1}H^*(BA_0)^{GL_2({\mathbb F}_3)}
\cong DA.\]
\begin{prop} \label{prop:prop6.3}
There are isomorphisms for $|a''|=4$,
\[H^*(^2F_4(2)')\cong DA\{1,(D_1-C^3)a''\},\qua
H^*(M_{24})\cong DA\oplus CA\{a''\}.\]
\end{prop}
\begin{proof}
Let $G=M_{24}$. Then $G$ has just two $G$--conjugacy classes of
$A$--subgroups
\[\{A_0, A_2\}, \qua \{A_1, A_{\infty}\}.\]
It is known that one is $F^{\mathrm{ec}}$--radical and the other is not.
Suppose that $A_0$ is $F^{\mathrm{ec}}$--radical. Then $W_G(A_0)\cong
GL_2({\mathbb F}_3)$.
Let $a''=a+C$. Then
\[a''|A_0=(-Y_1+Y_2+y_1y_2+C)|A_0=0,\qua a''|A_{\infty}=-Y.\]
By \fullref{thm:thm3.1}
\[H^*(BM_{24})\cong H^*(BE)^{D_8}\cap
i_{A_{0}}^{*-1}H^*(BA_0)^{W_G(A_0)},\]
we get the isomorphism for $M_{24}$.
When $A_{\infty}$ is $F^{\mathrm{ec}}$--radical, we take $a''=a-C$. Then we
get the same result.
For $G={}^2F_4(2)'$, both conjugacy classes are $F^{\mathrm{ec}}$--radical
and $W_G(A_{\infty})\cong GL_2({\mathbb F}_3)$. Hence (for the case $a''=a+C$)
\[H^*(B^2F_4(2)')\cong H^*(BM_{24})
\cap i_{A_{\infty}}^{*-1}H^*(BA_{\infty})^{GL_2({\mathbb F}_3)}.\]
We know
\[(D_1-C^3)a''|A_0=0, \qua (D_1-C^3)a''|A_{\infty}=-VY=-\tilde D_2.
\]
Thus we get the cohomology of $^2F_4(2)'$.
\end{proof}
\begin{rem} In \cite{T-Y,Y2}, we take
\[({\mathbb Z}/2)^2={\langle}\diag(\pm 1,\pm 1){\rangle},\quad
D_8={\langle}\diag(\pm 1,\pm 1),
w{\rangle}. \]
For this case, the $M_{24}$--conjugacy classes
of $A$--subgroups are $A_0\sim A_{\infty},\ A_1\sim A_2$ , and we can
take
$a''=C-Y_1-Y_2$. The expressions of
$H^*(M_{12})$,
$H^*(A{\colon}GL_2({\mathbb F}_3))$
become simpler (see \cite{T-Y,Y2}), in
fact,
\[H^*(B^2F_4(2)')\cong DA\{1,(Y_1+Y_2)V\}.\]
\end{rem}
\begin{rem} \cite[Corollary 6.3]{T-Y}
and \cite[Corollary 3.7]{Y2} were not correct. This followed from
an error in \cite[Theorem 6.1]{T-Y}. That theorem is correct only after
adding the assumption that
there are exactly two $G$ conjugacy classes of $A$--subgroups such that
one is $p$--pure and the other is not. This assumption is always
satisfied for
sporadic simple groups but not for $ ^2F_4(2)'$.
\end{rem}
\begin{cor} \label{cor:cor6.4}
There are isomorphisms of cohomologies
\begin{align*}
H^*(X_{2,0})& \cong DA\{D_2\},\qua
H^*(X_{2,1})\cong CA\{av\}
\text{ where }
(av)^2=CD_2\\
H^*(X_{0,1})& \cong CA\{v\}, \qua
H^*(M(2))\cong DA\{C,C^2,C^3\}\text{ where }C^4=CD_1-D_2.
\end{align*}
\end{cor}
Here we write down typical examples. First recall
\begin{align*}
CA &\cong DA\{1,C,C^2,C^3\}\cong H^*(X_{0,0})\oplus H^*(M(2)) \\
CA\{C\} & \cong DA\{C,C^2,C^3,\ D_2\}\cong H^*(M(2))\oplus
H^*(X_{2,0}).
\end{align*}
Thus the decomposition for $H^*(BE)^{D_8}$ gives the isomorphisms
\[ CA\{1,a''\}\cong CA\{1,C\}\cong
H^*(X_{0,0})\oplus H^*(M(2))\oplus H^*(X_{2,0})\oplus H^*(M(2)).\]
Similarly the decomposition for $H^*(BE)^{{\langle}k{\rangle}}$ gives
the isomorphism
\[ CA\{1,a, v, av\}\cong H^*(BE)^{D_8}\oplus H^*(X_{0,1})
\oplus H^*(X_{2,1}).\]
We recall here \fullref{lem:lem4.7} and the module
\[X_{q,k}({\langle}k{\rangle})=\bigl(S(A)^q\otimes v^k\bigr) \cap
H^*(B(E{\co}{\langle}k{\rangle})).\]
Then it is easily seen that
\[X_{0,0}({\langle}k{\rangle})=\{1\},
X_{2,0}({\langle}k{\rangle})=\{a\}, X_{0,1}
({\langle}k{\rangle})=\{v\},
X_{2,1}({\langle}k{\rangle})=\{av\}. \]
Hence we also see $B(E{\co}{\langle}k{\rangle})$ has the dominant
summands
$X_{0,0}\vee X_{2,0}\vee X_{0,1}\vee X_{2,1}.$
Moreover it has non dominant summands $2M(2)$ since
$H^{4}(B(E{\co}{\langle}k{\rangle}))
\cong {\mathbb Z}/3\{C,a\} $.
Thus we can give another proof of \fullref{thm:thm6.2}
from \fullref{lem:lem4.7} and the cohomologies $H^*(BG)$.
\section[Cohomology for B7(1+2) I]
{Cohomology for $B7_+^{1+2} $ I.}
\label{sec:sec7}
In this section, we assume $p=7$ and $E=7_+^{1+2}$.
We are interested in groups
$O'N,O'N \colon 2,He,He{\co}2,Fi_{24}',Fi_{24}$
and three exotic $7$--local groups. Denote them by $RV_1,RV_2,RV_3$
according to the numbering in \cite{R-V}.
We have the diagram from Ruiz and Viruel
\begin{footnotesize}
\[\begin{cases}\stackrel{3SD_{32}}{\longleftarrow}
\ \stackrel{SL_2({\mathbb F}_7){\colon}2}{RV_3}
\ \stackrel{3SD_{16}}{\longleftarrow}
\ \stackrel{SL_2({\mathbb F}_7){\colon}2,SL_2({\mathbb F}_7){\colon}2}{RV_2}
\ \stackrel{3SD_{16}}{\longleftarrow}
\ \stackrel{SL_2({\mathbb F}_7){\colon}2}{O'N{\colon}2}
\ \stackrel{3D_{8}}{\longleftarrow}
\ \stackrel{SL_2({\mathbb F}_7){\colon}2,SL_2({\mathbb F}_7){\colon}2}{O'N}
\\
\quad \\
\stackrel{6^2{\colon}2}{\longleftarrow}
\stackrel{SL_2({\mathbb F}_7){\colon}2, GL_2({\mathbb F}_7)}{RV_1}
\stackrel{6^2{\colon}2}{\longleftarrow}
\stackrel{SL_2({\mathbb F}_7){\colon}2}{Fi_{24}}
\stackrel{6S_3}{\longleftarrow}
\stackrel{SL_2({\mathbb F}_7){\colon}2,SL_2({\mathbb F}_7){\colon}2}{Fi_{24}'}
\stackrel{6S_3}{\longleftarrow}
\stackrel{SL_2({\mathbb F}_7){\colon}2}{He{\colon}2}
\stackrel{3S_3}{\longleftarrow}
\stackrel{SL_2({\mathbb F}_7)}{He}
\end{cases} \]
\end{footnotesize}
Here $ \stackrel{H}{\longleftarrow}
\stackrel{W_1,\ldots,W_s}{G}$
means $W_G(E)\cong H$ and $W_i=W_G(A_i)$ for the $G$--conjugacy classes of
$F^{\mathrm{ec}}$--radical
$A$--subgroups $A_i$.
In this section, we study the cohomology of $O'N,RV_2,RV_3$.
First we study the cohomology of $G=O'N$.
The multiplicative generators of
$H^*(BE)^{3D_8}$ were already given in \cite[Lemma 7.10]{T-Y}.
We study the more detailed cohomology structure here.
\begin{lemma} \label{lem:lem7.1} There is the $CA$--module isomorphism
$$
H^*(BE)^{3D_8} \cong CA\{1,a,a^2,a^3/V,a^4/V,a^5/V,
b,ab/V,a^2b/V, d,ad,a^2d\},
$$
where $a=(y_1^2+y_2^2)v^2$,$b=y_1^2y_2^2v^4$ and
$d=(y_1y_2^3-y_1^3y_2)v$.
\end{lemma}
\begin{proof}
The group $3D_8\subset GL_2({\mathbb F}_7)$
is generated by $\diag(-1,1)$, $\diag(2,2)$ and $w=\left(\begin{smallmatrix}
0&1\\ -1&0\end{smallmatrix}\right)$.
If $y_1^iy_2^jv^k$ is invariant under $\diag(-1,1),\diag(1,-1)$
and $\diag(2,2)$, then $i=j=k \,\operatorname{mod}(2)$ and $i+j+2k=0 \,\operatorname{mod}(3)$.
When $i,j\le 6,k\le 5$ but $(i,j)\not =(6,6)$,
the invariant monomials have the following terms,
$y_1^2v^2$, $y_1^4v^4$, $y_1^6$,
$y_1^2y_2^2v^4$, $y_1^4y_2^4v^2$, $y_1y_2v^5$, $y_1^3y_2^3v^3$,
$y_1^5y_2^5v$, $y_1^2y_2^4$, $y_1^2y_2^6v^2$, $y_1^4y_2^6v^4$,
$y_1y_2^3v$, $y_1y_2^5v^3$, $y_1^3y_2^5v^5$
and terms obtained by exchanging $y_1$ and $y_2$. Recall that
$w \co y_1\mapsto y_2,y_2\mapsto -y_1$ and $v\to v$.
From the expression of \eqref{eqn:eqn3.2}, we have
\[H^*(BE)^{3D_8}\cong CA\{1,a,a^2,a', b,b',
c,c',c'', d,ad,bd\}\]
where $a=(y_1^2+y_2^2)v^2$,$a'=y_1^6+y_2^6$,$b=y_1^2y_2^2v^4$,
$b'=y_1^4y_2^4v^2$, $c=(y_1^2y_2^4+y_1^4y_2^2)$,
$c'=(y_1^2y_2^6+y_1^6y_2^2)v^2$ , $c''= (y_1^4y_2^6+y_1^6y_2^4)v^4$,
$d=(y_1y_2^3-y_1^3y_2)v$, $ad=(y_1y_2^5-y_1^5y_2)v^3$
and $bd=(y_1^3y_2^5-y_1^5y_2^3)v^5$.
Here $a^2d=bd$ from $(y_1^6-y_2^6)y_1y_2=0$ in $H^*(BE)$.
It is easily seen that
$b'V=b^2$, $cV=ab$, $c'V=(a^2-2b)b$ and $c''V=ab^2$.
Moreover we get
\begin{align*}
a^3/V& =(y_1^2+y_2^2)^3 =
(y_1^6+y_2^6)+3y_1^2y_2^2(y_1^2+y_2^2)=a'+3ab \\
a^4/V& =(y_1^2+y_2^2)^4v^2=
((y_1^8+y_2^8)+4y_1^2y_2^2(y_1^4+y_2^4)+6y_1^4y_2^4)v^2 \\
& =aC+4c'+6b' \\
a^5/V & =((y_1^{10}+y_2^{10})+5y_1^2y_2^2(y_1^6+y_2^6)+
10y_1^4y_2^4(y_1^2+y_2^2))v^4 \\
& =c'C+10bC+10c''.
\end{align*}
Hence, we can take generators $a^4/V,a^5/V,ab/V,a^2b/V$ for $b',c'',
c,c'$ respectively, and get the lemma.
\end{proof}
Note that the computations show
\begin{align*}
a^6& =(y_1^2+y_2^2)^6v^{12}
=(y_1^{12}-y_1^{10}y_2^2+y_1^8y_2^4-y_1^6y_2^6+y_1^4y_2^8-y_1^2y_2^{10}
+y_2^{12})V^2\\
&=(y_1^{12}-y_1^6y_2^6+y_2^{12})V^2 =C^2V^2=D_2^2,
\end{align*}
where we use the fact $y_1^7y_2-y_1y_2^7=0$.
\begin{lemma} \label{lem:lem7.2}
$ H^*(BE)^{3SD_{16}}\cong CA\{1,a,a^2,a^3/V,a^4/V,a^5/V\}.$
\end{lemma}
\begin{proof}
Take the matrix
$k'=\bigl(\begin{smallmatrix}-1&1\\-1&-1\end{smallmatrix}\bigr)$ such that
${\langle}3D_8,k'{\rangle}\cong 3SD_{16}$. Then we have
\begin{align*}
{k'}^*{\co} a & =(y_1^2+y_2^2)v^2\mapsto ((-y_1+y_2)^2+(-y_1-y_2)^2)(2v)^2=a,\\
b&=y_1^2y_2^2v^4\mapsto (y_1^2-y_2^2)^2(2v)^4=2(a^2-4b)=2a^2-b.
\end{align*}
(If we take $\tilde b=b-a^2$, then ${k'}^*{\colon}\tilde b\mapsto
-\tilde b$.)
Similarly we can compute $k' \co d\mapsto -d$.
Then the lemma is almost immediate from the preceding lemma.
\end{proof}
\begin{lemma} \label{lem:lem7.3}
$ H^*(BE)^{3SD_{32}}\cong CA\{1,a^2,a^4/V\}$.
\end{lemma}
\begin{proof}
Take the matrix
$l'=\left(\begin{smallmatrix}-1&3\\-3&-1\end{smallmatrix}\right)$ so that
${l'}^2=k'$ and ${\langle}3D_8,l'{\rangle}\cong 3SD_{32}$.
We see that
\[{l'}^*{\colon}
a=(y_1^2+y_2^2)v^2\mapsto ((-y_1+3y_2)^2+(-3y_1-y_2)^2)(3v)^2=-a,\]
which shows the lemma.
\end{proof}
\begin{thm} \label{thm:thm7.4}
There is the isomorphism with $C'=C-a^3/V$
\[H^*(BO'N)\cong DA\{1,a,a^2, b,ab,a^2b\}
\oplus CA\{d,ad,a^2d,C',C'a,C'a^2\}\]
\end{thm}
\begin{proof}
Let $G=O'N$.
The orbits of the $N_G(E)$--action on the $A$--subgroups of $E$ are given by
$\{A_0,A_{\infty}\}$,$\{A_1,A_6\}$ and $\{A_2,A_3,A_4,A_5\}$.
From Ruiz and Viruel \cite{R-V}, $A_0$, $A_{\infty}$, $A_1$ and $A_6$ are
$F^{\mathrm{ec}}$--radical subgroups. Hence we know that
\[H^*(O'N)\cong H^*(BE)^{3D_8}\cap
i_{A_0}^{*-1}H^*(BA_0)^{SL_2({\mathbb F}_7){\co}2}
\cap i_{A_1}^{*-1}H^*(BA_1)^{SL_2({\mathbb F}_7){\co}2}.\]
For element $x=d$ or $x=C'$, the restrictions are
$x|A_0=x|A_1=0$.
Hence we see that $CA\{x\}$ are contained in $H^*(BG)$.
We can take $C',C'a,C'a^2$ instead of $a^3/V$,
$a^4/V$ and $a^5/V$
as the $CA$--module generators since
$a^3/V=(C-C').$
Moreover we know $CA\{C',C'a,C'a^2\}\subset H^*(BG)$.
It is known that
${\mathbb Z}/p[y,u]^{SL_2({\mathbb F}_p)}\cong {\mathbb Z}/p[\tilde D_1,\tilde D_2']$
where $\tilde D_2'=yu^p-y^pu$
and $(\tilde D_2')^{p-1}=\tilde D_2$.
Hence we know ${\mathbb Z}/7[y,u]^{SL_2({\mathbb F}_7){\co}2}\cong {\mathbb Z}/7[\tilde
D_1,
(\tilde D_2')^2].$
Since $y_1v|A=\tilde D'_2$ we see
$a|A_0=(\tilde D'_2)^2, a|A_1=2(\tilde D'_2)^2$.
Hence $a,a^2$ are in $H^*(BG)$.
The fact that $b|A_0=0$ and $b|A_1=(\tilde D'_2)^4$ implies that
$b\in H^*(BG)$. Hence all $a^ib^j$ are also in $H^*(BG)$.
\end{proof}
Next we consider the group $G=O'N{\co}2$. Its Weyl group $W_G(E)$
is isomorphic to $3SD_{16}$.
So we have $H^*(B(O'N{\colon}2))\cong H^*(BO'N)\cap
H^*(BE)^{3SD_{16}}.$
\begin{cor} \label{cor:cor7.5} $H^*(B(O'N{\colon}2))\cong (DA\{1,a,a^2\}
\oplus CA\{C',C'a,C'a^2\}).$
\end{cor}
\begin{cor} \label{cor:cor7.6}
$H^*(BRV_2)\cong DA\{1,a,a^2,a^3,a^4,a^5\}.$
\end{cor}
\begin{proof}
Let $G=RV_2$. Since $A_2$ is also $F^{\mathrm{ec}}$--radical and $W_G(A_2)=
SL_2({\mathbb F}_7){\co}2$. Hence we have
\[H^*(BG)\cong H^*(BE)^{3SD_{16}}
\cap i_{A_2}^{*-1}H^*(BA_2)^{SL_2({\mathbb F}_7){\co}2}.\]
Thus the corollary follows from the theorem.
\end{proof}
Since $H^*(BRV_3)\cong H^*(BE)^{3SD_{32}}\cap H^*(BRV_2)$, we have
the following corollary.
\begin{cor}\label{cor:cor7.7}
$H^*(BRV_3)\cong DA\{1, a^2,a^4\}.$
\end{cor}
\fullref{cor:cor7.7} can also be proved in the following way.
\begin{proof}
Let $G=RV_3$. Since there is just
one $G$--conjugacy class of $A$--subgroups,
by Quillen's theorem \cite{Q}, we know
\[H^*(BRV_3)\subset H^*(BA_0)^{SL_2({\mathbb F}_7){\co}2}\cong
DA\{1,(\tilde D_2')^2,(\tilde D_2')^4\}\text{ with }(\tilde D_2')^6=\tilde
D_2.\]
Note that $a^2|A_0=(\tilde D_2')^4,\ a^4|A_0=(\tilde D_2')^2 \tilde
D_2$
and $D_2|A_0=\tilde D_2$.
The fact ${l'}^*{\co}a\mapsto -a$ implies that
$DA\{a^2,a^4\}\subset
H^*(BG)$ but $DA\{a,a^3,a^5\}\cap H^*(BG)=0$.
\end{proof}
\fullref{cor:cor7.6} can also be proved in the following way.
\begin{proof}
Let $G=RV_2$. Since there are just
two $G$--conjugacy classes of $A$--subgroups,
by Quillen's theorem \cite{Q}, we know
\[H^*(BRV_2)\subset H^*(BA_0)^{SL_2({\mathbb F}_7){\co}2}
\times H^*(BA_2)^{SL_2({\mathbb F}_7){\co}2}\]
Since $a\in H^*(BRV_2)$, the map
$i_0^*{\co} H^*(BRV_2)\to H^*(BA_0)^{SL_2({\mathbb F}_7){\co}2}$
is an epimorphism. Take $b'=b^2-2a^2b$ so that
$b'|A_0=b'|A_1=0$. Hence
$$\Ker i_{A_0}^* \supset DA\{b',b'a,C'V\}.$$
Moreover $b'|A_2=(\tilde D_2')^2\tilde D_2,
b'a|A_2=(\tilde D_2')^4\tilde D_2,
C'V|A_2=\tilde D_2.$
Since $(\tilde D_2')^2$ itself is not in the image of $i_{A_2}^*$,
we get the isomorphism
\[H^*(BRV_2)\cong DA\{1,a,a^2\}\oplus DA\{C'V,b',b'a\}.\proved\]
\end{proof}
\section[Cohomology for B7(1+2) II]
{Cohomology for $B7_+^{1+2}$ II}
\label{sec:sec8}
In this section, we study cohomology of $He,Fi_{24},RV_1$.
First we consider the group $G=He$.
The multiplicative generators of
$H^*(He)$ were already computed by Leary \cite{L1}.
We study the more detailed cohomology structure here.
The Weyl group is $W_G(E)\cong 3S_3$.
\begin{lemma} \label{lem:lem8.1} The invariant $H^*(BE)^{3S_3}$ is
isomorphic to
\[ CA\otimes \bigl({\mathbb Z}/7\{1,\wbar b,\wbar b^2\}\{1,\wbar a,\wbar b^3/V\}
\oplus {\mathbb Z}/7\{\wbar d\}\{1,\wbar a,\wbar b,\wbar b^2/V,\wbar b^3/V\}
\oplus {\mathbb Z}/7\{\wbar a^2\}\bigr),\]
where $\wbar a=(y_1^3+y_2^3)$,
$\wbar b=y_1y_2v^2$ and $\wbar d=(y_1^3-y_2^3)v^3$.
\end{lemma}
\begin{proof}
The group $3S_3\subset GL_2({\mathbb F}_7)$
is generated by $T'=\{\diag(\lambda,\mu)\,|\,\lambda^3=\mu^3=1\}$ and
$w'=\left(\begin{smallmatrix} 0&1\\ 1&0\end{smallmatrix}\right)$.
If $y_1^iy_2^jv^k$ is invariant under $T'$, then $i=j=-k \,\operatorname{mod}(3)$.
When $i,j\le 6,k\le 5$ but $(i,j)\not =(6,6)$,
the invariant monomials have the following terms
\begin{eqnarray*}
\{1,\wbar c=\wbar b^3/V=y_1^3y_2^3\}\{1,v^3\}\{1,\wbar b=y_1y_2v^2,
\wbar b'=y_1^2y_2^2v\} \\
\{1,v^3\}\{y_1^3,y_1^6,y_1y_2^4v^2,y_1^2y_2^5v,y_1^3y_2^6\},
\end{eqnarray*}
and terms obtained by exchanging $y_1$ and $y_2$. Recall that
$w' \co y_1\mapsto y_2,y_2\mapsto y_1$ and $v\to -v$.
The following elements are invariant
\begin{equation*}
\begin{array}{lll}
\wbar a\wbar b =(y_1y_2^4+y_1^4y_2)v^2, &
\wbar a\wbar b^2 =(y_1^2y_2^5+y_1^5y_2^2)v^4, &
\wbar a\wbar c = (y_1^3y_2^6+y_1^6y_2^3),\\
\wbar b\wbar d =(y_1y_2^4-y_1^4y_2)v^5, &
\wbar b^2\wbar d/V =(y_1^5y_2^2-y_1^2y_2^5)v, &
\wbar c\wbar d =(y_1^3y_2^6-y_1^6y_2^3)v^3 \\
\wbar a\wbar d =(y_1^6-y_2^6)v^3, &
\wbar a\wbar b^3/V =y_1^3y_2^6+y_1^6y_2^3. & \\
\end{array}
\end{equation*}
Thus we get the lemma from \eqref{eqn:eqn3.2}.
\end{proof}
\begin{lemma} \label{lem:lem8.2}
$ H^*(BE)^{6S_3}\cong CA\otimes
({\mathbb Z}/7\{1,\wbar b,\wbar b^2\}\{1,\wbar b^3/V\}\oplus {\mathbb Z}/7\{\wbar d\wbar a,
\wbar a^2\}).$
\end{lemma}
\begin{proof}
We can think of $6S_3$ as ${\langle}3S_3,\diag(-1,-1){\rangle}$. The action
of $\diag(-1,-1)$
is given by
$\wbar a\mapsto -\wbar a$,$\wbar b\mapsto
\wbar b$, and $\wbar d\mapsto -\wbar d$.
From \fullref{lem:lem8.1}, we have the lemma.
\end{proof}
\begin{lemma} \label{lem:lem8.3}
$H^*(BE)^{6^2{\co}2}\cong CA\{1,\wbar b^2,\wbar c'',\wbar b^4/V\}$
where $\wbar c''=\wbar a^2-2\wbar b^3/V-2C.$
\end{lemma}
\begin{proof}
We can think of $6^2{\co}2$ as ${\langle}6S_3,\diag(3,1){\rangle}$. The
action of
$\diag(3,1)$
is given by
$\wbar a^2\mapsto \wbar a^2-4\wbar c,\ \wbar b\mapsto -\wbar b,\
\wbar c\mapsto -\wbar c,\ \wbar d\wbar a\mapsto -\wbar d\wbar a$.
For example $\wbar b=y_1y_2v^2 \mapsto (3y_1)y_2(3v)^2=-\wbar b$.
Moreover we have $\wbar c''=Y_1+Y_2-2C\mapsto \wbar c''$.
Thus we have the lemma.
\end{proof}
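The identity $\wbar c''=Y_1+Y_2-2C$ used above follows by a direct check:
$\wbar a^2=(y_1^3+y_2^3)^2=Y_1+Y_2+2\wbar c$ and $\wbar b^3/V=y_1^3y_2^3=\wbar c$,
so that $\wbar c''=\wbar a^2-2\wbar b^3/V-2C=Y_1+Y_2-2C$.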
\begin{thm} \label{thm:thm8.4}
Let $\wbar c'=C+\wbar b^3/V$. Then there is the isomorphism
\[H^*(BHe)\cong DA\{1,\wbar b,\wbar b^2,\wbar d,\wbar d\wbar b,\wbar d\wbar
b^2\}
\oplus CA\{\{\wbar a,\wbar c'\}\{1,\wbar b,\wbar b^2,\wbar d\}, \wbar a^2,
\wbar a^2\wbar c'\}.\]
\end{thm}
\begin{proof}
Let $G=He$.
The orbits of the $N_G(E)$--action on the $A$--subgroups of $E$ are given by
\[\{A_0,A_{\infty}\},\qua \{A_1,A_2,A_4\},\qua \{A_3,A_5,A_6\}.\]
Since $A_6$ is $F^{\mathrm{ec}}$--radical (see Leary \cite{L2}), we have
\[H^*(BHe)\cong H^*(BE)^{3S_3}\cap
i_{A_6}^{*-1}H^*(BA_6)^{SL_2({\mathbb F}_7)}.\]
For element $x=\wbar a$ or $x=C+\wbar c=C+y_1^3y_2^3$, the restrictions
are
$x|A_6=0$, eg $\wbar a|A_6=(y^3+(-y)^3)=0.$
Hence we see that $CA \{x\}$
are contained in $H^*(BG)$.
Since $\wbar b=y_1y_2v^2$, we see
$\wbar b|A_6=-(y(u^7-y^6u))^2=-(\tilde D_2')^2$.
Similarly $\wbar d|A_6=2(\tilde D_2')^3$.
Thus we can compute $H^*(BHe)$.
\end{proof}
\begin{cor}\label{cor:8.5} $H^*(B(He{\co}2))\cong
DA\{1,\wbar b,\wbar b^2\}\oplus CA\{\wbar c',\wbar c'\wbar b,\wbar c'\wbar
b^2,
\wbar a^2,\wbar a\wbar d\}.$
\end{cor}
\begin{thm}\label{thm:thm8.6} There is the isomorphism
\[H^*(BFi_{24}')\cong DA\{1,\wbar b,\wbar b^2,\wbar a^2V,\wbar c'\wbar bV,
\wbar c'\wbar b^2V\}
\oplus CA\{\wbar c'',\wbar a\wbar d\}
\text{ where }\wbar c''=\wbar a^2-2\wbar c'.\]
\end{thm}
\begin{proof}
Let $G=Fi_{24}'$. The subgroup $A_1$ is also $F^{\mathrm{ec}}$--radical with $W_G(A_1)=
SL_2({\mathbb F}_7){\co}2$. Hence we have
\[H^*(BG)\cong H^*(B(He{\co}2))
\cap i_{A_1}^{*-1}H^*(BA_1)^{SL_2({\mathbb F}_7){\co}2}.\]
For the elements $x=\wbar a\wbar d, \wbar c''(=Y_1+Y_2-2C)$, we see
$x|A_1=x|A_6=0$. Hence these elements are in $H^*(BG)$.
Note that $\wbar b|A_1=(\tilde D_2')^2$ and $\wbar b\in H^*(BG)$.
We also know $\wbar a^2V|A_1=\tilde D_2$.
\end{proof}
Since $H^*(BFi_{24})\cong H^*(BFi_{24}')\cap H^*(BE)^{6^2{\co}2}$
and
$\wbar b^4=1/2(\wbar a^2-2C-\wbar c'')\wbar bV$, we have the following
corollary.
\begin{cor} \label{cor:cor8.7}
$ H^*(BFi_{24})\cong (DA\{1,\wbar b^2,\wbar b^4\}
\oplus CA\{\wbar c''\}). $
\end{cor}
For $G=RV_1$, the subgroup $A_0$ is also $F^{\mathrm{ec}}$--radical, and we see
\[H^*(BRV_1)\cong H^*(BFi_{24})\cap i_{A_0}^{*-1}H^*(BA_0)^{GL_2({\mathbb F}_7)}.\]
Hence we have the following corollary.
\begin{cor} \label{cor:cor8.8}
$H^*(BRV_1)\cong DA\{1,\wbar b^2,\wbar b^4,D_2''\}$
with $\wbar b^6=D_2^2+D_2''D_2$.
\end{cor}
\begin{proof}
Let $D_2''=\wbar c''V=\wbar c''(D_1-C^6\wbar c'')$. Then we have
\[\wbar b^6=Y_1Y_2V^2=(Y_1+Y_2-C)CV^2=(C+(Y_1+Y_2-2C))CV^2
=D_2^2+(\wbar c''V)D_2.\]
Thus the corollary is proved.
\end{proof}
\section[Stable splitting for B7(1+2)]
{Stable splitting for $B7_+^{1+2}$}
\label{sec:sec9}
Let $G$ be one of the groups considered in the preceding two sections, eg
$O'N$,$O'N{\co}2$,\ldots,$RV_1$. First consider the dominant
summands $X_{q,k}$.
From \fullref{cor:cor4.6}, the dominant summands are only related to
$H=W_G(E)$.
Recall the notation $X_{q,k}(H)$ in \fullref{lem:lem4.7}.
The module $X_{q,k}(H)$ was already given in the preceding sections.
From \fullref{lem:lem7.1}, \fullref{lem:lem7.2}, \fullref{lem:lem7.3},
\fullref{lem:lem8.1}, \fullref{lem:lem8.2} and \fullref{lem:lem8.3}
we have
\begin{align*}
H=3D_8 &; X_{6,0}=\{a^3/V,a^2b/V\}, X_{4,4}=\{a^2,b\},
X_{2,2}=\{a\}, \\
& \qua X_{4,1}=\{d\},
X_{6,3}=\{ad\} \\
H=3SD_{16}&; X_{6,0}=\{a^3/V\},
X_{4,4}=\{a^2\}, X_{2,2}=\{a\} \\
H=3SD_{32}&; X_{4,4}=\{a^2\} \\
H=3S_3 &; X_{6,0}=\{\wbar b^3/V,\wbar a^2\},
X_{4,4}=\{\wbar b^2\}, X_{2,2}=\{\wbar b\}, \\
& \qua X_{6,3}=\{\wbar a\wbar d\},X_{3,0}=\{\wbar a\},
X_{5,2}=\{\wbar a\wbar b\}, X_{3,3}=\{\wbar d\},
X_{5,5}=\{\wbar d\wbar b\}\\
H=6S_3 &; X_{6,0}=\{\wbar b^3/V,\wbar a^2\},
X_{2,2}=\{\wbar b\}, X_{4,4}=\{\wbar b^2\}, X_{6,3}=
\{\wbar a\wbar d\} \\
H=6^2{\colon}2 &; X_{6,0}=\{\wbar a^2-2\wbar b^3/V\},
X_{4,4}=\{\wbar b^2\}.
\end{align*}
For example, ignoring nondominant summands, we have the following
diagram
\[\stackrel{X_{0,0}\vee X_{4,4}}{\longleftarrow}
B(E{\co}3SD_{32})\stackrel{X_{6,0}\vee X_{2,2}}{\longleftarrow}
B(E{\co}3SD_{16})\stackrel{X_{6,0}\vee X_{4,4}\vee X_{4,1}\vee
X_{6,3}}
{\longleftarrow} B(E{\co}3D_{8}).\]
From \fullref{cor:cor4.4}, the number $m(G,1)_k$ is given by
$\rank_pH^{2k}(BG)$
for $0{<}k{<}p-1$ and by $\rank_pH^{2p-2}(BG)$ for $k=0$. For example
when
$G=E{\co}3S_3$,
\[ m(G,1)_0=3, m(G,1)_3=1,\qua m(G,1)_k=0\text{ for } k\not =0,\not =3.\]
\begin{lemma}\label{lem:lem9.1}
Let $G$ be one of $O'N,O'N{\co}2,\ldots,Fi_{24}',RV_1$. Then
the number $m(G,1)_k$ for $L(1,k)$ is given by
\begin{align*}
m(G,1)_0 & = \begin{cases} 2\text{ for } G=He,He{\colon}2\\
1\text{ for }G=O'N,O'N{\colon}2,Fi_{24},Fi_{24}'
\end{cases} \\
m(G,1)_3 & = 1\text{ for } G=He, \\
m(G,1)_k & = 0 \text{ otherwise}.
\end{align*}
\end{lemma}
Now we consider the number $m(G,2)_k$ of the non dominant summand
$L(2,k)$.
\begin{lemma}\label{lem:9.2}
The classifying spaces $BG$ for $G=O'N,O'N{\co}2$ have the non
dominant summands $M(2)\vee L(2,2)\vee L(2,4)$.
\end{lemma}
\begin{proof}
We only consider the case $G=O'N$, and the case $O'N{\co}2$ is
almost the same.
The non $F^{\mathrm{ec}}$--radical groups are $\{A_2,A_3,A_4,A_5\}$
(recall the proof of \fullref{thm:thm7.4}).
The group $W_G(E)=3D_8\cong
{\langle}\diag(2,2),\diag(1,-1),w{\rangle}$.
Hence the normalizer group is
\[N_G(A_2)=E{\co}{\langle}\diag(2,2),\diag(-1,-1){\rangle}.\]
Here note that $w,\diag(1,-1)$ are not in the normalizer,
eg $w{\co}{\langle}c,ab^2{\rangle} \to
{\langle}c,a^2b^{-2}{\rangle}={\langle}c,ab^6{\rangle}$. Since
$\diag(2,2){\co}ab^2 \mapsto (ab^2)^2$, $c\mapsto c^4$
and
$\diag(-1,-1){\co}ab^2 \mapsto (ab^2)^{-1}$, \linebreak $c\mapsto c,$
the Weyl groups are
\[ W_G(A_2)\cong U{\co}{\langle}\diag(4,2),\diag(1,-1){\rangle}.\]
Let $W_1=U{\co}{\langle}\diag(4,2){\rangle}$. For
$v=\lambda y_1^{p-1}\in M_{p-1,k}$,
we have $\wbar W_1v=\lambda y_2^{p-1}$ since $2^3=1$,
from the argument in the proof of \fullref{lem:lem4.11}. Moreover
\[ \overline{
{\langle}\diag(1,-1){\rangle}}y_2^{p-1}=(1+(-1)^k)y_2^{p-1}, \]
implies that $BG$ contains $L(2,k)$ if and only if $k$ is even.
\end{proof}
\begin{lemma}\label{lem:lem9.3}
The classifying space $BHe$ (resp.
$B(He{\co}2)$,$Fi_{24}'$,$Fi_{24}$)
contains the non dominant summands
\begin{align*}
2M(2)\vee L(2,2)\vee L(2,4)\vee L(2,3)\vee L(1,3)&\\
(\text{resp. } 2M(2)\vee L(2,2)\vee L(2,4),\ \ M(2),\ \ M(2))&.
\end{align*}
\end{lemma}
\begin{proof}
First consider the case $G=He$. The non $F^{\mathrm{ec}}$--radical group are
\[\{A_0,A_{\infty}\},\qua \{A_1,A_2,A_4\}.\]
The group $W_G(E)\cong 3S_3={\langle}\diag(2,1),w'{\rangle}$. So we
see
$N_G(A_0)=E{\colon}{\langle}\diag(2,1){\rangle}$, and this implies
$W_G(A_0)\cong U{\co}{\langle}\diag(2,2){\rangle}$. The fact that
$4^{k}=1\,\operatorname{mod}(7)$ exactly when
$k=0\,\operatorname{mod}(3)$ implies that $BG$ contains the summand
\[ M(2)\vee L(2,3)\vee L(1,3)\]
which is induced from $BA_0$.
Next consider the summands induced from $BA_1$.
The normalizer and Weyl
group are $N_G(A_1)=E{\co}{\langle}w'{\rangle}$ and
$W_G(A_1)=U{\colon}{\langle}\diag(-1,1){\rangle}$
since $w'{\colon} ab\mapsto ab,\ c \mapsto c^{-1}$. So we get
\[M(2)\vee
L(2,2)\vee L(2,4)\]
which is induced from $BA_1$.
For $G=He{\colon}2$, we see $\diag(-1,-1) \in W_G(E)$, which implies
that $\diag(-1,-1)\in N_G(A)$ and $\diag(1,-1)\in W_G(A_0)$.
This means that the non dominant
summand induced from $BA_0$ is $M(2)$ but not $L(2,3)$.
We also know $U{\co}{\langle}\diag(1,-1){\rangle}\subset W_G(A_1)$, but
the summands induced from $BA_1$ are not changed.
For the groups $Fi_{24}'$, $Fi_{24}$, the non $F^{\mathrm{ec}}$--radical subgroups
form just one $G$--conjugacy class $\{A_0,A_{\infty}\}$.
So $BG$ does not contain the summands induced from $BA_1$.
\end{proof}
\begin{thm}\label{thm:thm9.4}
When $p=7$, we have the following stable decompositions
of $BG$ so that
$\stackrel{X_1}{\leftarrow} \cdots \stackrel{X_s}{\leftarrow}
G$ means that $BG\sim X_1\vee \cdots \vee X_s$
\[
\stackrel{X_{0,0}}{\longleftarrow}
\begin{cases} \stackrel{X_{4,4}}{\longleftarrow}
RV_3 \stackrel{X_{6,0}\vee X_{2,2}}{\longleftarrow}
RV_2 \stackrel{M(2)\vee L(2,2)\vee L(2,4)}{\longleftarrow}
O'N{\co}2 \stackrel{\stackrel{X_{6,0}\vee X_{4,4}}
{\vee X_{4,1}\vee X_{6,3}}}{\longleftarrow} O'N \\
\quad \\
\stackrel{X_{6,0}\vee X_{4,4}}{\longleftarrow}
RV_1 \stackrel{M(2)}{\longleftarrow}
Fi_{24} \stackrel{X_{6,0}\vee X_{6,3}\vee X_{2,2}}{\longleftarrow}
Fi_{24}' \stackrel{M(2)\vee L(2,2)\vee L(2,4)}
{\longleftarrow} He{\co}2
\end{cases} \]
\[\qquad \qquad \qquad \stackrel{
X_{3,0}\vee X_{5,2}\vee X_{3,3}
\vee X_{5,5}\vee L(2,3)\vee L(1,3)}{\longleftarrow}He.\]
\end{thm}
We write down the cohomology of stable summands.
At first we see that $H^*(X_{0,0})\cong
H^*(BRV_3)\cap H^*(BRV_1)\cong DA.$
Here note that the elements
$a^2=(y_1^2+y_2^2)^2v^4$ in \fullref{sec:sec7}
and $\wbar b^2=y_1^2y_2^2v^4$ in \fullref{sec:sec8}
are not equivalent under the action of $GL_2({\mathbb F}_7)$,
because $y_1^2+y_2^2$ is irreducible in ${\mathbb Z}/7[y_1,y_2]$ while $y_1y_2$ is not.
From the cohomologies $H^*(BRV_3)$ and $H^*(BRV_2)$, we get
$H^*(X_{4,4})\cong DA\{a^2,a^4\}$ and
$H^*(X_{6,0}\vee X_{2,2})\cong DA\{a,a^3,a^5\}$.
On the other hand, we know $H^*(X_{6,0})$
from the cohomology $H^*(BRV_1)$. Thus we get the following
lemma.
\begin{lemma}\label{lem:lem9.5}
There are isomorphisms of cohomologies
\begin{align*}
H^*(X_{0,0})& \cong DA, H^*(X_{4,4})\cong DA\{a^2,a^4\} \\
H^*(X_{6,0})& \cong DA\{D_2\}\cong DA\{a^3\},
H^*(X_{2,2})\cong DA\{a,a^5\}.
\end{align*}
\end{lemma}
Let us write $M\{a\}=DA\{1,C,\ldots,C^{p-1}\}\{a\}$.
From the facts that $D_2=CV$, $D_1=C^p+V$ and
$D_2=C(D_1-C^p)=CD_1-C^{p+1}$,
we have two decompositions
\[ CA\{a\}\cong DA\{1,C,\ldots,C^p\}\{a\}
\cong DA\{a\}\oplus M\{Ca\} \cong M\{a\}\oplus DA\{Va\}.\]
From the cohomology of
$H^*(Fi_{24})$, we know the following lemma.
\begin{lemma}\label{lem:lem9.6}
$H^*(M(2))\cong M\{C\}.$
\end{lemma}
Comparing the cohomology
$H^*(B(He{\co}2))\cong
H^*(BFi_{24}')\oplus M\{\wbar a^2,\wbar c'\wbar b,
\wbar c'\wbar b^2\},$
we have the isomorphisms
\[H^*(M(2))\cong M\{\wbar a^2\}, H^*(L(2,2)\vee L(2,4))
\cong M\{\wbar c'\wbar b,\wbar c'\wbar b^2\}.\]
From $H^*(BFi_{24}')\cong
H^*(BFi_{24})\oplus DA\{\wbar a^2V,
\wbar c'\wbar bV,\wbar c'\wbar b^2V\}\oplus CA\{\wbar a\wbar d\}$,
we also know that
\[H^*(X_{6,3})\cong CA\{\wbar a\wbar d\},
H^*(X_{6,0}\vee X_{2,2})\cong DA\{\wbar a^2V,\wbar c'\wbar bV,
\wbar c'\wbar b^2V\}.\]
We also get $H^*(BFi_{24})\cong H^*(BRV_1)\oplus M\{\wbar c''\}$
and $H^*(M(2))\cong M\{\wbar c''\}.$
Next consider the cohomology of groups studied in \fullref{sec:sec7}
eg $O'N$.
There is the isomorphism
\[H^*(BO'N)\cong H^*(BO'N{\co}2)\oplus
DA\{b,ab,a^2b\}
\oplus CA\{d,da,da^2\}.\]
Indeed, we have
\begin{align*}
H^*(X_{6,0}\vee X_{4,4}) & \cong
DA\{b,ab,a^2b\}\cong DA\{a^2,a^3,a^4\} \\
H^{*}(X_{6,3}) & \cong CA\{da\} \\
H^*(X_{4,1}) & \cong CA\{d,da^2\}.
\end{align*}
We also have the isomorphism
$H^*(BO'N{\colon}2)\cong H^*(BRV_2)\oplus M\{C',C'a,C'a^2\}$
and $H^*(M(2)\vee L(2,2)\vee L(2,4))\cong M\{C',C'a,C'a^2\}$.
Recall that
\[H^*(BE)^{3SD_{32}}\cong CA\{1,a^2,a^4/V\}
\cong DA\{1,a^2,a^4\}\oplus M\{C,a^2C,a^4/V\},\]
in fact $H^*(M(2)\vee L(2,2)\vee L(2,4))\cong M\{C,a^2C,a^4/V\}$.
\section[The cohomology of M for p=13]
{The cohomology of ${\mathbb M}$ for $p=13$}
\label{sec:sec10}
In this section, we consider the case $p=13$ and $G={\mathbb M}$
the Fisher--Griess Monster group.
It is known that $W_G(E)\cong 3\times 4S_4$.
The $G$--conjugacy classes of $A$--subgroups are
divided into two classes;
one is $F^{\mathrm{ec}}$--radical and the other is
not. The class of $F^{\mathrm{ec}}$--radical groups contains
$6$ $E$--conjugacy classes (see Ruiz--Viruel \cite{R-V}).
(The description of \cite[(4.1)]{T-Y} was not correct, and the
description of $H^*(B{\mathbb M})$ in \cite[Theorem 6.6]{T-Y} was not
correct.) The Weyl group $W_G(A)\cong SL_2({\mathbb F}_{13}).4$ for
each $F^{\mathrm{ec}}$--radical subgroup $A$.
Since $S_4\cong PGL_2({\mathbb F}_3)$ \cite{S}, we have the presentation
\[S_4={\langle}x,y,z|x^3=y^3=z^2=(xy)^2= 1, zxz^{-1}=y{\rangle}.\]
(Take $x=u,y=u'$ in \fullref{lem:lem4.8}, and $z=w$ in \fullref{sec:sec5}.)
By arguments in the proof of Suzuki \cite[Chapter~3~(6.24)]{S}, we can take
elements $x,y,z$ in $GL_2({\mathbb F}_{13})$ by
\begin{equation}\label{eqn:eqn10.1}
x=\left(\begin{smallmatrix}
3&0\\ 0&9\\ \end{smallmatrix}\right) , \qua
y=\left(\begin{smallmatrix}
5&-4\\ -2&7\\ \end{smallmatrix}\right), \qua
z=\left(\begin{smallmatrix}
2&2\\ 1&-2\\ \end{smallmatrix}\right),
\end{equation}
so that we have
\[x^3=y^3=1,\ zxz^{-1}=y,\ (xy)^2=-1,\ z^2=\diag(6,6).\]
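For instance,
$z^2=\left(\begin{smallmatrix}2&2\\ 1&-2\end{smallmatrix}\right)^2
=\left(\begin{smallmatrix}6&0\\ 0&6\end{smallmatrix}\right)=\diag(6,6)$
and $x^3=\diag(3^3,9^3)=\diag(27,729)=\diag(1,1)$ in $GL_2({\mathbb F}_{13})$;
similarly $\det(y)=35-8=27=1$ and $\operatorname{tr}(y)=12=-1$, so $y$ satisfies
$\lambda^2+\lambda+1=0$ and hence $y^3=1$.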
Hence we can identify
\begin{equation}\label{eqn:eqn10.2}
\quad 3\times 4S_4\cong {\langle}x,y,z{\rangle} \subset
GL_2({\mathbb F}_{13}).
\end{equation}
It is almost immediate that $H^*(BE)^{{\langle}x{\rangle}}$
(resp. $H^*(BE)^{{\langle}-1{\rangle}}$) is multiplicatively
generated by $y_1y_2,y_1^3,y_2^3$ (resp.
$y_1y_2,y_1^2,y_2^2$) as a ${\mathbb Z}/(13)[C,v]$--algebra.
Hence we can write
\begin{align}\label{eqn:eqn10.3}
H^*(BE)^{{\langle}x,-1{\rangle}}& \cong {\mathbb Z}/(13)[C,v]
\bigl\{\{1,y_1y_2,\ldots,(y_1y_2)^5\}\{1,(y_1y_2)^6,y_1^6,y_2^6\},\\ \nonumber
& \qua y_1^{12},y_2^{12}, y_1^{12}y_2^6,y_1^6y_2^{12}\bigr\}.
\end{align}
For the invariant $H^*(BE)^{{\langle}y,-1{\rangle}}$, we get a
similar
result
by replacing $y_i$ with $(z^{-1})^*y_i$, since $zxz^{-1}=y$.
Indeed $(z^{-1})^*{\co}H^*(BE)^{{\langle}x,-1{\rangle}}
\cong H^*(BE)^{{\langle}y,-1{\rangle}}$.
To seek invariants, we recall the relation between the $A$--subgroups
and elements in $H^2(BE;{\mathbb Z}/p)$. For
$0\not =y=\alpha y_1+\beta y_2\in H^2(BE;{\mathbb Z}/p)$, let
$A_y=A_{-(\alpha/\beta)}$ so that $y|A_y=0$. This induces
the $1-1$ correspondence,
\[(H^2(BE;{\mathbb Z}/p)-\{0\})/{\mathbb F}_p^*
\leftrightarrow \{A_i|i\in {\mathbb F}_p\cup\{\infty\}\},
\qua y \leftrightarrow A_y.\]
Considering the map
$g^{-1}A_i \stackrel{g}{\to} A_i \subset
E \stackrel {\beta^{-1}y}{\to} {\mathbb Z}/p,$
we easily see $A_{g^*y}=g^{-1}A_y$.
For example, the order $3$ element $x$ induces the maps
\begin{align*}
x^* \co & y_1-y_2\mapsto 3y_1-9y_2
\mapsto 9y_1-3y_2 \mapsto y_1-y_2 \\
x^{-1} \co & A_{y_1-y_2}={\langle}c,ab{\rangle}\to
{\langle}c,a^9b^{3}{\rangle}
\to {\langle}c,a^3b^{9}{\rangle}\to {\langle}c,ab{\rangle}.
\end{align*}
In particular $A_1,A_9,A_3$ are in the same $x$--orbit of
$A$--subgroups. Similarly the ${\langle}x{\rangle}$--conjugacy classes
of $A$--subgroups are given by
\[ \{A_0\}, \{A_{\infty}\}, \{A_1,A_3,A_9\},
\{A_2,A_5,A_6\}, \{A_4,A_{10},A_{12}\}, \{A_7,A_8,A_{11}\}. \]
The ${\langle}y{\rangle}$--conjugacy classes are just $\{zA_i\}$ for
${\langle}x{\rangle}$--conjugacy classes $\{A_i\}$.
\[ \{A_7=zA_0\},\{A_{12}\}, \{A_3,A_1,A_5\},
\{A_6,A_9,A_2\}, \{A_{11},A_8,A_{\infty}\},
\{A_0,A_{10},A_{4}\}. \]
Hence we have the ${\langle}x,y{\rangle}$--conjugacy classes
\[ C_1= \{A_1,A_2,A_3,A_5,A_6,A_9\},
C_2=\{A_0,A_4,A_{10},A_{12}\},
C_3=\{A_{\infty},A_7,A_8,A_{11}\}. \]
Finally we note that the ${\langle}x,y,z{\rangle}$--conjugacy classes are
the two classes $C_1$ and $C_2\cup C_3.$
Let us write the ${\langle}x{\rangle}$--invariant
\begin{align}\label{eqn:eqn10.4}
u_6 & =\Pi_{A_i\in C_1}(y_2-iy_1)=
(y_2-y_1)(y_2-2y_1)\cdots(y_2-9y_1) \\ \nonumber
& =y_2^6-9y_1^3y_2^3+8y_1^6.
\end{align}
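One can check the last equality directly: $\{1,3,9\}$ is the subgroup of cube
roots of unity in ${\mathbb F}_{13}^*$ and $\{2,5,6\}=2\{1,3,9\}$, so
\[u_6=(y_2^3-y_1^3)(y_2^3-2^3y_1^3)=(y_2^3-y_1^3)(y_2^3-8y_1^3)
=y_2^6-9y_1^3y_2^3+8y_1^6.\]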
Then $u_6$ is also invariant under
$y^*$ because the ${\langle}x,y{\rangle}$--conjugacy class
$C_1$ divides two ${\langle}y{\rangle}$--conjugacy classes
\[C_1=\{A_1,A_3,A_5\}\cup \{A_2,A_6,A_9\} \]
and the element $u_6$ is rewritten as
\[u_6=\lambda\bigl(\Pi_{i=0}^2y^{i*}(y_2-y_1)\bigr)
\bigl(\Pi_{i=0}^2y^{i*}(y_2-2y_1)\bigr) \text{ for }
\lambda\not =0\in {\mathbb Z}/(13).\]
We also note that $u_6|A_i=0$ if and only if $i\in C_1$.
Similarly the following elements are
${\langle}x,y{\rangle}$--invariant,
\begin{align}
u_8 & =\Pi_{A_i\in C_2\cup C_3}(y_2-iy_1)
=y_1y_2(y_2^6+9y_1^3y_2^3+8y_1^6) \\ \nonumber
u_{12} & =\Pi_{A_i\in C_2}(y_2-iy_1)^3=(y_2^4+y_1^3y_2)^3 \\ \nonumber
& =\lambda(\Pi_{i=0}^2x^{i*}y_2)(\Pi_{i=0}^2x^{i*}(y_2-4y_1))^3 \\ \nonumber
& =\lambda'(\Pi_{i=0}^2y^{i*}(y_2-12y_1))(\Pi_{i=0}^2y^{i*}y_2)^3\\ \nonumber
u_{12}' & =\Pi_{A_i\in C_3}(y_2-iy_1)^3=(y_1y_2^3+8y_1^4)^3.
\end{align}
Of course $(u_{12}u_{12}')^{1/3}=u_8$ and $u_6u_8=0$. Moreover direct
computation shows $u_6^2=u_{12}+5u_{12}'$.
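For example, $u_6u_8$ is the product of the linear forms attached to all
fourteen $A$--subgroups, so
\[u_6u_8=y_1\prod_{i\in{\mathbb F}_{13}}(y_2-iy_1)=y_1(y_2^{13}-y_1^{12}y_2)
=y_1y_2^{13}-y_1^{13}y_2=0\]
in $H^*(BE)$; and writing $s=y_2^3$, $t=y_1^3$, one checks that both $u_6^2$
and $u_{12}+5u_{12}'$ equal $s^4+8s^3t+6s^2t^2-st^3-t^4$.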
\begin{lemma}\label{lem:lem10.1}
$H^*(BE)^{{\langle}x,y{\rangle}}\cong
{\mathbb Z}/(13)[C,v]\{1,u_6,u_6^2,u_6^3,u_8,u_8^2,u_{12}\}.$
\end{lemma}
\begin{proof}
Recall \eqref{eqn:eqn10.3} to compute
\[H^*(BE)^{{\langle}x,y{\rangle}}\cong
H^*(BE)^{{\langle}x,-1{\rangle}}
\cap H^*(BE)^{{\langle}y,-1{\rangle}}.\]
Since $(z^{-1})^*(y_1y_2)^i\not =(y_1y_2)^i$ for $1\le i \le p-2$,
from \eqref{eqn:eqn10.3} we know invariants of the
lowest positive degree are of the form
\[u=\gamma y_2^6+\alpha y_2^3y_1^3+\beta y_1^6.\]
Then $u'=u-\gamma u_6$ is also invariant with $u'|A_{\infty}=0$.
Hence $u'|A_i=0$ for all $A_i\in C_3$. Thus we know
$u'=\lambda y_1^2(u_{12}')^{1/3}.$
But this is not ${\langle}y{\rangle}$--invariant for $\lambda\not =0$,
because
$(u')^3=\lambda^3 y_1^6u_{12}'$ is invariant, while $y_1^6$ is
not ${\langle}y{\rangle}$--invariant. Thus we know $u'=0$.
Any $16$--dimensional invariant is of the form
\[u=y_1y_2(\gamma y_2^6+\alpha y_2^3y_1^3+\beta y_1^6).\]
Since $u|A_0=u|A_{\infty}=0$, we know $u|A_i=0$ for all
$A_i\in C_2\cup C_3$. Hence we know
\[u=\gamma u_{12}^{1/3}(u_{12}')^{1/3}=\gamma u_8.\]
By similar arguments, we can prove the lemma
in degrees $\le 24$.
For $24 <\text{degree}< 48$, we need only consider elements
$u'$ with $u'=0 \,\operatorname{mod}(y_1y_2)$.
For example, $H^{18}(BE;{\mathbb Z}/13)^{{\langle}x,-1{\rangle}}$ is
generated by
\[\{(y_1y_2)^9, (y_1y_2)^3C, y_1^6C, y_2^6C, y_1^6y_2^{12},
y_1^{12}y_2^6\}.\]
But we can take off $y_1^6C{=}y_1^{18}$, $y_2^6C{=}y_2^{18}$
by $\lambda u_6^3+\mu Cu_6$ so that $u'{=}0 \,\operatorname{mod}(y_1y_2)$.
Hence we can take $u'$ so that $u_8$ divides $u'$
from the arguments similar to the case of degree=$16$.
Let us write $u'=u''u_8$. Then we can
write
\[u''=y_1^ky_2^k(\lambda_1y_1^6+\lambda_2y_1^3y_2^3)
+\lambda_3(y_1y_2)^{k-3}C,\]
taking off $\lambda y_1^ky_2^ku_6$ if necessary
since $u_6u_8=0$. (Of course, for $k{<}3$, $\lambda_3=0$.)
Since $u_8|A_i\not =0$ and $u_6|A_i=0$ for $i\in C_1$,
we have
\[(u''-y^*u'')|A_i=0\text{ for }i\in C_1.\]
Since $y^*y_1=5y_1-4y_2$ and $y^*y_2=-2y_1+7y_2$, we have
\begin{align*}
(u''-y^*u'')|A_i
& =\lambda_1(i^k-(5-4i)^{6+k}(-2+7i)^k) \\
& \qua +\lambda_2(i^{k+3}-(5-4i)^{k+3}(-2+7i)^{k+3}) \\
& \qua
+\lambda_3(i^{k-3}-(5-4i)^{k-3}(-2+7i)^{k-3}).
\end{align*}
We will prove that we can take all $\lambda_i=0$.
Let us write $U=u''-y^*u''$.
We then have the following cases.
\begin{enumerate}
\item The case $k=0$, ie degree=$14$.
If we take $i=1$,
\[U|A_1=\lambda_1(1-1)+\lambda_2(1-1^35^3)=0.\]
So we have $\lambda_2=0$. We also see $\lambda_1=0$ since
$U|A_3=\lambda_1(1-(5-12)^6)=2\lambda_1=0.$
\item The case $k=1$. Since $y_1y_2u_6-u_8=-18y_1^4y_2^4$,
we can assume $\lambda_2=0$
taking off $\lambda u_8^2$ if necessary.
We have also $\lambda_1=0$ from
$U|A_1=\lambda_1(1^1-1^75^1)=0.$
\item The case $k=2$. We get the result
$U|A_1=2\lambda_1+4\lambda_2$, $U|A_3=5\lambda_1+5\lambda_2$.
\item The case $k=3$. First considering $Cu_8$,
we may take $\lambda_3=0$. The result is given by
$U|A_1=6\lambda_1+2\lambda_2$ and $U|A_2=7\lambda_1+9\lambda_2$.
\item The case $k=4$. The result follows from
\[U|A_1=6\lambda_2+9\lambda_3,
U|A_3=6\lambda_1+6\lambda_2+6\lambda_3,
U|A_5=2\lambda_1-4\lambda_2+6\lambda_3.\]
\end{enumerate}
Hence the lemma is proved.\end{proof}
Next consider the invariant under ${\langle}x,y,\diag(6,6){\rangle}$.
The action for $\diag(6,6)$ is given by
$y_1^iy_2^jv^k \mapsto 6^{i+j+2k}y_1^iy_2^jv^k$.
Hence the invariant property implies $i+j+2k=0 \,\operatorname{mod}(12)$.
Thus $H^*(BE)^{{\langle}x,y,\diag(6,6){\rangle}}$ is generated
as a $CA$--algebra by
\[ \{1,u_6v^3,u_8v^2,u_{12},u_{12}',v^6\}. \]
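As a check of the weight condition: $u_6v^3$, $u_8v^2$, $u_{12}$, $u_{12}'$
and $v^6$ have $(i+j)+2k$ equal to $6+6$, $8+4$, $12$, $12$ and $12$
respectively, each $\equiv 0 \,\operatorname{mod}(12)$, whereas, for instance,
$u_6$ or $u_8v$ alone fail the condition.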
\begin{lemma}\label{lem:lem10.2}
The invariant
$H^*(BE)^{3\times 4S_4}\cong H^*(BE)^{{\langle}x,y,z{\rangle}}$
is isomorphic to
\[ CA\{1,u_6v^3,
(u_6v^3)^2,(u_6v^3)^3,u_8v^8,(u_8v^8)^2/V,(u_{12}-5u_{12}')\}.\]
\end{lemma}
\begin{proof}
We only need compute $z^*$--action. Since
\[3\times 4S_4\cong
{\langle}x,y,\diag(6,6){\rangle}{\colon}{\langle}z{\rangle},\]
the $z^*$--action on $H^*(BE)^{{\langle}x,y,\diag(6,6){\rangle}}$ is an
involution.
Let $u_6v^3=u_6(y_1,y_2)v^3$. First note
$u_6|A_{\infty}=u_6(0,y)=y^6.$
On the other hand, its $z^*$--action is
\begin{align*}
z^*u_6v^3|A_{\infty}& =u_6(2y_1+2y_2,y_1-2y_2)(-6v)^3|A_{\infty}
=u_6(2y,-2y)(-6v)^3\\
& =((-2)^6-9(-2)^3(2)^3+8(2)^6)(-6)^3y^6v^3 \\
&=(1+9+8)8y^6v^3=y^6v^3.
\end{align*}
Hence we know $u_6v^3$ is invariant, while $u_6v^9$ is not.
Similarly we know
\[u_8v^2|A_1=u_8(y,y)v^2=5y^8v^2,
\qua z^*u_8v^2|A_1=-5y^8v^2.\]
Hence $u_8v^8$ and $u_8^2v^4$ are invariant but $u_8v^2$ is not.
For the action $u_{12}$, we have
\begin{equation*}
\begin{array}{rclrclrclrcl}
u_{12}|A_0 &=&0, &u_{12}|A_{\infty}&=&y^{12}, &
u_{12}'|A_0&=&5y^{12}, & u_{12}'|A_{\infty}&=&0,\\
z^*u_{12}|A_0&=&y^{12}, & z^*u_{12}|A_{\infty}&=&0, &
z^*u_{12}'|A_0&=&0, & z^*u_{12}'|A_{\infty}&=&5y^{12}.
\end{array}
\end{equation*}
Thus we get
$z^*u_{12}=(1/5)u_{12}',\ z^*u_{12}'=5u_{12}.$
Hence we know $u_{12}+(1/5)u_{12}'$ and
$(u_{12}-(1/5)u_{12}')v^6=(u_6v^3)^2$
are invariants. Thus we can prove the lemma.
\end{proof}
\begin{thm}\label{thm:thm10.3}
For $p=13$, the cohomology $H^*(B{\mathbb M})$ is
isomorphic to
\[ DA\{1,u_8v^8,(u_8v^8)^2\}\oplus CA\{u_6v^3,
(u_6v^3)^2,(u_6v^3)^3,(u_{12}-5u_{12}'-3C)\}.\]
\end{thm}
\begin{proof}
Direct computation shows
\[ u_{12}-5u_{12}'=y_2^{12}-2y_2^9y_1^3+3y_2^3y_1^9+y_1^{12},\]
and hence $u_{12}-5u_{12}'-3C|A_1=0$,
indeed, the restriction
is zero for each $A_i\in C_1$.
The isomorphism
\[H^*(B{\mathbb M})\cong H^*(BE)^{3\times 4S_4}\cap i_{A_1}^{-*}
\bigl(H^*(BA_1)^{SL_4({\mathbb F}_{13}).4}\bigr),\]
completes the proof.
\end{proof}
The stable splitting is given by the following theorem.
\begin{thm}
We have the stable splitting
\begin{align*}
B{\mathbb M} & \sim X_{0,0}\vee X_{12,0}\vee X_{12,6}
\vee X_{6,3}\vee X_{8,8}
\vee M(2),\\
B(E{\co}3\times 4S_4) & \sim B{\mathbb M}\vee M(2)
\vee L(2,4)\vee L(2,8).
\end{align*}
\end{thm}
\begin{proof}
Let $H=E{\co}3\times 4S_4.$ Recall that
\[
X_{q,k}(H) =(S(A)^q\otimes v^k)\cap H^*(BH)
\qua 0\le q\le 12, 0\le k\le 11.\]
We already know
\[X_{*,*}(H)={\mathbb Z}/(13)\{ 1,u_8v^8,u_6v^3,u_6^2v^6,
u_{12}-5u_{12}'\}.\]
Hence $BH$ has the dominant summands in the theorem.
The normalizer groups of $A_0,A_1$ are given
\[N_H(A_0)=E{\co}{\langle}x,\diag(6,6){\rangle},
N_H(A_1)=E{\co}{\langle}\diag(6,6){\rangle}.\]
Hence the Weyl groups are
\[W_H(A_0)=U{\co}{\langle}\diag(1,3),\diag(6^2,6){\rangle},
W_H(A_1)=U{\co}{\langle}\diag(6^2,6){\rangle}.\]
From the arguments of \fullref{lem:lem4.11}, the non-dominant
summands induced from $BA_1$ are
$M(2)\vee L(2,4)\vee L(2,8).$
We also know the non-dominant summands from $BA_0$ are
$M(2)$. This follows from
\[ \overline{{\langle}\diag(1,3){\rangle}}y_2^{p-1}=
\sum_{i=0}^2(3^i)^ky_2^{p-1}\quad \text{for } y_2^{p-1}\in M_{p-1,k},\]
and this is nonzero mod($13$) if and only if $k=0 \,\operatorname{mod}(3)$;
indeed, since $3$ has order $3$ in ${\mathbb F}_{13}^*$, the sum $1+3^k+3^{2k}$
equals $3$ when $3|k$ and equals $1+3+9\equiv 0$ otherwise.
\end{proof}
\begin{rem}
It is known $H^*(Th)\cong DA$ for $p=5$ in \cite{T-Y}.
Hence the cohomology $H^*(BG)$ is explicitly known for all groups $G$ in
\fullref{thm:thm2.1} (4)--(7). For (1)--(3), see also
Tezuka--Yagita \cite{T-Y}.
\end{rem}
\section[Nilpotent parts of H*(BG,Z(p))]
{Nilpotent parts of $H^*(BG;{\mathbb Z}_{(p)})$}
\label{sec:sec11}
It is known that $p^2H^*(BE;{\mathbb Z})=0$ (see Tezuka--Yagita \cite{T-Y} and
Leary \cite{L2}) and
\[pH^{*{>}0}(BE;{\mathbb Z})\cong {\mathbb Z}/p\{pv,pv^2,\ldots\}.\]
In particular $H^{\mathrm{odd}}(BE;{\mathbb Z})$ consists entirely of (just) $p$--torsion.
There is a decomposition
\[H^{\mathrm{even}}(BE;{\mathbb Z})/p\cong H^*(BE)\oplus N \text{ with }
N={\mathbb Z}/p[V]\{b_1,\ldots,b_{p-3}\}\]
where $b_i=\operatorname{Cor}_{A_0}^E(u^{i+1})$, $|b_i|=2i+2$.
(Note that for $p=3$, $N=0$.)
The restriction images satisfy $b_i|A_j=0$
for all $j\in {\mathbb F}_p \cup \{\infty\}$.
For $g\in GL_2({\mathbb F}_p)$, the induced action
is given by $g^*(b_i)=\det(g)^{i+1}b_i$ by
the definition of $b_i$.
Note that
\[2=|y_i|{<}|b_j|=2(j+1){<}|C|=2p-2{<}|v|=2p .\]
So $g^*(y_i)$ is given by \eqref{eqn:eqn3.4}
also in $H^*(BE;{\mathbb Z})$ and $g^*(v)=\det(g)v \,\operatorname{mod}(p)$.
Hence we can identify that
\[H^*(BE)^H=(H^{\mathrm{even}}(BE;{\mathbb Z})/(p,N))^H \subset
H^{\mathrm{even}}(BE;{\mathbb Z}/p)^H.\]
Let us write the reduction map by
$q{\co}H^*(BE;{\mathbb Z})\to H^*(BE;{\mathbb Z}/p).$
\begin{lemma}\label{lem:lem11.1}
Let $H\subset GL_2({\mathbb F}_p)$ and $(|H|,p)=1$.
If $x\in H^*(BE)^H$, then there is $x'\in H^*(BE;{\mathbb Z})^H$
such that $q(x')=x$.
\end{lemma}
\begin{proof}
Let $x\in H^*(BE)^H$ and $G=E{\co}H$.
Then we can regard $x$ as an element of $H^*(BE;{\mathbb Z}/p)^H \cong
H^*(BG;{\mathbb Z}/p)$ with $\beta(x)=0$.
By the exact sequence
\[H^{\mathrm{even}}(BG;{\mathbb Z}_{(p)})\stackrel{q}{\to}
H^{\mathrm{even}}(BG;{\mathbb Z}/p)\stackrel{\delta}{\to} H^{\mathrm{odd}}(BG;{\mathbb Z}_{(p)}),
\]
we easily see that $x\in Image(q)$
since $q\delta(x)=\beta(x)=0$
and $q|H^{\mathrm{odd}}(BG;{\mathbb Z}_{(p)})$ is injective.
Since $H^*(BG;R)\cong H^*(BE;R)^H$ for $R={\mathbb Z}_{(p)}$ or
${\mathbb Z}/p$, we get the lemma.
\end{proof}
\begin{proof}[Proof of \fullref{thm:thm3.1}]
From Tezuka--Yagita \cite[Theorem 4.3]{T-Y} and Broto--Levi--Oliver \cite{B-L-O},
we have the isomorphism
\[H^*(BG;{\mathbb Z})_{(p)}\cong H^*(BE;{\mathbb Z})^{W_G(E)}
\cap _{A{\co}F^{\mathrm{ec}}-\text{radical}}i_A^{*-1}H^*(BA;{\mathbb Z})^{W_G(A)}.\]
The theorem is immediate from the above lemma and the fact that
$H^{\mathrm{even}{>}0}(BA;{\mathbb Z})\cong H^{*{>}0}(BA)$.
\end{proof}
Let us write $N(G)=H^*(BG;{\mathbb Z})\cap N$. Then
\[H^{\mathrm{even}}(BG;{\mathbb Z})/p \cong H^*(BG)\oplus N(G).\]
The nilpotent part $N(G)$ depends only on the group
$\operatorname{Det}(G)=\{\det(g)|g\in W_G(E)\}\subset {\mathbb F}_p^*$;
in fact, $N(G)=N^{W_G(E)}=N^{\operatorname{Det}(G)}$.
\begin{lemma}\label{lem:lem11.2}
If $\operatorname{Det}(G)\cong {\mathbb F}_p^*$ (eg $G=O'N,He,\ldots,RV_3$
for $p=7$, or $G={\mathbb M}$ for $p=13$), then
\[N(G)\cong {\mathbb Z}/p[V]\{b_iv^{p-2-i}|1\le i\le p-3\}.\]
\end{lemma}
\begin{lemma}\label{lem:lem11.3}
Let $G$ have a $7$--Sylow subgroup $E$. Then, we have
\[N(G)=\begin{cases}
{\mathbb Z}/7[V]\{b_1v^4,b_2v^3,b_3v^2,b_4v\}
\text{ if }\operatorname{Det}(G)={\mathbb F}_7^*
\\
{\mathbb Z}/7[v^3]\{b_1v,b_2,b_3v^2,b_4v\} \text{ if }\operatorname{Det}(G)\cong {\mathbb Z}/3
\\
{\mathbb Z}/7[v^2]\{b_1,b_2v,b_3,b_4v\} \text{ if }\operatorname{Det}(G)\cong {\mathbb Z}/2\\
{\mathbb Z}/7[v]\{b_1,b_2,b_3,b_4\} \text{ if }\operatorname{Det}(G)\cong \{1\}.
\end{cases}
\]
\end{lemma}
Now we consider the odd dimensional elements.
Recall that $$H^{\mathrm{odd}}(BA;{\mathbb Z})\cong {\mathbb Z}/p[y_1,y_2]\{\alpha\},$$
where $\alpha=\beta(x_1x_2)\in H^*(BA;{\mathbb Z}/p)\cong {\mathbb Z}/p[y_1,y_2]\otimes
\Lambda(x_1,x_2)$ with $\beta(x_i)=y_i$.
Of course $g^*(\alpha)=\det(g )\alpha$ for $g\in \Out(A)$.
For example $H^{\mathrm{odd}}(B(A{\colon}Q_8))\cong
H^*(B(A{\colon}Q_8))\{\alpha\}$
since $\operatorname{Det}(A{\co}Q_8)=\{1\}$.
Recall the Milnor operations
$Q_{i+1}=P^{p^i}Q_i-Q_iP^{p^i}$, $Q_0=\beta$. It is known that
\[Q_1(\alpha)=y_1^py_2-y_1y_2^p=\tilde D_2' \text{ with }
(\tilde D_2')^{p-1}=\tilde D_2.\]
The submodule of $H^*(X;{\mathbb Z}_{(p)})$ generated by
(just) $p$--torsion additive generators
can be identified with $Q_0H^*(X;{\mathbb Z}/p)$.
Since $Q_iQ_0=-Q_0Q_i$, we can extend the map
\cite[page 377]{Y1}
\[Q_i{\co}Q_0H^*(X;{\mathbb Z}/p) \stackrel{Q_i}{\to}
Q_0H^*(X;{\mathbb Z}/p)\subset H^*(X;{\mathbb Z}_{(p)}).\]
Since all elements in $H^{\mathrm{odd}}(BA;{\mathbb Z})$ are
(just) $p$--torsion,
we can define the map
$$Q_1{\co}H^{\mathrm{odd}}(BA;{\mathbb Z})\to H^{\mathrm{even}}(BA;{\mathbb Z})=H^{\mathrm{even}}(BA).$$
Moreover this map is injective.
\begin{lemma}[Yagita \cite{Y1}]
\label{lem:lem11.4}
Let $G$ have the $p$--Sylow subgroup $A=({\mathbb Z}/p)^2$.
Then
\[Q_1\ {\co}\ H^{\mathrm{odd}}(BG;{\mathbb Z}_{(p)}) \cong
(H^{\mathrm{even}}(BG)\cap J(G)),\]
with $J(G)=\mathrm{Ideal}(y_1^py_2-y_1y_2^p)\subset H^{\mathrm{even}}(BA).$
\end{lemma}
\begin{cor} \label{cor:cor11.5} For $p=3$, there are isomorphisms
\begin{align*}
H^{\mathrm{odd}}(BA;{\mathbb Z})^{{\mathbb Z}/8} & \cong
S\{b,a'b,a,(a_1-a_2)\}\{\alpha\} \\
H^{\mathrm{odd}}(BA;{\mathbb Z})^{D_8} & \cong
S\{1,a,a_1,a'\}\{b\alpha\} \\
H^{\mathrm{odd}}(BA;{\mathbb Z})^{SD_{16}} & \cong
S\{1,a'\}\{b\alpha\}.
\end{align*}
\end{cor}
\begin{proof}
We only prove the case $G=A{\co}{\mathbb Z}/8$ since the proofs of
the other cases are similar.
Note that in $\S 5$ the
element $Q_1(\alpha)$ is written as $b$
and $b^2=a_1a_2$. Recall $S={\mathbb Z}/3[a_1+a_2,a_1a_2]$.
Hence we get
\begin{align*}
H^*(BA)^{{\langle}l{\rangle}}\cap J(G) & \cong
S\{1,a',ab,(a_1-a_2)b\}\cap {\rm Ideal}(b)\ \\
& =S\{b^2,b^2a',ab,(a_1-a_2)b\} \\
& = S\{b,ba',a,(a_1-a_2)\}\{Q_1(\alpha)\}.
\end{align*}
The corollary follows.
\end{proof}
By a result of Lewis (see \cite{L2,T-Y}), we can write
\[H^{\mathrm{odd}}(BE;{\mathbb Z})\cong {\mathbb Z}/p[y_1,y_2]/
(y_1\alpha_2-y_2\alpha_1,
y_1^p\alpha_2-y_2^p\alpha_1)\{\alpha_1,\alpha_2\},
\] where $|\alpha_i|=3$.
It is also known that $Q_1(\alpha_i)=y_iv$ and
$Q_1{\co}H^{\mathrm{odd}}(BE;{\mathbb Z}_{(p)})\to H^{\mathrm{even}}(BE)\subset
H^{\mathrm{even}}(BE;{\mathbb Z})/p$
is injective \cite{Y1}. Using this we can prove the
following lemma.
\begin{lemma}[Yagita \cite{Y1}]
\label{lem:lem11.6}
Let $G$ have the $p$--Sylow subgroup
$E$. Then
\[Q_1 {\co}\ H^{\mathrm{odd}}(BG) \cong
(H^{\mathrm{even}}(BG)\cap J(G))\]
with $J(G)={\rm Ideal}(y_iv)\subset H^{\mathrm{even}}(BE).$
\end{lemma}
From the above lemma we easily compute the odd
dimensional elements.
Note that
\[D_2=CV\not \in J(E)\text{ but }
D_2^2=C^2V^2=(Y_1^2+Y_2^2-Y_1Y_2)V^2\in J(E).\]
Let us write $\alpha =(Y_1y_1^{p-2}\alpha _1
+Y_2y_2^{p-2}\alpha _2
-Y_1y_2^{p-2}\alpha _2)Vv^{p-2}$ so that $Q_1(\alpha)=D_2^2$.
\begin{cor} \label{cor:cor11.7}
$H^{\mathrm{odd}}(B^2F_4(2)';{\mathbb Z}_{(3)})\cong DA\{\alpha,\alpha '\}$
with $\alpha '=(y_1\alpha _1+y_2\alpha _2)v.$
\end{cor}
\begin{proof}
Recall that $H^*(B^2F_4(2)')\cong DA\{1,(Y_1+Y_2)V\}$
from the remark of \fullref{prop:prop6.3}. The
result is easily obtained
from $Q_1(\alpha)=D_2^2,Q_1(\alpha')=(Y_1+Y_2)V$.
\end{proof}
\begin{cor} \label{cor:cor11.8} There are isomorphisms
\begin{align*}
H^{\mathrm{odd}}(BRV_3;{\mathbb Z}_{(7)}) & \cong DA\{a,a^3,a^5\}\{\alpha'\}\\
H^{\mathrm{odd}}(BRV_2;{\mathbb Z}_{(7)}) & \cong DA\{1,a,\ldots,a^5\}\{\alpha
'\},
\end{align*}
with $\alpha'=(y_1\alpha_1+y_2\alpha_2)v$.
\end{cor}
\begin{proof}
We can easily compute
\[Q_1(\alpha')=Q_1((y_1\alpha_1+y_2\alpha_2)v)
=(y_1Q_1(\alpha_1)+y_2Q_1(\alpha_2))v
=(y_1^2+y_2^2)v^2=a.\]
Recall that $H^*(BRV_3)\cong DA\{1,a^2,a^4\}$. We get
\[H^*(BRV_3)\cap{\rm Ideal}(y_iv)=DA\{D_2^2,a^2,a^4\}
=DA\{a^5,a,a^3\}(Q_1\alpha '),\]
and the corollary follows.
\end{proof}
\begin{cor} \label{cor:cor11.9}
$H^{\mathrm{odd}}(BRV_1;{\mathbb Z}_{(7)})\cong DA\{\wbar b,\wbar b^3,\wbar b^5\}
\{\alpha''\}
\oplus DA\{\alpha\}$
where $\alpha''=y_1v\alpha_2$.
\end{cor}
\begin{proof}
Recall \fullref{cor:cor8.8}. We have
$C\wbar c''=C(Y_1+Y_2-2C)=-Y_1^2-Y_2^2+2Y_1Y_2.$
Hence we can see $Q_1(\alpha)=-D_2\wbar c''V$.
\end{proof}
\begin{cor} \label{cor:cor11.10}
The cohomology $H^{\mathrm{odd}}(B{\mathbb M};{\mathbb Z}_{(13)})$ is isomorphic to
\[ DA\{\alpha,\alpha_8,(u_8v^8)\alpha_8\}
\oplus CA\{\alpha _6,
(u_6v^3)\alpha_6,(u_6v^3)^2\alpha_6,\alpha_{12}\},\]
where
\begin{align*}
\alpha_8 &=y_2(y_2^6+9y_2^3y_1^3+8y_1^6)v^7\alpha_1 \\
\alpha_6 & =(y_2^5\alpha_2-9y_2^2y_1^3 \alpha_2+8y_1^5\alpha_1)v^2 \\ \nonumber
\alpha_{12} & =C(y_2^{11}\alpha_2
-2y_2^8y_1^3\alpha_2+3y_2^2y_1^9\alpha_2+y_1^{11}\alpha_1)v^{11}
-3\alpha/V.
\end{align*}
\end{cor}
\begin{proof}
It is almost immediate that
\[Q_1(\alpha_8)=u_8v^8,\ \ Q_1(\alpha_6)=u_6v^3,\ \
Q_1(\alpha_{12})=(u_{12}-5u_{12}'-3C)CV.\]
From \fullref{thm:thm10.3}, we get the corollary.
\end{proof}
\end{document}
\begin{document}
\title{A new class of high-order methods for multirate differential equations\thanks{Submitted to the editors DATE.
\funding{This work was supported in part by the U.S. Department of
Energy, Office of Science, Office of Advanced Scientific Computing
Research, Scientific Discovery through Advanced Computing (SciDAC)
program through the FASTMath Institute under Lawrence Livermore
National Laboratory Subcontract B626484.}}}
\begin{abstract}
This work focuses on the development of a new class of high-order
accurate methods for multirate time integration of systems of ordinary
differential equations. The proposed methods are based on a specific
subset of explicit one-step exponential integrators. More precisely,
starting from an explicit exponential Runge--Kutta method of the
appropriate form, we derive a multirate algorithm to approximate the
action of the matrix exponential through the definition of modified
``fast'' initial-value problems. These fast problems may be solved
using any viable solver, enabling multirate simulations through use of
a subcycled method. Due to this structure, we name these
\emph{Multirate Exponential Runge--Kutta} (MERK) methods.
In addition to showing how MERK methods may be derived, we provide
rigorous convergence analysis, showing that for an overall method of
order $p$, the fast problems corresponding to internal stages may be
solved using a method of order $p-1$, while the final fast problem
corresponding to the time-evolved solution must use a method of order
$p$. Numerical simulations are then provided to demonstrate the
convergence and efficiency of MERK methods with orders three through
five on a series of multirate test problems.
\end{abstract}
\begin{keywords}
multirate time integration,
multiple time stepping,
exponential integrators,
exponential Runge--Kutta methods,
\end{keywords}
\begin{AMS}
65L06, 65M20, 65L20
\end{AMS}
\section{Introduction}
\label{section1}
In this paper, we focus on the construction, analysis, and implementation of
efficient, highly accurate, multirate time stepping algorithms, based
on various classes of explicit one-step exponential integrators. These
algorithms may be applied to initial value problems (IVPs) of the form
\begin{equation} \label{eq1}
u'(t) = F(t,u(t)) = \mathcal{L}u(t) + \mathcal{N}(t,u(t)), \quad u(t_0)=u_0,
\end{equation}
on the interval $t_0< t \leq T$, where the vector field $F(t,u(t))$
can be decomposed into a linear part $\mathcal{L}u(t)$ comprising the ``fast''
time scale, and a nonlinear part $\mathcal{N}(t,u(t))$ comprising the ``slow''
time scale. Such systems frequently result from so-called
``multi-physics'' simulations that couple separate physical processes
together, or from the spatial semi-discretization of time-dependent
partial differential equations (PDEs). Our primary interest in this
paper lies in the case where the fast component is much less costly to
compute than the slow component, thereby opening the door for methods
that evolve each component with different time step sizes -- so-called
multirate (or multiple time-stepping, MTS) methods. This case is
common in practice when using a non-uniform grid for the spatial
semi-discretization of PDEs, or in parallel computations where the
fast component is comprised of spatially-localized processes but the
slow component requires communication across the parallel network.
In recent years, there has been renewed interest in the construction
of multirate time integration methods for systems of ODEs.
Generally, these efforts have focused on techniques to achieve orders
of accuracy greater than two, since second-order methods may be
obtained through simple interpolation between time scales. These
recent approaches broadly fit into two categories: methods that attain
higher-order through extrapolation of low-order methods
\cite{Bouzarth2010, constantinescu2010, Constantinescu2013}, and methods
that directly satisfy order conditions for partitioned and/or additive
Runge--Kutta methods \cite{Fok2016, Gunther2001, Gunther2016,
Knoth2012, Knoth2014, Kvaerno2000, Sandu2018, Sarshar2018,
Schlegel2009, Schlegel2012a, Schlegel2012b, Sexton2018,
Wensch2009}.
Of these, the latter category promises increased efficiency due to the
need to traverse the time interval only once. However, only very
recently have methods of this type been constructed that can achieve
full fourth-order accuracy \cite{Sandu2018, Sexton2018}, and we know of
no previous methods having order five or higher.
Among numerical methods that use the same time step for all components
of \eqref{eq1}, exponential integrators have shown great promise
in recent years \cite{CM02, HL97, HLS98, HO05b, HO05a, HO10, LO13,LO14b, LO14a,LO16,Luan17,Luan18}.
Most such methods require the approximation of products of
matrix functions with vectors, i.e., $\phi(\mathcal{L})\, v$, for
$\mathcal{L}\in \mathbb{R}^{d\times d}$ and $v\in \mathbb{R}^{d}$.
Inspired by recent results on local-time stepping methods for problems
related to \eqref{eq1} \cite{Gander2012, Grote2013b, Grote2010,
Grote2013a}, and motivated by the idea in \cite[Sect.~5.3]{HO11}
that establishes a multirate procedure for exponential \emph{multistep}
methods of Adams-type, here we derive multirate procedures for exponential
\emph{one-step} methods. Starting from an $s$-stage explicit exponential
Runge--Kutta (ExpRK) method applied to \eqref{eq1}, we employ the idea
of backward error analysis to define $s-1$ modified differential
equations whose exact solutions coincide with the ExpRK internal
stages. These modified differential equations may then be
evolved using standard ODE solvers at the fast time scale. We call
the resulting methods \emph{Multirate Exponential Runge--Kutta}
(MERK) methods.
The ability to construct modified ODEs for each slow ExpRK stage is
dependent on the form of the ExpRK method itself, and we
identify these restrictions within this manuscript.
Using this approach, we derive a general multirate algorithm
(Algorithm \ref{alg2}) that can be interpreted as a
particular implementation (without matrix functions) of explicit
exponential Runge-Kutta methods. With this algorithm in
hand, we perform a rigorous convergence analysis for the proposed
MERK methods. We additionally construct MERK schemes with orders of
accuracy three through five, based on some well-known ExpRK methods
from the literature.
We note that the resulting methods show strong similarities to the
\emph{MIS} methods in \cite{Knoth2012, Knoth2014, Schlegel2009,
Schlegel2012a, Schlegel2012b, Wensch2009} and the follow-on
\emph{RMIS} methods \cite{Sexton2018} and
\emph{MRI-GARK} methods \cite{Sandu2018}, in that the MERK algorithm
requires the construction of a set of modified ``fast'' initial-value
problems that must be solved to proceed between slow stages, and where
these modifications take the form of polynomials based on ``slow''
function data. While these approaches indeed result in similar
algorithmic structure, (R)MIS and MRI-GARK methods are based on
partitioned and generalized-structure additive Runge--Kutta theory
\cite{Sandu2015}, and as such their derivation requires satisfaction
of many more order conditions than MERK methods, particularly as the
desired method order increases, to the end that no MIS method of order
greater than three, and no RMIS or MRI-GARK methods of order greater
than four, have ever been proposed. Additionally, to obtain an overall
order $p$ method, all fast IVPs for (R)MIS and MRI-GARK methods must be
solved to order $p$, whereas the internal stages in MERK methods may
use an order $p-1$ solver. Finally, both (R)MIS and the MRI-GARK methods
require sorted abscissae $c_1\le c_2\le\cdots\le c_s$, a requirement
that is not present for MERK methods.
The outline of this paper is as follows: in Section~\ref{sec2}, we
derive the general class of exponential Runge-Kutta methods in a way
that facilitates construction of MERK procedures. In
Section~\ref{section3}, we then derive the general MERK algorithm for
exponential Runge--Kutta methods, and provide a rigorous convergence
analysis for these schemes. In Section~\ref{section4}, we derive
specific MERK methods based on existing exponential Runge--Kutta
methods. We present a variety of numerical examples in
Section~\ref{sec6} to illustrate the efficiency of the new MERK
schemes with order of accuracy up to five. The main contributions of
this paper are Algorithm~\ref{alg2}, convergence analysis for MERK
methods (Theorem \ref{theorem2}), and the proposed MERK schemes with
order of accuracy up to five.
\section{Motivation}
\label{sec2}
We begin with a general derivation of exponential Runge--Kutta methods \cite{HO05b,HO10}, which motivates a multirate procedure for solving \eqref{eq1}.
\subsection{Exponential Runge--Kutta methods}
\label{sec2.1}
When deriving ExpRK methods, it is crucial to represent the exact
solution of \eqref{eq1} at time $t_{n+1}=t_n +H$ using the
variation-of-constants formula,
\begin{equation} \label{eq3}
u(t_{n+1})=u(t_n +H)={\rm e}\hspace{1pt}^{H \mathcal{L}} u(t_n) + \int_{0}^{H} {\rm e}\hspace{1pt}^{(H-\tau)\mathcal{L}} \mathcal{N}(t_n+\tau, u(t_n+\tau)) \dd\tau.
\end{equation}
The integral in \eqref{eq3} is then approximated using a quadrature
rule having nodes $c_i$ and weights $b_i(H\mathcal{L})$ ($i=1,\ldots,s$), which
yields
\begin{equation} \label{eq3a}
u(t_{n+1})\approx {\rm e}\hspace{1pt}^{H \mathcal{L}} u(t_n) + H\sum_{i=1}^{s} b_i(H\mathcal{L})\, \mathcal{N}(t_n+c_i H, u(t_n+c_i H)).
\end{equation}
By applying \eqref{eq3} (with $c_i H$ in place of $H$), the unknown
intermediate values $u(t_n+c_i H)$ in \eqref{eq3a} can be represented
as
\begin{equation} \label{eq3b}
u(t_n+c_i H)={\rm e}\hspace{1pt}^{c_i H \mathcal{L}}u(t_n) + \int_{0}^{c_i H} {\rm e}\hspace{1pt}^{(c_i H-\tau)\mathcal{L}} \mathcal{N}(t_n+\tau, u(t_n+\tau)) \dd\tau.
\end{equation}
Again, one can use another quadrature rule with the same nodes $c_i$
as before (to avoid the generation of new unknowns) and new weights
$a_{ij}(H\mathcal{L})$ to approximate the integrals in \eqref{eq3b}. This gives
\begin{equation} \label{eq3c}
u(t_n+c_i H)\approx {\rm e}\hspace{1pt}^{c_i H \mathcal{L}}u(t_n) + H\sum_{j=1}^{s} a_{ij}(H\mathcal{L})\,\mathcal{N}(t_n+c_j H, u(t_n+c_j H)).
\end{equation}
Now, denoting the approximations $u_n \approx u(t_n)$ and $U_{n,i}
\approx u(t_n+c_i H)$, then from \eqref{eq3a} and \eqref{eq3c} one may
obtain the so-called \emph{exponential Runge--Kutta methods}
\begin{subequations} \label{eq4}
\begin{align}
U_{n,i}&= {\rm e}\hspace{1pt}^{c_i H \mathcal{L}}u_n +H \sum_{j=1}^{s}a_{ij}(H \mathcal{L})\, \mathcal{N}(t_n +c_j H, U_{n,j}), \quad i=1,\ldots,s, \label{eq4a} \\
u_{n+1} &= {\rm e}\hspace{1pt}^{H \mathcal{L}}u_n + H \sum_{i=1}^{s}b_{i}(H \mathcal{L})\, \mathcal{N}(t_n +c_i H, U_{n,i}). \label{eq4b}
\end{align}
\end{subequations}
The formula \eqref{eq4} is considered {\em explicit} when
$a_{ij}(H \mathcal{L})=0$ for all $i\leq j$ (thus $c_1=0$ and
consequently $U_{n,1}=u_n$). Throughout this paper we restrict our
attention to explicit exponential Runge--Kutta methods,
which can be reformulated as (see \cite{LO14b,LO12b}):
\begin{subequations} \label{eq:expRK}
\begin{align}
U_{n,i}&= u_n + c_i H \varphi _{1} ( c_i H \mathcal{L})F(t_n, u_n) +
H \sum_{j=2}^{i-1}a_{ij}(H \mathcal{L}) D_{n,j}, \ i=2,\ldots,s, \label{eq:expRKa} \\
u_{n+1}& = u_n + H \varphi _{1} ( H \mathcal{L})F(t_n, u_n) + H \sum_{i=2}^{s}b_{i}(H \mathcal{L}) D_{n,i} \label{eq:expRKb},
\end{align}
\end{subequations}
where
\begin{equation} \label{eq:Dni}
D_{n,i}= \mathcal{N}(t_n+c_i H, U_{n,i})- \mathcal{N}(t_n, u_n ), \qquad i=2,\ldots,s.
\end{equation}
Here, the coefficients $a_{ij}(H \mathcal{L})$ and $b_{i}(H \mathcal{L}) $ are often
linear combinations of the functions $\varphi _{k} (c_i H \mathcal{L})$ and
$\varphi_{k} (H \mathcal{L})$, respectively, wherein $\varphi_k (z)$ are given by
\begin{equation} \label{eq8}
\varphi_{k}(z)=\int_{0}^{1} {\rm e}\hspace{1pt}^{(1-\theta )z} \frac{\theta^{k-1}}{(k-1)!}\dd\theta , \qquad k\geq 1,
\end{equation}
and satisfy the recurrence relations
\begin{equation} \label{eq9}
\varphi_{k}(z)=\frac{\varphi_{k-1}(z)-\varphi_{k-1}(0)}{z}, \quad \varphi_{0}(z)={\rm e}\hspace{1pt}^z.
\end{equation}
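For scalar arguments the functions $\varphi_k$ can be evaluated directly from
\eqref{eq8}--\eqref{eq9}.  The following sketch (our own, purely illustrative)
uses the recurrence \eqref{eq9} together with the truncated Taylor series
$\varphi_k(z)=\sum_{j\ge 0} z^j/(j+k)!$ for small $|z|$, where the recurrence
suffers from cancellation.
\begin{verbatim}
import math

def phi(k, z, tol=1e-8, terms=25):
    """phi_k(z) for scalar z, via the recurrence or a Taylor series."""
    if abs(z) < tol:                      # phi_k(z) = sum_j z**j/(j+k)!
        return sum(z**j / math.factorial(j + k) for j in range(terms))
    val = math.exp(z)                     # phi_0(z)
    for i in range(1, k + 1):             # phi_i = (phi_{i-1} - 1/(i-1)!)/z
        val = (val - 1.0 / math.factorial(i - 1)) / z
    return val

# e.g. phi(2, 2.0) agrees with (exp(2) - 1 - 2)/4
print(phi(1, 1e-16), phi(2, 2.0), (math.exp(2) - 3.0) / 4.0)
\end{verbatim}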
\subsection{Adopting the idea of backward error analysis}
\label{sec2.2}
Motivated by the idea of \cite[Sect. 5.3]{HO11}, and recalling the
equations \eqref{eq3} and \eqref{eq3b}, we note that $u(t_{n+1})$ and
$u(t_n +c_i H)$ are the exact solutions of the differential equation
\begin{equation} \label{eq6}
v'(\tau)=\mathcal{L} v(\tau) + \mathcal{N}(t_n+\tau,u(t_n+\tau)), \quad v(0)=u(t_n),
\end{equation}
evaluated at $\tau=H$ and $\tau=c_i H$, respectively. In other words,
solving \eqref{eq6} exactly (by means of using the
variation-of-constants formula) on the time intervals $[0, H]$ and
$[0, c_i H]$ shows that $v(H)=u(t_{n+1})$ and $v(c_i H)=u(t_n +c_i H)$.
Unfortunately, explicit representations of these analytical solutions
are generally impossible to find, since $u(t_n)$ and $u(t_n+\tau)$ are
unknown values. This observation, however, suggests the use of
backward error analysis (see, for instance \cite[Chap.~IX]{HLW06}).
Given an exponential Runge-Kutta method \eqref{eq4}, we therefore
search for modified differential equations of the form \eqref{eq6},
such that their exact solutions at $\tau=c_i H$ and $\tau=H$ coincide
with the ExpRK approximations $U_{n,i}$ ($i=2,\ldots,s$) and
$u_{n+1}$, respectively. We may then approximate solutions to these
modified equations to compute our overall approximation of \eqref{eq1}.
\section{Multirate exponential Runge--Kutta methods}
\label{section3}
In this section, we construct a new multirate procedure based on
approximation of ExpRK schemes; we call the resulting algorithms
\emph{Multirate Exponential Runge-Kutta} (MERK) methods. Following
this derivation, we present a rigorous stability and convergence
analysis.
\subsection{Construction of modified differential equations}
\label{section3.1}
We begin with the construction of MERK methods, through definition of
modified differential equations corresponding with the ExpRK stages
$U_{n,i}$ ($i=2,\ldots,s$) and solution $u_{n+1}$.
\begin{theorem}\label{theorem1}
Assuming that the coefficients $a_{ij}(H \mathcal{L})$ and $b_{i}(H
\mathcal{L})$ of an explicit exponential Runge--Kutta method
\eqref{eq:expRK} may be written as linear combinations
\begin{equation} \label{eq7}
a_{ij}(H \mathcal{L})=\sum_{k=1}^{\ell_{ij}}\alpha^{(k)}_{ij}\varphi_{k}(c_i H\mathcal{L}), \quad
b_{i}(H \mathcal{L})=\sum_{k=1}^{m_i}\beta^{(k)}_{i}\varphi_{k}(H \mathcal{L})
\end{equation}
for some positive integers $\ell_{ij}$ and $m_i$, and where the functions
$\varphi _{k} (c_i H \mathcal{L})$ and $\varphi_{k} (H \mathcal{L})$
are given in \eqref{eq8}, then $U_{n,i}$ and $u_{n+1}$ are the
exact solutions of the linear differential equations
\begin{subequations} \label{eq14}
\begin{align}
v'_{n,i}(\tau)&=\mathcal{L}v_{n,i}(\tau) + p_{n,i}(\tau), && v_{n,i}(0)=u_n, \qquad i=2,\ldots,s, \label{eq14a} \\
v'_n(\tau)&=\mathcal{L}v_n(\tau) + q_{n}(\tau), && v_n(0)=u_n \hspace{2.3cm} \label{eq14b}
\end{align}
\end{subequations}
at the times $\tau= c_i H$ and $\tau= H$, respectively. Here
$p_{n,i}(\tau)$ and $q_{n}(\tau)$ are polynomials in $\tau$ given by
\begin{subequations} \label{eq13}
\begin{align}
p_{n,i}(\tau)&= \mathcal{N}(t_n, u_n)+\sum_{j=2}^{i-1} \Big(\sum_{k=1}^{\ell_{ij}}\dfrac{\alpha^{(k)}_{ij}}{c^k_i H^{k-1} (k-1)!}\tau^{k-1}\Big) D_{n,j}, \label{eq13a} \\
q_{n}(\tau) &= \mathcal{N}(t_n, u_n)+\sum_{i=2}^{s} \Big(\sum_{k=1}^{m_i}\dfrac{\beta^{(k)}_{i} }{H^{k-1}(k-1)!}\tau^{k-1} \Big) D_{n,i}. \label{eq13b}
\end{align}
\end{subequations}
\end{theorem}
\begin{proof}
By changing the integration variable to $\tau=H\theta$ in \eqref{eq8}, we obtain
\begin{equation} \label{eq10}
\varphi _{k}(z)=\frac{1}{H^k}\int_{0}^{H} {\rm e}\hspace{1pt}^{(H-\tau)\frac{z}{H}} \frac{\tau^{k-1}}{(k-1)!}\dd\tau , \qquad k\geq 1.
\end{equation}
Substituting $z=c_i H\mathcal{L}$ and $z=H\mathcal{L}$ into \eqref{eq10} and inserting the obtained results for $\varphi _{k} (c_i H \mathcal{L})$ and $\varphi_{k} (H \mathcal{L})$ into \eqref{eq7} shows that
\begin{subequations}\label{eq11}
\begin{align}
a_{ij}(H \mathcal{L})&=\int_{0}^{c_i H} {\rm e}\hspace{1pt}^{(c_i H-\tau)\mathcal{L}} \sum_{k=1}^{\ell_{ij}}\dfrac{\alpha^{(k)}_{ij}}{(c_i H)^{k} (k-1)!}\tau^{k-1}\dd\tau, \label{eq11a}\\
b_{i}(H \mathcal{L})&=\int_{0}^{H} {\rm e}\hspace{1pt}^{(H-\tau)\mathcal{L}} \sum_{k=1}^{m_i}\dfrac{\beta^{(k)}_{i}}{H^{k}(k-1)!}\tau^{k-1}\dd\tau. \label{eq11b}
\end{align}
\end{subequations}
Using the fact that $F(t_n, u_n)=\mathcal{L}u_n +\mathcal{N}(t_n, u_n)$ and
\begin{equation} \label{eq:varphi1}
\varphi _{1} (Z)=\frac{1}{H}\int_{0}^{H} {\rm e}\hspace{1pt}^{(H-\tau)\frac{Z}{H}} \dd\tau=({\rm e}\hspace{1pt}^Z- I) Z^{-1},
\end{equation}
we can write \eqref{eq:expRK} in an equivalent form,
\begin{subequations} \label{eq:expRKnew}
\begin{align}
U_{n,i}&= {\rm e}\hspace{1pt}^{c_i H \mathcal{L}} u_n + c_i H \varphi _{1} ( c_i H \mathcal{L})\mathcal{N}(t_n, u_n) +
H \sum_{j=2}^{i-1}a_{ij}(H \mathcal{L}) D_{n,j}, \label{eq:expRKnewa} \\
u_{n+1}& = {\rm e}\hspace{1pt}^{H \mathcal{L}}u_n+ H \varphi _{1} ( H \mathcal{L})\mathcal{N}(t_n, u_n) + H \sum_{i=2}^{s}b_{i}(H \mathcal{L}) D_{n,i} \label{eq:expRKnewb},
\end{align}
\end{subequations}
for $i=2,\ldots,s$. We now insert the integral form of
$\varphi_1(Z)$ in \eqref{eq:varphi1} (with $Z= c_i H \mathcal{L}$
and $Z=H \mathcal{L}$) and \eqref{eq11} into \eqref{eq:expRKnew} to
get
\begin{subequations} \label{eq12}
\begin{align}
U_{n,i}&= {\rm e}\hspace{1pt}^{c_i H \mathcal{L}} u_n +\int_{0}^{c_i H} {\rm e}\hspace{1pt}^{(c_i H-\tau)\mathcal{L}} p_{n,i}(\tau) \dd\tau, \quad i=2,\ldots,s, \label{eq12a} \\
u_{n+1} &= {\rm e}\hspace{1pt}^{H \mathcal{L}}u_n + \int_{0}^{H} {\rm e}\hspace{1pt}^{(H-\tau)\mathcal{L}} q_{n}(\tau) \dd\tau \label{eq12b}
\end{align}
\end{subequations}
with $p_{n,i}(\tau)$ and $q_{n}(\tau)$ as shown in \eqref{eq13}.
Clearly, these representations (variation-of-constants formulas)
show the conclusion of Theorem~\ref{theorem1}. In particular,
$U_{n,i}=v_{n,i}(c_i H)$ and $u_{n+1}=v_n(H)$. Thus one can consider
\eqref{eq14} as modified differential equations of the form \eqref{eq6}
whose exact solutions coincide with the ExpRK approximations.
\end{proof}
We note that the idea of using an ODE to represent a linear combination of matrix function--vector products $\varphi_{k}(A)v_k$ was also used in \cite{Niesen2012}.
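As a quick numerical illustration of Theorem~\ref{theorem1} in its simplest
instance (the one-stage exponential Euler step
$u_{n+1}=u_n+H\varphi_1(H\mathcal{L})F(t_n,u_n)$, for which
$q_n(\tau)=\mathcal{N}(t_n,u_n)$ is constant), the sketch below checks that an
accurate integration of the modified equation \eqref{eq14b} reproduces the
step computed with matrix functions.  The test problem, tolerances, and all
names are ours and purely illustrative.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

L = np.array([[-2.0, 1.0], [0.0, -40.0]])          # stiff linear part
N = lambda t, u: np.array([np.sin(t) + u[0]*u[1], np.cos(u[0])])
t0, H, u0 = 0.0, 0.1, np.array([1.0, 0.5])

E = expm(H * L)
phi1 = np.linalg.solve(H * L, E - np.eye(2))        # phi_1(HL)
u1_exprk = E @ u0 + H * phi1 @ N(t0, u0)            # exponential Euler step

# accurate RK4 integration of v' = Lv + N(t0,u0), v(0) = u0, on [0,H]
f = lambda v: L @ v + N(t0, u0)
v, m = u0.copy(), 10000
h = H / m
for _ in range(m):
    k1 = f(v); k2 = f(v + 0.5*h*k1); k3 = f(v + 0.5*h*k2); k4 = f(v + h*k3)
    v = v + (h/6.0) * (k1 + 2*k2 + 2*k3 + k4)

print(np.linalg.norm(v - u1_exprk))                 # near machine precision
\end{verbatim}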
\subsection{MERK methods and a multirate algorithm}
Clearly, the polynomials
$p_{n,i}(\tau)$ and $q_{n}(\tau)$ in
\eqref{eq13} are not given analytically since $D_{n,i}$ are unknowns;
however, these polynomials can be numerically determined as
follows. For simplicity, we illustrate our procedure by starting with
$n=0$ and $i=s=2$. In this case we know $u_0=u(t_0)$ and
$p_{0,2}(\tau)=\mathcal{N}(t_0, u_0)$, so one can solve the ODE
\eqref{eq14a} on $[0, c_2 H]$ to get an approximation to $U_{0,2}$,
$\widehat{U}_{0,2}\approx U_{0,2}=v_{0,2}(c_2 H)$. Then replacing the
unknown $U_{0,2}$ in \eqref{eq13b} by $\widehat{U}_{0,2}$, we have
\[
\hat{q}_0(\tau)=\mathcal{N}(t_0, u_0)+\sum_{k=1}^{m_2}\dfrac{\beta^{(k)}_{2} }{H^{k-1}(k-1)!}\tau^{k-1}
\widehat{D}_{0,2},
\]
where $\widehat{D}_{0,2}=\mathcal{N}(t_0+c_2 H, \widehat{U}_{0,2})-
\mathcal{N}(t_0, u_0)$. Since $\hat{q}_0(\tau) \approx q_0(\tau)$, we
may then solve the ODE \eqref{eq14b} on $[0, H]$ with $\hat{q}_0(\tau)$ in
place of $q_0(\tau)$ to obtain an approximation $\hat{u}_1 \approx u_1=v_0 (H)$.
This general process may be extended to larger numbers of stages $s\ge
2$ and for subsequent time steps $n\ge 0$. Approximating $\hat{u}_n \approx
u_n$ (with $\hat{u}_0=u_0$), then for $i=2,\ldots,s$, we define the
following perturbed linear ODEs over $\tau \in [0, c_i H]$:
\begin{equation} \label{eq13nc}
y'_{n,i}(\tau)=\mathcal{L}y_{n,i}(\tau) + \hat{p}_{n,i}(\tau), \quad y_{n,i}(0)=\hat{u}_n,
\end{equation}
with
\begin{align}
\label{eq:p_hat}
\hat{p}_{n,i}(\tau) &= \mathcal{N}(t_n, \hat{u}_n)+\sum_{j=2}^{i-1} \Big(\sum_{k=1}^{\ell_{ij}}\dfrac{\alpha^{(k)}_{ij}}{c^k_i H^{k-1} (k-1)!}\tau^{k-1}\Big) \widehat{D}_{n,j},\\
\label{eq:hatDni}
\widehat{D}_{n,i} &= \mathcal{N}(t_n+c_i H, \widehat{U}_{n,i}) - \mathcal{N}(t_n, \widehat{u}_n ),
\end{align}
that provide the approximations
\[
\widehat{U}_{n,i}\approx y_{n,i}(c_i H) \approx v_{n,i}(c_i H)=U_{n,i}.
\]
With these in place, we then solve the linear ODE
\begin{equation} \label{eq13nd}
y'_{n}(\tau)=\mathcal{L}y_{n}(\tau) + \hat{q}_{n}(\tau), \quad y_{n}(0)=\hat{u}_n
\end{equation}
over $\tau\in [0, H]$, with
\begin{equation} \label{eq:q_hat}
\hat{q}_{n}(\tau) = \mathcal{N}(t_n, \hat{u}_n)+\sum_{i=2}^{s} \Big(\sum_{k=1}^{m_i}\dfrac{\beta^{(k)}_{i} }{H^{k-1}(k-1)!}\tau^{k-1} \Big) \widehat{D}_{n,i},
\end{equation}
to obtain the approximate time-step solutions,
\[
\hat{u}_{n+1}\approx y_{n}(H) \approx v_{n}(H)=u_{n+1}.
\]
Since the above procedure uses a ``macro'' time step $H$ to integrate
the slow process, and a ``micro'' time step $h$ to integrate the fast
process (via solving the ODEs \eqref{eq13nc} and \eqref{eq13nd}), we
call the resulting methods \eqref{eq13nc}-\eqref{eq:hatDni}
\emph{Multirate Exponential Runge--Kutta (MERK)} methods. By
construction, these MERK methods offer several interesting features.
They reduce the solution of nonlinear problems \eqref{eq1} to the
solution of a sequence of linear differential equations \eqref{eq13nc} and
\eqref{eq13nd}, using very few evaluations of the nonlinear operator
$\mathcal{N}$. Thus they can be more efficient for problems where the
linear part is much less costly to compute than the nonlinear part.
Additionally, they do not require the computation of matrix functions,
as is the case with ExpRK methods. Moreover, these methods do not
require a starting value procedure as in multirate algorithms for
exponential multistep methods \cite{Demirel2015,HO11}.
We provide the following Algorithm~\ref{alg2} to give a succinct
overview of the implementation of our MERK methods.
\begin{algorithm}[h]
\caption{MERK method}
\label{alg2}
\begin{list}{$\bullet $}{}
\item \textbf{Input:} $\mathcal{L}$; $\mathcal{N}(t,u)$; $t_0$; $u_0$; $s$; $c_i$ ($i=1,\ldots,s$); $H$
\item \textbf{Initialization:} Set $n=0$; $\hat{u}_n=u_0$.\\
While $t_n<T$
\begin{enumerate}
\item Set $\widehat{U}_{n,1}=\hat{u}_n$.
\item For $i=2,\ldots,s$ do
\begin{enumerate}
\item Find $\hat{p}_{n,i}(\tau)$ as in \eqref{eq:p_hat}.
\item Solve \eqref{eq13nc} on $[0, c_i H]$ to obtain $\widehat{U}_{n,i}\approx y_{n,i}(c_i H)$.
\end{enumerate}
\item Find $\hat{q}_{n}(\tau)$ as in \eqref{eq:q_hat}.
\item Solve \eqref{eq13nd} on $[0, H]$ to get $\hat{u}_{n+1}\approx y_{n}(H).$
\item Update $t_{n+1}:=t_n+H$, $n:=n+1$.
\end{enumerate}
\item \textbf{Output:} Approximate values $\hat{u}_n\approx u_n, n=1,2,\ldots$ (where
$u_n$ is the numerical solution at time $t_n$ obtained by an ExpRK method).
\end{list}
\end{algorithm}
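To make the structure of Algorithm~\ref{alg2} concrete, we include a minimal
sketch of one macro step; the code and all names in it are ours and purely
illustrative.  Here the fast problems \eqref{eq13nc} and \eqref{eq13nd} are
both integrated with a fixed-step second-order Heun method, whereas, by the
convergence analysis below, an order-$p$ MERK method in general requires an
order-$p$ solver for the final fast problem \eqref{eq13nd}.
\begin{verbatim}
import numpy as np

def inner_solve(L, g, y0, T, m):
    """Integrate y'(tau) = L y(tau) + g(tau) on [0, T] with m Heun steps."""
    h, y, tau = T / m, y0.copy(), 0.0
    for _ in range(m):
        f0 = L @ y + g(tau)
        f1 = L @ (y + h * f0) + g(tau + h)
        y, tau = y + 0.5 * h * (f0 + f1), tau + h
    return y

def merk_step(L, N, tn, un, H, stages, qn, m):
    """One MERK macro step t_n -> t_n + H.

    stages: list of pairs (c_i, p_i), where p_i(tau, N0, D) evaluates the
            polynomial p_hat_{n,i}(tau) from N0 = N(t_n, u_n) and the dict D
            of previously computed differences D_hat_{n,j};
    qn:     plays the same role for q_hat_n(tau).
    """
    N0, D = N(tn, un), {}
    for i, (ci, pi) in enumerate(stages, start=2):
        Ui = inner_solve(L, lambda tau, pi=pi: pi(tau, N0, D), un, ci * H, m)
        D[i] = N(tn + ci * H, Ui) - N0
    return inner_solve(L, lambda tau: qn(tau, N0, D), un, H, m)

# Example instantiation with the second-order polynomials of Section 4:
#   c2     = 0.5
#   stages = [(c2, lambda tau, N0, D: N0)]
#   qn     = lambda tau, N0, D: N0 + tau / (c2 * H) * D[2]
\end{verbatim}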
\subsection{Stability and convergence analysis}
\label{sec:analysis}
Since MERK methods are constructed to approximate ExpRK methods, we
perform their error analysis in the framework of analytic semigroups
on a Banach space $X$, under the following assumptions (see
e.g., \cite{HO05a,LO14b}).
{\em Assumption 1. The linear operator $\mathcal{L}$ is the
infinitesimal generator of an analytic semigroup
${\rm e}\hspace{1pt}^{t\mathcal{L}}$ on $X$}. This implies that
\begin{equation} \label{eq:bound1}
\|{\rm e}\hspace{1pt}^{t\mathcal{L}}\|_{X\leftarrow X}\leq C, \qquad t\geq 0
\end{equation}
and consequently $\varphi_k(H \mathcal{L})$, $a_{ij}(H \mathcal{L})$
and $b_{i}(H \mathcal{L})$ are bounded operators. \\
{\em Assumption 2 (for high-order methods). The solution $u:[t_0,
T]\to X$ of \eqref{eq1} is sufficiently smooth with derivatives in
$X$, and $\mathcal{N}:[t_0, T]\times X \to X$ is sufficiently
Fr\'echet differentiable in a strip along the exact solution.} All
derivatives occurring in the remainder of this section are therefore
assumed to be uniformly bounded.\\
We analyze the error in MERK methods starting with the local error of
ExpRK methods. Therefore, we first consider \eqref{eq4} (in its
explicit form) with exact initial value, $u_n=u(t_n)$:
\begin{subequations} \label{eq3.17}
\begin{align}
\breve{U}_{n,i}&= {\rm e}\hspace{1pt}^{c_i H \mathcal{L}} u(t_n) +H \sum_{j=1}^{i-1}a_{ij}(H \mathcal{L})\, \mathcal{N}(t_n+c_j H, \breve{U}_{n,j}), \quad i=2,\ldots,s, \label{eq3.17a} \\
\breve{u}_{n+1} &= {\rm e}\hspace{1pt}^{H \mathcal{L}}u(t_n) + H \sum_{i=1}^{s}b_{i}(H \mathcal{L})\, \mathcal{N}(t_n+c_i H, \breve{U}_{n,i}) \label{eq3.17b}
\end{align}
\end{subequations}
and thus the MERK methods \eqref{eq14}--\eqref{eq13} are considered with polynomials
\begin{subequations} \label{eq3.18}
\begin{align}
\breve{p}_{n,i}(\tau)&= \mathcal{N}(t_n, u(t_n))+\sum_{j=2}^{i-1} \Big(\sum_{k=1}^{\ell_{ij}}\dfrac{\alpha^{(k)}_{ij}}{c^k_i H^{k-1} (k-1)!}\tau^{k-1}\Big) \breve{D}_{n,j}, \label{eq3.18a} \\
\breve{q}_{n}(\tau) &= \mathcal{N}(t_n, u(t_n))+\sum_{i=2}^{s} \Big(\sum_{k=1}^{m_i}\dfrac{\beta^{(k)}_{i} }{H^{k-1}(k-1)!}\tau^{k-1} \Big) \breve{D}_{n,i}, \label{eq3.18b}
\end{align}
\end{subequations}
where $\breve{D}_{n,i}= \mathcal{N}(t_n+c_i H, \breve{U}_{n,i})- \mathcal{N}(t_n, u(t_n) )$.\\
\noindent \textbf{Error notation.}
Since MERK methods consist of approximations to approximations, we
must clearly isolate the errors induced at each approximation level.
To this end, we let $\hat{e}_{n+1} = \hat{u}_{n+1} - u(t_{n+1})$ denote
the global error at time $t_{n+1}$ of a MERK method
\eqref{eq13nc}-\eqref{eq:hatDni}. Let $\breve{e}_{n+1} =
\breve{u}_{n+1} - u(t_{n+1})$ denote the local error at $t_{n+1}$ of
the base ExpRK method. Let
$\hat{\varepsilon}_{n,i}=\widehat{U}_{n,i}- y_{n,i}(c_i H)$ and
$\hat{\varepsilon}_{n+1} =\hat{u}_{n+1} -y_n (H)$ denote the (global)
errors of the ODE solvers when integrating \eqref{eq13nc} on $[0, c_i
H]$ and \eqref{eq13nd} on $[0, H]$ (note that
$\hat{\varepsilon}_{n,1}=\hat{u}_n - y_{n,1}(0)=0$ since $c_1=0$).
First, we may write total error as the sum of the errors in each approximation,
\begin{equation} \label{eq3.19}
\hat{e}_{n+1} =
\hat{\varepsilon}_{n+1}+ (y_n (H) -\breve{u}_{n+1})+ \breve{e}_{n+1}.
\end{equation}
Applying the variation-of-constants formula to
\eqref{eq13nd}, and using \eqref{eq:q_hat} and \eqref{eq11b}, we then write
\begin{equation} \label{eq3.20}
y_n(H)={\rm e}\hspace{1pt}^{H \mathcal{L}}\hat{u}_n + H \sum_{i=1}^{s}b_{i}(H \mathcal{L})\, \mathcal{N}(t_n+c_i H, \widehat{U}_{n,i}).
\end{equation}
Inserting $y_n (H)$ and $\breve{u}_{n+1}$ from \eqref{eq3.20} and
\eqref{eq3.17b} into \eqref{eq3.19} gives
\begin{equation} \label{eq3.21}
\hat{e}_{n+1} = {\rm e}\hspace{1pt}^{H \mathcal{L}}\hat{e}_n +\hat{\varepsilon}_{n+1}+ H \mathcal{S}_{n,s} + \breve{e}_{n+1},
\end{equation}
where
\begin{equation} \label{eq3.22}
\mathcal{S}_{n,s} =\sum_{i=1}^{s}b_{i}(H \mathcal{L})\big(\mathcal{N}(t_n+c_i H, \widehat{U}_{n,i}) -\mathcal{N}(t_n+c_i H, \breve{U}_{n,i}) \big).
\end{equation}
Next, we prove some preliminary results.
\begin{lemma} \label{lemma3.2}
Denoting $\widehat{E}_{n,i}=\widehat{U}_{n,i}-\breve{U}_{n,i}$
and $\breve{N}_{n,i}=\frac{\partial \mathcal{N}}{\partial u}(t_n
+ c_i H, \breve{U}_{n,i})$, we have
\begin{equation} \label{eq3.23}
\widehat{E}_{n,i}=\hat{\varepsilon}_{n,i}+ {\rm e}\hspace{1pt}^{c_i H \mathcal{L}} \hat{e}_n + H \sum_{j=1}^{i-1}a_{ij}(H\mathcal{L}) (\breve{N}_{n,j} \widehat{E}_{n,j} + \widehat{R}_{n,j})
\end{equation}
with
\begin{equation} \label{eq3.23a}
\widehat{R}_{n,j} =\int_{0}^{1} (1-\theta ) \frac{\partial^2 \mathcal{N}}{\partial u^2}(t_n + c_j H, \breve{U}_{n,j} + \theta \widehat{E}_{n,j})(\widehat{E}_{n,j}, \widehat{E}_{n,j})\dd\theta.
\end{equation}
Furthermore, under Assumption~2, the bound
\begin{equation} \label{eq3.24}
\|\widehat{R}_{n,j}\|\leqslant C \|\widehat{E}_{n,j}\|^2, \ \text{i.e.}, \ \widehat{R}_{n,j}=\mathcal{O}(\|\widehat{E}_{n,j}\|^2)
\end{equation}
holds as long as $\widehat{E}_{n,j}$ remains in a sufficiently small
neighborhood of $0$.
\end{lemma}
\begin{proof}
We first rewrite
\begin{equation} \label{eq3.25}
\widehat{E}_{n,i}=\widehat{U}_{n,i}- y_{n,i}(c_i H) + (y_{n,i}(c_i H) -\breve{U}_{n,i})=\hat{\varepsilon}_{n,i}+(y_{n,i}(c_i H) -\breve{U}_{n,i}).
\end{equation}
Here $y_{n,i}(c_i H)$ is the exact solution of \eqref{eq13nc}, which
can be represented by the variation-of-constants formula and then
rewritten by using \eqref{eq11a} and \eqref{eq:p_hat} as:
\begin{equation} \label{eq3.26}
y_{n,i}(c_i H)={\rm e}\hspace{1pt}^{c_i H \mathcal{L}} \hat{u}_n + H \sum_{j=1}^{i-1}a_{ij}(H \mathcal{L})\mathcal{N}(t_n+c_j H, \widehat{U}_{n,j}).
\end{equation}
Subtracting \eqref{eq3.17a} from \eqref{eq3.26} and inserting the
obtained result into \eqref{eq3.25} gives
\begin{equation} \label{eq3.27}
\widehat{E}_{n,i}=\hat{\varepsilon}_{n,i}+ {\rm e}\hspace{1pt}^{c_i H \mathcal{L}} \hat{e}_n + H \sum_{j=1}^{i-1}a_{ij}(H \mathcal{L}) \big(\mathcal{N}(t_n+c_j H, \widehat{U}_{n,j}) -\mathcal{N}(t_n+c_j H, \breve{U}_{n,j}) \big).
\end{equation}
Using the Taylor series expansion of $\mathcal{N}(t, u)$ at $(t_n +c_j H, \breve{U}_{n,j})$, we get
\begin{equation} \label{eq3.28}
\mathcal{N}(t_n+c_j H, \widehat{U}_{n,j}) -\mathcal{N}(t_n+c_j H, \breve{U}_{n,j})=\breve{N}_{n,j} \widehat{E}_{n,j} + \widehat{R}_{n,j}
\end{equation}
with the remainder $\widehat{R}_{n,j}$ given in \eqref{eq3.23a},
which clearly satisfies \eqref{eq3.24} due to Assumption~2.
Inserting \eqref{eq3.28} into \eqref{eq3.27} shows \eqref{eq3.23}.
\end{proof}
\begin{lemma} \label{lemma3.3}
Under Assumptions 1 and 2, there exist bounded operators
$\mathcal{T}_{n,i}(\hat{\varepsilon}_{n,i})$ and $\mathcal{B}_{n}
(\hat{e}_n)$ on $X$ such that
\begin{equation} \label{eq:Sni}
\mathcal{S}_{n,s} =\sum_{i=2}^{s}\big(b_{i}(H \mathcal{L})\breve{N}_{n,i}+ H\mathcal{T}_{n,i}(\hat{\varepsilon}_{n,i}) \big)\hat{\varepsilon}_{n,i}+ \mathcal{B}_{n} (\hat{e}_n)\hat{e}_n.
\end{equation}
Note that $\mathcal{T}_{n,i}$ also depends on $H$,
$\hat{\varepsilon}_{n,j}$, $a_{ij}(H \mathcal{L}) $,
$\breve{N}_{n,j}$ ($j=2,\ldots,i-1$), and $\hat{e}_{n}$; and
$\mathcal{B}_{n}$ also depends on $H$, $b_{i}(H \mathcal{L}), a_{ij}(H
\mathcal{L}) $, $c_i$, and $\breve{N}_{n,i}$.
\end{lemma}
\begin{proof}
Inserting \eqref{eq3.28} (with $i$ in place of $j$) into \eqref{eq3.22} gives
\begin{equation} \label{eq3.29}
\mathcal{S}_{n,s} =\sum_{i=1}^{s}b_{i}(H \mathcal{L})\big(\breve{N}_{n,i} \widehat{E}_{n,i} + \widehat{R}_{n,i}\big).
\end{equation}
Using the recursion \eqref{eq3.23} from Lemma~\ref{lemma3.2}, we
further expand $\widehat{E}_{n,i}$ as
\begin{equation} \label{eq3.30}
\begin{aligned}
\widehat{E}_{n,i}=\hat{\varepsilon}_{n,i} &+ H \sum_{j=1}^{i-1}a_{ij}(H\mathcal{L}) \breve{N}_{n,j} \hat{\varepsilon}_{n,j} + H^2 \sum_{j=1}^{i-1}a_{ij}(H\mathcal{L}) \breve{N}_{n,j} \sum_{k=1}^{j-1}a_{jk}(H\mathcal{L}) \breve{N}_{n,k} \widehat{E}_{n,k} \\
&+H\sum_{j=1}^{i-1}a_{ij}(H \mathcal{L}) \widehat{R}_{n,j} + H^2 \sum_{j=1}^{i-1}a_{ij}(H\mathcal{L}) \breve{N}_{n,j} \sum_{k=1}^{j-1}a_{jk}(H\mathcal{L}) \widehat{R}_{n,k} \\
&+ \Big( {\rm e}\hspace{1pt}^{c_i H \mathcal{L}} + H \sum_{j=1}^{i-1}a_{ij}(H\mathcal{L}) \breve{N}_{n,j} {\rm e}\hspace{1pt}^{c_j H \mathcal{L}} \Big)\hat{e}_{n}.
\end{aligned}
\end{equation}
Using \eqref{eq3.23} and \eqref{eq3.24}
($\widehat{R}_{n,i}=\mathcal{O}(\|\widehat{E}_{n,i}\|^2)$) and
proceeding by induction, one can complete the recursion \eqref{eq3.30}
for $\widehat{E}_{n,i}$. Inserting this recursion into \eqref{eq3.29}
(and noting that $\hat{\varepsilon}_{n,1}=0$) yields
\eqref{eq:Sni}. Based on the structure of \eqref{eq3.30} and
\eqref{eq3.29}, under the given assumptions it is clear that the
boundedness of $\mathcal{T}_{n,i}(\hat{\varepsilon}_{n,i})$ and
$\mathcal{B}_{n} (\hat{e}_n)$ follow from the boundedness of
$a_{ij}(H \mathcal{L}), b_{i}(H \mathcal{L})$, $\breve{N}_{n,i}$, and
$\widehat{R}_{n,i}$.
\end{proof}
We now present the main convergence result for MERK methods.
\begin{theorem}\label{theorem2}
Let the initial value problem \eqref{eq1} satisfy Assumptions
1--2. Consider for its numerical solution a MERK method
\eqref{eq13nc}--\eqref{eq:hatDni} that is constructed from an ExpRK
method of global order $p$.
We further assume that the ``fast'' ODEs \eqref{eq13nc} and \eqref{eq13nd}
associated with the MERK method are integrated with micro time step
$h=H/m$ by using ODE solvers that have global order of convergence $q$ and
$r$, respectively, and where $m$ is the number of fast steps per slow
step. Then, the MERK method is convergent, and has error bound
\begin{equation}\label{eq3.31}
\| u_n -u(t_n) \| \leq C_1 H^p + C_2 Hh^q + C_3h^r
\end{equation}
on compact time intervals \ $t_0 \leq t_n =t_0+nH \leq T$. Here,
the constant $C_1$ depends on $T-t_0$, but is independent of $n$ and
$H$; and the constants $C_2$ and $C_3$ also depend on the error
constants of the chosen ODE solvers.
\end{theorem}
\begin{proof}
We first note that since we only employ the fast ODE solvers on
time intervals $[0,c_iH]$ and $[0,H]$, then our assumption regarding
their accuracies of order $q$ and $r$ is typically
equivalent to \cite[Thm.~3.6]{hairer93}
\begin{subequations} \label{fastError}
\begin{align}
\label{fastError.a}
\hat{\varepsilon}_{n,i}
&= \frac{\tilde{c}_2}{\Lambda_i} h^q \left({\rm e}\hspace{1pt}^{\Lambda_i c_i H}-1\right)
= \tilde{c}_2 c_i \varphi_1 (\Lambda_i c_i H) H h^q
= c_2 H h^q,\\
\label{fastError.b}
\hat{\varepsilon}_n
&= \frac{\tilde{c}_3}{\Lambda} h^r \left({\rm e}\hspace{1pt}^{\Lambda H}-1\right)
= \tilde{c}_3 \varphi_1 (\Lambda H) H h^r
= c_3 H h^r,
\end{align}
\end{subequations}
(due to \eqref{eq:varphi1}) where
$\Lambda_i,\Lambda$ are the Lipschitz constants for the increment
functions of the ODE solvers applied to the problems \eqref{eq13nc}
and \eqref{eq13nd}, respectively.
For simplicity of notation, we denote $B_{n,i}=b_{i}(H \mathcal{L})\breve{N}_{n,i}+
H\mathcal{T}_{n,i}(\hat{\varepsilon}_{n,i})$. \\
Clearly, $B_{n,i}$ is a bounded operator and thus \eqref{eq:Sni} becomes
\begin{equation} \label{eq3.32}
\mathcal{S}_{n,s} =\sum_{i=2}^{s} B_{n,i}\hat{\varepsilon}_{n,i}+ \mathcal{B}_{n} (\hat{e}_n)\hat{e}_n.
\end{equation}
Inserting this into \eqref{eq3.21} gives
\begin{equation} \label{eq3.33}
\hat{e}_{n+1} = {\rm e}\hspace{1pt}^{H \mathcal{L}}\hat{e}_n + H\mathcal{B}_{n} (\hat{e}_n)\hat{e}_n +\breve{e}_{n+1}+ H\left(\sum_{i=2}^{s} B_{n,i}\hat{\varepsilon}_{n,i}+ \hat{\varepsilon}_{n+1}\right).
\end{equation}
Solving recursion \eqref{eq3.33} and using $\hat{e}_0=0$ (since
$\hat{u}_0 = u_0= u(t_0)$) finally yields
\begin{equation} \label{eq3.34}
\hat{e}_{n}=H\sum_{j=0}^{n-1} {\rm e}\hspace{1pt}^{(n-1-j)H \mathcal{L}} \mathcal{B}_j (\hat{e}_j)\hat{e}_j + \sum_{j=0}^{n-1} {\rm e}\hspace{1pt}^{jH \mathcal{L}}\Big(\breve{e}_{n-j} + H\sum_{i=2}^{s} B_{n-1-j,i}\hat{\varepsilon}_{n-1-j,i}+\hat{\varepsilon}_{n-j}\Big).
\end{equation}
Since the ExpRK method has global order $p$, we have the local
error $\breve{e}_{n-j}=\mathcal{O}(H^{p+1})$, and from
\eqref{fastError} we have
$\hat{\varepsilon}_{n-1-j,i}=\mathcal{O}(Hh^q)$,
$\hat{\varepsilon}_{n-j}=\mathcal{O}(Hh^r)$.
Using \eqref{eq:bound1} we derive from \eqref{eq3.34} that
\begin{equation} \label{eq3.35}
\|\hat{e}_{n}\| \leq H\sum_{j=0}^{n-1}C \|\hat{e}_j \| +
\sum_{j=0}^{n-1} C\big(c_1 H^{p+1} +c_2 H^2 h^{q} + c_3 H h^r \big).
\end{equation}
An application of a discrete Gronwall lemma to \eqref{eq3.35} results
in the bound \eqref{eq3.31}.
\end{proof}
\begin{remark}
Since $h=H/m$, Theorem~\ref{theorem2} implies that for a MERK method
\eqref{eq13nc}--\eqref{eq:hatDni} to converge with order $p$, the inner
ODE solvers for \eqref{eq13nc} and \eqref{eq13nd} must have orders $q
\ge p-1$ and $r\ge p$, respectively.
\end{remark}
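For instance, writing $h=H/m$ with a fixed number $m$ of fast steps per slow
step, the bound \eqref{eq3.31} becomes
\[
\| u_n -u(t_n) \| \leq C_1 H^p + \frac{C_2}{m^q} H^{q+1} + \frac{C_3}{m^r} H^r,
\]
so that the choices $q=p-1$ and $r=p$ are exactly what is needed for all three
terms to be $\mathcal{O}(H^p)$.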
\section{Derivation of MERK methods}
\label{section4}
Based on the theory presented in Section~\ref{section3}, we now derive
MERK schemes up to order 5, relying heavily on ExpRK schemes that fit
the assumption of Theorem~\ref{theorem1}. As we are interested in
problems with significant time scale separation $H\gg h$, we primarily
focus on stiffly-accurate ExpRK schemes. Since MERK methods involve
linear ODEs \eqref{eq13nc} and \eqref{eq13nd} with a fixed coefficient
matrix $\mathcal{L}$ for the fast portion, they are characterized by
the polynomials defined in \eqref{eq:p_hat} and
\eqref{eq:q_hat}. Therefore, when deriving MERK schemes we display
only their corresponding polynomials $\hat{p}_{n,i}(\tau)$ and
$\hat{q}_n(\tau)$.
\subsection{Second-order methods}
When searching for stiffly-accurate second-order ExpRK methods, we find the following scheme that uses $s=2$
stages (see \cite[Sect.~5.1]{HO05b}) and satisfies
Theorem~\ref{theorem1}:
\begin{equation} \label{eq:expRK2}
\begin{aligned}
U_{n,2}&= u_n + c_2 H \varphi _{1} ( c_2 H\mathcal{L})F(t_n, u_n) \\
u_{n+1}& = u_n + H \varphi _{1} ( H\mathcal{L})F(t_n, u_n) + H \tfrac{1}{c_2} \varphi _{2} ( H\mathcal{L})D_{n,2}.
\end{aligned}
\end{equation}
From this, using the conclusion of Theorem~\ref{theorem1}, we derive
the corresponding family of second-order MERK methods, which we call \texttt{MERK2}:
\begin{equation} \label{eq:MERK2}
\begin{aligned}
\hat{p}_{n,2}(\tau)&= \mathcal{N}(t_n, \hat{u}_n), \hspace{2cm} \tau \in [0, c_2 H] \\
\hat{q}_{n}(\tau) &= \mathcal{N}(t_n, \hat{u}_n)+\tfrac{\tau}{c_2 H} \widehat{D}_{n,2}, \quad \ \tau \in [0, H].
\end{aligned}
\end{equation}
Since we do not use this scheme in our numerical experiments, we do
not specify a value for $c_2$. We note that for these methods, the
fast time scale must be evolved a duration of $(1+c_2)H$ for each slow
time step.
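To illustrate how the polynomials in \eqref{eq:MERK2} follow from
Theorem~\ref{theorem1}: in \eqref{eq:expRK2} the only weight acting on a
difference term is $b_2(H\mathcal{L})=\tfrac{1}{c_2}\varphi_2(H\mathcal{L})$,
so that in \eqref{eq7} we have $m_2=2$, $\beta_2^{(1)}=0$ and
$\beta_2^{(2)}=\tfrac{1}{c_2}$.  Formula \eqref{eq13b} then gives
\[
q_n(\tau)=\mathcal{N}(t_n,u_n)+\frac{\beta_2^{(2)}}{H\,1!}\,\tau\,D_{n,2}
 =\mathcal{N}(t_n,u_n)+\frac{\tau}{c_2 H}\,D_{n,2},
\]
while the absence of coefficients $a_{2j}$ yields the constant polynomial
$p_{n,2}(\tau)=\mathcal{N}(t_n,u_n)$, in agreement with \eqref{eq:MERK2}.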
\subsection{Third-order methods}
Also from \cite[Sect.~5.2]{HO05b} we consider the following family of
third-order, three-stage, ExpRK methods that satisfy Theorem~\ref{theorem1}:
\begin{equation} \label{eq:expRK3}
\begin{aligned}
U_{n,2}&= u_n + c_2 H \varphi _{1} ( c_2 H\mathcal{L})F(t_n, u_n) \\
U_{n,3}&= u_n + \tfrac{2}{3}H \varphi _{1} ( \tfrac{2}{3} H\mathcal{L})F(t_n, u_n)+\tfrac{4}{9 c_2}H \varphi_{2} ( \tfrac{2}{3} H\mathcal{L})D_{n,2}, \\
u_{n+1}& = u_n + H \varphi _{1} ( H\mathcal{L})F(t_n, u_n) + H \tfrac{3}{2} \varphi _{2} ( H\mathcal{L})D_{n,3}.
\end{aligned}
\end{equation}
From these, we construct the following third-order \texttt{MERK3} scheme:
\begin{equation} \label{eq:MERK3}
\begin{aligned}
\hat{p}_{n2}(\tau)&= \mathcal{N}(t_n, \hat{u}_n), \hspace{2cm} \tau \in [0, c_2 H] \\
\hat{p}_{n3}(\tau)&= \mathcal{N}(t_n, \hat{u}_n)+ \tfrac{\tau}{c_2 H}\widehat{D}_{n,2}, \quad \tau \in [0, \tfrac{2}{3} H] \\
\hat{q}_{n}(\tau) &= \mathcal{N}(t_n, \hat{u}_n) + \tfrac{3\tau}{2 H} \widehat{D}_{n,3}, \quad \ \tau \in [0, H].
\end{aligned}
\end{equation}
In our numerical experiments with this scheme, we choose
$c_2=\tfrac{1}{2}$. Hence, the fast time scale must be evolved a
duration of $\frac{13}{6}H$ for each slow time step.
\subsection{Fourth-order methods}
To the best of our knowledge, the only 5-stage, stiffly-accurate
ExpRK method of order four was given in \cite[Sect. 5.3]{HO05b}.
However, this scheme does not satisfy Theorem~\ref{theorem1} due to
the coefficient
\[
a_{52}(H\mathcal{L})=\tfrac{1}{2}\varphi_{2}(c_5 H\mathcal{L})-\varphi_{3}(c_4 H\mathcal{L})+ \tfrac{1}{4}\varphi
_{2}(c_4 H\mathcal{L})-\tfrac{1}{2}\varphi_{3}(c_5 H\mathcal{L}),
\]
which is not a linear combination of $\{\varphi_{k}(c_5
H\mathcal{L})\}_{k=1}^5$. Therefore, we cannot use it to derive a
fourth-order MERK scheme. However, in a recently submitted paper
\cite{Luan19}, we have derived a family of fourth-order, 6-stage,
stiffly-accurate ExpRK methods (named \texttt{expRK4s6}), that
additionally fulfill Theorem~\ref{theorem1}:
\begin{equation}\label{eq:expRK4}
\begin{aligned}
U_{n,2} = u_n &+\varphi_1 (c_2 H\mathcal{L}) c_2 HF(t_n, u_n), \\
U_{n,k} = u_n &+ \varphi_1 (c_k H\mathcal{L}) c_k HF(t_n, u_n)+ \varphi_2 (c_k H\mathcal{L}) \tfrac{c^2_k}{c_2} H D_{n,2}, \quad \hspace{1cm} k=3, 4 \\
U_{n,j} = u_n &+ \varphi_1 (c_j H\mathcal{L})c_j H F(t_n, u_n)+ \varphi_{2} (c_j H\mathcal{L}) \tfrac{c^2_j}{c_3-c_4} H \big(\tfrac{-c_4}{c_3}D_{n,3} +\tfrac{c_3}{c_4}D_{n,4}\big)\\
&+ \varphi_{3} (c_j H\mathcal{L}) \tfrac{2c^3_j}{c_3-c_4} H \big(\tfrac{1}{c_3}D_{n,3} -\tfrac{1}{c_4}D_{n,4}\big), \quad \quad \hspace{2.2cm} j=5,6 \\
u_{n+1} = u_n &+ \varphi_1 (H\mathcal{L}) H F(t_n, u_n)+ \varphi_{2} (H\mathcal{L}) \tfrac{1}{c_5-c_6} H \big(\tfrac{-c_6}{c_5}D_{n,5} +\tfrac{c_5}{c_6}D_{n,6}\big) \\
&+\varphi_{3} (H\mathcal{L}) \tfrac{2}{c_5-c_6} H \big(\tfrac{1}{c_5}D_{n,5} -\tfrac{1}{c_6}D_{n,6}\big).
\end{aligned}
\end{equation}
Since the pairs of internal stages $\{U_{n,3}, U_{n,4}\}$ and $\{U_{n,5}, U_{n,6}\}$
are independent of one another (they can be computed simultaneously)
and have the same format, this scheme behaves like a 4-stage method.
Hence, instead of using 6 polynomials we need only 4 to derive the
following family of fourth-order MERK schemes, which we call \texttt{MERK4}:
\begin{equation} \label{eq:MERK4}
\begin{aligned}
\hat{p}_{n,2}(\tau) &= \mathcal{N}(t_n, \hat{u}_n), \quad \hspace{6.05cm} \tau \in [0, c_2 H] \\
\hat{p}_{n,3}(\tau) &= \hat{p}_{n,4}(\tau) = \mathcal{N}(t_n, \hat{u}_n)+ \tfrac{\tau}{c_2 H}\widehat{D}_{n,2}, \quad \hspace{2.8cm} \tau \in [0, c_3 H] \\
\hat{p}_{n,5}(\tau) &= \hat{p}_{n,6}(\tau) = \mathcal{N}(t_n, \hat{u}_n)+ \tfrac{\tau}{H}\big(\tfrac{-c_4}{c_3(c_3 - c_4)}\widehat{D}_{n,3} + \tfrac{c_3}{c_4(c_3 - c_4)}\widehat{D}_{n,4} \big)\\
& + \tfrac{\tau^2}{H^2}\big(\tfrac{1}{c_3(c_3-c_4)}\widehat{D}_{n,3} - \tfrac{1}{c_4(c_3 - c_4)}\widehat{D}_{n,4} \big), \quad \hspace{2.4cm} \tau \in [0, c_5 H] \\
\hat{q}_{n}(\tau) & = \mathcal{N}(t_n, \hat{u}_n) + \tfrac{\tau}{H}\big(\tfrac{-c_6}{c_5(c_5 - c_6)}\widehat{D}_{n,5} + \tfrac{c_5}{c_6(c_5 - c_6)}\widehat{D}_{n,6} \big) \\
&+ \tfrac{\tau^2}{H^2}\big(\tfrac{1}{c_5(c_5-c_6)}\widehat{D}_{n,5} - \tfrac{1}{c_6(c_5 - c_6)}\widehat{D}_{n,6} \big), \quad \hspace{2.4cm} \tau \in [0, H].
\end{aligned}
\end{equation}
For our numerical experiments, we choose the coefficients $c_2=c_3=\tfrac{1}{2}$,
$c_4=c_6=\tfrac{1}{3}$, and $c_5=\tfrac{5}{6}$. With this choice,
we may then solve the linear ODE \eqref{eq13nc} using the polynomial
$\hat{p}_{n,3}(\tau)$ on $[0, c_3 H]$ to get both $\widehat{U}_{n,3}
\approx U_{n,3}=v_{n,3}(c_3 H)$ and $\widehat{U}_{n,4} \approx
U_{n,4}$ (since $c_4<c_3$) without solving an additional fast
differential equation on $[0, c_4 H]$.
Similarly, we may solve the linear ODE \eqref{eq13nc} with the
polynomial $\hat{p}_{n,5}(\tau)$ on $[0, c_5 H]$ to obtain both
$\widehat{U}_{n,5} \approx U_{n,5}$ and $\widehat{U}_{n,6} \approx
U_{n,6}$. As a result, the fast time scale must only be evolved for a
total duration of $\frac{17}{6}H$ for each slow time step.
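As an illustration of this reuse (a sketch only, continuing the assumptions of the \texttt{MERK3} sketch above), a single fast solve over $[0,c_3H]$ can return both stage values by recording the solution at the intermediate time $\tau=c_4H$:
\begin{verbatim}
from scipy.integrate import solve_ivp

def fast_solve_two_outputs(L, p_hat, u_n, tau_inner, tau_outer,
                           rtol=1e-10, atol=1e-12):
    # one integration of v' = L v + p_hat(tau), v(0) = u_n, over [0, tau_outer],
    # sampled at tau_inner < tau_outer (e.g. tau_inner = c4*H, tau_outer = c3*H)
    sol = solve_ivp(lambda tau, v: L @ v + p_hat(tau), (0.0, tau_outer), u_n,
                    t_eval=[tau_inner, tau_outer], rtol=rtol, atol=atol)
    return sol.y[:, 0], sol.y[:, 1]   # values at tau_inner and tau_outer
\end{verbatim}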
\subsection{Fifth-order methods}
Similar to fourth-order ExpRK methods, there are no
stiffly-accurate fifth-order methods available in the literature
that fulfill Theorem~\ref{theorem1}. In particular, the only existing
fifth-order scheme (\texttt{expRK5s8}, that requires 8 stages) was
constructed in \cite{LO14b}. However, its coefficients
$a_{75}(H\mathcal{L})$, $a_{76}(H\mathcal{L})$, $a_{85}(H\mathcal{L})$,
$a_{86}(H\mathcal{L})$ and $a_{87}(H\mathcal{L})$ involve several
different linear combinations of $\varphi_k (c_i H\mathcal{L})$ with
different scalings $c_6, c_7, c_8$, and may not be used to create a
MERK method. Again, in \cite{Luan19}, we have constructed a new
family of efficient, fifth-order, 10-stage, stiffly-accurate ExpRK
methods (called \texttt{expRK5s10}) that fulfills Theorem~\ref{theorem1}:
\begin{subequations}\label{eq:expRK5}
\begin{equation}\label{eq:expRK5a}
\begin{aligned}
U_{n,2} = u_n &+ \varphi_1 (c_2 H\mathcal{L}) c_2 HF(t_n,u_n), \\
U_{n,k} = u_n &+ \varphi_1 (c_k H\mathcal{L}) c_k HF(t_n, u_n) + \varphi_2 (c_k H\mathcal{L}) \tfrac{c^2_k}{c_2} H D_{n,2}, \hspace{0.9cm} k=3,4 \\
U_{n,j} = u_n &+ \varphi_1 (c_j H\mathcal{L})c_j HF(t_n, u_n)+ \varphi_{2} (c_j H\mathcal{L}) c^2_j H \big(\alpha_3 D_{n,3} +\alpha_4 D_{n,4}\big)\\
&+ \varphi_{3} (c_j H\mathcal{L}) c^3_j H \big(\beta_3 D_{n,3} -\beta_4 D_{n,4}\big), \quad \hspace{2.5cm} j=5,6,7 \\
\end{aligned}
\end{equation}
\begin{equation}\label{eq:expRK5b}
\begin{aligned}
U_{n,m} = u_n &+ \varphi_1 (c_m H\mathcal{L})c_m H F(t_n, u_n)\\
&+ \varphi_{2} (c_m H\mathcal{L}) c^2_m H \big(\alpha_5 D_{n,5} +\alpha_6 D_{n,6}+\alpha_7 D_{n,7} \big)\\
&- \varphi_{3} (c_m H\mathcal{L}) c^3_m H \big(\beta_5 D_{n,5} +\beta_6 D_{n,6}+\beta_7 D_{n,7}\big) \\
&+ \varphi_{4} (c_m H\mathcal{L}) c^4_m H \big(\gamma_5 D_{n,5} +\gamma_6 D_{n,6}+\gamma_7 D_{n,7}\big), \hspace{1.1cm} m=8,9,10 \\
u_{n+1} = u_n &+ \varphi_1 (H\mathcal{L}) H F(t_n, u_n)+ \varphi_{2} (H\mathcal{L}) H \big(\alpha_8 D_{n,8} + \alpha_9 D_{n,9} +\alpha_{10} D_{n,10} \big) \\
&-\varphi_{3} (H\mathcal{L}) H \big(\beta_8 D_{n,8} + \beta_9 D_{n,9} +\beta_{10} D_{n,10} \big)\\
&+\varphi_{4} (H\mathcal{L}) H \big(\gamma_8 D_{n,8} + \gamma_9 D_{n,9} +\gamma_{10} D_{n,10} \big)
\end{aligned}
\end{equation}
with coefficients given by
\begin{equation}\label{eq:coefficients}
\begin{aligned}
\alpha_3 &= \tfrac{c_4}{c_3 (c_4-c_3)}, \ \alpha_4=\tfrac{c_3}{c_4 (c_3-c_4)},\\
\alpha_5 &= \tfrac{c_6 c_7}{c_5 (c_5-c_6)(c_5 - c_7)},\ \alpha_6=\tfrac{c_5 c_7}{c_6 (c_6-c_5)(c_6 - c_7)}, \ \alpha_7=\tfrac{c_5 c_6}{c_7 (c_7-c_5)(c_7 - c_6)},\\
\alpha_8 &= \tfrac{c_9 c_{10}}{c_8 (c_8-c_9)(c_8 - c_{10})},\ \alpha_9=\tfrac{c_8 c_{10}}{c_9 (c_9-c_8)(c_9 - c_{10})},\ \alpha_{10}=\tfrac{c_8 c_{9}}{c_{10} (c_{10}-c_8)(c_{10} - c_{9})} \\
\beta_3 &= \tfrac{2}{c_3 (c_3-c_4)}, \beta_4=\tfrac{2}{c_4 (c_3-c_4)}, \\
\beta_5 &= \tfrac{2(c_6+ c_7)}{c_5 (c_5-c_6)(c_5 - c_7)},\ \beta_6=\tfrac{2(c_5 +c_7)}{c_6 (c_6-c_5)(c_6 - c_7)}, \ \beta_7=\tfrac{2(c_5+ c_6)}{c_7 (c_7-c_5)(c_7 - c_6)},\\
\beta_8 &= \tfrac{2(c_9 +c_{10})}{c_8 (c_8-c_9)(c_8 - c_{10})},\ \beta_9=\tfrac{2(c_8+ c_{10})}{c_9 (c_9-c_8)(c_9 - c_{10})},\ \beta_{10}=\tfrac{2(c_8 +c_{9})}{c_{10} (c_{10}-c_8)(c_{10} - c_{9})}\\
\gamma_5 &= \tfrac{6}{c_5 (c_5-c_6)(c_5 - c_7)},\ \gamma_6=\tfrac{6}{c_6 (c_6-c_5)(c_6 - c_7)}, \ \gamma_7=\tfrac{6}{c_7 (c_7-c_5)(c_7 - c_6)},\\
\gamma_8 &= \tfrac{6}{c_8 (c_8-c_9)(c_8 - c_{10})},\ \gamma_9=\tfrac{6}{c_9 (c_9-c_8)(c_9 - c_{10})},\ \gamma_{10}=\tfrac{6}{c_{10} (c_{10}-c_8)(c_{10} - c_{9})}.
\end{aligned}
\end{equation}
\end{subequations}
Although this scheme has 10 stages, again its structure facilitates an
efficient implementation. Specifically, we note that there are
multiple stages $U_{n,i}$ which share the same format (same matrix
functions with different inputs $c_i$), and are independent of one another
(namely, $\{U_{n,3}, U_{n,4}\}$, $\{U_{n,5}, U_{n,6}, U_{n,7}\}$, and
$\{U_{n,8}, U_{n,9}, U_{n,10}\}$). These groups of stages can again
be computed simultaneously, allowing the scheme to behave like a
5-stage method. We therefore propose the corresponding fifth-order
MERK methods that use only 5 polynomials, which we name \texttt{MERK5}:
\begin{equation} \label{eq:MERK5}
\begin{aligned}
\hat{p}_{n,2}(\tau)&= \mathcal{N}(t_n, \hat{u}_n), \hspace{6.75cm} \tau \in [0, c_2 H] \\
\hat{p}_{n,3}(\tau)&=\hat{p}_{n,4}(\tau)= \mathcal{N}(t_n, \hat{u}_n)+ \tfrac{\tau}{c_2 H}\widehat{D}_{n,2}, \hspace{3.5cm} \tau \in [0, c_3 H] \\
\hat{p}_{n,5}(\tau)&=\hat{p}_{n,6}(\tau)=\hat{p}_{n,7}(\tau)= \mathcal{N}(t_n, \hat{u}_n)+ \tfrac{\tau}{H}\big(\alpha_3 \widehat{D}_{n,3} + \alpha_4 \widehat{D}_{n,4} \big)\\
& \hspace{3cm}+ \tfrac{\tau^2}{2 H^2}\big(\beta_3 \widehat{D}_{n,3} -\beta_4 \widehat{D}_{n,4} \big), \hspace{1.8cm} \tau \in [0, c_5 H] \\
\hat{p}_{n,8}(\tau)&=\hat{p}_{n,9}(\tau)=\hat{p}_{n,10}(\tau)= \mathcal{N}(t_n, \hat{u}_n)+ \tfrac{\tau}{H}\big(\alpha_5 \widehat{D}_{n,5} +\alpha_6 \widehat{D}_{n,6} +\alpha_7 \widehat{D}_{n,7} \big)\\
& \hspace{3cm} - \tfrac{\tau^2}{2 H^2}\big(\beta_5 \widehat{D}_{n,5} + \beta_6 \widehat{D}_{n,6} +\beta_7 \widehat{D}_{n,7} \big) \\
& \hspace{3cm} + \tfrac{\tau^3}{6 H^3}\big(\gamma_5 \widehat{D}_{n,5} +\gamma_6 \widehat{D}_{n,6}+\gamma_7 \widehat{D}_{n,7}\big), \hspace{0.4cm} \tau \in [0, c_8 H] \\
\hat{q}_{n}(\tau)&= \mathcal{N}(t_n, \hat{u}_n) + \tfrac{\tau}{H}(\alpha_8 \widehat{D}_{n,8} + \alpha_9 \widehat{D}_{n,9} +\alpha_{10} \widehat{D}_{n,10})\\
&\hspace{1.9cm}- \tfrac{\tau^2}{2 H^2} (\beta_8 \widehat{D}_{n,8} + \beta_9 \widehat{D}_{n,9} +\beta_{10} \widehat{D}_{n,10} )\\
&\hspace{1.9cm}+ \tfrac{\tau^3}{6 H^3}\big(\gamma_8 \widehat{D}_{n,8} + \gamma_9 \widehat{D}_{n,9} +\gamma_{10} \widehat{D}_{n,10} \big), \hspace{1.25cm} \tau \in [0, H].
\end{aligned}
\end{equation}
For our numerical experiments, we choose
$c_2=c_3=c_5=c_9=\tfrac{1}{2}$, $c_4=c_6=\tfrac{1}{3}$, $c_7=\tfrac{1}{4}$,
$c_8=\tfrac{7}{10}$, and $c_{10}=\tfrac{2}{3}$. Again, since $c_4<c_3$,
solving the fast time-scale problem \eqref{eq13nc} with the
polynomial $\hat{p}_{n,3}(\tau)$ on $[0, c_3 H]$ gives both
$\widehat{U}_{n,3} \approx U_{n,3}=v_{n,3}(c_3 H)$ and
$\widehat{U}_{n,4} \approx U_{n,4}=v_{n,3}(c_4 H)$. Similarly, since
$c_7<c_6<c_5$, $\widehat{U}_{n,5}, \widehat{U}_{n,6}$, and
$\widehat{U}_{n,7}$ can be obtained by solving a single fast time-scale
problem with polynomial $\hat{p}_{n,5}(\tau)$ on $[0,
c_5 H]$. Finally since $c_9<c_{10}<c_8$, one can compute
$\widehat{U}_{n,8}$, $\widehat{U}_{n,9}$, and $\widehat{U}_{n,10}$ by
solving a single fast time-scale problem with
polynomial $\hat{p}_{n,8}(\tau)$ on $[0, c_8 H]$. The sum total of
these solves corresponds to evolving the fast time scale for an overall
duration of $\frac{16}{5}H$ for each slow time step.
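For reference, the per-step fast integration costs quoted above are simply the sums of the lengths of the intervals on which the modified fast IVPs are solved (one interval per distinct stage polynomial, plus the final interval $[0,H]$); with the coefficient choices listed above,
\begin{equation*}
\underbrace{\tfrac{1}{2}+\tfrac{2}{3}+1}_{\texttt{MERK3}}=\tfrac{13}{6},\qquad
\underbrace{\tfrac{1}{2}+\tfrac{1}{2}+\tfrac{5}{6}+1}_{\texttt{MERK4}}=\tfrac{17}{6},\qquad
\underbrace{\tfrac{1}{2}+\tfrac{1}{2}+\tfrac{1}{2}+\tfrac{7}{10}+1}_{\texttt{MERK5}}=\tfrac{16}{5},
\end{equation*}
in units of $H$.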
\section{Numerical experiments}
\label{sec6}
In this section we present results from a variety of numerical tests
to examine the performance of the proposed {\texttt{MERK3}}, {\texttt{MERK4}} and
{\texttt{MERK5}} methods. These tests are designed to confirm the
theoretical convergence rates from Section \ref{sec:analysis}, and
compare efficiency against the Multirate Infinitesimal Step method
\texttt{MIS-KW3}, which uses a similar approach of evolving the fast
component using modified systems of differential equations
\cite{knothwolke98,Wensch2009,Schlegel2009,Schlegel2012b}. Unless
otherwise noted, we run these methods with inner explicit Runge-Kutta
ODE solvers of the same order of convergence as the MERK method, $p$:
\begin{itemize}
\item Third order {\texttt{MIS-KW3}} uses the \texttt{Knoth-Wolke-ERK} inner method \cite{knothwolke98};
\item Third order {\texttt{MERK3}} uses the \texttt{ERK-3-3} inner method,
$\begin{array}{c|ccc}
0 & & & \\
1/2 & 1/2 & & \\
1 & -1 & 2 & \\
\hline
& 1/6 & 2/3 & 1/6
\end{array}$;
\item Fourth order {\texttt{MERK4}} uses the \texttt{ERK-4-4} inner method
\cite[Table 1.2, left]{hairer93};
\item Fifth order {\texttt{MERK5}} uses the \texttt{Cash-Karp-ERK} inner
method \cite{cashkarp90}.
\end{itemize}
We note that although Theorem \ref{theorem2} guarantees that when
using a MERK method of order $p$, the internal stage solutions
\eqref{eq13nc} can be computed with a solver of order $q = p-1$
and the step solution \eqref{eq13nd} can use a solver of order
$r = p$, for simplicity we have used $r=q=p$ in the majority of our
tests. However, we more closely investigate these inner solver
order requirements in Section \ref{subsec:fast} below.
Not all of our test problems have convenient analytical solutions; for
these tests, we compute a reference solution using an 8th order
explicit or a 12th order implicit Runge-Kutta method with a time step
smaller than the smallest micro time step $h$. When computing
solution error, we report the maximum absolute error over all time
steps and solution components. From these, we compute convergence
rates using a linear least-squares fit of the log-error versus
log-macro time step $H$. For each test we present three types of
plots: one convergence plot (error vs $H$) and two efficiency plots.
Generally, efficiency plots present error versus the computational
cost. However in the multirate context, fast and slow function
costs can differ dramatically. As such, we separately consider
efficiency using total function calls and slow function calls. Since
the dominant number of total calls are from the fast function, the
``total'' plots represent the method efficiency for simulations with
comparable fast/slow function cost, whereas the ``slow-only'' plots
represent the method efficiency for simulations in which the slow
function calls are significantly more expensive (as explained in
Section \ref{section1} as our original motivation for multirate methods).
Individual applications will obviously lie somewhere between these
extremes, but we assume that they are typically closer to the
``slow-only'' results.
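For concreteness, the convergence rates reported below are obtained by the following simple fit (a sketch; the step sizes and errors shown are placeholders, not data from our tests):
\begin{verbatim}
import numpy as np

H_vals = np.array([1e-1, 5e-2, 2.5e-2, 1.25e-2])   # macro steps (placeholders)
errors = np.array([2e-3, 2.5e-4, 3.1e-5, 3.9e-6])  # max abs errors (placeholders)

# observed order = slope of the least-squares line of log(error) vs log(H)
rate, _ = np.polyfit(np.log(H_vals), np.log(errors), 1)
print(f"observed convergence rate: {rate:.2f}")
\end{verbatim}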
Applications scientists traditionally use multirate solvers for one
of two reasons. The first category are concerned with simulations of
stiff systems, but where they choose to use a subcycled explicit
method instead of an implicit one for the stiff portion of the problem.
Generally, these applications are primarily concerned with selecting
$h$ to satisfy stability of the fast time scale (instead of accuracy).
The second category consider simulations wherein it is essential to
capture the coupling between the slow and fast time scales
accurately, since temporal errors at the fast time scale can
significantly deteriorate the slow time scale solution;
here $h$ is chosen based on accuracy considerations. We therefore
separately explore test problems in both of these categories in the
Sections \ref{subsec:category1} and \ref{subsec:category2} below.
To facilitate reproducibility of the results in this section, we have
provided an open-source MATLAB implementation of the {\texttt{MERK3}},
{\texttt{MERK4}}, {\texttt{MERK5}} and {\texttt{MIS-KW3}} methods, along with scripts to
perform all tests from this section \cite{MERK_repo}.
\subsection{Category I}
\label{subsec:category1}
As this category of problems is concerned with stability at the fast
time scale, we choose a fixed, linearly stable micro time step $h$,
and vary the macro time step $H$ (and similarly, $m=H/h$). To this
end, we focus on two stiff applications: a reaction diffusion problem
and the brusselator problem.
\subsubsection{Reaction Diffusion}
\label{subsubsec:reaction_diffusion}
We consider a reaction diffusion problem with a traveling wave
solution similar to the one considered by Savcenco et
al.~\cite{savcenco},
\begin{align*}
&u_t = \frac{1}{100} u_{xx} + u^2(1 - u), \qquad 0<x<5, \qquad 0<t\le 3,\\
&u_x(0,t) = u_x(5,t) = 0 , \qquad u(x,0) = (1+ e^{\lambda(x-1)})^{-1},
\end{align*}
where $\lambda = 5 \sqrt{2}$.
We discretize in space using a second order accurate central finite
difference scheme using $1000$ spatial points. This gives
us a system for which we take $\mathcal{L}$ and $\mathcal{N}(t,u(t))$
to be the discretized versions of $\frac{1}{100} u_{xx}$ and $u^2(1 -
u)$ respectively. The micro time step is chosen to satisfy the
Courant-Friedrichs-Lewy (CFL) linear stability condition, $h = 10^{-3}$.
In the left of Figure \ref{fig:randd} we plot the method convergence
as $H$ is varied, which shows convergence rates that are slightly
better than predicted for all methods tested. As this behavior is not
consistently observed for the remaining test problems, we believe that
this is an artifact of this particular test problem. Here, we compute
the best-fit rates using only the error values larger than
$\sim10^{-13}$, below which the error stagnates due to the accuracy of the
reference solution.
\begin{figure}
\caption{\small Reaction diffusion convergence (left) and
``slow-only'' efficiency (right). The best fit convergence rates are
3.03, 4.93, 5.71, 3.20 (\texttt{MERK3}, \texttt{MERK4}, \texttt{MERK5} and \texttt{MIS-KW3}, respectively).}
\label{fig:randd}
\end{figure}
The efficiency plots for both test problems in this category are very
similar, so we present the ``slow-only'' efficiency plot for this
problem in the right of Figure \ref{fig:randd}, saving the ``total''
efficiency plot for the next test. Here, we note that for tolerances
larger than $10^{-7}$, {\texttt{MERK3}} and {\texttt{MIS-KW3}} are the most efficient,
but for tighter tolerances {\texttt{MERK4}} is the best. Although
{\texttt{MERK5}} has a higher rate of convergence, the increased cost per
step causes it to lag behind until it reaches the reference solution
accuracy, where it begins to overtake {\texttt{MERK4}}.
\subsubsection{Brusselator}
\label{subsubsec:brusselator}
The brusselator is an oscillating chemical reaction problem for which
one of the reaction products acts as a catalyst. It is widely used as
a test for ODE solvers, including IMEX and multirate methods. We use a
variant of this stiff nonlinear ODE system given by:
\begin{align*}
\begin{bmatrix}
u \\ v \\ w
\end{bmatrix}' &= \begin{bmatrix}
a - (w + 1)u + u^2v\\ wu - u^2v\\ \frac{b-w}{\epsilon} - uw
\end{bmatrix},\qquad
\mathbf{u}(0) = \begin{bmatrix} 1.2 \\ 3.1 \\ 3 \end{bmatrix},
\end{align*}
over the interval $t\in (0,2]$, with parameters $a = 1, b = 3.5$ and
$\frac{1}{\epsilon} = 100$. We convert this to have the multirate
form (\ref{eq1}) by defining
\begin{align*}
\mathcal{L} = \begin{bmatrix}
0 & 0 & 0 \\ 0 & 0 & 0 \\ 0 & 0 & \frac{-1}{\epsilon}
\end{bmatrix},\hspace{7mm} \mathcal{N}(t,\mathbf{u}(t)) = \begin{bmatrix}
a - (w + 1)u + u^2v\\ wu - u^2v\\ \frac{b}{\epsilon} - uw
\end{bmatrix}.
\end{align*}
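As a quick sanity check on this splitting (a minimal sketch; the parameter values follow the text above), one can verify numerically that $\mathcal{L}\mathbf{u}+\mathcal{N}(t,\mathbf{u})$ reproduces the full right-hand side:
\begin{verbatim}
import numpy as np

a, b, eps = 1.0, 3.5, 0.01   # so that 1/eps = 100

L = np.array([[0.0, 0.0, 0.0],
              [0.0, 0.0, 0.0],
              [0.0, 0.0, -1.0 / eps]])

def N(t, y):
    u, v, w = y
    return np.array([a - (w + 1.0) * u + u**2 * v,
                     w * u - u**2 * v,
                     b / eps - u * w])

def full_rhs(t, y):
    u, v, w = y
    return np.array([a - (w + 1.0) * u + u**2 * v,
                     w * u - u**2 * v,
                     (b - w) / eps - u * w])

y0 = np.array([1.2, 3.1, 3.0])
assert np.allclose(L @ y0 + N(0.0, y0), full_rhs(0.0, y0))
\end{verbatim}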
In the left of Figure \ref{fig:brus} we plot the error versus $H$, and
list the corresponding best-fit convergence rates. We observe
that all the tested methods perform slightly worse than their
predicted convergence rates, which we attribute to order reduction due
to the stiffness of the problem; however, the relative convergence
rates of each method compare as expected against one another.
\begin{figure}
\caption{\small Brusselator convergence (left) and ``total''
efficiency (right). The best fit convergence rates are
2.62, 3.75, 4.36 and 2.61 (\texttt{MERK3}, \texttt{MERK4}, \texttt{MERK5} and \texttt{MIS-KW3}, respectively).}
\label{fig:brus}
\end{figure}
For this test problem, we plot the efficiency based on total function
calls in the right of Figure \ref{fig:brus}. We note that each curve
is almost vertical since the micro time step $h$ is held constant for
these tests, and is significantly smaller than $H$. Here, {\texttt{MIS-KW3}}
takes the least amount of total function calls since its structure
ensures that it only traverses the time step interval $[t_n,t_n+H]$
once when evaluating the modified ODEs, whereas {\texttt{MERK3}},
{\texttt{MERK4}} and {\texttt{MERK5}} require approximately 2, 3 and 3
traversals, respectively. We note that although these additional
traversals of the time step interval $[t_n,t_n+H]$ result in
significant increases in the number of fast function calls, the number
of potentially more costly slow function calls for all methods is
equal to the number of slow stages.
\subsection{Category II}
\label{subsec:category2}
Recalling that our second category of multirate applications focuses
on accurately coupling the fast and slow processes, for these test
problems we choose a fixed time scale separation factor $m$ for each
method/test, and vary $H$ (and proportionally, $h=H/m$). For this
group of tests we consider a linear multirate problem from Estep et
al.~\cite{estep} for which the fast variables are coupled into the
slow equation (one-directional coupling) and a linear multirate
problem of our own design where both the fast and slow variables are
coupled (bi-directional coupling). Since the ``optimal'' value of $m$
for each multirate algorithm is problem-dependent, we describe our
approach for determining this $m$ value in Section
\ref{subsubsec:one_directional} below.
\subsubsection{One-directional coupling}
\label{subsubsec:one_directional}
We consider a linear system of ODEs~\cite{estep}:
\begin{align}
\begin{bmatrix}
u \\ v \\ w
\end{bmatrix}' &= \begin{bmatrix}
0 & -50 & 0 \\ 50 & 0 & 0 \\ 1 & 1 & -1
\end{bmatrix}\begin{bmatrix}
u \\ v \\w
\end{bmatrix}, \label{eq:estep_problem}\qquad
\mathbf{u}(0) = \begin{bmatrix} 1\\0\\2\end{bmatrix},
\end{align}
over the interval $t\in (0,1]$. This has analytical solution $u(t) =
\cos(50t)$, $v(t) = \sin(50t)$, and $w(t) = \frac{5051}{2501} e^{-t} -
\frac{49}{2501}\cos(50t) + \frac{51}{2501}\sin(50t)$.
We convert this problem to multirate form \eqref{eq1} by decomposing
it as:
\begin{align*}
\mathcal{L} = \begin{bmatrix}
0 & -50 & 0 \\ 50 & 0 & 0 \\ 1 & 1 & 0
\end{bmatrix},\hspace{7mm} \mathcal{N}(t,\mathbf{u}(t)) = \begin{bmatrix}
0 \\ 0 \\-w
\end{bmatrix}.
\end{align*}
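The analytical solution quoted above can be verified directly (a small check script; the matrix below is the full right-hand side of \eqref{eq:estep_problem}, not the split form):
\begin{verbatim}
import numpy as np

A = np.array([[0.0, -50.0, 0.0],
              [50.0,  0.0, 0.0],
              [ 1.0,  1.0, -1.0]])

def exact(t):
    return np.array([np.cos(50 * t),
                     np.sin(50 * t),
                     5051 / 2501 * np.exp(-t)
                     - 49 / 2501 * np.cos(50 * t)
                     + 51 / 2501 * np.sin(50 * t)])

def exact_dot(t):
    return np.array([-50 * np.sin(50 * t),
                      50 * np.cos(50 * t),
                     -5051 / 2501 * np.exp(-t)
                     + 2450 / 2501 * np.sin(50 * t)
                     + 2550 / 2501 * np.cos(50 * t)])

for t in np.linspace(0.0, 1.0, 11):
    assert np.allclose(exact_dot(t), A @ exact(t))   # u' = A u
assert np.allclose(exact(0.0), [1.0, 0.0, 2.0])      # initial condition
\end{verbatim}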
We first discuss our approach in determining the ``optimal''
time-scale separation factor $m$. For illustration, we consider
{\texttt{MERK4}} on this problem; however, we apply this approach to all
methods for both this test and the following bi-directional coupling
test in Section \ref{subsubsec:bidirectional}. We begin by
repeatedly solving the problem \eqref{eq:estep_problem} using the
multirate method with different factors $m =
\{5,10,25,50,75,85,100,125\}$. For each value of $m$, we vary $H$ (and
hence $h=H/m$). We then analyze the resulting ``total'' and
``slow-only'' efficiency plots for each fixed $m$ value, as shown in
Figure \ref{fig:estepprocess_eff}.
\begin{figure}
\caption{Efficiency plots for {\texttt{MERK4}} on the one-directional coupling problem \eqref{eq:estep_problem} for various time-scale separation factors $m$.}
\label{fig:estepprocess_eff}
\end{figure}
We first note that both plots show a group of $m$ values with
identical efficiency, along with other less efficient results. In
Figure \ref{fig:estepprocess_slo}, the more efficient group is
comprised of \emph{larger} $m$ values, whereas in Figure
\ref{fig:estepprocess_tot} the more efficient group has \emph{smaller}
$m$ values. This is unsurprising, since increases in $m$ for a fixed
$H$ correspond to decreases in $h$, leading to accuracy improvements
at the fast time scale alone. While this will result in increased
total function calls, the number of slow function calls will remain
fixed. We therefore define the ``optimal'' $m$ as the value where the
fast and slow solution errors are balanced. Hence, in Figure
\ref{fig:estepprocess_tot} this corresponds to the largest $m$ that
remains in the more efficient group, and in Figure
\ref{fig:estepprocess_slo} this corresponds to the smallest $m$ that
remains in the more efficient group. Inspecting both plots in Figure
\ref{fig:estepprocess_eff}, the optimal value for {\texttt{MERK4}} on this
problem is $m=50$. Carrying out a similar process for the other
methods on this problem, {\texttt{MERK3}} has an optimal value of
$m = 75$, {\texttt{MERK5}} $m = 25$, and {\texttt{MIS-KW3}} has an optimal value of
$m = 75$.
Using these $m$ values, in Figure \ref{fig:fasttoslo_conv} we plot the
convergence results for the four methods on this problem, confirming
the analytical orders of convergence, with errors stagnating around
$10^{-13}$ due to accumulation of floating-point roundoff. While we
find slightly better-than-expected convergence rates for the
MERK methods, and only the expected rate for {\texttt{MIS-KW3}}, we do not draw
conclusions regarding this behavior.
\begin{figure}
\caption{One-directional coupling convergence. \small Best fit
convergence rates are 3.16, 4.28, 5.26 and 3.04 ({\texttt{MERK3}}, {\texttt{MERK4}}, {\texttt{MERK5}} and {\texttt{MIS-KW3}}, respectively).}
\label{fig:fasttoslo_conv}
\end{figure}
Similarly, in Figure \ref{fig:fasttoslo_eff} we plot both the
``total'' and ``slow-only'' efficiency of each method on the
one-directional test problem. When measuring only slow function
calls, both {\texttt{MIS-KW3}} and {\texttt{MERK3}} tie for errors larger than
$10^{-6}$, {\texttt{MERK4}} is the most efficient for errors between
$10^{-6}$ and $10^{-12}$ and {\texttt{MERK5}} is the most efficient at the
tightest error values. When the fast function calls are given equal
weight as the slow, however, {\texttt{MIS-KW3}} is the most efficient at errors
larger than $10^{-8}$, while {\texttt{MERK5}} is the most efficient at
tighter error thresholds.
\begin{figure}
\caption{One-directional coupling efficiency. The most efficient
method depends on how ``cost'' is measured, as well as on the
desired accuracy.}
\label{fig:fasttoslo_eff}
\end{figure}
\subsubsection{Bi-directional coupling}
\label{subsubsec:bidirectional}
Taking inspiration from the preceding one-directional test, we
designed a problem with coupling between both the fast and slow
components to further demonstrate the flexibility and robustness of
MERK methods. To this end, we consider the following test problem
\begin{align}
\begin{bmatrix}
u \\ v \\ w
\end{bmatrix}' &= \begin{bmatrix}
0 & 100 & 1 \\ -100 & 0 & 0 \\ 1 & 0 & -1
\end{bmatrix}\begin{bmatrix}
u \\ v \\w
\end{bmatrix},\label{eq:bidirectional_pb}\qquad
\mathbf{u}(0) = \begin{bmatrix} 9001/10001\\
100000/10001 \\ 1000\end{bmatrix},
\end{align}
over $t\in (0,2]$. Converting to multirate form \eqref{eq1},
we set $\mathcal{L}$ and $\mathcal{N}(t,\mathbf{u}(t))$ as:
\begin{align*}
\mathcal{L} = \begin{bmatrix}
0 & 100 & 0 \\ -100 & 0 & 0 \\ 1 & 0 & 0
\end{bmatrix},\hspace{7mm} \mathcal{N}(t,\mathbf{u}(t)) = \begin{bmatrix}
w \\ 0 \\-w
\end{bmatrix}.
\end{align*}
While this is a linear test problem that may be solved using the
matrix exponential, this solution is difficult to represent in
closed-form, and so we use a reference solution for convenience.
Using the previously-described approach for determining the optimal
time-scale separation factor $m$ for each method on this problem, we
have $m = 50$ for {\texttt{MERK3}} and {\texttt{MERK4}}, $m = 10$ for
{\texttt{MERK5}} and $m=25$ for {\texttt{MIS-KW3}}.
In Figure \ref{fig:fastslocoup_conv} we plot the convergence rates of
each method on this test problem, again confirming the analytical
orders of convergence, with errors stagnating around $10^{-12}$ due to
the reference solution accuracy.
\begin{figure}
\caption{\small Bi-directional coupling convergence. Best fit
convergence rates are 3.03, 3.99, 4.97 and 3.06 ({\texttt{MERK3}}, {\texttt{MERK4}}, {\texttt{MERK5}} and {\texttt{MIS-KW3}}, respectively).}
\label{fig:fastslocoup_conv}
\end{figure}
Similarly, in Figure \ref{fig:fastslocoup_eff} we plot both the
``slow-only'' and ``total'' efficiency plots for this problem. Here,
when measuring only the slow function calls, the most
efficient method is {\texttt{MERK3}} at error thresholds above $10^{-5}$,
and {\texttt{MERK5}} for smaller error values. Strikingly, when
considering the total number of function calls, {\texttt{MERK5}}
is the most efficient at nearly all error thresholds. We note,
however, that the optimal time-scale separation factor for {\texttt{MERK5}}
is $m=10$ for this problem, which results in reduced fast function
calls per slow step, and hence an overall reduction in total function
calls.
\begin{figure}
\caption{Bi-directional coupling efficiency. Again, the most efficient
method depends on how ``cost'' is measured, as well as on the
desired accuracy; however, {\texttt{MERK5}} is the most efficient here by nearly all measures.}
\label{fig:fastslocoup_eff}
\end{figure}
\subsection{Variations in the fast method}
\label{subsec:fast}
We finish by demonstrating the effects of using inner methods with
differing orders of accuracy. Here, we consider only the
\texttt{MERK} methods, applied to the bi-directional coupling problem
\eqref{eq:bidirectional_pb}. We vary the order of the method
applied for computing both the internal stage solutions \eqref{eq13nc},
$q$, and the step solution \eqref{eq13nd}, $r$. Recalling the
convergence theory presented in Theorem \ref{theorem2}, a MERK
method of order $p$ should use inner methods of orders $q\ge
p-1$ and $r \ge p$. However, in these tests we apply other variations
on orders to ascertain whether (a) the inner methods could have even lower order
and still obtain overall order $p$, or (b) use of higher-order inner
methods can result in overall convergence higher than $p$. We present
the best-fit convergence rates for this ensemble of tests in Table
\ref{table:convrates}.
These numerical results show that in fact the inner method order
requirements presented in Theorem \ref{theorem2} are both
necessary and sufficient, i.e., the least-expensive combination for
attaining a MERK method of order $p$ is to compute stage solutions
\eqref{eq13nc} using an inner method of order $p-1$, and the time step
solution \eqref{eq13nd} using an inner method of order $p$.
Furthermore, use of higher-order inner methods with orders $q=r>p$
\emph{does not} result in overall order higher than $p$, due to the
first term $C_1 H^p$ in Theorem \ref{theorem2}, that corresponds to
the coupling between the fast and slow processes.
\begin{table}[h!]
\centering
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
\multicolumn{3}{|c|}{{\texttt{MERK3}}($p=3$)} & \multicolumn{3}{|c|}{{\texttt{MERK4}}($p=4$)} & \multicolumn{3}{|c|}{{\texttt{MERK5}}($p=5$)} \\
\hline
$q$ & $r$ & Observed order & $q$ & $r$ & Observed order & $q$ & $r$ & Observed order \\
\hline
2 & 2 & 2.00 & 3 & 3 & 3.01 & 4 & 4 & 4.00 \\
\hline
3 & 2 & 2.00 & 4 & 3 & 3.01 & 5 & 4 & 4.00 \\
\hline
2 & 3 & 3.03 & 3 & 4 & 3.99 & 4 & 5 & 4.97 \\
\hline
3 & 3 & 3.03 & 4 & 4 & 3.99 & 5 & 5 & 4.97 \\
\hline
4 & 4 & 3.03 & 5 & 5 & 3.99 & 6 & 6 & 4.96 \\
\hline
\end{tabular}
\caption{Convergence rate dependence on inner ODE solvers.}
\label{table:convrates}
\end{table}
\section{Conclusion}
\label{sec:conclusion}
We propose a novel class of multirate methods constructed from
explicit exponential Runge--Kutta methods, wherein the action of the
matrix exponential is approximated via solution of ``fast'' initial
value problems for each ExpRK stage. Algorithmically, these methods
offer a number of desirable properties. Since these are created
through defining a set of modified IVPs (like (R)MIS and MRI-GARK
methods), MERK implementations have near complete freedom
in evolving the problem at the fast time scale; however, unlike (R)MIS
and MRI-GARK, MERK methods may utilize inner solvers of reduced
accuracy for the internal stages. Additionally, since the
MERK structure follows directly from ExpRK methods satisfying Theorem
\ref{theorem1}, derivation of high-order MERK methods, including
versions supporting embeddings for temporal adaptivity, is much
simpler than for alternate multirate frameworks. As a result, MERK
methods constitute the first multirate algorithms of order five,
without requiring deferred correction or extrapolation techniques.
Furthermore, the proposed approach may be similarly applied to
exponential Rosenbrock methods, allowing for problems where the fast
time scale is nonlinear, although such methods are not considered in
this work.
In addition to proposing the MERK class of multirate methods and
providing rigorous analysis of their convergence, we provide numerical
comparisons of the performance of multiple MERK and MIS methods on a
variety of multirate test problems. Based on these experiments, we
find that the MERK methods indeed exhibit their theoretical orders of
convergence, including tests that clearly demonstrate our primary
convergence result in Theorem \ref{theorem2}. Furthermore, the
proposed methods compare favorably against standard MIS multirate
methods, particularly when increased accuracy is desired and for
problems wherein the ``slow'' right-hand side function is
significantly more costly than the ``fast.''
This work may be extended in numerous ways. As alluded to above,
extensions of these approaches to explicit exponential Rosenbrock
methods are straightforward, and are already under investigation.
Additionally, extensions to higher order will follow from related
developments of higher-order exponential methods. Finally, we plan to
investigate the use of embeddings at both the fast and slow time
scales to perform temporal adaptivity in both $H$ and $h$ for
efficient, tolerance-based calculations.
\section*{Acknowledgments}
The first author would like to thank Prof.~Hochbruck and Dr.~Demirel for their fruitful discussions during his visit at the Karlsruhe
Institute of Technology (KIT) in 2013 under the support of the DFG Research Training Group
1294 \textquotedblleft Analysis, Simulation and Design of
Nanotechnological Processes\textquotedblright.
\end{document}
\begin{document}
\title[Galois scaffolds]{Galois scaffolds and Galois module
structure in extensions of characteristic $p$ local
fields of degree $p^2$}
\author{Nigel P.~Byott and G.~Griffith Elder}
\email{[email protected]}
\email{[email protected]}
\address{Mathematics Research Institute, College of Engineering,
Mathematics and Physical Sciences, University of Exeter, Exeter
EX4 4QF U.K.}
\address{Mathematics Dept.,
University of Nebraska at Omaha,
Omaha, NE 68182-0243 U.S.A.}
\date{\today}
\begin{abstract}
A Galois scaffold, in a Galois extension of local fields with
perfect residue fields, is an adaptation of the normal basis to the
valuation of the extension field, and thus can be applied to answer
questions of Galois module structure. Here we give a sufficient
condition for a Galois scaffold to exist in fully ramified Galois
extensions of degree $p^2$ of characteristic $p$ local fields.
This condition becomes necessary when we restrict to $p=3$. For extensions
$L/K$ of degree $p^2$ that satisfy this condition, we determine the
Galois module structure of the ring of integers by finding necessary and
sufficient conditions for the ring of integers of $L$ to be free over
its associated order in $K[\mbox{Gal}(L/K)]$.
\end{abstract}
\maketitle
\section{Introduction}
The Galois module structure of the ring of integers in ramified
$C_p$-extensions of local fields $L/K$ of characteristic $p$ was
studied in \cite{aiba,deSmit}. Of basic importance to that work was a
$K$-basis for the group algebra $K[\mbox{Gal}(L/K)]$ whose effect on
the valuation of the elements of $L$ was easy to determine. In
\cite{elder:scaffold}, an attempt was made to capture the nice
properties of this basis with the definition of a Galois scaffold.
In this paper, we revise this definition slightly, and show that, in
general, a totally ramified Galois $p$-extension need not admit a
Galois scaffold. Indeed, the conditions, given in
\cite{elder:scaffold}, that are sufficient for a Galois scaffold to
exist in a fully ramified elementary abelian $p$-extension of
characteristic $p$ local fields are shown here to be necessary for
$C_3\times C_3$-extensions. This is technical work ({\em i.e.}
painstaking linear algebra). So we take the opportunity here to
extend the results of \cite{elder:scaffold} to
$C_{p^2}$-extensions. Thus in Theorem \ref{scaffold} we give
conditions that are sufficient for a Galois scaffold to exist in any
fully ramified, degree $p^2$ extension of characteristic $p$ local
fields with perfect residue fields, and then prove:
\begin{theorem} \label{assoc-main}
Let $L/K$ be a fully ramified Galois extension of degree $p^2$ that
possesses a Galois scaffold because it satisfies the conditions of
Theorem \ref{scaffold}. Let $\mathfrak A_{L/K}=\{\alpha\in
K[G]:\alpha\mathfrak O_L\subseteq \mathfrak O_L\}$ be the associated order of the
ring of integers $\mathfrak O_L$ of $L$. Then
$$\mathfrak O_L\mbox{ is free over }\mathfrak A_{L/K}\mbox{ if and only if }r(b)
\mid p^2-1,$$ where $r(b)$ denotes the least nonnegative residue
modulo $p^2$ of
the second (lower) ramification number of $L/K$. Furthermore, if
$\mathfrak O_L$ is free over $\mathfrak A_{L/K}$ then any element $\rho\in L$
with normalized valuation $v_L(\rho)=r(b)$ satisfies
$\mathfrak O_L=\mathfrak A_{L/K}\rho$.
\end{theorem}
The proof of this result appears in \S2.4.
\subsection{Notation}
Let $p$ be prime and let $\mathbb{F}_p$ be the finite field with $p$ elements.
Let
$\kappa$ be a perfect field containing $\mathbb{F}_p$, let
$K_0=\kappa((t))$ be the local function field with residue field
$\kappa$, and let $K_n/K_0$ be a fully ramified Galois extension of
degree $p^n$ with Galois group $G=\mbox{Gal}(K_n/K_0)$. The
ramification filtration of $G$ is the set of subgroups
$G_i=\{\sigma\in G: v_n((\sigma-1)\pi_n)\geq i+1\}$. Subscripts denote
field of reference. So, for example, $v_n$ is the additive valuation on $K_n$,
normalized so that $v_n(K_n^\times)=\mathbb{Z}$, $\pi_n$ is a prime
element of $K_n$ with $v_n(\pi_n)=1$, and $\mathfrak O_n=\{x\in K_n:
v_n(x)\geq 0\}$ is the valuation ring with maximal ideal $\mathfrak P_n=
\{x\in K_n: v_n(x)> 0\}$.
Quotients of consecutive ramification groups $G_i/G_{i+1}$ are either
trivial or elementary abelian $C_p\times \cdots \times C_p$
\cite[IV\S2 Prop 7 Cor 3]{serre:local}. Thus the usual ramification
filtration can be refined: There is a filtration $G=H_0\supsetneq
H_1\supsetneq \cdots \supsetneq H_{n-1}\supsetneq H_n=\{1\}$ such that
$H_i/H_{i+1}\cong C_p$ for $0\leq i\leq n-1$ and $\{H_i:0\leq i\leq
n\}\supseteq \{G_i:i\geq 1\}$. Choose one such filtration. Choose elements
$\sigma_{i+1}\in H_i\setminus H_{i+1}$ for each $0\leq i\leq n-1$ and
define $b_i=v_n((\sigma_{i}-1)\pi_n)-1$. Then $b_1\leq b_2\leq \cdots
\leq b_n$. Define the ramification multiset to be $\{b_i:1\leq i\leq
n\}$, which is independent of our choices \cite[IV\S1 Prop 3
Cor]{serre:local}, and thus should be considered a fundamental
invariant of the extension. As a set, it is just the set of (lower)
ramification numbers, subscripts $i$ with $G_i\supsetneq G_{i+1}$.
Define $K_i=K_n^{H_i}$ to be the fixed field of $H_i$. Thus we have
a path through the subfields of $K_n$, from $K_n$ down
to $K_0$, which is consistent with the ramification multiset:
$\{b_i:j< i\leq n\}$ is the ramification multiset for
$K_n/K_j$, $\{b_i:0< i\leq j\}$ is the ramification multiset
for $K_j/K_0$, and $b_i$ is the ramification number for
$K_i/K_{i-1}$.
Let $\mathfrak A_{K_n/K_0}=\{\alpha\in K_0[G]:\alpha\mathfrak O_n\subseteq \mathfrak O_n\}$
denote the associated order of $\mathfrak O_n$ in the group algebra $K_0[G]$.
Since $\mathfrak A_{K_n/K_0}$ is an $\mathfrak O_0$-order in $K_0[G]$ containing
$\mathfrak O_0[G]$ and $\mathfrak O_n$ is a module over $\mathfrak A_{K_n/K_0}$, it is
natural to ask about the structure of $\mathfrak O_n$ over $\mathfrak A_{K_n/K_0}$.
Although more general questions can be addressed ({\em
e.g.}~\cite{deSmit}), we follow \cite{aiba, byott:scaffold} here and
focus our attention on determining conditions that are necessary and
sufficient for $\mathfrak O_n$ to be free over $\mathfrak A_{K_n/K_0}$.
Let $\lfloor x\rfloor$ and $\lceil x\rceil$ denote the greatest
integer and least integer functions, respectively. Let
$\wp(X)=X^p-X\in \mathbb{Z}[X]$ and $\binom{X}{i}=X(X-1)\cdots
(X-i+1)/i!$ denote the binomial coefficient. Define truncated
exponentiation by the following truncation of the binomial series:
$$(1+X)^{[Y]}:=\sum_{i=0}^{p-1}\binom{Y}{i}X^i\in\mathbb{Z}_{(p)}[X,Y],$$
where $\mathbb{Z}_{(p)}$ is the integers localized at $p$.
Vandermonde's Convolution Identity is
$\sum_{i=0}^{t}\binom{Y}{i}\binom{X}{t-i}
=\binom{X+Y}{t}\in\mathbb{Z}_{(p)}[X,Y]$
for $0\leq t\leq p-1$.
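For instance, when $p=3$ the truncated exponential is the quadratic
$(1+X)^{[Y]}=1+YX+\binom{Y}{2}X^{2}$, and Vandermonde's identity with $t=2$ reads
$\binom{X+Y}{2}=\binom{Y}{2}+YX+\binom{X}{2}$.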
\subsection{Definition of Galois scaffold}
The term was introduced in \cite{elder:scaffold}. Its definition is
refined here. Two ingredients are required: A valuation criterion for
a normal basis generator and a generating set for a particularly nice
$K_0$-basis of the group algebra $K_0[G]$.
\subsubsection{Valuation criterion}
In a Galois extension of local fields $K_n/K_0$, a valuation criterion
for a normal basis generator is an integer $c$ such that if $\rho\in
L$ with $v_n(\rho)=c$ then $\{\sigma\rho:\sigma\in G\}$ is a normal
basis for $K_n$ over $K_0$. For fields of characteristic $p$, every
totally ramified Galois $p$-extension $K_n/K_0$ has a valuation
criterion. Indeed, if the extension is abelian, $c$ can then be any
integer $c\equiv b_n\bmod p^n$ \cite{elder:cor-criterion}.
\subsubsection{Generating set for the group algebra $K_0[G]$}
We have chosen a refined filtration $\{H_i\}$ of the Galois group
along with group elements $\sigma_i\in H_{i-1}\setminus H_i$. These
elements certainly generate the Galois group,
$G=\{\prod_{i=1}^n\sigma_i^{a_i}:0\leq a_i\leq p-1\}$, and thus generate
a basis for $K_0[G]$ over $K_0$, a basis that is naturally
associated with a normal basis for $K_n/K_0$. A Galois
scaffold occurs if there is a similar generating set of $n$ elements
$\{\Psi_i\}$ from the augmentation ideal $(\sigma-1:\sigma\in G)$ of
$K_0[G]$ that satisfies
a regularity condition and a spanning condition:
For all $0\leq j<p$ and all
$\rho,\rho'\in K_n$ that satisfy the valuation criterion,
$v_n(\rho),v_n(\rho')\equiv c\bmod p^n$,
\begin{equation}\label{strong-scaff}
v_n(\Psi_i^j\rho)-v_n(\rho)=j\cdot\big(v_n(\Psi_i\rho')-v_n(\rho')\big).
\end{equation}
For $0\leq a<p^n$, define $\Psi^{(a)} = \Psi_{n}^{a_{(0)}}\Psi_{n-1}^{a_{(1)}}\cdots \Psi_{1}^{a_{(n-1)}}$
where $a$ is expanded $p$-adically as $a=\sum_i
a_{(i)} p^i$ with $0\leq a_{(i)}<p$. Then
for $v_n(\rho)\equiv c\bmod p^n$,
\begin{equation}\label{scaff}
\left \{v_n(\Psi^{(a)}\rho):0\leq a<p^n\right \}
\end{equation} is a complete set
of residues modulo $p^n$. Because $K_n/K_0$ is fully
ramified of degree $p^n$, this means that $\{\Psi^{(a)}:0\leq a<p^n\}$
is a $K_0$-basis for $K_0[G]$.
A quick comment now about the definition of Galois
scaffold in \cite{elder:scaffold}. While we explicitly require a
Galois scaffold here to have two properties, (\ref{strong-scaff}) and
(\ref{scaff}), the definition stated in \cite{elder:scaffold} required only
(\ref{scaff}) explicitly. Note however that the Galois scaffold given in
\cite{elder:scaffold} did satisfy both (\ref{strong-scaff}) and
(\ref{scaff}).
\section{Galois extensions of degree $p^2$ with Galois scaffold and their resulting Galois module structure}
\subsection{Characterizing the extensions}
Elementary abelian extensions of degree $p^2$ correspond to
2-dimensional subspaces of $K_0 / \wp(K_0)$, where $\wp(K_0)=\{ \wp(k)
: k \in K_0\}$. Cyclic extensions of degree $p^2$ correspond to Witt
vectors $(\beta_1,\beta_2)$ of length 2, and the extension is
unchanged if we add an element of $\wp(K_0)$ to $\beta_1$ or
$\beta_2$. Thus, in either case, the extensions are determined by a
pair of coset representatives of $\wp(K_0)$. In this subsection, we
explain these correspondences and tie those coset representatives
(reduced representatives) that are distinguished for having maximal
valuation to the ramification numbers for $K_2/K_0$. We also set
up notation for the Galois action that is consistent with \S1.1.
\subsubsection{Elementary abelian}
The map that takes $K_2=K_0(x_1,x_2)$ with $\wp(x_i)=\beta_i\in K_0$
to $V=\mathbb{F}_p\beta_1+\mathbb{F}_p\beta_2+\wp(K_0)$ sets up a
bijection between $C_p\times C_p$-extensions of $K_0$ and
$2$-dimensional $\mathbb{F}_p$-vector spaces of $K_0/\wp(K_0)$. Given
such a subspace $V$, choose $\beta_1$ so that
$v_0(\beta_1)=\max\{v_0(\beta):\beta\in V\}$. Choose $\beta_2 \in V$
so that $\beta_1$ and $\beta_2$ span $V$ and replace $\beta_2$ by
another representative of $\beta_2+\mathbb{F}_p\beta_1+\wp(K_0)$ if
necessary so that $v_0(\beta_2)=\max\{v_0(\beta):\beta\in
\beta_2+\mathbb{F}_p\beta_1+\wp(K_0)\}$. As a result,
$v_0(\beta_i)=-u_i$ with $0\leq u_1\leq u_2$ and $p\nmid u_i$ unless
$u_1=0$, in which case $K_2/K_0$ is not fully ramified.
Restrict to the situation where $K_2/K_0$ is fully ramified. Then
because of our choices for $\beta_1$ and $\beta_2$,
$\{u_1, u_2\}$ is the set of upper ramification numbers for $K_2/K_0$.
The lower ramification numbers are $b_1=u_1$ and $b_2=u_1+p(u_2-u_1)$
\cite[IV \S3]{serre:local}. Choose $\sigma_i\in G$ so that
$(\sigma_i-1)x_j=\delta_{i,j}$ where
$$\delta_{i,j}=\begin{cases}1&\mbox{for }i=j,\\
0&\mbox{for }i\neq j.\end{cases}$$
Set $H_1=\langle\sigma_2\rangle$, so that $K_0(x_1)=K_1=K_2^{\sigma_2}$.
Since the norm
$N_{K_1/K_0}(x_1)=\wp(x_1)=\beta_1$, we have $v_1(x_1)=-b_1$ as well.
Similarly, $v_2(x_2)=-pu_2$.
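For example (purely as an illustration), if $p=3$, $v_0(\beta_1)=-1$ and
$v_0(\beta_2)=-5$ (so $u_1=1$ and $u_2=5$), then the lower ramification
numbers are $b_1=1$ and $b_2=1+3(5-1)=13$.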
\subsubsection{Cyclic}
As shown in \cite{schmid:1936,schmid:1937}, each $C_{p^2}$-extension
of $K_0$ can be associated with a Witt vector $(\beta_1, \beta_2)$. We
can assume that $\beta_1\in K_0$ is the element of maximum valuation
in its nonzero coset of $\wp(K_0)$ and that $\beta_2\in K_0$ is an
element of maximum valuation in its coset of
$\mathbb{F}_p\beta_1+\wp(K_0)$. If we abuse notation by identifying
these cosets with their representatives, this gives a bijection
between $C_{p^2}$-extensions and the one-dimensional
$\mathbb{F}_p$-vector spaces
$\{(a\beta_1,a\beta_2):a\in\mathbb{F}_p\}$.
Restrict now to the situation where $K_2/K_0$ is fully ramified. Let
$\sigma_1$ generate the Galois group $G$, and set
$\sigma_2=\sigma_1^p$ and $H_1=\langle \sigma_2\rangle$. Then
$K_1=K_0(x_1)$, where $\wp(x_1)=\beta_1$, is the fixed field of
$\sigma_2$. Without loss of generality, $(\sigma_1-1)x_1=1$. Our
choice of $\beta_1$ means that $v_0(\beta_1)=-b_1<0$ with $p\nmid
b_1$. Since $\beta_1$ is the norm of $x_1$, $v_1(x_1)=-b_1$. Thus
$b_1$ is the ramification number for $K_1/K_0$, and also the first
(lower) ramification number for $K_2/K_0$.
The second (lower) ramification number $b_2$ of $K_2/K_0$ is also the
ramification number for $K_2/K_1$. It is dependent upon both
$v_0(\beta_1)=-b_1$ and $v_0(\beta_2)=-u_2^*$, which due to our
assumption on $\beta_2$ satisfies $0\leq u_2^*$ and if $u_2^*\neq 0$
then $p\nmid u_2^*$. Indeed, we will proceed now to show that
$b_2=\max\{(p^2-p+1)b_1,pu_2^*-(p-1)b_1\}$, and thus that the upper
ramification numbers are $u_1=b_1<u_2=\max\{pb_1,u_2^*\}$.
Let
$D_1=(x_1^p+\beta_1^p-(x_1+\beta_1)^p)/p=-\sum_{i=1}^{p-1}\frac{1}{p}\binom{p}{i}x_1^i\beta_1^{p-i}\in
K_1$. Observe that $v_1(D_1)=-(p^2-p+1)b_1$. As explained in
\cite{schmid:1936,schmid:1937}, $K_0(x^*_2)$ with $\wp(x^*_2)=D_1$ is
a $C_{p^2}$-extension of $K_0$ that contains $K_1$ (and is associated
with the Witt vector $(\beta_1,0)$).
Moreover, every
$C_{p^2}$-extension of $K_0$ that contains $K_1$ arises as
$K_2=K_0(x_2)$ with $\wp(x_2)=D_1+\beta_2$.
Then $x_2=x_2^*+z_2$
where $\wp(z_2)=\beta_2$,
and
$K_2=K_0(x_2)$ is contained in the $C_{p^2}\times C_p$-extension
$K_0(x_2^*,z_2)$. Without loss of generality, we may assume that
$\sigma_1\in\mbox{Gal}(K_0(x_2^*,z_2)/K_0)$ satisfies $(\sigma_1-1)
z_2=0$. Furthermore
$(\sigma_1-1)x_2=(\sigma_1-1)x_2^*=C_1$ where $C_1=(x_1^p+1-(x_1+1)^p)/p=
-\sum_{i=1}^{p-1}\frac{1}{p}\binom{p}{i}x_1^i$, and $(\sigma_2-1)
x_2=1$. Notice that $v_1(C_1)=-(p-1)b_1$.
We now work with the ramification filtrations of two different
$C_p\times C_p$-extensions: $K_0(x_1,z_2)/K_0$ and $K_1(x_2^*,
z_2)/K_1$. There are three possibilities for the set of upper
ramification numbers for $K_0(x_1,z_2)/K_0$: If $b_1\neq u_2^*$, the
set is $\{b_1,u_2^*\}$. If $b_1=u_2^*$, the set is either $\{b_1\}$
or $\{b_1,v\}$ (for some $v<b_1$). In each case, we pass to the lower
ramification numbers for $K_0(x_1,z_2)/K_0$, using \cite[IV
\S3]{serre:local}. The ramification number for $K_1(z_2)/K_1$ is
therefore $b_1+p(u_2^*-b_1)$ (when $u_2^*>b_1$) or some integer $\leq
b_1$ (when $u_2^*\leq b_1$). Now consider $K_1(x_2^*, z_2)/K_1$. It
is easy to see that the ramification number for $K_1(x_2^*)/K_1$ is
$-v_1(D_1)=(p^2-p+1)b_1$. This means, since if $u_2^*\neq 0$ then
$p\nmid u_2^*$, that the ramification numbers for $K_1(x_2^*)/K_1$ and
for $K_1(z_2)/K_1$ are distinct. As a result, these are the two
distinct upper ramification numbers for $K_1(x_2^*, z_2)/K_1$.
Passing to the lower ramification numbers for $K_1(x_2^*, z_2)/K_1$,
considering all the cases, we find that the ramification number of
$K_1(x_2)/K_1$ is $b_2=\max\{(p^2-p+1)b_1,pu_2^*-(p-1)b_1\}$.
\subsection{The Galois scaffold}
Since $p\nmid v_0(\beta_1)$,
the set
$\{v_0(\beta_1^t):0\leq t\leq p-1\}$ is a complete set of residues
modulo $p$. As a result, it is generically the case that $\beta_2=\sum_{t=0}^{p-1}\mu_t^p\beta_1^t$ for
some $\mu_t\in K_0$. Moreover, since we are only interested in the
expression for $\beta_2$ in $K_0/\wp(K_0)$, we may assume that the
$t=0$ term satisfies $\mu_0^p\in \kappa$. Gather all terms except
$\mu_1^p\beta_1$ into an ``error term'' $\epsilon$. Replace $\mu_1$
with $\mu$, and let $m=-v_0(\mu)$.
Thus
$$\beta_2=\mu^p\beta_1+\epsilon$$ where we may assume either
$\epsilon\in \kappa$ or $p\nmid v_0(\epsilon)=-e<0$. Note that
$v_0(\epsilon)\not\equiv v_0(\mu^p\beta_1)\bmod p$. Thus $e\not\equiv
b_1\bmod p$. We are now prepared to state:
\begin{theorem}\label{scaffold}
Let $K_2/K_0$ be a fully ramified Galois extension of degree $p^2$.
Adopt the notation of this section, and assume that
$v_0(\epsilon)>v_0(\beta_2)+(p-1)b_1/p$. For $G\cong C_{p^2}$,
additionally assume $v_0(\beta_1^p)>v_0(\beta_2)+(p-1)b_1/p$. Then
there is a Galois scaffold. Define $\Psi_1\in K_0[G]$ by
$$\Psi_1+1=
\sigma_1\sigma_2^{[\mu]}=\sigma_1\sum_{i=0}^{p-1}\binom{\mu}{i}(\sigma_2-1)^i.$$
Let $\Psi_2=\sigma_2-1$.
Then for $\alpha\in K_2$ with $v_2(\alpha)\equiv b_2\bmod p^2$ and
$0\leq i,j\leq p-1$,
$$v_2\left(\Psi_2^i\Psi_1^j\alpha\right)=v_2(\alpha)+ib_2+jpb_1.$$
\end{theorem}
The proof of this theorem appears in \S2.3. First, we examine its
conditions in terms of the ramification numbers for $K_2/K_0$. In
\S2.1.2, we observed that for $G\cong C_{p^2}$,
$b_2=\max\{(p^2-p+1)b_1,pu_2^*-(p-1)b_1\}$. The requirement that
$v_0(\beta_1^p)>v_0(\beta_2)+(p-1)b_1/p$ means that
$pu_2^*-(p-1)b_1>p^2b_1$. Thus for $G\cong C_{p^2}$, $u_2=u_2^*$,
$b_2=pu_2-(p-1)b_1$ and so the requirement that
$v_0(\beta_1^p)>v_0(\beta_2)+(p-1)b_1/p$ is a strengthening of the
lower bound on $b_2$, from $b_2\geq (p^2-p+1)b_1$ to
\begin{equation}
b_2>p^2b_1.
\label{cyc-spread}
\end{equation}
The other condition $v_0(\epsilon)>v_0(\beta_2)+(p-1)b_1/p$, which is
a restriction for both $G\cong C_p\times C_p$ and $C_{p^2}$, implies
$\beta_2\equiv \mu^p\beta_1\bmod \mu^p\beta_1\mathfrak P_2$. Using
$b_2=b_1+
p(u_2-b_1)$ (and thus \eqref{cyc-spread}
when $G\cong C_{p^2}$), this means that
$v_0(\epsilon)>v_0(\beta_2)+(p-1)b_1/p$
can be rewritten as
\begin{equation}
b_2>pe.
\label{cyc-shape}
\end{equation}
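To see the rewriting, note that $v_0(\epsilon)=-e$ and $v_0(\beta_2)=-u_2$
(recall $u_2=u_2^*$ when $G\cong C_{p^2}$ under \eqref{cyc-spread}), so the
condition $v_0(\epsilon)>v_0(\beta_2)+(p-1)b_1/p$ becomes
\begin{equation*}
-e>-u_2+\frac{(p-1)b_1}{p}
\iff
pe<pu_2-(p-1)b_1=b_1+p(u_2-b_1)=b_2.
\end{equation*}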
\subsection{Proof of Theorem \ref{scaffold}}
The result for $G\cong C_p\times C_p$ follows from \cite[Thm
4.1]{elder:scaffold}. So we focus here on the result for $G\cong
C_{p^2}$ and recall the notation of \S2.1.2. There are three steps in
our argument. Thus three subsections.
\subsubsection{An explicit element satisfying the valuation criterion}
The hypothesis on $v_0(\epsilon)$ ensures at least that
$v_0(\epsilon) > v_0(\beta_2)=v_0(\mu^p \beta_1)$, so that $-b_1-pm <
-e$ and $u_2^*=pm + b_1$. Under this weaker assumption, we determine
$\epsilon_1\in K_1$ such that $X_2=x_2-\mu x_1+\epsilon_1\in K_2$ has
valuation $v_2(X_2)=-b_2=-\max\{pu_2^*-(p-1)b_1,(p^2-p+1)b_1\}$. Once
this is done, $\rho=\binom{X_2}{p-1}\binom{x_1}{p-1}\in K_2$ satisfies
$v_2(\rho)\equiv b_2\bmod p^2$.
The element $\epsilon_1\in K_1$ is determined by $\epsilon$.
Recall that either $\epsilon\in \kappa$ or $p\nmid -e<0$. If
$\epsilon\in \kappa$, we simply let $\epsilon_1=0$ (and also set
$E_1=\epsilon$). The interesting case occurs when $\epsilon\not\in \kappa$
and thus $K_1(z)/K_0$ with $\wp(z)=\epsilon$ is a fully ramified
$C_p\times C_p$ extension with upper ramification numbers
$e=-v_0(\epsilon)$ and $b_1$. Recall $e\not\equiv b_1\bmod p$. So
$e\neq b_1$. Passing to the lower numbering for $K_1(z)/K_0$ using
\cite[IV \S3]{serre:local}, we find that the ramification number for
$K_1(z)/K_1$ is $\max\{e,b_1+p(e-b_1)\}$ (either $e$ when $e< b_1$, or
$pe-(p-1)b_1$ when $e>b_1$). Using this information regarding
$K_1(z)/K_1$ there must be a coset representative $E_1$ for the coset
$\epsilon+\wp(K_1)$ in $K_1/\wp(K_1)$ such that
$v_1(E_1)=-\max\{e,b_1+p(e-b_1)\}$. Thus
$E_1=\epsilon+\wp(\epsilon_1)$ for some
$\epsilon_1\in K_1$. Since $v_1(E_1)>v_1(\epsilon)$, we have
$-pe=v_1(\epsilon)=v_1(\wp(\epsilon_1))$. This means that
$v_1(\epsilon_1)=-e$.
Observe, based upon \S2.1.2, that
$\wp(x_2)=D_1+\beta_2=D_1+\mu^p\beta_1+\epsilon$ and $\wp(\mu
x_1)=\mu^px_1^p-\mu x_1=\mu^p(x_1+\beta_1)-\mu
x_1=\wp(\mu)x_1+\mu^p\beta_1$. Therefore $\wp(X_2)=D_1-\wp(\mu
)x_1+E_1\in K_1$.
Because $-b_1-pm<-e$, $v_1(\wp(\mu
)x_1)=-b_1-p^2m<(p-1)b_1-pe\leq v_1(E_1)$. Thus
$v_1(-\wp(\mu)x_1+E_1)=-b_1-p^2m$. Furthermore
$v_1(D_1)=-(p^2-p+1)b_1$. Thus
$v_1(\wp(X_2))=\min\{-b_1-p^2m,-(p^2-p+1)b_1\}=-b_2$. Since
$\mbox{Norm}_{K_2/K_1}(X_2)=\wp(X_2)$, $v_2(X_2)=-b_2$.
\subsubsection{A Galois scaffold for the explicit element in \S2.3.1}
Observe that
$(\sigma_1-1)X_2=C_1-\mu+(\sigma_1-1)\epsilon_1$
and thus
$(\sigma_1-1)X_2=-\mu+\mathcal{E}$ where $\mathcal{E} =C_1+(\sigma_1-1)\epsilon_1\in
K_1$ satisfies $v_1(\mathcal{E} )=\min\{-(p-1)b_1,b_1-e\}$. Note that for
$e>0$ we have $p\nmid e$. So $(p-1)b_1\neq e-b_1$. In any case,
(\ref{cyc-spread}) means $-(p-1)b_1>b_1-b_2/p$
and (\ref{cyc-shape}) means that $b_1-e>b_1-b_2/p$. Together they yield
$v_1(\mathcal{E} )>b_1-b_2/p$. Thus $v_2(\mathcal{E} )>pb_1-b_2$.
Using truncated
exponentiation and Vandermonde's Convolution Identity,
$$\sigma_2^{[\mu]}\binom{X_2}{p-1}=\sum_{i=0}^{p-1}\binom{\mu}{i}(\sigma_2-1)^i
\binom{X_2}{p-1}=\sum_{i=0}^{p-1}\binom{\mu}{i} \binom{X_2}{p-i-1}=
\binom{X_2+\mu}{p-1}.$$ Therefore
$\sigma_1\sigma_2^{[\mu]}\binom{X_2}{p-1}=\binom{X_2+\mathcal{E} }{p-1}$. If
we expand $\binom{X_2+\mathcal{E} }{p-1}$
using Vandermonde's Convolution Identity, we
notice that for $0\leq i<p-1$,
$v_2(\binom{X_2}{i}\binom{\mathcal{E}}{p-i-1})>(p-i-1)pb_1-(p-1)b_2$.
So $v_2(\binom{X_2}{i}\binom{\mathcal{E}}{p-i-1})> pb_1-(p-1)b_2=v_2(\binom{X_2}{p-1}/x_1)$
for $0\leq i<p-1$ and thus
$$\sigma_1\sigma_2^{[\mu]}\binom{X_2}{p-1}=\binom{X_2+\mathcal{E} }{p-1}\equiv
\binom{X_2}{p-1}\bmod \binom{X_2}{p-1}\frac{1}{x_1} \mathfrak P_2.$$
Let $\Psi_1=\sigma_1\sigma_2^{[\mu]}-1$ and observe that for
$0\leq i\leq p-1$, $\Psi_1^i\binom{X_2}{p-1}\binom{x_1}{p-1}\equiv
\binom{X_2}{p-1}\binom{x_1}{p-i-1}\bmod
\binom{X_2}{p-1}\binom{x_1}{p-i-1}\mathfrak P_2$, which means that
with $\Psi_2=\sigma_2-1$,
$$\Psi_2^i\Psi_1^j\rho\equiv
\binom{X_2}{p-i-1}\binom{x_1}{p-j-1}\bmod
\binom{X_2}{p-i-1}\binom{x_1}{p-j-1}\mathfrak P_2,$$ and therefore
$v_2(\Psi_2^i\Psi_1^j\rho)=v_2(\rho)+ib_2+jpb_1$ for
$0\leq i,j\leq p-1$.
Note that $\{v_2(\rho)+ib_2+jpb_1:
0\leq i,j\leq p-1\}$
is a complete set of residues modulo $p^2$.
\subsubsection{The Galois scaffold holds for any element
$\alpha\in K_2$ with $v_2(\alpha)\equiv b_2\bmod p^2$} Express
$\alpha\in K_2$ with $v_2(\alpha)\equiv b_2\bmod p^2$ in the
$K_0$-basis $\{\Psi_2^m\Psi_1^n\rho: 0\leq m,n\leq p-1\}$. So
$\alpha=\sum_{0\leq m,n<p}A_{m,n}\Psi_2^m\Psi_1^n\rho$ for
some $A_{m,n}\in K_0$. Since $v_2(\alpha)\equiv v_2(\rho)\bmod p^2$,
$A_{0,0}\neq 0$ and it will be enough to prove the result
for $\alpha'=\alpha/A_{0,0}$. Therefore, without loss of
generality, we assume $A_{0,0}=1$ and $v_2(A_{m,n})+mb_2+npb_1>0$ for
$(m,n)\neq (0,0)$. Now apply $\Psi_2^i\Psi_1^j$ for $0\leq
i,j\leq p-1$ to $\alpha$. Clearly
$v_2(\Psi_2^i\Psi_1^j\rho)=v_2(\alpha)+ib_2+jpb_1$. The only
question then is whether $v_2(\Psi_2^i\Psi_1^j\cdot A_{m,n}
\Psi_2^m\Psi_1^n\rho)>v_2(\alpha)+ib_2+jpb_1$ for $(m,n)\neq
(0,0)$. Since $\Psi_2^p=0$ and $\Psi_1^p=\Psi_2$, the
interesting cases, when $\Psi_2^i\Psi_1^j\cdot
\Psi_2^m\Psi_1^n\neq 0$, occur only when $j+n<p$ and $i+m<p$,
or $j+n\geq p$ and $i+m+1<p$. Consider them separately. If $j+n<p$ and
$i+m<p$, then $v_2(\Psi_2^i\Psi_1^j\cdot A_{m,n}
\Psi_2^m\Psi_1^n\rho)=v_2(\rho)+v_2(A_{m,n})+(i+m)b_2+(j+n)pb_1>
v_2(\rho)+ib_2+jpb_1$. Of course $v_2(\rho)=v_2(\alpha)$. If $j+n\geq
p$ and $i+m+1<p$, then $v_2(\Psi_2^i\Psi_1^j\cdot A_{m,n}
\Psi_2^m\Psi_1^n\rho)=v_2(\rho)+v_2(A_{m,n})+(i+m+1)b_2+(j+n-p)pb_1>
v_2(\rho)+ib_2+jpb_1+(b_2-p^2b_1)$. Recall restriction
(\ref{cyc-spread}) that $b_2>p^2b_1$.
\subsection{Proof of Theorem \ref{assoc-main}}
The proof for $G=\mbox{Gal}(K_2/K_0)\cong C_p\times C_p$ is contained
in \cite{byott:scaffold}. Here we adjust that argument so that it
applies to $G\cong C_{p^2}$. Let $K_2/K_0$ satisfy the conditions in
Theorem \ref{scaffold}. So, in particular, $b_2\equiv b_1\equiv
r(b)\bmod p^2$. Recall
$\Psi_1^p=\Psi_2$ and
$\Psi_2^p=0$. This means that if we represent every nonnegative
integer $p$-adically ({\em i.e.} for $a\in\mathbb{Z}$ with $a\geq 0$
write $a=\sum_{i=0}^{\infty}a_{(i)}p^i$ for some $0\leq a_{(i)}\leq
p-1$), then we may define
$$\Psi^{(a)}=\begin{cases}\Psi_2^{a_{(1)}}\Psi_1^{a_{(0)}}& a<p^2,\\
0 &\mbox{ otherwise,} \end{cases}$$
and find that
$\Psi^{(a)}\Psi^{(a')}=\Psi^{(a+a')}$.
Furthermore,
if we define a function $\mathfrak b$ from the nonnegative integers to $\mathbb{Z}\cup\{\infty\}$:
$$\mathfrak b(a)=\begin{cases} (1+a_{(1)})b_2+a_{(0)}pb_1 & a<p^2,\\ \infty
&\mbox{ otherwise,} \end{cases}$$ then because of
Theorem \ref{scaffold}, given any $\rho\in K_2$ with
$v_2(\rho)= b_2$, we have
$v_2(\Psi^{(a)}\rho)=\mathfrak b(a)$.
For $0\leq a<p^2$, set
$$d_a=\left\lfloor\frac{\mathfrak b(a)}{p^2}\right\rfloor.$$
So $\mathfrak b(a)=d_ap^2+r(\mathfrak b(a))$ where $r(\mathfrak b(a))$
is the least nonnegative residue modulo $p^2$.
Let
$\rho_*\in K_2$ with $v_2(\rho_*)=r(b_2)$. Recall that $t$ is a
uniformizer for $K_0=\kappa((t))$. Set $\rho=t^{d_0}\rho_*$, so
$v_2(\rho)=b_2$. Moreover, for $0\leq a$ set
$$\rho_a=t^{-d_a}\Psi^{(a)}\cdot \rho,$$ which means that $\rho_a=0$
for $a\geq p^2$. Note that $v_2(\rho_a)=r(\mathfrak b(a))$ for $0\leq
a<p^2$. Thus $\{v_2(\rho_a):0\leq a<p^2\}=\{0,\ldots, p^2-1\}$,
$\{\rho_a\}_{0\leq a<p^2}$ is an $\mathfrak O_0$-basis for $\mathfrak O_2$, and
the elements $\Psi^{(a)}\rho$
span $K_2$ over $K_0$. By comparing dimensions, we see that $\rho$
generates a normal basis for the extension $K_2/K_0$, and
$\{\Psi^{(a)}\}_{0\leq a<p^2}$ is a $K_0$-basis for the group algebra
$K_0[G]$.
Observe that
\begin{equation}\label{psi-action}
\Psi^{(a_1)}\cdot \rho_{a_2}=t^{d_{a_1+a_2}-d_{a_2}}\rho_{a_1+a_2},
\end{equation}
and define
$w_j=\min\{d_{j+a}-d_{a}:0\leq a\leq j+a<p^2\}$ where
$0\leq j<p^2$. Note, in particular, that
$w_0=0$ and that we have $w_j\leq d_j-d_0$ for all $j$.
\begin{lemma}\label{d-lemma}
The associated order $\mathfrak A_{K_2/K_0}$ of $\mathfrak O_2$ has $\mathfrak O_0$-basis $\{t^{-w_j}\Psi^{(j)}\}_{0\leq j<p^2}$.
Moreover, $\mathfrak O_2$ is a free module over $\mathfrak A_{K_2/K_0}$ if and only if
$w_j=d_j-d_0$ for all $j$, and in this case $\rho_*$ is a
free generator of $\mathfrak O_2$ over $\mathfrak A_{K_2/K_0}$.
\end{lemma}
\begin{proof}
Follow \cite[Theorem 2.3]{byott:scaffold}. Since
$\{\Psi^{(j)}:0\leq j<p^2\}$ is a $K_0$-basis for $K_0[G]$, any
element $\alpha\in K_0[G]$ may be written
$\alpha=\sum_{j=0}^{p^2-1}c_j\Psi^{(j)}\mbox{ with }c_j\in K_0$.
Using (\ref{psi-action}) and the fact that $\{\rho_a\}_{0\leq a<p^2}$
is an $\mathfrak O_0$-basis for $\mathfrak O_2$, we find that $\alpha\in
\mathfrak A_{K_2/K_0}$ is equivalent to
$\alpha\rho_a=\sum_{j=0}^{p^2-1}c_j\Psi^{(j)}\rho_a\in \mathfrak O_2$ for all
$0\leq a<p^2$. This in turn is equivalent to $c_jt^{d_{j+a}-d_a}\in
\mathfrak O_0$ or $v_0(c_j)\geq d_a-d_{j+a}$ for all $0\leq a\leq
a+j<p^2$. But this is equivalent to $-v_0(c_j)\leq w_j$ for all $0\leq
j<p^2$. The first statement is proven.
Consider the second. Suppose that $w_j=d_j-d_0$ for all $j$. As
$\rho_*=\rho_0$, (\ref{psi-action}) yields $t^{-w_j}\Psi^{(j)}\cdot
\rho_*=\rho_j$. Thus the basis elements $\{t^{-w_j}\Psi^{(j)}:0\leq
j<p^2\}$ take $\rho_*$ to the basis elements $\{\rho_j:0\leq j<p^2\}$
of $\mathfrak O_2$, which means that $\mathfrak O_2$ is a free
$\mathfrak A_{K_2/K_0}$-module. Conversely, suppose that $\mathfrak O_2$ is a free
$\mathfrak A_{K_2/K_0}$-module. So $\mathfrak O_2=\mathfrak A_{K_2/K_0}\eta$ for some
$\eta\in K_2$. Since $1\in \mathfrak A_{K_2/K_0}$, $\eta\in \mathfrak O_2$ and so
$\eta=\sum_{r=0}^{p^2-1}x_r\rho_r$ for some $x_r\in \mathfrak O_0$. We have
two $\mathfrak O_0$-bases for $\mathfrak O_2$, $\{\rho_j:0\leq j<p^2\}$ and
$\{t^{-w_i}\Psi^{(i)}\eta :0\leq i<p^2\}$. Because of
(\ref{psi-action}) the matrix that takes
the first of these to the second,
namely $M=(a_{i,j})$, is
upper triangular with
$$a_{i,j}=\begin{cases}0&i>j,\\
x_{j-i}t^{d_j-d_{j-i}-w_i} &i\leq j.
\end{cases}$$
Furthermore, it must have coefficients in $\mathfrak O_0$ and unit
determinant. Recall $x_r\in\mathfrak O_0$, so in particular $x_0\in\mathfrak O_0$.
Because the coefficients on the diagonal lie in $\mathfrak O_0$,
$x_0t^{-w_j+d_j-d_0}\in\mathfrak O_0$. Because the determinant
$\prod_{j=0}^{p^2-1}a_{j,j}=x_0^{p^2}
\prod_{j=0}^{p^2-1}t^{d_j-d_0-w_j}$
is a unit,
we have $w_j=d_j-d_0$ for all
$0\leq j<p^2$, as required.
\end{proof}
The condition $w_j=d_j-d_0$ for all $0\leq j<p^2$ can be restated as
$d_{x + y}-d_x\geq d_y-d_0$ for all $0\leq y<p^2$ and $0\leq
x<p^2-y$. In other words, $d_{x + y}+d_0\geq d_x+d_y$ for all $0\leq
x,y$ and $0\leq x+y<p^2$. As this is symmetric in $x,y$ we may assume
$y\leq x$. Thus we are concerned with the condition
\begin{equation}\label{cond1}
d_{x + y}+d_0\geq d_x+d_y\mbox{ for all }0\leq y\leq x\leq
x+y<p^2.
\end{equation}
We have the $p$-adic expressions: $x=x_{(0)}+x_{(1)}p$ and
$y=y_{(0)}+y_{(1)}p$. When we add these expressions, we get
$x+y=c_{(0)}+c_{(1)}p+\epsilon_{(1)}p^2$ where $0\leq c_{(i)}\leq
p-1$, $x_{(0)}+y_{(0)}=c_{(0)}+p\epsilon_{(0)}$,
$\epsilon_{(0)}+x_{(1)}+y_{(1)}=c_{(1)}+p\epsilon_{(1)}$ and the
$\epsilon_{(i)}\in\{0,1\}$ depend upon whether there is a carry. Note
$\epsilon_{(1)}=0$, since $x+y<p^2$.
Recall $b_2=b_1+p^2m$. Replace $b_2$ in
(\ref{cond1}) with $b_2=b_1+p^2m$, and get
\begin{equation}\label{cond'}
\left\lfloor\frac{(1+x'+y' )b_1+\epsilon_{(0)}D}{p^2}\right \rfloor+
\left\lfloor\frac{b_1}{p^2}\right \rfloor \geq
\left\lfloor\frac{(1+x')b_1}{p^2}\right \rfloor+
\left\lfloor\frac{(1+y')b_1}{p^2}\right \rfloor,
\end{equation}
where $x'=x_{(1)}+px_{(0)}$, $y'=y_{(1)}+py_{(0)}$ and
$D=(b_2-p^2b_1)$, all over the same range of $x,y$.
There are two cases to consider: $\epsilon_{(0)}=0$ and
$\epsilon_{(0)}=1$. We consider the case
$\epsilon_{(0)}=1$ first.
Using $b_2=b_1+p^2m$, observe that (\ref{cyc-spread}) means $m\geq
b_1-\lfloor b_1/p^2\rfloor$ and thus by replacing $m$ in
$b_2=b_1+p^2m$ with $ b_1-\lfloor b_1/p^2\rfloor$, we find $D\geq
b_1-p^2\lfloor b_1/p^2\rfloor$. It is enough therefore to show that
(\ref{cond'}) with $\epsilon_{(0)}=1$ holds when $D$ is replaced by
$b_1-p^2\lfloor b_1/p^2\rfloor$. In other words, it is enough to show
that
$$\left\lfloor\frac{(2+x'+y' )b_1}{p^2}\right \rfloor \geq
\left\lfloor\frac{(1+x')b_1}{p^2}\right \rfloor+
\left\lfloor\frac{(1+y')b_1}{p^2}\right \rfloor.$$ But this follows
from the generic fact: $\lfloor (a+b)/c\rfloor \geq \lfloor
a/c\rfloor+ \lfloor b/c\rfloor$ for positive integers $a,b,c$. The
case of (\ref{cond'}) for those $x,y$ with $\epsilon_{(0)}=0$ (so that
$x_{(i)}+y_{(i)}<p$ for both $i=0,1$) is equivalent to
\cite[(6)]{byott:scaffold}, which, because of
\cite{byott:QJM,byott:scaffold}, is equivalent to $r(b)\mid p^2-1$.
\section{Examples: $p=2$ and $3$}
In this section, we determine necessary conditions for a Galois
scaffold to exist when $p=2, 3$. Assuming the case $p=3$ to be
representative of the general case, $p$ odd, our results
suggest that the conditions in Theorem \ref{scaffold} are sharp.
We treat $p=2$ for the sake of completeness. Note that the condition
on the residue of the ramification numbers in Theorem \ref{assoc-main}
holds vacuously. Consequently, {\em every} fully ramified $C_2\times
C_2$-extension possesses a Galois scaffold \cite[Thm
5.1]{elder:scaffold}, and furthermore the ring of integers is free
over its associated order in {\em every} fully ramified $C_2\times
C_2$-extension \cite[Cor 1.3]{byott:scaffold}. This suggests that
$p=2$ is a special case. It also explains why we only consider
$C_4$-extensions here.
\subsection{Outline}
Recall that a Galois scaffold for an extension of degree $p^2$ requires two
elements $\Psi_2,\Psi_1\in K_0[G]$ satisfying
(\ref{strong-scaff}), (\ref{scaff}). Here we
outline a general procedure which, in principle, should enable us to
obtain a necessary condition for the existence of a Galois scaffold
for arbitrary $p$. In the remainder of this section, we implement this
procedure.
Adopt the notation of \S 2.1. So whether $G\cong C_p\times
C_p$ or $C_{p^2}$, we have $K_1=K_0(x_1)$ with $v_1(x_1)=-b_1$. Our
first step is then to identify an element $X_2\in K_2$ such that
$v_2(X_2)=-b_2$. Once this is done, we have
\begin{equation}
\alpha_{i,j}=\binom{X_2}{i}\binom{x_1}{j}, \quad 0\leq i,j<p
\label{alpha}
\end{equation}
satisfying $v_2(\alpha_{i,j})=-ib_2-jpb_1$. So
$\{v_2(\alpha_{i,j}):0\leq i,j<p\}$ is a complete set of residues
modulo $p^2$, and thus $\mathcal{B}=\{\alpha_{i,j}:0\leq i,j<p\}$ is a
basis for $K_2$ over $K_0$. Notice that $\alpha_{p-1,p-1}$ satisfies
$v_2(\alpha_{p-1,p-1})\equiv b_2\bmod p^2$.
A basis for $K_0[G]$ is given by $\{(\sigma_2-1)^i(\sigma_1-1)^j:0\leq
i,j<p\}$. Our next step in each case is to express
$(\sigma_2-1)^i(\sigma_1-1)^j\alpha_{p-1,p-1}$ in terms of
$\mathcal{B}$. The fact that for each $0\leq i<p$ both
$(\sigma_2-1)^i(\sigma_1-1)^0\alpha_{p-1,p-1}$ and
$(\sigma_2-1)^{p-1}(\sigma_1-1)^i\alpha_{p-1,p-1}$ are expressed as a
single element of $\mathcal{B}$ motivates the use of binomial
coefficients to create our basis $\mathcal{B}$ (rather than the more
naive basis $\{X_2^ix_1^j:0\leq i,j<p\}$).
At this point, we are prepared to identify elements $\Theta_j\in K_0[G]$
for $0\leq j<p$ such that
$v_2(\Theta_j\alpha_{p-1,p-1})=v_2(\alpha_{p-1,p-1})+jpb_1$. They
exist because $\alpha_{p-1,p-1}$ generates a normal basis
\cite{elder:cor-criterion}. Because
$\{v_2((\sigma_2-1)^i\Theta_j\alpha_{p-1,p-1}):0\leq i,j<p\}$ is a
complete set of residues, $K_2= \sum_{0\leq i,j<p}K_0\cdot
(\sigma_2-1)^i\Theta_j\alpha_{p-1,p-1}$. Therefore
$\{(\sigma_2-1)^i\Theta_j:0\leq i,j<p\}$ is a basis for $K_0[G]$.
If there is a Galois scaffold there must be $\Psi_2, \Psi_1$ in the
augmentation ideal $(\sigma-1:\sigma\in G)$ of $K_0[G]$ satisfying
\eqref{strong-scaff} and \eqref{scaff}. Because of \eqref{scaff},
there exist $0\leq i,j<p$ such that
$v_2(\Psi_2^i\Psi_1^j\alpha_{p-1,p-1})\equiv
v_2(\alpha_{p-1,p-1})+pb_1\bmod p^2$. Thus
$v_2(a\Psi_2^i\Psi_1^j\alpha_{p-1,p-1})= v_2(\alpha_{p-1,p-1})+pb_1$
for some $a\in K_0$. Clearly $a\Psi_2^i\Psi_1^j\in(\sigma-1:\sigma\in
G)^{i+j}$. Lemma 3.1 below gives $i+j=1$. Thus, without loss of generality,
we assume $i=0$ and $j=1$ and that $v_2(\Psi_1\alpha_{p-1,p-1})=
v_2(\alpha_{p-1,p-1})+pb_1$. Note that the augmentation ideal
$(\sigma-1:\sigma\in G)$ of $K_0[G]$ is also its Jacobson radical and
unique maximal ideal. Express $\Psi_1=\sum_{0\leq
i,j<p}a_{i,j}(\sigma_2-1)^i\Theta_j$ for some $a_{i,j}\in K_0$ with
$a_{0,0}=0$, and proceed to impose the first requirement of a Galois
scaffold, namely (\ref{strong-scaff}). How this is done depends upon $p$.
\begin{lemma}
Let $\alpha\in K_2$ with $v_2(\alpha)\equiv b_2\bmod p^2$. If
$\theta$ lies in the augmentation ideal of $K_0[G]$,
$(\sigma-1:\sigma\in G)$, and $v_2(\theta\alpha)=v_2(\alpha)+pb_1$,
then $\theta\not\in (\sigma-1:\sigma\in G)^2$.
\end{lemma}
\begin{proof}
Let $\mbox{Tr}_{K_2/K_1}=(\sigma_2-1)^{p-1}$ denote the trace from $K_2$ down to $K_1$.
Using \cite[V\S3 Lemma 4]{serre:local},
$v_1(\mbox{Tr}_{K_2/K_1}\alpha)=(v_2(\alpha)+(p-1)b_2)/p \equiv
b_2\equiv b_1\bmod p$. So
$v_1((\sigma_1-1)^i\mbox{Tr}_{K_2/K_1}\alpha)\equiv (i+1)b_1\bmod p$
for $0\leq i<p$.
It is also the case that
$v_1(\mbox{Tr}_{K_2/K_1}\theta\alpha)=(v_2(\alpha)+pb_1+(p-1)b_2)/p
\equiv b_2+b_1\equiv 2b_1\bmod p$.
In particular,
$v_1(\mbox{Tr}_{K_2/K_1}\theta\alpha)<\infty$.
Let $\theta=\sum_{0\leq i,j<p}a_{i,j}(\sigma_1-1)^i(\sigma_2-1)^j$ with
$a_{i,j}\in K_0$. Since $\theta$ lies in the
augmentation ideal of $K_0[G]$, $a_{00}=0$.
If $\theta\in
(\sigma-1:\sigma\in G)^2$, then $a_{10}=0$ as well.
As a result, $\mbox{Tr}_{K_2/K_1}\theta\alpha=
\sum_{i=2}^{p-1}a_{i,0}(\sigma_1-1)^i(\sigma_2-1)^{p-1}\alpha=\sum_{i=2}^{p-1}a_{i,0}(\sigma_1-1)^i\mbox{Tr}_{K_2/K_1}\alpha$.
If $p=2$, the contradiction arises
because we cannot have both $v_1(\mbox{Tr}_{K_2/K_1}\theta\alpha)<\infty$ and
$\mbox{Tr}_{K_2/K_1}\theta\alpha=0$. If $p>2$, the contradiction arises because
for $2\leq i<p$, $2b_1\not\equiv (i+1)b_1\bmod p$.
\end{proof}
\subsection{$C_4$-extensions}
There are two conditions stated in Theorem \ref{scaffold}. They are sufficient
for a Galois scaffold. For $p=2$, one of these conditions holds
vacuously, which leaves $b_2>4b_1$, namely (\ref{cyc-spread}), as the
only interesting condition. Here we show that
$b_2\geq
4b_1-1$ is both necessary and sufficient for a Galois scaffold to
exist in a fully ramified $C_4$-extension.
Assume the notation of \S2.1.2. So $v_0(\beta_1)=-b_1<0$ with $b_1$ odd, and
$K_2/K_0$ satisfies $K_2=K_0(x_2)$ with
$\wp(x_2)=\beta_1x_1+\mu^2\beta_1+\epsilon$ where $\mu\in K_0$, and
because $p=2$, $\epsilon\in \kappa$. Recall
$(\sigma_1-1)x_2=x_1$ with $\wp(x_1)=\beta_1$. Let $m=-v_0(\mu)$ and
$X_2=x_2-\mu x_1\in K_2$. Then
$\wp(X_2)=(\beta_1+\wp(\mu))x_1+\epsilon$ where
$v_1((\beta_1+\wp(\mu))x_1)=-\max\{3b_1,b_1+4m\}=-b_2$. Thus
$v_2(X_2)=-b_2$. The basis $\mathcal{B}$ is $\{\alpha_{i,j}:0\leq
i,j\leq 1\} =\{1, x_1, X_2, X_2x_1\}$.
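The expression for $\wp(X_2)$ above can be checked directly (a short verification, using only $\wp(x_1)=\beta_1$ and $\wp(x_2)=\beta_1x_1+\mu^2\beta_1+\epsilon$; note $X_2=x_2+\mu x_1$ in characteristic $2$):
\begin{align*}
\wp(X_2)&=(x_2+\mu x_1)^2+(x_2+\mu x_1)
=\wp(x_2)+\mu^2(x_1^2+x_1)+(\mu^2+\mu)x_1\\
&=\big(\beta_1x_1+\mu^2\beta_1+\epsilon\big)+\mu^2\beta_1+\wp(\mu)x_1
=(\beta_1+\wp(\mu))x_1+\epsilon.
\end{align*}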
Note that $(\sigma_1-1)X_2=x_1-\mu$. So $\sigma_1X_2x_1=(X_2+x_1-\mu)(x_1+1)$
and thus $(\sigma_1-1)X_2x_1=X_2+\mu x_1+\beta_1+\mu$. Therefore
for $0\leq i,j\leq 1$ we have
$(\sigma_2-1)^i(\sigma_1-1)^j\alpha_{1,1}=\alpha_{1-i,1-j}+\epsilon_{i,j}$,
where the error term $\epsilon_{i,j}$ is zero for $(i,j)\in
\{(0,0),(1,0),(1,1)\}$, and
$\epsilon_{0,1}=\mu\alpha_{0,1}+(\beta_1+\mu)\alpha_{0,0}$.
Use this to find
$\Theta_1=(\sigma_1\sigma_2^{[\mu]}-1)+\beta_1(\sigma_1-1)(\sigma_2-1)$,
so that
the effect of $\Theta_1$, $(\sigma_2-1)$, $(\sigma_2-1)\Theta_1$
on $\alpha_{1,1}=X_2x_1$, $\alpha_{1,0}=X_2$, $\alpha_{0,1}=x_1$ is as follows:
\begin{equation}\label{table}
\begin{array}{r|ccc}
&X_2x_1&X_2&x_1\\ \hline
\Theta_1& X_2 &x_1&1\\
(\sigma_2-1)&x_1 &1&0\\
(\sigma_2-1)\Theta_1& 1&0&0
\end{array}\end{equation}
Now $\alpha_{1,1}$ satisfies the valuation criterion for a normal
basis generator, namely $v_2(\alpha_{1,1})\equiv b_2 \bmod 4$. Thus, if there is a
Galois scaffold, then there is a $\Psi_1$ in the augmentation ideal of
$K_0[G]$, which is expressible as
$\Psi_1=a_{0,1}\Theta_1+a_{1,0}(\sigma_2-1)+a_{1,1}(\sigma_2-1)\Theta_1$
with $a_{i,j}\in K_0$, such that $v_2(\Psi_1 \rho )=v_2(\rho)+2b_1$
for all $\rho\in K_2$ with $v_2(\rho)\equiv v_2(\alpha_{1,1})\bmod 4$.
Since $v_2(\Theta_1 \alpha_{1,1})=v_2(\alpha_{1,1})+2b_1$,
$v_2(a_{0,1})=0$ and $v_2(a_{i,j})+ib_2+2jb_1>2b_1$ for $(i,j)\neq
(0,1)$. Multiplying $\rho$ by an element of $K_0$ if necessary, we
may assume, without loss of generality, that $\rho=X_2x_1
+aX_2+bx_1+c$ with $a,b,c\in K_0$. So that
$v_2(\rho)=v_2(\alpha_{1,1})$, we require $v_2(a)>-2b_1$,
$v_2(b)>-b_2$ and $v_2(c)>-b_2-2b_1$. Note that
$\Psi_1\rho=a_{0,1}X_2+(a_{0,1}a+a_{1,0})x_1
+(a_{0,1}b+a_{1,0}a+a_{1,1})$. Using the bounds on $v_2(a)$,
$v_2(b)$, $v_2(c)$ and the $v_2(a_{i,j})$, we find $\Psi_1\rho\equiv
a_{0,1}(X_2+ax_1) \bmod X_2\mathfrak P_2$, which means that for a Galois
scaffold we require $v_2(ax_1)>v_2(X_2)$ for all $a\in K_0$ with
$v_0(a)\geq \lceil -2b_1/4\rceil$. Thus $\lfloor
b_1/2\rfloor\leq\lfloor(b_2-2b_1)/4\rfloor$, which since $b_1$ is odd
is equivalent to $b_2\geq 4b_1-1$. On the other hand, if $b_2\geq
4b_1-1$ a Galois scaffold exists. This follows from the observation
that for $\rho^*\in K_2$ with $v_2(\rho^*)$ odd, we have
$v_2((\sigma_2-1)\rho^*)=v_2(\rho^*)+b_2$.
\subsection{$C_9$-extensions}
We prove that for $C_9$-extensions the conditions in Theorem
\ref{scaffold} are sharp.
Assume $p=3$ in \S2.1.2. So
$v_0(\beta_1)=-b_1<0$ with $p\nmid b_1$. Either $v_0(\beta_2)<0$ with
$p\nmid v_0(\beta_2)$ or $\beta_2\in \kappa$.
In any case, there are $\mu_1$, $\mu_2 \in K_0$ (either or
both of which may be $0$)
and $k\in \kappa$
such that
$\beta_2=\mu_1^3\beta_1+\mu_2^3\binom{\beta_1}{2}+k$.
Let
$m_i=-v_0(\mu_i)$ for $i=1$, $2$. If $\beta_2 \neq 0$,
$v_0(\beta_2)=-\max\{3m_1+b_1,3m_2+2b_1,0\}$. Our $C_9$-extension
$K_2/K_0$ satisfies $K_2=K_0(x_2)$ with
$\wp(x_2)=-\beta_1x_1^2-\beta_1^2x_1+\mu_1^3\beta_1+\mu_2^3\binom{\beta_1}{2}+k$,
and $(\sigma_1-1)x_2=-x_1^2-x_1$ with $\wp(x_1)=\beta_1$. Let
$X_2=x_2-\mu_1 x_1-\mu_2\binom{x_1}{2}$. Then $\wp(X_2)
=-\beta_1x_1^2-\beta_1^2x_1-\wp(\mu_1)x_1-\wp(\mu_2)\binom{x_1}{2}-\mu_2^3\beta_1x_1+k$. Notice
that $v_1(\wp(X_2))=-b_2$ and
$(\sigma_1-1)X_2=-x_1^2-x_1-\mu_1-\mu_2x_1$. Thus $v_2(X_2)=-b_2$. We
have our basis $\mathcal{B}=\left \{\alpha_{i,j}:0\leq i,j\leq 2\right
\}$ using (\ref{alpha}).
Verify, using a software package like Maple, that
for $0\leq i,j\leq 2$ we have
$(\sigma_2-1)^i(\sigma_1-1)^j\alpha_{2,2}=\alpha_{2-i,2-j}+\epsilon_{i,j}$,
where the error term $\epsilon_{i,j}$ is zero for $(i,j)\in
\{(0,0),(1,0),(2,0),(2,1),(2,2)\}$. Otherwise
\begin{eqnarray*}
\epsilon_{0,1}&=&(1-\mu_1-\mu_2)(\alpha_{1,2}+\alpha_{1,1})+\beta_1\alpha_{1,1}+(\mu_2-1)\beta_1\alpha_{1,0}
\\ & &
+(\mu_1\mu_2+\mu_1-\mu_1^2+\mu_2-\mu_2^2)(\alpha_{0,2}+\alpha_{0,1})+\mu_2\beta_1\alpha_{0,2}\\& & +(\mu_2^2-\mu_1)\beta_1\alpha_{0,1}+
((\mu_1-\mu_2-\mu_1\mu_2+\mu_2^2)\beta_1 +\beta_1^2)\alpha_{0,0}\\ \epsilon_{1,1}
&=&(1-\mu_1-\mu_2)(\alpha_{0,2}+\alpha_{0,1})+\beta_1\alpha_{0,1}+(\mu_2-1)\beta_1\alpha_{0,0}
\end{eqnarray*}
\begin{eqnarray*}
\epsilon_{0,2}&=& (\mu_2-1)(\alpha_{1,2}-\alpha_{1,0})+\mu_1(\alpha_{1,1}+\alpha_{1,0}) \\
&&+(\mu_2^2-\mu_2+\mu_1^2-(1+\mu_2)\beta_1)\alpha_{0,2}\\
& &
+(-\mu_1\mu_2-\mu_1+(1+\mu_2-\mu_2^2+\mu_1)\beta_1)\alpha_{0,1}\\
& & +(\mu_2-\mu_2^2-\mu_1-\mu_1^2-\mu_1\mu_2+(\mu_1\mu_2-\mu_1-\mu_2-\mu_2^2-1)\beta_1-\beta_1^2)\alpha_{0,0}
\\
\epsilon_{1,2}&=& (\mu_2-1)(\alpha_{0,2}-\alpha_{0,0})+\mu_1(\alpha_{0,1}+\alpha_{0,0})
\end{eqnarray*}
Now observe that because
$b_2= \max\{7b_1,b_1+9m_1,4b_1+9m_2\}$, we have $v_2(\mu_1)\geq
b_1-b_2$ and $v_2(\mu_2)\geq 4b_1-b_2$. So
using the expressions for $(\sigma_2-1)^i(\sigma_1-1)^j\alpha_{2,2}$
and
Gaussian Elimination,
we find that
\begin{eqnarray*}
\Theta_1&=&
(\sigma_1\sigma_2^{[\mu_1]}-1)-\beta_1(\sigma_2-1)(\sigma_1-1)
-\mu_2\beta_1(\sigma_2-1)(\sigma_1-1)^2\\
&&+(\mu_2^2-\mu_1)\beta_1(\sigma_2-1)^2
+\left [(\mu_1\mu_2-\mu_1-\mu_2^2)\beta_1+\beta_1^2\right ](\sigma_2-1)^2(\sigma_1-1)\\
&&+\left [\mu_1\mu_2\beta_1+(1+\mu_2)\beta_1^2\right ](\sigma_2-1)^2(\sigma_1-1)^2,\\
\Theta_2&=&(\sigma_1\sigma_2^{[\mu_1]}-1)^2
-\mu_2(\sigma_2-1)
+(1+\mu_2)\beta_1(\sigma_2-1)^2\\
&&+(\mu_2^2-\mu_2)\beta_1(\sigma_2-1)^2(\sigma_1-1)
+[(\mu_2+\mu_2^2)\beta_1+\beta_1^2](\sigma_2-1)^2(\sigma_1-1)^2,
\end{eqnarray*}
give $\Theta_j\alpha_{2,2}\equiv\alpha_{2,2-j}\bmod \alpha_{2,2-j}\mathfrak P_2$. Let $\Theta_0=1$.
If there is a Galois scaffold then there is a $\Psi_1$ in the augmentation ideal of $K_0[G]$,
which is expressible as $\Psi_1=\sum_{0\leq i,j\leq
2}a_{i,j}(\sigma_2-1)^i\Theta_j$ for some $a_{i,j}\in K_0$, such
that $v_2(\Psi_1 \alpha_{2,2} )=v_2(\alpha_{2,2})+3b_1$ and
$v_2(\Psi_1^2\alpha_{2,2})=v_2(\alpha_{2,2})+6b_1$.
Note that $a_{0,0}=0$, since
$\Psi_1$ is in the augmentation ideal. Since
$v_2(\Psi_1 \alpha_{2,2} )=v_2(\alpha_{2,2})+3b_1$, we have $v_2(a_{0,1})=0$
and $v_2(a_{i,j})+ib_2+3jb_1>3b_1$ for $(i,j)\neq (0,1)$.
To determine $v_2(\Psi_1^2\alpha_{2,2})$, we expand $\Psi_1^2$ in
terms of the $K_0$-basis $\{(\sigma_2-1)^i\Theta_j:0\leq i,j\leq 2\}$
for $K_0[G]$. This requires the following identities, which can
be verified with a software package like Maple (establish
polynomial identities where $x=\sigma_1-1$, $x^3=\sigma_2-1$,
and $x^9=0$):
\begin{eqnarray*}
\Theta_1^2&=&\Theta_2+ \beta_1(\sigma_2-1)\Theta_2
-(\mu_2\beta_1+\beta_1^2)(\sigma_2-1)^2\Theta_2
\\
& & +\beta_1(\mu_2^2+\mu_2)(\sigma_2-1)^2\Theta_1+(\mu_2-1)\beta_1(\sigma_2-1)^2+\mu_2(\sigma_2-1),\\
\Theta_1\Theta_2&=&(\sigma_2-1)-\mu_2(\sigma_2-1)\Theta_1+\beta_1(\sigma_2-1)^2\Theta_1-\beta_1(\mu_2+\mu_2^2)(\sigma_2-1)^2\Theta_2\\
& & -\beta_1(\sigma_2-1)^2,\\
\Theta_2^2&=&(\sigma_2-1)\Theta_1+\beta_1(\sigma_2-1)^2\Theta_1
+\mu_2(\sigma_2-1)\Theta_2
-\beta_1(\sigma_2-1)^2\Theta_2\\
&&-\mu_2^2(\sigma_2-1)^2.
\end{eqnarray*}
In the expansion of $\Psi_1^2$ in terms of
$\{(\sigma_2-1)^i\Theta_j:0\leq i,j\leq 2\}$, we find the coefficient
of $(\sigma_2-1)$ to be $2a_{0,1}a_{0,2}+
a_{0,1}^2\mu_2$, while the coefficient of $(\sigma_2-1)\Theta_2$ is
$2a_{0,1}a_{1,1}+2a_{1,0}a_{0,2}
+a_{0,2}^2\mu_2+a_{0,1}^2\beta_1$. When we apply $\Psi_1^2$ to
$\alpha_{2,2}$, it must be that both
$v_2((2a_{0,1}a_{0,2}+a_{0,1}^2\mu_2)(\sigma_2-1)\alpha_{2,2})>v_2(\Theta_2\alpha_{2,2})=v_2(\alpha_{2,2})+6b_1$
and
$v_2((2a_{0,1}a_{1,1}+2a_{1,0}a_{0,2}+a_{0,2}^2\mu_2+a_{0,1}^2\beta_1)(\sigma_2-1)\Theta_2\alpha_{2,2})>v_2(\alpha_{2,2})+6b_1$.
We may discard those terms of valuation greater than
$v_2(\alpha_{2,2})+6b_1$, using $v_2(a_{0,1})=0$ and
$v_2(a_{i,j})+ib_2+3jb_1>3b_1$ for $(i,j)\neq (0,1)$. This means
that we can drop $-a_{0,1}a_{1,1}-a_{1,0}a_{0,2}$ from the
coefficient for $(\sigma_2-1)\Theta_2\alpha_{2,2}$, leaving
$(a_{0,2}^2\mu_2+a_{0,1}^2\beta_1)
(\sigma_2-1)\Theta_2\alpha_{2,2}$. If $v_2(\mu_2)<6b_1-b_2$, then
because
$v_2(a_{0,1}^2\mu_2(\sigma_2-1)\alpha_{2,2})<v_2(\alpha_{2,2})+6b_1$
we must have $v_0(\mu_2)=v_0(a_{0,2})$. If $b_2<9b_1$, then because
$v_2(a_{0,1}^2\beta_1(\sigma_2-1)\Theta_2\alpha_{2,2})<v_2(\alpha_{2,2})+6b_1$
we must have $2v_0(a_{0,2})+v_0(\mu_2)=v_0(\beta_1)$. So if
$v_2(\mu_2)<6b_1-b_2$ and $b_2<9b_1$, then we must have
$3v_0(\mu_2)=v_0(\beta_1)=-b_1$. But $3\nmid b_1$. This means that
we have $v_2(\mu_2)>6b_1-b_2$ or $b_2>9b_1$, and there are two cases
to consider. Suppose that $v_2(\mu_2)>6b_1-b_2$. Then because we
must have $v_2(
(a_{0,2}^2\mu_2+a_{0,1}^2\beta_1)(\sigma_2-1)\Theta_2\alpha_{2,2}
)>v_2(\alpha_{2,2})+6b_1$, we must have $v_2(
(a_{0,1}^2\beta_1)(\sigma_2-1)\Theta_2\alpha_{2,2}
)>v_2(\alpha_{2,2})+6b_1$, or $b_2>9b_1$. Suppose that $b_2>9b_1$.
Then because we must have
$v_2((a_{0,1}^2\mu_2-a_{0,1}a_{0,2})(\sigma_2-1)\alpha_{2,2})>v_2(\alpha_{2,2})+6b_1$,
we must have
$v_2(a_{0,1}^2\mu_2(\sigma_2-1)\alpha_{2,2})>v_2(\alpha_{2,2})+6b_1$,
or $v_2(\mu_2)>6b_1-b_2$. As a result, we have shown that in order
for a Galois scaffold to exist, both $b_2>9b_1$ and
$v_2(\mu_2)>6b_1-b_2$ must hold. The first condition agrees with
(\ref{cyc-spread}). The second condition agrees with
(\ref{cyc-shape}).
\subsection{$C_3\times C_3$-extensions}
We prove that for $C_3\times C_3$-extensions the conditions in Theorem
\ref{scaffold} are sharp.
Assume $p=3$ in \S2.1.1. So $v_0(\beta_2)\leq v_0(\beta_1)=-b_1<0$
with $p\nmid b_1, v_0(\beta_2)$. We follow \S3.3 closely, except that
the technical issues here are easier, since
expressions here are often truncations of the expressions in
\S3.3. Again, there are elements $\mu_1,\mu_2\in K_0$ with
$v_0(\mu_i)=-m_i$ and a $k\in \kappa$ such that
$\beta_2=\mu_1^3\beta_1+\mu_2^3\binom{\beta_1}{2}+k$. Since
$v_0(\beta_2)<0$, $v_0(\beta_2)=-\max\{3m_1+b_1,3m_2+2b_1\}$. Let
$\wp(x_i)=\beta_i$, and let $(\sigma_i-1)x_j=\delta_{i,j}$ be the
Kronecker delta function. Let $X_2=x_2-\mu_1
x_1-\mu_2\binom{x_1}{2}$. Then $\wp(X_2)
=-\wp(\mu_1)x_1-\wp(\mu_2)\binom{x_1}{2}-\mu_2^3\beta_1x_1+k$. So
$v_1(\wp(X_2))=-b_2$ and $(\sigma_1-1)X_2=-\mu_1-\mu_2x_1$. A basis
for $K_2/K_0$ is given by $\mathcal{B}=\left \{\alpha_{i,j}:0\leq
i,j\leq 2\right \}$ where $\alpha_{i,j}=\binom{X_2}{i}\binom{x_1}{j}$.
Verify, using a software package as in \S3.3, that
for $0\leq i,j\leq 2$ we have
$(\sigma_2-1)^i(\sigma_1-1)^j\alpha_{2,2}=\alpha_{2-i,2-j}+\epsilon_{i,j}$,
where the error term $\epsilon_{i,j}$ is zero for $(i,j)\in
\{(0,0),(1,0),(2,0),(2,1),(2,2)\}$. Otherwise
\begin{eqnarray*}
\epsilon_{0,1} &=&
-(\mu_1+\mu_2)(\alpha_{1,2}+\alpha_{1,1})+\mu_2\beta_1\alpha_{1,0}\\
&&+(\mu_1\mu_2-\mu_1-\mu_1^2-\mu_2-\mu_2^2)(\alpha_{0,2}
+\alpha_{0,1})+
\mu_2^2\beta_1\alpha_{0,1} \\
&&+(\mu_2-\mu_1\mu_2+\mu_2^2)\beta_1\alpha_{0,0},\\
\epsilon_{1,1} &=&
-(\mu_1+\mu_2)(\alpha_{0,2}+\alpha_{0,1})+\mu_2\beta_1\alpha_{0,0},\\
\epsilon_{0,2} &=&
\mu_2(\alpha_{1,2}-\alpha_{1,0})+\mu_1(\alpha_{1,1}+\alpha_{1,0})
+(\mu_2^2+\mu_2+\mu_1^2)\alpha_{0,2} \\
&&+(\mu_1-\mu_1\mu_2
-\mu_2^2\beta_1)\alpha_{0,1}\\
&&+(\mu_1-\mu_1^2-\mu_2-\mu_2^2-\mu_1\mu_2
+(\mu_1\mu_2
-\mu_2^2)\beta_1)\alpha_{0,0},\\
\epsilon_{1,2}
&=&\mu_2(\alpha_{0,2}-\alpha_{0,0})+\mu_1(\alpha_{0,1}+\alpha_{0,0}).\\
\end{eqnarray*}
Use this and the fact that because
$b_2= \max\{b_1+9m_1,4b_1+9m_2\}$, we have $v_2(\mu_1)\geq
b_1-b_2$ and $v_2(\mu_2)\geq 4b_1-b_2$ to find
that
$\Theta_j\alpha_{2,2}\equiv\alpha_{2,2-j}\bmod \alpha_{2,2-j}\mathfrak P_2$ for
\begin{multline*}
\Theta_1=(\sigma_1\sigma_2^{[\mu_1]}-1) +\mu_2^2\beta_1(\sigma_2-1)^2
-\mu_2\beta_1(\sigma_2-1)(\sigma_1-1)^2\\+(\mu_1\mu_2-\mu_2^2)\beta_1(\sigma_2-1)^2(\sigma_1-1)
+(\mu_1\mu_2-\mu_2)\beta_1(\sigma_2-1)^2(\sigma_1-1)^2,
\end{multline*}
\begin{multline*}
\Theta_2=
(\sigma_1\sigma_2^{[\mu_1]}-1)^2 -\mu_2(\sigma_2-1)
-\mu_2(\sigma_2-1)^2
+\mu_2^2\beta_1(\sigma_2-1)^2(\sigma_1-1)\\
+\mu_2^2\beta_1(\sigma_2-1)^2(\sigma_1-1)^2.
\end{multline*}
Let $\Theta_0=1$.
Using a software package as in \S3.3, we establish:
\begin{eqnarray*}
\Theta_1^2&=&\Theta_2 +\mu_2(\sigma_2-1)+\mu_2(\sigma_2-1)^2+\mu_2^2\beta_1(\sigma_2-1)^2\Theta_1,\\
\Theta_1\Theta_2&=&-\mu_2(\sigma_2-1)\Theta_1-\mu_2(\sigma_2-1)^2\Theta_1-\mu_2^2\beta_1(\sigma_2-1)^2\Theta_2\\
\Theta_2^2&=&
-\mu_2^2(\sigma_2-1)^2
+\mu_2(\sigma_2-1)\Theta_2
+\mu_2(\sigma_2-1)^2\Theta_2
\end{eqnarray*}
If there is a Galois scaffold then there is a $\Psi_1=\sum_{0\leq
i,j\leq 2}a_{i,j}(\sigma_2-1)^i\Theta_j$ in the augmentation ideal of $K_0[G]$
with
$a_{i,j}\in K_0$ and $a_{0,0}=0$,
such that $v_2(\Psi_1\alpha_{2,2})=v_2(\alpha_{2,2})+3b_1$ and thus
$v_2(a_{0,1})=0$ and for $(i,j)\neq (0,1)$,
$v_2(a_{i,j})+ib_2+3jb_1>3b_1$.
Furthermore
$v_2(\Psi_1^2\alpha_{2,2})=v_2(\alpha_{2,2})+6b_1$. Expand
$\Psi_1^2$ in terms of $\{(\sigma_2-1)^i\Theta_j:0\leq i,j\leq
2\}$. The coefficient of $(\sigma_2-1)$ is
$a_{0,1}^2\mu_2$. When we apply $\Psi_1^2$ to
$\alpha_{2,2}$, we must have
$v_2(a_{0,1}^2\mu_2(\sigma_2-1)\alpha_{2,2})>v_2(\Theta_2\alpha_{2,2})=v_2(\alpha_{2,2})+6b_1$.
This implies
$v_2(\mu_2)>6b_1-b_2$ and thus (\ref{cyc-shape}).
\end{document}
\begin{document}
\begin{center}
{\Large\textbf{Duality formula and its generalization for Schur multiple zeta functions}}
\end{center}
\begin{center}
{\large Maki Nakasuji\footnote{Supported by Grant-in-Aid for Scientific Research (C) 18K03223.}
and Yasuo Ohno\footnote{Supported by Grant-in-Aid for Scientific Research (C) 19K03437.} }
\end{center}
\begin{center}
{\textsc{abstract}}
\\
\end{center}
{\footnotesize In the study of multiple zeta values, the duality formula is one of the basic families of relations and, together with the generalized duality formula (the so-called Ohno relation) obtained by the second author, plays an important role in the
investigation of the algebraic structure of the space spanned by all multiple zeta values.
In this article, we will discuss these formulas for the Schur multiple zeta values, which are the values at positive integers of
the Schur multiple zeta function introduced by the first author, O. Phukswan and Y. Yamasaki.}
\\
\noindent
\textbf {2010 Mathematics Subject Classification : }11M41, 05E05\\
\textbf {Key words and phrases : }Schur multiple zeta function\\
\subjclass[2020]{11M41, 05E05}
\keywords{Schur multiple zeta function}
\vskip 1cm
\par\noindent
\section{Introduction}
The classical multiple zeta and zeta-star values are defined by
$$\zeta(k_1, \ldots, k_r)=\sum_{0<m_1<\cdots < m_r}\frac{1}{{m_1}^{k_1}\cdots m_r^{k_r}},
\quad
\zeta^{\star}(k_1, \ldots, k_r)=\sum_{0< m_1\leq \cdots \leq m_r}\frac{1}{{m_1}^{k_1}\cdots m_r^{k_r}}
$$
for $k_1, \ldots, k_{r-1}\geq 1$, $k_r>1$.
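For instance, in the smallest nontrivial case the two sums are related by
$$\zeta^{\star}(1,2)=\sum_{0< m_1\leq m_2}\frac{1}{m_1 m_2^{2}}=\zeta(1,2)+\zeta(3),$$
since the terms with $m_1<m_2$ give $\zeta(1,2)$ and those with $m_1=m_2$ give $\zeta(3)$.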
The ${\mathbb Q}$-vector space ${\mathcal Z}$ spanned by multiple zeta values coincides with the space spanned by multiple zeta-star values,
and its algebraic structure has been intensively investigated over the last three decades.
One of the most well-known results in this area is as follows:
\\
For any integer $k\geq 1$, we denote by ${\mathcal Z}_k$ the ${\mathbb Q}$-subspace of ${\mathcal Z}$ spanned by
multiple zeta values of weight $k$. Then
the dimension of ${\mathcal Z}_k$
is less than or equal to $d_k$, where $d_k$ satisfies $\displaystyle\frac{1}{1-t^2-t^3}=\sum_k d_k t^k$.
\\
This is obtained by Goncharov and Terasoma (\cite{Go}, \cite{Te}, \cite{DG}). Further, for each weight the number of convergent (admissible) indices of
multiple zeta values is far larger than this upper bound.
These results show that there are plenty of linear relations among these values.
However, their exact structure remains quite mysterious.
Some families of relations which are important to understand the structure of the ring of multiple zeta values
${\mathcal Z}={\mathbb Q}+\sum_k {\mathcal Z}_k$
are already known. Among them, the duality formula is one of the most important relations. It shows that
every multiple zeta value, except in the self-dual case, is paired with another multiple zeta value of the same weight taking the same value.
The generalized duality formula, called the Ohno relation, is a large family which includes
not only the duality but also other basic relations such as the sum formula and the derivation relation.
Moreover, this formula shows how
dualities in spaces of small weight contribute to relations in spaces of larger weight, which may help us
to understand their structure.
On the other hand, while the duality for multiple zeta values is well known as above, a duality formula for multiple zeta-star values
had not been obtained before the introduction of the Schur multiple zeta values.
The Schur multiple zeta functions introduced in \cite{NPY} are defined as sums over combinatorial objects called semi-standard Young tableaux
and generalize the classical multiple zeta and zeta-star functions.
Nakasuji-Phukswan-Yamasaki (\cite{NPY}) showed some determinant formulas for them, such as the Jacobi-Trudi, Giambelli and dual Cauchy formulas,
which lead to quite non-trivial algebraic relations among multiple zeta and zeta-star functions.
The Schur multiple zeta values are the values at the positive integers of the Schur multiple zeta function,
which also generalize the classical multiple zeta and zeta-star values.
Nakasuji-Phukswan-Yamasaki (\cite{NPY}) established iterated integral representations of the Schur multiple zeta values of ribbon type, which yield a duality formula for multiple zeta-star values.
The primary goal of this article is to obtain the duality formula for the Schur multiple zeta values of skew type, including ribbon type. Secondly,
applying this formula, we will show the generalized duality formula, the Ohno relation, for the Schur type.
This article is organized as follows.
In Section 2, we review the basic terminology and some known results for the Schur multiple zeta function.
In Section 3, we first give the definition of the dual index (tableau) of a convergent index for the Schur multiple zeta values, and
we will discuss the duality formula for the Schur multiple zeta values of skew type under some assumption.
In the proof, we will use the Jacobi-Trudi formula for the Schur multiple zeta function which is obtained in \cite{NPY} and the duality formula of the classical multiple zeta functions.
In Section 4, we will consider the Ohno relation for the Schur type.
In this part, we will use the key lemma in the extended Jacobi-Trudi formula for the Schur multiple zeta functions which is shown in \cite{NT}
and the Ohno relation for the classical one.
\section{Preliminaries}
We review the Schur multiple zeta function from \cite{NPY}.
Let $\mathbb{N}$, $\mathbb{C}$ be the set of positive integers, and of complex numbers,
respectively.
Let $\lambda=(\lambda_1, \cdots, \lambda_m)$ be a partition of
$n\in\mathbb{N}$, i.e. a non-increasing sequence $\lambda_1\geq \lambda_2\geq \cdots \geq \lambda_m>0$ of positive integers with $|\lambda|:=\sum_i\lambda_i=n$.
Then a {\it Young diagram} $D_{\lambda}$ of shape $ \lambda$ is obtained by drawing $\lambda_i$ boxes in the $i$-th row.
We often identify $\lambda$ with the corresponding Young diagram.
Let $T_{\lambda}(X)$ be the set of all Young tableaux of shape $\lambda$ over a set $X$ and, in particular, $\mathrm{SSYT}_{\lambda}\subset T_{\lambda}(\mathbb{N})$ the set of all semi-standard Young tableaux of shape $\lambda$. Recall that $M=(m_{ij})\in \mathrm{SSYT}_{\lambda}$ if and only if $m_{i1}\le m_{i2}\le \cdots$ for all $i$ and $m_{1j}<m_{2j}<\cdots $ for all $j$. For ${\pmb s}=(s_{ij})\in T_{\lambda}(\mathbb{C}),$ the Schur multiple zeta-function associated with $\lambda$ is defined as in \cite{NPY} by the series
$$
\zeta_{\lambda}({ \pmb s})=\sum_{M\in \mathrm{SSYT}_{\lambda}}
{M^{ -\pmb s}},
$$
where $M^{ -\pmb s}=\displaystyle{\prod_{(i, j)\in \lambda}m_{ij}^{-s_{ij}}}$ for $M=(m_{ij})\in \mathrm{SSYT}_{\lambda}$. This series converges absolutely if ${\pmb s}\in W_{\lambda}$ where
\[
W_\lambda = W_\lambda({\mathbb C}) =
\left\{{\pmb s}=(s_{ij})\in T_{\lambda}(\mathbb{C})\,\left|\,
\begin{array}{l}
\text{$\Re(s_{ij})\ge 1$ for all $(i,j)\in D_{\lambda} \setminus C_{\lambda}$ } \\[3pt]
\text{$\Re(s_{ij})>1$ for all $(i,j)\in C_{\lambda}$}
\end{array}
\right.
\right\}
\]
with $C_{\lambda}$ being the set of all corners of $\lambda$.
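For instance, unwinding the definition for the two one-dimensional shapes: for the single-column shape $\lambda=(1,1)$ the semi-standard condition forces $m_{11}<m_{21}$, while for the single-row shape $\lambda=(2)$ it forces $m_{11}\leq m_{12}$, so that
$$
\zeta_{(1,1)}\left(\,
\begin{ytableau}
s_{11}\\
s_{21}
\end{ytableau}\,
\right)=\zeta(s_{11},s_{21}),
\qquad
\zeta_{(2)}\left(\,
\begin{ytableau}
s_{11} & s_{12}
\end{ytableau}\,
\right)=\zeta^{\star}(s_{11},s_{12}).
$$
In this way single columns recover the multiple zeta functions and single rows recover the multiple zeta-star functions.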
The Schur multiple zeta functions have a natural extension to those of skew type. Let $\lambda$ and $\mu$
be partitions satisfying $\mu\subset \lambda$. The Young diagram of skew type $\lambda/\mu$ is the array of boxes contained in
$\lambda$ but not in $\mu$.
We may write $\delta=\lambda/\mu$.
For a set $X$, we denote by $T_{\delta}(X)$ the set of all Young tableaux
of shape $\delta$ over $X$,
and by $\mathrm{SSYT}_{\delta}$ the set of all semi-standard Young tableaux of that shape.
Then,
for ${\pmb s}=(s_{ij})\in T_{\delta}(\mathbb{C}),$ the skew type Schur multiple zeta-function associated with $\delta$
is defined as in \cite{NPY} by the series
$$
\zeta_{\delta}({ \pmb s})=\sum_{M\in \mathrm{SSYT}_{\delta}}
{M^{ -\pmb s}},
$$
where $M^{ -\pmb s}=\displaystyle{\prod_{(i, j)\in \delta}m_{ij}^{-s_{ij}}}$ for
$M=(m_{ij})\in \mathrm{SSYT}_{\delta}$. This series converges absolutely if ${\pmb s}\in W_{\delta}$ where
$W_{\delta}$ is also similarly defined as $W_{\lambda}$.
In \cite{NPY}, they obtained some determinant formulas such as the Jacobi-Trudi, Giambelli and dual Cauchy formulas for Schur
multiple zeta functions under a certain assumption on the variables.
Nakasuji and Takeda \cite{NT} showed a certain extended Jacobi-Trudi formula.
We here review the (extended) Jacobi-Trudi formula with the key lemma.
A skew Young diagram is called a {\it ribbon} if it is connected and contains no $2\times 2$ block of boxes.
The maximal outer ribbon of $\lambda$ is called the rim of $\lambda$.
We peel the diagram $\lambda$ off into successive rims $\theta_t, \theta_{t-1}, \ldots, \theta_1$, beginning from the outside of $\lambda$;
the resulting sequence $\Theta=(\theta_1, \ldots, \theta_t)$ of ribbons is called a {\it rim decomposition of $\lambda$}.
Let $\lambda'=(\lambda_1', \ldots, \lambda_s')$ be the conjugate of $\lambda$.
If each $\theta_i$ starts from $(1, i)$ for all $1\leq i\leq s$, then a rim decomposition $\Theta$ of $\lambda$ is called an {\it E-rim decomposition}.
Here, we permit $\theta_i=\emptyset$. We denote by ${\rm{Rim}}_E^{\lambda}$ the set of all $E$-rim decompositions of $\lambda$.
\begin{Example}
The following $\Theta=(\theta_1, \theta_2, \theta_3, \theta_4)$ is an $E$-rim decomposition of $\lambda=(4,3,3,2)$;
$$
\Theta=
\begin{ytableau}
1 & 2 & 3 & 4 \\
1 & 2 & 3\\
1 & 3 & 3\\
3 & 3
\end{ytableau},
$$
which means that
$\theta_1=
\begin{ytableau}
{}\\
{}\\
{}
\end{ytableau}
$,
$\theta_2=
\begin{ytableau}
{}\\
{}
\end{ytableau}
$,
$\theta_3=
\begin{ytableau}
\none & \none & {}\\
\none & \none & {}\\
\none & {} &{} \\
{} & {}
\end{ytableau}
$
and $\theta_4=
\begin{ytableau}
\\
\end{ytableau}
$.
\end{Example}
Now consider the patterns corresponding to $E$-rim decompositions on the ${\mathbb Z}^2$ lattice.
Fix $N\in {\mathbb N}$. For a partition $\lambda'=(\lambda'_1, \ldots, \lambda'_s)$, let $c_i$ and $d_i$ be lattice points in ${\mathbb Z}^2$
respectively given by $c_i=(s+1-i, 1)$ and $d_i=(s+1-i+\lambda_i', N+1)$ for $1\leq i \leq s$.
An {\it E-pattern} corresponding to $\lambda$ is a tuple
$L=(\ell_1, \ldots, \ell_s)$ of directed paths on ${\mathbb Z}^2$, each step of which goes either one unit to the northeast or one unit up, such that $\ell_i$
starts from $c_i$ and ends at $d_{\sigma(i)}$ for some $\sigma\in {\frak S}_s$.
Such $\sigma \in {\frak S}_s$ is called the {\it type} of $L$ and we denote it by $\sigma={\rm{type}}(L)$.
Let ${\mathcal E}_{\lambda}^N$ be the set of all $E$-patterns corresponding to $\lambda$.
\begin{Example}
Let $\lambda=(4,3,3,2)$. Then $\lambda'=(4,4,3,1)$ and
the following $L=(\ell_1,\ell_2, \ell_3, \ell_4)$ is one of the elements of $\mathcal{E}^{6}_{(4,3,3,2)}$ for $\sigma=(1\;2\; 3)\in {\frak S}_4$.
\begin{figure}
\caption{$L=(\ell_1, \ell_2, \ell_3, \ell_4)\in \mathcal{E}^{6}_{(4,3,3,2)}$}
\end{figure}
\end{Example}
Put $S_E^{\lambda}=\{{\rm{type}}(L)\in {\frak S}_s \,|\, L\in {\mathcal E}_{\lambda}^N\}$.
Then there exists a bijection $\tau_E : {\rm{Rim}}_E^{\lambda}\to S_E^{\lambda}$ given by $\tau_E(\Theta)={\rm{type}}(L)$ (cf. \cite[Lemma 3.12]{NPY}).
\begin{Lemma}\label{Lem31}
For any partition $\lambda=(\lambda_1, \ldots, \lambda_r)$ and its conjugate $\lambda'=(\lambda_1', \ldots, \lambda'_s)$,
let $\Theta^{\sigma}=(\theta_1^{\sigma}, \ldots, \theta_s^{\sigma})\in {\rm{Rim}}_E^{\lambda}$ be the $E$-rim decomposition such that $\tau_E(\Theta^{\sigma})=\sigma$ for $\sigma\in S_{E}^{\lambda}$.
For $\Theta=(\theta_1, \ldots, \theta_s)\in {\rm{Rim}}_E^{\lambda}$, $\theta_i({\pmb s})\in {\mathbb C}^{|\theta_i|}$ is the tuple obtained by reading contents of the shape restriction of ${\pmb s}$ to $\theta_i$ from the top right to the bottom left.
And let $\varepsilon_{\sigma}$ be the signature of $\sigma\in {\frak S}_s$.
Set $$W_{\lambda, E}=W_{\lambda, E}({\mathbb C})=\left\{{\pmb s}=(s_{ij})\in T_{\lambda}( {\mathbb C}) \left|
\begin{matrix}
\Re(s_{ij})\geq 1 \; {\rm{for\; all}}\; (i, j)\in D_{\lambda}\backslash E_{\lambda}\\
\hspace{-7mm}\Re(s_{ij})> 1 \; {\rm{for\; all}}\; (i, j)\in E_{\lambda}\\
\end{matrix}
\right. \right\},$$
where
$E_{\lambda}=\{(i, j)\in D_{\lambda} | i-j\in \{i-\lambda_i' | 1\leq i \leq s\}\}$.
When ${\pmb s}\in W_{\lambda, E}$,
we have
\begin{equation}\label{keylemma2}
\sum_{diag}\zeta_{\lambda}({\pmb s})=\sum_{diag}\sum_{\sigma\in S_E^{\lambda}}\varepsilon_{\sigma}\prod_{i=1}^s \zeta(\theta_i^{\sigma}({\pmb s})),
\end{equation}
where $\sum_{diag}=\sum_{\substack{\sigma_j\in {S}_j\\ j\in {\mathbb Z}}}\prod_{i\in {\mathbb Z}}\sigma_i$, with $S_j$ being the set of permutations of the elements of $I(j)=\{(k, \ell)\in D_{\lambda} \,|\, \ell-k=j\}$.
\end{Lemma}
\begin{proof}
The assertion follows from a calculation similar to that of Lemma 3.1 in \cite{NT}.
\end{proof}
If all the entries on each diagonal line of ${\pmb s}$ are equal, the symmetrization $\sum_{diag}$ in \eqref{keylemma2} is unnecessary and
we have the following
\begin{Corollary}\label{Cor31}
Set $W_{\lambda}^{\mathrm{diag}}=\{{\pmb s}=(s_{ij})\in W_{\lambda} | s_{ij}=s_{pq}\; {\rm{if}}\; j-i=q-p\}$.
When ${\pmb s}\in W_{\lambda}^{\mathrm{diag}}$,
we have
\begin{equation}\label{keylemma1}
\zeta_{\lambda}({\pmb s})=\sum_{\sigma\in S_E^{\lambda}}\varepsilon_{\sigma}\prod_{i=1}^s \zeta(\theta_i^{\sigma}({\pmb s})).
\end{equation}
\end{Corollary}
The following Jacobi-Trudi formula for the Schur multiple zeta function, under a suitable assumption on the variables, is obtained from Corollary \ref{Cor31}.
\begin{Theorem}(\cite[Theorem 1.1(2)]{NPY})
Let $\lambda=(\lambda_1, \ldots, \lambda_r)$ be a partition and $\lambda'=(\lambda_1', \ldots, \lambda'_s)$ be the conjugate of $\lambda$.
Put
$$W_{\lambda}^{\rm JT}=\left\{{\pmb s}=(s_{ij})\in W_{\lambda}^{\mathrm{diag}}
\left|
\; \Re(s_{\lambda_i', i})>1 \; {\rm for}\; 1\leq{}^{\forall}i\leq s
\right.\right\}$$
and we write $a_k=s_{i, i+k}$ for $k\in {\mathbb Z}$ (and for any $i\in {\mathbb N}$).
Then we have
\begin{equation}\label{JTE}
\zeta_{\lambda}({\pmb s})= \det \left[ \zeta (a_{j-1}, a_{j-2}, \ldots, a_{j-(\lambda'_i-i+j)})\right]_{1\leq i, j\leq s}.
\end{equation}
Here, we understand that $\zeta(\cdots)=1$ if $\lambda'_i-i+j=0$ and $0$ if $\lambda'_i-i+j<0$.
\end{Theorem}
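As a quick illustration (simply unwinding \eqref{JTE} for the smallest nontrivial shape), take $\lambda=(2)$, so that $\lambda'=(1,1)$, $s=2$, $a_0=s_{11}$ and $a_1=s_{12}$. Then
$$
\zeta_{(2)}({\pmb s})=\det
\begin{pmatrix}
\zeta(a_0) & \zeta(a_1,a_0)\\
1 & \zeta(a_1)
\end{pmatrix}
=\zeta(a_0)\zeta(a_1)-\zeta(a_1,a_0),
$$
which is the familiar expression for $\zeta^{\star}(a_0,a_1)$; for example, $\zeta^{\star}(2,3)=\zeta(2)\zeta(3)-\zeta(3,2)$.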
For skew type Schur multiple zeta functions, we have a similar formula:
\begin{Lemma}\label{Lemdagger}
Let $\delta=\lambda/\mu$.
For ${\pmb s}
\in W_{\delta, E}({\mathbb C})$, we have
$$
\sum_{diag}\zeta_{\delta} ({\pmb s})
=\sum_{diag}\sum_{\sigma\in S_E^{\delta}}\varepsilon_{\sigma}\prod_{i=1}^s \zeta(\theta_i^{\sigma}({\pmb s})).
$$
\end{Lemma}
\begin{Theorem}\label{skewJT}(\cite[Theorem 4.3(2)]{NPY})
Retain the above notations. Assume that ${\pmb s}=(s_{ij})\in W_{\delta}^{\rm{JT}}$ and we write $a_k=s_{i, i+k}$ for $k\in {\mathbb Z}$ (and for any $i\in {\mathbb N}$).
Then we have
\begin{equation}\label{JTEskew}
\zeta_{\delta}({\pmb s})= \det \left[ \zeta (a_{-\mu_j'+j-1}, a_{-\mu_j'+j-2}, \ldots, a_{-\mu_j'+j-(\lambda'_i-\mu_j'-i+j)})\right]_{1\leq i, j\leq s}.
\end{equation}
Here, we understand that $\zeta(\cdots)=1$ if $\lambda'_i-\mu_j'-i+j=0$ and $\zeta(\cdots)=0$ if $\lambda'_i-\mu_j'-i+j<0$ or
at least one $a_n$ does not exist ($-\mu_j'+j-1\geq n \geq -\mu_j'+j-(\lambda'_i-\mu_j'-i+j)$) among the variables in the $ij$-th entry.
\end{Theorem}
\section{Duality formula}\label{sectionresults}
For positive integers $k_1, \ldots, k_n$ with $k_n\geq 2$, the multiple zeta value $\zeta(k_1, \ldots, k_n)$ converges absolutely, and such an index set ${\bf k}=(k_1, \ldots, k_n)$ is called an {\it admissible} index set. When we write an admissible index set
${\bf k}$ as
$${\bf k}=(\underbrace{1, \ldots, 1}_{a_1-1}, b_1+1, \underbrace{1, \ldots, 1}_{a_2-1}, b_2+1, \ldots, \underbrace{1, \ldots, 1}_{a_m-1}, b_m+1)
$$
with $a_1, b_1, a_2, b_2, \cdots, a_m, b_m\in {\mathbb Z}_{\geq 1}$, the following index set is called {\it dual} index set of ${\bf k}$:
$${\bf k^{\dagger}}=(\underbrace{1, \ldots, 1}_{b_m-1}, a_m+1, \underbrace{1, \ldots, 1}_{b_{m-1}-1}, a_{m-1}+1, \ldots, \underbrace{1, \ldots, 1}_{b_1-1}, a_1+1).
$$
Then the well-known duality relation for multiple zeta values is as follows.
\begin{Theorem}\label{Thduality}
For any admissible index set ${\bf k}$ and its dual index set ${\bf k}^{\dagger}$, we have
$$\zeta({\bf k})=\zeta({\bf k}^{\dagger}).
$$
\end{Theorem}
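For instance, ${\bf k}=(1,2)$ has $m=1$ and $(a_1,b_1)=(2,1)$, so ${\bf k}^{\dagger}=(3)$ and Theorem \ref{Thduality} recovers Euler's relation $\zeta(1,2)=\zeta(3)$. Similarly, ${\bf k}=(2,3)$ has $(a_1,b_1)=(1,1)$ and $(a_2,b_2)=(1,2)$, hence ${\bf k}^{\dagger}=(1,2,2)$ and
$$\zeta(2,3)=\zeta(1,2,2).$$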
Inspired by this formula, we will define a tableau which is ``dual'' to ${\pmb k}\in T_{\delta}({\mathbb Z})$ with
$\lambda=(\lambda_1, \ldots, \lambda_r)$ and $\mu=(\mu_1, \ldots, \mu_r)$ being two partitions such that $\lambda_i\geq \mu_i$ for any $i$.
First, we denote a finer piece of an index, $\underbrace{1, \ldots, 1}_{a-1}, b+1$, by $A(a,b)$ and call it an {\it admissible piece}. Then, if we write $A_i:=A(a_i, b_i)$
and $A_i^{\dagger}:=A(b_i, a_i)$,
the above
admissible index set ${\bf k}$ and its dual ${\bf k}^{\dagger}$ are written in terms of admissible pieces:
$${\bf k}=(A(a_1, b_1), A(a_2, b_2), \ldots, A(a_m, b_m))=(A_1, A_2, \ldots, A_m)
$$
and
$${\bf k^{\dagger}}=(A(b_m, a_m), A(b_{m-1}, a_{m-1}), \ldots, A(b_1, a_1))=(A_m^{\dagger}, A_{m-1}^{\dagger}, \ldots, A_1^{\dagger}).
$$
Let $T^{\mathrm{diag}}_\delta({\mathbb Z})=\{{\pmb s}\in T_{\delta}({\mathbb Z}) | s_{ij}=s_{pq} \; {\mathrm{if}}\; j-i=q-p\}$.
Put $I_{\delta}^D$ to be the set of elements in $T_{\delta}^{\mathrm{diag}}({\mathbb Z})$ consisting of
admissible pieces such that the entry on the right side of the top element in each column is not $1$.
For ${\pmb s}\in I_{\delta}^D$, in terms of admissible pieces,
the row containing the topmost component is numbered as the first row.
We write the component in the $i$-th row and $j$-th column in terms of admissible pieces as $A_{ij}$.
We note that
the component in the upper-right corner which is in the $j$-th row is $A_{1j}$
and that $A_{ij}=A_{k\ell}$ if $j-i=\ell-k$ when they are not empty.
Further, we notice that in terms of tableau,
the top element in $A_{ij}$ and the bottom element in $A_{i(j+1)}$ are located side by side.
Next, we write ${\pmb k}\in I_{\delta}^{\rm D}$ as
\begin{equation}\label{notationk}
{\pmb k}={\pmb k}^{\mathrm{col}}_1\cdots {\pmb k}^{\mathrm{col}}_{\lambda_1},
\end{equation}
where ${\pmb k}^{\mathrm{col}}_{j}$ is the $j$-th column tableau of ${\pmb k}$. For example, when $\lambda=(3, 2, 1)$ and
$${\pmb k}=
\ytableausetup{boxsize=normal}
\begin{ytableau}
k_{11} & k_{12} & k_{13}\\
k_{21} & k_{22}\\
k_{31}
\end{ytableau},
$$
then
${\pmb k}^{\mathrm{col}}_{1}=
\begin{ytableau}
k_{11} \\
k_{21} \\
k_{31}
\end{ytableau}
$,
${\pmb k}^{\mathrm{col}}_{2}=
\begin{ytableau}
k_{12} \\
k_{22}
\end{ytableau}
$
and
${\pmb k}^{\mathrm{col}}_{3}=
\begin{ytableau}
k_{13}
\end{ytableau}
$.
We regard ${\pmb k}^{\mathrm{col}}_{j}$ ($1\leq j\leq \lambda_1$) as the corresponding admissible index set, read in order from top to bottom.
So, we may write it in terms of admissible pieces as well.
If the $j$-th column tableau ${\pmb k}^{\mathrm{col}}_{j}$ starts with $A_{nj}$ for some $n$ and has $m+1$ admissible pieces,
then ${\pmb k}^{\mathrm{col}}_{j}={}^t (A_{nj}, \ldots, A_{(n+m) j})$.
Then the dual tableau is ${\pmb k}^{\mathrm{col}, {\dagger}}_{j}={}^t (A_{(n+m)j}^{\dagger}, \ldots, A_{nj}^{\dagger})$.
We define ${\pmb k}^{\dagger}$ by arranging ${\pmb
k}^{\mathrm{col}, {\dagger}}_{\lambda_1}, \ldots, {\pmb k}^{\mathrm{col}, {\dagger}}_{1}$ in this order from left to right,
where we put the top element in $A_{ij}^{\dagger}$ and the bottom element in $A_{i(j-1)}^{\dagger}$ side by side
for $2\leq j\leq \lambda_1$ if both $A_{ij}^{\dagger}$ and $A_{i(j-1)}^{\dagger}$ are not empty.
\begin{Example}
For $\delta=\lambda/\mu$ with $\lambda=(3,2,1)$ and $\mu=\emptyset$,
let ${\pmb k}=
\begin{ytableau}
\ytableausetup{centertableaux}
2 & 2 & 3 \\
4 & 2\\
5
\end{ytableau}$. Then
${\pmb k}=
{\pmb k}^{\mathrm{col}}_{1}{\pmb k}^{\mathrm{col}}_{2}{\pmb k}^{\mathrm{col}}_{3},
$
where
${\pmb k}^{\mathrm{col}}_{1}=
\begin{ytableau}
2\\
4\\
5
\end{ytableau}
$,
${\pmb k}^{\mathrm{col}}_{2}=
\begin{ytableau}
2\\
2
\end{ytableau}
$
and
${\pmb k}^{\mathrm{col}}_{3}=
\begin{ytableau}
3
\end{ytableau}
$.
In terms of admissible pieces,
${\pmb k}^{\mathrm{col}}_{1}={}^t(A(1,1), A(1,3), A(1,4))={}^t(A_{11}, A_{21}, A_{31})$,
${\pmb k}^{\mathrm{col}}_{2}={}^t(A(1,1), A(1,1))={}^t(A_{12}, A_{22})$ and
${\pmb k}^{\mathrm{col}}_{3}={}^t(A(1,2))={}^t(A_{13})$ and
${\pmb k}=
\begin{ytableau}
A_{11} & A_{12} & A_{13} \\
A_{21} & A_{22}\\
A_{31}
\end{ytableau}$
,
where
$$
\begin{array}{|c|c|c|c|}
\hline
& A(a_i,b_i) & A(a_i,b_i)^{\dagger} & {\rm pieces\; in}\; {\pmb k}\\
\hline
A(1,1) & 2 & 2 & A_{11}, A_{12}, A_{22}\\
\hline
A(1,2) & 3 & 1, 2 & A_{13}\\
\hline
A(1,3) & 4 & 1, 1, 2 & A_{21}\\
\hline
A(1,4) & 5 & 1, 1, 1, 2 & A_{31}\\
\hline
\end{array}.
$$
The dual tableaux
${\pmb k}^{\mathrm{col}, {\dagger}}_{j}$ of the columns are
${\pmb k}^{\mathrm{col}, {\dagger}}_{3}=
\begin{ytableau}
1\\
2
\end{ytableau}
=
\begin{ytableau}
A_{13}^{\dagger}
\end{ytableau}
$,
${\pmb k}^{\mathrm{col}, {\dagger}}_{2}=
\begin{ytableau}
2\\
2
\end{ytableau}
=
\begin{ytableau}
A_{22}^{\dagger}\\
A_{12}^{\dagger}
\end{ytableau}
$
and
${\pmb k}^{\mathrm{col}, {\dagger}}_{1}=
\begin{ytableau}
1\\
1\\1\\2\\
1\\1\\2\\2
\end{ytableau}
=
\begin{ytableau}
A_{31}^{\dagger}\\
A_{21}^{\dagger}\\
A_{11}^{\dagger}\\
\end{ytableau}
$. And so, the dual tableau
${\pmb k}^{\dagger}=
\begin{ytableau}
\none & \none & A_{31}^{\dagger}\\
\none & A_{22}^{\dagger} & A_{21}^{\dagger}\\
A_{13}^{\dagger} & A_{12}^{\dagger} & A_{11}^{\dagger}\\
\end{ytableau}
=
\begin{ytableau}
\none & \none & 1\\
\none & \none & 1\\
\none & \none & 1\\
\none & \none & 2\\
\none & \none & 1\\
\none & \none & 1\\
\none & 2 & 2\\
1& 2 & 2\\
2
\end{ytableau}.
$
\end{Example}
\begin{Example}
For $\delta=\lambda/\mu$ with $\lambda=(3,2,1)$ and $\mu=(1)$,
let ${\pmb k}=
\begin{ytableau}
\none & 1 & 3 \\
4 & 2\\
5
\end{ytableau}$. Then
${\pmb k}=
\begin{ytableau}
A_{11} & A_{12} & A_{13} \\
A_{21}
\end{ytableau}$
and the dual tableau is
${\pmb k}^{\dagger}=
\begin{ytableau}
\none & \none & A_{21}^{\dagger}\\
A_{13}^{\dagger} & A_{12}^{\dagger} & A_{11}^{\dagger}\\
\end{ytableau}
=
\begin{ytableau}
\none & \none & 1\\
\none & \none & 1\\
\none & \none & 1\\
\none & \none & 2\\
\none & \none & 1\\
\none & \none & 1\\
1 & 3 & 2\\
2
\end{ytableau}.
$
\end{Example}
\begin{Theorem}\label{maintheorem}
Let $\lambda$ and $\mu$ be partitions. Put $\delta=\lambda/\mu$ and ${\pmb k} \in I_{\delta}^{\rm D}$.
For ${\pmb k}^{\dagger}$ being the dual tableau of ${\pmb k}$ and $\delta^{{\dagger}}$ being the shape of ${\pmb k}^{\dagger}$, we have
$$
\zeta_{\delta}({\pmb k})=\zeta_{\delta^{\dagger}}({\pmb k}^{\dagger}).
$$
\end{Theorem}
We can say this is an extension of \cite[Corollary 6.2]{NPY}.\\
\noindent
\begin{proof}
By the duality formula Theorem \ref{Thduality}, \eqref{JTEskew} equals
$$
\zeta_{\delta}({\pmb s})= \det \left[ \zeta (a_{-\mu_j'+j-1}, a_{-\mu_j'+j-2}, \ldots, a_{-\mu_j'+j-(\lambda'_i-\mu'_j-i+j)})^{\dagger}\right]_{1\leq i, j\leq s}.
$$
Replace $i$ and $j$ with $s-j+1$ and $s-i+1$, respectively; this amounts to reflecting the matrix in its anti-diagonal.
Since such a reflection does not change the determinant, this leads to
$$ \det \left[ \zeta (a_{-\mu_{s-i+1}'+s-i}, a_{-\mu_{s-i+1}'+s-i-1}, \ldots, a_{-\mu_{s-i+1}'+s-i+1-(\lambda'_{s-j+1}-\mu'_{s-i+1}+j-i)})^{\dagger}\right]_{1\leq i, j\leq s}.
$$
Using the Jacobi-Trudi formula for Schur multiple zeta function of skew type \eqref{JTEskew} (\cite[Theorem 4.3 (2)]{NPY}), we have the desired relation.
\end{proof}
\begin{Example}
For $\delta=\lambda/\mu$ with $\lambda=(3,2,1)$ and $\mu=(1,1)$,
let ${\pmb k}=
\begin{ytableau}
\none & 1 & 3 \\
\none & 2\\
5
\end{ytableau}$. Then
${\pmb k}=
\begin{ytableau}
\none & A_{12} & A_{13} \\
A_{21}
\end{ytableau}$
and the dual tableau is
${\pmb k}^{\dagger}=
\begin{ytableau}
\none & \none & A_{21}^{\dagger}\\
A_{13}^{\dagger} & A_{12}^{\dagger}
\end{ytableau}
=
\begin{ytableau}
\none & \none & 1\\
\none & \none & 1\\
\none & \none & 1\\
\none & \none & 2\\
1 & 3 \\
2
\end{ytableau}.
$
We can see
$$\zeta_{\delta}(\pmb k)=
\left|
\begin{matrix}
\begin{ytableau}
5
\end{ytableau} & 0 & 0\\
0 & \begin{ytableau}
1\\
2
\end{ytableau} &
\begin{ytableau}
3\\
1\\
2
\end{ytableau}
\\
0 & 1 & \begin{ytableau}
3
\end{ytableau}
\end{matrix}
\right|
=
\left|
\begin{matrix}
\begin{ytableau}
1\\
1\\
1\\
2
\end{ytableau} & 0 & 0\\
0 & \begin{ytableau}
3\end{ytableau} &
\begin{ytableau}
3\\
1\\
2
\end{ytableau}
\\
0 & 1 & \begin{ytableau}
1\\
2
\end{ytableau}
\end{matrix}
\right|
=
\left|
\begin{matrix}
\begin{ytableau}
1\\
2
\end{ytableau}
&
\begin{ytableau}
3\\
1\\
2
\end{ytableau}
&
0\\
1 &
\begin{ytableau}
3\end{ytableau} &
0\\
0 & 0 &
\begin{ytableau}
1\\
1\\
1\\
2
\end{ytableau}
\end{matrix}
\right|
=\zeta_{{\delta}^{\dagger}}({\pmb k}^{\dagger}),
$$
where ${\delta}^{\dagger}$ is the shape of ${\pmb k}^{\dagger}$.
\end{Example}
\section{Ohno relation}\label{sectionresults2}
Ohno \cite{O} showed the following family of ${\mathbb Q}$-linear relations among multiple zeta values, called
the Ohno relation:
\begin{Theorem}\label{Ohnorelation}
For any $\ell\in {\mathbb Z_{\geq 0}}$ and
any admissible index set ${\bf k}=(k_1, \ldots, k_r)$ and its dual index set ${\bf k^{\dagger}}=(k^{\dagger}_1, \ldots, k^{\dagger}_s)$,
\begin{equation}\label{ohno}
\sum_{\substack{|\varepsilon|=\varepsilon_1+\cdots+\varepsilon_r=\ell\\
{}^{\forall}\varepsilon_i\geq 0}}\zeta(k_1+\varepsilon_1, \ldots, k_r+\varepsilon_r)=
\sum_{\substack{|\varepsilon'|=\varepsilon'_1+\cdots+\varepsilon'_s=\ell\\
{}^{\forall}\varepsilon'_i\geq 0}} \zeta(k^{\dagger}_1+\varepsilon'_1, \ldots, k^{\dagger}_s+\varepsilon'_s).
\end{equation}
\end{Theorem}
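For example, for ${\bf k}=(1,2)$ (so ${\bf k}^{\dagger}=(3)$) and $\ell=1$, the relation \eqref{ohno} reads
$$\zeta(2,2)+\zeta(1,3)=\zeta(4),$$
which is the weight four case of the sum formula.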
Let $\lambda=(\lambda_1, \ldots, \lambda_r)$ and $\mu=(\mu_1, \ldots, \mu_r)$ be two partitions such that
$\lambda_i\geq \mu_i$ for all $i$.
Put $\delta=\lambda/\mu$. For ${\pmb k}\in W_\delta({\mathbb Z}_{\geq 1})$ and
$\ell\in {\mathbb Z}_{\geq 0}$, we set
$${\mathcal O}({\pmb k}: \ell):=\sum_{\substack{|{\pmb \varepsilon}|=\ell}} \zeta_{\delta} ({\pmb k}+{\pmb \varepsilon}),
$$
where ${\pmb \varepsilon}$ runs over $T_{\delta}({\mathbb Z}_{\geq 0})$ with $|{\pmb \varepsilon}|=\ell$.
For column tableaux ${\pmb k}_{j}^{\mathrm{col}}\in I_{(1^n)}^D$ ($j\in\{j_1, \ldots, j_r\}$), we define
$$
{\mathcal O}({\pmb k}_{j_1}^{\mathrm{col}}\times {\pmb k}_{j_2}^{\mathrm{col}}\times\cdots \times {\pmb k}_{j_r}^{\mathrm{col}} : \ell)
:=\sum_{\ell_1+\ell_2+\cdots+\ell_r=\ell}
{\mathcal O}({\pmb k}_{j_1}^{\mathrm{col}} : \ell_1) \cdots {\mathcal O}({\pmb k}_{j_r}^{\mathrm{col}} : \ell_r).
$$
Then we have the following
\begin{Lemma}\label{Lemprodzeta}
For ${\pmb k}_{j}^{\mathrm{col}}\in I_{(1^n)}^D$ ($j\in\{j_1, \ldots, j_r\}$), we have
$$
{\mathcal O}({\pmb k}_{j_1}^{\mathrm{col}}\times {\pmb k}_{j_2}^{\mathrm{col}}\times\cdots \times {\pmb k}_{j_r}^{\mathrm{col}} : \ell) =
{\mathcal O}({\pmb k}_{j_r}^{\mathrm{col}, \dagger}\times \cdots \times {\pmb k}_{j_2}^{\mathrm{col}, \dagger}\times {\pmb k}_{j_1}^{\mathrm{col}, \dagger}: \ell) .
$$
\end{Lemma}
\begin{proof}
By Theorem \ref{Ohnorelation}, we have
\begin{align*}
{\mathcal O}({\pmb k}_{j_1}^{\mathrm{col}}\times {\pmb k}_{j_2}^{\mathrm{col}}\times\cdots \times {\pmb k}_{j_r}^{\mathrm{col}} : \ell)
&=\sum_{\ell_1+\ell_2+\cdots+\ell_r=\ell}
{\mathcal O}({\pmb k}_{j_1}^{\mathrm{col}} : \ell_1) \cdots {\mathcal O}({\pmb k}_{j_r}^{\mathrm{col}} : \ell_r)\\
&=\sum_{\ell_1+\ell_2+\cdots+\ell_r=\ell}
{\mathcal O}({\pmb k}_{j_1}^{\mathrm{col}, \dagger} : \ell_1) \cdots {\mathcal O}({\pmb k}_{j_r}^{\mathrm{col}, \dagger} : \ell_r)\\
&=\sum_{\ell_1+\ell_2+\cdots+\ell_r=\ell}
{\mathcal O}({\pmb k}_{j_r}^{\mathrm{col}, \dagger} : \ell_r) \cdots {\mathcal O}({\pmb k}_{j_1}^{\mathrm{col}, \dagger} : \ell_1)\\
&={\mathcal O}({\pmb k}_{j_r}^{\mathrm{col}, \dagger}\times {\pmb k}_{j_{r-1}}^{\mathrm{col}, \dagger}\times\cdots \times {\pmb k}_{j_1}^{\mathrm{col}, \dagger} : \ell) .
\end{align*}
\end{proof}
Applying Lemma \ref{Lemprodzeta}, we obtain the following formula, which is an extension of the Ohno relation (Theorem \ref{Ohnorelation}).
\begin{Theorem}\label{Thmohno}
Let $\lambda$ and $\mu$ be partitions and put $\delta=\lambda/\mu$. For ${\pmb k} \in I_{\delta}^{\rm D}$
with dual tableau ${\pmb k}^{\dagger}$ and for $\ell\in {\mathbb Z_{\geq 0}}$, we have
$$
{\mathcal O}({\pmb k}: \ell)={\mathcal O}({\pmb k}^{\dagger}: \ell).
$$
\end{Theorem}
We first illustrate this with an example.
\begin{Example}
Let $\lambda=(2,2)$ and $\mu=\emptyset$. We consider ${\pmb k}=
\begin{ytableau}
2 & 3\\
4 &2
\end{ytableau}
\in I_{\lambda}^D$ and ${\pmb \varepsilon}\in T_{\lambda}({\mathbb Z}_{\geq 0})$
with $\ell=1$. Then
$$\left\{{\pmb k}+{\pmb \varepsilon}\; \left| \;|{\pmb \varepsilon}|=\ell \right. \right\}=
\left\{
\begin{ytableau}
3 & 3\\
4 &2
\end{ytableau} , \;
\begin{ytableau}
2 & 4\\
4 &2
\end{ytableau} ,\;
\begin{ytableau}
2 & 3\\
5 &2
\end{ytableau} ,\;
\begin{ytableau}
2 & 3\\
4 &3
\end{ytableau}
\right\}.
$$
By Lemma \ref{Lem31}, we have
\begin{align}
&\sum_{\mathrm{diag}}
\zeta_{\lambda}\left(
\begin{ytableau}
3 & 3\\
4 &2
\end{ytableau}
\right)=
\zeta_{\lambda}\left(
\begin{ytableau}
3 & 3\\
4 &2
\end{ytableau}
\right)+
\zeta_{\lambda}\left(
\begin{ytableau}
2 & 3\\
4 &3
\end{ytableau}
\right)=\sum_{\mathrm{diag}}\sum_{\sigma\in S_E^{\lambda}}\varepsilon_{\sigma}\prod_{i=1}^2 \zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
3 & 3\\
4 &2
\end{ytableau}
\right)\right)\label{diag1},\\
&
\sum_{\mathrm{diag}}
\zeta_{\lambda}\left(
\begin{ytableau}
2 & 4\\
4 &2
\end{ytableau}
\right)=
\zeta_{\lambda}\left(
\begin{ytableau}
2 & 4\\
4 &2
\end{ytableau}
\right)+
\zeta_{\lambda}\left(
\begin{ytableau}
2 & 4\\
4 &2
\end{ytableau}
\right)=2\sum_{\sigma\in S_E^{\lambda}}\varepsilon_{\sigma}\prod_{i=1}^2 \zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
2 & 4\\
4 &2
\end{ytableau}
\right)\right)\label{diag2},\\
&
\sum_{\mathrm{diag}}
\zeta_{\lambda}\left(
\begin{ytableau}
2 & 3\\
5 &2
\end{ytableau}
\right)=
\zeta_{\lambda}\left(
\begin{ytableau}
2 & 3\\
5 &2
\end{ytableau}
\right)+
\zeta_{\lambda}\left(
\begin{ytableau}
2 & 3\\
5 &2
\end{ytableau}
\right)=2\sum_{\sigma\in S_E^{\lambda}}\varepsilon_{\sigma}\prod_{i=1}^2 \zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
2 & 3\\
5 &2
\end{ytableau}
\right)\right)\label{diag3},\\
&\sum_{\mathrm{diag}}
\zeta_{\lambda}\left(
\begin{ytableau}
2 & 3\\
4 &3
\end{ytableau}
\right)=
\zeta_{\lambda}\left(
\begin{ytableau}
2 & 3\\
4 &3
\end{ytableau}
\right)+
\zeta_{\lambda}\left(
\begin{ytableau}
3 & 3\\
4 &2
\end{ytableau}
\right)=\sum_{\mathrm{diag}}\sum_{\sigma\in S_E^{\lambda}}\varepsilon_{\sigma}\prod_{i=1}^2 \zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
2 & 3\\
4 &3
\end{ytableau}
\right)\right)\label{diag4}.
\end{align}
Since the equations \eqref{diag1} and \eqref{diag4} are the same, summing both sides of \eqref{diag1}--\eqref{diag4} leads to
\begin{align}
2{\mathcal O}\left({\pmb k}:1\right)
&=
\sum_{\mathrm{diag}}\left(
\zeta_{\lambda}\left(
\begin{ytableau}
3 & 3\\
4 &2
\end{ytableau}
\right)
+
\zeta_{\lambda}\left(
\begin{ytableau}
2 & 4\\
4 &2
\end{ytableau}
\right)
+
\zeta_{\lambda}\left(
\begin{ytableau}
2 & 3\\
5 &2
\end{ytableau}
\right)
+
\zeta_{\lambda}\left(
\begin{ytableau}
2 & 3\\
4 &3
\end{ytableau}
\right)
\right)\notag\\
&=2\sum_{\sigma\in S_E^{\lambda}}\varepsilon_{\sigma}
\left(
\prod_{i=1}^2\zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
3 & 3\\
4 &2
\end{ytableau}
\right)\right)
+
\prod_{i=1}^2\zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
2 & 4\\
4 &2
\end{ytableau}
\right)\right)
\right.\notag\\
&\hspace{1cm}+\left.
\prod_{i=1}^2\zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
2 & 3\\
5 &2
\end{ytableau}
\right)\right)
+
\prod_{i=1}^2\zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
2 & 3\\
4 &3
\end{ytableau}
\right)\right)
\right)\label{examplecal},
\end{align}
where the coefficient $2$ is the order of the symmetric group on the elements of the diagonal. Dividing \eqref{examplecal} by $2$, we obtain
$${\mathcal O}\left({\pmb k}:1\right)=\sum_{|{\pmb \varepsilon}|=1}\sum_{\sigma\in S_E^{\lambda}}\varepsilon_{\sigma}\prod_{i=1}^2\zeta(\theta_i^{\sigma}({\pmb k}+{\pmb \varepsilon})).
$$
The right hand side is
\begin{align*}
&\sum_{\sigma\in S_E^{\lambda}}\varepsilon_{\sigma}
\left(
\prod_{i=1}^2\zeta\left(\theta_i^{\sigma}\left(
\ytableausetup{centertableaux}
\begin{ytableau}
3 & 3\\
4 &2
\end{ytableau}
\right)\right)
+
\prod_{i=1}^2\zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
2 & 4\\
4 &2
\end{ytableau}
\right)\right)
\right.\\
&\hspace{4cm}+\left.
\prod_{i=1}^2\zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
2 & 3\\
5 &2
\end{ytableau}
\right)\right)
+
\prod_{i=1}^2\zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
2 & 3\\
4 &3
\end{ytableau}
\right)\right)
\right)\\
\\
&=\zeta(3,4)\zeta(3,2)+
\zeta(2,4)\zeta(4,2)+\zeta(2,5)\zeta(3,2)+\zeta(2,4)\zeta(3,3)\\
&-(\zeta(3)\zeta(3,2,4)+\zeta(2)\zeta(4,2,4)+\zeta(2)\zeta(3,2,5)
+\zeta(2)\zeta(3,3,4))
\\
&=
{\mathcal O}\left(
\begin{ytableau}
2 \\
4
\end{ytableau}\times
\begin{ytableau}
3 \\
2
\end{ytableau}\; : 1\right)
-
{\mathcal O}\left(
\begin{ytableau}
2
\end{ytableau}\times
\begin{ytableau}
3 \\
2 \\
4
\end{ytableau}\; : 1\right).
\end{align*}
By Lemma \ref{Lemprodzeta}, this becomes
$$
{\mathcal O}\left(
\ytableausetup{centertableaux}
\begin{ytableau}
2 \\
1\\
2
\end{ytableau}\times
\begin{ytableau}
1 \\
1\\
2\\
2
\end{ytableau}\; : 1\right)
-
{\mathcal O}\left(
\begin{ytableau}
1 \\
1\\
2 \\
2\\
1\\
2
\end{ytableau}\times
\begin{ytableau}
2
\end{ytableau}\; : 1\right).
$$
A similar calculation gives
\begin{align*}
&\sum_{\sigma\in S_E^{\lambda}}\varepsilon_{\sigma}\prod_{i=1}^2
\left(
\zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
\none & 1\\
\none & 1\\
3 & 2\\
1 & 2\\
2
\end{ytableau}
\right)\right)
+
\zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
\none & 1\\
\none & 1\\
2 & 2\\
2 & 2\\
2
\end{ytableau}
\right)\right)
+
\zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
\none & 1\\
\none & 1\\
2 & 2\\
1 & 2\\
3
\end{ytableau}
\right)\right)
\right.\\
&+\left.
\zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
\none & 2\\
\none & 1\\
2 & 2\\
1 & 2\\
2
\end{ytableau}
\right)\right)
+
\zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
\none & 1\\
\none & 2\\
2 & 2\\
1 & 2\\
2
\end{ytableau}
\right)\right)
+
\zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
\none & 1\\
\none & 1\\
2 & 3\\
1 & 2\\
2
\end{ytableau}
\right)\right)
+
\zeta\left(\theta_i^{\sigma}\left(
\begin{ytableau}
\none & 1\\
\none & 1\\
2 & 2\\
1 & 3\\
2
\end{ytableau}
\right)\right)
\right)\\
&={\mathcal O}\left(
\ytableausetup{centertableaux}
\begin{ytableau}
\none & 1\\
\none & 1\\
2 & 2\\
1 & 2\\
2
\end{ytableau}
\; :1
\right)={\mathcal O}({\pmb k}^{\dagger}\; : 1).
\end{align*}
\end{Example}
\noindent
{\it Proof of Theorem \ref{Thmohno}.}\;
Assume that ${\pmb k}$ has $n$ diagonal lines with two or more elements, consisting of $m_1, \ldots, m_n$ elements, respectively ($m_i\geq 2$ for $1\leq i\leq n$).
By Lemma \ref{Lem31},
\begin{align*}
|{\frak S}_{m_1}|\ldots |{\frak S}_{m_n}|{\mathcal O}({\pmb k}: \ell)
&=\sum_{\mathrm{diag}}\sum_{\substack{|{\pmb \varepsilon}|=\ell}} \zeta_{\delta} ({\pmb k}+{\pmb \varepsilon})\\
&=|{\frak S}_{m_1}|\ldots |{\frak S}_{m_n}|
\sum_{\substack{|{\pmb \varepsilon}|=\ell}}\sum_{\sigma\in S_E^{\delta}}\varepsilon_{\sigma}\prod_{i=1}^s \zeta(\theta_i^{\sigma}({\pmb k}+{\pmb \varepsilon})).
\end{align*}
This leads to
$${\mathcal O}({\pmb k}: \ell)=\sum_{\substack{|{\pmb \varepsilon}|=\ell}}\sum_{\sigma\in S_E^{\delta}}\varepsilon_{\sigma}\prod_{i=1}^s \zeta(\theta_i^{\sigma}({\pmb k}+{\pmb \varepsilon})).
$$
After changing the order of summations,
\begin{align*}
{\mathcal O}({\pmb k}: \ell)&=\sum_{\sigma\in S_E^{\delta}}\varepsilon_{\sigma}\sum_{\substack{
{\pmb \varepsilon}\in T_{\delta}({\mathbb Z}_{\geq 0})\\
|{\pmb \varepsilon}|=\ell}}\prod_{i=1}^s \zeta(\theta_i^{\sigma}({\pmb k}+{\pmb \varepsilon}))\\
&=\sum_{\sigma\in S_E^{\delta}}\varepsilon_{\sigma}{\mathcal O}(\theta_1^{\sigma}({\pmb k})\times \theta_2^{\sigma}({\pmb k})\times \ldots \times \theta_s^{\sigma}({\pmb k}) : \ell).
\end{align*}
By Lemma \ref{Lemprodzeta}, this becomes
\begin{align*}
{\mathcal O}({\pmb k}: \ell)&=\sum_{\sigma'\in S_E^{\delta^{\dagger}}}\varepsilon_{\sigma'}{\mathcal O}(\theta_s^{\sigma'}({\pmb k}^{\dagger})\times \theta_{s-1}^{\sigma'}({\pmb k}^{\dagger})\times \ldots \times \theta_1^{\sigma'}({\pmb k}^{\dagger}) : \ell)\\
&=\sum_{\sigma'\in S_E^{\delta^{\dagger}}}\varepsilon_{\sigma'}\sum_{\substack{{\pmb \varepsilon}'\in T_{\delta^{\dagger}}({\mathbb Z}_{\geq 0})\\
|{\pmb \varepsilon}'|=\ell}}\prod_{i=1}^s \zeta(\theta_i^{\sigma'}({\pmb k}^{\dagger}+{\pmb \varepsilon}'))\\
&=\sum_{\substack{{\pmb \varepsilon}'\in T_{\delta^{\dagger}}({\mathbb Z}_{\geq 0})\\
|{\pmb \varepsilon}'|=\ell}}\sum_{\sigma'\in S_E^{\delta^{\dagger}}}\varepsilon_{\sigma'}\prod_{i=1}^s \zeta(\theta_i^{\sigma'}({\pmb k}^{\dagger}+{\pmb \varepsilon}'))
={\mathcal O}({\pmb k}^{\dagger}: \ell).
\end{align*}
\hfill$\Box$

\medskip
\noindent
\textsc{Maki Nakasuji}\\
Department of Information and Communication Science, Faculty of Science, \\
Sophia University, \\
7-1 Kio-cho, Chiyoda-ku, Tokyo, 102-8554, Japan \\
\texttt{[email protected]}

\noindent
\textsc{Yasuo Ohno}\\
Mathematical Institute, \\
Tohoku University, \\
Sendai 980-8578, Japan \\
\texttt{[email protected]}
\end{document}
|
\begin{document}
\title{3-colored asymmetric bipartite Ramsey number of connected matchings and cycles}
\begin{abstract}
Let $k,l,m$ be integers and let $r(k,l,m)$ be the minimum integer $N$ such that for any red-blue-green coloring of $K_{N,N}$, there is a red matching of size at least $k$ in a component, or a blue matching of size at least $l$ in a component, or a green matching of size at least $m$ in a component.
In this paper, we determine the exact value of $r(k,l,m)$ completely. Applying a technique originated by {\L}uczak, which uses Szemer\'edi's Regularity Lemma to reduce the problem of showing the existence of a monochromatic cycle to that of showing the existence of a monochromatic matching in a component, we obtain the 3-colored asymmetric bipartite Ramsey number of cycles asymptotically.
\end{abstract}
\section{Introduction}
Let $k\geq 2$ be an integer and $H_{1},...,H_{k}$ be graphs. The Ramsey number $R(H_{1},...,H_{k})$ is the minimum integer $N$ such that any $k$-edge-coloring of $K_{N}$ contains a monochromatic $H_{i}$ in color $i$ for some $1\leq i\leq k$. If $H_{1}=H_{2}=...=H_{k}=H$, we simplify the notation as $R_{k}(H)$.
In 1967, Gerencs\'er and Gy\'arf\'as \cite{GG} showed that $R_{2}(P_{n}) =\lfloor \frac{3n}{2}-1 \rfloor$, where $P_{n}$ is a path on $n$ vertices. Bondy and Erd\H{o}s~\cite{JB73}, Faudree and Schelp \cite{FS}, and Rosta \cite{R} determined the 2-colour Ramsey number of cycles. The case of 3 colours is more difficult, and there were almost no results until 1999, when {\L}uczak \cite{L} treated odd cycles and showed that $R(C_{n},C_{n},C_{n})\leq (4+o(1))n$ for $n$ sufficiently large. In \cite{L}, {\L}uczak introduced a technique that applies Szemer\'edi's Regularity Lemma to reduce the problem to showing the existence of a large enough monochromatic matching in a component.
This technique has become useful in determining the asymptotic value of Ramsey numbers of cycles. In 2007, {\L}uczak and Figaj \cite{LF} determined the 3-colour Ramsey numbers for paths and even cycles asymptotically. These results were strengthened: Kohayakawa, Simonovits and Skokan \cite{KSS} extended them to long odd cycles, Gy\'arf\'as, Ruszink\'o, S\'ark\"ozy and Szemer\'edi \cite{GRSS} to long paths, and Benevides and Skokan \cite{BS} to long even cycles. Jenssen and Skokan \cite{JS} proved that $R_{k}(C_{n})= 2^{k-1}(n-1)+1$ for every $k$ and sufficiently large odd $n$, while Day and Johnson \cite{DJ} showed that this does not hold for all $k$ and $n$.
It is natural to replace the underlying complete graph by a complete bipartite graph. Let $k\geq 2$ be an integer and let $H_{1},...,H_{k}$ be bipartite graphs. The bipartite Ramsey number $br(H_{1},...,H_{k})$ is the minimum integer $N$ such that any $k$-edge-coloring of $K_{N,N}$ contains a monochromatic $H_{i}$ in color $i$ for some $1\leq i\leq k$. The study of bipartite Ramsey numbers was initiated in the early 70s by Faudree and Schelp \cite{FS1} and independently by Gy\'arf\'as and Lehel \cite{GL}, who showed that
\[br_{2}(P_{n})= \left \{
\begin{array}{ll}
n-1 & \mbox {if $n$ is even,}\\
n & \mbox {if $n$ is odd.}
\end{array}
\right.
\]
Zhang and Sun \cite{ZS} determined $br(C_{2n},C_{4})=n+1$ for $n\geq 4$ and Zhang, Sun and Wu \cite{ZSW} determined the value of $br(C_{2n}, C_{6})$ for $n\geq 4$. Goddard, Henning and Oellermann \cite{GHO} determined $br(C_{4},C_{4},C_{4})=11$. Joubert \cite{J} showed that $$br(C_{2t_{1}},C_{2t_{2}},...,C_{2t_{k}})\leq k(t_{1}+t_{2}+...+t_{k}-k+1)$$ where the $t_{i}$ are integers with $2\leq t_{i}\leq 4$ for all $1\leq i\leq k$. Recently, Shen, Lin and Liu \cite{SLL} gave the asymptotic value of $br(C_{2n}, C_{2n})$. In \cite{LP} and \cite{LP1}, Liu and Peng gave the asymptotic value of $br(C_{2\lfloor \alpha_{1} n\rfloor}, C_{2\lfloor \alpha_{1} n\rfloor})$ and $br(C_{2\lfloor \alpha_{1} n\rfloor}, C_{2\lfloor \alpha_{1} n\rfloor},...,C_{2\lfloor \alpha_{r} n\rfloor})$ under certain conditions. They also gave a minimum degree condition. The best known lower bound on $br_{2}(K_{n,n})$ is due to Hattingh and Henning \cite{HH} and the best known upper bound is due to Conlon \cite{C}.
\begin{defi}\label{def2.1}
We say that $M$ is a $k$-connected matching in graph $G$, if $M$ is a component (a maximal connected subgraph) and the size of a maximum matching in $M$ is at least $k$.
\end{defi}
\begin{defi}\label{def1.1}
Let $r(k,l,m)$ denote the smallest integer $n$ such that for any 3-coloring of $K_{n,n}$, there is a monochromatic $k$-connected matching in color 1 or a monochromatic $l$-connected matching in color 2 or a monochromatic $m$-connected matching in color 3.
\end{defi}
Buci\'c, Letzter and Sudakov \cite{BLS} have determined the exact value of $r(k,l,l)$ by applying K\"onig's theorem as a tool.
\begin{theo}[Buci\'c, Letzter, Sudakov \cite{BLS}] \label{theo1.1}
\[r(k,l,l)= \left \{
\begin{array}{llll}
k+2l-2 & \mbox {if $l \leq \frac{k+1}{2}$,}\\
4l-2 & \mbox {if $\frac{k+1}{2} < l \leq \frac{2k}{3}$,}\\
2k+l-2 & \mbox {if $\frac{2k}{3} < l < k$,}\\
k+2l-2 & \mbox {if $k \leq l$.}
\end{array}
\right.
\]
\end{theo}
Applying the above theorem and the Regularity Lemma ({\L}uczak's technique), they determined that $br(C_{2n},C_{2n}, C_{2n})=(3+o(1))n$ when $n$ is sufficiently large. They commented that it is natural to consider the asymmetric 3-color bipartite Ramsey numbers for cycles, and that determining $r(k,l,m)$ is interesting in its own right.
Applying K\"onig's theorem as in \cite{BLS}, we determine the exact value of $r(k,l,m)$ completely as stated in the following theorem.
\begin{theo}\label{theo3}
Let $2\leq k< l< m$. Then we have
\[r(k,l,m)=\left \{
\begin{array}{lll}
k+2m-2 & \mbox {if $3\leq k< l <m \leq l+\frac{k-1}{2}$,}\\
2k+2l-3 & \mbox {if $3\leq k< l, l+\frac{k-1}{2}< m< k+l-1$,}\\
k+l+m-2 &\mbox {if $2\leq k< l, m\geq k+l-1$.}
\end{array}
\right.
\]
\end{theo}
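For instance, for $(k,l,m)=(3,4,5)$ the first case applies, since $m=5\leq l+\frac{k-1}{2}=5$, and gives $r(3,4,5)=k+2m-2=11$. Note also that the three expressions agree at the boundaries of the three ranges: at $m=l+\frac{k-1}{2}$ we have $k+2m-2=2k+2l-3$, and at $m=k+l-1$ we have $2k+2l-3=k+l+m-2$.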
Then applying the technique of {\L}uczak, we obtain the asymptotic value of \\ $br(C_{2\lfloor \alpha_{1}n\rfloor}, C_{2\lfloor \alpha_{2}n\rfloor}, C_{2\lfloor \alpha_{3}n\rfloor})$.
\begin{theo}\label{theo2}
Let $\alpha_{1}>0$. Then
\[br(C_{2\lfloor \alpha_{1}n\rfloor}, C_{2\lfloor \alpha_{2}n\rfloor}, C_{2\lfloor \alpha_{3}n\rfloor})=\left \{
\begin{array}{lll}
(\alpha_{1}+ 2\alpha_{3}+ o(1))n &\mbox {if $\alpha_{1}< \alpha_{2}< \alpha_{3}\leq \frac{\alpha_{1}}{2}+\alpha_{2}$,}\\
(2\alpha_{1}+ 2\alpha_{2}+ o(1))n &\mbox {if $\alpha_{1}< \alpha_{2}, \frac{\alpha_{1}}{2}+\alpha_{2}< \alpha_{3}< \alpha_{1}+\alpha_{2}$,}\\
(\alpha_{1}+ \alpha_{2}+ \alpha_{3}+ o(1))n &\mbox {if $\alpha_{1}< \alpha_{2}, \alpha_{3}\geq \alpha_{1}+\alpha_{2}$.}
\end{array}
\right.
\]
\end{theo}
The paper is organized as follows. In Section 2, we give some preliminary definitions and useful facts. In Section 3, we give a proof of $r(k,l)= k+l-1$, which was mentioned in \cite{BLS} without a detailed proof. In Section 4, we give the proof of Theorem \ref{theo3}. As mentioned earlier, applying Theorem \ref{theo3} and the Regularity Lemma, we can obtain Theorem \ref{theo2}. Since this technique of {\L}uczak has become fairly standard in this area, for completeness we give the proof of Theorem \ref{theo2} in the Appendix.
\section{Preliminaries}
We assume that bipartite graphs under consideration have bipartition $V_1\cup V_2$.
\begin{defi}\label{def2.2}
We say that a component is in $V_{i}$ if there exists a minimum vertex cover of this component contained in $V_{i}$.
\end{defi}
\begin{defi}\label{def2.3}
We call a vertex a cover vertex of a component if there is a minimum vertex cover of this component containing it. Furthermore, if this component is monochromatic in color $i$, then we call a cover vertex of this component an $i$-cover vertex.
\end{defi}
\begin{remark}\label{remark2.1}
If a component $C$ is in $V_{1}$, then $C\cap V_{1}$ is a minimum vertex cover of $C$ and each vertex in $C\cap V_{1}$ is a cover vertex. \hspace*{\fill}$\Box$
\end{remark}
\begin{remark}\label{remark2.2}
If $C$ is a $k$-connected matching, then $|C\cap V_{1}|\geq k$ and $|C\cap V_{2}|\geq k$. Moreover, if no minimum vertex cover of $C$ is in $V_{1}$, then $|C\cap V_{1}|\geq k+1$. \hspace*{\fill}$\Box$
\end{remark}
\begin{defi}\label{def2.4}
We call a vertex a red-blue vertex, if this vertex belongs to the intersection of a red component and a blue component.
\end{defi}
The lower bound for $r(k_{1}, k_{2},..., k_{p})$ can be easily obtained by the following construction.
Let $n= \sum_{i=1}^p k_{i}-p$ and consider $K_{n,n}$ with vertex set $V_{1}\cup V_{2}$. Partition $V_{1}$ into $p$ parts $\cup ^{p}_{i=1}S_{i}$ with $|S_{i}|= k_{i}-1$. Color all edges between $S_{i}$ and $V_{2}$ with the $i$-th color. Since every component in the $i$-th color is contained in $S_{i}\cup V_{2}$ and $|S_{i}|=k_{i}-1$, there is no monochromatic $k_{i}$-connected matching for any $i\in [1,p]$.
\begin{fact}\label{fact2.2}
$r(k_{1}, k_{2},..., k_{p})\geq \sum_{i=1}^p k_{i}-p+1$. \hspace*{\fill}$\Box$
\end{fact}
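For illustration, take $p=2$ and $(k_{1},k_{2})=(2,3)$, so that $n=3$: partition $V_{1}$ of $K_{3,3}$ into $S_{1}$ with one vertex and $S_{2}$ with two vertices, and color all edges between $S_{1}$ and $V_{2}$ with color 1 and all edges between $S_{2}$ and $V_{2}$ with color 2. Every component in color 1 has a maximum matching of size $1<2$ and every component in color 2 has a maximum matching of size $2<3$, so indeed $r(2,3)\geq 4$.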
One of the most important tools in this paper is K\"onig's theorem:
\begin{theo}\label{theo1}
In a bipartite graph, the size of a maximum matching is equal to the number of vertices in a minimum vertex cover.\hspace*{\fill}$\Box$
\end{theo}
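The way K\"onig's theorem is used below can be summarized by the following observation: if $W$ is a minimum vertex cover of a component $C$ and $v\in W$, then $W\setminus\{v\}$ covers every edge of $C-v$, so every component of $C-v$ has a maximum matching of size at most $|W|-1$. In particular, when the maximum matching of $C$ has size exactly $k$, removing a single cover vertex of $C$ destroys every $k$-connected matching inside $C$.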
\section{The exact value of r(k,l)}
\begin{fact}\label{fact2.3}
$r(2,2)=3$.
\end{fact}
\begin{proof}
By Fact \ref{fact2.2}, $r(2,2) \geq 3$, so we need to show that $r(2,2) \leq 3$. If $r(2,2)\neq 3$, then there exists a 2-edge-coloring of $K_{3,3}$ such that there is no monochromatic 2-connected matching. Since every vertex has degree three, every vertex is incident to at least two edges of the same color. W.L.O.G, suppose that $v\in V_{1}$ is incident to two red edges $vu_{1}, vu_{2}$. Then the edges connecting $u_{1}$, $u_{2}$ with the other vertices of $V_{1}$ must be blue, since otherwise there would be a red 2-connected matching. But this generates a blue 2-connected matching, a contradiction. So we have shown that $r(2,2)\leq 3$. \hspace*{\fill}$\Box$
\end{proof}
\begin{lemma}\label{lem2.4}
$r(k,k)=2k-1$ for any $k\geq 2$.
\end{lemma}
\begin{proof}
By Fact \ref{fact2.2}, $r(k,k) \geq 2k-1$, so we need to show that $r(k,k) \leq 2k-1$.
We use induction on $k$. The assertion holds for $k=2$ by Fact \ref{fact2.3}. Assume that $k \geq 2$ and the assertion holds for $k$. If $r(k+1,k+1) \neq 2k+1$, then there exists a 2-edge-coloring of $K_{2k+1,2k+1}$ such that there is no monochromatic $(k+1)$-connected matching. Note that it contains at most two red $k$-connected matchings $\{R_{1}, R_{2}\}$ and at most two blue $k$-connected matchings $\{B_{1}, B_{2}\}$. Clearly, the size of a maximum matching in these monochromatic $k$-connected matchings is exactly $k$.
Case 1: There are exactly two red $k$-connected matchings and two blue $k$-connected matchings.
By Remark \ref{remark2.2}, each $k$-connected matching not in $V_{i}$ contains at least $k+1$ vertices in $V_{i}$, and $|V_{i}|=2k+1$; hence at least one red $k$-connected matching and at least one blue $k$-connected matching are in $V_{i}$ $(i=1, 2)$.
Subcase 1.1: Four monochromatic $k$-connected matchings are in $V_{1}$, then it is easy to show that $R_{1}\cap B_{1}\cap V_{1}\neq \emptyset, R_{2}\cap B_{2}\cap V_{1}\neq \emptyset$ (interchange the subscripts of $B_{i}'s$ if necessary). By the assumption, every vertex in $R_{i} \cap V_{1}(B_{j}\cap V_{1})$ is a red (blue) cover vertex of $R_{i}(B_{j})$. Then remove one red-blue cover vertex from $R_{1}\cap B_{1}\cap V_{1}$ and $R_{2}\cap B_{2}\cap V_{1}$ respectively and two vertices in $V_{2}$. Then the cardinality of a minimum vertex cover of each $R_{i},B_{j}$ is reduced by one. By K\"{o}nig's theorem, after the removal of these four vertices, there is neither red $k$-connected matching nor blue $k$-connected matching in the remaining $K_{2k-1,2k-1}$. A contradiction to our induction hypothesis that $r(k,k)=2k-1$.
Subcase 1.2: Three monochromatic $k$-connected matchings are in $V_{1}$ and another is not in $V_{1}$.
W.L.O.G, suppose two red $k$-connected matchings $\{R_{1}, R_{2}\}$ and one blue $k$-connected matching $B_{1}$ are in $V_{1}$. Again, every vertex of these $k$-connected matchings in $V_{1}$ is a cover vertex of these $k$-connected matchings. Note that $B_{1}$ must intersect at least one red $k$-connected matching (assume that it is $R_{1}$) in $V_{1}$. Remove one cover vertex from $R_{1}\cap B_{1}\cap V_{1}$ and one cover vertex from $R_{2}\cap V_{1}$, and remove one cover vertex from $B_{2}\cap V_{2}$ together with one arbitrary other vertex in $V_{2}$. Then there is neither a red $k$-connected matching nor a blue $k$-connected matching in the remaining $K_{2k-1,2k-1}$. A contradiction again.
Subcase 1.3: Two $k$-connected matchings are in $V_{1}$ and others are not in $V_{1}$, then remove one cover vertex from two $k$-connected matchings in $V_{1}$ respectively and one cover vertex from other two $k$-connected matchings in $V_{2}$ respectively. A contradiction again.
Case 2: There are two red $k$-connected matchings and one blue $k$-connected matching. (The argument is the same if there are one red $k$-connected matching and two blue $k$-connected matchings.) In this case, by Remark \ref{remark2.2} and $|V_{i}|=2k+1$ $(i=1, 2)$, at least one red $k$-connected matching is in $V_{1}$ and at least one red $k$-connected matching is in $V_{2}$.
Subcase 2.1: All of them are in $V_{1}$. It is similar to subcase 1.2 which we have discussed.
Subcase 2.2: One red $k$-connected matching is in $V_{1}$ (say $R_{1}$) and at least one of the other two monochromatic $k$-connected matchings (say $R_{2}$) has at least 1 cover vertex in $V_{2}$. Then remove one cover vertex from $R_{1}\cap V_{1}$ and one cover vertex from $R_{2}\cap V_{2}$. Remove one cover vertex from the blue $k$-connected matching and one more vertex in $V_{1}$ or $V_{2}$ such that two vertices have been removed from each of $V_{i}$ ($i=1,2$). A contradiction again.
Case 3: There are two monochromatic $k$-connected matchings.
Remove one cover vertex from each of these two monochromatic $k$-connected matchings. Remove other vertices from $V_{i}$ ($i=1,2$) until two vertices have been removed from each of $V_{i}$ ($i=1,2$). A contradiction again.
Case 4: There is one monochromatic $k$-connected matching.
Remove one cover vertex from this monochromatic $k$-connected matching. Remove other vertices from $V_{i}$ ($i=1,2$) until two vertices have been removed from each of $V_{i}$ ($i=1,2$). A contradiction again.
There is at least one monochromatic $k$-connected matching, so we have discussed all possible cases and have shown that $r(k, k) \leq 2k-1$. \hspace*{\fill}$\Box$
\end{proof}
\begin{lemma}\label{lem2.5}
$r(k,k+1)=2k$ for $k\geq 2$.
\end{lemma}
\begin{proof}
By Fact \ref{fact2.2}, $r(k,k+1) \geq 2k$, so we need to show that $r(k,k+1) \leq 2k$. If $r(k,k+1) \neq 2k$, then there exists a 2-edge-coloring of $K_{2k,2k}$ such that there is no red $k$-connected matching, no blue $(k+1)$-connected matching, and hence at most two blue $k$-connected matchings. Clearly, the size of a maximum matching in these blue $k$-connected matchings is exactly $k$.
Case 1: There are two blue $k$-connected matchings. Then each of the two blue $k$-connected matchings must have exactly $k$ vertices in each part of $K_{2k,2k}$. Note that each vertex of a blue $k$-connected matching is a cover vertex. Then remove one cover vertex of one blue $k$-connected matching in $V_{1}$ and one cover vertex of the other blue $k$-connected matching in $V_{2}$. By K\"onig's theorem there is neither a red $k$-connected matching nor a blue $k$-connected matching in the remaining $K_{2k-1,2k-1}$. A contradiction to Lemma \ref{lem2.4}.
Case 2: There is one blue $k$-connected matching (say $B$). Then remove one cover vertex from $B\cap V_{i}$ $(i=1$ or $2)$ and any other vertex from $V_{3-i}$. A contradiction again.\hspace*{\fill}$\Box$
\end{proof}
\begin{theo}\label{theo2.1}
$r(k,l)=k+l-1$ for $l\geq k\geq 2$.
\end{theo}
\begin{proof}
By Fact \ref{fact2.2}, $r(k,l)\geq k+l-1$, so we need to show that $r(k,l)\leq k+l-1$.
We use induction on $l$. The assertion holds for $l=k$ by Lemma \ref{lem2.4} and for $l=k+1$ by Lemma \ref{lem2.5}. Assume that $l\geq k+1$ and the assertion holds for $l$. If $r(k,l+1) \neq k+l$, then there exists a 2-edge-coloring of $K_{k+l,k+l}$ such that there is no red $k$-connected matching and at most one blue $l$-connected matching. Clearly, the size of a maximum matching in this blue $l$-connected matching is exactly $l$.
Remove one cover vertex of this blue $l$-connected matching, lying in $V_{i}$ for some $i\in\{1,2\}$, and remove any other vertex in $V_{3-i}$. Then by K\"onig's theorem, there is neither a red $k$-connected matching nor a blue $l$-connected matching in the remaining $K_{k+l-1,k+l-1}$. A contradiction to our induction hypothesis. So $r(k,l+1) \leq k+l$ when $l\geq k+1$. The proof is complete. \hspace*{\fill}$\Box$
\end{proof}
\section{Exact value of r(k,l,m)}
In this section, we will determine the exact value of $r(k,l,m)$. The exact values of $r(k,k,k)$ and $r(k,l,l)$ have been determined in \cite{BLS}. So we assume that $m >l >k\geq 2$ in this section.
\begin{lemma}\label{lemma5.2}
$r(k,l,m)\geq k+2m-i-2$ for $0\leq i\leq k-2, 3\leq k< l< m\leq \frac{k+i-1}{2}+l$.
\end{lemma}
\begin{proof}
Let $k,l,m,i$ satisfy the conditions. We show that there exists a 3-edge-coloring of $K_{k+2m-i-3,k+2m-i-3}$ with no red $k$-connected matching, no blue $l$-connected matching and no green $m$-connected matching. Then we have $r(k,l,m)\geq k+2m-i-2$.
In $K_{k+2m-i-3,k+2m-i-3}$, partition $V_{1}$ into 3 sets $S_{1}, S_{2}, S_{3}$ with $|S_{1}|=m-1$, $|S_{2}|=m-1$, $|S_{3}|=k-1-i$. Partition $V_{2}$ into 4 sets $T_{1}, T_{2}, T_{3}, T_{4}$. If $2(m-l)-i> 0$, then let $|T_{1}|=l-1$, $|T_{2}|=l-1$, $|T_{3}|=k-1$, $|T_{4}|=2(m-l)-i$. Otherwise, let $|T_{1}| \leq l-1$, $|T_{2}| \leq l-1$, $|T_{3}| \leq k-1$, $|T_{4}|= 0$ (this is possible since $k+2l-3\geq k+2m-i-3$ when $2(m-l)-i\leq 0$). It is easy to see that $2(m-l)-i\leq k-1$ since $l< m\leq \frac{k+i-1}{2}+l$. For a colour $C$, let $C$: $(a,b)$ denote that all of the edges between $S_{a}$ and $T_{b}$ are colored $C$. We colour $K_{k+2m-i-3,k+2m-i-3}$ as follows:
Red: $(1,4), (2,3), (3,1), (3,2)$;
Blue: $(1,2), (2,1), (3,3), (3,4)$;
Green: $(1,1), (1,3), (2,2), (2,4)$.
It is easy to verify that there is no red $k$-connected matching, no blue $l$-connected matching and no green $m$-connected matching in this 3-coloring. Indeed, the red components are spanned by $S_{1}\cup T_{4}$, $S_{2}\cup T_{3}$ and $S_{3}\cup (T_{1}\cup T_{2})$, whose maximum matchings have size at most $|T_{4}|\leq k-1$, $|T_{3}|\leq k-1$ and $|S_{3}|=k-1-i$, respectively; the blue components are spanned by $S_{1}\cup T_{2}$, $S_{2}\cup T_{1}$ and $S_{3}\cup (T_{3}\cup T_{4})$, with maximum matchings of size at most $l-1$; and the green components are spanned by $S_{1}\cup (T_{1}\cup T_{3})$ and $S_{2}\cup (T_{2}\cup T_{4})$, with maximum matchings of size at most $|S_{1}|=|S_{2}|=m-1$. This shows that $r(k,l,m)\geq k+2m-i-2$.\hspace*{\fill}$\Box$
\end{proof}
We will show that the lower bound is also an upper bound.
\begin{lemma}\label{lemma5.1}
$r(k,l,l+1)\leq k+2l$ for $l> k\geq 2$.
\end{lemma}
\begin{proof}
By Theorem \ref{theo1.1}, we have $r(k,l,l)=k+2l-2$ for $l> k$.
If $r(k,l,l+1)\neq k+2l$, then there exists a 3-edge-coloring of $K_{k+2l,k+2l}$ such that there is no red $k$-connected matching, no blue $l$-connected matching, no green $(l+1)$-connected matching, and hence at most two green $l$-connected matchings. Clearly, the size of a maximum matching in these green $l$-connected matchings is exactly $l$.
If there are two green $l$-connected matchings, then remove one cover vertex from each of these two green $l$-connected matchings and remove other vertices from $V_{i}$ ($i=1,2$) until two vertices have been removed from each of $V_{i}$ ($i=1,2$). Then there is no red $k$-connected matching, no blue $l$-connected matching and no green $l$-connected matching in the remaining $K_{k+2l-2, k+2l-2}$. A contradiction to $r(k,l,l)=k+2l-2$.
If there is one green $l$-connected matching, then remove one cover vertex from this green $l$-connected matching and remove other vertices from $V_{i}$ until two vertices have been removed from each of $V_{i}$ $(i=1,2)$. A contradiction again.
So we have shown that $r(k,l,l+1)\leq k+2l$ for $l> k\geq 2$.\hspace*{\fill}$\Box$
\end{proof}
\begin{theo}\label{theo5.1}
$r(k,l,m)=k+2m-2$ for $3\leq k< l< m\leq l+\frac{k-1}{2}$.
\end{theo}
\begin{proof}
Taking $i=0$ in Lemma \ref{lemma5.2}, we have $r(k,l,m)\geq k+2m-2$ when $3\leq k< l< m\leq l+ \frac{k-1}{2}$. So it remains to prove that $r(k,l,m)\leq k+2m-2$ for $3\leq k< l< m\leq l+ \frac{k-1}{2}$. In fact, we will prove a slightly stronger result: $r(k,l,m)\leq k+2m-2$ for $2\leq k< l< m$.
We use induction on $m$. The assertion holds for $m=l+1$ by Lemma \ref{lemma5.1}. Assume $m\geq l+1$ and the assertion holds for $m$. If $r(k,l,m+1)\neq k+2m$, then there exists a 3-edge-coloring of $K_{k+2m,k+2m}$ such that there is no red $k$-connected matching, no blue $l$-connected matching and at most two green $m$-connected matchings. Clearly, the size of a maximum matching in these green $m$-connected matchings is exactly $m$.
If there are two green $m$-connected matchings, then remove one cover vertex from each of these two green $m$-connected matchings and remove other vertices from $V_{i}$ ($i=1,2$) until two vertices have been removed from each of $V_{i}$ ($i=1,2$). Now there is no red $k$-connected matching, no blue $l$-connected matching and no green $m$-connected matching in the remaining $K_{k+2m-2, k+2m-2}$. A contradiction to our induction hypothesis.
If there is one green $m$-connected matching, then remove one cover vertex from this green $m$-connected matching and remove other vertices from $V_{i}$ ($i=1,2$) until two vertices have been removed from each of $V_{i}$ ($i=1,2$). A contradiction again.
So we have shown that $r(k,l,m)\leq k+2m-2$ for $2\leq k< l< m$.\hspace*{\fill}$\Box$
\end{proof}
\begin{lemma}\label{lemma5.3}
$r(k,l,k+l-1)=2k+2l-3$ for $l> k\geq 2$.
\end{lemma}
\begin{proof}
By Fact \ref{fact2.2}, $r(k,l,k+l-1)\geq 2k+2l-3$, so we need to show that $r(k,l,k+l-1)\leq 2k+2l-3$. If $r(k,l,k+l-1)\neq 2k+2l-3$, then there exists a 3-edge-coloring of $K_{2k+2l-3,2k+2l-3}$ such that there is no red $k$-connected matching, no blue $l$-connected matching and no green $(k+l-1)$-connected matching. We will show that this contradicts $r(k,l,l)=k+2l-2$, which is guaranteed by Theorem \ref{theo1.1} when $l> k\geq 2$.
Note that there are at most three green components which contain matchings with size in $[l,k+l-2]$ because $4l> 2k+2l-3$.
Case 1: There is one green component, say $G$, whose maximum matching has size $i$ for some $i\in [l,k+l-2]$. We can remove $k-1$ cover vertices of $G$ and remove other vertices in $V_{1}\cup V_{2}$ so that $k-1$ vertices have been removed from each of $V_{j}$ $(j=1,2)$. Now, $G$ is at most an $(l-1)$-connected matching because $i-(k-1)\leq (k+l-2)-(k-1)=l-1$. So there is no red $k$-connected matching, no blue $l$-connected matching and no green $l$-connected matching in the remaining $K_{k+2l-2,k+2l-2}$. A contradiction to $r(k,l,l)=k+2l-2$.
Case 2: There are two green components $G_{1}$ and $G_{2}$ whose maximum matchings have size $i$ and $j$, respectively, where $l\leq i\leq j\leq k+l-2$.
Subcase 2.1: $G_{1}$ has at most $k+l-2$ vertices in both $V_{1}$ and $V_{2}$. By the pigeonhole principle, $G_{2}$ has at least $\lceil \frac{j}{2} \rceil$ cover vertices in $V_{1}$ or in $V_{2}$. W.L.O.G, assume that $G_{2}$ has at least $\lceil \frac{j}{2} \rceil$ cover vertices in $V_{1}$. Then we remove $p= \min\{\lceil \frac{j}{2} \rceil,k-1\}$ cover vertices from $G_{2}\cap V_{1}$ and $k-1-p$ other vertices from $V_{1}$, and remove $k-1$ vertices from $G_{1}\cap V_{2}$. Now, what remains of $G_{2}$ is not a green $l$-connected matching, for the following reason: if $p=k-1$, the number of cover vertices left in $G_{2}$ is at most $j-(k-1)\leq (k+l-2)-(k-1)=l-1$; if $p=\lceil \frac{j}{2}\rceil$, then the number of cover vertices left in $G_{2}$ is at most $\lfloor \frac{j}{2}\rfloor \leq \lceil \frac{j}{2}\rceil \leq k-1 \leq l-1$. Moreover, the number of remaining vertices in $G_{1}\cap V_{2}$ is at most $(k+l-2)-(k-1)=l-1$. Hence there is no red $k$-connected matching, no blue $l$-connected matching and no green $l$-connected matching in the remaining $K_{k+2l-2,k+2l-2}$. A contradiction again.
Subcase 2.2: $G_{1}$ has at least $k+l-1$ vertices in $V_{1}$ or $V_{2}$. W.L.O.G assume that $G_{1}$ has at least $k+l-1$ vertices in $V_{1}$. So $G_{2}$ has at most $k+l-2$ vertices in $V_{1}$.
If $G_{1}$ has at most $k+l-2$ vertices in $V_{2}$, then remove $k-1$ vertices from $G_{2}\cap V_{1}$ and remove $k-1$ vertices from $G_{1}\cap V_{2}$. Now in the remaining $K_{k+2l-2,k+2l-2}$, the size of a maximum matching in $G_{1}$ and $G_{2}$ is at most $(k+l-2)-(k-1)=l-1$. A contradiction again.
Otherwise, $G_{1}$ has at least $k+l-1$ vertices in $V_{2}$. Then $G_{2}$ has at most $k+l-2$ vertices in $V_{2}$, and this is the same as Subcase 2.1 with the roles of $G_{1}$ and $G_{2}$ interchanged.
Case 3: There are one green component whose size of a maximum matching is $i$ (say $G_{1}$), one green component whose size of a maximum matching is $j$ (say $G_{2}$), and one green component whose size of a maximum matching is $t$ (say $G_{3}$), where $l\leq i\leq j\leq t\le k+l-2$.
Note that $l\leq 2k-3$ in this case, because $3l> 2k+2l-3$ if $l> 2k-3$, and note that $l\leq |G_{i}\cap V_{j}|\leq 2k-3$ $(i=1,2,3;\ j=1,2)$. Also note that $t\geq l$ and $2k+2l-3-t\leq 2k+l-3$, so $G_{1}\cup G_{2}$ has at most $2k+l-3$ vertices in each of $V_{1}$ and $V_{2}$.
By the pigeonhole principle, $G_{3}$ has at least $\lceil \frac{t}{2} \rceil$ cover vertices in $V_{1}$ or in $V_{2}$. W.L.O.G, assume that $G_{3}$ has at least $\lceil \frac{t}{2} \rceil$ cover vertices in $V_{1}$. Then remove $k-1$ vertices from $G_{3}\cap V_{1}$ such that they contain as many cover vertices of $G_{3}$ as possible, namely $\min \{\lceil \frac{t}{2} \rceil, k-1\}$ of them. Remove $k-1$ vertices from $V_{2}$ such that $G_{1}$ has at most $l-1$ vertices in $V_{2}$ and $G_{2}$ has at most $l-1$ vertices in $V_{2}$. This is possible for the following reason: if $G_{1}$ has $s$ vertices in $V_{2}$ (recall that $l\leq s\leq 2k-3$), then remove $s-(l-1)$ vertices from $G_{1}\cap V_{2}$ and $(k-1)-(s-l+1)$ vertices from $G_{2}\cap V_{2}$. Then $G_{2}$ has at most $(2k+l-3)-s-[(k-1)-(s-l+1)]=k-1\leq l-1$ vertices in $V_{2}$ in the remaining $K_{k+2l-2,k+2l-2}$. Moreover, $G_{3}$ has at most $\max \{(2k-3)-(k-1), \lfloor \frac{t}{2}\rfloor \}\leq k-1\leq l-1$ cover vertices in the remaining $K_{k+2l-2, k+2l-2}$. Now there is no red $k$-connected matching, no blue $l$-connected matching and no green $l$-connected matching in the remaining $K_{k+2l-2,k+2l-2}$. A contradiction again.
So we have shown that $r(k,l,k+l-1)\leq 2k+2l-3$ for $l> k\geq 2$.\hspace*{\fill}$\Box$
\end{proof}
\begin{theo}\label{theo5.4}
$r(k,l,m)=2k+2l-3$ for $3\leq k< l, l+\frac{k-1}{2}< m< k+l-1$.
\end{theo}
\begin{proof}
Actually, by Lemma \ref{lemma5.2}, we have $r(k,l,m)\geq k+2m-i-2$ for $1\leq i\leq k-2$, $3\leq k< l$, $\frac{k+i-2}{2}+l< m\leq \frac{k+i-1}{2}+l$. Let $k,l,m,i$ satisfy these conditions. Note that $2k+2l-4< k+2m-i-2\leq 2k+2l-3$. Since $k,l,m,i$ are integers, $k+2m-i-2=2k+2l-3$. So we have $r(k,l,m)\geq k+2m-i-2=2k+2l-3$. By Lemma \ref{lemma5.3}, we have $r(k,l,k+l-1)=2k+2l-3$. Since $m< k+l-1$, we get $r(k,l,m)\leq r(k,l,k+l-1)= 2k+2l-3$.\hspace*{\fill}$\Box$
\end{proof}
\begin{theo}\label{theo5.2}
$r(k,l,m)=k+l+m-2$ for $2\leq k<l ,m\geq k+l-1$.
\end{theo}
\begin{proof}
By Fact \ref{fact2.2}, $r(k,l,m)\geq k+l+m-2$, so we need to show that $r(k,l,m)\leq k+l+m-2$ for $2\leq k< l, m\geq k+l-1$.
We use induction on $m$. The assertion holds for $m=k+l-1$ by Lemma \ref{lemma5.3}. Assume that $m\geq k+l-1$ and the assertion holds for $m$. If $r(k,l,m+1)\neq k+l+m-1$, then there exists a 3-edge-coloring of $K_{k+l+m-1,k+l+m-1}$ such that there is no red $k$-connected matching, no blue $l$-connected matching and at most two green $m$-connected matchings.
Case 1: There is one green $m$-connected matching (say $G$). Then remove one cover vertex from $G$ in $V_{i}$ $(i=1,2)$ and any other vertex in $V_{3-i}$. Now, there is no red $k$-connected matching, no blue $l$-connected matching and no green $m$-connected matching in the remaining $K_{k+l+m-2,k+l+m-2}$. A contradiction to our induction hypothesis.
Case 2: There are two green $m$-connected matchings (say $G_{1}, G_{2}$). Then $2m\leq k+l+m-1$, which together with $m\geq k+l-1$ shows that $m=k+l-1$. So each $G_{i}$ has exactly $k+l-1$ vertices in each of $V_{1}$ and $V_{2}$, and each vertex in $G_{i}\cap V_{j}$ is a cover vertex of $G_{i}$ $(1\leq i,j \leq 2)$. Remove one cover vertex from $G_{1}\cap V_{1}$ and one cover vertex from $G_{2}\cap V_{2}$. A contradiction again.
So we have shown that $r(k,l,m)\leq k+l+m-2$ for $2\leq k< l, m\geq k+l-1$.\hspace*{\fill}$\Box$
\end{proof}
Combining Theorems \ref{theo5.1}, \ref{theo5.4} and \ref{theo5.2}, we obtain Theorem \ref{theo3}.
\section{Appendix: Proof of Theorem \ref{theo2}}
\subsection{Monochromatic connected matchings in almost complete bipartite graphs}
The main result of this section extends Theorem \ref{theo3} to almost complete bipartite graphs. We only give the proof for the case $\alpha_{1}< \alpha_{2}$, $\alpha_{3}\geq \alpha_{1}+\alpha_{2}$; the other two cases can be proven in the same way.
\begin{theo}\label{theo4.1}
Let $0< \alpha_{1}< \alpha_{2}, \alpha_{3}\geq \alpha_{1}+\alpha_{2}, \beta=(2\lceil \frac{\alpha_{1}+ \alpha_{2}+\alpha_{3}+1}{\alpha_{1}} \rceil)^6,\gamma= \frac{12}{\alpha_{1}} \lceil \frac{\alpha_{1}+ \alpha_{2}+ \alpha_{3}+ 1}{\alpha_{1}} \rceil$. For every $0< \varepsilon <\frac{1}{\beta+ (\alpha_{1}+ \alpha_{2}+\alpha_{3})\gamma}$, there is $n_{0} = n_{0}(\varepsilon)$ such that the following holds. For $n> n_{0}$, let $G$ be a bipartite graph with partition $\{V_{1}, V_{2}\}$ and $|V_{1}|=|V_{2}|=N$, where $N\geq (\alpha_{1}+ \alpha_{2}+\alpha_{3}+(\beta+ (\alpha_{1}+ \alpha_{2}+\alpha_{3})\gamma)\varepsilon)n$. Suppose that every vertex in $V_{1}$ has at most $\varepsilon n$ non-neighbours in $V_{2}$ and vice versa. Then for every red-blue-green-edge-coloring of $G$, there is a red $\lfloor \alpha_{1}n \rfloor$-connected matching or a blue $\lfloor \alpha_{2}n \rfloor$-connected matching or a green $\lfloor \alpha_{3}n \rfloor$-connected matching.
\end{theo}
The proof follows \cite{BLS}. The idea is to add the non-edges to $G$ so that it becomes a complete bipartite graph, and then apply Theorem \ref{theo3}. Let $N= \lceil (\alpha_{1}+ \alpha_{2}+\alpha_{3}+(\beta+ (\alpha_{1}+ \alpha_{2}+\alpha_{3})\gamma)\varepsilon)n \rceil$.
\begin{defi}\label{def4.1}
We call $C_{R,i}$ a red virtual component if $C_{R,i}$ is a red component of order at least $\alpha_{1}n$ or a maximal union of red components with order no more than $2\alpha_{1}n$ (the order of each of these components being no more than $\alpha_{1}n$). Define blue virtual components and green virtual components in the same way.
\end{defi}
\begin{remark}\label{remark4.1}
It is obvious that each virtual component has no intersection with other virtual components of the same color. The maximum number of virtual components in $G$ is at most $2\lceil \frac{\alpha_{1}+ \alpha_{2}+\alpha_{3}+1}{\alpha_{1}} \rceil$ in each color.
\end{remark}
\begin{proof}
W.L.O.G, consider red virtual components. It is easy to see that all but at most one of the virtual components have order at least $\alpha_{1}n$. So the maximum number of red virtual components in $G$ is at most $2\times \lceil \frac{(\alpha_{1}+ \alpha_{2}+\alpha_{3}+(\beta+ (\alpha_{1}+ \alpha_{2}+\alpha_{3})\gamma )\varepsilon )n}{\alpha_{1}n}\rceil \leq 2\lceil \frac{\alpha_{1}+ \alpha_{2}+\alpha_{3}+1}{\alpha_{1}} \rceil$ by the choice of $\varepsilon$. Similarly, the maximum number of blue (green) virtual components in $G$ is at most $2\lceil \frac{\alpha_{1}+ \alpha_{2}+\alpha_{3}+1}{\alpha_{2}} \rceil \leq 2\lceil \frac{\alpha_{1}+ \alpha_{2}+\alpha_{3}+1}{\alpha_{1}} \rceil$ ($2\lceil \frac{\alpha_{1}+ \alpha_{2}+\alpha_{3}+1}{\alpha_{3}} \rceil \leq 2\lceil \frac{\alpha_{1}+ \alpha_{2}+\alpha_{3}+1}{\alpha_{1}} \rceil$).
\end{proof}
\begin{defi}\label{def4.2}
We call a non-edge bad if it is not contained in any virtual component.
\end{defi}
\begin{lemma}\label{lemma4.1}
There is a set of vertices, with at most $\beta \varepsilon n$ vertices in each $V_{i}$ $(i=1,2)$, that covers all bad non-edges in $G$.
\end{lemma}
\begin{proof}
Each bad non-edge can be represented by a type $(a,b,c,d,e,f)$ ($a\neq d, b\neq e, c\neq f$; $a,b,c,d,e,f\in [1,2\lceil \frac{\alpha_{1}+ \alpha_{2}+\alpha_{3}+1}{\alpha_{1}} \rceil]$) such that one of its ends belongs to $C_{R,a}\cap C_{B,b}\cap C_{G,c}= U$ and the other end belongs to $C_{R,d}\cap C_{B,e}\cap C_{G,f}= W$. (Recall that $\beta=(2\lceil \frac{\alpha_{1}+ \alpha_{2}+\alpha_{3}+1}{\alpha_{1}} \rceil)^6$ and there are at most $\beta$ choices for $(a,b,c,d,e,f)$.) Now for each fixed $(a,b,c,d,e,f)$, we claim that $|U|, |W|\leq \varepsilon n$. Otherwise, since each vertex has at most $\varepsilon n$ non-neighbors, there is an edge between $U$ and $W$. W.L.O.G, assume that it is red (the blue and green cases are the same); then $C_{R,a}$ and $C_{R,d}$ would be connected in red, a contradiction to Remark \ref{remark4.1}. So we have shown that all bad non-edges of type $(a,b,c,d,e,f)$ can be covered by a set ($U\cup W$) with at most $\varepsilon n$ vertices in each $V_{i}$ $(i=1,2)$.\hspace*{\fill}$\Box$
\end{proof}
Take a virtual component $C$ and a minimum vertex cover $W$ in $C$. Now add non-edges to $G$ which are incident with $W$ inside $C$ and colour them by the same color as $C$. Repeat it until no non-edge can be added and denote the resulting graph by $G_{1}$. It is easy to see that the cardinality of a minimum cover of $C$ in $G_{1}$ is the same as in $G$, and by K\"onig's theorem, the size of the maximum matching in each virtual component in $G_{1}$ is the same as in $G$.
A pair of vertices, one in each part, that does not form an edge in $G_{1}$ is called a missing edge. The next lemma says that there are not too many pairwise disjoint missing edges in each virtual component of $G_{1}$.
\begin{lemma}\label{lemma4.2}
Let $C_{R}$ $(C_{B}, C_{G})$ be a red (blue, green) virtual component in $G_{1}$, and let $M$ be a matching of missing edges spanned by $C_{R}$ $(C_{B}, C_{G})$. Then $M$ contains at most $\gamma \varepsilon \alpha_{1} n$ $(\gamma \varepsilon \alpha_{2} n, \gamma \varepsilon \alpha_{3} n)$ missing edges.
\end{lemma}
\begin{proof}
W.L.O.G, assume that the virtual component in $G_{1}$ is red. Let $\{x_{1}y_{1},...,x_{t}y_{t}\}$ be a matching of missing edges spanned by $C_{R}$ and suppose that $t\geq \gamma \varepsilon \alpha_{1} n$. Since $x_{i}y_{i}$ is missing, neither $x_{i}$ nor $y_{i}$ is in the minimum cover $W_{R}$ of $C_{R}$ used in the construction of $G_{1}$; otherwise we would have added $x_{i}y_{i}$ to $G_{1}$. So no red edges are spanned by $\{x_{1},...,x_{t},y_{1},...,y_{t}\}$: if some $x_{i}y_{j}$ were a red edge, then $x_{i}y_{j}$ would not be covered by $W_{R}$, a contradiction to $W_{R}$ being a vertex cover of $C_{R}$.
Since there are at most $2\lceil \frac{\alpha_{1}+ \alpha_{2}+\alpha_{3}+1}{\alpha_{1}} \rceil$ blue virtual components, there exists a blue virtual component $C_{B}$ that contains at least $\frac{t}{2\lceil \frac{\alpha_{1}+ \alpha_{2}+\alpha_{3}+1}{\alpha_{1}} \rceil}$ of the $x_{i}$'s. Let $s = \frac{t}{2\lceil \frac{\alpha_{1}+ \alpha_{2}+\alpha_{3}+1}{\alpha_{1}} \rceil}$. Suppose that $x_{1},...,x_{s}\in C_{B}$.
If at least half of the vertices $y_{1},...,y_{s}$ are in $C_{B}$, assume that $Y=\{y_{1},...,y_{\frac{s}{2}}\}$ is contained in $C_{B}$ and let $X=\{x_{1},...,x_{\frac{s}{2}}\}$. As discussed above, for a minimum cover $W_{B}$ of $C_{B}$, the vertices $x_{i}$ and $y_{i}$ are not in $W_{B}$, which implies that $X\cup Y$ spans no blue edges.
Otherwise, at least half of the vertices $y_{1},...,y_{s}$ are not in $C_{B}$. Suppose that the set $Y=\{y_{1},...,y_{\frac{s}{2}}\}$ is disjoint from $C_{B}$ and let $X=\{x_{1},...,x_{\frac{s}{2}}\}$. Then there are no blue edges between $X$ and $Y$.
In each case, there are no red or blue edges between $X$ and $Y$. Since $\frac{s}{2}\geq \frac{3\varepsilon \alpha_{1} n}{\alpha_{1}} = 3\varepsilon n$ and each vertex has at least $\frac{s}{2}-\varepsilon n \geq 2\varepsilon n$ neighbors (recall that $\gamma= \frac{12}{\alpha_{1}} \lceil \frac{\alpha_{1}+ \alpha_{2}+ \alpha_{3}+ 1}{\alpha_{1}} \rceil$), $G_{1}[X,Y]$ is connected in green. Let $C_{G}$ be the green virtual component containing $X\cup Y$. Since every vertex in $X\cup Y$ is incident with a missing edge spanned by $C_{G}$ (by the construction of $G_{1}$), it follows that none of the vertices in $X\cup Y$ is in a minimum cover $W_{G}$ of $C_{G}$. Hence, $X\cup Y$ cannot span any green edge. A contradiction.\hspace*{\fill}$\Box$
\end{proof}
For each missing edge in $G_{1}$ that is not bad, take a virtual component containing it and add the edge to $G_{1}$ in the color of the chosen component. Denote the resulting graph by $G_{2}$. Now, we are ready to prove the main result in this section.\\
\noindent \emph{Proof of Theorem \ref{theo4.1}:}
Let $W$ be a set of vertices that covers all bad non-edges and has the same number of vertices on both sides. By Lemma \ref{lemma4.1}, $W$ has size at most $2\beta \varepsilon n$. Let $G_{3}=G_{2}\setminus W$. Then $G_{3}$ is a 3-edge-colored complete bipartite graph with at least $N-\beta \varepsilon n$ vertices on each side. By Theorem \ref{theo5.2}, $G_{3}$ contains a red $((1+\gamma \varepsilon)\alpha_{1}n)$-connected matching since $\frac{\alpha_{1}}{\alpha_{1}+\alpha_{2}+\alpha_{3}} \times (N-\beta \varepsilon n)\geq (1+\gamma \varepsilon)\alpha_{1}n$, or a blue $((1+\gamma \varepsilon)\alpha_{2}n)$-connected matching since $\frac{\alpha_{2}}{\alpha_{1}+\alpha_{2}+\alpha_{3}} \times (N-\beta \varepsilon n)\geq (1+\gamma \varepsilon)\alpha_{2}n$, or a green $((1+\gamma \varepsilon)\alpha_{3}n)$-connected matching since $\frac{\alpha_{3}}{\alpha_{1}+ \alpha_{2}+\alpha_{3}} \times (N-\beta \varepsilon n)\geq (1+\gamma \varepsilon)\alpha_{3}n$. W.L.O.G, assume that $G_{3}$ contains a red $((1+\gamma \varepsilon) \alpha_{1}n)$-connected matching $M$. By the construction of $G_{2}$, $M$ is contained in a red virtual component $C_{R}$. Note that $M$ spans more than $\alpha_{1} n$ edges, so $C_{R}$ must be connected (not a union of several red components). By Lemma \ref{lemma4.2}, at most $\gamma \varepsilon \alpha_{1} n$ of the edges in $M$ are missing in $G_{1}$. That means $C_{R}$ spans a matching on at least $\alpha_{1} n$ edges in $G_{1}$. By the construction of $G_{1}$, the component $C_{R}$ also spans a matching with at least $\alpha_{1} n$ edges in $G$, which shows that $G$ contains a red $\alpha_{1} n$-connected matching.\hspace*{\fill}$\Box$
In the same way, we can obtain:
\begin{theo}\label{theo4.2}
Let $0< \alpha_{1}< \alpha_{2}< \alpha_{3}\leq \frac{\alpha_{1}}{2}+ \alpha_{2}, \beta= (2\lceil \frac{\alpha_{1}+ 2\alpha_{3}+ 1}{\alpha_{1}} \rceil)^6,\gamma= \frac{12}{\alpha_{1}} \lceil \frac{\alpha_{1}+ 2\alpha_{3}+ 1}{\alpha_{1}} \rceil$. For every $0< \varepsilon <\frac{1}{\beta+ (\alpha_{1}+ 2\alpha_{3}) \gamma}$, there is $n_{0} = n_{0}(\varepsilon)$ such that the following holds. For $n> n_{0}$, let $G$ be a bipartite graph with partition $\{V_{1}, V_{2}\}$ and $|V_{1}|=|V_{2}|=N$, where $N\geq (\alpha_{1}+ 2\alpha_{3}+(\beta+ (\alpha_{1}+ 2\alpha_{3})\gamma )\varepsilon )n$. Suppose that every vertex in $V_{1}$ has at most $\varepsilon n$ non-neighbours in $V_{2}$ and vice versa. Then for every red-blue-green-edge-coloring of $G$, there is a red $\lfloor \alpha_{1}n \rfloor$-connected matching or a blue $\lfloor \alpha_{2}n \rfloor$-connected matching or a green $\lfloor \alpha_{3}n \rfloor$-connected matching.
\end{theo}
\begin{theo}\label{theo4.3}
Let $0< \alpha_{1}< \alpha_{2}, \frac{\alpha_{1}}{2}+ \alpha_{2}< \alpha_{3}< \alpha_{1}+ \alpha_{2}, \beta= (2\lceil \frac{2\alpha_{1}+ 2\alpha_{2}+ 1}{\alpha_{1}} \rceil)^6,\gamma= \frac{12}{\alpha_{1}} \lceil \frac{2\alpha_{1}+ 2\alpha_{2}+ 1}{\alpha_{1}} \rceil$. For every $0< \varepsilon <\frac{1}{\beta+ (2\alpha_{1}+ 2\alpha_{2}) \gamma}$, there is $n_{0} = n_{0}(\varepsilon)$ such that the following holds. For $n> n_{0}$, let $G$ be a bipartite graph with partition $\{V_{1}, V_{2}\}$ and $|V_{1}|=|V_{2}|=N$, where $N\geq (2\alpha_{1}+ 2\alpha_{2}+(\beta+ (2\alpha_{1}+ 2\alpha_{2})\gamma )\varepsilon )n$. Suppose that every vertex in $V_{1}$ has at most $\varepsilon n$ non-neighbours in $V_{2}$ and vice versa. Then for every red-blue-green-edge-coloring of $G$, there is a red $\lfloor \alpha_{1}n \rfloor$-connected matching or a blue $\lfloor \alpha_{2}n \rfloor$-connected matching or a green $\lfloor \alpha_{3}n \rfloor$-connected matching.
\end{theo}
\subsection{Proof of Theorem \ref{theo2}}
In this section, we will use the Regularity Lemma and Theorems \ref{theo4.1}, \ref{theo4.2}, \ref{theo4.3} to complete the proof of Theorem \ref{theo2}. We only give the proof for the case $\alpha_{1}< \alpha_{2}$, $\alpha_{3}\geq \alpha_{1}+\alpha_{2}$; the other two cases can be verified in the same way.
Let us recall some basic definitions related to the Regularity Lemma.
\begin{defi}\label{def5.1}
Let $A, B$ be disjoint subsets of vertices in a graph $G$. Denote the number of edges in $G$ with one endpoint in $A$ and another in $B$ by $e_{G}(A, B)$ and denote the edge density by $d_{G}(A,B) = \frac{e_{G}(A, B)}{|A||B|}$. Given $\varepsilon >0$, we say that the pair $(A, B)$ is $\varepsilon$-regular (with respect to the graph $G$) if for every $A'\subseteq A$ and $B'\subseteq B$ satisfying $|A'|\geq \varepsilon |A|$ and $|B'|\geq \varepsilon |B|$, we have $$|d_{G}(A',B') - d_{G}(A,B)|\leq \varepsilon.$$
\end{defi}
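For example, if $G[A,B]$ is a complete bipartite graph, then $d_{G}(A',B')=1$ for all non-empty $A'\subseteq A$ and $B'\subseteq B$, so the pair $(A,B)$ is $\varepsilon$-regular for every $\varepsilon >0$; this trivial case is used below, where the underlying graph is the complete bipartite graph $K_{N,N}$.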
\begin{defi}\label{def5.2}
A partition $\mathcal{P} = \{P_{0}, P_{1},...,P_{k}\}$ of the vertex set $V$ is said to be $(\varepsilon, k)$-equitable if $|P_{0}|\leq \varepsilon |V|$ and $|P_{1}| =...= |P_{k}|$. An $(\varepsilon, k)$-equitable partition $\mathcal{P}$ is $(\varepsilon, k)$-regular if all but at most $\varepsilon \tbinom {k}{2}$ pairs $(P_{i}, P_{j})$ with $1\leq i< j\leq k$ are $\varepsilon$-regular.
\end{defi}
Szemer\'edi's Regularity Lemma states that for any $\varepsilon$ and $k_{0}$ there is $K_{0} = K_{0}(\varepsilon,k_{0})$ such that any graph admits an $(\varepsilon, k)$-regular partition with $k_{0}\leq k\leq K_{0}$. We will apply the following multicolored version of the Regularity Lemma for bipartite graphs.
\begin{lemma}[\cite{BLS}] \label{lemma6.1}
For any $\varepsilon >0$ and $k_{0}$ there exist $K_{0} = K_{0}(\varepsilon ,k_{0})$, such that the following holds. Let $G$ be a 3-colored bipartite graph, with partition $\{V_{1}, V_{2}\}$, where $|V_{1}| = |V_{2}| = n$. Then there exists an $(\varepsilon, 2k)$-equitable partition $\mathcal{P} = \{V_{0}, U_{1}, U_{2},...,U_{k}, W_{1}, W_{2},..., W_{k}\}$ of $V(G)$ such that the following properties hold:
(a) every $U_{i}$, for $i\geq 1$, is contained in $V_{1}$ and every $W_{j}$, for $j\geq 1$, is contained in $V_{2}$;
(b) $|V_{0}\cap V_{1}|=|V_{0}\cap V_{2}|$;
(c) $k_{0}\leq k\leq K_{0}$;
(d) for every $i\in [k]$, for all but at most $\varepsilon k$ values of $j\in [k]$, $(U_{i},W_{j})$ is $\varepsilon$-regular with respect to each of colours of $G$.
\end{lemma}
\begin{defi}\label{def5.3}
Given an edge-colored graph $G$ and a partition $\mathcal{P} = \{V_{0}, U_{1}, U_{2},...,U_{k},\\
W_{1}, W_{2},..., W_{k}\}$, the $(\varepsilon,d)$-reduced graph $\Gamma$ is the graph whose vertices are $U_{1}, U_{2},...,U_{k}, \\
W_{1}, W_{2},..., W_{k}$ and $U_{i}W_{j}$ is an edge if and only if $(U_{i}, W_{j})$ is $\varepsilon$-regular with respect to each colour of $G$ and its density in $G$ is at least $d$. We colour each edge $U_{i}W_{j}$ with the majority color in $G[U_{i},W_{j}]$.
\end{defi}
The following lemma is used to lift a connected matching found in the reduced graph to a cycle in the original graph. It was proved by Figaj and {\L}uczak in \cite{LF}.
\begin{lemma}\label{lemma6.2}
Given $\varepsilon, d, k$ such that $0 <20\varepsilon <d <1$ there is an $n_{0}$ such that the following holds. Let $\mathcal{P}$ be an $(\varepsilon, k)$-equitable partition of a graph $G$ on $n\geq n_{0}$ vertices, and let $\Gamma$ be the corresponding $(\varepsilon, d)$-reduced graph. Suppose that $\Gamma$ contains a monochromatic $m$-connected matching. Then $G$ contains an even cycle of the same colour and of length $l$ for every even $l\leq 2(1-9\varepsilon d^{-1})m|U_{1}|$.
\end{lemma}
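In the application below, the reduced graph is taken with $d=1$: every pair $(U_{i},W_{j})$ has density $1$ in the underlying complete bipartite graph, so with this choice the bound of Lemma \ref{lemma6.2} reads $2(1-9\varepsilon)m|U_{1}|$.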
Now, we prove the following result.
\begin{theo}
Let $\alpha_{1}< \alpha_{2}, \alpha_{3}\geq \alpha_{1}+\alpha_{2}$, then $br(C_{2\lfloor \alpha_{1}n\rfloor}, C_{2\lfloor \alpha_{2}n\rfloor}, C_{2\lfloor \alpha_{3}n\rfloor}) \leq (\alpha_{1}+ \alpha_{2}+ \alpha_{3}+ o(1))n$ for $n$ sufficiently large.
\end{theo}
\begin{proof}
Let $\mu >0$ and $N = (\alpha_{1}+\alpha_{2}+\alpha_{3}+\mu)n$, and let $G$ be a 3-edge-colored $K_{N,N}$. Suppose $n$ is sufficiently large and $\varepsilon'$ is sufficiently small. Apply the Regularity Lemma (Lemma \ref{lemma6.1}) to the graph $G$ with parameter $\varepsilon'$, and let $\mathcal{P}$ be a partition satisfying the conditions of the lemma. Consider the corresponding $(\varepsilon' ,1)$-reduced graph $\Gamma$. Note that by $(a)$ and $(b)$ in Lemma \ref{lemma6.1}, $\Gamma$ is a balanced bipartite graph. Denote the number of vertices on each side by $k$, so that $\mathcal{P} = \{V_{0}, U_{1}, U_{2},...,U_{k}, W_{1}, W_{2},..., W_{k}\}$. Furthermore, every pair $(U_{i},W_{j})$ has density 1 in the original graph. Hence, $U_{i}W_{j}$ is an edge in $\Gamma$ if $(U_{i},W_{j})$ is $\varepsilon'$-regular with respect to each color. From $(d)$, $\Gamma$ has minimum degree at least $(1-2\varepsilon')k$.
Let $n'=\frac{k}{\alpha_{1}+ \alpha_{2}+ \alpha_{3}+\zeta \varepsilon'}\geq \frac{k}{\alpha_{1}+ \alpha_{2}+ \alpha_{3}+1}$, where $\zeta =2(\alpha_{1} +\alpha_{2} +\alpha_{3}+1)(\beta+(\alpha_{1}+\alpha_{2}+ \alpha_{3}) \gamma)$, $\varepsilon'$ is sufficiently small and $\beta, \gamma$ are the same as in Subsection 5.1. Every vertex on one side of $\Gamma$ has at most $2\varepsilon' k\leq 2(\alpha_{1}+ \alpha_{2}+ \alpha_{3}+ 1) \varepsilon' n'$ non-neighbors, so we may apply Theorem \ref{theo4.1} with $\varepsilon = 2(\alpha_{1}+ \alpha_{2}+ \alpha_{3}+ 1)\varepsilon'$, $n= n'$, $N =k$. Then $\Gamma$ contains a red $\alpha_{1}n'$-connected matching or a blue $\alpha_{2}n'$-connected matching or a green $\alpha_{3}n'$-connected matching. W.L.O.G, assume that $\Gamma$ contains a red $\alpha_{1}n'$-connected matching. Applying Lemma \ref{lemma6.2}, $G$ contains a red even cycle of length $l$ for every even $l\leq 2(1-9\varepsilon')\alpha_{1} n' |U_{1}|$. Note that:
\begin{equation*}
\begin{aligned}
2(1-9\varepsilon')\alpha_{1} n' |U_{1}|
& = 2(1-9\varepsilon') \alpha_{1} \cdot \frac{k}{\alpha_{1}+ \alpha_{2}+ \alpha_{3}+\zeta \varepsilon'} \cdot |U_{1}| \\
& \geq 2(1-9\varepsilon')(1-\varepsilon') \cdot \frac{\alpha_{1} N}{\alpha_{1}+ \alpha_{2}+ \alpha_{3}+\zeta \varepsilon'} \\
& = 2(1-9\varepsilon')(1-\varepsilon') \cdot \frac{\alpha_{1}(\alpha_{1}+\alpha_{2}+ \alpha_{3}+\mu)n}{\alpha_{1}+ \alpha_{2}+ \alpha_{3}+\zeta \varepsilon'}\\
& \geq 2\alpha_{1}n,
\end{aligned}
\end{equation*}
where the first inequality follows since $k|U_{1}| =N-\frac{|V_{0}|}{2}\geq (1-\varepsilon')N$, and the last one follows since $\varepsilon'$ is sufficiently small compared with $\mu$. Hence, $G$ contains a red cycle of length $2\lfloor \alpha_{1}n\rfloor$.\hspace*{\fill}$\Box$
\end{proof}
Applying the same procedure with Theorem \ref{theo4.2} and Theorem \ref{theo4.3} in place of Theorem \ref{theo4.1}, we obtain:
\begin{theo}\label{theo6.1}
\[br(C_{2\lfloor \alpha_{1}n\rfloor}, C_{2\lfloor \alpha_{2}n\rfloor}, C_{2\lfloor \alpha_{3}n\rfloor})\leq \left \{
\begin{array}{lll}
(\alpha_{1}+ 2\alpha_{3}+ o(1))n &\mbox {if $\alpha_{1}< \alpha_{2}< \alpha_{3}\leq \frac{\alpha_{1}}{2}+\alpha_{2},$}\\
(2\alpha_{1}+ 2\alpha_{2}+ o(1))n &\mbox {if $\alpha_{1}< \alpha_{2}, \frac{\alpha_{1}}{2}+\alpha_{2}< \alpha_{3}< \alpha_{1}+\alpha_{2},$}\\
(\alpha_{1}+ \alpha_{2}+ \alpha_{3}+ o(1))n &\mbox {if $\alpha_{1}< \alpha_{2}, \alpha_{3}\geq \alpha_{1}+\alpha_{2}.$}
\end{array}
\right.
\]
\end{theo}
It is easy to see that $br(C_{2\lfloor \alpha_{1}n\rfloor}, C_{2\lfloor \alpha_{2}n\rfloor}, C_{2\lfloor \alpha_{3}n\rfloor})\geq r(\lfloor \alpha_{1}n\rfloor, \lfloor \alpha_{2}n\rfloor, \lfloor \alpha_{3}n\rfloor)$. By Theorem \ref{theo3}, we have:
\begin{theo}\label{theo6.2}
\[br(C_{2\lfloor \alpha_{1}n\rfloor}, C_{2\lfloor \alpha_{2}n\rfloor}, C_{2\lfloor \alpha_{3}n\rfloor})\geq \left \{
\begin{array}{lll}
(\alpha_{1}+ 2\alpha_{3}+ o(1))n &\mbox {if $\alpha_{1}< \alpha_{2}< \alpha_{3}\leq \frac{\alpha_{1}}{2}+\alpha_{2},$}\\
(2\alpha_{1}+ 2\alpha_{2}+ o(1))n &\mbox {if $\alpha_{1}< \alpha_{2}, \frac{\alpha_{1}}{2}+\alpha_{2}< \alpha_{3}< \alpha_{1}+\alpha_{2},$}\\
(\alpha_{1}+ \alpha_{2}+ \alpha_{3}+ o(1))n &\mbox {if $\alpha_{1}< \alpha_{2}, \alpha_{3}\geq \alpha_{1}+\alpha_{2}.$}
\end{array}
\right.
\]
\end{theo}
Combining Theorem \ref{theo6.1} and Theorem \ref{theo6.2}, we complete the proof of Theorem \ref{theo2}.
\end{document}
|
\begin{document}
\begin{abstract}
The Kato square root problem for
divergence form elliptic operators with potential $V : \mathbb{R}^{n}
\rightarrow \mathbb{C}$ is the
equivalence statement $\norm{\br{L + V}^{\frac{1}{2}} u}_{2} \simeq \norm{\nabla u}_{2} +
\norm{V^{\frac{1}{2}} u}_{2}$, where
$L + V := - \mathrm{div} (A \nabla) + V$ and the
perturbation $A$ is an $L^{\infty}$
complex matrix-valued function satisfying an accretivity
condition. This relation is proved for any potential with range
contained in some positive sector and satisfying
$\norm{\abs{V}^{\frac{\alpha}{2}} u}_{2} + \norm{(-\Delta)^{\frac{\alpha}{2}}u}_{2} \lesssim
\norm{\br{\abs{V} - \Delta}^{\frac{\alpha}{2}}u}_{2}$ for all $u \in
D(\abs{V} - \Delta)$ and some $\alpha \in (1,2]$. The class of
potentials that will satisfy such a condition is known to contain the reverse H\"{o}lder
class $RH_{2}$ and
$L^{\frac{n}{2}}\br{\mathbb{R}^{n}}$ in dimension $n > 4$. To prove the Kato estimate with potential, a non-homogeneous version of the framework introduced by A. Axelsson,
S. Keith and A. McIntosh for proving quadratic estimates is
developed. In addition to applying this non-homogeneous framework to the
scalar Kato problem with zero-order potential, it will also
be applied to the Kato
problem for systems of equations with
zero-order potential.
\end{abstract}
\maketitle
\section{Introduction}
\label{sec:intro}
\xdef\@thefnmark{}\@footnotetext{\textit{Key words and phrases.}
Kato problem; non-homogeneous; Schr\"{o}dinger operator; divergence form operator;
potential; quadratic estimates. \\
Mathematics Subject Classification. Primary 42B37 $\cdot$ Secondary
35J10 \\
This research was partially supported by the Australian
Research Council through the Discovery Project DP160100941.}
For Hilbert spaces $\mathcal{H}$ and $\mathcal{K}$, let $\mathcal{L}\br{\mathcal{H}, \mathcal{K}}$ denote the
space of bounded linear operators from $\mathcal{H}$ to $\mathcal{K}$
and set $\mathcal{L} \br{\mathcal{H}} := \mathcal{L} \br{\mathcal{H}, \mathcal{H}}$. Fix
$n \in \mathbb{N}^{*} = \mathbb{N} \setminus \lb 0 \rb$ and let $A \in L^{\infty} \br{\mathbb{R}^{n}; \mathcal{L}
\br{\mathbb{C}^{n}}}$. Consider the sesquilinear form $\mathfrak{l}^{A} :
H^{1}\br{\mathbb{R}^{n}} \times H^{1} \br{\mathbb{R}^{n}} \rightarrow \mathbb{C}$ defined by
$$
\mathfrak{l}^{A} \brs{u,v} := \int_{\mathbb{R}^{n}} \langle A(x) \nabla u (x) , \nabla
v(x) \rangle_{\mathbb{C}^{n}} \, dx
$$
for $u, \, v \in H^{1}\br{\mathbb{R}^{n}}$. Suppose that $\mathfrak{l}^{A}$
satisfies the G{\aa}rding inequality
\begin{equation}
\label{eqtn:Garding0}
\mathrm{Re} \br{\mathfrak{l}^{A}\brs{u,u}} \geq \kappa^{A} \norm{\nabla u}^{2}_{2}
\end{equation}
for all $u \in H^{1}\br{\mathbb{R}^{n}}$, for some $\kappa^{A} > 0$. A well-known
representation theorem from classical form theory (cf. {\cite[Thm.~VI.2.1]{kato1980perturbation}})
asserts the existence of an associated operator $L : D
\br{L} \subset L^{2} \br{\mathbb{R}^{n}} \rightarrow L^{2} \br{\mathbb{R}^{n}}$
for which
$$
\mathfrak{l}^{A} \brs{u,v} = \langle L u, v \rangle_{2}
$$
for all $v \in H^{1}\br{\mathbb{R}^{n}}$ and $u$ in the domain of
$L$,
$$
D \br{L} = \lb u \in H^{1} \br{\mathbb{R}^{n}} : \exists \, w \in
L^{2} \br{\mathbb{R}^{n}} \ s.t. \ \mathfrak{l}^{A} \brs{u,v} = \langle w , v
\rangle_{2} \ \forall \ v \in H^{1} \br{\mathbb{R}^{n}} \rb.
$$
The operator $L$ is a densely defined maximal accretive operator that
is denoted
$$
L = - \mathrm{div}( A \nabla).
$$
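For instance, when $A = I$ the form $\mathfrak{l}^{I}$ is the classical Dirichlet form, and this construction recovers the nonnegative self-adjoint Laplacian $L = -\Delta$ with domain $H^{2}\br{\mathbb{R}^{n}}$.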
Due to the accretivity of the operator, it is possible to define a square root operator
$\sqrt{L}$, with core $D \br{L}$, that satisfies
$\sqrt{L} \cdot \sqrt{L} =
L$. The Kato square root problem asks: what is the domain of definition
of the square root operator? This problem, first posed by Tosio Kato over 50
years ago, was conjectured to have the following solution.
\begin{thm}[Kato Square Root]
\label{thm:KatoOriginal}
The domain of $\sqrt{L}$ is
$D(\sqrt{L}) = H^{1}\br{\mathbb{R}^{n}}$
and for any $u \in H^{1}\br{\mathbb{R}^{n}}$
\begin{equation}
\label{eqtn:OriginalKato}
\norm{\sqrt{L} u}_{2} \simeq \norm{\nabla u}_{2}.
\end{equation}
\end{thm}
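For example, in the unperturbed case $A = I$ the estimate \eqref{eqtn:OriginalKato} holds with equality of norms: with the Fourier transform normalised to be an isometry on $L^{2}\br{\mathbb{R}^{n}}$,
$$
\norm{(-\Delta)^{\frac{1}{2}} u}_{2}^{2} = \int_{\mathbb{R}^{n}} \abs{\xi}^{2} \abs{\hat{u}(\xi)}^{2} \, d \xi = \norm{\nabla u}_{2}^{2}
$$
for all $u \in H^{1}\br{\mathbb{R}^{n}}$. The difficulty of the problem therefore lies in the lack of self-adjointness caused by a general accretive coefficient matrix $A$.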
This long-standing problem withstood solution until 2002, when it was proved using local $T(b)$ methods by Steve Hofmann, Michael Lacey and Alan McIntosh in \cite{hofmann2002solution}, under the additional assumption of Gaussian heat kernel bounds, and in full generality by Pascal Auscher, Steve Hofmann, Michael Lacey, Alan McIntosh and Philippe Tchamitchian in \cite{auscher2002solution}.
We will be interested in an alternative method of proof that was
built from similar principles and appeared a few years later.
Let $\Pi := \Gamma + \Gamma^{*}$ be a Dirac-type operator on a Hilbert
space $\mathcal{H}$ and $\Pi_{B}
:= \Gamma + B_{1} \Gamma^{*} B_{2}$ be a perturbation of $\Pi$ by
bounded operators $B_{1}$ and $B_{2}$. Typically, $\Pi$ is considered
to be a first-order system acting on $\mathcal{H} := L^{2} \br{\mathbb{R}^{n};
\mathbb{C}^{N}}$ for some $n, \, N \in \mathbb{N}^{*}$ and the perturbations $B_{1}$ and
$B_{2}$ are multiplication by matrix-valued functions $B_{1}, \, B_{2}
\in L^{\infty} \br{\mathbb{R}^{n}; \mathcal{L} \br{\mathbb{C}^{N}}}$. In their seminal paper
\cite{axelsson2006quadratic}, A. Axelsson, S. Keith and A. McIntosh
developed a general framework for proving that the perturbed operator
$\Pi_{B}$ possessed a bounded holomorphic functional calculus. This
ultimately amounted to obtaining square function estimates of
the form
\begin{equation}
\label{eqtn:Intro1}
\int^{\infty}_{0} \norm{Q^{B}_{t} u}^{2} \frac{dt}{t} \simeq \norm{u}^{2},
\end{equation}
where $Q^{B}_{t} := t \Pi_{B} \br{I + t^{2} \Pi_{B}^{2}}^{-1}$ and $u$ is
contained in the range $\overline{R \br{\Pi_{B}}}$. They
proved that this estimate would follow entirely from a set of simple
conditions imposed upon the operators $\Gamma$, $B_{1}$ and $B_{2}$,
labelled (H1) - (H8). Then, by checking this list of simple
conditions, the Axelsson-Keith-McIntosh framework, or AKM framework
by way of abbreviation, could be used to conclude that the
particular selection of operators
\begin{equation}
\label{eqtn:OperatorsClassic}
\Gamma := \br{\begin{array}{c c}
0 & 0 \\ \nabla & 0
\end{array}}, \quad B_{1} = I, \quad B_{2} = \br{\begin{array}{c c}
I & 0 \\ 0 & A
\end{array}},
\end{equation}
defined on $L^{2} \br{\mathbb{R}^{n}} \oplus L^{2} \br{\mathbb{R}^{n};\mathbb{C}^{n}}$, would
satisfy \eqref{eqtn:Intro1} and therefore $\Pi_{B}$ would possess a bounded
holomorphic functional calculus. The Kato square root estimate then
followed almost trivially from this.
\vspace*{0.1in}
Many classical problems from harmonic analysis have a direct
counterpart in the Schr\"{o}dinger operator setting. In keeping with
this theme, one can consider the Kato square root problem with
potential. Let $V : \mathbb{R}^{n} \rightarrow \mathbb{C}$ be a measurable function
that is contained in $L^{1}_{loc}\br{\mathbb{R}^{n}}$.
Then $V$ can be
viewed as a densely defined closed multiplication operator on
$L^{2}\br{\mathbb{R}^{n}}$ with domain
$$
D \br{V} = \lb u \in L^{2}\br{\mathbb{R}^{n}} : V \cdot u \in L^{2} \br{\mathbb{R}^{n}} \rb.
$$
The density of $D(V)$ follows from the condition $V \in L^{1}_{loc}(\mathbb{R}^{n})$.
Define the subspace
\begin{equation}
\label{eqtn:H1V}
H^{1,V} \br{\mathbb{R}^{n}} := H^{1}\br{\mathbb{R}^{n}} \cap D
\br{V^{\frac{1}{2}}} := \lb u \in
H^{1} \br{\mathbb{R}^{n}} : V^{\frac{1}{2}} \cdot u \in L^{2} \br{\mathbb{R}^{n}} \rb.
\end{equation}
Here
the complex square root $V^{\frac{1}{2}}$ is defined via the principal
branch cut along the negative real axis. It is easy to see that
$H^{1,V}(\mathbb{R}^{n})$ is dense in $L^{2}\br{\mathbb{R}^{n}}$ since
$C^{\infty}_{0}(\mathbb{R}^{n}) \subset H^{1,V}(\mathbb{R}^{n})$.
Let $A \in L^{\infty}\br{\mathbb{R}^{n};\mathcal{L} \br{\mathbb{C}^{n}}}$ be as before with
\eqref{eqtn:Garding0} satisfied for some $\kappa^{A} > 0$. Consider the sesquilinear form $\mathfrak{l}_{V}^{A} : H^{1,V}
\br{\mathbb{R}^{n}} \times H^{1,V} \br{\mathbb{R}^{n}} \rightarrow \mathbb{C}$
defined through
$$
\mathfrak{l}_{V}^{A} \brs{u,v} := \mathfrak{l}^{A} \brs{u,v} + \int_{\mathbb{R}^{n}} V(x)
u(x) \overline{v}(x) \, dx
$$
for $u, \, v \in H^{1,V} \br{\mathbb{R}^{n}}$. Suppose that there exists
some $\kappa_{V}^{A} > 0$ for which
$\mathfrak{l}_{V}^{A}$ satisfies the associated G{\aa}rding inequality
\begin{equation}
\label{eqtn:Garding}
\mathrm{Re} \br{\mathfrak{l}_{V}^{A} \brs{u,u}} \geq \kappa_{V}^{A}
\br{\norm{ V^{\frac{1}{2}} u}_{2}^{2} + \norm{\nabla u}_{2}^{2}}
\end{equation}
for all $u \in H^{1,V} \br{\mathbb{R}^{n}}$.
\begin{rmk}
If the range of $V$ is contained in some sector
$$
S_{\mu+} := \lb z \in \mathbb{C} \cup \lb \infty \rb :
\abs{\mathrm{arg}\br{z}} \leq \mu \ or \ z = 0, \, \infty \rb
$$
for some $\mu \in [0,\frac{\pi}{2})$, then \eqref{eqtn:Garding} will follow
automatically from \eqref{eqtn:Garding0}.
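Indeed, in this case $\abs{\mathrm{arg}\br{V(x)}} \leq \mu$ for almost every $x$, so $\mathrm{Re}\br{V(x)} \geq \cos \br{\mu} \abs{V(x)}$ and, since $\abs{V^{\frac{1}{2}} u}^{2} = \abs{V} \abs{u}^{2}$,
$$
\mathrm{Re} \br{\mathfrak{l}_{V}^{A} \brs{u,u}} \geq \kappa^{A} \norm{\nabla u}_{2}^{2} + \cos \br{\mu} \norm{V^{\frac{1}{2}} u}_{2}^{2}
$$
for all $u \in H^{1,V}\br{\mathbb{R}^{n}}$, so that one may take, for instance, $\kappa_{V}^{A} = \min \lb \kappa^{A}, \cos\br{\mu} \rb$.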
\end{rmk}
Once again, the accretivity of
$\mathfrak{l}_{V}^{A}$ implies the existence of a maximal accretive operator
associated with this form denoted by
$$
L + V = - \mathrm{div} (A \nabla) + V
$$
and defined on
$$
D(L + V) = \lb u \in H^{1,V}(\mathbb{R}^{n}) : \exists \, w \in L^{2}(\mathbb{R}^{n}) \
s.t. \ \mathfrak{l}^{A}_{V}[u,v] = \langle w, v \rangle_{2} \ \forall \ v
\in H^{1,V}(\mathbb{R}^{n}) \rb.
$$
For
$\alpha \in [1,2]$, define $\mathcal{W}_{\alpha}$ to be the
class of all $V \in L^{1}_{loc}(\mathbb{R}^{n})$ for which
$$
\brs{V}_{\alpha} := \sup_{u \in D(\abs{V} - \Delta)} \frac{\norm{\abs{V}^{\frac{\alpha}{2}}
u}_{2} + \norm{\br{- \Delta}^{\frac{\alpha}{2}}u}_{2}}{\norm{\br{\abs{V} - \Delta}^{\frac{\alpha}{2}} u}_{2}} < \infty.
$$
As will be proved in Section \ref{subsec:W}, the collection of
potential classes $\lb \mathcal{W}_{\alpha} \rb_{\alpha \in [1,2]}$ is
decreasing. The largest class $\mathcal{W}_{1}$ consists of all
locally integrable potentials with no additional restrictions and the
smallest class $\mathcal{W}_{2}$ contains $RH_{2}$ in any dimension
and $L^{\frac{n}{2}}(\mathbb{R}^{n})$ in dimension $n > 4$.
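To illustrate the first of these claims, observe that for any $V \in L^{1}_{loc}\br{\mathbb{R}^{n}}$ and $u \in D\br{\abs{V} - \Delta}$, a formal computation (domain subtleties aside) gives
$$
\norm{\abs{V}^{\frac{1}{2}} u}_{2}^{2} + \norm{\br{-\Delta}^{\frac{1}{2}} u}_{2}^{2} = \langle \br{\abs{V} - \Delta} u , u \rangle_{2} = \norm{\br{\abs{V} - \Delta}^{\frac{1}{2}} u}_{2}^{2},
$$
and therefore, by the elementary inequality $a + b \leq \sqrt{2}\br{a^{2} + b^{2}}^{\frac{1}{2}}$, $\brs{V}_{1} \leq \sqrt{2}$.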
In this paper, our aim is to prove the
potential dependent Kato estimate as presented in the following
theorem.
\begin{thm}[Kato with Potential]
\label{thm:KatoPotential}
Let $V \in \mathcal{W}_{\alpha}$ for some $\alpha \in (1,2]$ and $A \in L^{\infty} \br{\mathbb{R}^{n}; \mathcal{L}
\br{\mathbb{C}^{n}}}$. Suppose that the G{\aa}rding inequalities
\eqref{eqtn:Garding0} and \eqref{eqtn:Garding} are both satisfied
with constants $\kappa^{A} > 0$ and $\kappa_{V}^{A} > 0$
respectively. There exists a constant $C_{V} > 0$ such that
\begin{equation}
\label{eqtn:KatoPotential} \tag{KP}
C_{V}^{-1} \br{\norm{ V^{\frac{1}{2}} u}_{2} + \norm{\nabla u}_{2}} \leq
\norm{\sqrt{L + V} u}_{2} \leq C_{V}
\br{\norm{V^{\frac{1}{2}} u}_{2} + \norm{\nabla u}_{2}}
\end{equation}
for all $u \in H^{1,V}\br{\mathbb{R}^{n}}$. Moreover, the constant
$C_{V}$ depends on $V$ and $\alpha$ through
$$
C_{V} = \tilde{C}_{V} \br{\alpha - 1}^{-1} (1 + \brs{V}_{\alpha}^{2}),
$$
where $\tilde{C}_{V}$ only depends on $V$ through $\kappa_{V}^{A}$ and
is independent of $\alpha$.
\end{thm}
This theorem is actually a statement concerning the domain of the
square root operator $\sqrt{L + V}$. Indeed,
\eqref{eqtn:KatoPotential} implies the equality
$$
D(\sqrt{L + V}) = H^{1,V}(\mathbb{R}^{n}).
$$
\begin{rmk}
\label{rmk:AInf}
The above theorem tells us, in particular, that the Kato estimate
with potential, \eqref{eqtn:KatoPotential}, is valid for any
potential $V$ with range contained in $S_{\mu+}$ for some $\mu \in
[0,\frac{\pi}{2})$ and such that $\abs{V}$ is either contained in $RH_{2}$ for any
dimension or in $L^{\frac{n}{2}}(\mathbb{R}^{n})$ in dimension $n > 4$.
In
{\cite[pg.~3]{auscher2007maximal}}, it is suggested but not
proved
that $\brs{V}_{\alpha} < \infty$ will occur
provided that $\abs{V} \in RH_{q}$ with $q > \alpha$. If this is
indeed the case then Theorem \ref{thm:KatoPotential} will imply that
the Kato estimate is true for any potential
$V$ with range contained in $S_{\mu+}$ for some $\mu \in
[0,\frac{\pi}{2})$ and
with $\abs{V}$ contained in $RH_{1} = A_{\infty}$.
\end{rmk}
In direct analogy to the potential free case, the Kato problem with
potential will be solved by constructing appropriate potential
dependent Dirac-type operators and demonstrating that they retain
a bounded holomorphic functional calculus under perturbation. In
particular, this strategy will be applied to the Dirac-type operator
\begin{equation}
\label{eqtn:OperatorsPotential}
\Pi_{\abs{V}^{\frac{1}{2}}} := \Gamma_{\abs{V}^{\frac{1}{2}}} + \Gamma_{\abs{V}^{\frac{1}{2}}}^{*} := \br{\begin{array}{c c c}
0 & 0 & 0 \\
\abs{V}^{\frac{1}{2}} & 0 &
0
\\
\nabla & 0 & 0
\end{array}} + \br{\begin{array}{c c c}
0 & \abs{V}^{\frac{1}{2}} & - \mathrm{div} \\
0 & 0 & 0 \\
0 & 0 & 0
\end{array}}
\end{equation}
defined on $L^{2}(\mathbb{R}^{n}) \oplus L^{2}\br{\mathbb{R}^{n}} \oplus L^{2}
\br{\mathbb{R}^{n} ; \mathbb{C}^{n}}$, under the perturbation
\begin{equation}
\label{eqtn:OperatorsPotential2}
B_{1} = I, \quad B_{2} := \br{\begin{array}{c c c}
I & 0 & 0 \\
0 & e^{i \cdot \mathrm{arg} V} & 0 \\
0 & 0 & A
\end{array}}.
\end{equation}
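To see, at least formally, why this choice of operators encodes $L + V$, note that for $u \in C^{\infty}_{0}\br{\mathbb{R}^{n}}$ a direct computation with $\Pi_{B} := \Gamma_{\abs{V}^{\frac{1}{2}}} + B_{1} \Gamma_{\abs{V}^{\frac{1}{2}}}^{*} B_{2}$ gives
$$
\Pi_{B} \br{u, 0, 0} = \br{0, \abs{V}^{\frac{1}{2}} u, \nabla u}
$$
and, since $e^{i \cdot \mathrm{arg} V} \abs{V} = V$,
$$
\Pi_{B}^{2} \br{u, 0, 0} = \br{\abs{V}^{\frac{1}{2}} e^{i \cdot \mathrm{arg} V} \abs{V}^{\frac{1}{2}} u - \mathrm{div}\br{A \nabla u}, 0, 0} = \br{\br{L + V} u, 0, 0}.
$$
Since the components of $\Pi_{B}\br{u,0,0}$ are precisely $0$, $\abs{V}^{\frac{1}{2}} u$ and $\nabla u$, the estimate \eqref{eqtn:KatoPotential} corresponds, informally, to the comparability of $\norm{\Pi_{B}\br{u,0,0}}$ and $\norm{\sqrt{\Pi_{B}^{2}}\br{u,0,0}}$ provided by the functional calculus (see Corollary \ref{cor:Kato} below).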
It should be observed that the operator
$\Gamma_{\abs{V}^{\frac{1}{2}}}$ is not first-order homogeneous due to
the presence of the zero-order potential term. It will therefore not
necessarily satisfy the two conditions of the original AKM framework that
are intended to capture the first-order homogeneity property, the cancellation and coercivity conditions, (H7)
and (H8). As such, the original AKM framework cannot be directly applied. The key
difficulty in proving our result is then to alter
the original framework in order to allow for such operators. In
particular, a non-homogeneous version of the Axelsson-Keith-McIntosh
framework will be developed to handle operators of the form
\begin{equation}
\label{eqtn:NonHomog}
\Gamma_{J} := \br{\begin{array}{c c c}
0 & 0 & 0 \\
J & 0 & 0 \\
D & 0 & 0
\end{array}},
\end{equation}
where $D$ is a homogeneous first-order differential operator and $J$
is a possibly non-homogeneous differential operator of order less than
or equal to one. The technical challenge presented
by the inclusion of the non-homogeneous part $J$ will be overcome by separating our
square function norm into components and demonstrating that the
non-homogeneous term allows the first two components to be bounded,
while the third component can be bounded using an argument
similar to the classical one of \cite{axelsson2006quadratic}.
Since the operator $\Gamma_{J}$ is of a more general form than
$\Gamma_{\abs{V}^{\frac{1}{2}}}$, the non-homogeneous AKM framework that we develop will
have applications not confined to zero-order scalar
potentials. Indeed, the non-homogeneous framework will also be used to
prove Kato estimates for systems of equations with
zero-order potential and for scalar equations with first-order
potentials.
The structure of this paper is as follows. Section
\ref{sec:Holomorphic} is quite classical in nature. It provides a brief survey of
the natural functional calculus for bisectorial operators. It also
describes how
to prove the boundedness of the holomorphic functional calculus from
square function estimates for a bisectorial operator. These classical proofs are repeated
here to track the dependence of the estimates on relevant
constants. This will be required to determine the potential dependence
of the constants in
Theorem \ref{thm:KatoPotential}. Section \ref{sec:NonHomog}
describes the non-homogeneous AKM framework and
states the main results associated with it. Section \ref{sec:Square} contains most of the technical
machinery and is dedicated to a proof of our main result. Section
\ref{sec:Applications} will apply the non-homogeneous AKM framework to
the scalar Kato problem with potential, the Kato problem for systems
with zero-order potential and the scalar Kato problem with first-order
potential. It is here that a proof of Theorem \ref{thm:KatoPotential}
will be completed.
Finally, in Section \ref{sec:Literature}, we will provide a meta-discussion
on the proof techniques used and compare our work with what has been
previously accomplished on non-homogeneous Kato-type
estimates.
\section{Preliminaries}
\label{sec:Holomorphic}
Let us outline the construction of the natural functional calculus
associated with a bisectorial operator. The treatment of
functional calculi given here closely follows
\cite{haase2006functional}, with significant detail
omitted. Appropriate changes are made to account for the fact that we
consider bisectorial operators instead of sectorial operators. Other
thorough treatments of functional calculus for sectorial operators can
be found in \cite{albrecht1996operator} or \cite{hytonen2017analysis}.
For $\mu \in [0,\pi)$ define the open and closed sectors
$$
S^{o}_{\mu+} := \left\lbrace \begin{array}{c c}
\lb z \in \mathbb{C} \setminus \lb 0 \rb : \abs{\mathrm{arg} \br{
z}} < \mu \rb & \mu \in (0,\pi) \\
(0,\infty) & \mu = 0
\end{array} \right.
$$
and
$$
S_{\mu+} := \left\lbrace \begin{array}{c c}
\lb z \in \mathbb{C} \cup \lb \infty \rb: \abs{\mathrm{arg} \br{
z}} \leq \mu \ or \ z = 0, \ \infty \rb & \mu \in (0,\pi) \\
\left[ 0 , \infty \right] & \mu = 0.
\end{array} \right.
$$
Then, for $\mu \in \left[0,\frac{\pi}{2}\right)$, define the open and closed bisectors
$$
S^{o}_{\mu} := \br{S^{o}_{\mu+}} \cup \br{-S^{o}_{\mu+}}
$$
and
$$
S_{\mu} := \br{S_{\mu+}} \cup \br{- S_{\mu +}}
$$
respectively. Throughout this section we consider bisectorial operators defined on a
Hilbert space $\mathcal{H}$ with norm and inner product denoted by
$\norm{\cdot}$ and $\langle \cdot, \cdot \rangle$ respectively.
\begin{deff}[Bisectorial Operator]
\label{def:Bisectorial}
A linear operator $T : D \br{T} \subseteq \mathcal{H} \rightarrow \mathcal{H}$ is said
to be $\omega$-bisectorial for $\omega \in \left[0,\frac{\pi}{2}\right)$ if
the spectrum $\sigma \br{T}$ is contained in the bisector
$S_{\omega}$ and if for any $\mu \in \br{\omega, \frac{\pi}{2}}$, there exists $C_{\mu} > 0$ such that the resolvent
bound
\begin{equation}
\label{eqtn:ResolventEst}
\abs{\zeta} \norm{\br{\zeta I - T}^{-1}} \leq C_{\mu}
\end{equation}
holds for all $\zeta \in \mathbb{C} \setminus S_{\mu}$. $T$ is said to be
bisectorial if it is $\omega$-bisectorial for some $\omega \in \left[0, \frac{\pi}{2}\right)$.
\end{deff}
Sectorial operators are defined identically except with the sector
$S_{\mu+}$ performing the role of the bisector $S_{\mu}$. An important fact concerning bisectorial
operators is the following decomposition result.
\begin{prop}[{\cite[Thm.~3.8]{cowling1996banach}}]
\label{prop:Decomposition}
Let $T : D \br{T} \subset \mathcal{H} \rightarrow \mathcal{H}$ be a bisectorial
operator. Then $T$ is necessarily densely defined and
the Hilbert space $\mathcal{H}$ admits the following decomposition
$$
\mathcal{H} = N \br{T} \oplus \overline{R\br{T}}.
$$
\end{prop}
Let $T$ be an $\omega$-bisectorial operator for $\omega \in \left[0,
\frac{\pi}{2} \right)$ and $\mu \in \br{\omega, \frac{\pi}{2}}$. Let $\mathcal{M} \br{S^{o}_{\mu}}$ denote
the algebra of all meromorphic functions on the open bisector
$S^{o}_{\mu}$ and define the following subalgebras,
$$
H\br{S^{o}_{\mu}} := \lb f \in \mathcal{M} \br{S^{o}_{\mu}} : f \
holomorphic \ on \ S^{o}_{\mu} \rb,
$$
$$
H^{\infty} \br{S^{o}_{\mu}} := \lb f \in H \br{S^{o}_{\mu}} :
\norm{f}_{\infty} := \sup_{z \in S^{o}_{\mu}} \abs{f(z)} < \infty \rb
$$
and
$$
H^{\infty}_{0} \br{S^{o}_{\mu}} := \lb f \in H^{\infty} \br{S^{o}_{\mu}} :
\exists \ C, \, \alpha > 0 \ s.t. \ \abs{f(z)} \leq C \cdot
\frac{\abs{z}^{\alpha}}{1 + \abs{z}^{2 \alpha}} \ \forall \ z \in S^{o}_{\mu}\rb.
$$
For any $f \in H^{\infty}_{0} \br{S^{o}_{\mu}}$, one can associate an
operator $f(T)$ as follows. For $u \in \mathcal{H}$, define
$$
f\br{T}u := \frac{1}{2 \pi i} \oint_{\gamma} f \br{z} \br{z I -
T}^{-1} u \, d z,
$$
where the curve
$$
\gamma := \lb \pm r e^{\pm i \nu} : 0 \leq r < \infty \rb
$$
for some $\nu \in (\omega, \mu)$ is traversed anticlockwise. Using the
resolvent bounds of our operator in combination with the size
estimates for functions in $H^{\infty}_{0}(S^{o}_{\mu})$, it can
easily be shown that the operator $f(T)$ is well-defined. Moreover, it
can also be proved that this association constitutes an algebra homomorphism.
\begin{thm}[{\cite[Lem.~2.3.1]{haase2006functional}}]
\label{thm:PsiFunctional}
The map
$
\Phi_{0}^{T} : H^{\infty}_{0} \br{S^{o}_{\mu}} \rightarrow \mathcal{L} \br{\mathcal{H}}
$
defined through
$$
\Phi_{0}^{T}(f) := f\br{T}
$$
is a well-defined algebra homomorphism. Moreover, it is independent
of the value of $\nu$.
\end{thm}
Since the functions in $H^{\infty}_{0} \br{S^{o}_{\mu}}$ vanish at the
origin, we should expect that the null space of the newly
formed operator will be at least as large as the null space of the original
operator. This is indeed the case, as stated in the proposition below.
\begin{prop}[{\cite[Thm.~2.3.3]{haase2006functional}}]
\label{prop:NullSpaceInclusion}
For a bisectorial operator $T : D \br{T} \subseteq \mathcal{H}
\rightarrow \mathcal{H}$, the null-space inclusion
$$
N \br{T} \subseteq N \br{f \br{T}}
$$
holds for all $f \in H^{\infty}_{0} \br{S^{o}_{\mu}}$.
\end{prop}
Define the subalgebra of functions
$$
\mathcal{E} \br{S^{o}_{\mu}} := H^{\infty}_{0} \br{S^{o}_{\mu}} \oplus \langle
\br{z + i}^{-1} \rangle \oplus
\langle 1 \rangle.
$$
$\Phi_{0}^{T}$ has an extension
$$
\Phi_{p}^{T} : \mathcal{E}\br{S^{o}_{\mu}} \rightarrow \mathcal{L} \br{\mathcal{H}}
$$
defined through
$$
\Phi_{p}^{T}(g) := g(T) := f(T) + c \cdot
\br{T + i}^{-1} + d \cdot I
$$
for $g = f + c \cdot \br{z + i}^{-1} + d \in \mathcal{E}
\br{S^{o}_{\mu}}$, where $f \in H^{\infty}_{0} \br{S^{o}_{\mu}}$ and
$c, \, d \in \mathbb{C}$.
\begin{thm}[{\cite[Thm.~2.3.3]{haase2006functional}}]
\label{thm:PrimaryFunctional}
The map $\Phi_{p}^{T}$ is an algebra homomorphism called the primary
functional calculus associated with $T$.
\end{thm}
This map can be extended once more through the process of
regularization. A function $f \in
\mathcal{M}\br{S^{o}_{\mu}}$ is said to be regularizable with respect
to the primary functional calculus $\Phi_{p}^{T} : \mathcal{E}
\br{S^{o}_{\mu}} \rightarrow \mathcal{L} \br{\mathcal{H}}$ if there
exists $e \in \mathcal{E} \br{S^{o}_{\mu}}$ such that $e \br{T}$ is
injective and $e \cdot f \in \mathcal{E} \br{S^{o}_{\mu}}$. The
notation $\mathcal{E}\br{S^{o}_{\mu}}_{r}$ will be used to denote the
algebra of regularizable functions. Let $\mathcal{C} \br{\mathcal{H}}$
denote the set of closed operators from $\mathcal{H}$ to itself. Then define the extension
$$
\Phi^{T} : \mathcal{E} \br{S^{o}_{\mu}}_{r} \rightarrow \mathcal{C} \br{\mathcal{H}}
$$
through
$$
\Phi^{T} \br{f} := f(T) := \Phi_{p}^{T}(e)^{-1} \cdot \Phi_{p}^{T}(e \cdot f)
$$
for $f \in \mathcal{E} \br{S^{o}_{\mu}}_{r}$ and $e \in
\mathcal{E} \br{S^{o}_{\mu}}$ a
regularizing function for $f$. This definition is independent of the
chosen regularizer $e$ for $f$ and therefore $\Phi^{T}$ is well-defined. The following important theorem
establishes the desired properties of a functional calculus for
this extension; accordingly, the map $\Phi^{T}$ is known as the natural
functional calculus for the operator $T$.
\begin{thm}[{\cite[Thm.~1.3.2]{haase2006functional}}]
\label{thm:FundFunctCalc}
Let $T$ be an $\omega$-bisectorial operator on a Hilbert space
$\mathcal{H}$ for some $\omega \in \left[0,\frac{\pi}{2}\right)$. Let
$\mu \in \br{\omega, \frac{\pi}{2}}$. The following assertions hold.
\begin{enumerate}
\item $\mathbf{1}\br{T} = I$ and $\br{z}\br{T} = T$, where
$\mathbf{1} : S^{o}_{\mu} \rightarrow \mathbb{C}$ is the constant function defined by $\mathbf{1}(z) :=
1$ for $z \in S^{o}_{\mu}$.
\item Let $f, \, g \in \mathcal{E} \br{S^{o}_{\mu}}_{r}$. Then
$$
f(T) + g(T) \subset \br{f + g}(T), \qquad f(T) g(T) \subset
\br{f \cdot g}(T)
$$
and $D \br{f(T) g(T)} = D
\br{\br{f \cdot g}(T)} \cap D \br{g(T)}$. One will
have equality in these relations if $g(T) \in \mathcal{L}(\mathcal{H})$.
\end{enumerate}
\end{thm}
The ensuing definition plays a vital role in the
solution method to the Kato square root problem using the AKM framework.
\begin{deff}
\label{def:BoundedHCalc}
Let $0 \leq \omega < \mu < \frac{\pi}{2}$. An $\omega$-bisectorial operator
$T : D(T) \subset \mathcal{H} \rightarrow \mathcal{H}$ is said to have
a bounded $H^{\infty}\br{S^{o}_{\mu}}$-functional calculus if there
exists $c > 0$ such that
\begin{equation}
\label{eqtn:BoundedHolomorphic}
\norm{f \br{T}} \leq c \cdot \norm{f}_{\infty}
\end{equation}
for all $f \in
H^{\infty}_{0} \br{S^{o}_{\mu}}$. $T$ is said to have a bounded holomorphic functional calculus if it
has a bounded $H^{\infty}\br{S^{o}_{\mu}}$-functional calculus for some $\mu$.
\end{deff}
\begin{rmk}
\label{rmk:Injective}
Note that a more intuitive definition for a bounded $H^{\infty}
\br{S^{o}_{\mu}}$-functional calculus would be to require that
\eqref{eqtn:BoundedHolomorphic} hold for all $f \in H^{\infty}
\br{S^{o}_{\mu}}$. Unfortunately at this stage it is impossible to
ascertain whether
$H^{\infty} \br{S^{o}_{\mu}} \subset \mathcal{E}
\br{S^{o}_{\mu}}_{r}$. When this inclusion does not hold, the
operator $f(T)$ will not be
well-defined for all $f \in H^{\infty} \br{S^{o}_{\mu}}$. If $T$ so happens
to be injective, then each $f \in H^{\infty} \br{S^{o}_{\mu}}$ is in fact
regularizable by $z \br{1 + z^{2}}^{-1}$ and the estimate
\eqref{eqtn:BoundedHolomorphic} makes sense for all $f \in H^{\infty}
\br{S^{o}_{\mu}}$. Fortunately, in this situation the two definitions
coincide. That is, \eqref{eqtn:BoundedHolomorphic} will be true
for all $f \in H^{\infty}_{0} \br{S^{o}_{\mu}}$ if and only if it is
true for all $f \in H^{\infty} \br{S^{o}_{\mu}}$ when $T$ is injective.
\end{rmk}
\begin{deff}[Square Function Norms]
\label{def:SquareFunctionNorms}
Let $\psi \in H^{\infty}_{0}\br{S^{o}_{\mu}}$. For $t > 0$, define $\psi_{t} :
S^{o}_{\mu} \rightarrow \mathbb{C}$ to
be the function $\psi_{t}(z) := \psi(tz)$ for $z \in S^{o}_{\mu}$. The square function
norm associated with $\psi$ and $T$ is defined through
$$
\norm{u}_{\psi,T} := \br{\int^{\infty}_{0} \norm{\psi_{t}(T)u}^{2} \frac{dt}{t}}^{\frac{1}{2}}
$$
for $u \in \mathcal{H}$. Let $q : S^{o}_{\mu} \rightarrow \mathbb{C}$ be the
function given by
$$
q(z) := \frac{z}{1 + z^{2}}, \quad z \in S^{o}_{\mu}.
$$
$\norm{\cdot}_{q,T}$ is called the canonical square function norm
for the operator $T$.
\end{deff}
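For the canonical choice $\psi = q$, the natural functional calculus gives $q_{t}\br{T} = t T \br{I + t^{2} T^{2}}^{-1}$, so that
$$
\norm{u}_{q,T}^{2} = \int^{\infty}_{0} \norm{t T \br{I + t^{2} T^{2}}^{-1} u}^{2} \, \frac{dt}{t},
$$
which is precisely the quantity appearing in the square function estimate \eqref{eqtn:Intro1} for $T = \Pi_{B}$.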
For injective $T$, true to its name, the square function norms
$\norm{\cdot}_{\psi,T}$, for $\psi \in H^{\infty}_{0}\br{S^{o}_{\mu}}$ not identically
equal to zero on either $S^{o}_{\mu+}$ or $\br{-S^{o}_{\mu+}}$,
are indeed norms on $\mathcal{H}$. For non-injective $T$, however, they
are at most seminorms on $\mathcal{H}$ and will only be norms
following a restriction to the subspace $\overline{R(T)}$.
\begin{deff}[Square Function Estimates]
\label{def:SectSqFuncEst}
A bisectorial operator $T$ on a Hilbert space $\mathcal{H}$ is said
to satisfy square function estimates if there exists a constant $C_{SF} > 0$ such that
\begin{equation}
\label{eqtn:def:SectSqFuncEst}
C^{-\frac{1}{2}}_{SF} \cdot \norm{u} \leq \norm{u}_{q,T} \leq C_{SF}^{\frac{1}{2}} \cdot \norm{u}
\end{equation}
for all $u \in \overline{R \br{T}}$.
\end{deff}
The above definition is the same as saying that the canonical square
function norm $\norm{\cdot}_{q,T}$ is norm equivalent to $\norm{\cdot}_{\mathcal{H}}$ when restricted to
the Hilbert subspace $\overline{R \br{T}}$.
\begin{rmk}
\label{rmk:Arbitrary}
The use of the canonical norm $\norm{\cdot}_{q,T}$ in the above
definition of square function estimates is somewhat
arbitrary. Indeed, it can be swapped with $\norm{\cdot}_{\psi,T}$
for any $\psi \in H^{\infty}_{0} \br{S^{o}_{\mu}}$ not identically
equal to zero on either $S^{o}_{\mu+}$ or $\br{-S^{o}_{\mu+}}$. This
follows from the equivalence of these two norms as stated in
\cite[Thm.~7.3.1]{haase2006functional}. The implicit constant in the
norm equivalence will depend on the function $\psi$ under consideration.
\end{rmk}
\begin{prop}[Resolution of the Identity]
\label{prop:Resolution}
For $\psi \in H^{\infty}_{0}\br{S^{o}_{\mu}}$ and any $u \in \mathcal{H}$,
\begin{equation}
\label{eqtn:prop:Resolution}
c_{\psi} \br{I - \mathbb{P}_{N(T)}}u = \int^{\infty}_{0}
\psi_{t}(T)^{2} u \, \frac{dt}{t},
\end{equation}
where $\mathbb{P}_{N(T)}$ denotes the projection operator onto the
subspace $N \br{T}$ and
$$
c_{\psi} := \int^{\infty}_{0} \psi(t)^{2} \, \frac{dt}{t}.
$$
\end{prop}
\begin{proof}
Equality follows from Proposition \ref{prop:NullSpaceInclusion} for
$u \in N \br{T}$. For $u \in \overline{R \br{T}}$ this is given by
Theorem $5.2.6$ of \cite{haase2006functional} in the sectorial
case. The bisectorial case can be proved similarly.
\end{proof}
\begin{cor}
\label{cor:SA}
Suppose that $T$ is self-adjoint and $\psi \in H^{\infty}_{0}\br{S^{o}_{\mu}}$. Then for any $u \in \mathcal{H}$,
$$
\int^{\infty}_{0} \norm{\psi_{t}(T) u}^{2} \frac{dt}{t} \leq c_{\psi} \norm{u}^{2}
$$
where $c_{\psi}$ is as defined in the previous proposition.
Equality will hold if $u \in \overline{R \br{T}}$.
\end{cor}
\begin{proof}
As $T$ is self-adjoint, it follows from the definition of $\psi_{t}(T)$
that it must also be self-adjoint. On expanding the square function norm,
\begin{align*}\begin{split}
\int^{\infty}_{0} \norm{\psi_{t}(T)u}^{2}\frac{dt}{t} &=
\int^{\infty}_{0} \langle \psi_{t}(T) u, \psi_{t}(T) u \rangle \frac{dt}{t}
\\
&= \left\langle u , \int^{\infty}_{0} \psi_{t}(T)^{2} u \frac{dt}{t}
\right\rangle.
\end{split}\end{align*}
The previous proposition then gives
\begin{align*}\begin{split}
\int^{\infty}_{0} \norm{\psi_{t}(T)u}^{2} \frac{dt}{t} &= \left\langle
u, c_{\psi} \br{I - \mathbb{P}_{N(T)}} u \right\rangle \\
&\leq c_{\psi} \norm{u}^{2}.
\end{split}\end{align*}
Equality will clearly hold in the above if $u \in \overline{R(T)}$.
\end{proof}
\begin{thm}
\label{thm:BoundedHolomorphic}
Let $T$ be an $\omega$-bisectorial operator on $\mathcal{H}$ for
$\omega \in \left[0,\frac{\pi}{2}\right)$. Suppose that $T$ satisfies square function
estimates with constant $C_{SF} > 0$. Then $T$ must have a bounded
$H^{\infty}\br{S^{o}_{\mu}}$-functional calculus for any
$\mu \in \br{\omega, \frac{\pi}{2}}$. In particular, there exists a constant $c > 0$,
independent of $T$, such that
$$
\norm{f(T)} \leq \br{c \cdot C_{SF} \cdot C_{\mu}} \cdot \norm{f}_{\infty}
$$
for all $f \in H^{\infty}_{0} \br{S^{o}_{\mu}}$, where $C_{\mu} > 0$
is the constant from the resolvent estimate \eqref{eqtn:ResolventEst}.
\end{thm}
\begin{proof}
Let $f \in H^{\infty}_{0} \br{S^{o}_{\mu}}$. For $u \in N \br{T}$,
the bound
\begin{equation}
\label{eqtn:BoundedHolomorphic0}
\norm{f \br{T} u} \leq \br{c \cdot C_{SF} \cdot C_{\mu} \cdot \norm{f}_{\infty}} \cdot \norm{u}
\end{equation}
follows trivially from Proposition
\ref{prop:NullSpaceInclusion} for any $c > 0$. Fix $u \in \overline{R
\br{T}}$. On applying the lower square function estimate to $f(T) u
\in \overline{R \br{T}}$,
\begin{align*}\begin{split}
\norm{f(T) u}^{2} &\leq C_{SF} \int_{0}^{\infty}
\norm{q_{s}(T) f\br{T} u}^{2} \frac{ds}{s} \\
&= 4 C_{SF} \int^{\infty}_{0} \norm{q_{s}(T) f\br{T}
\int_{0}^{\infty} \br{q_{t}(T)}^{2} u \frac{dt}{t} }^{2} \frac{ds}{s}
\\
&\leq 4 C_{SF} \int^{\infty}_{0} \br{\int^{\infty}_{0}
\norm{q_{s}(T) f\br{T} q_{t}(T)} \norm{q_{t}(T) u}
\frac{dt}{t} }^{2} \frac{ds}{s},
\end{split}\end{align*}
where in the second line we used the resolution of the identity
Proposition \ref{prop:Resolution}.
The Cauchy-Schwarz inequality then gives
\begin{align}\begin{split}
\label{eqtn:CauchySchwarz}
\norm{f \br{T} u}^{2} &\leq 4 C_{SF} \int^{\infty}_{0}
\br{\int^{\infty}_{0} \norm{q_{s}(T) f \br{T} q_{t}(T)} \frac{dt}{t}}
\br{ \int^{\infty}_{0} \norm{q_{s}(T) f \br{T}
q_{t}(T)} \norm{q_{t}(T) u}^{2} \frac{dt}{t}} \frac{ds}{s}.
\end{split}\end{align}
From the homomorphism property for the $H^{\infty}_{0}\br{S^{o}_{\mu}}$-functional calculus,
$$
q_{s}(T) f
\br{T} q_{t}(T) = \br{q_{s} \cdot f \cdot q_{t}}
\br{T}.
$$
Since our operator $T$ satisfies resolvent bounds with constant
$C_{\mu} > 0$,
\begin{align*}\begin{split}
\norm{q_{s}(T) f(T) q_{t}(T)} &= \norm{\br{q_{s}
\cdot f \cdot q_{t}}\br{T}} \\
&= \frac{1}{2 \pi} \norm{\int_{\gamma} \br{q_{s} \cdot f \cdot q_{t}}(z)
\br{T - z I}^{-1} dz} \\
&\leq \frac{C_{\mu}}{2 \pi} \cdot \norm{f}_{\infty} \cdot \int_{\gamma}
\abs{q_{s}(z)} \abs{q_{t}(z)} \frac{\abs{dz}}{\abs{z}}.
\end{split}\end{align*}
On noting that $q \in H^{\infty}_{0} \br{S^{o}_{\mu}}$,
$$
\norm{q_{s}(T) f \br{T} q_{t}(T)} \leq
c \cdot C_{\mu} \cdot \norm{f}_{\infty} \cdot \int_{\gamma} \frac{\abs{s
z}^{\alpha}}{1 + \abs{s z}^{2 \alpha}} \frac{\abs{t
z}^{\alpha}}{1 + \abs{t z}^{2 \alpha}} \frac{\abs{dz}}{\abs{z}}
$$
for some $\alpha > 0$ and constant $c > 0$ independent of $T$. Thus we obtain the estimate
$$
\norm{q_{s}(T) f \br{T} q_{t}(T)} \leq
c \cdot C_{\mu} \cdot \norm{f}_{\infty} \cdot \left\lbrace \begin{array}{c c}
\br{\frac{t}{s}}^{\alpha} \br{1 + \log \br{\frac{s}{t}}} & for \ 0 < t \leq s < \infty \\
& \\
\br{\frac{s}{t}}^{\alpha} \br{1 + \log \br{\frac{t}{s}}} & for \ 0 < s < t < \infty,
\end{array} \right.
$$
where the value of the $T$ independent constant $c$ is allowed to change. This then implies that
$$
\sup_{s > 0}\int^{\infty}_{0} \norm{q_{s}(T)f(T)
q_{t}(T)} \frac{dt}{t}, \quad \sup_{t > 0}
\int^{\infty}_{0}\norm{q_{s}(T) f(T) q_{t}(T)} \frac{ds}{s}
\leq c \cdot C_{\mu} \cdot \norm{f}_{\infty}.
$$
On applying this estimate to \eqref{eqtn:CauchySchwarz},
\begin{align*}\begin{split}
\norm{f \br{T} u}^{2} &\leq c \cdot C_{\mu} \cdot C_{SF} \cdot \norm{f}_{\infty} \int^{\infty}_{0} \int^{\infty}_{0} \norm{q_{s}(T) f
\br{T} q_{t}(T)} \norm{q_{t}(T) u}^{2} \frac{dt}{t} \frac{ds}{s} \\
&= c \cdot C_{\mu} \cdot C_{SF} \cdot \norm{f}_{\infty} \int^{\infty}_{0}
\norm{q_{t}(T) u}^{2} \int^{\infty}_{0} \norm{q_{s}(T) f
\br{T} q_{t}(T)} \frac{ds}{s} \frac{dt}{t} \\
&\leq c^{2} \cdot C_{\mu}^{2} \cdot C_{SF} \cdot \norm{f}_{\infty}^{2} \int_{0}^{\infty}
\norm{q_{t}(T) u}^{2} \frac{dt}{t} \\
&\leq c^{2} \cdot C_{\mu}^{2} \cdot C_{SF}^{2} \cdot \norm{f}_{\infty}^{2} \cdot \norm{u}^{2}.
\end{split}\end{align*}
\end{proof}
Finally, the following Kato-type estimate follows from a well-known
classical argument.
\begin{cor}
\label{cor:Kato}
Suppose that the bisectorial operator $T$ satisfies square function
estimates with constant $C_{SF} > 0$ and the constant in the resolvent
estimate \eqref{eqtn:ResolventEst} is $C_{\mu} > 0$. Then there
exists a constant $c > 0$, independent of $T$, such that
\begin{equation}
\label{cor:eqtn:Kato}
\br{c \cdot C_{SF} \cdot C_{\mu}}^{-1} \cdot \norm{T u} \leq \norm{\sqrt{T^{2}}u} \leq
\br{c \cdot C_{SF} \cdot C_{\mu}} \cdot \norm{T u}
\end{equation}
for any $u \in D\br{T}$.
\end{cor}
\begin{proof}
Consider the restriction $S := T \vert_{\overline{R\br{T}}}$. $S$ is
an injective bisectorial operator that satisfies square function
estimates with constant $C_{SF} > 0$. Since $S$ is injective it follows that $f(S)$ is
well-defined for any $f \in H^{\infty} \br{S^{o}_{\mu}}$ by Remark
\ref{rmk:Injective}. This allows us to define the operators $f_{1}(S)$
and $f_{2}(S)$, where the functions $f_{1}$ and $f_{2}$ are defined by
$$
f_{1}(z) := \frac{\sqrt{z^{2}}}{z} \quad and \quad f_{2}(z) :=
\frac{z}{\sqrt{z^{2}}} \quad for \quad z \in S^{o}_{\mu}.
$$
The previous theorem allows us to deduce that both of these operators
are norm bounded by $c \cdot C_{SF} \cdot C_{\mu}$ for some $T$
independent constant $c > 0$. Applying the multiplicative part of Theorem
\ref{thm:FundFunctCalc} to the functions $f = f_{1}$ and $g(z) = z$
implies that
\begin{equation}
\label{eqtn:cor:Kato1}
\frac{\sqrt{S^{2}}}{S} \cdot S = \sqrt{S^{2}}
\end{equation}
on
\begin{equation}
\label{eqtn:cor:Kato2}
D(S) = D \br{\frac{\sqrt{S^{2}}}{S} \cdot S} = D \br{\sqrt{S^{2}}}
\cap D \br{S}.
\end{equation}
Similarly, applying the multiplicative part of Theorem
\ref{thm:FundFunctCalc} to $f = f_{2}$ and $g(z) = \sqrt{z^{2}}$ gives
\begin{equation}
\label{eqtn:cor:Kato3}
\frac{S}{\sqrt{S^{2}}} \cdot \sqrt{S^{2}} = S
\end{equation}
on
\begin{equation}
\label{eqtn:cor:Kato4}
D \br{\sqrt{S^{2}}} = D \br{\frac{S}{\sqrt{S^{2}}} \cdot
\sqrt{S^{2}}} = D \br{S} \cap D \br{\sqrt{S^{2}}}.
\end{equation}
Equations \eqref{eqtn:cor:Kato2} and \eqref{eqtn:cor:Kato4} together
imply that the domains $D \br{\sqrt{S^{2}}}$ and $D \br{S}$ coincide
and therefore both \eqref{eqtn:cor:Kato1} and \eqref{eqtn:cor:Kato3}
will remain valid on all of $D \br{S}$.
Let $u \in D \br{T}$. Proposition \ref{prop:Decomposition} states that $u$ has the decomposition $u = u_{1} \oplus
u_{2} \in N \br{T} \oplus \overline{R \br{T}}$. Then
\begin{align*}\begin{split}
\norm{T u} &= \norm{S u_{2}} \\
&= \norm{\frac{S}{\sqrt{S^{2}}} \cdot \sqrt{S^{2}} u_{2}} \\
&\leq c \cdot C_{SF} \cdot C_{\mu} \cdot \norm{\sqrt{S^{2}} u_{2}} \\
&= c \cdot C_{SF} \cdot C_{\mu} \cdot \norm{\sqrt{T^{2}}u},
\end{split}\end{align*}
where in the last line we used the fact that the functional
calculus commutes with the restriction map as given in Proposition
2.6.5 of \cite{haase2006functional}. Also,
\begin{align*}\begin{split}
\norm{\sqrt{T^{2}}u} &= \norm{\sqrt{S^{2}}u_{2}} \\
&= \norm{\frac{\sqrt{S^{2}}}{S} \cdot S u_{2}} \\
&\leq c \cdot C_{SF} \cdot C_{\mu} \cdot \norm{S u_{2}} \\
&= c \cdot C_{SF} \cdot C_{\mu} \cdot \norm{T u}.
\end{split}\end{align*}
\end{proof}
\section{Non-Homogeneous Axelsson-Keith-McIntosh}
\label{sec:NonHomog}
In this section we describe how the Axelsson-Keith-McIntosh framework
can be altered to account for non-homogeneous operators of the form \eqref{eqtn:NonHomog}. Our main
results for this framework will also be stated.
\subsection{AKM without Cancellation and Coercivity}
The operators that we wish to consider, $\Gamma_{J}$,
will satisfy the first six conditions of
\cite{axelsson2006quadratic}. However, they will not necessarily
satisfy the cancellation condition (H7) and the coercivity condition
(H8). It will therefore be fruitful to see what happens to the
original AKM framework when the cancellation and coercivity conditions
are removed.
Similar to the original result, we begin by assuming that we have operators that satisfy the
hypotheses (H1) - (H3) from \cite{axelsson2006quadratic}. Recall these
conditions for operators $\Gamma$, $B_{1}$ and $B_{2}$ on a Hilbert
space $\mathcal{H}$ with norm $\norm{\cdot}$ and inner product
$\langle \cdot, \cdot \rangle$.
\vspace*{0.1in}
\begin{enumerate}
\item[(H1)] $\Gamma : D(\Gamma) \rightarrow \mathcal{H}$ is a closed,
densely defined, nilpotent operator.
\\
\item[(H2)] $B_{1}$ and $B_{2}$ satisfy the accretivity conditions
$$
\mathrm{Re} \langle B_{1} u, u \rangle \geq \kappa_{1} \norm{u}^{2}
\qquad and \qquad
\mathrm{Re} \langle B_{2} v, v \rangle \geq \kappa_{2} \norm{v}^{2}
$$
for all $u \in R(\Gamma^{*})$ and $v \in R \br{\Gamma}$ for some
$\kappa_{1}$, $\kappa_{2} > 0$.
\\
\item[(H3)] The operators $\Gamma$ and $\Gamma^{*}$ satisfy
$$
\Gamma^{*} B_{2} B_{1} \Gamma^{*} = 0 \qquad and \qquad
\Gamma B_{1} B_{2} \Gamma
= 0.
$$
\end{enumerate}
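By way of illustration, for the operators $\Gamma_{\abs{V}^{\frac{1}{2}}}$, $B_{1}$ and $B_{2}$ defined in \eqref{eqtn:OperatorsPotential} and \eqref{eqtn:OperatorsPotential2}, the nilpotency required in (H1) and condition (H3) hold for purely structural reasons: $\Gamma_{\abs{V}^{\frac{1}{2}}}$ has non-zero entries only in its first column, so its range lies in the last two components, on which it vanishes, and $\Gamma_{\abs{V}^{\frac{1}{2}}}^{*}$ has non-zero entries only in its first row, so that
$$
\Gamma_{\abs{V}^{\frac{1}{2}}}^{*} B_{2} B_{1} \Gamma_{\abs{V}^{\frac{1}{2}}}^{*} = 0 \quad and \quad \Gamma_{\abs{V}^{\frac{1}{2}}} B_{1} B_{2} \Gamma_{\abs{V}^{\frac{1}{2}}} = 0,
$$
using that $B_{1} = I$ and $B_{2}$ is block diagonal. The accretivity condition (H2) for $B_{2}$ on $R \br{\Gamma_{\abs{V}^{\frac{1}{2}}}}$ is, after unwinding the definitions, essentially the G{\aa}rding inequality \eqref{eqtn:Garding}.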
In Section $4$ of \cite{axelsson2006quadratic}, the authors assume that they
have operators that satisfy the
hypotheses (H1) - (H3) and they derive several important operator
theoretic consequences from only these hypotheses. As our operators
$\Gamma$, $B_{1}$ and $B_{2}$ also satisfy (H1) - (H3), it follows
that any result proved in {\cite[Sec.~4]{axelsson2006quadratic}}
must also be true for our operators and can be used with
impunity. In the
interest of making this article as self-contained as possible, we will now
restate any such result that is to be used in this paper.
\begin{prop}[The Hodge Decomposition, {\cite[Prop.~2.2]{axelsson2006quadratic}}]
\label{prop:Hodge}
Suppose that the operators $\lb \Gamma, B_{1}, B_{2} \rb$ satisfy (H1)
- (H3). Define the perturbation dependent operators
$$
\Gamma^{*}_{B} := B_{1} \Gamma^{*} B_{2}, \quad \Gamma_{B} :=
B_{2}^{*} \Gamma B_{1}^{*} \quad and \quad \Pi_{B} := \Gamma + \Gamma_{B}^{*}.
$$
The Hilbert space $\mathcal{H}$ has the following Hodge decomposition
into closed subspaces:
\begin{equation}
\label{eqtn:Hodge}
\mathcal{H} = N \br{\Pi_{B}} \oplus \overline{R \br{\Gamma^{*}_{B}}}
\oplus \overline{R \br{\Gamma}}.
\end{equation}
Moreover, we have $N \br{\Pi_{B}} = N \br{\Gamma^{*}_{B}} \cap N
\br{\Gamma}$ and $\overline{R \br{\Pi_{B}}} = \overline{R
\br{\Gamma^{*}_{B}}} \oplus \overline{R \br{\Gamma}}$. When $B_{1} =
B_{2} = I$ these decompositions are orthogonal, and in general the
decompositions are topological. Similarly, there is also a
decomposition
$$
\mathcal{H} = N \br{\Pi_{B}^{*}} \oplus \overline{R \br{\Gamma_{B}}}
\oplus \overline{R \br{\Gamma^{*}}}.
$$
\end{prop}
\begin{prop}[{\cite[Prop.~2.5]{axelsson2006quadratic}}]
\label{prop:Bisectoriality}
Suppose that the operators $\lb \Gamma, B_{1}, B_{2} \rb$ satisfy (H1)
- (H3).
The perturbed Dirac-type operator $\Pi_{B}$ is an $\omega$-bisectorial operator with $\omega
:= \frac{1}{2} \br{\omega_{1} + \omega_{2}}$ where
$$
\omega_{1} := \sup_{u \in R \br{\Gamma^{*}} \setminus \lb 0 \rb}
\abs{\mathrm{arg} \langle B_{1} u, u \rangle} < \frac{\pi}{2}
$$
and
$$
\omega_{2} := \sup_{u \in R \br{\Gamma} \setminus \lb 0 \rb}
\abs{\mathrm{arg} \langle B_{2}u, u \rangle} < \frac{\pi}{2}.
$$
\end{prop}
The bisectoriality of $\Pi_{B}$ ensures that the following operators
will be well-defined.
\begin{deff}
\label{def:Operators}
Suppose that the operators $\lb \Gamma, B_{1}, B_{2} \rb$ satisfy (H1)
- (H3). For $t \in \mathbb{R} \setminus \lb 0 \rb$, define the perturbation dependent operators
$$
R^{B}_{t} := \br{I + i t \Pi_{B}}^{-1}, \quad P^{B}_{t} := \br{I +
t^{2} \br{\Pi_{B}}^{2}}^{-1},
$$
$$
Q_{t}^{B} := t \Pi_{B} P_{t}^{B} \quad and \quad \Theta_{t}^{B} := t \Gamma^{*}_{B} P_{t}^{B}.
$$
When there is no perturbation, i.e. when $B_{1} =
B_{2} = I$, the $B$ will be dropped from the superscript or subscript. For example,
instead of $\Theta^{I}_{t}$ or $\Pi_{I}$ the notation $\Theta_{t}$ and
$\Pi$ will be employed.
\end{deff}
\begin{rmk}
An easy consequence of Proposition \ref{prop:Bisectoriality} is that
the operators $R^{B}_{t}$, $P^{B}_{t}$ and $Q^{B}_{t}$ are all uniformly
$\mathcal{H}$-bounded in $t$. Furthermore, on taking the Hodge decomposition
Proposition \ref{prop:Hodge} into account, it is clear that the
operators $\Theta^{B}_{t}$ will also be uniformly $\mathcal{H}$-bounded in
$t$.
\end{rmk}
The next result tells us how the operators $\Pi_{B}$ and $P^{B}_{t}$ interact
with $\Gamma$ and $\Gamma^{*}_{B}$.
\begin{lem}[{\cite[Rmk.~4.5]{axelsson2006quadratic}}]
\label{lem:Commutation}
Suppose that the operators $\lb \Gamma, B_{1}, B_{2} \rb$ satisfy (H1)
- (H3). The following relations are true.
$$
\Pi_{B} \Gamma u = \Gamma^{*}_{B} \Pi_{B} u \quad for \ all \ u \in D
\br{\Gamma^{*}_{B} \Pi_{B}},
$$
$$
\Pi_{B} \Gamma^{*}_{B} u = \Gamma \Pi_{B} u \quad for \ all \ u \in D
\br{\Gamma \Pi_{B}},
$$
$$
\Gamma P_{t}^{B} u = P^{B}_{t} \Gamma u \quad for \ all \ u \in D
\br{\Gamma}, \quad and
$$
$$
\Gamma^{*}_{B} P^{B}_{t} u = P^{B}_{t} \Gamma^{*}_{B}u \quad for \ all
\ u \in D \br{\Gamma^{*}_{B}}.
$$
\end{lem}
The subsequent lemma provides a square function estimate for the
unperturbed Dirac-type operator $\Pi$. When considering square
function estimates for the perturbed operator, there will be several
instances where the perturbed case can be reduced with the assistance
of this unperturbed estimate. Its proof follows directly from the
self-adjointness of the operator $\Pi$ and Corollary \ref{cor:SA}.
\begin{lem}[{\cite[Lem.~4.6]{axelsson2006quadratic}}]
\label{lem:Unperturbed}
Suppose that the operators $\lb \Gamma, B_{1}, B_{2} \rb$ satisfy (H1)
- (H3). The quadratic estimate
\begin{equation}
\label{eqtn:Unperturbed}
\int^{\infty}_{0} \norm{Q_{t}u}^{2} \frac{dt}{t} \leq \frac{1}{2} \norm{u}^{2}
\end{equation}
holds for all $u \in \mathcal{H}$. Equality holds on $\overline{R\br{\Pi}}$.
\end{lem}
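Indeed, taking $\psi = q$ in Corollary \ref{cor:SA}, and noting that $Q_{t} = q_{t}\br{\Pi}$ with $\Pi$ self-adjoint, the constant appearing in \eqref{eqtn:Unperturbed} is
$$
c_{q} = \int^{\infty}_{0} \br{\frac{t}{1 + t^{2}}}^{2} \, \frac{dt}{t} = \int^{\infty}_{0} \frac{t}{\br{1 + t^{2}}^{2}} \, dt = \frac{1}{2}.
$$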
The following result will play a crucial role in
the reduction of the square function estimate \eqref{eqtn:Intro1}.
\begin{prop}[{\cite[Prop.~4.8]{axelsson2006quadratic}}]
\label{prop:Reduction}
Suppose that the operators $\lb \Gamma, B_{1}, B_{2} \rb$ satisfy (H1)
- (H3). Assume that the estimate
\begin{equation}
\label{eqtn:prop:Reduction}
\int^{\infty}_{0} \norm{\Theta^{B}_{t} P_{t} u}^{2} \frac{dt}{t}
\leq c \cdot \norm{u}^{2}
\end{equation}
holds for all $u \in R \br{\Gamma}$ and some constant $c > 0$, together
with three similar estimates obtained on replacing $\lb \Gamma,
B_{1}, B_{2} \rb$ by $\lb \Gamma^{*}, B_{2}, B_{1} \rb$, $\lb
\Gamma^{*}, B_{2}^{*}, B_{1}^{*} \rb$ and $\lb \Gamma, B_{1}^{*},
B_{2}^{*} \rb$. Then $\Pi_{B}$ satisfies the quadratic estimate
\begin{equation}
\label{eqtn:prop:Reduction1}
\br{c \cdot C}^{-1} \cdot \norm{u}^{2} \leq \int^{\infty}_{0} \norm{Q^{B}_{t} u}^{2}
\frac{dt}{t} \leq c \cdot C \cdot \norm{u}^{2}
\end{equation}
for all $u \in \overline{R \br{\Pi_{B}}}$, for some $C > 0$ depending
only on (H1) - (H3).
\end{prop}
The constant dependence of \eqref{eqtn:prop:Reduction1} is not
explicitly mentioned in Proposition 4.8 of \cite{axelsson2006quadratic}, but it is
relatively easy to trace through their argument and record where
\eqref{eqtn:prop:Reduction} is used. The following corollary is proved
during the course of the proof of
{\cite[Prop.~4.8]{axelsson2006quadratic}}.
\begin{cor}[High Frequency Estimate]
\label{cor:HighFrequency}
Suppose that the operators $\lb \Gamma, B_{1}, B_{2} \rb$ satisfy (H1)
- (H3). There exists a constant $c > 0$ such
that for any $u \in R\br{\Gamma}$,
$$
\int^{\infty}_{0} \norm{\Theta^{B}_{t} \br{I - P_{t}}u}^{2}
\frac{dt}{t} \leq c \cdot \norm{u}^{2}.
$$
\end{cor}
From this point onwards, it will also be assumed that our operators
satisfy the additional hypotheses (H4) - (H6). These hypotheses are
stated below for reference.
\vspace*{0.1in}
\begin{enumerate}
\item[(H4)] The Hilbert space is $\mathcal{H} = L^{2} \br{\mathbb{R}^{n};
\mathbb{C}^{N}}$ for some $n, \, N \in \mathbb{N}^{*}$.
\\
\item[(H5)] The operators $B_{1}$ and $B_{2}$ represent
multiplication by matrix-valued functions. That is,
$$
B_{1}(f)(x) = B_{1}(x) \cdot f(x) \qquad and \qquad B_{2}(f)(x) =
B_{2}(x) \cdot f(x)
$$
for all $f \in \mathcal{H}$ and $x \in \mathbb{R}^{n}$, where $B_{1}$, $B_{2}
\in L^{\infty}\br{\mathbb{R}^{n}; \mathcal{L}\br{\mathbb{C}^{N}}}$.
\\
\item[(H6)] For every bounded Lipschitz function $\eta : \mathbb{R}^{n}
\rightarrow \mathbb{C}$, we have that $\eta D(\Gamma) \subset D
\br{\Gamma}$ and $\eta D \br{\Gamma^{*}} \subset D
\br{\Gamma^{*}}$. Moreover, the commutators $\brs{\Gamma,
\eta I}$ and $\brs{\Gamma^{*}, \eta I}$ are
multiplication operators that satisfy the bound
$$
\abs{\brs{\Gamma, \eta I}(x)}, \ \abs{\brs{\Gamma^{*}, \eta I}(x)} \leq c \abs{\nabla \eta(x)}
$$
for all $x \in \mathbb{R}^{n}$ and some constant $c > 0$.
\end{enumerate}
\vspace*{0.1in}
In contrast to the original result, our operators will not be assumed to satisfy the cancellation
condition (H7) or the coercivity condition (H8). Without these two
conditions, many of the results from Section $5$ of
\cite{axelsson2006quadratic} will fail. One notable exception to this
is that the bounded operators associated with our perturbed
Dirac-type operator $\Pi_{B}$ will satisfy off-diagonal estimates.
\begin{deff}[Off-Diagonal Bounds]
\label{def:OffDiagonal}
Define
$
\langle x \rangle := 1 + \abs{x}
$
for $x \in \mathbb{C}$ and $ \mathrm{dist}(E,F) := \inf \lb \abs{x - y} : x
\in E, y \in F \rb$ for $E$, $F \subset \mathbb{R}^{n}$.
Let $\lb U_{t} \rb_{t > 0}$ be a family of operators on $\mathcal{H} =
L^{2}\br{\mathbb{R}^{n};\mathbb{C}^{N}}$.
This collection is said to have off-diagonal bounds of order $M
> 0$ if there exists $C_{M} > 0$ such that
\begin{equation}
\label{def:eqtn:OffDiagonal}
\norm{U_{t}u}_{L^{2}\br{E}} \leq C_{M}\langle \mathrm{dist}(E,F) / t
\rangle^{-M} \norm{u}
\end{equation}
whenever $E, \, F \subset \mathbb{R}^{n}$ are Borel sets and $u \in
\mathcal{H}$ satisfies $\mathrm{supp} \, u \subset F$.
\end{deff}
\begin{prop}[{\cite[Prop.~5.2]{axelsson2006quadratic}}]
\label{prop:OffDiagonal}
Suppose that the operators $\lb \Gamma, B_{1}, B_{2} \rb$ satisfy
(H1) - (H6).
Let $U_{t}$ be given by either $R^{B}_{t}$,
$R^{B}_{-t}$, $P^{B}_{t}$, $Q^{B}_{t}$ or $\Theta^{B}_{t}$ for every $t >
0$. The collection of operators $\lb U_{t} \rb_{t
> 0}$ has off-diagonal bounds of every order $M > 0$. Moreover, the constant $C_{M}$ in the estimate
\eqref{def:eqtn:OffDiagonal} depends only on $M$ and the
hypotheses (H1) - (H6).
\end{prop}
Introduce the following dyadic decomposition of $\mathbb{R}^{n}$. Let $\Delta
= \cup_{j = - \infty}^{\infty} \Delta_{2^{j}}$ where $\Delta_{t} :=
\lb 2^{j} \left(k + (0,1]^{n}\right) : k \in \mathbb{Z}^{n} \rb$ if $2^{j-1} < t
\leq 2^{j}$ for $j \in \mathbb{Z}$. Define the averaging operator $A_{t} : \mathcal{H}
\rightarrow \mathcal{H}$ through
$$
A_{t}u(x) := \Xint-_{Q(x,t)} u(y) \, dy := \frac{1}{\abs{Q(x,t)}} \int_{Q(x,t)} u(y) \, dy
$$
for $x \in \mathbb{R}^{n}$, $t > 0$ and $u \in \mathcal{H}$, where $Q(x,t)$ is
the unique dyadic cube in $\Delta_{t}$ that contains the point $x$.
For an operator family $\lb U_{t} \rb_{t > 0}$ that satisfies
off-diagonal bounds of every order, there exists an extension $U_{t}:
L^{\infty} \br{\mathbb{R}^{n}; \mathbb{C}^{N}} \rightarrow L^{2}_{loc}
\br{\mathbb{R}^{n};\mathbb{C}^{N}}$ for each $t > 0$. This is constructed by defining
$$
U_{t} u(x) := \lim_{r \rightarrow \infty} \sum_{\substack{R \in
\Delta_{t} \\ \mathrm{dist} \br{Q,R} < r}} U_{t}
\br{\mathbbm{1}_{R} u}(x),
$$
for $x \in Q \in \Delta_{t}$ and $u \in L^{\infty} \br{\mathbb{R}^{n};
\mathbb{C}^{N}}$. The convergence of the above limit is guaranteed
by the off-diagonal bounds of $\lb U_{t} \rb_{t > 0}$. Further detail
on this construction can be found in \cite{axelsson2006quadratic},
\cite{egert2016kato}, \cite{morris2012kato} or \cite{frey2018conical}. The above
extension then allows us to introduce the principal part of the
operator $U_{t}$.
\begin{deff}
\label{def:PrincipalPart}
Let $\lb U_{t} \rb_{t > 0}$ be operators on $\mathcal{H}$ that satisfy
off-diagonal bounds of every order. For $t > 0$, the principal part of $U_{t}$ is
the operator $\zeta_{t} : \mathbb{R}^{n} \rightarrow \mathcal{L}\br{\mathbb{C}^{N}}$
defined through
$$
\brs{\zeta_{t}(x)}\br{w} := \br{U_{t} w}(x)
$$
for each $x \in \mathbb{R}^{n}$ and $w \in \mathbb{C}^{N}$.
\end{deff}
The following generalisation of Corollary $5.3$ of
\cite{axelsson2006quadratic} also holds, with an identical proof.
\begin{prop}
\label{prop:PrincipalPartBound}
Let $\lb U_{t} \rb_{t > 0}$ be operators on $\mathcal{H}$ that satisfy
off-diagonal bounds of every order. Let $\zeta_{t} : \mathbb{R}^{n} \rightarrow
\mathcal{L}\br{\mathbb{C}^{N}}$ denote the principal part of the operator
$U_{t}$. Then there exists $c > 0$ such that
$$
\Xint-_{Q} \abs{\zeta_{t}(y)}^{2} dy \leq c
$$
for all $Q \in \Delta_{t}$ and $t > 0$. Moreover, the operators $\zeta_{t} A_{t}$
are uniformly $L^{2}$-bounded in $t$.
\end{prop}
Finally, the following partial result also holds. Its proof follows in
an identical manner to the first part of the proof of Proposition 5.5
of \cite{axelsson2006quadratic}.
\begin{prop}
\label{prop:PrincPart}
Let $\lb U_{t} \rb_{t > 0}$ be operators on $\mathcal{H}$ that satisfy
off-diagonal bounds of every order. Let $\zeta_{t} : \mathbb{R}^{n} \rightarrow
\mathcal{L} \br{\mathbb{C}^{N}}$ denote the principal part of $U_{t}$. Then
there exists $c > 0$ such that
\begin{equation}
\label{eqtn:PrincPart}
\norm{\br{U_{t} - \zeta_{t} A_{t}} v} \leq c \cdot \norm{t \nabla v}
\end{equation}
for any $v \in H^{1} \br{\mathbb{R}^{n};\mathbb{C}^{N}} \subset \mathcal{H}$ and $t > 0$.
\end{prop}
\subsection{Additional Structure}
\label{subsec:Additional}
At this point, further structure will be imposed upon our
operators in order to generalise the non-homogeneous operator $\Gamma_{\abs{V}^{\frac{1}{2}}}$ defined in
\eqref{eqtn:OperatorsPotential}. This additional structure will later be exploited in order to obtain square
function estimates of the form \eqref{eqtn:prop:Reduction}.
\vspace*{0.1in}
Let $\mathbb{C}^{N} = V_{1} \oplus V_{2} \oplus V_{3}$
where $V_{1}, \, V_{2}$ and $V_{3}$ are finite-dimensional complex Hilbert spaces. Let $\mathbb{P}_{i} : \mathbb{C}^{N} \rightarrow \mathbb{C}^{N}$ be
the projection operator onto the space $V_{i}$ for $i = 1, \, 2$ and $3$. Our Hilbert space will have the following
orthogonal decomposition
$$
\mathcal{H} := L^{2} \br{\mathbb{R}^{n}; \mathbb{C}^{N}} = L^{2}\br{\mathbb{R}^{n};V_{1}} \oplus L^{2}\br{\mathbb{R}^{n};V_{2}} \oplus L^{2}(\mathbb{R}^{n};V_{3}).
$$
The notation $\mathbb{P}_{i}$ will also be used to denote the natural
projection operator from $\mathcal{H}$ onto
$L^{2}\br{\mathbb{R}^{n};V_{i}}$. For a vector $v \in \mathcal{H}$, $v_{i} \in
L^{2}\br{\mathbb{R}^{n};V_{i}}$ will denote the $i$th component for $i = 1, \,
2$ or $3$.
Let $\Gamma_{J}$ be an operator on $\mathcal{H}$ of the form
$$
\Gamma_{J} := \br{\begin{array}{c c}
0 & 0 \\ D_{J} & 0
\end{array}} := \br{\begin{array}{c c c}
0 & 0 & 0 \\
J & 0 & 0 \\
D & 0 & 0
\end{array}},
$$
where $J$ and $D$ are closed densely defined operators
$$
J : L^{2}\br{\mathbb{R}^{n};V_{1}} \rightarrow L^{2}\br{\mathbb{R}^{n};V_{2}} \ and
$$
$$
D : L^{2}\br{\mathbb{R}^{n};V_{1}} \rightarrow L^{2}\br{\mathbb{R}^{n};V_{3}},
$$
and $D_{J} : L^{2}\br{\mathbb{R}^{n};V_{1}} \rightarrow L^{2}\br{\mathbb{R}^{n};V_{2}}
\oplus L^{2}\br{\mathbb{R}^{n};V_{3}}$ is the operator $D_{J} = \br{\begin{array}{c}
J \\ D
\end{array}}$. Define the operators
$$
\Gamma_{0} := \br{\begin{array}{c c c}
0 & 0 & 0 \\
0 & 0 & 0 \\
D & 0 & 0
\end{array}}, \qquad M_{J} := \br{\begin{array}{c c c}
0 & 0 & 0 \\
J & 0 & 0 \\
0 & 0 & 0
\end{array}},
$$
$$
\Pi_{0} := \Gamma_{0} + \Gamma_{0}^{*}, \quad S_{J} := M_{J} +
M_{J}^{*} \quad and \quad \Pi_{J} := \Gamma_{J} + \Gamma_{J}^{*}.
$$
\vspace*{0.1in}
Let $B_{1}, \, B_{2} \in L^{\infty} \br{\mathbb{R}^{n}; \mathcal{L}\br{\mathbb{C}^{N}}}$ be matrix-valued multiplication operators. The
following key assumption will be imposed on our operators throughout
the entirety of this article.
\vspace*{0.2in}
\fbox{\begin{minipage}{33em}
\textbf{Key Assumption.} \textit{The family of operators
$\lb \Gamma_{0}, B_{1}, B_{2} \rb$ satisfies the conditions (H1) -
(H8) of \cite{axelsson2006quadratic} while $\lb \Gamma_{J}, B_{1},
B_{2} \rb$ satisfies (H1) - (H6).}
\end{minipage}}
\vspace*{0.2in}
For reference, the cancellation condition (H7)
and the coercivity condition (H8) are shown below for the operator
$\Gamma_{0}$.
\vspace*{0.1in}
\begin{enumerate}
\item[(H7)] For any $u \in D \br{\Gamma_{0}}$ and $v \in D
\br{\Gamma^{*}_{0}}$, both compactly supported,
$$
\int_{\mathbb{R}^{n}} \Gamma_{0} u = 0 \quad and \quad \int_{\mathbb{R}^{n}} \Gamma^{*}_{0} v
= 0.
$$
\vspace*{0.1in}
\item[(H8)] There exists $c > 0$ such that
$$
\norm{\nabla u} \leq c \cdot \norm{\Pi_{0} u}
$$
for all $u \in R \br{\Pi_{0}} \cap D \br{\Pi_{0}}$.
\end{enumerate}
\vspace*{0.1in}
\begin{example}
A typical situation in which the previous key
assumption is satisfied is when both $D$ and $J$ are partial differential
operators of order at most one. If
the perturbations $B_{1}$ and $B_{2}$ satisfy suitable accretivity
conditions then the families of operators $\lb \Gamma_{0}, B_{1}, B_{2} \rb$ and $\lb
\Gamma_{J}, B_{1}, B_{2} \rb$ will both satisfy (H1) - (H6). If, in
addition, $D$ is first-order homogeneous and there exists $c > 0$ for which
$$
\norm{\nabla u} \leq c \cdot \norm{D u}
$$
for all $u \in R \br{D^{*}} \cap D \br{D}$ and
$$
\norm{\nabla v} \leq c \cdot \norm{D^{*}v}
$$
for all $v \in R \br{D}\cap D \br{D^{*}}$ then $\lb \Gamma_{0}, B_{1},
B_{2}\rb$ will also satisfy (H7) and (H8). A particular example of
such a situation is given by the operator
$\Gamma_{\abs{V}^{\frac{1}{2}}}$ together with perturbations $B_{1}$
and $B_{2}$ as defined in \eqref{eqtn:OperatorsPotential} and
\eqref{eqtn:OperatorsPotential2} with \eqref{eqtn:Garding0} and
\eqref{eqtn:Garding} satisfied.
\end{example}
\begin{rmk}
Since the operator $\Gamma_{0}$, together with the
perturbations $B_{1}$ and $B_{2}$, satisfies all
eight conditions (H1) - (H8) of \cite{axelsson2006quadratic}, it follows that any
result from that paper must be valid for these operators.
\end{rmk}
\begin{deff}
\label{def:Operators2}
For $t \in \mathbb{R} \setminus \lb 0 \rb$, define the perturbation dependent operators
$$
\Gamma_{J,B} := B_{2}^{*} \Gamma_{J} B_{1}^{*}, \quad \Gamma_{J,B}^{*} := B_{1} \Gamma^{*}_{J} B_{2}, \quad \Pi_{J,B} :=
\Gamma_{J} + \Gamma_{J,B}^{*},
$$
$$
R^{J,B}_{t} := \br{I + i t \Pi_{J,B}}^{-1}, \quad P^{J,B}_{t} := \br{I +
t^{2} \br{\Pi_{J,B}}^{2}}^{-1},
$$
$$
Q_{t}^{J,B} := t \Pi_{J,B} P_{t}^{J,B} \quad and \quad \Theta_{t}^{J,B} := t \Gamma^{*}_{J,B} P_{t}^{J,B}.
$$
When there is no perturbation, i.e. when $B_{1} =
B_{2} = I$, the $B$ will be dropped from the superscript or subscript. For example,
instead of $\Theta^{J,I}_{t}$ the notation $\Theta^{J}_{t}$ will be
employed.
\end{deff}
We now introduce coercivity conditions to serve as a
replacement for (H8) for the operators $\lb \Gamma_{J}, B_{1}, B_{2} \rb$. These conditions will not be automatically imposed
upon our operators but, rather, will be taken as hypotheses for
our main results.
\vspace*{0.1in}
\begin{enumerate}
\item[(H8D$\alpha$)] Let $\alpha \in (1,2]$. The domain inclusion
$$
D\br{(D_{J}^{*}D_{J})^{\frac{\alpha}{2}}} \subset D \br{(D^{*}D)^{\frac{\alpha}{2}}}
$$
holds and there exists a
constant $C > 0$ such that
$$
\norm{\br{D^{*} D}^{\frac{\alpha}{2}} u} \leq C \cdot
\norm{\br{D_{J}^{*}D_{J}}^{\frac{\alpha}{2}}u}
$$
for all $u \in D \br{(D_{J}^{*}D_{J})^{\frac{\alpha}{2}}}$.
\vspace*{0.1in}
\item[(H8J)] $B_{2}$ is of the form
\begin{equation}
\label{eqtn:B2Form}
B_{2} = \br{\begin{array}{c c}
I & 0 \\ 0 & \hat{A}
\end{array}} := \br{\begin{array}{c c c}
I & 0 & 0 \\
0 & A_{22} & A_{23} \\
0 & A_{32} & A_{33}
\end{array}},
\end{equation}
where $A_{ij} \in L^{\infty} \br{\mathbb{R}^{n}; \mathcal{L} \br{V_{j},
V_{i}}}$ for $i, \, j = 2$ or $3$. The inclusion
$$
D \br{D_{J}^{*} D_{J}} \subset D \br{D_{J}^{*} \hat{A} \br{\begin{array}{c}
J \\ 0
\end{array}}}
$$
is satisfied. Furthermore, there exists a constant $C > 0$ such that for all $u
\in D \br{D_{J}^{*} D_{J}}$,
$$
\norm{ D_{J}^{*} \hat{A} \br{\begin{array}{c}
J \\ 0
\end{array}}u} \leq C \cdot
\norm{D_{J}^{*} D_{J} u}.
$$
\end{enumerate}
\vspace*{0.1in}
\begin{rmk}
\label{rmk:H8J}
The situation of most interest to us is when $A_{32} = 0$ and
$$
\norm{J^{*} A_{22} J u} = \norm{J^{*}J u}
$$
for all $u \in D \br{J^{*} A_{22} J} = D \br{J^{*} J}$. In this case,
the domain inclusion of (H8J) becomes $D(D^{*}_{J} D_{J}) \subset
D(J^{*}J)$ and the Riesz transform condition becomes the perturbation free condition
$$
\norm{J^{*}J u} \leq C \cdot \norm{D_{J}^{*} D_{J} u}
$$
for all $u \in D \br{D_{J}^{*} D_{J}}$. Furthermore, when this occurs, the Riesz
transform condition of (H8J) will
be equivalent to the condition
$$
\norm{S_{J} u} \leq C \cdot \norm{\Pi_{J} u}
$$
or equivalently
$$
\norm{\Pi_{0} u} \leq C \cdot \norm{\Pi_{J} u}
$$
for all $u \in D \br{\Pi_{J}} \cap R \br{\Pi_{J}}$.
\end{rmk}
\vspace*{0.1in}
The Kato square root estimate is a first-order Riesz transform
condition. To some extent, it then seems intuitively unnatural to use a
second-order Riesz transform condition as in (H8J) as a
hypothesis. Indeed, when the conditions of the above remark are satisfied and $J$ is a
positive operator, it will be
sufficient to consider a lower-order version of (H8J) as given below.
\vspace*{0.1in}
\begin{enumerate}
\item[(H8J$\alpha$)] Let $\alpha \in (1,2]$. The perturbation $B_{2}$
is of the form \eqref{eqtn:B2Form} with $A_{32} = 0$. $J$ is a
positive operator and
$$
\norm{J A_{22} J u} = \norm{J^{2} u}
$$
for all $u \in D \br{J A_{22} J} = D \br{J^{2}}$. The domain inclusion
$$
D \br{(D_{J}^{*} D_{J})^{\frac{\alpha}{2}}} \subset D \br{J^{\alpha}}
$$
holds. Furthermore, there exists a
constant $C > 0$ such that
$$
\norm{J^{\alpha} u} \leq C \cdot \norm{\br{D_{J}^{*} D_{J}}^{\frac{\alpha}{2}}u}
$$
for all $u \in D \br{(D_{J}^{*} D_{J})^{\frac{\alpha}{2}}}$.
\end{enumerate}
\vspace*{0.1in}
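For the potential operators of \eqref{eqtn:OperatorsPotential} and \eqref{eqtn:OperatorsPotential2} one has, at least formally, $J = \abs{V}^{\frac{1}{2}}$, $A_{22} = e^{i \cdot \mathrm{arg} V}$, $A_{32} = 0$ and $D_{J}^{*} D_{J} = \abs{V} - \Delta$, so that $\norm{J A_{22} J u} = \norm{V u} = \norm{J^{2} u}$ and the estimate in (H8J$\alpha$) reads
$$
\norm{\abs{V}^{\frac{\alpha}{2}} u} \leq C \cdot \norm{\br{\abs{V} - \Delta}^{\frac{\alpha}{2}} u},
$$
which is controlled by the quantity $\brs{V}_{\alpha}$. This is how the hypothesis $V \in \mathcal{W}_{\alpha}$ of Theorem \ref{thm:KatoPotential} will enter; the details are given in Section \ref{sec:Applications}.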
\begin{notation}
For $\alpha \in (1,2]$, let $b^{D}_{\alpha}, \, b^{J}$ and
$b^{J}_{\alpha}$ denote the smallest constants for which
(H8D$\alpha$), (H8J) and (H8J$\alpha$) are satisfied respectively. If the criterion
for one of these conditions is not met then the corresponding
constant will be set to infinity. For example, if (H8J) is not
satisfied then $b^{J} = \infty$. Also define
$$
c_{\alpha}^{J} := \br{1 + \br{b^{D}_{\alpha}}^{2} + \br{\min \lb
b^{J}, b^{J}_{\alpha} \rb}^{2}} \cdot (\alpha - 1)^{-1}.
$$
For the remainder of this article we
introduce the notation $A \lesssim B$ and $A \simeq B$ to denote that there exists a constant $C >
0$, independent of (H8D$\alpha$), (H8J) and (H8J$\alpha$), for which $A \leq C \cdot B$ and $C^{-1}
\cdot B \leq A \leq C \cdot B$ respectively. $C$ is still
allowed to depend on (H1) - (H8) for $\lb \Gamma_{0}, B_{1}, B_{2}
\rb$ and (H1) - (H6) for $\lb \Gamma_{J}, B_{1}, B_{2} \rb$. Note that
this implies that $C$ will only depend on $J$ through the constants in
(H2) for $\lb \Gamma_{J}, B_{1}, B_{2} \rb$ and it will be independent
of $\alpha$.
\end{notation}
We are now in a position to state the main result of the
non-homogeneous AKM framework.
\begin{thm}
\label{thm:MainSqEst}
Let $\lb \Gamma_{J}, B_{1}, B_{2} \rb$ be as defined
above. Consider the following square function estimate:
\begin{equation}
\label{eqtn:MainSqEst}
\int^{\infty}_{0} \norm{\Theta^{J,B}_{t} \mathbb{P}_{i} P_{t}^{J}u}^{2} \frac{dt}{t}
\lesssim C \cdot \norm{u}^{2}
\end{equation}
for all $u \in R \br{\Gamma_{J}}$, for some $C > 0$, for $i = 1, \, 2$ or
$3$.
\begin{enumerate}[(i)]
\item \label{Main1} The estimate is trivially satisfied for $i = 1$ for any
$C \geq 0$.
\vspace*{0.1in}
\item \label{Main2} If (H8J) is satisfied then \eqref{eqtn:MainSqEst} is satisfied
for $i = 2$ with $C = \br{b^{J}}^{2}$.
\vspace*{0.1in}
\item \label{Main3} If (H8J$\alpha$) is satisfied for some $\alpha \in (1,2]$
then \eqref{eqtn:MainSqEst} is satisfied
for $i = 2$ with $C = (1+\br{b_{\alpha}^{J}}^{2}) (\alpha - 1)^{-1}$.
\vspace*{0.1in}
\item \label{Main4} If (H8D$\alpha$) is satisfied for some $\alpha \in (1,2]$ and
either (H8J) or (H8J$\alpha$) is also satisfied then
\eqref{eqtn:MainSqEst} holds for $i = 3$ with constant $C = c_{\alpha}^{J}$.
\end{enumerate}
\end{thm}
\begin{proof}
The proof of (iii) and (iv) will be postponed until Section
\ref{sec:Square}. For (i), note that any $u \in R \br{\Gamma_{J}}$ may be written as
$u = \Gamma_{J} v$, so that, since $\Gamma_{J}$ commutes with the operator $P_{t}^{J}$
by Lemma \ref{lem:Commutation}, we must have $\mathbb{P}_{1}
P_{t}^{J} u = \mathbb{P}_{1} \Gamma_{J} P_{t}^{J} v = 0$, the first row of $\Gamma_{J}$ being zero.
It remains to consider (ii). Suppose that (H8J) holds. First it will be proved that for $u \in R
\br{\Gamma_{J}}$ we have $\mathbb{P}_{2} P_{t}^{J} u \in D
\br{\Gamma_{J,B}^{*}}$.
Since $u \in R \br{\Gamma_{J}}$, $u = \Gamma_{J} v$ for some
$v \in D \br{\Gamma_{J}}$. As
$$
P_{t}^{J} u = P_{t}^{J} \Gamma_{J} v = \Gamma_{J} P_{t}^{J} v
$$
by Lemma \ref{lem:Commutation} and $P_{t}^{J} u \in D \br{\Pi_{J}}$,
it follows that $\br{P_{t}^{J} v}_{1}\in D \br{D_{J}^{*} D_{J}}$,
which by (H8J) is contained in $D \br{D_{J}^{*} \hat{A} \br{\begin{array}{c}
J \\ 0
\end{array}}}$. This implies that
$$
\br{\begin{array}{c}
J (P_{t}^{J} v)_{1} \\ 0
\end{array}} = \br{\begin{array}{c}
(P_{t}^{J}u)_{2} \\ 0
\end{array}} \in D (D_{J}^{*} \hat{A})
$$
and therefore $\mathbb{P}_{2} P_{t}^{J} u \in D (\Gamma^{*}_{J,B})$.
Since $\mathbb{P}_{2} P_{t}^{J} u \in D \br{\Gamma_{J,B}^{*}}$, it
follows from Lemma \ref{lem:Commutation} that
$$
\Theta^{J,B}_{t} \mathbb{P}_{2} P_{t}^{J}u = P^{J,B}_{t}
t \Gamma^{*}_{J,B} \mathbb{P}_{2} P_{t}^{J}u.
$$
The estimate in (H8J) gives
$$
\norm{\Gamma_{J,B}^{*} \mathbb{P}_{2} \tilde{v}} \leq b^{J} \cdot
\norm{\Gamma^{*}_{J} \tilde{v}}
$$
for any $\tilde{v} \in R \br{\Gamma_{J}} \cap D (\Pi_{J})$. Since $P_{t}^{J}$ and $\Gamma_{J}$
commute by Lemma \ref{lem:Commutation}, it follows that
$$
\norm{\Gamma_{J,B}^{*} \mathbb{P}_{2} P_{t}^{J} u} \leq b^{J}
\cdot \norm{\Gamma^{*}_{J} P_{t}^{J} u}
$$
for $u \in R\br{\Gamma_{J}}$. On applying the uniform
$L^{2}$-boundedness of the $P_{t}^{J,B}$ operators,
\begin{align*}\begin{split}
\int^{\infty}_{0}\norm{\Theta^{J,B}_{t} \mathbb{P}_{2} P_{t}^{J}
u}^{2}\frac{dt}{t} &=
\int^{\infty}_{0} \norm{P_{t}^{J,B} t \Gamma^{*}_{J,B} \mathbb{P}_{2} P_{t}^{J} u}^{2}
\frac{dt}{t} \\ &\lesssim \int^{\infty}_{0} \norm{t \Gamma_{J,B}^{*} \mathbb{P}_{2} P_{t}^{J}
u}^{2} \frac{dt}{t} \\
&\leq \br{b^{J}}^{2} \int^{\infty}_{0} \norm{t \Gamma_{J}^{*} P_{t}^{J}u}^{2} \frac{dt}{t}.
\end{split}\end{align*}
On successively applying
Proposition \ref{prop:Hodge} and Lemma \ref{lem:Unperturbed} we obtain
\begin{align*}\begin{split}
\int^{\infty}_{0} \norm{\Theta^{J,B}_{t} \mathbb{P}_{2} P_{t}^{J} u}^{2}
\frac{dt}{t} &\lesssim \br{b^{J}}^{2} \int^{\infty}_{0} \norm{t \Pi_{J} P_{t}^{J}
u}^{2} \frac{dt}{t} \\
&= \br{b^{J}}^{2} \int^{\infty}_{0} \norm{Q_{t}^{J} u}^{2} \frac{dt}{t} \\
&= \frac{1}{2} \br{b^{J}}^{2} \norm{u}^{2}.
\end{split}\end{align*}
This shows that \eqref{eqtn:MainSqEst} is valid for $i = 2$ with constant $C = \br{b^{J}}^{2}$.
\end{proof}
Let's consider an estimate that serves as a dual to \eqref{eqtn:MainSqEst}.
\begin{prop}
\label{prop:PropSwap}
For $t > 0$, define the operator
$$
\underline{P}^{J,B}_{t} := \br{I + t^{2} \br{\Gamma_{J}^{*} + B_{2}
\Gamma_{J} B_{1}}^{2}}^{-1}.
$$
The square function estimate
\begin{equation}
\label{eqtn:PropSwap}
\int^{\infty}_{0} \norm{ t B_{2} \Gamma_{J} B_{1} \underline{P}^{J,B}_{t}
P_{t}^{J} u}^{2} \frac{dt}{t} \lesssim \norm{u}^{2}
\end{equation}
will hold for all $u \in \mathcal{H}$ when $B_{1} = I$.
\end{prop}
\begin{proof}
Since $\lb \Gamma_{J}, B_{1}, B_{2} \rb$ satisfies (H1) - (H6) it
follows that $\lb \Gamma_{J}^{*}, B_{2}, B_{1} \rb$ must also satisfy
(H1) - (H6). Proposition \ref{prop:Bisectoriality} then implies that the operators
$\underline{P}_{t}^{J,B}$ are well-defined and uniformly
$L^{2}$-bounded. Since $B_{1} = I$, it follows that $P_{t}^{J} u \in
D(B_{2} \Gamma_{J} B_{1})$ for any $u \in \mathcal{H}$ and therefore,
by Lemma \ref{lem:Commutation},
$$
B_{2} \Gamma_{J} B_{1} \underline{P}_{t}^{J,B} P_{t}^{J} u =
\underline{P}_{t}^{J,B} B_{2} \Gamma_{J} B_{1} P_{t}^{J} u =
\underline{P}_{t}^{J,B} B_{2} \Gamma_{J} P_{t}^{J} u.
$$
This together with the uniform $L^{2}$-boundedness of the
$\underline{P}_{t}^{J,B}$ operators implies that
\begin{align*}\begin{split}
\int^{\infty}_{0} \norm{t B_{2} \Gamma_{J} B_{1}
\underline{P}_{t}^{J,B} P_{t}^{J} u}^{2} \, \frac{dt}{t} &= \int^{\infty}_{0} \norm{\underline{P}_{t}^{J,B} t B_{2} \Gamma_{J}
P_{t}^{J}u}^{2} \frac{dt}{t} \\
&\lesssim \int^{\infty}_{0} \norm{t \Gamma_{J} P_{t}^{J} u}^{2} \frac{dt}{t}
\\
&\leq \int^{\infty}_{0} \norm{t \Pi_{J} P_{t}^{J} u}^{2}
\frac{dt}{t}\\
&= \int^{\infty}_{0} \norm{Q_{t}^{J}u}^{2} \frac{dt}{t} \\
&= \frac{1}{2} \norm{u}^{2},
\end{split}\end{align*}
where the inequality $\norm{\Gamma_{J} v} \leq \norm{\Pi_{J} v}$ for $v
\in D \br{\Pi_{J}}$ follows immediately from the three-by-three matrix form
of the operators and Lemma \ref{lem:Unperturbed} was applied to obtain
the last line.
\end{proof}
From our main result, Theorem \ref{thm:MainSqEst}, and the previous proposition, the upper and lower square function
estimates for $Q^{J,B}_{t}$ can be proved.
\begin{thm}
\label{thm:MainSqEst1}
Suppose that $B_{1} = I$. Suppose further that
(H8D$\alpha$) is satisfied for some $\alpha \in (1,2]$ and either
(H8J) or (H8J$\alpha$) is satisfied. Then
\begin{equation}
\label{eqtn:ThmMainSqEst1}
\br{c_{\alpha}^{J}}^{-1} \cdot \norm{u}^{2} \lesssim \int^{\infty}_{0}
\norm{Q^{J,B}_{t}u}^{2} \frac{dt}{t} \lesssim c_{\alpha}^{J} \cdot \norm{u}^{2}
\end{equation}
for all $u \in \overline{R \br{\Pi_{J}}}$.
\end{thm}
\begin{proof}
Proposition \ref{prop:Reduction} states that in order to prove the square function estimate
\eqref{eqtn:ThmMainSqEst1}, it is sufficient for the estimate
\eqref{eqtn:MainSqEst} to be valid for all $i = 1, \, 2$ and $3$ for the permutations of operators
$\lb \Gamma_{J}, B_{1}, B_{2} \rb$, $\lb \Gamma_{J}, B_{1}^{*},
B_{2}^{*} \rb$, $\lb \Gamma_{J}^{*},B_{2}, B_{1} \rb$ and $\lb
\Gamma_{J}^{*}, B_{2}^{*}, B_{1}^{*} \rb$. The permutations $\lb
\Gamma_{J}, B_{1}, B_{2} \rb$ and $\lb \Gamma_{J}, B_{1}^{*},
B_{2}^{*} \rb$ both come under the umbrella of Theorem
\ref{thm:MainSqEst} and the permutations $\lb \Gamma_{J}^{*}, B_{2},
B_{1} \rb$ and $\lb \Gamma_{J}^{*}, B_{2}^{*}, B_{1}^{*} \rb$ are
handled by Proposition \ref{prop:PropSwap}.
\end{proof}
From the upper and lower estimates of the previous theorem, Theorem
\ref{thm:BoundedHolomorphic} implies that $\Pi_{J,B}$ has a bounded holomorphic functional calculus.
\begin{thm}
\label{thm:PiBLCalc}
Suppose that $B_{1} = I$. Suppose further that
(H8D$\alpha$) is satisfied for some $\alpha \in (1,2]$ and either
(H8J) or (H8J$\alpha$) is satisfied. Then $\Pi_{J,B}$ has a bounded
$H^{\infty}\br{S^{o}_{\mu}}$-holomorphic functional calculus for any
$\mu \in \br{\omega_{J}, \frac{\pi}{2}}$, where
$$
\omega_{J} := \frac{1}{2}\br{\sup_{u \in R \br{\Gamma_{J}^{*}}
\setminus \lb 0 \rb} \abs{\mathrm{arg} \langle B_{1} u, u \rangle}
+ \sup_{u \in R \br{\Gamma_{J}} \setminus \lb 0 \rb} \abs{\mathrm{arg} \langle
B_{2} u, u
\rangle}}.
$$
In particular,
$$
\norm{f(\Pi_{J,B})} \lesssim c_{\alpha}^{J} \cdot \sup_{\zeta \in
S^{o}_{\mu}} \abs{f(\zeta)}
$$
for any $f \in H^{\infty}_{0} \br{S^{o}_{\mu}}$.
\end{thm}
\begin{cor}
\label{cor:LBJCalc}
Suppose that $B_{1} = I$. Suppose further that
(H8D$\alpha$) is satisfied for some $\alpha \in (1,2]$ and either
(H8J) or (H8J$\alpha$) is satisfied. The operator
$$
L_{B}^{J} := D_{J}^{*} \hat{A} D_{J}
$$
is a $2 \omega_{J}$-sectorial operator with a bounded
$H^{\infty}\br{S^{o}_{2\mu+}}$-functional calculus for any $\mu \in
\br{\omega_{J}, \frac{\pi}{2}}$ and
\begin{equation}
\label{eqtn:LBJCalc}
\norm{\sqrt{L_{B}^{J}} u} \simeq c_{\alpha}^{J} \cdot \br{\norm{J u} +
\norm{D u}}
\end{equation}
for all $u \in D \br{J} \cap D(D)$.
\end{cor}
\begin{proof}
The bounded $H^{\infty}\br{S^{o}_{2 \mu+}}$-functional calculus of
$L_{B}^{J}$ follows from the bounded
$H^{\infty}\br{S^{o}_{\mu}}$-functional calculus of $\Pi_{J,B}$ and
that $\Pi_{J,B}^{2}$ is of the form
$$
\Pi_{J,B}^{2} = \br{\begin{array}{c c c}
L_{B}^{J} & 0 & 0 \\
0 & * & * \\
0 & * & *
\end{array}}.
$$
The estimate \eqref{eqtn:LBJCalc} follows from Corollary
\ref{cor:Kato} applied to the operator $\Pi_{J,B}$ and an element
$(u,0,0) \in \mathcal{H}$ with $u \in D \br{J} \cap D(D)$.
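In a little more detail (a sketch of the bookkeeping; the precise statement invoked is
Corollary \ref{cor:Kato}): for $u \in D \br{J} \cap D(D)$, the matrix form of $\Gamma_{J}$ gives
$$
\Pi_{J,B} (u,0,0) = \Gamma_{J} (u,0,0) + \Gamma^{*}_{J,B} (u,0,0) = \br{0, J u, D u},
$$
since $\Gamma^{*}_{J,B} (u,0,0) = B_{1} \Gamma^{*}_{J} B_{2} (u,0,0) = B_{1} \Gamma^{*}_{J} (u,0,0) = 0$,
while the block form of $\Pi_{J,B}^{2}$ displayed above gives
$\sqrt{\Pi_{J,B}^{2}} \, (u,0,0) = \br{\sqrt{L_{B}^{J}} u, 0, 0}$. Since
$\norm{\br{0, Ju, Du}} \simeq \norm{Ju} + \norm{Du}$, comparing the norms of these two elements
yields \eqref{eqtn:LBJCalc}.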
\end{proof}
\section{Square Function Estimates}
\label{sec:Square}
In this section, a proof of our main result, Theorem
\ref{thm:MainSqEst}, will be provided. The first part of the proof
consists in showing that the operators $P_{t}^{J}$ can effectively be
diagonalised when estimating square function norms from above. This diagonalisation
will be applied to bound the second component of our square
function norm when (H8J$\alpha$) is satisfied, thus proving the third
part of Theorem \ref{thm:MainSqEst}.
To prove the fourth and most challenging part of Theorem \ref{thm:MainSqEst} we will use this diagonalisation and an argument similar to
that of the original result \cite{axelsson2006quadratic}. That is, a $T(1)$-type
reduction will be applied to reduce the third component of the square
function norm to a Carleson measure norm which will subsequently be
proved to be bounded.
\subsection{Diagonalisation of the $P_{t}^{J}$ Operators}
Define the bounded operator $\mathcal{P}_{t}^{J} : \mathcal{H}
\rightarrow \mathcal{H}$ through
$$
\mathcal{P}_{t}^{J} := \br{\begin{array}{c c c}
\br{I + t^{2} D_{J}^{*} D_{J}}^{-1} & 0 & 0 \\
0 & \br{I + t^{2} J J^{*}}^{-1} & 0 \\
0 & 0 & \br{I + t^{2} D D^{*}}^{-1}
\end{array}},
$$
for $t > 0$. Observe that since the operators $D_{J}^{*} D_{J}$,
$J J^{*}$ and $D D^{*}$ are all self-adjoint, it follows from Corollary
\ref{cor:SA} that square function estimates hold for each of these
operators with constant independent of $J$ and $D$. Therefore each of
these operators possesses a bounded holomorphic functional calculus
with constant independent of $J$ and $D$. It can be deduced from this
that the operators $\mathcal{P}_{t}^{J}$ are uniformly $L^{2}$-bounded
with constant independent of $J$ and $D$.
Let us prove that the operator $P_{t}^{J}$ can be
effectively diagonalised when evaluating square function
estimates. Specifically, the following theorem will be proved.
\begin{thm}
\label{thm:Diagonalisation}
Suppose that (H8D$\alpha$) is satisfied for some $\alpha \in (1,2]$. Then
$$
\int^{\infty}_{0} \norm{\mathbb{P}_{3}(\mathcal{P}_{t}^{J} -
P_{t}^{J})u}^{2} \frac{dt}{t} \lesssim (1 + (b^{D}_{\alpha})^{2})
\cdot \norm{u}^{2}
$$
for all $u \in R(\Gamma_{J})$. Suppose, in addition, that
(H8J$\alpha$) is also satisfied. Then
\begin{equation}
\label{eqtn:DiagonalEst}
\int^{\infty}_{0} \norm{\br{\mathcal{P}_{t}^{J} -
P_{t}^{J} }u}^{2} \frac{dt}{t}\lesssim c_{\alpha}^{J}
\cdot \norm{u}^{2}
\end{equation}
for all $u \in R \br{\Gamma_{J}}$.
\end{thm}
Such a
diagonalisation will be of great use in bounding our main
square function estimate \eqref{eqtn:MainSqEst} for the second and
third components. This theorem will be proved by inspecting each component separately.
\begin{rmk}
\label{rmk:TrivialFirst}
It is easy to see that the diagonalisation estimate
\eqref{eqtn:DiagonalEst} is trivially satisfied on the first component
for any $u \in \mathcal{H}$ since
$\mathbb{P}_{1} \mathcal{P}_{t}^{J} = \mathbb{P}_{1} P_{t}^{J}$.
\end{rmk}
\begin{prop}
\label{prop:LowerOrder1}
For any $u \in \mathcal{H}$,
\begin{equation}
\label{eqtn:LowerOrder1}
\int^{\infty}_{0} \norm{\mathcal{P}_{t}^{J}
\br{P_{t}^{J} - I} u}^{2} \frac{dt}{t} \lesssim \norm{u}^{2}.
\end{equation}
\end{prop}
\begin{proof}
The estimate is trivially satisfied for any $u \in N(\Pi_{J})$ since
\begin{align*}\begin{split}
\br{P_{t}^{J} - I} u &= \br{\br{I + t^{2} \Pi^{2}_{J}}^{-1} - I} u \\
&= \br{I + t^{2} \Pi^{2}_{J}}^{-1} \br{I - \br{I + t^{2} \Pi^{2}_{J}}} u \\
&= 0
\end{split}\end{align*}
for any $t > 0$. Suppose that
$u \in \overline{R \br{\Pi_{J}}}$. On applying the resolution of the
identity, Proposition \ref{prop:Resolution},
\begin{align*}\begin{split}
\int^{\infty}_{0} \norm{\mathcal{P}_{t}^{J}\br{P_{t}^{J} - I} u}^{2}
\frac{dt}{t} &= \int^{\infty}_{0} \norm{\mathcal{P}_{t}^{J} \br{P_{t}^{J} - I}
2 \int^{\infty}_{0} \br{Q_{s}^{J}}^{2} u \frac{ds}{s}}^{2}
\frac{dt}{t} \\
&\lesssim \int^{\infty}_{0} \br{\int^{\infty}_{0}
\norm{\mathcal{P}_{t}^{J}
\br{P_{t}^{J} - I} \br{Q_{s}^{J}}^{2} u} \frac{ds}{s}}^{2} \frac{dt}{t}.
\end{split}\end{align*}
The Cauchy-Schwarz inequality leads to
\begin{align}\begin{split}
\label{eqtn:Middle0}
\int^{\infty}_{0} &\norm{\mathcal{P}_{t}^{J} \br{P_{t}^{J} - I} u}^{2}
\frac{dt}{t} \lesssim \\ & \quad \int^{\infty}_{0} \br{\int^{\infty}_{0}
\norm{\mathcal{P}_{t}^{J} \br{P_{t}^{J} - I} Q_{s}^{J}} \frac{ds}{s}} \cdot
\br{\int^{\infty}_{0} \norm{ \mathcal{P}_{t}^{J} \br{P_{t}^{J} - I} Q_{s}^{J}}
\norm{Q_{s}^{J}u}^{2} \frac{ds}{s}} \frac{dt}{t}.
\end{split}\end{align}
Let's estimate the term $\norm{\mathcal{P}_{t}^{J} \br{P_{t}^{J} - I} Q_{s}^{J}} $. First
assume that $t \leq s$. On noting that $\br{P_{t}^{J} - I} Q_{s}^{J} =
\frac{t}{s} Q_{t}^{J} \br{P_{s}^{J} - I}$ we obtain
\begin{equation}
\label{eqtn:Middle01}
\norm{\mathcal{P}_{t}^{J} \br{P_{t}^{J} - I} Q_{s}^{J}} \lesssim \norm{\br{P_{t}^{J} - I}
Q_{s}^{J}} \lesssim \frac{t}{s} \norm{Q_{t}^{J} \br{P_{s}^{J} - I}} \lesssim \frac{t}{s}.
\end{equation}
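For completeness, the identity used above can be verified directly: since
$P_{t}^{J} = \br{I + t^{2} \Pi_{J}^{2}}^{-1}$ and $Q_{t}^{J} = t \Pi_{J} P_{t}^{J}$,
all of the factors below commute as functions of $\Pi_{J}$ and
\begin{align*}\begin{split}
\br{P_{t}^{J} - I} Q_{s}^{J} &= - t^{2} \Pi_{J}^{2} \br{I + t^{2} \Pi_{J}^{2}}^{-1} s \Pi_{J} \br{I + s^{2} \Pi_{J}^{2}}^{-1} \\
&= \frac{t}{s} \, t \Pi_{J} \br{I + t^{2} \Pi_{J}^{2}}^{-1} \br{- s^{2} \Pi_{J}^{2}} \br{I + s^{2} \Pi_{J}^{2}}^{-1} \\
&= \frac{t}{s} Q_{t}^{J} \br{P_{s}^{J} - I}.
\end{split}\end{align*}
The analogous identity $P_{t}^{J} Q_{s}^{J} = \frac{s}{t} Q_{t}^{J} P_{s}^{J}$, used in the case
$t > s$ below, follows in the same way.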
Next, suppose that $t > s$. Then the equality $P_{t}^{J} Q_{s}^{J} =
\frac{s}{t} Q_{t}^{J} P_{s}^{J}$ gives
$$
\norm{\mathcal{P}_{t}^{J} \br{P_{t}^{J} - I}Q_{s}^{J}} \lesssim \norm{P_{t}^{J} Q_{s}^{J}} +
\norm{\mathcal{P}_{t}^{J} Q_{s}^{J}} \lesssim \frac{s}{t}
+ \norm{\mathcal{P}_{t}^{J} Q_{s}^{J}}.
$$
The term $\mathcal{P}_{t}^{J} Q_{s}^{J}$ will be considered
component-wise. For the first component, recall that $\mathbb{P}_{1}
\mathcal{P}_{t}^{J} = \mathbb{P}_{1} P_{t}^{J}$ and observe that
\begin{align*}\begin{split}
\norm{\mathbb{P}_{1} \mathcal{P}_{t}^{J} Q_{s}^{J}} &=
\norm{\mathbb{P}_{1} P_{t}^{J} s \Pi_{J} P_{s}^{J}} \\
&= \frac{s}{t} \norm{\mathbb{P}_{1} P_{t}^{J} t \Pi_{J} P_{s}^{J}} \\
&= \frac{s}{t} \norm{\mathbb{P}_{1} Q_{t}^{J} P_{s}^{J}} \\
&\lesssim \frac{s}{t}.
\end{split}\end{align*}
For the second component, note that
$$
\mathbb{P}_{2} \mathcal{P}_{t}^{J} = \br{I + t^{2} S_{J}^{2}}^{-1} \mathbb{P}_{2}.
$$
Also observe
$$
\mathbb{P}_{2} \Pi_{J} u = \mathbb{P}_{2} \Pi_{J} \mathbb{P}_{1} u =
\mathbb{P}_{2} S_{J} \mathbb{P}_{1} u
$$
for $u \in D(\Pi_{J})$.
This gives
\begin{align*}\begin{split}
\norm{\mathbb{P}_{2} \mathcal{P}_{t}^{J} Q_{s}^{J}} &=
\norm{\br{I + t^{2} S_{J}^{2}}^{-1} \mathbb{P}_{2} s \Pi_{J}
P_{s}^{J}} \\
&= \norm{(I + t^{2} S_{J}^{2})^{-1} \mathbb{P}_{2} s S_{J}
\mathbb{P}_{1} P_{s}^{J}} \\
&= \frac{s}{t} \norm{\mathbb{P}_{2} t S_{J} (I + t^{2}
S_{J}^{2})^{-1} \mathbb{P}_{1} P_{s}^{J}} \\
&\lesssim \frac{s}{t},
\end{split}\end{align*}
where the last line follows from the fact that $S_{J}$ is self-adjoint
and therefore possesses a bounded holomorphic functional calculus with
constant independent of $J$. Lastly, for the third component, we have
$$
\mathbb{P}_{3} \mathcal{P}_{t}^{J} = \mathbb{P}_{3} P_{t}^{0} =
P_{t}^{0} \mathbb{P}_{3}
$$
and
$$
\mathbb{P}_{3} \Pi_{J} u = \mathbb{P}_{3} \Pi_{J} \mathbb{P}_{1} u =
\mathbb{P}_{3} \Pi_{0} \mathbb{P}_{1} u
$$
for $u \in D(\Pi_{J})$. This leads to
\begin{align*}\begin{split}
\norm{\mathbb{P}_{3} \mathcal{P}_{t}^{J} Q_{s}^{J}}
&= \norm{P_{t}^{0} \mathbb{P}_{3} s \Pi_{J} P_{s}^{J}} \\
&= \norm{P_{t}^{0} \mathbb{P}_{3} s \Pi_{0} \mathbb{P}_{1} P_{s}^{J}}
\\
&= \frac{s}{t} \norm{\mathbb{P}_{3} t \Pi_{0} P_{t}^{0}
\mathbb{P}_{1} P_{s}^{J}} \\
&\lesssim \frac{s}{t}.
\end{split}\end{align*}
Putting everything together gives
\begin{equation}
\label{eqtn:PrincMin}
\norm{ \mathcal{P}_{t}^{J} \br{P_{t}^{J} - I} Q_{s}^{J}} \lesssim \min \lb
\frac{t}{s}, \frac{s}{t} \rb.
\end{equation}
This bound can then be applied to \eqref{eqtn:Middle0} to give \eqref{eqtn:LowerOrder1}.
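In more detail: by \eqref{eqtn:PrincMin} the first factor in \eqref{eqtn:Middle0} is bounded
uniformly in $t$, since $\int^{\infty}_{0} \min \lb \frac{t}{s}, \frac{s}{t} \rb \frac{ds}{s} = 2$.
On applying \eqref{eqtn:PrincMin} to the second factor as well, Tonelli's theorem and Lemma
\ref{lem:Unperturbed} then give
\begin{align*}\begin{split}
\int^{\infty}_{0} \norm{\mathcal{P}_{t}^{J} \br{P_{t}^{J} - I} u}^{2} \frac{dt}{t} &\lesssim
\int^{\infty}_{0} \int^{\infty}_{0} \min \lb \frac{t}{s}, \frac{s}{t} \rb \norm{Q_{s}^{J} u}^{2}
\frac{ds}{s} \frac{dt}{t} \\
&\lesssim \int^{\infty}_{0} \norm{Q_{s}^{J} u}^{2} \frac{ds}{s} \lesssim \norm{u}^{2}.
\end{split}\end{align*}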
\end{proof}
\begin{prop}
\label{prop:LowerOrder2}
Suppose that the condition (H8J$\alpha$) is satisfied for some
$\alpha \in (1,2]$. Then
$$
\int^{\infty}_{0} \norm{\mathbb{P}_{2} \br{I - \mathcal{P}_{t}^{J}}
P_{t}^{J} u}^{2} \frac{dt}{t} \lesssim (\alpha - 1)^{-1} \br{b_{\alpha}^{J}}^{2} \cdot \norm{u}^{2}
$$
for any $u \in R \br{\Gamma_{J}}$.
\end{prop}
\begin{proof}
It will first be proved that $\br{P_{t}^{J} u}_{2} \in D
\br{J^{\alpha - 1}}$. Since $\Gamma_{J}$ commutes with $P_{t}^{J}$ we must have
$$
P_{t}^{J} u = \Gamma_{J} \br{\begin{array}{c}
v \\ 0 \\ 0
\end{array}}
$$
for some $\br{v,0,0} \in D \br{\Gamma_{J}}$. This then gives
$\br{P_{t}^{J} u}_{2} = J v$. Therefore $\br{P_{t}^{J}u}_{2} \in
D(J^{\alpha - 1})$ if and only if $v \in D(J^{\alpha})$. We know that
$P_{t}^{J} u \in D(\Pi_{J})$ which implies that $v \in D(D_{J}^{*}
D_{J})$ and therefore $v \in D \br{(D_{J}^{*}
D_{J})^{\frac{\alpha}{2}}}$. Our hypothesis
(H8J$\alpha$) then tells us that $v \in D(J^{\alpha})$
which allows us to conclude, using the previous reasoning, that
$\br{P_{t}^{J}u}_{2} \in D(J^{\alpha - 1})$.
Since $(P_{t}^{J}u)_{2} \in D(J^{\alpha - 1})$, it follows that
\begin{align}\begin{split}
\label{eqtn:prop:LowerOrder21}
\mathbb{P}_{2} \br{I - \mathcal{P}_{t}^{J}}P_{t}^{J}u &= \br{0,t^{2} J^{2}
\br{I + t^{2} J^{2}}^{-1} \br{ P_{t}^{J} u}_{2},0} \\
&= \br{0, g_{\alpha}^{t}\br{J} t^{\alpha - 1} J^{\alpha - 1} \br{
P_{t}^{J} u}_{2},0},
\end{split}\end{align}
where $g_{\alpha}^{t} : S^{o}_{\mu} \rightarrow \mathbb{C}$ is the bounded
holomorphic function defined through
\begin{equation}
\label{eqtn:galphat}
g_{\alpha}^{t}(z) := \frac{t^{2} z^{2}}{\br{1 + t^{2} z^{2}} t^{\alpha -
1} \br{\sqrt{z^{2}}}^{\alpha - 1}}.
\end{equation}
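Note that the family $\lb g_{\alpha}^{t} \rb_{t > 0}$ is uniformly bounded on the sector, with
constant depending only on $\mu < \frac{\pi}{2}$: since $\abs{1 + t^{2} z^{2}} \gtrsim 1 + \abs{t z}^{2}$
for $z \in S^{o}_{\mu}$ and $\abs{\sqrt{z^{2}}} = \abs{z}$,
$$
\abs{g_{\alpha}^{t}(z)} \lesssim \frac{\abs{t z}^{3 - \alpha}}{1 + \abs{t z}^{2}} \leq
\min \lb \abs{t z}^{3 - \alpha}, \abs{t z}^{1 - \alpha} \rb \leq 1,
$$
the final inequality holding because $\alpha \in (1,2]$. In particular the bound is independent of $t$.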
As $J$ is self-adjoint, it follows from Corollary \ref{cor:SA} that $J$ possesses a bounded holomorphic functional calculus with
constant independent of $J$. This, together with
\eqref{eqtn:prop:LowerOrder21}, gives
$$
\norm{\mathbb{P}_{2} \br{I - \mathcal{P}_{t}^{J}} P_{t}^{J} u}
\lesssim \norm{t^{\alpha - 1} J^{\alpha - 1} \br{P_{t}^{J} u}_{2}}.
$$
On applying (H8J$\alpha$),
\begin{align*}\begin{split}
\norm{J^{\alpha - 1} \br{P_{t}^{J} u}_{2}} &= \norm{J^{\alpha}
v} \\
&\leq b_{\alpha}^{J} \cdot \norm{\br{D_{J}^{*}D_{J}}^{\frac{\alpha}{2}} v} \\
&= b_{\alpha}^{J} \cdot \norm{\abs{\Pi_{J}}^{\alpha}
(v,0,0)} \\
&\simeq b_{\alpha}^{J} \cdot \norm{\abs{\Pi_{J}}^{\alpha - 1}
\Pi_{J} (v,0,0)} \\
&= b_{\alpha}^{J} \cdot \norm{\abs{\Pi_{J}}^{\alpha - 1}
P_{t}^{J} u},
\end{split}\end{align*}
where $\abs{\Pi_{J}} := \sqrt{\Pi_{J}^{2}}$ and in the fourth line we applied the bounded holomorphic
functional calculus of the operator $\Pi_{J}$. Therefore
\begin{align*}\begin{split}
\int^{\infty}_{0} \norm{\mathbb{P}_{2} \br{I - \mathcal{P}_{t}^{J}}
P_{t}^{J} u}^{2} \frac{dt}{t} &\lesssim \br{b_{\alpha}^{J}}^{2} \cdot
\int^{\infty}_{0}\norm{t^{\alpha - 1} \abs{\Pi_{J}}^{\alpha - 1}
P_{t}^{J} u}^{2} \frac{dt}{t} \\
&\lesssim (\alpha - 1)^{-1} \br{b_{\alpha}^{J}}^{2} \cdot \norm{u}^{2},
\end{split}\end{align*}
where we used the fact that $\Pi_{J}$ is self-adjoint
and Corollary \ref{cor:SA} in the last line.
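For orientation, we indicate where the factor $(\alpha - 1)^{-1}$ enters. By the spectral theorem
for the self-adjoint operator $\Pi_{J}$, the quadratic estimate used in the last line reduces to the
scalar bound, uniform in the spectral parameter $\lambda \neq 0$,
$$
\int^{\infty}_{0} \br{\frac{\br{t \abs{\lambda}}^{\alpha - 1}}{1 + t^{2} \lambda^{2}}}^{2} \frac{dt}{t}
= \int^{\infty}_{0} \frac{r^{2 (\alpha - 1)}}{\br{1 + r^{2}}^{2}} \frac{dr}{r} \lesssim \frac{1}{\alpha - 1},
$$
the integral converging precisely because $\alpha > 1$ and degenerating at this rate as
$\alpha \rightarrow 1^{+}$.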
\end{proof}
\begin{prop}
\label{prop:ThirdAtPt1}
Suppose that (H8D$\alpha$) is satisfied for some $\alpha \in
(1,2]$. Then
\begin{equation}
\label{eqtn:ThirdAtPt1}
\int^{\infty}_{0} \norm{\mathbb{P}_{3} \br{I - \mathcal{P}_{t}^{J}}
P_{t}^{J} u}^{2} \frac{dt}{t} \lesssim (\alpha - 1)^{-1} \br{b_{\alpha}^{D}}^{2} \cdot \norm{u}^{2}
\end{equation}
for all $u \in R \br{\Gamma_{J}}$.
\end{prop}
\begin{proof}
First note that the left-hand side of \eqref{eqtn:ThirdAtPt1} can be
re-written as
\begin{align*}\begin{split}
\int^{\infty}_{0} \norm{\mathbb{P}_{3} \br{I - \mathcal{P}_{t}^{J}}
P_{t}^{J} u}^{2} \frac{dt}{t} &= \int^{\infty}_{0}
\norm{\mathbb{P}_{3} \br{I - P_{t}^{0}} P_{t}^{J}u}^{2} \frac{dt}{t}
\\
&= \int^{\infty}_{0}\norm{\br{I - P_{t}^{0}} \mathbb{P}_{3}
P_{t}^{J}u}^{2} \frac{dt}{t}.
\end{split}\end{align*}
It will be shown that $\mathbb{P}_{3} P_{t}^{J} u \in D
\br{\abs{\Pi_{0}}^{\alpha - 1}}$. Since $\Gamma_{J}$ commutes with the
operator $P_{t}^{J}$ and $u \in R(\Gamma_{J})$, we must have
$P_{t}^{J} u = \Gamma_{J} P_{t}^{J}(v,0,0)$ for some $(v,0,0) \in
D(\Gamma_{J})$. This implies that
$$
\mathbb{P}_{3} P_{t}^{J} u = \mathbb{P}_{3} \Gamma_{J} P_{t}^{J}
(v,0,0) = \mathbb{P}_{3} \Pi_{0} P_{t}^{J}(v,0,0)
$$
and therefore $\mathbb{P}_{3} P_{t}^{J} u \in D
\br{\abs{\Pi_{0}}^{\alpha - 1}}$ will follow from
$P_{t}^{J}(v,0,0) \in D(\abs{\Pi_{0}}^{\alpha - 1} \mathbb{P}_{3}
\Pi_{0})$ which itself will follow from $P_{t}^{J}(v,0,0) \in D (\abs{\Pi_{0}}^{\alpha - 1} \Pi_{0})$. The bounded
holomorphic functional calculus of the operator $\Pi_{0}$ tells us
that $D(\abs{\Pi_{0}}^{\alpha - 1} \Pi_{0}) = D
(\abs{\Pi_{0}}^{\alpha})$ and it is therefore sufficient to prove that
$P_{t}^{J} (v,0,0) \in D(\abs{\Pi_{0}}^{\alpha})$. Since
$P_{t}^{J}(v,0,0)$ is non-zero only in the first component, this in turn is
equivalent to proving that
$$
(P_{t}^{J}(v,0,0))_{1} \in D\br{(D^{*}D)^{\frac{\alpha}{2}}}.
$$
This however follows directly from our hypothesis
(H8D$\alpha$) and the fact that $(P_{t}^{J}(v,0,0))_{1}
\in D \br{D_{J}^{*}D_{J}} \subset D\br{(D_{J}^{*}D_{J})^{\frac{\alpha}{2}}}$. This completes the
proof of our claim that $\mathbb{P}_{3} P_{t}^{J}u \in D
\br{\abs{\Pi_{0}}^{\alpha - 1}}$ .
Since $\mathbb{P}_{3} P_{t}^{J} u \in D \br{\abs{\Pi_{0}}^{\alpha - 1}}$ we must have
$$
\br{I - P_{t}^{0}} \mathbb{P}_{3} P_{t}^{J} u = g_{\alpha}^{t} \br{\Pi_{0}}
t^{\alpha - 1} \abs{\Pi_{0}}^{\alpha - 1} \mathbb{P}_{3} P_{t}^{J} u,
$$
where $g_{\alpha}^{t}$ is as defined in \eqref{eqtn:galphat}.
From the bounded holomorphic functional calculus of $\Pi_{0}$ we then obtain
$$
\int^{\infty}_{0} \norm{\mathbb{P}_{3} \br{I - \mathcal{P}_{t}^{J}}
P_{t}^{J} u}^{2} \frac{dt}{t} \lesssim \int^{\infty}_{0}
\norm{\mathbb{P}_{3} t^{\alpha - 1} \abs{\Pi_{0}}^{\alpha - 1}
P_{t}^{J} u}^{2} \frac{dt}{t}.
$$
On recalling that $P_{t}^{J} u = \Gamma_{J} P_{t}^{J} \br{v, 0, 0}$ for some $\br{v, 0 ,
0} \in D \br{\Gamma_{J}}$,
\begin{align*}\begin{split}
\int^{\infty}_{0}
\norm{\mathbb{P}_{3} t^{\alpha - 1} \abs{\Pi_{0}}^{\alpha - 1}
P_{t}^{J} u}^{2} \frac{dt}{t} &= \int^{\infty}_{0}
\norm{\mathbb{P}_{3} t^{\alpha - 1} \abs{\Pi_{0}}^{\alpha - 1}
\Gamma_{J} P_{t}^{J} (v,0,0)}^{2} \frac{dt}{t} \\
&= \int^{\infty}_{0}\norm{\mathbb{P}_{3}
t^{\alpha - 1} \abs{\Pi_{0}}^{\alpha - 1}
\Pi_{0} P_{t}^{J} (v,0,0)}^{2} \frac{dt}{t}.
\end{split}\end{align*}
On exploiting the bounded holomorphic functional calculus of the
operator $\Pi_{0}$ once more,
$$
\int^{\infty}_{0}\norm{\mathbb{P}_{3}
t^{\alpha - 1} \abs{\Pi_{0}}^{\alpha - 1}
\Pi_{0} P_{t}^{J} (v,0,0)}^{2} \frac{dt}{t} \lesssim \int^{\infty}_{0}
\norm{t^{\alpha - 1} \abs{\Pi_{0}}^{\alpha} P_{t}^{J}
(v,0,0)}^{2} \frac{dt}{t}.
$$
Observe that since $P_{t}^{J}(v,0,0)$ is non-zero only in the first entry,
\begin{align*}\begin{split}
\norm{\abs{\Pi_{0}}^{\alpha} P_{t}^{J} (v,0,0)} &= \norm{\br{D^{*}D}^{\frac{\alpha}{2}} \br{P_{t}^{J}(v,0,0)}_{1}} \\
&\leq
b_{\alpha}^{D} \cdot
\norm{\br{D_{J}^{*}
D_{J}}^{\frac{\alpha}{2}} \br{P_{t}^{J}(v,0,0)}_{1}}
\\
&=
b_{\alpha}^{D}
\cdot\norm{\abs{\Pi_{J}}^{\alpha} P_{t}^{J} (v,0,0)}.
\end{split}\end{align*}
On then applying the bounded holomorphic functional calculus of the
operator $\Pi_{J}$,
\begin{align*}\begin{split}
\br{b_{\alpha}^{D}}^{2} \cdot \int^{\infty}_{0}
\norm{t^{\alpha - 1} \abs{\Pi_{J}}^{\alpha} P_{t}^{J} (v,0,0)}^{2} \frac{dt}{t} &\lesssim \br{b_{\alpha}^{D}}^{2}
\int^{\infty}_{0} \norm{t^{\alpha - 1} \abs{\Pi_{J}}^{\alpha - 1}
\Pi_{J} P_{t}^{J} (v,0,0)}^{2} \frac{dt}{t} \\
&= \br{b_{\alpha}^{D}}^{2} \cdot \int^{\infty}_{0} \norm{t^{\alpha - 1}
\abs{\Pi_{J}}^{\alpha - 1} P_{t}^{J} u}^{2}
\frac{dt}{t} \\
&\lesssim (\alpha - 1)^{-1} \br{b_{\alpha}^{D}}^{2} \cdot \norm{u}^{2},
\end{split}\end{align*}
where we used the fact that $\Pi_{J}$ is self-adjoint
and Corollary \ref{cor:SA} in the final line.
\end{proof}
Combining Propositions \ref{prop:LowerOrder1}, \ref{prop:LowerOrder2} and \ref{prop:ThirdAtPt1}
together then gives Theorem \ref{thm:Diagonalisation}. With this
diagonalisation in hand we can now return to our proof of Theorem
\ref{thm:MainSqEst}. In particular the second component of our square
function norm will now be bounded.
\vspace*{0.1in}
\textsc{Proof of Theorem} \ref{thm:MainSqEst}.\ref{Main3}. On splitting the second component of our square function norm from above,
\begin{align*}\begin{split}
\int^{\infty}_{0} \norm{\Theta^{J,B}_{t} \mathbb{P}_{2} P_{t}^{J}
u}^{2} \frac{dt}{t} \lesssim \int^{\infty}_{0}
\norm{\Theta^{J,B}_{t} \mathbb{P}_{2} \br{\mathcal{P}_{t}^{J} -
P_{t}^{J}}u}^{2} \frac{dt}{t} + \int^{\infty}_{0}
\norm{\Theta^{J,B}_{t} \mathbb{P}_{2} \mathcal{P}_{t}^{J} u}^{2} \frac{dt}{t}.
\end{split}\end{align*}
The uniform $L^{2}$-boundedness of the operators $\Theta^{J,B}_{t}$
together with Propositions \ref{prop:LowerOrder1} and
\ref{prop:LowerOrder2} give
$$
\int^{\infty}_{0}\norm{\Theta^{J,B}_{t} \mathbb{P}_{2}
\br{\mathcal{P}_{t}^{J} - P_{t}^{J}} u}^{2} \frac{dt}{t} \lesssim
\br{1 + \br{b_{\alpha}^{J}}^{2}} \br{\alpha - 1}^{-1} \cdot \norm{u}^{2}.
$$
It remains to bound the second term in the splitting. In order to do
so, it will first be shown that $\mathbb{P}_{2}
\mathcal{P}_{t}^{J} u \in D (\Gamma^{*}_{J,B})$. From the definition
of the adjoint, $\mathbb{P}_{2} \mathcal{P}_{t}^{J} u \in
D(\Gamma^{*}_{J,B})$ if and only if there exists some $u' \in
\mathcal{H}$ such that
$$
\langle \Gamma_{J,B} w, \mathbb{P}_{2} \mathcal{P}_{t}^{J} u \rangle =
\langle w, u' \rangle
$$
for all $w \in D(\Gamma_{J,B})$, where $\Gamma_{J,B} = B_{2}^{*}
\Gamma_{J} B_{1}^{*}$. For $w \in D(\Gamma_{J,B})$,
\begin{align}\begin{split}
\label{eqtn:AdjointSecondComp}
\langle \Gamma_{J,B} w, \mathbb{P}_{2} \mathcal{P}_{t}^{J} u \rangle
&= \langle B_{2}^{*} \Gamma_{J} B_{1}^{*} w, \mathbb{P}_{2}
\mathcal{P}_{t}^{J} u \rangle \\
&= \langle \Gamma_{J} B_{1}^{*} w, B_{2} \mathbb{P}_{2}
\mathcal{P}_{t}^{J} u \rangle \\
&= \langle M_{J} B_{1}^{*} w, B_{2} \mathbb{P}_{2}
\mathcal{P}_{t}^{J} u \rangle,
\end{split}\end{align}
where in the last line we used the fact that $A_{32} = 0$ by
(H8J$\alpha$) and therefore $B_{2} \mathbb{P}_{2} =
\mathbb{P}_{2} B_{2} \mathbb{P}_{2}$.
This proves that $\mathbb{P}_{2}\mathcal{P}_{t}^{J}u \in
D(\Gamma^{*}_{J,B})$ will follow from $B_{2} \mathbb{P}_{2}
\mathcal{P}_{t}^{J} u \in D \br{M_{J}^{*}}$ which, in turn, will
follow from $(\mathcal{P}_{t}^{J}u)_{2} \in D(J^{*}A_{22}) = D(J A_{22})$.
Note that $u \in R \br{\Gamma_{J}}$ implies
that $u = \Gamma_{J}(v,0,0)$ for some $(v,0,0)\in D
\br{\Gamma_{J}}$. Then
\begin{align*}\begin{split}
\br{\mathcal{P}_{t}^{J}u}_{2} &= \br{I + t^{2} J^{2}}^{-1} J v \\
&= J \br{I + t^{2} J^{2}}^{-1} v.
\end{split}\end{align*}
(H8J$\alpha$) states that $D(J^{2}) = D(J A_{22}J)$ with $\norm{J^{2}
\tilde{u}} = \norm{J A_{22} J \tilde{u}}$ for $\tilde{u} \in D
\br{J^{2}}$. Since $\br{I + t^{2} J^{2}}^{-1}v \in D \br{J^{2}}$ we
must have $\br{I + t^{2} J^{2}}^{-1}v \in D \br{J A_{22}J}$ and
therefore $\br{\mathcal{P}_{t}^{J} u}_{2} \in D \br{J A_{22}}$. This
allows us to conclude that $\mathbb{P}_{2} \mathcal{P}_{t}^{J}u \in D
\br{\Gamma_{J,B}^{*}}$. Moreover, from
\eqref{eqtn:AdjointSecondComp} we know that $\Gamma_{J,B}^{*}
\mathbb{P}_{2} \mathcal{P}_{t}^{J}u = B_{1} M_{J}^{*} B_{2}
\mathbb{P}_{2} \mathcal{P}_{t}^{J}u$ and therefore
\begin{align*}\begin{split}
\norm{\Gamma^{*}_{J,B} \mathbb{P}_{2} \mathcal{P}_{t}^{J}u} &=
\norm{B_{1} M_{J}^{*} B_{2} \mathbb{P}_{2} \mathcal{P}_{t}^{J}u} \\
&\lesssim \norm{M_{J}^{*} B_{2} \mathbb{P}_{2} \mathcal{P}_{t}^{J} u} \\
&= \norm{J A_{22} J \br{I + t^{2}J^{2}}^{-1}v} \\
&= \norm{J^{2} \br{I + t^{2} J^{2}}^{-1} v} \\
&= \norm{J \br{I + t^{2} J^{2}}^{-1} u_{2}}.
\end{split}\end{align*}
This together with Lemma \ref{lem:Commutation} gives
\begin{align*}\begin{split}
\int^{\infty}_{0} \norm{\Theta^{J,B}_{t} \mathbb{P}_{2} \mathcal{P}_{t}^{J} u}^{2}
\frac{dt}{t} &= \int^{\infty}_{0} \norm{P_{t}^{J,B} t
\Gamma^{*}_{J,B} \mathbb{P}_{2}
\mathcal{P}_{t}^{J} u}^{2} \frac{dt}{t} \\
&\lesssim \int^{\infty}_{0} \norm{t \Gamma_{J,B}^{*}
\mathbb{P}_{2} \mathcal{P}_{t}^{J} u}^{2} \frac{dt}{t} \\
&\lesssim \int^{\infty}_{0} \norm{t J \br{I + t^{2}J^{2}}^{-1} u_{2}}^{2} \frac{dt}{t}.
\end{split}\end{align*}
The theorem then follows from the fact that $J$ is self-adjoint and
therefore satisfies square function estimates with constant
independent of $J$ by Corollary \ref{cor:SA}.
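That is,
$$
\int^{\infty}_{0} \norm{t J \br{I + t^{2} J^{2}}^{-1} u_{2}}^{2} \frac{dt}{t} \lesssim \norm{u_{2}}^{2} \leq \norm{u}^{2},
$$
which, combined with the bound for the first term of the splitting, gives \eqref{eqtn:MainSqEst}
for $i = 2$ with constant $C = \br{1 + \br{b_{\alpha}^{J}}^{2}} \br{\alpha - 1}^{-1}$.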
\BoldSquare
\subsection{The Third Component}
\label{subsec:ThirdComponent}
This section is dedicated to bounding the third component of our
square function norm and thus proving the fourth and final part of
Theorem \ref{thm:MainSqEst}. Specifically, it will be proved that when
(H8D$\alpha$) is satisfied for some $\alpha \in (1,2]$ and either
(H8J) or (H8J$\alpha$) is satisfied the estimate
\begin{equation}
\label{eqtn:ReducedMain}
\int^{\infty}_{0} \norm{\Theta^{J,B}_{t} \mathbb{P}_{3} P_{t}^{J} u}^{2}
\frac{dt}{t} \lesssim c_{\alpha}^{J} \cdot \norm{u}^{2}
\end{equation}
will hold for any $u \in R \br{\Gamma_{J}}$. A similar argument to that of
\cite{axelsson2006quadratic} will be used, but one will need to keep
track of the effect of the projection $\mathbb{P}_{3}$.
\subsubsection{$T(1)$-Reduction}
Our first step towards a $T(1)$-reduction is to use the splitting
$$
\int^{\infty}_{0} \norm{\Theta^{J,B}_{t} \mathbb{P}_{3} P_{t}^{J}
u}^{2} \frac{dt}{t} \lesssim \int^{\infty}_{0}
\norm{\Theta^{J,B}_{t} \mathbb{P}_{3} \br{P_{t}^{J} -
\mathcal{P}_{t}^{J}} u}^{2} \frac{dt}{t} + \int^{\infty}_{0}
\norm{\Theta^{J,B}_{t} \mathbb{P}_{3} \mathcal{P}_{t}^{J} u}^{2} \frac{dt}{t}.
$$
The uniform $L^{2}$-boundedness of the operators $\Theta^{J,B}_{t}$
and Theorem \ref{thm:Diagonalisation} can
be applied to the first term to obtain
$$
\int^{\infty}_{0} \norm{\Theta^{J,B}_{t} \mathbb{P}_{3}
\br{\mathcal{P}_{t}^{J} - P_{t}^{J}}u}^{2} \frac{dt}{t} \lesssim
c_{\alpha}^{J} \cdot \norm{u}^{2}.
$$
On recalling that $\mathbb{P}_{3} \mathcal{P}_{t}^{J} = \mathbb{P}_{3}
P_{t}^{0}$, this reduces the task of proving our square function
estimate to obtaining the bound
$$
\int^{\infty}_{0}\norm{\Theta^{J,B}_{t} \mathbb{P}_{3} P_{t}^{0}
u}^{2} \frac{dt}{t} \lesssim c_{\alpha}^{J} \cdot \norm{u}^{2}.
$$
Introduce the operators $\tilde{\Theta}^{J,B}_{t} := \Theta^{J,B}_{t} \mathbb{P}_{3}$.
Let $\gamma^{J,B}_{t}$ and $\tilde{\gamma}^{J,B}_{t}$ denote the
principal parts of the operators $\Theta^{J,B}_{t}$ and
$\tilde{\Theta}^{J,B}_{t}$ respectively. That is, they are the
multiplication operators defined through
$$
\gamma^{J,B}_{t}(x)w := \Theta^{J,B}_{t}(w)(x) \quad \text{and} \quad \tilde{\gamma}^{J,B}_{t}(x)(w) := \br{\Theta^{J,B}_{t} \mathbb{P}_{3}}(w)(x),
$$
for $w \in \mathbb{C}^{N}$ and $x \in \mathbb{R}^{n}$. Evidently we must have
$\tilde{\gamma}^{J,B}_{t}(x)w = \gamma^{J,B}_{t}(x)
\mathbb{P}_{3}w$.
Our square function norm can be reduced to this
principal part by applying the splitting
\begin{equation}
\label{eqtn:MainProof1}
\int^{\infty}_{0} \norm{\tilde{\Theta}^{J,B}_{t} P_{t}^{0} u}^{2}
\frac{dt}{t}
\lesssim \int^{\infty}_{0} \norm{\br{\tilde{\Theta}^{J,B}_{t} -
\tilde{\gamma}^{J,B}_{t} A_{t}} P_{t}^{0} u}^{2} \frac{dt}{t} + \int^{\infty}_{0}
\norm{\tilde{\gamma}^{J,B}_{t} A_{t} P_{t}^{0} u}^{2} \frac{dt}{t}.
\end{equation}
Since the operator $\Theta^{J,B}_{t}$ satisfies the conditions
of Proposition \ref{prop:PrincPart}, it follows that
\begin{align*}\begin{split}
\int^{\infty}_{0} \norm{\br{\tilde{\Theta}^{J,B}_{t} -
\tilde{\gamma}^{J,B}_{t} A_{t}} P_{t}^{0} u}^{2} \frac{dt}{t}
&= \int^{\infty}_{0} \norm{\br{\Theta^{J,B}_{t} - \gamma^{J,B}_{t}
A_{t}} \mathbb{P}_{3} P_{t}^{0}u}^{2} \frac{dt}{t} \\
&\lesssim \int^{\infty}_{0} \norm{t \nabla \mathbb{P}_{3}
P_{t}^{0}u}^{2} \frac{dt}{t} \\
&\lesssim \int^{\infty}_{0} \norm{t \Pi_{0} P_{t}^{0} u}^{2}
\frac{dt}{t} \\
&= \int^{\infty}_{0}\norm{Q_{t}^{0} u}^{2}
\frac{dt}{t} \\
&= \frac{1}{2} \norm{u}^{2},
\end{split}\end{align*}
where the estimate $\norm{\nabla \mathbb{P}_{3} P_{t}^{0} u} \lesssim
\norm{\Pi_{0} P_{t}^{0} u}$ follows from (H8) for the operator
$\Gamma_{0}$. It should be noted that in order to use (H8) we had to
use the fact that $u = \Gamma_{J} v$ for some $v \in D
\br{\Gamma_{J}}$ and therefore
$$
\mathbb{P}_{3} P_{t}^{0}u = P_{t}^{0} \mathbb{P}_{3} \Gamma_{J} v =
P_{t}^{0} \mathbb{P}_{3} \Gamma_{0} v = \Gamma_{0} P_{t}^{0} v \in R \br{\Gamma_{0}}.
$$
Our theorem has thus been reduced to a proof of the following
square function estimate
$$
\int^{\infty}_{0} \norm{\tilde{\gamma}^{J,B}_{t} A_{t}P_{t}^{0} u}^{2}
\frac{dt}{t} \lesssim c_{\alpha}^{J} \cdot \norm{u}^{2}.
$$
On splitting from above using the triangle inequality,
\begin{equation}
\label{eqtn:BeforeCarleson}
\int^{\infty}_{0} \norm{\tilde{\gamma}^{J,B}_{t} A_{t} P_{t}^{0}
u}^{2} \frac{dt}{t} \lesssim \int^{\infty}_{0}
\norm{\tilde{\gamma}^{J,B}_{t} A_{t} \br{P_{t}^{0} - I}u}^{2}
\frac{dt}{t} + \int^{\infty}_{0} \norm{\tilde{\gamma}^{J,B}_{t}
A_{t} u}^{2} \frac{dt}{t}.
\end{equation}
Proposition \ref{prop:OffDiagonal} states that the uniform estimate
$\norm{\tilde{\gamma}_{t}^{J,B} A_{t}} \lesssim 1$ holds for all $t
> 0$.
Furthermore, notice that $A_{t}^{2} = A_{t}$ and $\mathbb{P}_{3}
A_{t} = A_{t} \mathbb{P}_{3}$ for all $t > 0$. These facts combine
to give
\begin{align*}\begin{split}
\int^{\infty}_{0} \norm{\tilde{\gamma}^{J,B}_{t} A_{t} \br{P_{t}^{0}
- I}u}^{2} \frac{dt}{t} &= \int^{\infty}_{0} \norm{\gamma^{J,B}_{t}
A_{t} \mathbb{P}_{3} A_{t} \br{P_{t}^{0} - I} u}^{2} \frac{dt}{t}
\\
&\lesssim \int^{\infty}_{0} \norm{\mathbb{P}_{3} A_{t} \br{P_{t}^{0}
- I} u}^{2} \frac{dt}{t}.
\end{split}\end{align*}
Since $\norm{\mathbb{P}_{3} A_{t} \br{P_{t}^{0} - I} u} \leq \norm{A_{t} \br{P_{t}^{0} - I} u}$,
the argument from Proposition 5.7 of
\cite{axelsson2006quadratic} bounds this final term through
$$
\int^{\infty}_{0} \norm{A_{t} \br{P_{t}^{0} - I}u}^{2}
\frac{dt}{t}\lesssim \norm{u}^{2},
$$
which applies since $\lb \Gamma_{0}, B_{1}, B_{2} \rb$ by hypothesis satisfies (H1)
- (H8).
For the second term in \eqref{eqtn:BeforeCarleson}, apply Carleson's
theorem (\cite[pg.~59]{stein1993harmonic}) to obtain
$$
\int^{\infty}_{0} \norm{\tilde{\gamma}^{J,B}_{t} A_{t} u}^{2}
\frac{dt}{t} \lesssim \norm{\mu}_{\mathcal{C}} \cdot \norm{u}^{2},
$$
where $\mu$ is the measure on $\mathbb{R}^{n+1}$ defined through
$$
d \mu(x,t) := \abs{\tilde{\gamma}^{J,B}_{t}(x)}^{2} \frac{dx \, dt}{t}
$$
for $x \in \mathbb{R}^{n}$ and $t > 0$, where $\norm{\mu}_{\mathcal{C}}$ denotes
its Carleson norm. The proof of our theorem has thus been reduced to showing that
$\mu$ is a Carleson measure with Carleson norm bounded by a multiple of $c_{\alpha}^{J}$.
\subsubsection{Carleson Measure Estimate}
\label{subsec:Carleson}
Our goal now is to prove the following Carleson measure
estimate,
\begin{equation}
\label{eqtn:CarlesonMain}
\sup_{Q \in \Delta} \frac{1}{\abs{Q}} \int^{l(Q)}_{0} \int_{Q}
\abs{\tilde{\gamma}^{J,B}_{t}(x)}^{2}
\frac{dx \, dt}{t} \lesssim c_{\alpha}^{J} < \infty.
\end{equation}
Let $\mathcal{L}_{3}$ denote the subspace
\begin{equation}
\label{eqtn:L3}
\mathcal{L}_{3} := \lb \nu \in \mathcal{L} \br{\mathbb{C}^{N}}
: \nu \mathbb{P}_{3} = \nu \rb.
\end{equation}
By construction, we have $\tilde{\gamma}_{t}^{J,B}(x) \in
\mathcal{L}_{3}$ for any $t > 0$ and $x \in \mathbb{R}^{n}$ since
\begin{align*}\begin{split}
\tilde{\gamma}_{t}^{J,B}(x) \mathbb{P}_{3}w &= \br{\Theta^{J,B}_{t}
\mathbb{P}_{3}} \br{\mathbb{P}_{3}w}(x) \\
&= \br{\Theta^{J,B}_{t} \mathbb{P}_{3}}(w)(x) \\
&= \tilde{\gamma}_{t}^{J,B}(x)(w).
\end{split}\end{align*}
Let $\sigma > 0$ be a constant to be determined at a later time. Let $\mathcal{V}$ be a finite set consisting of $\nu \in \mathcal{L}_{3}$ with
$\abs{\nu} = 1$ such that $\cup_{\nu \in \mathcal{V}} K_{\nu} =
\mathcal{L}_{3} \setminus \lb 0 \rb$, where
$$
K_{\nu} := \lb \nu' \in \mathcal{L}_{3} \setminus \lb 0 \rb :
\abs{\frac{\nu'}{\abs{\nu'}} - \nu} \leq \sigma \rb.
$$
Then, in order to prove our Carleson measure estimate
\eqref{eqtn:CarlesonMain}, it is sufficient to fix $\nu \in
\mathcal{V}$ and prove that
\begin{equation}
\label{eqtn:CarlesonMain2}
\sup_{Q \in \Delta} \frac{1}{\abs{Q}} \int \int_{\substack{(x,t) \in
R_{Q} \\ \tilde{\gamma}^{J,B}_{t}(x) \in K_{\nu}}}
\abs{\tilde{\gamma}^{J,B}_{t}(x)}^{2} \frac{dx \, dt}{t} \lesssim
c_{\alpha}^{J} < \infty,
\end{equation}
where $R_{Q} := Q \times [0,l(Q))$. Recall the John-Nirenberg lemma for
Carleson measures as applied in \cite{axelsson2006quadratic} and \cite{auscher2002solution}.
\begin{lem}[The John-Nirenberg Lemma for Carleson Measures]
\label{lem:JN}
Let $\rho$ be a measure on $\mathbb{R}^{n+1}_{+}$ and $\beta > 0$. Suppose
that for every $Q \in \Delta$ there exists a collection $\lb Q_{k}
\rb_{k} \subset \Delta$ of disjoint subcubes of $Q$ such that $E_{Q}
:= Q \setminus \cup_{k} Q_{k}$ satisfies $\abs{E_{Q}} > \beta \abs{Q}$
and such that
\begin{equation}
\label{eqtn:JN1}
\sup_{Q \in \Delta} \frac{\rho(E^{*}_{Q})}{\abs{Q}} \leq C
\end{equation}
for some $C > 0$, where $E^{*}_{Q} := R_{Q} \setminus \cup_{k} R_{Q_{k}}$. Then
\begin{equation}
\label{eqtn:JN2}
\sup_{Q \in \Delta} \frac{\rho(R_{Q})}{\abs{Q}} \leq \frac{C}{\beta}.
\end{equation}
\end{lem}
\begin{proof}
Fix $Q \in \Delta$ and let $\lb Q_{k_{1}} \rb_{k_{1}}$ be a collection
of subcubes as in the hypotheses of the lemma. Apply the bound \eqref{eqtn:JN1} to the decomposition
$$
\rho(R_{Q}) = \rho \br{E^{*}_{Q}} + \sum_{k_{1}} \rho \br{R_{Q_{k_{1}}}}
$$
to obtain
$$
\rho(R_{Q}) \leq C \abs{Q} + \sum_{k_{1}} \rho \br{R_{Q_{k_{1}}}}.
$$
For each $k_{1}$, let $\lb Q_{k_{1},k_{2}} \rb_{k_{2}}$ be a collection of subcubes of
$Q_{k_{1}}$ that satisfy the hypotheses of the lemma. Decompose
$\rho(R_{Q_{k_{1}}})$ and once again apply \eqref{eqtn:JN1} to obtain
\begin{align*}\begin{split}
\rho(R_{Q}) &\leq C \abs{Q} + \sum_{k_{1}} \br{ \rho(E^{*}_{Q_{k_{1}}}) +
\sum_{k_{2}} \rho(R_{Q_{k_{1},k_{2}}})} \\
&\leq C \abs{Q} + \sum_{k_{1}} C \abs{Q_{k_{1}}} + \sum_{k_{1},k_{2}} \rho(R_{Q_{k_{1},k_{2}}}) \\
&\leq C \abs{Q} + C \abs{Q} \br{1 - \beta} + \sum_{k_{1},k_{2}} \rho(R_{Q_{k_{1},k_{2}}}).
\end{split}\end{align*}
Iterating this process and summing the resulting geometric series
gives \eqref{eqtn:JN2}.
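Explicitly, if the subcubes obtained after $m$ iterations are denoted $\lb Q_{k_{1}, \ldots, k_{m}} \rb$, then
$$
\rho(R_{Q}) \leq C \abs{Q} \sum_{j = 0}^{m - 1} \br{1 - \beta}^{j} + \sum_{k_{1}, \ldots, k_{m}} \rho \br{R_{Q_{k_{1}, \ldots, k_{m}}}} \leq \frac{C}{\beta} \abs{Q} + \sum_{k_{1}, \ldots, k_{m}} \rho \br{R_{Q_{k_{1}, \ldots, k_{m}}}}.
$$
One way to dispose of the remainder term is to first run the argument for the restriction of
$\rho$ to $\mathbb{R}^{n} \times (\delta, \infty)$ with $\delta > 0$ fixed: each $Q_{k}$ is a proper
dyadic subcube of its parent and so has at most half its side length, hence the remainder vanishes
once $m$ is sufficiently large, and \eqref{eqtn:JN2} for $\rho$ itself follows on letting
$\delta \rightarrow 0$.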
\end{proof}
With this tool at our disposal, the proof of our theorem is reduced to the
following proposition.
\begin{prop}
\label{prop:Carleson}
There exist $\beta > 0$ and $\sigma > 0$ satisfying the
following conditions. For every $\nu \in \mathcal{V}$ and $Q \in
\Delta$, there is a collection $\lb Q_{k} \rb_{k} \subset \Delta$ of
disjoint subcubes of $Q$ such that $E_{Q,\nu} = Q \setminus \cup_{k} Q_{k}$
satisfies $\abs{E_{Q,\nu}} > \beta \abs{Q}$ and such that
\begin{equation}
\label{eqtn:Carleson1}
\sup_{Q \in \Delta} \frac{1}{\abs{Q}} \int \int_{\substack{(x,t) \in
E^{*}_{Q,\nu} \\ \tilde{\gamma}^{J,B}_{t}(x) \in K_{\nu}}}
\abs{\tilde{\gamma}^{J,B}_{t}(x)}^{2} \frac{dx \,
dt}{t} \lesssim c_{\alpha}^{J} < \infty,
\end{equation}
where $E^{*}_{Q,\nu} := R_{Q} \setminus \cup_{k} R_{Q_{k}}$. Moreover, $\beta$
and $\sigma$ are entirely independent of the conditions (H8D$\alpha$),
(H8J) and (H8J$\alpha$).
\end{prop}
For now, fix $\nu \in \mathcal{V}$ and $Q \in \Delta$. Since $\abs{\nu} = 1$, there exist $w^{\nu}$,
$\hat{w}^{\nu} \in \mathbb{C}^{N}$ with $\abs{\hat{w}^{\nu}} =
\abs{w^{\nu}} = 1$ and $\nu^{*} \br{\hat{w}^{\nu}} = w^{\nu}$. To
simplify notation, this dependence on $\nu$ will be kept implicit whenever there is no risk of
confusion, by writing $w := w^{\nu}$ and $\hat{w} :=
\hat{w}^{\nu}$. Notice that since $\nu$ satisfies $\nu = \nu
\mathbb{P}_{3}$, $w$ must satisfy $\mathbb{P}_{3} w = w$.
For $\epsilon > 0$ the function $f^{w}_{Q,
\epsilon}$ can be defined in an identical manner to
\cite{axelsson2006quadratic}. Specifically, let $\eta_{Q} : \mathbb{R}^{n}
\rightarrow [0,1]$ be a smooth cutoff function equal to $1$ on $2 Q$,
with support in $4 Q$ and with $\norm{\nabla \eta_{Q}}_{\infty} \leq
\frac{1}{l}$, where $l := l(Q)$. Then define $w_{Q} := \eta_{Q} \cdot w$ and
$$
f^{w}_{Q, \epsilon} := w_{Q} - \epsilon l i \Gamma_{J} \br{I + \epsilon l
i \Pi_{J,B}}^{-1} w_{Q} = \br{I + \epsilon l i \Gamma^{*}_{J,B}} \br{I +
\epsilon l i \Pi_{J,B}}^{-1} w_{Q}.
$$
\begin{lem}
\label{lem:LocalTb1}
There exists a constant $C > 0$, independent of (H8D$\alpha$), (H8J)
and (H8J$\alpha$), that
satisfies $\norm{f^{w}_{Q, \epsilon}} \leq C \abs{Q}^{\frac{1}{2}}$
and
\begin{equation}
\label{eqtn:LocalTb1}
\abs{\Xint-_{Q} \mathbb{P}_{3} f^{w}_{Q, \epsilon} - w} \leq C \cdot \epsilon^{\frac{1}{2}},
\end{equation}
for any $\epsilon > 0$.
\end{lem}
\begin{proof}
The first claim follows from
\begin{align*}\begin{split}
\norm{f^{w}_{Q,\epsilon}} &\lesssim \norm{w_{Q}} + \norm{\epsilon l i
\Gamma_{J} \br{I + \epsilon l i \Pi_{J,B}}^{-1} w_{Q}} \\
&\lesssim \abs{Q}^{\frac{1}{2}} + \norm{\epsilon l i \Pi_{J,B} \br{I
+ \epsilon l i \Pi_{J,B}}^{-1}w_{Q}} \\
&\lesssim \abs{Q}^{\frac{1}{2}}.
\end{split}\end{align*}
On recalling that $w$ is zero in the first two components,
\begin{align*}\begin{split}
\abs{\Xint-_{Q} \mathbb{P}_{3} f^{w}_{Q,\epsilon} - w}^{2} &=
\abs{\Xint-_{Q} \mathbb{P}_{3} \epsilon l i \Gamma_{J} \br{I + \epsilon
l i \Pi_{J,B}}^{-1} w_{Q}}^{2} \\
&= \abs{\Xint-_{Q} \epsilon l i \Gamma_{0} \br{I + \epsilon l i \Pi_{J,B}}^{-1}w_{Q}}^{2}.
\end{split}\end{align*}
At this point, apply Lemma 5.6 of \cite{axelsson2006quadratic} to the
operator $\Upsilon = \Gamma_{0}$ to obtain
\begin{align*}\begin{split}
\abs{\Xint-_{Q} \epsilon l i \Gamma_{0} \br{I + \epsilon l i
\Pi_{J,B}}^{-1}w_{Q}}^{2} &\lesssim \frac{\br{\epsilon l}^{2}}{l}
\br{\Xint-_{Q} \abs{\br{I + \epsilon l i \Pi_{J,B}}^{-1}
w_{Q}}^{2}}^{\frac{1}{2}} \\ & \qquad \qquad \cdot \br{\Xint-_{Q} \abs{\Gamma_{0}
\br{I + \epsilon l i \Pi_{J,B}}^{-1} w_{Q}}^{2}}^{\frac{1}{2}} \\
&\lesssim \epsilon \br{\Xint-_{Q} \abs{\epsilon l i \Gamma_{0} \br{I +
\epsilon l i \Pi_{J,B}}^{-1} w_{Q}}^{2}}^{\frac{1}{2}} \\
&\leq \epsilon \br{\Xint-_{Q} \abs{\epsilon l i \Gamma_{J} \br{I +
\epsilon l i \Pi_{J,B}}^{-1} w_{Q}}^{2}}^{\frac{1}{2}} \\
&\lesssim \epsilon,
\end{split}\end{align*}
where the inequality $\norm{\Gamma_{0} v} \leq \norm{\Gamma_{J} v}$
for $v \in D \br{\Gamma_{J}}$ follows trivially from the matrix form
of $\Gamma_{0}$ and $\Gamma_{J}$.
\end{proof}
\begin{lem}
\label{lem:LocalTb2}
There exists a constant $D > 0$, independent of (H8D$\alpha$), (H8J)
and (H8J$\alpha$), such
that
\begin{equation}
\label{eqtn:LocalTb2}
\int \int_{R_{Q}} \abs{\Theta^{J,B}_{t} f^{w}_{Q,\epsilon}(x)}^{2}
\frac{dx \, dt}{t} \leq D \frac{\abs{Q}}{\epsilon^{2}}.
\end{equation}
\end{lem}
\begin{proof}
First observe that
\begin{align*}\begin{split}
\Theta^{J,B}_{t} f^{w}_{Q,\epsilon} &= P_{t}^{J,B} t \Gamma^{*}_{J,B}
\br{I + \epsilon l i \Gamma^{*}_{J,B}} \br{I + \epsilon l i
\Pi_{J,B}}^{-1} w_{Q} \\
&= \frac{t}{\epsilon l} P_{t}^{J,B} \epsilon l \Gamma^{*}_{J,B} \br{I
+ \epsilon l i \Pi_{J,B}}^{-1} w_{Q}.
\end{split}\end{align*}
Therefore
\begin{align*}\begin{split}
\int^{l}_{0} \int_{Q} \abs{\Theta^{J,B}_{t}
f^{w}_{Q,\epsilon}(x)}^{2} \frac{dx \, dt}{t} &= \int^{l}_{0}
\br{\frac{t}{\epsilon l}}^{2} \int_{Q} \abs{P_{t}^{J,B} \epsilon l
\Gamma^{*}_{J,B} \br{I + \epsilon l i \Pi_{J,B}}^{-1} w_{Q}}^{2}
dx \, \frac{dt}{t} \\
&\lesssim \int^{l}_{0} \br{\frac{t}{\epsilon l}}^{2} \norm{\epsilon l
i \Gamma^{*}_{J,B} \br{I + \epsilon l i \Pi_{J,B}}^{-1} w_{Q}}^{2}
\frac{dt}{t} \\
&\lesssim \frac{\abs{Q}}{\br{\epsilon l}^{2}} \int^{l}_{0} t \, dt \\
&\simeq \frac{\abs{Q}}{\epsilon^{2}}.
\end{split}\end{align*}
\end{proof}
From this point forward, with $C$ as in Lemma \ref{lem:LocalTb1}, set
$\epsilon := \frac{1}{4 C^{2}}$ and introduce the notation $f^{w}_{Q} := f^{w}_{Q,
\epsilon}$. With this choice of $\epsilon$ it must be true that
$$
\abs{\Xint-_{Q} \mathbb{P}_{3}f^{w}_{Q} - w} \leq \frac{1}{2}.
$$
That is,
\begin{align*}\begin{split}
1 - 2 \mathrm{Re} \left\langle \Xint-_{Q} \mathbb{P}_{3} f^{w}_{Q}, w \right\rangle &=
\abs{w}^{2} - 2 \mathrm{Re} \left\langle \Xint-_{Q} \mathbb{P}_{3} f^{w}_{Q}, w \right\rangle
\\
&\leq \abs{\Xint-_{Q} \mathbb{P}_{3} f^{w}_{Q} - w}^{2} \\
&\leq \frac{1}{4}.
\end{split}\end{align*}
On rearranging we find that
\begin{equation}
\label{eqtn:RealPart}
\mathrm{Re} \left\langle \Xint-_{Q} \mathbb{P}_{3}f^{w}_{Q}, w \right\rangle \geq \frac{1}{4}.
\end{equation}
In this context, Lemma 5.11 of \cite{axelsson2006quadratic} takes the following form.
\begin{lem}
\label{lem:LocalTb3}
There exist $\beta$, $c_{1}$, $c_{2} > 0$ and a collection $\lb Q_{k} \rb$ of dyadic subcubes of $Q$
such that $\abs{E_{Q,\nu}} > \beta \abs{Q}$ and such that
$$
\mathrm{Re} \left\langle w, \Xint-_{Q'} \mathbb{P}_{3} f^{w}_{Q} \right\rangle
\geq c_{1} \quad \text{and} \quad \Xint-_{Q'} \abs{\mathbb{P}_{3}f^{w}_{Q}}
\leq c_{2}
$$
for all dyadic subcubes $Q' \in \Delta$ of $Q$ which satisfy $R_{Q'}
\cap E^{*}_{Q, \nu} \neq \emptyset$. Moreover, $\beta$, $c_{1}$ and
$c_{2}$ are independent of (H8D$\alpha$), (H8J), (H8J$\alpha$), $Q$, $\sigma$ and $\nu$.
\end{lem}
The proof of this statement follows in an identical manner to the
argument in \cite{axelsson2006quadratic}. If we set $\sigma = \frac{c_{1}}{2 c_{2}}$, then the following pointwise estimate can be
deduced.
\begin{lem}
\label{lem:LocalTb4}
If $(x,t) \in E^{*}_{Q, \nu}$ and $\tilde{\gamma}_{t}^{J,B}(x) \in
K_{\nu}$ then
\begin{equation}
\label{eqtn:LocalTb4}
\abs{\tilde{\gamma}_{t}^{J,B}(x) \br{A_{t} f^{w}_{Q}(x)}} \geq \frac{1}{2}
c_{1} \abs{\tilde{\gamma}^{J,B}_{t}(x)}.
\end{equation}
\end{lem}
\begin{proof}
First observe that
\begin{align*}\begin{split}
\abs{\nu \br{A_{t} f^{w}_{Q}(x)}} &\geq \mathrm{Re} \left\langle \hat{w},
\nu \br{A_{t} f^{w}_{Q}(x)} \right\rangle \\
&= \mathrm{Re} \left\langle w , A_{t} f^{w}_{Q}(x) \right\rangle \\
&= \mathrm{Re} \left\langle w, A_{t} \mathbb{P}_{3} f^{w}_{Q}(x) \right\rangle \\
&\geq c_{1}.
\end{split}\end{align*}
Then
\begin{align*}\begin{split}
\abs{\frac{\tilde{\gamma}^{J,B}_{t}(x)}{\abs{\tilde{\gamma}^{J,B}_{t}(x)}}
\br{A_{t} f^{w}_{Q}(x)}} &= \abs{\frac{\tilde{\gamma}^{J,B}_{t}(x)}{\abs{\tilde{\gamma}^{J,B}_{t}(x)}}
\br{A_{t} \mathbb{P}_{3} f^{w}_{Q}(x)}} \\
&\geq \abs{\nu \br{A_{t} f^{w}_{Q}(x)}} -
\abs{\frac{\tilde{\gamma}^{J,B}_{t}(x)}{\abs{\tilde{\gamma}^{J,B}_{t}(x)}}
- \nu} \abs{A_{t} \mathbb{P}_{3} f^{w}_{Q}(x)} \\
&\geq c_{1} - \sigma c_{2} \\
&= \frac{1}{2} c_{1}.
\end{split}\end{align*}
\end{proof}
\textsc{Proof of Proposition \ref{prop:Carleson}.} From the pointwise bound of the previous lemma,
$$
\int \int_{\substack{(x,t) \in E^{*}_{Q,\nu} \\
\tilde{\gamma}_{t}^{J,B}(x) \in K_{\nu}}}
\abs{\tilde{\gamma}_{t}^{J,B}(x)}^{2} \frac{dx \, dt}{t} \lesssim \int \int_{R_{Q}}
\abs{\tilde{\gamma}^{J,B}_{t}(x) A_{t} f^{w}_{Q}(x)}^{2}
\frac{dx \, dt}{t}.
$$
At this stage we can begin to unravel our square function norm,
\begin{align}\begin{split}
\label{eqtn:Final0}
\int \int_{R_{Q}} \abs{\tilde{\gamma}^{J,B}_{t}(x) A_{t}
f^{w}_{Q}(x)}^{2} \frac{dx \, dt}{t} &\lesssim \int \int_{R_{Q}} \abs{\Theta^{J,B}_{t}
f^{w}_{Q}(x) - \tilde{\gamma}^{J,B}_{t}(x)A_{t} f^{w}_{Q}(x)}^{2}
\frac{dx \, dt}{t} \\
& \qquad \qquad + \int \int_{R_{Q}} \abs{\Theta^{J,B}_{t} f^{w}_{Q}(x)}^{2}
\frac{dx \, dt}{t}.
\end{split}\end{align}
Lemma \ref{lem:LocalTb2} states that the final term in the above
estimate will be bounded from above by a multiple of $\abs{Q}$. This reduces the task of
proving the proposition to bounding the first term of the above
splitting.
Recall that $f^{w}_{Q}$ can be expressed in the form
$$
f^{w}_{Q} := w_{Q} - u^{w}_{Q},
$$
where $u^{w}_{Q} \in R \br{\Gamma_{J}}$ is given by
$$
u^{w}_{Q} := \epsilon l i \Gamma_{J} \br{I + \epsilon l i
\Pi_{J,B}}^{-1} w_{Q}.
$$
An application of the triangle inequality then leads to
\begin{align}\begin{split}
\label{eqtn:Final01}
\int \int_{R_{Q}} &\abs{ \Theta^{J,B}_{t} f^{w}_{Q}(x) -
\tilde{\gamma}^{J,B}_{t}(x) A_{t} f^{w}_{Q}(x)}^{2} \frac{dx \,
dt}{t} \\ &\qquad \qquad \lesssim \int \int_{R_{Q}} \abs{\Theta^{J,B}_{t} w_{Q}(x) -
\tilde{\gamma}^{J,B}_{t}(x) A_{t} w_{Q}(x)}^{2} \frac{dx \,
dt}{t} \\ & \qquad \qquad \qquad + \int \int_{R_{Q}} \abs{\Theta^{J,B}_{t} u^{w}_{Q}(x) -
\tilde{\gamma}^{J,B}_{t}(x) A_{t} u^{w}_{Q}(x)}^{2} \frac{dx \, dt}{t}.
\end{split}\end{align}
On noticing that for every $x \in Q$ and $0 < t < l(Q)$
\begin{align*}\begin{split}
\Theta^{J,B}_{t} w_{Q}(x) - \tilde{\gamma}^{J,B}_{t}(x) A_{t}
w_{Q}(x) &= \Theta^{J,B}_{t} w_{Q}(x) - \Theta^{J,B}_{t} \br{A_{t}
w_{Q}(x)}(x) \\
&= \Theta^{J,B}_{t} \br{\br{\eta_{Q} - 1} w}(x),
\end{split}\end{align*}
it is clear that the first term in \eqref{eqtn:Final01} can be handled
in an identical manner as in the proof
of Proposition 5.9 from \cite{axelsson2006quadratic}. Specifically,
since $\br{\mathrm{supp} \br{\eta_{Q} - 1} w} \cap 2 Q = \emptyset$,
the off-diagonal estimates of the operator $\Theta^{J,B}_{t}$ lead to
$$
\int_{Q} \abs{\Theta^{J,B}_{t} \br{\br{\eta_{Q} - 1} w}(x)}^{2} dx
\lesssim \frac{t \abs{Q}}{l},
$$
which implies that
$$
\int \int_{R_{Q}} \abs{\Theta^{J,B}_{t} w_{Q}(x) -
\tilde{\gamma}^{J,B}_{t}(x) A_{t} w_{Q}(x)}^{2} \frac{dx \, dt}{t} \lesssim \abs{Q}.
$$
As for the second term in \eqref{eqtn:Final01},
\begin{align}\begin{split}
\label{eqtn:Final1}
\int \int_{R_{Q}} &\abs{\Theta^{J,B}_{t} u^{w}_{Q} -
\tilde{\gamma}^{J,B}_{t}(x) A_{t} u^{w}_{Q}(x)}^{2} \frac{dx \,
dt}{t} \\ &\qquad \qquad\lesssim \int \int_{R_{Q}} \abs{\Theta^{J,B}_{t} \br{I -
P_{t}^{J}}u^{w}_{Q}(x)}^{2} \frac{dx \, dt}{t} \\
& \qquad \qquad \qquad + \int \int_{R_{Q}} \abs{\Theta^{J,B}_{t} P_{t}^{J} u^{w}_{Q}(x) -
\tilde{\gamma}^{J,B}_{t}(x) A_{t} u^{w}_{Q}(x)}^{2} \frac{dx
\, dt}{t}.
\end{split}\end{align}
Since $u_{Q}^{w} \in R \br{\Gamma_{J}}$, Corollary \ref{cor:HighFrequency} gives
\begin{align*}\begin{split}
\int \int_{R_{Q}} \abs{\Theta^{J,B}_{t} \br{I - P_{t}^{J}}
u^{w}_{Q}}^{2} \frac{dx \, dt}{t} &\lesssim
\norm{u^{w}_{Q}}^{2} \\
&\lesssim \abs{Q}.
\end{split}\end{align*}
For the remaining term in \eqref{eqtn:Final1},
\begin{align}\begin{split}
\label{eqtn:Final2}
\int \int_{R_{Q}} &\abs{\Theta^{J,B}_{t} P_{t}^{J} u^{w}_{Q}(x) -
\tilde{\gamma}^{J,B}_{t}(x) A_{t} u^{w}_{Q}(x)}^{2} \frac{dx \,
dt}{t} \\ & \qquad \qquad \lesssim \int \int_{R_{Q}} \abs{\Theta^{J,B}_{t} \br{I -
\mathbb{P}_{3}} P_{t}^{J} u^{w}_{Q}}^{2}\frac{dx \, dt}{t} \\
& \qquad \qquad \qquad + \int \int_{R_{Q}} \abs{\tilde{\Theta}^{J,B}_{t} P_{t}^{J}
u^{w}_{Q}(x) - \tilde{\gamma}^{J,B}_{t}(x) A_{t} u^{w}_{Q}(x)}^{2}
\frac{dx \, dt}{t}.
\end{split}\end{align}
Since we have already proved the boundedness of the first and second components,
\begin{align*}\begin{split}
\int \int_{R_{Q}} \abs{\Theta^{J,B}_{t} \br{I - \mathbb{P}_{3}}
P_{t}^{J} u^{w}_{Q}}^{2} \frac{dx \, dt}{t}
&\lesssim c_{\alpha}^{J} \cdot \norm{u^{w}_{Q}}^{2} \\
&\lesssim c_{\alpha}^{J} \cdot \abs{Q}.
\end{split}\end{align*}
For the second term in \eqref{eqtn:Final2},
\begin{align}\begin{split}
\label{eqtn:Final3}
\int \int_{R_{Q}} &\abs{\tilde{\Theta}^{J,B}_{t} P_{t}^{J}
u^{w}_{Q}(x) - \tilde{\gamma}^{J,B}_{t}(x) A_{t} u^{w}_{Q}(x)}^{2}
\frac{dx \, dt}{t}
\\ & \qquad \qquad\lesssim \int \int_{R_{Q}} \abs{\tilde{\Theta}^{J,B}_{t} P_{t}^{J} u^{w}_{Q}(x) -
\tilde{\gamma}^{J,B}_{t}(x) A_{t} P_{t}^{J} u^{w}_{Q}(x)}^{2}
\frac{dx \, dt}{t} \\
& \qquad \qquad \qquad + \int \int_{R_{Q}} \abs{\gamma^{J,B}_{t}(x) \mathbb{P}_{3} \br{ A_{t}
P_{t}^{J} - A_{t}} u^{w}_{Q}(x)}^{2} \frac{dx \, dt}{t}.
\end{split}\end{align}
To bound the first term on the right-hand side of the above estimate
notice that
$$
\tilde{\Theta}^{J,B}_{t} P_{t}^{J} u^{w}_{Q}(x) -
\tilde{\gamma}^{J,B}_{t}(x) A_{t} P_{t}^{J} u^{w}_{Q}(x) =
\br{\Theta^{J,B}_{t} - \gamma^{J,B}_{t} A_{t}} \mathbb{P}_{3}
P_{t}^{J} u^{w}_{Q}(x).
$$
Theorem \ref{thm:Diagonalisation} then allows us to diagonalise our
$P_{t}^{J}$ operators in the first term of
\eqref{eqtn:Final3} to get
$$
\int^{l(Q)}_{0} \norm{\br{\Theta^{J,B}_{t} - \gamma^{J,B}_{t}
A_{t}} \mathbb{P}_{3} P_{t}^{J} u^{w}_{Q}}^{2}_{L^{2}\br{Q}} \frac{dt}{t} \lesssim
c_{\alpha}^{J} \abs{Q} + \int^{\infty}_{0} \norm{\br{\Theta^{J,B}_{t}
- \gamma^{J,B}_{t} A_{t}} \mathbb{P}_{3} \mathcal{P}_{t}^{J}
u^{w}_{Q}}^{2} \frac{dt}{t}.
$$
From Proposition
\ref{prop:PrincPart} we know that
\begin{align*}\begin{split}
\int^{\infty}_{0} \norm{\br{\Theta^{J,B}_{t} - \gamma_{t}^{J,B}
A_{t}} \mathbb{P}_{3} \mathcal{P}_{t}^{J} u^{w}_{Q}}^{2}
\frac{dt}{t} &\lesssim \int^{\infty}_{0} \norm{t \nabla
\mathbb{P}_{3} \mathcal{P}_{t}^{J} u^{w}_{Q}}^{2} \frac{dt}{t} \\
&= \int^{\infty}_{0}\norm{t \nabla \mathbb{P}_{3} P_{t}^{0}
u^{w}_{Q}}^{2} \frac{dt}{t} \\
&\lesssim \int^{\infty}_{0} \norm{t \Pi_{0} P_{t}^{0} u^{w}_{Q}}^{2}
\frac{dt}{t} \\
&= \int^{\infty}_{0} \norm{Q_{t}^{0}u^{w}_{Q}}^{2} \frac{dt}{t} \\
&\lesssim \abs{Q},
\end{split}\end{align*}
where in the third line we applied (H8) for the operators $\lb
\Gamma_{0},B_{1}, B_{2} \rb$. It should be noted that in order to use
(H8), we had to use the fact that
\begin{align*}\begin{split}
\mathbb{P}_{3} P_{t}^{0} u^{w}_{Q} &= P_{t}^{0} \mathbb{P}_{3}
u^{w}_{Q} = P_{t}^{0} \mathbb{P}_{3} \epsilon l i \Gamma_{J} (I +
\epsilon l i \Pi_{J,B})^{-1} w_{Q} \\
&= P_{t}^{0} \mathbb{P}_{3} \epsilon l i \Gamma_{0} (I + \epsilon l i
\Pi_{J,B})^{-1} w_{Q} = \epsilon l i \Gamma_{0} P_{t}^{0} (I +
\epsilon l i \Pi_{J,B})^{-1} w_{Q} \in R (\Gamma_{0}).
\end{split}\end{align*}
It remains to bound the second term in \eqref{eqtn:Final3},
$$
\int \int_{R_{Q}} \abs{\gamma^{J,B}_{t}(x) \mathbb{P}_{3} A_{t}
\br{P_{t}^{J} - I} u^{w}_{Q}(x)}^{2} \frac{dx \, dt}{t} = \int
\int_{R_{Q}} \abs{\gamma^{J,B}_{t} A_{t} \mathbb{P}_{3} A_{t}
\br{P_{t}^{J} - I} u^{w}_{Q}(x)}^{2} \frac{dx \, dt}{t}.
$$
On noting the uniform $L^{2}$-boundedness of the $\gamma^{J,B}_{t}
A_{t}$ operators and applying the triangle inequality,
\begin{align*}\begin{split}
\int
\int_{R_{Q}} \abs{\gamma^{J,B}_{t} A_{t} \mathbb{P}_{3} A_{t}
\br{P_{t}^{J} - I} u^{w}_{Q}(x)}^{2} \frac{dx \, dt}{t} &\lesssim
\int_{0}^{\infty} \int_{\mathbb{R}^{n}} \abs{\mathbb{P}_{3} A_{t} \br{P_{t}^{J} - I}
u^{w}_{Q}(x)}^{2} \frac{dx \, dt}{t} \\
&\lesssim \int_{0}^{\infty} \int_{\mathbb{R}^{n}} \abs{\mathbb{P}_{3} A_{t} \br{P_{t}^{J} -
\mathcal{P}_{t}^{J}} u^{w}_{Q}(x)}^{2} \frac{dx \, dt}{t} \\
&\qquad + \int_{0}^{\infty} \int_{\mathbb{R}^{n}} \abs{\mathbb{P}_{3} A_{t} \br{\mathcal{P}_{t}^{J} - I}
u^{w}_{Q}(x)}^{2} \frac{dx \, dt}{t}.
\end{split}\end{align*}
Applying Theorem \ref{thm:Diagonalisation} and recalling that $\mathbb{P}_{3}
\mathcal{P}_{t}^{J} = \mathbb{P}_{3} P_{t}^{0}$,
\begin{align*}\begin{split}
\int \int_{R_{Q}} \abs{\gamma^{J,B}_{t} A_{t} \mathbb{P}_{3} A_{t}
\br{P_{t}^{J} - I} u^{w}_{Q}(x)}^{2} \frac{dx \, dt}{t} &\lesssim
c_{\alpha}^{J} \cdot \norm{u^{w}_{Q}}^{2} + \int_{0}^{\infty} \int_{\mathbb{R}^{n}} \abs{\mathbb{P}_{3} A_{t} \br{P_{t}^{0} - I}
u^{w}_{Q}(x)}^{2} \frac{dx \, dt}{t}.
\end{split}\end{align*}
From the proof of Proposition 5.7 of \cite{axelsson2006quadratic} we
know that
$$
\int_{0}^{\infty} \int_{\mathbb{R}^{n}} \abs{A_{t} \br{P_{t}^{0} - I}u^{w}_{Q}(x)}^{2}
\frac{dx \, dt}{t} \lesssim \abs{Q},
$$
allowing us to finally conclude our proof.
\BoldSquare
\section{Applications}
\label{sec:Applications}
Our non-homogeneous framework will now be applied to three
different contexts. We begin with the case that serves as the primary
motivation for this article,
the scalar Kato square root problem with zero-order
potential.
\subsection{Scalar Kato with Zero-Order Potential}
\label{subsec:Scalar}
Theorem \ref{thm:KatoPotential},
the promised result of the introductory section, will now be
proved. Fix $V \in \mathcal{W}_{\alpha}$ for some $\alpha \in (1,2]$.
Define the operators $\Gamma_{J}$, $B_{1}$ and $B_{2}$ as
follows. Define our Hilbert space to be
$$
\mathcal{H} := L^{2}\br{\mathbb{R}^{n}} \oplus L^{2}\br{\mathbb{R}^{n}}
\oplus L^{2}(\mathbb{R}^{n};\mathbb{C}^{n}),
$$
for some $n \in \mathbb{N}^{*}$. Set $J = \abs{V}^{\frac{1}{2}}$ and $D = \nabla$.
Our operator $\Gamma_{J}$ is
then given by
$$
\Gamma_{J} = \Gamma_{\abs{V}^{\frac{1}{2}}} = \br{\begin{array}{c c}
0 & 0 \\ \nabla_{\abs{V}^{\frac{1}{2}}} & 0
\end{array}} = \br{\begin{array}{c c c}
0 & 0 & 0 \\
\abs{V}^{\frac{1}{2}} & 0 & 0 \\
\nabla & 0 & 0
\end{array}},
$$
defined on the dense domain $H^{1,V}\br{\mathbb{R}^{n}} \oplus L^{2} \br{\mathbb{R}^{n}} \oplus
L^{2}\br{\mathbb{R}^{n};\mathbb{C}^{n}}$, where $H^{1,V} \br{\mathbb{R}^{n}}$ is as defined
in the introductory section.
The density of $H^{1,V}\br{\mathbb{R}^{n}}$ in $L^{2} \br{\mathbb{R}^{n}}$ follows
from $V \in L^{1}_{loc}(\mathbb{R}^{n})$.
Let $A \in L^{\infty} \br{\mathbb{R}^{n}; \mathcal{L}\br{\mathbb{C}^{n}}}$ be a
matrix-valued multiplication operator and suppose that the G{\aa}rding
inequalities \eqref{eqtn:Garding0} and \eqref{eqtn:Garding} are
satisfied with constants $\kappa^{A} > 0$ and $\kappa_{V}^{A} > 0$ respectively.
Define our perturbations $B_{1}$ and $B_{2}$ through
$$
B_{1} = I \qquad \text{and}
\qquad B_{2} := \br{\begin{array}{c c}
I & 0 \\ 0 & \hat{A}
\end{array}} := \br{\begin{array}{c c c}
I & 0 & 0 \\
0 & e^{i \cdot \mathrm{arg} V} & 0 \\
0 & 0 & A
\end{array}}.
$$
Our perturbed Dirac-type operator then becomes
$$
\Pi_{B,\abs{V}^{\frac{1}{2}}} := \Gamma_{\abs{V}^{\frac{1}{2}}} +
\Gamma^{*}_{\abs{V}^{\frac{1}{2}}} \br{\begin{array}{c c}
I & 0 \\ 0 & \hat{A}
\end{array}} = \br{\begin{array}{c c}
0 & \nabla_{\abs{V}^{\frac{1}{2}}}^{*} \hat{A} \\
\nabla_{\abs{V}^{\frac{1}{2}}} & 0
\end{array}}.
$$
The square of our perturbed Dirac-type operator is then given by
$$
\Pi_{B,\abs{V}^{\frac{1}{2}}}^{2} = \br{\begin{array}{c c}
\nabla^{*}_{\abs{V}^{\frac{1}{2}}} \hat{A}
\nabla_{\abs{V}^{\frac{1}{2}}}
& 0 \\
0 &
\nabla_{\abs{V}^{\frac{1}{2}}}
\nabla^{*}_{\abs{V}^{\frac{1}{2}}} \hat{A}
\end{array}} = \br{\begin{array}{c c}
- \mathrm{div} (A \nabla) + V & 0 \\
0 & \nabla_{\abs{V}^{\frac{1}{2}}}
\nabla^{*}_{\abs{V}^{\frac{1}{2}}} \hat{A}
\end{array}}.
$$
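Indeed, writing $\nabla_{\abs{V}^{\frac{1}{2}}} u = \br{\abs{V}^{\frac{1}{2}} u, \nabla u}$ and recalling that $\hat{A} = \mathrm{diag}\br{e^{i \cdot \mathrm{arg} V}, A}$, the first diagonal entry can be checked directly,
$$
\nabla^{*}_{\abs{V}^{\frac{1}{2}}} \hat{A} \nabla_{\abs{V}^{\frac{1}{2}}} u
= \abs{V}^{\frac{1}{2}} e^{i \cdot \mathrm{arg} V} \abs{V}^{\frac{1}{2}} u
+ \nabla^{*} \br{A \nabla u}
= V u - \mathrm{div} \br{A \nabla u},
$$
on using $\abs{V} e^{i \cdot \mathrm{arg} V} = V$ and $\nabla^{*} = - \mathrm{div}$.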
It is clear from the form of our operator $\Gamma_{0}$ and the fact
that $A$ satisfies \eqref{eqtn:Garding0} that the operators $\lb
\Gamma_{0}, B_{1}, B_{2} \rb$ satisfy (H1) - (H8). Similarly, since
$A$ and $V$ satisfy \eqref{eqtn:Garding}, it follows that $\lb
\Gamma_{J}, B_{1}, B_{2} \rb$ satisfy the properties (H1) - (H6).
\begin{lem}
\label{lem:WAlphaImplies}
For $V \in \mathcal{W}_{\alpha}$, the conditions (H8D$\alpha$) and
(H8J$\alpha$) are both satisfied and
$c_{\alpha}^{J} \lesssim \br{1 + \brs{V}_{\alpha}^{2}}\br{\alpha - 1}^{-1}$.
\end{lem}
\begin{proof}
The condition $V \in \mathcal{W}_{\alpha}$ tells us that
\begin{equation}
\label{eqtn:WAlphaImplies1}
\norm{\abs{V}^{\frac{\alpha}{2}}u}_{2} +
\norm{(-\Delta)^{\frac{\alpha}{2}}u}_{2} \leq \brs{V}_{\alpha}
\norm{\br{\abs{V} - \Delta}^{\frac{\alpha}{2}}u}_{2}
\end{equation}
for all $u \in D(\abs{V} - \Delta)$. We first prove that
(H8D$\alpha$) is satisfied. For this, it is
sufficient to show that $D \br{(\abs{V} -
\Delta)^{\frac{\alpha}{2}}} \subset D
\br{(-\Delta)^{\frac{\alpha}{2}}}$ and
\begin{equation}
\label{eqtn:WAlphaImplies2}
\norm{(-\Delta)^{\frac{\alpha}{2}}u}_{2} \leq \brs{V}_{\alpha}
\norm{(\abs{V} - \Delta)^{\frac{\alpha}{2}}u}_{2}
\end{equation}
for all $u \in D \br{(\abs{V} - \Delta)^{\frac{\alpha}{2}}}$. Fix $u
\in D \br{(\abs{V} - \Delta)^{\frac{\alpha}{2}}}$. Since
$D(\abs{V} - \Delta)$ is a core for $(\abs{V} -
\Delta)^{\frac{\alpha}{2}}$, there must exist some $\lb u_{n} \rb_{n
= 1}^{\infty} \subset D(\abs{V} - \Delta)$ with
\begin{equation}
\label{eqtn:WAlphaImplies3}
\norm{u_{n} - u}_{2} + \norm{(\abs{V} - \Delta)^{\frac{\alpha}{2}}(u_{n} - u)}_{2}
\xrightarrow{n \rightarrow \infty} 0.
\end{equation}
We then have for $n, \, m \in \mathbb{N}$,
$$
\norm{u_{n} - u_{m}}_{2} + \norm{(-\Delta)^{\frac{\alpha}{2}}(u_{n} - u_{m})}_{2} \leq
\norm{u_{n} - u_{m}}_{2} + \brs{V}_{\alpha} \norm{(\abs{V} -
\Delta)^{\frac{\alpha}{2}} (u_{n} - u_{m})}_{2}.
$$
This proves that $\lb u_{n} \rb_{n = 1}^{\infty}$ is Cauchy in the
graph norm of $(-\Delta)^{\frac{\alpha}{2}}$. The sequence $\lb u_{n}
\rb_{n = 1}^{\infty}$ must therefore
converge to some $\tilde{u} \in D \br{(-\Delta)^{\frac{\alpha}{2}}}$
in the graph norm of $(-\Delta)^{\frac{\alpha}{2}}$,
$$
\norm{u_{n} - \tilde{u}}_{2} + \norm{(-\Delta)^{\frac{\alpha}{2}} (u_{n} -
\tilde{u})}_{2} \xrightarrow{n \rightarrow \infty} 0.
$$
This combined with \eqref{eqtn:WAlphaImplies3} shows that $u =
\tilde{u}$ and therefore $u \in
D\br{(-\Delta)^{\frac{\alpha}{2}}}$. Moreover, we have that
\begin{align*}\begin{split}
\norm{(-\Delta)^{\frac{\alpha}{2}}u}_{2} &= \lim_{n \rightarrow \infty}
\norm{(-\Delta)^{\frac{\alpha}{2}}u_{n}}_{2} \\
&\leq \brs{V}_{\alpha} \lim_{n \rightarrow \infty} \norm{(\abs{V} -
\Delta)^{\frac{\alpha}{2}}u_{n}}_{2} \\
&= \brs{V}_{\alpha} \norm{(\abs{V} - \Delta)^{\frac{\alpha}{2}}u}_{2},
\end{split}\end{align*}
completing the proof of (H8D$\alpha$) with
$b^{D}_{\alpha} \lesssim \brs{V}_{\alpha}$. An identical proof can be
used to obtain the condition (H8J$\alpha$) with
constant $b^{J}_{\alpha} \lesssim \brs{V}_{\alpha}$.
\end{proof}
Combining the above Lemma with Corollary \ref{cor:LBJCalc} completes the proof of Theorem \ref{thm:KatoPotential}.
\subsection{The Class $\mathcal{W}$}
\label{subsec:W}
Define the potential class
$$\label{not:W}
\mathcal{W} := \bigcup_{\alpha \in (1,2]} \mathcal{W}_{\alpha}.
$$
It has so far been proved that the Kato estimate holds for any potential
in the class $\mathcal{W}$.
At this stage, however,
the unperturbed condition $V \in \mathcal{W}$ remains in quite an
abstract form. It will therefore be instructive to unpack this
condition and compare $\mathcal{W}$ with other commonly used classes
of potentials. It is first interesting to note that $\mathcal{W}_{1}$
is the collection of all potentials with no additional restrictions.
\begin{prop}
\label{prop:W1}
For any locally integrable $V: \mathbb{R}^{n} \rightarrow \mathbb{C}$ we have $\brs{V}_{1} \leq
2$. That is, $\mathcal{W}_{1} = L^{1}_{loc}(\mathbb{R}^{n})$.
\end{prop}
\begin{proof}
We have
\begin{align*}\begin{split}
\norm{(-\Delta)^{\frac{1}{2}}u}_{2}^{2} &= \langle
(-\Delta)^{\frac{1}{2}}u, (-\Delta)^{\frac{1}{2}} u \rangle_{2} = \langle
(-\Delta)u, u \rangle_{2} \\
&\leq \langle - \Delta u, u \rangle_{2} + \langle \abs{V} u, u \rangle_{2} \\
&= \langle (\abs{V} - \Delta)^{\frac{1}{2}}u, (\abs{V} -
\Delta)^{\frac{1}{2}}u \rangle_{2} = \norm{(\abs{V} - \Delta)^{\frac{1}{2}}u}_{2}^{2}
\end{split}\end{align*}
for all $u \in D(\abs{V} - \Delta)$. The second estimate follows
in a similar manner from the non-negativity of $(-\Delta)$.
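Explicitly, for $u \in D(\abs{V} - \Delta)$,
$$
\norm{\abs{V}^{\frac{1}{2}} u}_{2}^{2}
= \langle \abs{V} u, u \rangle_{2}
\leq \langle \abs{V} u, u \rangle_{2} + \langle - \Delta u, u \rangle_{2}
= \norm{\br{\abs{V} - \Delta}^{\frac{1}{2}} u}_{2}^{2}.
$$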
\end{proof}
Using this as an endpoint, it can then be proved using an interpolation style argument that the
potential classes $\lb \mathcal{W}_{\alpha} \rb_{\alpha \in [1,2]}$
form a decreasing collection.
\begin{prop}
\label{lem:Independence}
Suppose that the potential $V : \mathbb{R}^{n} \rightarrow \mathbb{C}$ is in
$\mathcal{W}_{\alpha}$ for some $\alpha \in (1,2]$. Then $V \in \mathcal{W}_{\beta}$ for any $\beta \in [1,\alpha]$ with
\begin{equation}
\label{eqtn:Independence}
\brs{V}_{\beta} \leq 2 \brs{V}_{\alpha}^{\frac{\beta - 1}{\alpha - 1}}.
\end{equation}
Therefore for $\beta \leq \alpha$
$$
\mathcal{W}_{\beta} \supset \mathcal{W}_{\alpha}.
$$
\end{prop}
\begin{proof}
Assume that $V \in \mathcal{W}_{\alpha}$ for some $\alpha \in (1,2]$. It
will be shown through the Hadamard three-lines theorem that $V \in \mathcal{W}_{\beta}$ for
$\beta \in [1,\alpha]$ with the constant given in
\eqref{eqtn:Independence}.
Let $St := \lb z \in \mathbb{C} : 0 < \mathrm{Re} \, z < 1 \rb$ and set
$\theta := \frac{\beta - 1}{\alpha - 1}$. Fix $u \in D(\abs{V} - \Delta)$ and define $f : \overline{St} \rightarrow \mathbb{R}$ to be the function given by
$$
f(z) := \norm{\abs{V}^{\frac{1}{2} + \br{\frac{\alpha - 1}{2}} z}
\br{\abs{V} - \Delta}^{\br{\frac{\alpha - 1}{2}} \br{\theta - z}}u}_{2}.
$$
$f$ is holomorphic on $St$ and continuous on the closed strip
$\overline{St}$. In order to apply the three-lines theorem, it must
first be proved that $f$ is bounded on
$\overline{St}$. For $z = s + it$ where $0 \leq s \leq 1$ and $t \in
\mathbb{R}$ we have
\begin{align}\begin{split}
\label{eqtn:Hadamard1}
f(z) &= \norm{\abs{V}^{\frac{1}{2} + \br{\frac{\alpha - 1}{2}}
\br{s+ i t}}
\br{\abs{V} - \Delta}^{\frac{\beta - 1}{2} - \br{\frac{\alpha -
1}{2}}(s + i t)}u}_{2} \\
&= \norm{\abs{V}^{\frac{1}{2} + \br{\frac{\alpha - 1}{2}}s}
\br{\abs{V} - \Delta}^{-\frac{1}{2} - \br{\frac{\alpha - 1}{2}}s}
\br{\abs{V} - \Delta}^{-\br{\frac{\alpha - 1}{2}} i t} \br{\abs{V} -
\Delta}^{\frac{\beta}{2}}u}_{2}.
\end{split}\end{align}
The function
$$
s \mapsto \norm{\abs{V}^{\frac{1}{2} + \br{\frac{\alpha - 1}{2}}s}
\br{\abs{V} - \Delta}^{-\frac{1}{2} - \br{\frac{\alpha - 1}{2}}s}v}_{2}
$$
is continuous on $[0,1]$ for $v \in L^{2}\br{\mathbb{R}^{n}}$. This, together with \eqref{eqtn:Hadamard1}
then gives
\begin{align*}\begin{split}
f(z) &\lesssim \norm{\br{\abs{V} - \Delta}^{-\br{\frac{\alpha -
1}{2}}i t} \br{\abs{V} - \Delta}^{\frac{\beta}{2}}u}_{2} \\
&\leq \norm{\br{\abs{V} - \Delta}^{\frac{\beta}{2}}u}_{2},
\end{split}\end{align*}
where we used the fact that $\br{\abs{V} - \Delta}^{i a}$ is a
contraction operator on $L^{2}\br{\mathbb{R}^{n}}$ for $a \in \mathbb{R}$. This
demonstrates that $f$ is bounded on $\overline{St}$.
For $t \in \mathbb{R}$, Proposition \ref{prop:W1} implies that
\begin{align*}\begin{split}
f(it) &= \norm{\abs{V}^{\frac{1}{2} + \br{\frac{\alpha - 1}{2}} i t}
\br{\abs{V} - \Delta}^{\br{\frac{\alpha - 1}{2}} \br{\theta - i
t}}u}_{2} \\
&= \norm{\abs{V}^{\frac{1}{2}} \br{\abs{V} -
\Delta}^{\br{\frac{\alpha - 1}{2}} \br{\theta - i t}} u}_{2} \\
&\leq \norm{\br{\abs{V} - \Delta}^{\frac{1}{2}} \br{\abs{V} -
\Delta}^{\br{\frac{\alpha - 1}{2}} \br{\theta - i t}}u}_{2} \\
&\leq \norm{\br{\abs{V} - \Delta}^{\frac{\beta}{2}}u}_{2}.
\end{split}\end{align*}
We also have
\begin{align*}\begin{split}
f(1 + i t) &= \norm{\abs{V}^{\frac{\alpha}{2} + \br{\frac{\alpha -
1}{2}} i t} \br{\abs{V} - \Delta}^{\br{\frac{\alpha - 1}{2}}
\br{\theta - 1 - i t}}u}_{2} \\
&= \norm{\abs{V}^{\frac{\alpha}{2}} \br{\abs{V} -
\Delta}^{\br{\frac{\alpha - 1}{2}} \br{\theta - 1 - i t}}u}_{2} \\
&\leq \brs{V}_{\alpha} \norm{\br{\abs{V} - \Delta}^{\frac{\alpha}{2}}
\br{\abs{V} - \Delta}^{\br{\frac{\alpha - 1}{2}} \br{\theta - 1 - i
t}}u}_{2} \\
&\leq \brs{V}_{\alpha} \norm{\br{\abs{V} - \Delta}^{\frac{\alpha}{2} +
\br{\frac{\alpha - 1}{2}} \br{\theta - 1}}u}_{2} \\
&= \brs{V}_{\alpha} \norm{\br{\abs{V} - \Delta}^{\frac{\beta}{2}}u}_{2}.
\end{split}\end{align*}
The Hadamard three-lines theorem then gives the bound
\begin{equation}
\label{eqtn:Hadamard2}
f(\theta) = \norm{\abs{V}^{\frac{\beta}{2}}u}_{2} \leq
\brs{V}_{\alpha}^{\br{\frac{\beta - 1}{\alpha - 1}}} \norm{\br{\abs{V}
- \Delta}^{\frac{\beta}{2}}u}_{2}.
\end{equation}
A similar argument can be applied to obtain the bound
\begin{equation}
\label{eqtn:Hadamard3}
\norm{\br{- \Delta}^{\frac{\beta}{2}}u}_{2} \leq
\brs{V}_{\alpha}^{\br{\frac{\beta - 1}{\alpha - 1}}} \norm{\br{\abs{V}
- \Delta}^{\frac{\beta}{2}}u}_{2}
\end{equation}
for all $u \in D(\abs{V} - \Delta)$; one must simply recall that
the imaginary powers of the positive self-adjoint operator
$\br{-\Delta}$ are contraction operators on
$L^{2}\br{\mathbb{R}^{n}}$. Combining \eqref{eqtn:Hadamard2} and
\eqref{eqtn:Hadamard3} then gives $\brs{V}_{\beta} \leq 2
\brs{V}_{\alpha}^{\frac{\beta - 1}{\alpha - 1}}$.
\end{proof}
Recall that a non-negative potential $V \in L^{q}_{loc}(\mathbb{R}^{n})$ for index
$1 < q < \infty$ is said to
belong to the reverse H\"{o}lder class $RH_{q}$ if there exists $C > 0$ such that
$$
\br{\frac{1}{\abs{B}} \int_{B} V^{q} \, dx}^{\frac{1}{q}} \leq C
\br{\frac{1}{\abs{B}} \int_{B} V \, dx}
$$
for every ball $B \subset \mathbb{R}^{n}$. For $V \in RH_{q}$, the condition $\brs{V}_{\alpha} < \infty$ for
$\alpha = 2$ was proved in \cite{shen1995lp} for $q \geq \frac{n}{2}$ and $n \geq 3$. This
was later improved to $q
\geq 2$ and arbitrary dimension in \cite{auscher2007maximal}. Note
that in {\cite[Thm.~1.1]{auscher2007maximal}} the estimate $\norm{\Delta u}_{2} +
\norm{\abs{V} u}_{2} \lesssim \norm{(\abs{V} -\Delta)u}_{2}$ was
proved for $u \in C^{\infty}_{0}(\mathbb{R}^{n})$. However, this estimate can
then be extended to all of $D(\abs{V} - \Delta)$ since $\abs{V} \in
L^{2}_{loc}(\mathbb{R}^{n})$ implies that $C^{\infty}_{0}(\mathbb{R}^{n})$ is a core
for $(\abs{V} - \Delta)$ (cf. \cite{semenov1977schrodinger}).
\begin{thm}[\cite{shen1995lp}, \cite{auscher2007maximal}]
\label{thm:Shen2}
Let $V \in L^{1}_{loc}(\mathbb{R}^{n})$ and suppose that $\abs{V} \in
RH_{q}$ for some $q \geq 2$. Then $V \in \mathcal{W}_{2} \subset \mathcal{W}$.
\end{thm}
Notice that since the absolute value of any polynomial is contained in
$RH_{q}$ for any $q \in (1,\infty)$ we automatically obtain the following corollary.
\begin{cor}
\label{cor:KatoPoly}
For any polynomial $P$, we have $P \in \mathcal{W}_{2}$. As a result, the Kato estimate holds for any polynomial with range
contained in $S_{\mu+}$ for some $\mu \in [0,\frac{\pi}{2})$.
\end{cor}
The ensuing proposition demonstrates that
the inclusion of the reverse H\"{o}lder potentials in
$\mathcal{W}_{2}$ is strict, at least in dimension $n > 4$.
\begin{prop}
\label{prop:Ln2}
For $n > 4$,
$$
L^{\frac{n}{2}} \br{\mathbb{R}^{n}} \subset \mathcal{W}_{2}.
$$
\end{prop}
\begin{proof}
Fix $V \in L^{\frac{n}{2}}\br{\mathbb{R}^{n}}$. For $\varepsilon > 0$, the
resolvent $(\abs{V} + \varepsilon - \Delta)^{-1}$ is well-defined as a bounded
operator on $L^{2}(\mathbb{R}^{n})$. H\"{o}lder's inequality followed by
the fact that $(\abs{V} + \varepsilon - \Delta)^{-1}u \leq
(-\Delta + \varepsilon)^{-1}u$ for any $u \in L^{2}$ and then
finally the uniform boundedness of $(-\Delta + \varepsilon)^{-1}$
from $L^{2}$ to $L^{\frac{2 n}{n - 4}}$ (cf. {\cite[Sec.~6.1.2]{grafakos2009modern}}) produces
\begin{align*}\begin{split}
&\norm{(\abs{V}+ \varepsilon) \br{\abs{V} + \varepsilon -
\Delta}^{-1} u}_{L^{2}(B(0,N))} \\ & \qquad \qquad \qquad \leq
\norm{\abs{V} + \varepsilon}_{L^{\frac{n}{2}}(B(0,N))} \cdot \norm{\br{\abs{V} + \varepsilon -
\Delta}^{-1}u}_{L^{\frac{2n}{n - 4}}(B(0,N))} \\
& \qquad \qquad \qquad \leq \norm{\abs{V} + \varepsilon}_{L^{\frac{n}{2}}(B(0,N))} \cdot
\norm{\br{-\Delta + \varepsilon}^{-1}u}_{L^{\frac{2n}{n - 4}}(B(0,N))} \\
& \qquad \qquad \qquad \lesssim \norm{\abs{V} + \varepsilon}_{L^{\frac{n}{2}}(B(0,N))} \norm{u}_{2}
\end{split}\end{align*}
for any $N > 0$, where $B(0,N) \subset \mathbb{R}^{n}$ is the open ball of radius $N$ centered
at the origin. Therefore,
\begin{align*}\begin{split}
\norm{(\abs{V} + \varepsilon)u}_{L^{2}(B(0,N))} &\lesssim \norm{\abs{V} +
\varepsilon}_{L^{\frac{n}{2}}(B(0,N))} \cdot \norm{(\abs{V} + \varepsilon -
\Delta)u}_{2} \\ &\leq \norm{\abs{V} + \varepsilon}_{L^{\frac{n}{2}}(B(0,N))}
\cdot \br{\norm{(\abs{V} -\Delta)u}_{2} + \varepsilon \norm{u}_{2}}
\end{split}\end{align*}
for all $u \in D(\abs{V} + \varepsilon - \Delta) = D(\abs{V} -
\Delta)$. Letting $\varepsilon \rightarrow 0$ followed by $N
\rightarrow \infty$ gives the estimate
$$
\norm{\abs{V} u}_{2} \lesssim \norm{V}_{\frac{n}{2}} \cdot \norm{(\abs{V} - \Delta )u}_{2}
$$
for all $u \in D(\abs{V} - \Delta)$. The triangle inequality then
yields the bound $\norm{(-\Delta)u}_{2} \lesssim \norm{(\abs{V} -
\Delta)u}_{2}$ and thus $V \in \mathcal{W}_{2}$.
\end{proof}
The above statements demonstrate clearly that the class of potentials
$\mathcal{W}_{2}$ is quite large. In light of Proposition
\ref{lem:Independence}, however, it is also evident that $\mathcal{W}_{2}$ is the smallest
class out of the collection $\lb \mathcal{W}_{\alpha}
\rb_{\alpha \in (1,2]}$. One can then conclude that
$\mathcal{W}$, the class of all potentials for which the Kato
estimate holds, is quite large itself.
\subsection{Systems with Zero-Order Potential}
\label{subsec:Systems}
Fix $m \in \mathbb{N}^{*}$ and $A \in L^{\infty}\br{\mathbb{R}^{n}; \mathcal{L}
\br{\mathbb{C}^{n} \otimes \mathbb{C}^{m}}}$.
Let $V : \mathbb{R}^{n} \rightarrow
\mathcal{L} \br{\mathbb{C}^{m}}$ be a measurable matrix-valued function
contained in $L^{1}_{loc}(\mathbb{R}^{n};\mathcal{L}(\mathbb{C}^{m}))$ with $V(x)$
normal for almost every $x \in \mathbb{R}^{n}$. $V$ can
be viewed as a densely defined closed multiplication operator on
$L^{2} \br{\mathbb{R}^{n};\mathbb{C}^{m}}$ with domain
$$
D \br{V} = \lb u \in L^{2} \br{\mathbb{R}^{n};\mathbb{C}^{m}} : V \cdot u \in L^{2} \br{\mathbb{R}^{n};\mathbb{C}^{m}} \rb.
$$
Similar to the scalar case, one can define forms $\mathfrak{l}^{A}$
and $\mathfrak{l}_{V}^{A}$ respectively through
$$
\mathfrak{l}^{A}\brs{u,v} := \int_{\mathbb{R}^{n}} \langle A(x) \nabla u(x),
\nabla v(x) \rangle_{\mathbb{C}^{n} \otimes \mathbb{C}^{m}} \, dx
$$
for $u, \, v \in H^{1}\br{\mathbb{R}^{n};\mathbb{C}^{m}}$ and
$$
\mathfrak{l}_{V}^{A} \brs{u',v'} := \mathfrak{l}^{A} \brs{u',v'} +
\int_{\mathbb{R}^{n}} \langle V(x) u'(x), v'(x) \rangle_{\mathbb{C}^{m}} \, dx
$$
for $u'$ and $v'$ contained in
$$
H^{1,V} \br{\mathbb{R}^{n};\mathbb{C}^{m}} := H^{1} \br{\mathbb{R}^{n};\mathbb{C}^{m}} \cap D(\abs{V}^{\frac{1}{2}}),
$$
where $\abs{V(x)} := \sqrt{V(x)^{*}V(x)}$ for $x \in
\mathbb{R}^{n}$. The density of $H^{1,V}(\mathbb{R}^{n};\mathbb{C}^{m})$ in
$L^{2}(\mathbb{R}^{n};\mathbb{C}^{m})$ follows from the fact that
$C^{\infty}_{0}(\mathbb{R}^{n};\mathbb{C}^{m}) \subset H^{1,V}(\mathbb{R}^{n};\mathbb{C}^{m})$.
Assume that the forms $\mathfrak{l}^{A}$ and
$\mathfrak{l}_{V}^{A}$ satisfy the system equivalents of the G{\aa}rding inequalities \eqref{eqtn:Garding0} and
\eqref{eqtn:Garding} with constants $\kappa^{A} > 0$ and
$\kappa_{V}^{A} > 0$
respectively. Then $\mathfrak{l}^{A}$ and $\mathfrak{l}_{V}^{A}$ each
have a unique associated maximal accretive operator, namely $L$
and $L + V$.
In the article \cite{auscher2001kato}, the Kato square
root property $\norm{\sqrt{L} u}_{2} \simeq \norm{\nabla u}_{2}$ was proved
for elliptic systems without potential. Using the non-homogeneous
machinery that we have developed, the corresponding property can be
proved for the operator with potential. Define $\brs{V}_{\alpha}$ and
$\mathcal{W}_{\alpha}(\mathbb{R}^{n};\mathcal{L}(\mathbb{C}^{m}))$ for $\alpha \in [1,2]$ to be the system
analogues of the corresponding scalar objects. In the theorem below, our non-homogeneous
framework is applied to determine the domain of $\sqrt{L + V}$ for normal potentials.
\begin{thm}
\label{thm:Systems}
Suppose that $V \in \mathcal{W}_{\alpha}(\mathbb{R}^{n};
\mathcal{L}(\mathbb{C}^{m}))$ for some $\alpha \in (1,2]$ and that $V(x)$ is a normal matrix for almost every $x \in
\mathbb{R}^{n}$. Suppose that the system equivalents of the G{\aa}rding inequalities \eqref{eqtn:Garding0} and
\eqref{eqtn:Garding} are both satisfied with constants $\kappa^{A}$
and $\kappa_{V}^{A}$ respectively.
Then there must exist some $C_{V} >
0$ such that
$$
C_{V}^{-1} \br{\norm{\abs{V}^{\frac{1}{2}} u}_{2} +
\norm{\nabla u}_{2}} \leq \norm{\sqrt{L + V} u}_{2} \leq C_{V} \br{\norm{\abs{V}^{\frac{1}{2}} u}_{2} + \norm{\nabla u}_{2}}
$$
for all $u \in H^{1,V}\br{\mathbb{R}^{n};\mathbb{C}^{m}}$. Moreover, the constant
$C_{V}$ depends on $V$ and $\alpha$ through
$$
C_{V} = \tilde{C}_{V} (\alpha - 1)^{-1} (1 + \brs{V}_{\alpha}^{2}),
$$
where $\tilde{C}_{V}$ depends on $V$ only through $\kappa_{V}^{A}$ and
is independent of $\alpha$.
\end{thm}
\begin{proof}
The polar decomposition theorem asserts the existence of some $U :
\mathbb{R}^{n} \rightarrow \mathcal{L}(\mathbb{C}^{m})$, with $U(x)$ unitary for all
$x \in \mathbb{R}^{n}$, such that
$$
V(x) = U(x) \abs{V(x)}
$$
for all $x \in \mathbb{R}^{n}$. As
$V(x)$ is normal, the matrices $U(x)$ and $\abs{V(x)}$ are well-known
to commute. We therefore have the decomposition
\begin{equation}
\label{eqtn:PolarDecomp}
V(x) = \abs{V(x)}^{\frac{1}{2}} U(x) \abs{V(x)}^{\frac{1}{2}}
\end{equation}
for almost every $x \in \mathbb{R}^{n}$.
Set
$$
D := \nabla : H^{1}\br{\mathbb{R}^{n};\mathbb{C}^{m}} \subset L^{2} \br{\mathbb{R}^{n} ; \mathbb{C}^{m}} \rightarrow
L^{2}\br{\mathbb{R}^{n};\mathbb{C}^{n} \otimes \mathbb{C}^{m}}
$$
and
$$
J := \abs{V}^{\frac{1}{2}} : D(\abs{V}^{\frac{1}{2}}) \subset
L^{2}\br{\mathbb{R}^{n};\mathbb{C}^{m}} \rightarrow L^{2} \br{\mathbb{R}^{n};\mathbb{C}^{m}},
$$
both defined as operators on $L^{2} \br{\mathbb{R}^{n};\mathbb{C}^{m}}$. Define the
perturbation matrices
$$
B_{1} := I \qquad \text{and} \qquad B_{2} = \br{\begin{array}{c c c}
I & 0 & 0 \\
0 & U & 0 \\
0 & 0 & A
\end{array}}.
$$
It is not too difficult to see that the operators $\lb \Gamma_{0},
B_{1}, B_{2} \rb$ will satisfy conditions (H1) - (H8) and $\lb
\Gamma_{J}, B_{1}, B_{2} \rb$ will satisfy (H1) - (H6). Indeed, the only
non-trivial condition for both sets of operators is (H2) and this
follows from the respective G{\aa}rding inequalities
\eqref{eqtn:Garding0} and \eqref{eqtn:Garding}. It is also clear from
the fact that $V \in \mathcal{W}_{\alpha}(\mathbb{R}^{n}; \mathcal{L}(\mathbb{C}^{m}))$ that (H8D$\alpha$) and
(H8J$\alpha$) will both be satisfied. This follows from an argument
identical to that of Lemma \ref{lem:WAlphaImplies}. The Kato estimate then follows
from Corollary \ref{cor:LBJCalc} with constant $\tilde{C}_{V} (\alpha
- 1)^{-1}(1 + \brs{V}_{\alpha}^{2})$. It should be noted that
\eqref{eqtn:PolarDecomp} was needed so that we would have $L^{J}_{B} =
L + V$.
\end{proof}
In analogy to the scalar case, it is quite likely that a similar reverse
H\"{o}lder type condition will be sufficient to imply the boundedness
of the operator $\abs{V} \br{\abs{V} - \Delta}^{-1}$ when $m > 1$. However,
as far as the author is aware, this remains an open problem. What is
apparent is that the condition that the potential belongs to
$L^{\frac{n}{2}}$ will once again be sufficient to imply that it
belongs to $\mathcal{W}_{2}$. The following proposition has an
identical proof to that of Proposition \ref{prop:Ln2}.
\begin{prop}
\label{prop:SystemsLn2}
For $n > 4$ and $m \in \mathbb{N}^{*}$,
$$
L^{\frac{n}{2}}\br{\mathbb{R}^{n} ; \mathcal{L}\br{\mathbb{C}^{m}}} \subset
\mathcal{W}_{2}(\mathbb{R}^{n}; \mathcal{L}(\mathbb{C}^{m})).
$$
\end{prop}
\subsection{First Order Potentials}
\label{subsec:FirstOrder}
Let $b : \mathbb{R}^{n} \rightarrow \mathbb{C}^{n}$ be contained in
$L^{1}_{loc}\br{\mathbb{R}^{n};\mathbb{C}^{n}}$ and $A \in L^{\infty}
\br{\mathbb{R}^{n};\mathcal{L}(\mathbb{C}^{n})}$. Suppose that $A$ satisfies the standard G{\aa}rding inequality
$$
\mathrm{Re} \int_{\mathbb{R}^{n}} \langle A(x) \nabla u(x), \nabla u(x)
\rangle_{\mathbb{C}^{n}} \, dx \geq \kappa^{A} \cdot \norm{\nabla u}_{2}^{2}
$$
for all $u \in H^{1}\br{\mathbb{R}^{n};\mathbb{C}}$, for some $\kappa^{A} > 0$. Consider the accretive sesquilinear form
$$
\mathfrak{h}^{A}_{b}[u,v] := \langle A \nabla u, \nabla v \rangle_{2}
+ \langle (\nabla + b) u, (\nabla + b)v \rangle_{2}
$$
defined on the dense subspace $H^{1}_{b}(\mathbb{R}^{n}) := \lb u \in
H^{1}(\mathbb{R}^{n}) : \abs{b u} \in L^{2}(\mathbb{R}^{n}) \rb
\subset L^{2}(\mathbb{R}^{n})$.
\begin{lem}
The form $\mathfrak{h}^{A}_{b}$ is both continuous and closed.
\end{lem}
\begin{proof}
We first prove that $\mathfrak{h}^{A}_{b}$ is continuous. It must
be shown that $\abs{\mathfrak{h}^{A}_{b}[u,v]} \lesssim
\norm{u}_{\mathfrak{h}^{A}_{b}} \norm{v}_{\mathfrak{h}^{A}_{b}}$ for
all $u, \, v \in H^{1}_{b}$,
where $\norm{u}_{\mathfrak{h}^{A}_{b}} := \sqrt{\mathrm{Re} \br{
\mathfrak{h}^{A}_{b}[u,u]} + \norm{u}_{2}^{2}}$. On consecutively
applying the boundedness of $A$ and then the accretivity of $A$,
\begin{align*}\begin{split}
\abs{\mathfrak{h}^{A}_{b}[u,v]} &= \abs{\langle A \nabla u, \nabla v
\rangle_{2} + \langle (\nabla + b)u, (\nabla + b) v \rangle_{2}} \\
&\lesssim \norm{\nabla u}_{2} \norm{\nabla v}_{2} + \norm{(\nabla +
b)u}_{2} \norm{(\nabla + b) v}_{2} \\
&\lesssim \sqrt{\mathrm{Re} \, \langle A \nabla u, \nabla u
\rangle_{2}} \sqrt{\mathrm{Re} \, \langle A \nabla v, \nabla v
\rangle_{2}} + \norm{(\nabla + b)u}_{2} \norm{(\nabla + b)v}_{2} \\
&\lesssim \norm{u}_{\mathfrak{h}^{A}_{b}} \norm{v}_{\mathfrak{h}^{A}_{b}}.
\end{split}\end{align*}
We now prove that $\mathfrak{h}^{A}_{b}$ is closed. That is, it
must be proved that $H^{1}_{b}(\mathbb{R}^{n})$ is complete under the norm
$\norm{\cdot}_{\mathfrak{h}^{A}_{b}}$. From the boundedness of $A$,
\begin{align*}\begin{split}
\norm{u}_{\mathfrak{h}^{A}_{b}} &= \sqrt{\mathrm{Re}
\br{\mathfrak{h}^{A}_{b}[u,u]} + \norm{u}_{2}^{2}} \\
&= \sqrt{\mathrm{Re} \br{\langle A \nabla u, \nabla u \rangle_{2}} +
\norm{(\nabla + b)u}_{2}^{2} + \norm{u}_{2}^{2}} \\
&\lesssim \sqrt{\norm{\nabla u}^{2}_{2} + \norm{b u}^{2}_{2} +
\norm{u}_{2}^{2}} \\
&\lesssim \norm{u}_{H^{1}_{b}},
\end{split}\end{align*}
where $\norm{u}_{H^{1}_{b}} := \norm{u}_{2} + \norm{\nabla u}_{2} +
\norm{b u}_{2}$. Conversely, the G{\aa}rding inequality for $A$ implies
that
\begin{align*}\begin{split}
\norm{u}_{\mathfrak{h}^{A}_{b}} &= \sqrt{\mathrm{Re} \br{\langle A
\nabla u, \nabla u \rangle_{2}} + \norm{(\nabla + b)u}_{2}^{2} +
\norm{u}_{2}^{2}} \\
&\gtrsim \norm{\nabla u}_{2} + \norm{(\nabla + b) u}_{2} + \norm{u}_{2} \\
&\geq \norm{\nabla u}_{2} + \frac{1}{2}\norm{(\nabla + b)u}_{2} +
\norm{u}_{2} \\
&\geq \norm{\nabla u}_{2} + \frac{1}{2}\norm{b u}_{2} - \frac{1}{2}
\norm{\nabla u}_{2} + \norm{u}_{2} \\
&\simeq \norm{u}_{H^{1}_{b}}.
\end{split}\end{align*}
This shows that the norm $\norm{\cdot}_{\mathfrak{h}^{A}_{b}}$ is
equivalent to $\norm{\cdot}_{H^{1}_{b}}$ on $H^{1}_{b}$. Since
$H^{1}_{b}$ is known to be complete under the norm $\norm{\cdot}_{H^{1}_{b}}$ it then follows that it must
also be complete under $\norm{\cdot}_{\mathfrak{h}^{A}_{b}}$.
\end{proof}
The previous lemma implies, in particular, that there exists a maximal
accretive operator $\mathcal{L}^{A}_{b}$ associated with this form
(cf. {\cite[Sec.~1.2]{ouhabaz2005analysis}}). This operator will be denoted by
$$
\mathcal{L}^{A}_{b} = (\nabla + b)^{*} (\nabla + b) - \mathrm{div}(A \nabla).
$$
Define the Hilbert space to be
$$
\mathcal{H} := L^{2} \br{\mathbb{R}^{n}} \oplus L^{2} \br{\mathbb{R}^{n};\mathbb{C}^{n}}
\oplus L^{2} \br{\mathbb{R}^{n};\mathbb{C}^{n}}.
$$
Then set
$$
J := \nabla + b : L^{2}\br{\mathbb{R}^{n}; \mathbb{C}} \rightarrow L^{2} \br{\mathbb{R}^{n};
\mathbb{C}^{n}} \quad \text{and} \quad D := \nabla : L^{2}\br{\mathbb{R}^{n} ; \mathbb{C}}
\rightarrow L^{2}\br{\mathbb{R}^{n};\mathbb{C}^{n}}.
$$
Also, let $B_{1} = I$ as usual and
$$
B_{2} = \br{\begin{array}{c c c}
I & 0 & 0 \\
0 & I & 0 \\
0 & 0 & A
\end{array}}.
$$
Then the operator $L_{B}^{J}$ as in Corollary \ref{cor:LBJCalc} is given
by $L_{B}^{J} = \mathcal{L}^{A}_{b}$. Since $A$ satisfies the standard
G{\aa}rding inequality it follows that $\lb \Gamma_{0}, B_{1},
B_{2} \rb$ and $\lb \Gamma_{J}, B_{1}, B_{2} \rb$ will both satisfy
(H2). This in turn implies that $\lb \Gamma_{0}, B_{1}, B_{2} \rb$
satisfies (H1) - (H8) and $\lb \Gamma_{J}, B_{1}, B_{2} \rb$ satisfies
(H1) - (H6). The non-homogeneous framework, in the form of Corollary \ref{cor:LBJCalc}, applied to these operators
then produces the following theorem.
\begin{thm}
\label{thm:FirstOrder2}
Suppose that $D \br{\br{\nabla + b}^{*} \br{\nabla + b} -
\Delta} \subset D (-\Delta) \cap D(\br{\nabla + b}^{*} \br{\nabla + b})$ and there exists some $c_{b} > 0$ such that
\begin{equation}
\label{eqtn:FirstOrder2}
\norm{\Delta u}_{2} \leq c_{b} \norm{\brs{\br{\nabla + b}^{*} \br{\nabla +
b} - \Delta}u}_{2}
\end{equation}
for all $u \in D \br{\br{\nabla + b}^{*} \br{\nabla + b} -
\Delta}$. Then there exists some constant $c > 0$, independent of
$b$, for which
$$
\br{c \cdot \br{1 + c_{b}}}^{-2} \br{\norm{\br{\nabla + b} u}_{2} + \norm{\nabla u}_{2}} \leq
\norm{\sqrt{\mathcal{L}^{A}_{b}}u}_{2} \leq \br{c \cdot \br{1 + c_{b}}}^{2}
\br{\norm{\br{\nabla + b} u}_{2} + \norm{\nabla u}_{2}}
$$
for all $u \in H^{1}_{b}(\mathbb{R}^{n})$.
\end{thm}
To see that the above theorem is true, simply note that
\eqref{eqtn:FirstOrder2} implies both (H8D$\alpha$) and (H8J$\alpha$) for
$\alpha = 2$ in this context. The independence of
the constant $c$ from $b$ follows from the fact that (H2) is satisfied
by $\lb \Gamma_{J}, B_{1}, B_{2} \rb$ with constant independent of $b$.
\section{Literature Comparison}
\label{sec:Literature}
It is important to note that this is not the first time that Kato-type
estimates have been studied for non-homogeneous operators. We will now
take some time to outline how our article differs in techniques and
results from each of these previous forays.
Recently, in \cite{gesztesy2015stability} and
\cite{gesztesy2016stability}, F. Gesztesy, S. Hofmann and R. Nichols
studied the domains of square root operators using techniques
distinct from those developed in \cite{axelsson2006quadratic}. The
article \cite{gesztesy2015stability} considers potentials in the class
$L^{p} + L^{\infty}$ but is not directly
relevant since it considers bounded
domains. On the other hand, \cite{gesztesy2016stability} does not
impose a boundedness assumption on the domain and considers the
potential class $L^{\frac{n}{2}} + L^{\infty}$. There is already an immediate comparison with our
potential class since it was
shown in Proposition \ref{prop:Ln2} that $L^{\frac{n}{2}} \subset
\mathcal{W}$ in dimension $n > 4$. It is not immediately clear whether
$L^{\infty}$ is contained within our class.
Axelsson, Keith and McIntosh themselves considered non-homogeneous
operators on Lipschitz domains with mixed boundary conditions in
\cite{axelsson2006kato}. The potentials that they considered were, however,
bounded both from above and below and thus contained in $RH_{2}
\subset \mathcal{W}$. In \cite{egert2014kato} and
\cite{egert2016kato}, M. Egert, R. Haller-Dintelmann and
P. Tolksdorf generalised this to certain non-smooth
domains.
The articles \cite{axelsson2006kato},
\cite{egert2014kato} and \cite{egert2016kato} are built upon the
original AKM framework, similar to this one. A key step in the original proof of the
AKM framework is the proof of the estimate
\begin{equation}
\label{eqtn:GlobalAtPt}
\int^{\infty}_{0} \norm{\br{A_{t} - P_{t}}u}^{2} \frac{dt}{t} \lesssim
\norm{u}^{2}.
\end{equation}
This estimate allows the $A_{t}$ and $P_{t}$ operators to be
freely interchanged at several stages in the proof, granting access to
some of the more useful properties of the $A_{t}$
operator. As has been demonstrated in this article through the
diagonalisation theorem, Theorem \ref{thm:Diagonalisation},
\eqref{eqtn:GlobalAtPt} will not hold for a general potential. The
articles \cite{axelsson2006kato}, \cite{egert2014kato} and \cite{egert2016kato} circumvent this problem by imposing boundedness
of the potential from above and below. The boundedness of the
potential from above and below allows one to absorb the potential into
the multiplicative perturbation $B_{2}$, so that one instead considers
the operators
$$
\Gamma_{1} = \br{\begin{array}{c c c}
0 & 0 & 0 \\
1 & 0 & 0 \\
\nabla & 0 & 0
\end{array}}, \quad B_{1} = I \quad \text{and} \quad B_{2} =
\br{\begin{array}{c c c}
I & 0 & 0 \\
0 & V & 0 \\
0 & 0 & A
\end{array}}.
$$
For this set of operators, the large time-scale estimate
$$
\int^{\infty}_{1} \norm{\Theta_{t}^{B,1} P_{t}^{1} u}^{2} \frac{dt}{t}
\lesssim \int^{\infty}_{1} \norm{P_{t}^{1} u}^{2} \frac{dt}{t}
\lesssim \int^{\infty}_{1} \norm{t \Pi_{1} P_{t}^{1} u}^{2}
\frac{dt}{t} \lesssim \norm{u}^{2}
$$
for $u \in R (\Gamma_{1})$ follows almost trivially. Then one only requires a small time-scale
version of \eqref{eqtn:GlobalAtPt} to
hold, namely
$$
\int^{1}_{0} \norm{\br{A_{t} - P_{t}^{1}}u}^{2}
\frac{dt}{t} \lesssim \norm{u}^{2}
$$
for all $u \in R \br{\Gamma_{1}}$. Such an
estimate is then proved to be true.
This is a crude explanation
as to why the techniques developed in \cite{axelsson2006kato} cannot be
directly applied for a general potential that is not bounded both from
above and below. There are similar obstructions, for example in
the selection of test functions in the Carleson measure
proof. However, these also disappear when the potential is bounded both
from above and below.
In this paper, our method has been to instead treat the potential as
part of the unperturbed operator and then to subsequently exploit the algebraic
structure of the operators $\Gamma_{\abs{V}^{\frac{1}{2}}}$, $B_{1}$ and
$B_{2}$. This exploitation has allowed us to conclude that the estimate
\eqref{eqtn:GlobalAtPt} will at least hold on the third
component which, it turns out, is all that is required. Similar obstructions in the proof of the main square
function estimate also vanish when
considered component-wise. As a consequence of this three-by-three mindset we have been able to obtain square
function estimates for potentials that are not necessarily bounded from
above or below and, moreover, are not contained in
$L^{p}\br{\mathbb{R}^{n}}$ for any $1 \leq p \leq \infty$.
\end{document}
\begin{document}
\title{On Coherence of Assistance and Regularized Coherence of Assistance}
\author{Ming-Jing Zhao$^1$}
\author{Teng Ma$^2$ }
\author{Shao-Ming Fei$^{3,4}$ }
\affiliation{
$^1$School of Science,
Beijing Information Science and Technology University, Beijing, 100192, China\\
$^2$State Key Laboratory of Low-Dimensional Quantum Physics and Department of Physics, Tsinghua University, Beijing 100084, China\\
$^3$School of Mathematical Sciences, Capital Normal
University, Beijing
100048, China\\
$^4$Max-Planck-Institute for Mathematics in the Sciences, 04103
Leipzig, Germany}
\pacs{03.65.Ud, 03.67.-a}
\begin{abstract}
We study the relation between the coherence of assistance and the regularized coherence of assistance introduced in [Phys. Rev. Lett. {\bf 116}, 070402 (2016)]. The
necessary and sufficient conditions under which these two quantities coincide are provided.
Detailed examples are analyzed and the optimal pure state decompositions such that the coherence of assistance equals the regularized coherence of assistance are derived.
Moreover, we present the protocol for obtaining the maximal relative entropy coherence, assisted by another party under local measurement and one-way communication in one copy setting.
\end{abstract}
\maketitle
\section{Introduction}
Quantum coherence is an important feature in quantum physics \cite{A. Streltsov-rev}. It is also a powerful resource for quantum metrology \cite{V. Giovannetti}, entanglement creation \cite{J. K. Asboth}, and biological processes \cite{E. Collini,N. Lambert,J. Cai,E. J.OReilly}.
Due to the significant roles played in many novel quantum phenomena, it has attracted much attention recently. A rigorous framework for the quantification of coherence is introduced and some intuitive and
computable measures of coherence are identified, for example, the relative entropy coherence and $l_1$ norm coherence \cite{T. Baumgratz}. The relative entropy coherence of a state is defined as the difference of von Neumann entropy between the density matrix and the diagonal matrix given by its diagonal entries. The $l_1$ norm coherence depends on the magnitudes of off-diagonal entries of a density matrix.
Trace norm coherence is a coherence measure for qubits \cite{L. H. Shao}, but it is only
a coherence monotone for X states \cite{S. Rana}. Besides, the coherence can also be quantified via the convex roof construction \cite{X. Yuan}.
Moreover, there are operational coherence measures such as the distillable coherence and
the coherence cost, which characterize the optimal rate of performance for certain information processing tasks \cite{A. Winter}. In Ref. \cite{A. Winter} it is
shown that the distillable coherence is equal to the relative entropy coherence.
As the maximal average relative entropy coherence of a quantum state, the coherence of assistance $C_a$ is another coherence monotone \cite{E. Chitambar}.
This quantity $C_a$ has an operational interpretation.
Suppose Bob holds a state $\rho^B$ and Alice holds the other part of a purification of $\rho^B$. If Alice performs local measurements and tells Bob her measurement outcomes by classical communication,
the relative entropy coherence of Bob's state can be increased, at most, to $C_a(\rho^B)$.
In the many copy setting, if Alice is allowed to make joint measurements across her many copies and to tell Bob her measurement results by classical communication,
the relative entropy coherence of $\rho^B$ can be increased, on average,
to $C_a^{\infty}(\rho^B)$, which is called the regularized coherence of assistance \cite{E. Chitambar}.
For this process of increasing the relative entropy coherence with the help of another party under local measurement and one way classical communication, an interesting and meaningful question is when the coherence $C_a$ obtained in the one copy setting equals $C_a^{\infty}$ obtained in the many copy setting.
Obviously, for quantum states $\rho$ such that $C_a(\rho)=C_a^{\infty}(\rho)$, the one copy setting is enough, and the many copy setting is redundant and wasteful. In this paper, we aim to answer this question and provide analytical results for the equivalence of the coherence of assistance and the regularized coherence of assistance.
First we present the necessary and sufficient conditions under which the coherence of assistance attains the regularized coherence of assistance.
Detailed examples are analyzed for two dimensional, three dimensional and high dimensional systems.
In these examples, the optimal decompositions for the saturation of the coherence of assistance $C_a$ with the regularized coherence of assistance $C_a^{\infty}$ are provided. Finally, we design the optimal protocol for obtaining the maximal relative entropy coherence assisted by another party using local measurement and one way communication in the one copy setting.
\section{Coherence of assistance}
Under a fixed reference basis, the coherence of assistance of a state $\rho$ is characterized by the maximal average relative entropy coherence,
\begin{eqnarray}
C_a(\rho)=\max \sum_i p_i C_r(|\psi_i\rangle),
\end{eqnarray}
where the maximization is taken over all pure state decompositions of $\rho=\sum_i p_i |\psi_i\rangle\langle\psi_i|$, $C_r(\rho)=S(\Delta(\rho))-S(\rho)$ is the relative entropy of coherence,
$\Delta(\rho)$ denotes the state given by the diagonal entries of $\rho$, $S(\rho)$ is the von Neumann entropy \cite{E. Chitambar}.
Coherence of assistance can be interpreted operationally.
For a given quantum state $\rho$, its initial relative entropy coherence is $C_r(\rho)$. Now suppose Bob holds a state $\rho^B$ and an assistant Alice holds the other part of a purification of $\rho^B$. If Alice performs a local measurement and tells Bob her measurement outcomes by classical communication, Bob's state is left in a pure state ensemble $\{ p_i,\ |\psi_i\rangle\}$ with average relative entropy coherence $\sum_i p_i C_r(|\psi_i\rangle)$. The relative entropy coherence on Bob's side does not decrease, since the relative entropy coherence is monotonic on average under selective measurements. In this process the relative entropy coherence can be increased to at most $C_a(\rho^B)$.
Similarly,
the regularized coherence of assistance is introduced as the average coherence of assistance in many copy setting,
\begin{eqnarray}\label{def regularized ca}
C_a^{\infty}(\rho)=\lim_{n\to \infty} \frac{1}{n}C_a(\rho^{\otimes n}).
\end{eqnarray}
It is obvious that the coherence of assistance is bounded from above by the regularized coherence of assistance,
\begin{eqnarray}\label{relation between ca and cainf}
C_a(\rho)\leq C_a^{\infty}(\rho).
\end{eqnarray}
Utilizing the relation between the regularized coherence of assistance and the regularized entanglement of assistance \cite{D. DiVincenzo, E. Rains}, the authors in \cite{E. Chitambar} have shown a closed form expression for the regularized coherence of assistance,
\begin{eqnarray}\label{exp cainf}
C_a^{\infty}(\rho)=S(\Delta(\rho)).
\end{eqnarray}
Based on this formula, we can get the first necessary and sufficient condition for the saturation of the coherence of assistance with the regularized coherence of assistance as follows.
\begin{theorem}\label{upper bound of ca}
For any quantum state $\rho$, $C_a(\rho)= C_a^{\infty}(\rho)$
if and only if there exists a pure state decomposition
$\rho=\sum_i p_i |\psi_i\rangle\langle\psi_i|$ such that all $\Delta(|\psi_i\rangle)=\Delta(\rho)$.
\end{theorem}
[{Proof}]. By definition, we have
$C_a(\rho)=\max \sum_i p_i C_r(|\psi_i\rangle)
=\max \sum_i p_i S(\Delta(|\psi_i\rangle))
\leq \max S(\sum_i p_i \Delta(|\psi_i\rangle))
=S(\Delta(\rho))=C_a^{\infty}(\rho)$,
where the second equality is due to $S(|\psi_i\rangle)=0$ for pure states $|\psi_i\rangle$, and the inequality follows from the concavity of the von Neumann entropy. The inequality becomes an equality if and only if the $\Delta(|\psi_i\rangle)$ are the same for all $i$. Hence, the coherence of assistance equals the regularized coherence of assistance if and only if there exists a pure state decomposition $\rho=\sum_i p_i |\psi_i\rangle\langle\psi_i|$ such that $\Delta(|\psi_i\rangle)=\Delta(\rho)$ for all $i$, that is, all components in the pure state decomposition have the same diagonal entries as the density matrix. \qed
From theorem \ref{upper bound of ca} one can get another necessary and sufficient condition which is easy to prove.
\begin{corollary}\label{th 1'}
For any quantum state $\rho$, $C_a(\rho)= C_a^{\infty}(\rho)$
if and only if there exists a pure state decomposition
$\rho=\sum_i p_i |\psi_i\rangle\langle\psi_i|$ such that each pure state $|\psi_i\rangle$ has relative entropy coherence $S(\Delta(\rho))$.
\end{corollary}
Theorem \ref{upper bound of ca} and corollary \ref{th 1'} are both necessary and sufficient conditions for the coincidence of the coherence of assistance and the regularized coherence of assistance. The former gives a more explicit form of the optimal pure state ensemble, while the latter is easier to understand.
$C_a$ is said to be additive if $C_a= C_a^{\infty}$. In
Ref. \cite{E. Chitambar} it has been shown that $C_a$ fails to be additive in general, with an example in a 4-dimensional system exhibiting the nonadditivity.
Nevertheless, $C_a$ is additive in two-dimensional systems. Furthermore, by theorem \ref{upper bound of ca} we can find an optimal decomposition for which the coherence of assistance attains the regularized coherence of assistance.
Consider two dimensional quantum states
\begin{equation}\label{2-dim state}
\rho=\sum_{i,j=1}^2 \rho_{ij} |i\rangle\langle j|.
\end{equation}
If the coefficient $\rho_{12}$ is real, we choose
\begin{equation}
\begin{array}{rcl}
|\psi_0\rangle&=&\sqrt{\rho_{11}}|1\rangle + \sqrt{\rho_{22}}|2\rangle,\\
|\psi_1\rangle&=&\sqrt{\rho_{11}}|1\rangle - \sqrt{\rho_{22}}|2\rangle,
\end{array}
\end{equation}
and $p_0=\frac{1}{2}(1+\rho_{12}/\sqrt{\rho_{11}\rho_{22}})$, $p_1=\frac{1}{2}(1-\rho_{12}/\sqrt{\rho_{11}\rho_{22}})$ for nonzero $\rho_{11}$ and $\rho_{22}$.
If the coefficient $\rho_{12}$ is complex, with $|\rho_{12}|$ the magnitude and $\arg(\rho_{12})$ the argument, we set
\begin{equation}
\begin{array}{rcl}
|\psi_0\rangle&=&\sqrt{\rho_{11}}|1\rangle + \sqrt{\rho_{22}}e^{-{\rm i}\arg(\rho_{12})}|2\rangle,\\
|\psi_1\rangle&=&\sqrt{\rho_{11}}|1\rangle + \sqrt{\rho_{22}}e^{-{\rm i}(\pi+\arg(\rho_{12}))}|2\rangle,
\end{array}
\end{equation}
and $p_0=\frac{1}{2}(1+|\rho_{12}|/\sqrt{\rho_{11}\rho_{22}})$, $p_1=\frac{1}{2}(1-|\rho_{12}|/\sqrt{\rho_{11}\rho_{22}})$ for nonzero $\rho_{11}$ and $\rho_{22}$.
Thus $\{p_i, |\psi_i\rangle\}$ is an optimal pure state decomposition of $\rho$ such that the coherence of assistance attains the regularized coherence of assistance.
In fact, there are infinitely many optimal decompositions as the choices of the relative phase in $|\psi_0\rangle$ are infinite.
However, once $|\psi_0\rangle$ is fixed, $|\psi_1\rangle$ and the corresponding probabilities $p_0$ and $p_1$ are determined.
Moreover, if one of the elements $\rho_{11}$ and $\rho_{22}$ is zero, the quantum state $\rho$ is pure, and
its coherence of assistance, regularized coherence of assistance and relative entropy coherence are the same.
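As a purely numerical illustration of the above construction (not needed for the argument), the following sketch, which assumes the NumPy library, uses base-$2$ logarithms and hypothetical matrix entries, checks that the ensemble $\{p_i,|\psi_i\rangle\}$ reproduces $\rho$ and attains $C_a(\rho)=S(\Delta(\rho))$.
\begin{verbatim}
import numpy as np

def dephase(rho):                      # Delta(rho): keep only the diagonal
    return np.diag(np.diag(rho))

def entropy(rho):                      # von Neumann entropy, base-2 logarithm
    ev = np.linalg.eigvalsh(rho)
    ev = ev[ev > 1e-12]
    return float(-np.sum(ev * np.log2(ev)))

rho = np.array([[0.6, 0.2 + 0.1j],     # a hypothetical qubit state
                [0.2 - 0.1j, 0.4]])

arg = np.angle(rho[0, 1])
r = np.abs(rho[0, 1]) / np.sqrt(rho[0, 0].real * rho[1, 1].real)
p = [(1 + r) / 2, (1 - r) / 2]
psi = [np.array([np.sqrt(rho[0, 0].real),
                 np.sqrt(rho[1, 1].real) * np.exp(-1j * arg)]),
       np.array([np.sqrt(rho[0, 0].real),
                 -np.sqrt(rho[1, 1].real) * np.exp(-1j * arg)])]

# the ensemble reproduces rho, and each |psi_i> has diagonal Delta(rho),
# so sum_i p_i C_r(|psi_i>) = S(Delta(rho)) = C_a^infty(rho)
recon = sum(pk * np.outer(v, v.conj()) for pk, v in zip(p, psi))
assert np.allclose(recon, rho)
avg_Cr = sum(pk * entropy(dephase(np.outer(v, v.conj())))
             for pk, v in zip(p, psi))
assert np.isclose(avg_Cr, entropy(dephase(rho)))
\end{verbatim}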
Now we consider the equality $C_a(\rho)= C_a^{\infty}(\rho)$ in $n$-dimensional systems and investigate the requirements that quantum states should satisfy.
For an $n$-dimensional quantum state $\rho=\sum_{ij} \rho_{ij} |i\rangle\langle j|$, we define the matrix equation,
\begin{equation}\label{equation for p}
AP=B.
\end{equation}
Here
\begin{equation}
A=\left(
\begin{array}{ccccccc}
e^{{\rm i}\theta^{(1)}_{{12}}} & e^{{\rm i}\theta^{(2)}_{{12}}} & \cdots & e^{{\rm i}\theta^{(T)}_{{12}}}\\
e^{{\rm i}\theta^{(1)}_{{13}}} & e^{{\rm i}\theta^{(2)}_{{13}}} & \cdots & e^{{\rm i}\theta^{(T)}_{{13}}}\\
\cdots & \cdots & \cdots & \cdots \\
e^{{\rm i}\theta^{(1)}_{{1n}}} & e^{{\rm i}\theta^{(2)}_{{1n}}} & \cdots & e^{{\rm i}\theta^{(T)}_{{1n}}}\\
e^{{\rm i}\theta^{(1)}_{{23}}} & e^{{\rm i}\theta^{(2)}_{{23}}} & \cdots & e^{{\rm i}\theta^{(T)}_{{23}}}\\
\cdots & \cdots & \cdots & \cdots \\
e^{{\rm i}\theta^{(1)}_{{2n}}} & e^{{\rm i}\theta^{(2)}_{{2n}}} & \cdots & e^{{\rm i}\theta^{(T)}_{{2n}}}\\
\cdots & \cdots & \cdots & \cdots \\
e^{{\rm i}\theta^{(1)}_{{n-1,n}}} & e^{{\rm i}\theta^{(2)}_{{n-1,n}}} & \cdots & e^{{\rm i}\theta^{(T)}_{{n-1,n}}}\\
1 & 1 & \cdots & 1
\end{array}
\right)_{(\frac{n(n-1)}{2}+1)\times T}
\end{equation}
with ${(n-1)(n-2)}/{2}$ constraints
\begin{equation}\label{theta constrains}
\left\{\begin{array}{rcl}
\theta^{(k)}_{1s}-\theta^{(k)}_{2s}&=&\theta^{(k)}_{12},\ \ s=3,\cdots,n,\\
\theta^{(k)}_{2s}-\theta^{(k)}_{3s}&=&\theta^{(k)}_{23},\ \ s=4,\cdots,n,\\
\cdots\\
\theta^{(k)}_{n-2,n}-\theta^{(k)}_{n-1,n}&=&\theta^{(k)}_{n-2,n-1},
\end{array}
\right.
\end{equation}
for all $k$. There are essentially $n-1$ independent variables $\theta^{(k)}_{ij}$ for each $k$, $k=1,2,\cdots,T$, which are all between 0 and $2\pi$. $P=(p_1,p_2,\cdots,p_T)^t$, $0\leq p_k\leq 1$ for $k=1,2,\cdots,T$.
\begin{small}
\begin{equation}
B=(\frac{\rho_{12}}{\sqrt{\rho_{11}\rho_{22}}}, \frac{\rho_{13}}{\sqrt{\rho_{11}\rho_{33}}},\cdots,\frac{\rho_{1n}}{\sqrt{\rho_{11}\rho_{nn}}},\frac{\rho_{23}}{\sqrt{\rho_{22}\rho_{33}}}, \cdots,
\frac{\rho_{2n}}{\sqrt{\rho_{22}\rho_{nn}}},\cdots,\frac{\rho_{n-1,n}}{\sqrt{\rho_{n-1,n-1}\rho_{nn}}},1)^t,
\end{equation}
\end{small}
with superscript $t$ denoting transpose.
For the vector $B$, although its components are all fractions, if one denominator vanishes then the corresponding numerator must vanish as well, by the positivity of the density matrix. Therefore $B$ is a well defined $\frac{n(n-1)}{2}+1$ dimensional vector, determined by the entries of the density matrix. In Eq. (\ref{equation for p}) the vector $B$ is known and given by the density matrix, while the
matrix $A$ and the vector $P$ are unknown.
\begin{theorem}\label{th n if and only if}
For $n$-dimensional quantum state $\rho$, $C_a(\rho)=C_a^{\infty}(\rho)$ if and only if the equation (\ref{equation for p}) has solutions for unknowns $P$ and $\theta^{(k)}_{{ij}}$ satisfying conditions (\ref{theta constrains}).
\end{theorem}
[{Proof}].
Let $\{p_k, |\psi_k\rangle\}$
be an optimal pure state ensemble such that $C_a(\rho)=\sum_{k=1}^T p_k C_r(|\psi_k\rangle\langle\psi_k|)$. If $C_a(\rho)$ attains its upper bound $C_a^{\infty}(\rho)$, then $C_r(|\psi_k\rangle\langle\psi_k|)=S(\Delta(\rho))$ by corollary \ref{th 1'} and
$|\psi_k\rangle\langle\psi_k|$ should be of the form
\begin{equation}\label{eq n pure}
\left(
\begin{array}{cccccc}
\rho_{11}& \sqrt{\rho_{11}\rho_{22}}e^{{\rm i}\theta^{(k)}_{{12}}} & \sqrt{\rho_{11}\rho_{33}}e^{{\rm i}\theta^{(k)}_{{13}}} & \cdots
& \sqrt{\rho_{11}\rho_{nn}}e^{{\rm i}\theta^{(k)}_{{1n}}}\\
\sqrt{\rho_{11}\rho_{22}}e^{{\rm -i}\theta^{(k)}_{{12}}} & \rho_{22} & \sqrt{\rho_{22}\rho_{33}}e^{{\rm i}\theta^{(k)}_{{23}}} & \cdots
& \sqrt{\rho_{22}\rho_{nn}}e^{{\rm i}\theta^{(k)}_{{2n}}}\\
\sqrt{\rho_{11}\rho_{33}}e^{{\rm -i}\theta^{(k)}_{{13}}} & \sqrt{\rho_{22}\rho_{33}}e^{{-\rm i}\theta^{(k)}_{{23}}} & \rho_{33}& \cdots
& \sqrt{\rho_{33}\rho_{n,n}}e^{{\rm i}\theta^{(k)}_{{3n}}}\\
\cdots &\cdots &\cdots &\cdots &\cdots\\
\sqrt{\rho_{11}\rho_{nn}}e^{{\rm -i}\theta^{(k)}_{{1n}}} & \sqrt{\rho_{22}\rho_{nn}}e^{{\rm -i}\theta^{(k)}_{{2n}}}
&\sqrt{\rho_{33}\rho_{nn}}e^{{\rm -i}\theta^{(k)}_{{3n}}} & \cdots
&\rho_{nn}
\end{array}
\right)
\end{equation}
by theorem \ref{upper bound of ca} for all $k$.
The ${(n-1)(n-2)}/{2}$ constraints in Eqs. (\ref{theta constrains})
guarantee that the rank of $|\psi_k\rangle\langle\psi_k|$ in Eq. (\ref{eq n pure}) is one.
$\rho=\sum_{k=1}^T p_k |\psi_k\rangle\langle\psi_k|$ demands
\begin{equation}
\sum_{k=1}^T p_k \sqrt{\rho_{ii}\rho_{jj}}e^{{\rm i}\theta^{(k)}_{{ij}}}=\rho_{ij},
\end{equation}
or
\begin{equation}
\sum_{k=1}^T p_k e^{{\rm i}\theta^{(k)}_{{ij}}}=\rho_{ij}/\sqrt{\rho_{ii}\rho_{jj}},
\end{equation}
for $1\leq i<j\leq n$, which gives rise to the equation (\ref{equation for p}). Thus $C_a(\rho)=C_a^{\infty}(\rho)$ if and only if the equation (\ref{equation for p}) has solutions for $P$ and $\theta^{(k)}_{{ij}}$ satisfying conditions (\ref{theta constrains}).
\qed
In Theorem 1 and Corollary 1, necessary and sufficient conditions are provided for the saturation of the coherence of assistance $C_a(\rho)$ with the regularized coherence of assistance $C_a^{\infty}(\rho)$.
In Theorem 2, we present a way to find the optimal pure state ensemble for this saturation. The solution $P$ of the matrix equation (\ref{equation for p})
gives the probabilities $\{p_k\}$ in the optimal decomposition $\{p_k, |\psi_k\rangle\}$, while the solution $\theta^{(k)}_{{ij}}$ in $A$ is the argument of the $(i,j)$ entry, of magnitude
$\sqrt{\rho_{ii}\rho_{jj}}$, of the component $|\psi_k\rangle\langle\psi_k|$ in the optimal decomposition. The drawback of Theorem 2
is that the size of the linear system (\ref{equation for p}) grows quadratically with the dimension of the density matrix, so that
more unknowns in $P$ and more arguments $\theta$ in $A$ are involved as the dimension increases. In solving the matrix equation,
one can first select suitable independent arguments, which then fix the matrix $A$.
The vector $P$ is then determined by $A$ and the vector $B$ above.
If the resulting $P=(p_1,p_2,\cdots,p_T)^t$ satisfies $0\leq p_k\leq 1$ for $k=1,2,\cdots,T$, then a solution is obtained
and the coherence of assistance $C_a(\rho)$ equals the regularized coherence of assistance $C_a^{\infty}(\rho)$.
Otherwise, one chooses different independent arguments.
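This procedure can be sketched numerically as follows. The listing below (an illustration only, assuming NumPy, with the function name chosen here for convenience) fixes the free arguments, builds $A$, and solves Eq. (\ref{equation for p}) for $P$ in the least squares sense; a returned flag of true is a sufficient, though not necessary, certificate that $C_a(\rho)=C_a^{\infty}(\rho)$ for the chosen arguments.
\begin{verbatim}
import numpy as np

def try_decomposition(rho, thetas):
    # thetas has shape (T, n-1): for each column k it lists the free
    # arguments theta^{(k)}_{m,m+1}; the remaining arguments follow from
    # the constraints by telescoping,
    # theta^{(k)}_{ij} = theta^{(k)}_{i,i+1} + ... + theta^{(k)}_{j-1,j}.
    # The sketch assumes all diagonal entries of rho are nonzero.
    n = rho.shape[0]
    T = thetas.shape[0]
    pairs = [(i, j) for i in range(n) for j in range(i + 1, n)]
    B = np.array([rho[i, j] / np.sqrt(rho[i, i] * rho[j, j])
                  for i, j in pairs] + [1.0])
    A = np.zeros((len(pairs) + 1, T), dtype=complex)
    for col in range(T):
        cum = np.concatenate([[0.0], np.cumsum(thetas[col])])
        for row, (i, j) in enumerate(pairs):
            A[row, col] = np.exp(1j * (cum[j] - cum[i]))
    A[-1, :] = 1.0
    M = np.vstack([A.real, A.imag])    # stack real and imaginary parts
    b = np.concatenate([B.real, B.imag])
    P, *_ = np.linalg.lstsq(M, b, rcond=None)
    ok = (np.allclose(M @ P, b)
          and np.all(P > -1e-9) and np.all(P < 1 + 1e-9))
    return P, ok
\end{verbatim}
With the four phase columns used in Example 1 below, this reproduces the probabilities given there.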
{\it Example 1}. Consider the following three dimensional state,
\begin{equation}\label{3-dim state}
\rho=\sum_{i,j=1}^3 \rho_{ij} |i\rangle\langle j|.
\end{equation}
According to Theorem 2, $C_a(\rho)=C_a^{\infty}(\rho)$ if and only if the matrix equation
\begin{equation}\label{eq dim 3}
\left(
\begin{array}{ccccccc}
e^{{\rm i}\theta^{(0)}_{{12}}} & e^{{\rm i}\theta^{(1)}_{{12}}} & \cdots & e^{{\rm i}\theta^{(T-1)}_{{12}}}\\
e^{{\rm i}\theta^{(0)}_{{23}}} & e^{{\rm i}\theta^{(1)}_{{23}}} & \cdots & e^{{\rm i}\theta^{(T-1)}_{{23}}}\\
e^{{\rm i}\theta^{(0)}_{{13}}} & e^{{\rm i}\theta^{(1)}_{{13}}} & \cdots & e^{{\rm i}\theta^{(T-1)}_{{13}}}\\
1 & 1 & \cdots & 1
\end{array}
\right)
\left(
\begin{array}{ccccccc}
p_0\\p_1\\\cdots\\p_{T-1}
\end{array}
\right)
=\left(
\begin{array}{ccccccc}
\rho_{12}/\sqrt{\rho_{11}\rho_{22}}\\
\rho_{23}/\sqrt{\rho_{22}\rho_{33}}\\
\rho_{13}/\sqrt{\rho_{11}\rho_{33}}\\
1
\end{array}
\right)
\end{equation}
with $\theta^{(k)}_{{12}}+\theta^{(k)}_{{23}}=\theta^{(k)}_{{13}}$,
has a solution $P$ satisfying $0\leq p_k\leq 1$ for some choice of the free arguments $\theta^{(k)}_{{12}}$ and $\theta^{(k)}_{{23}}$.
For simplicity, suppose $\rho_{12}$, $\rho_{23}$ and $\rho_{13}$ are all non-zero real numbers.
Denote $\rho_{12}/\sqrt{\rho_{11}\rho_{22}}=r_1$, $\rho_{23}/\sqrt{\rho_{22}\rho_{33}}=r_2$ and $\rho_{13}/\sqrt{\rho_{11}\rho_{33}}=r_3$.
First, set $T=4$ and $\theta^{(0)}_{{12}}=\theta^{(0)}_{{23}}=0$, $\theta^{(1)}_{{12}}=\pi$, $\theta^{(1)}_{{23}}=0$, $\theta^{(2)}_{{12}}=\theta^{(2)}_{{23}}=\pi$, $\theta^{(3)}_{{12}}=0$, $\theta^{(3)}_{{23}}=\pi$.
Then the matrix equation (\ref{eq dim 3}) becomes
\begin{equation}
\left(
\begin{array}{ccccccc}
1 & -1 & -1 & 1\\
1 & 1 & -1 & -1\\
1& -1 & 1 & -1\\
1 & 1 & 1 & 1
\end{array}
\right)
\left(
\begin{array}{ccccccc}
p_0\\p_1\\p_2\\p_3
\end{array}
\right)
=\left(
\begin{array}{ccccccc}
r_1\\
r_2\\
r_3\\
1
\end{array}
\right).
\end{equation}
The unique solution of the matrix equation above is $p_0=\frac{1}{4}(r_1+r_2+r_3+1)$, $p_1=\frac{1}{4}(r_2-r_1-r_3+1)$, $p_2=\frac{1}{4}(r_3-r_1-r_2+1)$, $p_3=\frac{1}{4}(r_1-r_2-r_3+1)$.
Obviously, $p_0,p_1,p_2,p_3\leq 1$. Therefore, if $r_1+r_2+r_3+1\geq 0$, $r_1-r_2-r_3+1\geq 0$, $r_2-r_1-r_3+1\geq 0$ and $r_3-r_1-r_2+1\geq 0$, then $\{p_i\}$ and $\{\theta_{ij}^{(k)}\}$ are one set of solutions of Eq. (\ref{eq dim 3}) for $C_a(\rho)=C_a^{\infty}(\rho)$. In this case the probabilities $\{p_i\}$ with the pure states
\begin{equation}\label{3-dim optimal dec}
\begin{array}{rcl}
|\psi_0\rangle&=&\sqrt{\rho_{11}}|1\rangle+\sqrt{\rho_{22}}|2\rangle+\sqrt{\rho_{33}}|3\rangle, \\ |\psi_1\rangle&=&-\sqrt{\rho_{11}}|1\rangle+\sqrt{\rho_{22}}|2\rangle+\sqrt{\rho_{33}}|3\rangle,\\
|\psi_2\rangle&=&\sqrt{\rho_{11}}|1\rangle-\sqrt{\rho_{22}}|2\rangle+\sqrt{\rho_{33}}|3\rangle,\\
|\psi_3\rangle&=&\sqrt{\rho_{11}}|1\rangle+\sqrt{\rho_{22}}|2\rangle-\sqrt{\rho_{33}}|3\rangle
\end{array}
\end{equation}
constitute an optimal decomposition of $\rho$ in Eq. (\ref{3-dim state}) giving $C_a(\rho)=C_a^{\infty}(\rho)$.
Such quantum states all belong to the polyhedron shown in Fig. 1.
\begin{center}
\begin{figure}
\caption{(Color online) Quantum states in this polyhedron satisfy four inequalities: $r_1+r_2+r_3+1\geq 0$, $r_1-r_2-r_3+1\geq 0$, $r_2-r_1-r_3+1\geq 0$ and $r_3-r_1-r_2+1\geq 0$. The coherence of assistance
attains the regularized coherence of assistance for these quantum states.}
\label{fig}
\end{figure}
\end{center}
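The closed form solution above can also be verified numerically; in the following sketch (illustration only, assuming NumPy) the values of $\rho_{ii}$ and $r_i$ are hypothetical numbers chosen inside the polyhedron.
\begin{verbatim}
import numpy as np

d = np.array([0.5, 0.3, 0.2])          # rho_11, rho_22, rho_33
r = np.array([0.3, 0.2, 0.1])          # r_1, r_2, r_3, inside the polyhedron
rho = np.diag(d) + 0j
rho[0, 1] = rho[1, 0] = r[0] * np.sqrt(d[0] * d[1])
rho[1, 2] = rho[2, 1] = r[1] * np.sqrt(d[1] * d[2])
rho[0, 2] = rho[2, 0] = r[2] * np.sqrt(d[0] * d[2])

p = 0.25 * np.array([1 + r[0] + r[1] + r[2],
                     1 + r[1] - r[0] - r[2],
                     1 + r[2] - r[0] - r[1],
                     1 + r[0] - r[1] - r[2]])
signs = np.array([[1, 1, 1], [-1, 1, 1], [1, -1, 1], [1, 1, -1]])
psis = signs * np.sqrt(d)              # rows are |psi_0>, ..., |psi_3>

# the ensemble {p_k, |psi_k>} reproduces rho ...
recon = sum(pk * np.outer(v, v) for pk, v in zip(p, psis))
assert np.allclose(recon, rho)
# ... and every |psi_k> has the same diagonal as rho,
# so C_a(rho) = S(Delta(rho)) = C_a^infty(rho) by Theorem 1
assert all(np.allclose(v ** 2, d) for v in psis)
\end{verbatim}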
{\it Example 2}. Consider an $n$-dimensional state $\rho=\sum_{i,j=1}^n \rho_{ij} |i\rangle \langle j|$
such that
\begin{eqnarray}\label{eq n-dim}
\sum_{k=1}^{n-1} p_k f(k)+p_0=\rho_{ij}/\sqrt{\rho_{ii}\rho_{jj}}, \ \ i<j,
\end{eqnarray}
holds for some probabilities $p_k$, where $f(k)=-1$ for $i\leq k<j$ and $f(k)=1$ otherwise, with $0\leq p_k\leq 1$ for $k=0,1,\cdots,n-1$.
Eq. (\ref{eq n-dim}) is obtained from Eq. (\ref{equation for p}) by choosing
$\theta_{1j}^{(1)}=\pi$, $j=2,\cdots, n$; $\theta_{1j}^{(2)}=\theta_{2j}^{(2)}=\pi$, $j=3,\cdots, n$;
$\cdots$;
$\theta_{1n}^{(n-1)}=\cdots=\theta_{n-1,n}^{(n-1)}=\pi$; and all other arguments 0.
Therefore, if an $n$-dimensional quantum state $\rho$ satisfies Eq. (\ref{eq n-dim}) for some probabilities $p_k$, then Eq. (\ref{equation for p}) is solvable with these $p_k$ and the arguments chosen above, and consequently $C_a(\rho)=C_a^{\infty}(\rho)$.
For the given arguments
$\theta_{ij}^{(k)}$, we find the corresponding pure states are
\begin{equation}\label{n-dim optimal dec}
\begin{array}{rcl}
|\psi_0\rangle&=&\sqrt{\rho_{11}}|1\rangle+\sqrt{\rho_{22}}|2\rangle+\cdots+\sqrt{\rho_{nn}}|n\rangle,\\ |\psi_1\rangle&=&-\sqrt{\rho_{11}}|1\rangle+\sqrt{\rho_{22}}|2\rangle+\cdots+\sqrt{\rho_{nn}}|n\rangle, \\ |\psi_2\rangle&=&-\sqrt{\rho_{11}}|1\rangle-\sqrt{\rho_{22}}|2\rangle+\cdots+\sqrt{\rho_{nn}}|n\rangle,\\ &\cdots&\\
|\psi_{n-1}\rangle&=&-\sqrt{\rho_{11}}|1\rangle-\sqrt{\rho_{22}}|2\rangle+\cdots-\sqrt{\rho_{n-1,n-1}}|n-1\rangle+\sqrt{\rho_{nn}}|n\rangle.
\end{array}
\end{equation}
Then $\{p_k, |\psi_k\rangle\}$ constitutes an optimal decomposition of $\rho$ with $P=(p_0,p_1,\cdots,p_{n-1})^t$ the solution of Eq. (\ref{eq n-dim}) and $\{|\psi_k\rangle\}$ given in Eqs. (\ref{n-dim optimal dec}).
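For instance, for $n=3$, Eq. (\ref{eq n-dim}) reads
\begin{eqnarray*}
p_0 - p_1 + p_2 &=& \rho_{12}/\sqrt{\rho_{11}\rho_{22}},\\
p_0 - p_1 - p_2 &=& \rho_{13}/\sqrt{\rho_{11}\rho_{33}},\\
p_0 + p_1 - p_2 &=& \rho_{23}/\sqrt{\rho_{22}\rho_{33}},
\end{eqnarray*}
together with $p_0+p_1+p_2=1$ and $0\leq p_k\leq 1$; any state admitting such a solution satisfies $C_a(\rho)=C_a^{\infty}(\rho)$ with the three pure states $|\psi_0\rangle$, $|\psi_1\rangle$ and $|\psi_2\rangle$ above.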
The coherence of assistance $C_a(\rho)$ is the maximal relative entropy coherence obtained with the help of another party making local measurements and one way classical communication in the one copy setting; it can be increased further in the many copy setting. For a quantum state $\rho$, the equality $C_a(\rho)=C_a^{\infty}(\rho)$ means that increasing the relative entropy coherence in the one copy setting is equivalent to doing so in the many copy setting. Therefore, the many copy setting and joint measurements by the assistant are redundant.
By Theorem 2 we have presented some classes of quantum states whose coherence of assistance $C_a(\rho)$ reaches the regularized coherence of assistance $C_a^{\infty}(\rho)$, together with the corresponding optimal pure state decompositions for each class. Based on these results, the protocol for obtaining the maximal relative entropy coherence with the help of an assistant using local measurement and one way communication can be given explicitly.
As an example let us consider the three dimensional quantum state given by Eq. (\ref{3-dim state}), denoted as $\rho_B$, which is held by Bob. As a purification
we first prepare a pure entangled state $|\psi\rangle_{AB}=\sum_{i=0}^3 \sqrt{p_i}\,|i\rangle_A|\psi_i\rangle_B$, with $\{|\psi_i\rangle\}_{i=0}^3$ given in Eqs. (\ref{3-dim optimal dec}). Then Alice performs the optimal von Neumann measurement in the basis $\{|i\rangle_A\}$.
If Alice's part is projected onto the state $|i\rangle_A$, Bob's state collapses to $|\psi_i\rangle_B$, whose relative entropy coherence is $S(\Delta(\rho_B))$.
After receiving Alice's measurement outcome via a
classical communication channel, Bob holds his state as a member of a four-state ensemble in which every state has the same relative entropy coherence $S(\Delta(\rho_B))$. Therefore the final relative entropy coherence for Bob is $S(\Delta(\rho_B))$, which is the maximal relative entropy coherence he can obtain in this one-way assisted protocol.
\section{Conclusions}
To summarize, we have investigated the saturation of the coherence of assistance $C_a(\rho)$ at its upper bound, the regularized coherence of assistance $C_a^{\infty}(\rho)$. Necessary and sufficient conditions have been provided. In particular, for some special quantum states in two-, three- and general higher-dimensional systems, the optimal decompositions for which $C_a(\rho)$ and $C_a^{\infty}(\rho)$ coincide have been
presented, and the corresponding optimal protocol for obtaining the maximal relative entropy coherence with the help of an assistant using local measurements and one-way communication has been laid out.
These results have significant implications in two respects. First, the equality $C_a(\rho)=C_a^{\infty}(\rho)$ implies the additivity of the coherence of assistance $C_a(\rho)$; we have characterized mathematically which quantum states make the coherence of assistance additive. Second,
the equality $C_a(\rho)=C_a^{\infty}(\rho)$ shows that the maximal relative entropy coherence obtained in the one-way assisted protocol is the same in the one-copy and in the many-copy setting; we have identified the quantum states for which
the one-copy setting is already sufficient.
Note that the coherence of assistance $C_a(\rho)$ is the maximal relative entropy coherence attained with the help of another party by local measurements and one-way
communication in the one-copy setting, while the relative entropy coherence is in fact the distillable coherence. Therefore, the coherence of assistance $C_a$ quantifies
the one-way coherence distillation rate with the help of another party in the one-copy setting.
In the many-copy setting a higher one-way coherence distillation rate can be obtained, and
$C_a^{\infty}(\rho)$ characterizes the one-way coherence distillation rate in the infinite-copy limit.
The equality $C_a(\rho)=C_a^{\infty}(\rho)$ thus shows the equivalence of the one-way distillation rate in the one-copy setting and the one-way distillation rate in the many-copy setting assisted by another party.
In Ref. \cite{K. D. Wu}, an experimental realization in a linear optical system of obtaining the maximal relative entropy coherence for two-dimensional quantum states in the assisted distillation
protocol has been presented. Those results rely on the one-copy setting, since the optimal distillation rate of two-dimensional quantum states can already be reached in the one-copy scenario. Our results may help with the experimental assisted distillation of coherence in higher-dimensional systems.
\noindent{\bf Acknowledgments}\, This work is supported by the NSF of China under
Grant Nos. 11401032 and 11675113.
\end{document}
\begin{document}
\title
{Recovering Functions Defined on $\Bbb S^{n - 1}$ by Integration on Subspheres Obtained from Hyperplanes Tangent to a Spheroid}
\author{Yehonatan Salman \\ Weizmann Institute of Science \\ Email: [email protected]}
\date{}
\maketitle
\begin{abstract}
The aim of this article is to introduce a method for recovering functions, defined on the $n - 1$ dimensional unit sphere $\Bbb S^{n - 1}$, using their spherical transform, which integrates functions on $n - 2$ dimensional subspheres, on a prescribed family of subspheres of integration. This family of subspheres is obtained as follows: we take a spheroid $\Sigma$ inside $\Bbb S^{n - 1}$ which contains the points $\pm e_{n}$, and each subsphere of integration is then obtained as the intersection of a hyperplane tangent to $\Sigma$ with $\Bbb S^{n - 1}$. In particular, by shrinking the spheroid to its main axis we obtain, as a limiting case, a method for recovering functions in the case where the subspheres of integration pass through a common point in $\Bbb S^{n - 1}$.
\end{abstract}
\section{Introduction and Motivation}
Recovering a function $f$, defined on a manifold $\Omega$, by integrating $f$ on a family $\Gamma$ of submanifolds of $\Omega$, in the case where one obtains a well-posed problem (i.e., when the dimension of the family $\Gamma$ is equal to the dimension of $\Omega$), is one of the main subjects of research in Integral Geometry. In many cases, a solution can be found by assuming some symmetry properties of the manifold $\Omega$, such as translation and rotation invariance.
In the case where $\Omega$ is a sphere one can use its special geometry in order to reconstruct a function $f$ in the case where the family $\Gamma$ consists of subspheres of $\Omega$, where by a subsphere we mean a nonempty intersection of $\Omega$ with a hyperplane. If we assume, without loss of generality, that $\Omega$ is the unit sphere $\Bbb S^{n - 1}$, then the recovery problem for $\Omega$ was studied and solved in cases where the family $\Gamma$ of subspheres of integration has a specific geometric flavor. Some notable examples are when $\Gamma$ consists of great subspheres (i.e., intersections of hyperplanes which pass through the origin with $\Bbb S^{n - 1}$) (\cite{3, 5, 6, 7, 9, 11, 12, 13, 15}), of subspheres which pass through a common point which lies on $\Bbb S^{n - 1}$ (\cite{9, 13, 15}), of subspheres which are orthogonal to a subsphere of $\Bbb S^{n - 1}$ (\cite{2, 8, 10, 16}), and when $\Gamma$ consists of subspheres obtained by intersections of $\Bbb S^{n - 1}$ with hyperplanes which pass through a common point inside $\Bbb S^{n - 1}$ (\cite{13, 14, 15}).
The main aim of this paper is to continue the research of the above mentioned papers and to obtain inversion procedures for families of subspheres of $\Bbb S^{n - 1}$ which have a specific geometry. In our case, each subsphere in the family $\Gamma$ is obtained by the intersection of $\Bbb S^{n - 1}$ with a hyperplane which is tangent to a fixed spheroid $\Sigma$ inside $\Bbb S^{n - 1}$ containing the north and south poles $\pm e_{n}$. In particular, we will show how, by shrinking $\Sigma$ into its main axis, one can obtain an inversion procedure for the case of the so called spherical slice transform (see \cite[Chapter 3, page 108]{9}) where the subspheres of integration pass through a common point $p$ which lies on $\Bbb S^{n - 1}$ (in our case $p$ will be the south pole $-e_{n}$). It should be mentioned, however, that in this paper the solution for the above reconstruction problem is given as a series of functions rather than in a closed form. This is because the method used here includes, at some stage, an expansion into spherical harmonics. Expansion into spherical harmonics can be used in our case since the spheroid $\Sigma$ has a rotational symmetry with respect to its main axis. Of course, if $\Sigma$ is a general ellipsoid inside $\Bbb S^{n - 1}$ then one cannot use the method presented here; the solution of this general problem is left for future research.
Our paper is organized as follows. In Chapter 2 we give the necessary mathematical background and formulate the main result, Theorem 2.1. In Chapter 3 we discuss the method behind the proof of Theorem 2.1 and show how the limiting case, where $\Sigma$ shrinks into its main axis, yields an inversion procedure for the spherical slice transform. In Chapter 4 we give the proof of Theorem 2.1. Chapter 5 is more technical and contains a characterization of the stereographic projections of the subspheres of integration, as well as a proof of the factorization of the infinitesimal volume measure of each subsphere of integration under the stereographic projection.
\section{Mathematical Background and the Main Result}
Denote by $\Bbb R^{n}$ the $n$ dimensional Euclidean space and by $\langle\cdot,\cdot\rangle$ the standard scalar product on $\Bbb R^{n}$. Denote by $\Bbb R^{+}$ the ray $[0,\infty)$, by $\Bbb S^{n - 1}$ the $n - 1$ dimensional unit sphere of $\Bbb R^{n}$, i.e., $\Bbb S^{n - 1} = \left\{x\in\Bbb R^{n}:|x| = 1\right\}$ and by $\omega_{n - 1} = 2\pi^{n / 2}/\Gamma(n / 2)$ the volume of $\Bbb S^{n - 1}$. Denote by $C\left(\Bbb S^{n - 1}\right)$ the set of continuous functions defined on $\Bbb S^{n - 1}$ and on $C\left(\Bbb S^{n - 1}\right)$ define the following inner product
$$\left\langle f_{1}, f_{2}\right\rangle_{\Bbb S^{n - 1}} = \int_{\Bbb S^{n - 1}}f_{1}(\psi)\overline{f_{2}(\psi)}d\psi, \quad f_{1},f_{2}\in C\left(\Bbb S^{n - 1}\right)$$
where $d\psi$ is the standard infinitesimal volume measure on $\Bbb S^{n - 1}$.
For a point $\psi$ in $\Bbb S^{n - 1}$ define the following $n - 2$ dimensional subsphere of $\Bbb S^{n - 1}$:
$$\Bbb S_{\psi}^{n - 2} = \left\{x\in\Bbb S^{n - 1}:\langle x,\psi\rangle = 0\right\}.$$
For a fixed real number $\lambda > 0$ define the following spheroid in $\Bbb R^{n}$:
\begin{equation}\Sigma_{\lambda} = \{x\in\Bbb R^{n}:x_{n}^{2} + (x_{1}^{2} + ... + x_{n - 1}^{2})\cosh^{2}\lambda = 1\}.\end{equation}
Define the following stereographic and inverse stereographic projections respectively,
$$\Lambda:\Bbb S^{n - 1}\setminus\{e_{n}\} \rightarrow \Bbb R^{n - 1}, \quad \Lambda(x) = \left(\frac{x_{1}}{1 - x_{n}},...,\frac{x_{n - 1}}{1 - x_{n}}\right),$$
$$\Lambda^{-1}:\Bbb R^{n - 1} \rightarrow \Bbb S^{n - 1}\setminus\{e_{n}\}, \quad \Lambda^{-1}(y) = \left(\frac{2y_{1}}{1 + |y|^{2}},...,\frac{2y_{n - 1}}{1 + |y|^{2}}, \frac{- 1 + |y|^{2}}{1 + |y|^{2}}\right).$$
We define the "stereographic projection" $f^{\alphast}$ of a function $f$ in $C(\Bbb S^{n - 1})$ by
$$\hskip 0.1cmskip 0.2cmip-8.75cm f^{\alphast}:\Bbb R^{n - 1}\rightarrow\Bbb R, f^{\alphast} = f\circ\Lambda^{-1}.$$
We will also define the function
\begin{equation} f^{\ast\ast}(x) = \frac{(f\circ\Lambda^{-1})(x)}{|x|^{n - 2}(1 + |x|^{2})^{n - 2}}\quad\textrm{where}\quad x\in\Bbb R^{n - 1}\setminus\{0\}.\end{equation}
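For readers who wish to experiment with these definitions numerically, the following short Python sketch implements $\Lambda$ and $\Lambda^{-1}$ and checks that they are mutually inverse; the function names are ours and are not part of the paper.
\begin{verbatim}
import numpy as np

def stereo(x):
    """Lambda: S^{n-1} minus {e_n} -> R^{n-1}, projection from the north pole e_n."""
    return x[:-1] / (1 - x[-1])

def stereo_inv(y):
    """Lambda^{-1}: R^{n-1} -> S^{n-1} minus {e_n}."""
    s = np.sum(y ** 2)
    return np.append(2 * y, s - 1) / (1 + s)

y = np.array([0.3, -1.2, 0.5])
print(np.allclose(stereo(stereo_inv(y)), y))   # round trip, True up to rounding
\end{verbatim}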
Denote by $\mathcal{G}$ the isotropic group of rotations in $\Bbb S^{n - 1}$ which leave the unit vector $e_{n}$ fixed. That is
\vskip-0.2cm
$$\mathcal{G} = \{g\in SO(n):ge_{n} = e_{n}\}.$$
Define the Gegenbauer polynomials $C_{l}^{\lambda} = C_{l}^{\lambda}(t)$ of order $\lambda > -\frac{1}{2}$ and degree $l$ by the following orthogonality relations
\[
\int_{-1}^{1}C_{l}^{\lambda}(t)C_{k}^{\lambda}(t)(1 - t^{2})^{\lambda - \frac{1}{2}}dt =
\begin{cases}
0, & l\neq k,\\
\frac{2^{2\lambda - 1}\Gamma^{2}\left(\lambda + \frac{1}{2}\right)l!}{(l + \lambda)\Gamma(l + 2\lambda)}, & l = k,
\end{cases}
\]
and for $\lambda = -\frac{1}{2}$ define $C_{l}^{-\frac{1}{2}}(t) = \cos(l\arccos(t))$.
For every integer $m\geq0$ define the following function $h_{m,\lambda}:\Bbb R^{+}\rightarrow\Bbb R$ by
\begin{equation} h_{m,\lambda}(x) =
\begin{cases}
\left(2^{4 - n}\tanh^{3 - n}\lambda\right)x\,C_{m}^{\frac{n - 3}{2}}\left(\frac{x^{2} + 1 - \tanh^{2}\lambda}{2x}\right)\left((1 + \tanh\lambda - x)(1 + \tanh\lambda + x)(x - 1 + \tanh\lambda)(x + 1 - \tanh\lambda)\right)^{\frac{n - 4}{2}}, & 1 - \tanh\lambda \leq x \leq 1 + \tanh\lambda,\\
0, & \text{otherwise}.
\end{cases}
\end{equation}
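Since $h_{m,\lambda}$ enters the inversion formula only through its Mellin transform, it is convenient to be able to evaluate it numerically. A small Python sketch (assuming the Gegenbauer order $(n-3)/2$ used throughout, and restricted to $n\geq4$ to avoid the endpoint singularity) could read:
\begin{verbatim}
import numpy as np
from scipy.special import eval_gegenbauer

def h(m, lam, x, n=5):
    """Sketch of h_{m,lambda} from (2.3); n is the ambient dimension, n >= 4 assumed."""
    t = np.tanh(lam)
    x = np.asarray(x, dtype=float)
    inside = (x >= 1 - t) & (x <= 1 + t)
    arg = (x**2 + 1 - t**2) / (2 * np.where(inside, x, 1.0))
    quartic = np.where(inside, (1 + t - x)*(1 + t + x)*(x - 1 + t)*(x + 1 - t), 1.0)
    val = 2**(4 - n) * t**(3 - n) * x * eval_gegenbauer(m, (n - 3)/2, arg) * quartic**((n - 4)/2)
    return np.where(inside, val, 0.0)

xs = np.linspace(0, 2, 9)
print(h(2, 0.7, xs))   # vanishes outside [1 - tanh(0.7), 1 + tanh(0.7)]
\end{verbatim}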
For a function $F$, defined on $\Bbb R^{+}$, define the Mellin transform $\mathcal{M}F$ of $F$ by
$$(\mathcal{M}F)(s) = \int_{0}^{\infty}y^{s - 1}F(y)dy, \quad s\in\Bbb C$$
where it should be noted that the above integral might not converge for every $s\in\Bbb C$.\\
For the Mellin transform we have the following inversion and convolution formulas for two functions $F_{1}$ and $F_{2}$ defined on $\Bbb R^{+}$ (see \cite{4}, Chapter 8.2 and 8.3):
\begin{equation}\mathcal{M}^{-1}(F_{1})(r) = \frac{1}{2\pi i}\int_{\varrho - i\infty}^{\varrho + i\infty}r^{-s}\mathcal{M}(F_{1})(s)ds,\end{equation}
\begin{equation}\mathcal{M}(F_{1}\star F_{2})(r) = \mathcal{M}(F_{1})(r)\mathcal{M}(F_{2})(1 - r)\end{equation}
where $\varrho\in(0,1)$ and where the convolution $F_{1}\star F_{2}$ is defined by
\vskip-0.2cm
$$ (F_{1}\star F_{2})(s) = \int_{0}^{\infty}F_{1}(ss')F_{2}(s')ds'.$$
As we will show later, if $F_{1},F_{2}\in L^{1}(\Bbb R^{+})$ then formulas (2.4) and (2.5) are valid when $0 < \Re(r) < 1$.
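The convolution formula (2.5) is easy to test numerically. The following Python sketch takes $F_{1}=F_{2}=e^{-y}$, for which $\mathcal{M}(F_i)(s)=\Gamma(s)$, and compares both sides at a point of the strip $0<\Re(s)<1$ (the nested quadrature is slow but adequate for a sanity check):
\begin{verbatim}
import numpy as np
from scipy.integrate import quad
from scipy.special import gamma

F1 = F2 = lambda y: np.exp(-y)                     # test pair with Mellin transform Gamma(s)
conv = lambda r: quad(lambda y: F1(r * y) * F2(y), 0, np.inf)[0]      # (F1 * F2)(r)
mellin = lambda F, s: quad(lambda y: y**(s - 1) * F(y), 0, np.inf)[0]

s = 0.4                                            # any point of the strip 0 < Re(s) < 1
lhs = mellin(conv, s)                              # M(F1 * F2)(s)
rhs = mellin(F1, s) * mellin(F2, 1 - s)            # M(F1)(s) M(F2)(1-s)
print(lhs, rhs, gamma(s) * gamma(1 - s))           # all three agree (= pi / sin(pi s))
\end{verbatim}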
For a function $f$ in $C\left(\Bbb S^{n - 1}\right)$ define its spherical transform $Sf$ to be the integral transform which integrates $f$ on $n - 2$ dimensional subspheres in $\Bbb S^{n - 1}$. That is,
$$ (Sf)(\mathbf{\mathcal{S}}) = \int_{\mathbf{\mathcal{S}}}f(x)dS_{x}$$
where $dS_{x}$ is the standard infinitesimal volume measure on the subsphere $\mathbf{\mathcal{S}}$ of integration. Our aim is to recover functions in $C(\Bbb S^{n - 1})$ using their spherical transform where each subsphere of integration is obtained by the intersection of $\Bbb S^{n - 1}$ with a hyperplane which is tangent to the spheroid $\Sigma_{\lambda}$. Let us denote this family of subspheres by $\Upsilon_{\lambda}$. From Lemma 5.1 it follows that the family of $n - 2$ dimensional spheres $\Upsilon_{\lambda}^{\ast}$, which is obtained by projecting each subsphere in $\Upsilon_{\lambda}$ using the stereographic projection $\Lambda$, can be parameterized as in (5.1). Thus, by taking the inverse stereographic projection $\Lambda^{-1}$ we obtain the following parametrization for $\Upsilon_{\lambda}$:
\begin{equation}\Upsilon_{\lambda} = \underset{\psi\in\Bbb S^{n - 2}, c > 0}{\bigcup}\left\{\{\Lambda^{-1}\left(c\psi + c(\tanh\lambda)\omega\right):\omega\in\Bbb S^{n - 2}\}\right\}.\end{equation}
Thus, if we define the following $n - 2$ dimensional sphere in $\Bbb S^{n - 1}$
$$\mathrm{S}_{\psi,c} = \left\{\Lambda^{-1}\left(c\psi + c(\tanh\lambda)\omega\right), \omega\in\Bbb S^{n - 2}\right\}$$
then our data consists of the family of integrals
\begin{equation}(Sf)(\mathrm{S}_{\psi,c}) = \int_{\mathrm{S}_{\psi,c}}f(x)d\mathrm{S}_{\psi,c}, \quad \psi\in\Bbb S^{n - 2}, \ c > 0\end{equation}
where $d\mathrm{S}_{\psi,c}$ is the standard measure on $\mathrm{S}_{\psi,c}$. We have the following result for the recovering of a function $f$ in $C\left(\Bbb S^{n - 1}\right)$ from the family of integrals (2.7).
\begin{thm}
Let $f$ be a function in $C\left(\Bbb S^{n - 1}\right)$ such that $f^{\ast\ast}$ as defined in (2.2) is in $L^{1}\left(\Bbb R^{n - 1}\setminus\{0\}\right)$ and such that
$$f^{\ast}(r\zeta) = \sum_{m = 0}^{\infty}\sum_{l = 1}^{d_{m}}f_{m,l}(r)Y_{l}^{m}(\zeta)$$
is the spherical harmonic expansion of $f^{\ast} = f\circ\Lambda^{-1}$. Then, for each $m\geq0$ and $1\leq l\leq d_{m}$ the term $f_{m,l}$ can be recovered from the integral transform (2.7) as follows
$$f_{m,l}(r) = \frac{(1 + r^{2})^{n - 2}}{2\pi i}\int_{\varrho - i\infty}^{\varrho + i\infty}r^{-s}\frac{\mathcal{M}\left(\mathcal{K}_{m,l}\right)(s)}{\mathcal{M}\left(h_{m,\lambda}\right)(1 - s)}ds$$
where
$$\mathcal{K}_{m,l}(c) = \frac{1}{(2c\tanh\lambda)^{n - 2}\omega_{n - 3}}\int_{\Bbb S^{n - 2}}(Sf)(\mathrm{S}_{\psi,c})\overline{Y_{l}^{m}(\psi)}d\psi,$$
$h_{m,\lambda}$ is defined as in (2.3) and $\varrho$ is any number in the interval $(0, 1)$.
\end{thm}
\section{The Method Behind the Proof of Theorem 2.1 and the Limiting Case $\lambda\rightarrow\infty$}
The idea behind the proof of Theorem 2.1 consists mainly of three steps.
In the first step we use the stereographic projection in order to project our family $\Upsilon_{\lambda}$ of subspheres in $\Bbb S^{n - 1}$ into a family of hyperspheres in $\Bbb R^{n - 1}$. As Lemma 5.1 shows, the family $\Upsilon_{\lambda}$ is projected onto a well defined family $\Upsilon_{\lambda}^{\ast}$ of hyperspheres in $\Bbb R^{n - 1}$. More specifically, each sphere in $\Upsilon_{\lambda}^{\ast}$ has its radius proportional, with the factor $\tanh\lambda$, to the distance of its center from the origin. As Lemma 5.2 shows, the infinitesimal volume measure of each subsphere in $\Upsilon_{\lambda}$ is factored under the stereographic projection and thus we can make all of our analysis on $\Bbb R^{n - 1}$ with the family $\Upsilon_{\lambda}^{\ast}$ of spheres of integration.
In the second step we exploit the rotational invariance (with respect to the origin) of the family $\Upsilon_{\lambda}^{\ast}$ in order to reduce our problem to each term in the spherical harmonic expansion of the modified projection $g(x) = f^{\ast}(x) / (1 + |x|^{2})^{n - 2}$ of the function $f$ in question to be recovered. For each such term $g_{m,l}$ we obtain a convolution type equation relating $g_{m,l}$ to its corresponding term $\mathcal{K}_{m,l}$ in the expansion of the integral transform (2.7) into spherical harmonics. Using the inversion and convolution formulas for the Mellin transform one is able to express the term $g_{m,l}$ in terms of $\mathcal{K}_{m,l}$. Since we can extract each such term $g_{m,l}$ we can obviously recover $g$ and thus we can also recover $f^{\ast}$.
In the third and final step we just use the inverse stereographic projection on $f^{\ast}$ in order to return to our original function $f$.\\
Observe that by Lemma 5.1 it follows that for $\lambda\rightarrow\infty$ the projected family of spheres $\Upsilon_{\lambda}^{\ast}$ has the following parametrization
$$\Upsilon_{\lambda}^{\ast} = \underset{\psi\in\Bbb S^{n - 2}, c > 0}{\bigcup}\left\{\{c\psi + c\omega:\omega\in\Bbb S^{n - 2}\}\right\}.$$
That is, $\Upsilon_{\lambda}^{\ast}$ consists of the hyperspheres in $\Bbb R^{n - 1}$ passing through the origin. By taking the inverse stereographic projection it is an easy exercise to show that the corresponding family $\Upsilon_{\lambda}$ of subspheres consists of all the subspheres which pass through the south pole $-e_{n}$. Hence, this limiting case yields an inversion procedure for the case where the subspheres of integration pass through a common point which lies on $\Bbb S^{n - 1}$. Observe that for this case the function $h_{m,\lambda}$ has the simpler form
\[ h_{m,\infty}(x) =
\begin{cases}
2^{4 - n}x^{n - 3}(4 - x^{2})^{\frac{n - 4}{2}}C_{m}^{\frac{n - 3}{2}}\left(\frac{x}{2}\right), & 0 \leq x \leq 2,\\
0, & x > 2.
\end{cases}
\]
\section{Proof of Theorem 2.1}
Denote $x = \Lambda^{-1}\left(c\psi + c(\tanh\lambda)\omegamega\right)$, then, by Lemma 5.2, $dx = d\mathrm{S}_{\psi,c}$ is given by formula (5.3). Hence we can write
\vskip-0.2cm
$$(Sf)(\mathrm{S}_{\psi,c})$$
$$ = \int_{\Bbb S^{n - 2}}\frac{(2c\tanh\lambda)^{n - 2}f\left(\Lambda^{-1}\left(c\psi + c(\tanh\lambda)\omega\right)\right)d\omega}{\left(1 + c^{2}\left(1 + \tanh^{2}\lambda + 2\langle\omega,\psi\rangle\tanh\lambda\right)\right)^{n - 2}}, \quad \psi\in\Bbb S^{n - 2}, \ c > 0.$$
Hence, if we define
\vskip-0.2cm
\begin{equation} (Sf)^{\ast}(\mathrm{S}_{\psi,c}) = \frac{(Sf)(\mathrm{S}_{\psi,c})}{(2c\tanh\lambda)^{n - 2}}, \qquad g(x) = \frac{\left(f\circ\Lambda^{-1}\right)(x)}{(1 + |x|^{2})^{n - 2}},\end{equation}
then we have
\vskip-0.2cm
\begin{equation} (Sf)^{\ast}(\mathrm{S}_{\psi,c}) = \int_{\Bbb S^{n - 2}}g\left(c\psi + c(\tanh\lambda)\omega\right)d\omega, \quad \psi\in\Bbb S^{n - 2}, \ c > 0.\end{equation}
Let us expand $g$ into spherical harmonics in $\Bbb R^{n - 1}$:
$$g(r\zeta) = \sum_{m = 0}^{\infty}\sum_{l = 1}^{d_{m}}g_{m,l}(r)Y_{l}^{m}(\zeta), \quad r\geq0, \ \zeta\in\Bbb S^{n - 2}.$$
Observe that if $r\zeta = c\psi + c(\tanh\lambda)\omegamega$ then
$$r = c\sqrt{1 + \tanh^{2}\lambda + 2\tanh\lambda\langle\psi,\omega\rangle},\quad \zeta = \frac{\psi + (\tanh\lambda)\omega}{\sqrt{1 + \tanh^{2}\lambda + 2\tanh\lambda\langle\psi,\omega\rangle}}.$$
Hence, inserting the spherical harmonic expansion of $g$ into equation (4.2) we obtain
$$(Sf)^{\ast}(\mathrm{S}_{\psi,c}) = \sum_{m = 0}^{\infty}\sum_{l = 1}^{d_{m}}\int_{\Bbb S^{n - 2}}g_{m,l}\left(c\sqrt{1 + \tanh^{2}\lambda + 2\tanh\lambda\langle\psi,\omega\rangle}\right)$$ $$ \times Y_{l}^{m}\left(\frac{\psi + (\tanh\lambda)\omega}{\sqrt{1 + \tanh^{2}\lambda + 2\tanh\lambda\langle\psi,\omega\rangle}}\right)d\omega, \quad \psi\in\Bbb S^{n - 2}, \ c > 0.$$
Making the change of variables
$$\omega = (\cos\varphi)\psi + (\sin\varphi)\eta, \quad (\varphi,\eta)\in[0,\pi]\times\Bbb S_{\psi}^{n - 3}, \quad d\omega = \sin^{n - 3}\varphi\, d\eta\, d\varphi$$
we have
$$(Sf)^{\ast}(\mathrm{S}_{\psi,c}) = \sum_{m = 0}^{\infty}\sum_{l = 1}^{d_{m}}\int_{0}^{\pi}\int_{\Bbb S_{\psi}^{n - 3}}g_{m,l}\left(c\sqrt{1 + \tanh^{2}\lambda + 2\tanh\lambda\cos\varphi}\right)$$ \begin{equation} \times Y_{l}^{m}\left(\frac{(1 + \tanh\lambda\cos\varphi)\psi + (\tanh\lambda\sin\varphi)\eta}{\sqrt{1 + \tanh^{2}\lambda + 2\tanh\lambda\cos\varphi}}\right)\sin^{n - 3}\varphi\, d\eta\, d\varphi, \quad \psi\in\Bbb S^{n - 2}, \ c > 0.\end{equation}
Observe that for constant $\varphi$ and $\lambda$ we can denote
$$\cos\xi = \frac{1 + \tanh\lambda\cos\varphi}{\sqrt{1 + \tanh^{2}\lambda + 2\tanh\lambda\cos\varphi}}, \sin\xi = \frac{\tanh\lambda\sin\varphi}{\sqrt{1 + \tanh^{2}\lambda + 2\tanh\lambda\cos\varphi}}$$
for some $\xi$ in $[-\pi,\pi]$. Since each $Y_{l}^{m}, m\geq0, 1\leq l\leq d_{m}$ is an eigenfunction of the Laplace-Beltrami operator in $\Bbb S^{n - 2}$ it follows (see for example \cite{1}) that
$$\int_{\Bbb S_{\psi}^{n - 3}}Y_{l}^{m}((\cos\xi)\psi + (\sin\xi)\eta)d\eta = \omega_{n - 3}C_{m}^{\frac{n - 3}{2}}(\cos\xi)Y_{l}^{m}(\psi).$$
Hence, from equation (4.3) we have
$$(Sf)^{\ast}(\mathrm{S}_{\psi,c}) = \omega_{n - 3}\sum_{m = 0}^{\infty}\sum_{l = 1}^{d_{m}}Y_{l}^{m}(\psi)\int_{0}^{\pi}g_{m,l}\left(c\sqrt{1 + \tanh^{2}\lambda + 2\tanh\lambda\cos\varphi}\right)$$ $$ \times C_{m}^{\frac{n - 3}{2}}\left(\frac{1 + \tanh\lambda\cos\varphi}{\sqrt{1 + \tanh^{2}\lambda + 2\tanh\lambda\cos\varphi}}\right)\sin^{n - 3}\varphi\, d\varphi, \quad \psi\in\Bbb S^{n - 2}, \ c > 0.$$
From the orthonormality relations for $\left\{Y_{l}^{m}\right\}_{m\geq0, l = 1,...,d_{m}}$ on $\Bbb S^{n - 2}$ it follows that
\vskip-0.2cm
$$\frac{1}{\omega_{n - 3}}\langle (Sf)^{\ast}(\mathrm{S}_{\cdot,c}), Y_{l}^{m}\rangle_{\Bbb S^{n - 2}} = \int_{0}^{\pi}g_{m,l}\left(c\sqrt{1 + \tanh^{2}\lambda + 2\tanh\lambda\cos\varphi}\right)$$
\begin{equation} \times C_{m}^{\frac{n - 3}{2}}\left(\frac{1 + \tanh\lambda\cos\varphi}{\sqrt{1 + \tanh^{2}\lambda + 2\tanh\lambda\cos\varphi}}\right)\sin^{n - 3}\varphi\, d\varphi, \quad c > 0.\end{equation}
Let us make the following change of variables
$$x = \sqrt{1 + \tanh^{2}\lambda + 2\tanh\lambda\cos\varphi}, dx = \frac{\tanh\lambda\sin\varphi d\varphi}{\sqrt{1 + \tanh^{2}\lambda + 2\tanh\lambda\cos\varphi}},$$
in equation (4.4). Then, we have
\vskip-0.2cm
$$\frac{1}{\omega_{n - 3}}\langle (Sf)^{\ast}(\mathrm{S}_{\cdot,c}), Y_{l}^{m}\rangle_{\Bbb S^{n - 2}}$$
$$ = \frac{1}{2^{n - 4}\tanh^{n - 3}\lambda}\int_{1 - \tanh\lambda}^{1 + \tanh\lambda}xg_{m,l}\left(cx\right)
C_{m}^{\frac{n - 3}{2}}\left(\frac{x^{2} + 1 - \tanh^{2}\lambda}{2x}\right)$$ $$\left((1 + \tanh\lambda - x)(1 + \tanh\lambda + x)(x - 1 + \tanh\lambda)(x + 1 - \tanh\lambda)\right)^{\frac{n - 4}{2}}dx$$
\begin{equation} = \int_{0}^{\infty}g_{m,l}(cx)h_{m,\lambda}(x)dx, \quad c > 0.\end{equation}
Thus, if we denote
$$\mathcal{K}_{m,l}(c) = \frac{1}{\omega_{n - 3}}\langle (Sf)^{\ast}(\mathrm{S}_{\cdot,c}), Y_{l}^{m}\rangle_{\Bbb S^{n - 2}}$$
then by using the Mellin convolution formula on equation (4.5) we obtain
\begin{equation}\mathcal{M}\left(\mathcal{K}_{m,l}\right)(s) = \mathcal{M}\left(g_{m,l}\right)(s)\mathcal{M}\left(h_{m,\lambda}\right)(1 - s).\end{equation}
In Lemma 5.3 it is proved that both $\mathcal{M}\left(g_{m,l}\right)$ and $\mathcal{M}\left(h_{m,\lambda}\right)(1 - \cdot)$ exist on the strip $0 < \Re(s) < 1$ and thus equation (4.6) is valid in this domain. Thus, after dividing both sides of equation (4.6) by $\mathcal{M}\left(h_{m,\lambda}\right)(1 - s)$ and using the Mellin inversion formula and the fact that $\mathcal{M}\left(\mathcal{K}_{m,l}\right), \mathcal{M}\left(g_{m,l}\right)$ and $\mathcal{M}\left(h_{m,\lambda}\right)(1 - \cdot)$ are all defined on the strip $0 < \Re(s) < 1$ we have
$$g_{m,l}(r) = \frac{1}{2\pi i}\int_{\varrho - i\infty}^{\varrho + i\infty}r^{-s}\frac{\mathcal{M}\left(\mathcal{K}_{m,l}\right)(s)}{\mathcal{M}\left(h_{m,\lambda}\right)(1 - s)}ds $$
where $\varrho$ is any number in the interval $(0, 1)$. Using the relation (4.1) between $g$ and $f^{\ast} = f\circ\Lambda^{-1}$ proves Theorem 2.1.\hfill$\square$
\section{Appendix}
\begin{lem}
The family $\Upsilon_{\lambda}^{\ast}$ of all the $n - 2$ dimensional spheres in $\Bbb R^{n - 1}$ obtained by projections, under $\Lambda$, of the $n - 2$ dimensional spheres obtained by intersections of $\Bbb S^{n - 1}$ with hyperplanes which are tangent to the spheroid $\Sigma_{\lambda}$, has the following parametrization
\begin{equation}\Upsilon_{\lambda}^{\ast} = \underset{\psi\in\Bbb S^{n - 2}, c > 0}{\bigcup}\left\{\{c\psi + c(\tanh\lambda)\omega:\omega\in\Bbb S^{n - 2}\}\right\}.\end{equation}
Furthermore, the projection of the intersection of $\Bbb S^{n - 1}$ with the unique hyperplane $H$ which is tangent to $\Sigma_{\lambda}$ and has the unit normal $n = ((\cos\theta)\psi, \sin\theta)$ ($\psi\in\Bbb S^{n - 2}, \theta\in\left[-\frac{\pi}{2}, \frac{\pi}{2}\right]$) is the $n - 2$ dimensional sphere in $\Bbb R^{n - 1}$ which corresponds, in the parametrization (5.1), to the subsphere with the parameters $c$ and $\psi$ where the relation between $c$ and $\theta$ is given by
\begin{equation} c = \frac{\cosh\lambda\cos\theta}{\sqrt{1 + \sinh^{2}\lambda\sin^{2}\theta} - \cosh\lambda\sin\theta},\quad \theta = \arctan\left(\frac{c^{2} - \cosh^{2}\lambda}{2c\cdot\cosh^{2}\lambda}\right).\end{equation}
\end{lem}
\begin{proof}
Since $\Sigma_{\lambda}$ is invariant with respect to rotations in the group $\mathcal{G}$, it follows that we can make our analysis first on the two dimensional plane $X_{1}X_{n}$ and then use rotations in $\mathcal{G}$ to obtain a complete parametrization of $\Upsilon_{\lambda}^{\ast}$. Hence, on the plane $X_{1}X_{n}$ and for $\theta\in\left[-\frac{\pi}{2},\frac{\pi}{2}\right]$ we want to find at which distance $t\geq0$ the line $x_{1}\cos\theta + x_{n}\sin\theta = t$ intersects the ellipse $x_{n}^{2} + (\cosh^{2}\lambda)x_{1}^{2} = 1$ at exactly one point. Extracting the variable $x_{n}$ from the first equation and inserting it into the second we obtain the following quadratic equation
\vskip-0.2cm
$$(\sin^{2}\theta\cosh^{2}\lambda + \cos^{2}\theta)x_{1}^{2} -2x_{1}t\cos\theta + t^{2} - \sin^{2}\theta = 0$$
and we need to find the value of $t$ for which the discriminant of the last equation is equal to zero. The discriminant of the last equation is zero when
$t = \sqrt{1 + \sinh^{2}\lambda\sin^{2}\theta} / \cosh\lambda$. Hence, the line
$$l_{\theta}:(x_{1}\cos\theta + x_{n}\sin\theta)\cosh\lambda = \sqrt{1 + \sinh^{2}\lambda\sin^{2}\theta}$$
is the unique line which is tangent to the ellipse $x_{n}^{2} + (\cosh^{2}\lambda)x_{1}^{2} = 1$ and its normal makes an angle $\theta$ with the $X_{1}$ axis. Now we want to find for each $\theta\in\left[-\frac{\pi}{2},\frac{\pi}{2}\right]$ at which points the line $l_{\theta}$ intersects the circle $x_{1}^{2} + x_{n}^{2} = 1$. By a direct substitution we obtain that the intersection points $p_{1} = (x_{1},y_{1})$ and $p_{2} = (x_{2},y_{2})$ are given by
$$\left(\frac{\left(\sinh\lambda\sin\theta + \sqrt{1 + \sinh^{2}\lambda\sin^{2}\theta}\right)\cos\theta}{\cosh\lambda}, \frac{\sqrt{1 + \sinh^{2}\lambda\sin^{2}\theta}\sin\theta - \sinh\lambda\cos^{2}\theta}{\cosh\lambda}\right),$$
$$\left(\frac{\left(-\sinh\lambda\sin\theta + \sqrt{1 + \sinh^{2}\lambda\sin^{2}\theta}\right)\cos\theta}{\cosh\lambda}, \frac{\sqrt{1 + \sinh^{2}\lambda\sin^{2}\theta}\sin\theta + \sinh\lambda\cos^{2}\theta}{\cosh\lambda}\right).$$
Thus, restricting the stereographic projection $\Lambda$ to the circle $\Bbb S^{1}$ in the plane $X_{1}X_{n}$ we have the following images for the points $p_{1}$ and $p_{2}$ under $\Lambda$:
$$p_{1}^{\ast} = \Lambda(p_{1}) = \frac{\left(\sinh\lambda\sin\theta + \sqrt{1 + \sinh^{2}\lambda\sin^{2}\theta}\right)\cos\theta}{\cosh\lambda - \sqrt{1 + \sinh^{2}\lambda\sin^{2}\theta}\sin\theta + \sinh\lambda\cos^{2}\theta},$$
$$p_{2}^{\ast} = \Lambda(p_{2}) = \frac{\left(-\sinh\lambda\sin\theta + \sqrt{1 + \sinh^{2}\lambda\sin^{2}\theta}\right)\cos\theta}{\cosh\lambda - \sqrt{1 + \sinh^{2}\lambda\sin^{2}\theta}\sin\theta - \sinh\lambda\cos^{2}\theta}.$$
Returning back to the whole space $\Bbb R^{n}$, it follows that the image, under $\Lambda$, of the intersection of the unit sphere $\Bbb S^{n - 1}$ with the unique hyperplane $H$ which is tangent to the spheroid $\Sigma_{\lambda}$ and has a unit normal $n = e_{1}\cos\theta + e_{n}\sin\theta$, is the $n - 2$ dimensional sphere in $\Bbb R^{n - 1}$ which has a center at
$$C_{H} = \left(\frac{1}{2}\left(p_{1}^{\ast} + p_{2}^{\ast}\right),\overline{0}\right) = \left(\frac{\cosh\lambda\cos\theta}{\sqrt{1 + \sinh^{2}\lambda\sin^{2}\theta} - \cosh\lambda\sin\theta},\overline{0}\right)$$
and radius
$$R_{H} = \frac{1}{2}\left|p_{1}^{\ast} - p_{2}^{\ast}\right| = \frac{\sinh\lambda\cos\theta}{\sqrt{1 + \sinh^{2}\lambda\sin^{2}\theta} - \cosh\lambda\sin\theta}.$$
Observe that for every $\theta\in\left[-\frac{\pi}{2},\frac{\pi}{2}\right]$ we have the relation $R_{H} = \tanh\lambda\cdot\left|C_{H}\right|$ and thus the above sphere has the following parametrization
$$\mathrm{S}_{\psi,c}^{\ast}:ce_{1} + c(\tanh\lambda)\omega, \quad \omega\in\Bbb S^{n - 2}$$
such that the relation between $c$ and $\theta$ is given by
$$c = \frac{\cosh\lambda\cos\theta}{\sqrt{1 + \sinh^{2}\lambda\sin^{2}\theta} - \cosh\lambda\sin\theta},\quad \theta = \arctan\left(\frac{c^{2} - \cosh^{2}\lambda}{2c\cdot\cosh^{2}\lambda}\right).$$
Now, we want to find for a general unit normal $n = ((\cos\theta)\psi, \sin\theta)$, $\left(\psi\in\Bbb S^{n - 2}, \theta\in\left[-\frac{\pi}{2}, \frac{\pi}{2}\right]\right)$ the image, under $\Lambda$, of the intersection of the unit sphere $\Bbb S^{n - 1}$ with the unique hyperplane $H$ which is tangent to the spheroid $\Sigma_{\lambda}$ and has the unit normal $n$. From the rotational symmetry of the spheroid $\Sigma_{\lambda}$ it follows that this is the $n - 2$ dimensional sphere $\mathrm{S}_{\psi,c}^{\ast}$ in $\Bbb R^{n - 1}$ which has the following parametrization:
\vskip-0.35cm
$$\mathrm{S}_{\psi,c}^{\ast}:c\psi + c(\tanh\lambda)\omega, \quad \omega\in\Bbb S^{n - 2}$$
where again the relation between $c$ and $\theta$ is given by (5.2).
\end{proof}
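The relations obtained in Lemma 5.1 can also be verified numerically from first principles in the plane $X_1X_n$: intersect the tangent line with the unit circle, project stereographically and compare with (5.2). A short Python sketch, with sample values of $\lambda$ and $\theta$ chosen by us:
\begin{verbatim}
import numpy as np

lam, theta = 0.7, 0.3
t = np.sqrt(1 + np.sinh(lam)**2 * np.sin(theta)**2) / np.cosh(lam)  # support value of the ellipse
phi = theta + np.array([1, -1]) * np.arccos(t)          # intersection angles on the unit circle
p = np.stack([np.cos(phi), np.sin(phi)], axis=1)        # the two intersection points (x_1, x_n)
proj = p[:, 0] / (1 - p[:, 1])                          # stereographic projection from e_n
c, R = proj.mean(), abs(proj[0] - proj[1]) / 2          # center and radius of the projected sphere
print(np.isclose(R, np.tanh(lam) * abs(c)))             # R_H = tanh(lam) |C_H|
print(np.isclose(theta, np.arctan((c**2 - np.cosh(lam)**2) / (2 * c * np.cosh(lam)**2))))
\end{verbatim}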
\begin{lem}
Let $\mathrm{S}_{\psi,c}$ be the $n - 2$ dimensional subsphere of $\Bbb S^{n - 1}$ given by the following parametrization
$$\mathrm{S}_{\psi,c}:\Lambda^{-1}\left(c\psi + c(\tanh\lambda)\omega\right),\quad\omega\in\Bbb S^{n - 2},$$
then
\begin{equation} d\mathrm{S}_{\psi,c} = \frac{(2c\tanh\lambda)^{n - 2}d\omega}{\left(1 + c^{2}\left(1 + \tanh^{2}\lambda + 2\langle\omega,\psi\rangle\tanh\lambda\right)\right)^{n - 2}}.\end{equation}
\end{lem}
\begin{proof}
From the definition of the inverse stereographic projection $\Lambda^{-1}$ it follows that $\mathrm{S}_{\psi,c}$ has the following parametrization
$$\mathrm{S}_{\psi,c} = \left\{\left(\frac{2c(\psi + (\tanh\lambda)\omega)}{1 + c^{2}\left(1 + \tanh^{2}\lambda + 2\langle\psi,\omega\rangle\tanh\lambda\right)},\right.\right.$$ $$\left.\left. \frac{- 1 + c^{2}\left(1 + \tanh^{2}\lambda + 2\langle\psi,\omega\rangle\tanh\lambda\right)}{1 + c^{2}\left(1 + \tanh^{2}\lambda + 2\langle\psi,\omega\rangle\tanh\lambda\right)}\right):\omega\in\Bbb S^{n - 2}\right\}.$$
Making the following parametrization for $\omegamega$:
$$\omega = (\cos\rho)\psi + (\sin\rho)\psi^{\ast}, \quad \psi^{\ast}\in\Bbb S_{\psi}^{n - 3}, \ \rho\in[0,\pi],$$
we have the following parametrization for $\mathrm{S}_{\psi,c}$
$$\mathrm{S}_{\psi,c} = \left\{\left(\frac{2c((1 + \cos\rho\tanh\lambda)\psi + (\sin\rho\tanh\lambda)\psi^{\ast})}{1 + c^{2}\left(1 + \tanh^{2}\lambda + 2(\cos\rho)\tanh\lambda\right)},\right.\right.$$ \begin{equation}\left.\left. \frac{- 1 + c^{2}\left(1 + \tanh^{2}\lambda + 2(\cos\rho)\tanh\lambda\right)}{1 + c^{2}\left(1 + \tanh^{2}\lambda + 2(\cos\rho)\tanh\lambda\right)}\right):\psi^{\ast}\in\Bbb S_{\psi}^{n - 3}, \rho\in[0,\pi]\right\}.\end{equation}
By Lemma 5.1 it follows that the subsphere $\mathrm{S}_{\psi,c}$ is the intersection of $\Bbb S^{n - 1}$ with the unique hyperplane which is tangent to $\Sigma_{\lambda}$ and has the unit normal $n = ((\cos\theta)\psi, \sin\theta)$ where the relation between $\theta$ and $c$ is given by equation (5.2). Since the variable $\psi$ in the unit normal $n$ comes from rotations in the group $\mathcal{G}$ and since the infinitesimal measure of any subset in $\Bbb S^{n - 1}$ is invariant with respect to rotations in $\mathcal{G}$, it follows that $d\mathrm{S}_{\psi,c}$ is independent of $\psi$. Hence, we will assume from now on that $\psi = e_{n - 1}$. In this case observe that for the following rotation matrix
$$A_{\theta} = \left(\begin{array}{cccccc}
1 & 0 & ... & 0 & 0 & 0\\
0 & 1 & ... & 0 & 0 & 0\\
& .............\\
0 & 0 & ... & 1 & 0 & 0\\
0 & 0 & ... & 0 & -\sin\theta & \cos\theta\\
0 & 0 & ... & 0 & \cos\theta & \sin\theta
\end{array}\right) $$
we have
$$A_{\theta}\left(\mathrm{S}_{e_{n - 1},c}\right) = \left\{\left(\frac{2c(\sin\rho\tanh\lambda)\psi^{\ast}}{1 + c^{2}\left(1 + \tanh^{2}\lambda + 2(\cos\rho)\tanh\lambda\right)},\right.\right.$$ $$\frac{\cos\theta\sinh\lambda}{\cosh^{3}\lambda}\cdot\frac{\cos\rho\cosh^{2}\lambda + (2\sinh\lambda\cosh\lambda + (2\cosh^{2}\lambda - 1)\cos\rho)c^{2}}{1 + c^{2}\left(1 + \tanh^{2}\lambda + 2(\cos\rho)\tanh\lambda\right)},$$
\begin{equation} \left.\left. \frac{\left(\cosh^{2}\lambda + c^{2}\right)\cos\theta}{2c\cosh^{2}\lambda}\right):\psi^{\ast}\in\Bbb S_{e_{n - 1}}^{n - 3}, \rho\in[0,\pi]\right\}\end{equation}
where we used the relation (5.2) between the variables $\theta$ and $c$. Denote
\begin{equation} \mathrm{G}(\lambda, c) = \sqrt{1 - \frac{\left(\cosh^{2}\lambda + c^{2}\right)^{2}\cos^{2}\theta}{4c^{2}\cosh^{4}\lambda}} = \frac{2c\cosh\lambda\sinh\lambda}{\sqrt{4c^{2}\cosh^{4}\lambda + (c^{2} - \cosh^{2}\lambda)^{2}}},\end{equation}
then dividing the parametrization (5.5) of $A_{\theta}\left(\mathrm{S}_{e_{n - 1},c}\right)$ by $\mathrm{G}(\lambda, c)$ we have
$$\frac{1}{\mathrm{G}(\lambda, c)}A_{\theta}\left(\mathrm{S}_{e_{n - 1},c}\right)$$
$$ = \left\{\left(\frac{1}{\mathrm{G}(\lambda, c)}\cdot\frac{2c(\sin\rho\tanh\lambda)\psi^{\ast}}{1 + c^{2}\left(1 + \tanh^{2}\lambda + 2(\cos\rho)\tanh\lambda\right)},\right.\right.$$ $$\frac{1}{\mathrm{G}(\lambda, c)}\cdot\frac{\cos\theta\sinh\lambda}{\cosh^{3}\lambda}\cdot\frac{\cos\rho\cosh^{2}\lambda + (2\sinh\lambda\cosh\lambda + (2\cosh^{2}\lambda - 1)\cos\rho)c^{2}}{1 + c^{2}\left(1 + \tanh^{2}\lambda + 2(\cos\rho)\tanh\lambda\right)},$$
\begin{equation} \left.\left. \frac{\sqrt{1 - \mathrm{G}^{2}(\lambda, c)}}{\mathrm{G}(\lambda, c)}\right):\psi^{\ast}\in\Bbb S_{e_{n - 1}}^{n - 3}, \rho\in[0,\pi]\right\}.\end{equation}
Observe that the right hand side of the parametrization (5.7) is of the form
\begin{equation}\left(\sqrt{1 - r^{2}}\psi^{\ast},r, C\right)\end{equation}
where
$$r(\rho) = \frac{1}{\mathrm{G}(\lambda, c)}\cdot\frac{\cos\theta\sinh\lambda}{\cosh^{3}\lambda}\cdot\frac{\cos\rho\cosh^{2}\lambda + (2\sinh\lambda\cosh\lambda + (2\cosh^{2}\lambda - 1)\cos\rho)c^{2}}{1 + c^{2}\left(1 + \tanh^{2}\lambda + 2(\cos\rho)\tanh\lambda\right)}$$
and $C$ is a constant which does not depend on $r$ and $\psi^{\ast}$. Since $C$ does not depend on $r$ or $\psi^{\ast}$ it can be easily verified that the infinitesimal volume measure given by the parametrization (5.8) is $(1 - r^{2})^{\frac{n - 4}{2}}d\psi^{\ast}dr$.
Since
\vskip-0.2cm
$$\frac{dr}{d\rho} = \frac{1}{\mathrm{G}(\lambda, c)}\cdot\frac{\cos\theta\sinh\lambda}{\cosh^{5}\lambda}\frac{\left(\cosh^{4}\lambda + 2c^{2}\cosh(2\lambda)\cosh^{2}\lambda + c^{4}\right)\sin\rho}{\left(1 + c^{2}\left(1 + \tanh^{2}\lambda + 2(\cos\rho)\tanh\lambda\right)\right)^{2}}$$
it follows that
\vskip-0.2cm
$$d\left(\frac{1}{\mathrm{G}(\lambda, c)}A_{\theta}\left(\mathrm{S}_{e_{n - 1},c}\right)\right)$$
\begin{equation} = \frac{d\rho\, d\psi^{\ast}\sin^{n - 3}\rho}{\mathrm{G}^{n - 3}(\lambda, c)}\frac{\cos\theta\sinh\lambda}{\cosh^{5}\lambda}\frac{(2c\tanh\lambda)^{n - 4}\left(\cosh^{4}\lambda + 2c^{2}\cosh(2\lambda)\cosh^{2}\lambda + c^{4}\right)}{\left(1 + c^{2}\left(1 + \tanh^{2}\lambda + 2(\cos\rho)\tanh\lambda\right)\right)^{n - 2}}.\end{equation}
Observe that since $\omega = (\cos\rho)e_{n - 1} + (\sin\rho)\psi^{\ast}$ we have $d\omega = \sin^{n - 3}\rho\, d\rho\, d\psi^{\ast}$ and since the rotation matrix $A_{\theta}$ does not change the infinitesimal measure in the left hand side of equation (5.9) it follows that
\vskip-0.2cm
$$\frac{d\mathrm{S}_{e_{n - 1},c}}{\mathrm{G}^{n - 2}(\lambda, c)}$$
\begin{equation} = \frac{d\omega}{\mathrm{G}^{n - 3}(\lambda, c)}\frac{\cos\theta\sinh\lambda}{\cosh^{5}\lambda}\frac{(2c\tanh\lambda)^{n - 4}\left(\cosh^{4}\lambda + 2c^{2}\cosh(2\lambda)\cosh^{2}\lambda + c^{4}\right)}{\left(1 + c^{2}\left(1 + \tanh^{2}\lambda + 2(\cos\rho)\tanh\lambda\right)\right)^{n - 2}}.\end{equation}
Multiplying equation (5.10) by $\mathrm{G}^{n - 2}(\lambda,c)$ and using the explicit formula (5.6) for $\mathrm{G}(\lambda,c)$ we obtain that
\vskip-0.2cm
$$d\mathrm{S}_{e_{n - 1},c}$$
$$ = \left(\frac{(2c)^{n - 3}\cos\theta\sinh^{n - 2}\lambda}{\cosh^{n}\lambda}\frac{\left(\cosh^{4}\lambda + 2c^{2}\cosh(2\lambda)\cosh^{2}\lambda + c^{4}\right)}{\left(1 + c^{2}\left(1 + \tanh^{2}\lambda + 2(\cos\rho)\tanh\lambda\right)\right)^{n - 2}}\right)$$ $$ \times\frac{d\omega}{\sqrt{4c^{2}\cosh^{4}\lambda + (c^{2} - \cosh^{2}\lambda)^{2}}}.$$
Using the fact that
\vskip-0.2cm
$$\cos\theta = \frac{2c\cosh^{2}\lambda}{\sqrt{4c^{2}\cosh^{4}\lambda + (c^{2} - \cosh^{2}\lambda)^{2}}}$$
and that $\cos\rho = \langle\omega,\psi\rangle$ we obtain Lemma 5.2.
\end{proof}
\begin{lem}
Let $g$ be a function defined on $\Bbb R^{n - 1}$ such that $g(x) / |x|^{n - 2}$ belongs to $L_{1}(\Bbb R^{n - 1}\setminus\{0\})$. Let
\vskip-0.2cm
$$g(r\zeta) = \sum_{m = 0}^{\infty}\sum_{l = 1}^{d_{m}}g_{m,l}(r)Y_{l}^{m}(\zeta), \quad r\geq0, \ \zeta\in\Bbb S^{n - 2}$$
be the expansion of $g$ into spherical harmonics in $\Bbb R^{n - 1}$. Then, for every $m\geq0, 1\leq l \leq d_{m}$ the Mellin transform $\mathcal{M}(g_{m,l})$ of $g_{m,l}$ exists in the strip $0 < \Re(\rho) < 1$. Also, if $h_{m,\lambda}$ is defined as in (2.3), then $\mathcal{M}(h_{m,\lambda})(1 - \cdot)$ exists in the same strip.
\end{lem}
\begin{proof}
By the definition of the Mellin transform, we have
$$\mathcal{M}(g_{m,l})(s) = \int_{0}^{\infty}y^{s - 1}g_{m,l}(y)dy.$$
Assume that $0 < \Re(s) < 1$, then $s = a + ib$ where $0 < a < 1$, $b\in\Bbb R$ and thus
$$\left|\mathcal{M}(g_{m,l})(s)\right| = \left|\int_{0}^{\infty}y^{s - 1}g_{m,l}(y)dy\right| \leq \int_{0}^{\infty}\left|y^{s - 1}\right||g_{m,l}(y)|dy$$
$$ = \int_{0}^{\infty}\left|y^{a + ib - 1}\right||g_{m,l}(y)|dy.$$
Since $y^{ib} = \exp(ib\log(y))$ and $\log(y)$ is real when $y > 0$, it follows that $|y^{ib}| = 1$. Thus
\begin{equation}
\left|\mathcal{M}(g_{m,l})(s)\right| \leq \int_{0}^{\infty}\left|y^{a - 1}\right||g_{m,l}(y)|dy \leq \int_{0}^{1}\left|y^{a - 1}\right||g_{m,l}(y)|dy + \int_{1}^{\infty}\left|y^{a - 1}\right||g_{m,l}(y)|dy.
\end{equation}
In Lemma 5.4 (stated after this proof) it is proved that if $g(x) / |x|^{n - 2}$ belongs to $L_{1}(\Bbb R^{n - 1}\setminus\{0\})$, then $g_{m,l}\in L_{1}(\Bbb R^{+})$ for $m\geq0, 1\leq l \leq d_{m}$. Thus, the first integral in the right hand side of equation (5.11) converges since $- 1 < a - 1 < 0$ and $g_{m,l}$ is bounded in the interval $(0,1)$, and the second integral converges since $g_{m,l}\in L_{1}(\Bbb R^{+})$ and $\left|y^{a - 1}\right||g_{m,l}(y)|\leq|g_{m,l}(y)|$ when $y\rightarrow\infty$.
By the definition of $h_{m,\lambda}$, it follows easily that $h_{m,\lambda}\in L_{1}(\Bbb R^{+})$ when the dimension $n$ is greater than or equal to $2$. Thus it can be proved in the same way that the Mellin transform $\mathcal{M}(h_{m,\lambda})$ of $h_{m,\lambda}$ exists on the strip $0 < \Re(\rho) < 1$, which is equivalent to the existence of $\mathcal{M}(h_{m,\lambda})(1 - \cdot)$ on the strip $0 < \Re(\rho) < 1$.
\end{proof}
\begin{lem}
Let $g$ be a function in $C^{\infty}(\Bbb R^{n - 1})$ such that $g(x) / |x|^{n - 2}$ belongs to $L_{1}(\Bbb R^{n - 1}\setminus\{0\})$. Let
\vskip-0.2cm
$$g(r\zeta) = \sum_{m = 0}^{\infty}\sum_{l = 1}^{d_{m}}g_{m,l}(r)Y_{l}^{m}(\zeta), \quad r\geq0, \ \zeta\in\Bbb S^{n - 2}$$
be the expansion of $g$ into spherical harmonics in $\Bbb R^{n - 1}$. Then for every $m\geq0, 1\leq l \leq d_{m}$, $g_{m,l}\in L_{1}(\Bbb R^{+})$.
\end{lem}
\begin{proof}
From the orthogonality condition for spherical harmonics, we have
$$g_{m,l}(r) = \int_{\Bbb S^{n - 2}}g(r\zeta)Y_{l}^{m}(\zeta)dS(\zeta).$$
Thus,
$$\int_{0}^{\infty}|g_{m,l}(r)|dr = \int_{0}^{\infty}\left|\int_{\Bbb S^{n - 2}}g(r\zeta)Y_{l}^{m}(\zeta)dS(\zeta)\right|dr$$
$$ \leq \int_{0}^{\infty}\int_{\Bbb S^{n - 2}}|g(r\zeta)Y_{l}^{m}(\zeta)|dS(\zeta)dr\leq M_{l,m}\int_{0}^{\infty}\int_{\Bbb S^{n - 2}}|g(r\zeta)|dS(\zeta)dr$$
where $M_{l,m}$ is an upper bound for $|Y_{l}^{m}(\zeta)|, \zeta\in \Bbb S^{n - 2}$. Making the change of variables $x = r\zeta$, $dx = r^{n - 2}dS(\zeta)dr$ yields
\begin{equation}
\int_{0}^{\infty}|g_{m,l}(r)|dr \leq M_{l,m}\int_{\Bbb R^{n - 1}}\frac{|g(x)|}{|x|^{n - 2}}dx
\end{equation}
and since $g(x) / |x|^{n - 2}$ belongs to $L_{1}(\Bbb R^{n - 1}\setminus\{0\})$, it follows that the integral in the right hand side of equation (5.12) converges which proves the lemma.
\end{proof}
\end{document}
\begin{document}
\title[Stochastic convective Brinkman-Forchheimer equations]{$\mathbb{L}^p$-solutions of deterministic and stochastic convective Brinkman-Forchheimer equations}
\author[M. T. Mohan]{Manil T. Mohan\textsuperscript{1*}}
\maketitle
\begin{abstract}
In the first part of this work, we establish the existence and uniqueness of a local mild solution to the deterministic convective Brinkman-Forchheimer (CBF) equations defined on the whole space, by using properties of the heat semigroup and fixed point arguments based on an iterative technique. The second part is devoted to establishing the existence and uniqueness of a pathwise mild solution up to a random time for the stochastic CBF equations perturbed by L\'evy noise, by exploiting the contraction mapping principle. We also discuss the local solvability of the stochastic CBF equations subject to fractional Brownian noise.
\end{abstract}
\section{Introduction}\label{sec1}\setcounter{equation}{0}
\subsection{Deterministic convective Brinkman-Forchheimer equations} The Cauchy problem for the convective Brinkman-Forchheimer (CBF) equations in $\mathbb{R}^d$, $d\geq 2$, can be written as
\begin{align}\label{1}
\frac{\partial \mathbf{u}(t,x)}{\partial t}-\mu \Delta\mathbf{u}(t,x)&+(\mathbf{u}(t,x)\cdot\nabla)\mathbf{u}(t,x)+\alpha\mathbf{u}(t,x)+\beta|\mathbf{u}(t,x)|^{r-1}\mathbf{u}(t,x)\nonumber\\+\nabla p(t,x)&=\mathbf{f}(t,x), \ \text{ in } \ (0,T)\times\mathbb{R}^d,
\end{align}
with the conditions
\begin{equation}\label{2}
\left\{
\begin{aligned}
\nabla\cdot\mathbf{u}(t,x)&=0, \ \text{ in } \ (0,T)\times\mathbb{R}^d, \\
\mathbf{u}(0,x)&=\mathbf{u}^0(x) \ \text{ in } \ \{0\}\times\mathbb{R}^d,\\
|\mathbf{u}(t,x)|&\to 0\ \text{ as }\ |x|\to\infty, \ t\in(0,T).
\end{aligned}\right.
\end{equation}
In \eqref{1}, $\mathbf{u}(t,x)\in\mathbb{R}^d$ stands for the velocity field at time $t$ and position $x$, $p(t,x)\in\mathbb{R}$ represents the pressure field, and $\mathbf{f}(t,x)\in\mathbb{R}^d$ is an external forcing. The constant $\mu$ denotes the positive Brinkman coefficient (effective viscosity), and the positive constants $\alpha$ and $\beta$ represent the Darcy (permeability of the porous medium) and Forchheimer (proportional to the porosity of the material) coefficients, respectively. For $\alpha=\beta=0$, we obtain the classical Navier-Stokes equations (NSE). The absorption exponent satisfies $r\in[1,\infty)$, and the case $r=3$ is known as the critical exponent. The critical homogeneous CBF equations \eqref{1} have the same scaling as the NSE only when $\alpha=0$ (see Proposition 1.1 of \cite{KWH}; there is no scale invariance for other values of $\alpha$ and $r$). Since $\alpha$ does not play a major role in our analysis, we fix $\alpha=0$ and scale $\mu$ and $\beta$ to unity in the rest of the paper. The existence and uniqueness of weak as well as strong solutions of the system \eqref{1}-\eqref{2} in the whole space and in periodic domains is discussed in \cite{ZCQJ,PAM,KWH,MTM5} and the references therein.
\iffalse
Let us discuss some of the global solvability results available in the literature for deterministic 3D CBF equations in the whole space and periodic domains. The Cauchy problem corresponding to \mathbf{v}arepsilonqref{1} is considered in \cite{ZCQJ}, and the authors established the existence of global weak solutions, for any $r\mathbf{g}eq 1$, global strong solutions, for any $r\mathbf{g}eq 7/2$ and that the strong solution is unique, for any $7/2\leq r\leq 5$. The authors in \cite{ZZXW} improved the above result and showed that the above problem possesses global strong solutions, for any $r>3$ and the strong solution is unique, when $3<r\leq 5$. Later, the authors in \cite{YZ} proved that the strong solution exists globally for $r\mathbf{g}eq 3$, and they established two regularity criteria, for $1\leq r<3$. Furthermore, for any $r\mathbf{g}eq 1$, they established that the strong solution is unique even among weak solutions. The global well-posedness of the traditional a 3D Brinkman-Forchheimer-extended Darcy model with periodic boundary conditions is described in \cite{PAM}. On a 3D periodic domain, the authors in \cite{KWH} obtained a simple proof of the existence of global-in-time smooth solutions for the CBF equations \mathbf{v}arepsilonqref{1}-\mathbf{v}arepsilonqref{2} with $r>3$. For the critical absorption exponent, they proved that the unique global, regular solutions exist, provided that the coefficients satisfy the relation $4\betat\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}u\mathbf{g}eq 1$ and they resolved the issue of establishing the energy equality satisfied by the weak solutions. Using the monotonicity property of the linear and nonlinear operators as well as the Minty-Browder technique, the existence of a global weak as well as strong solutions of the CBF equations \mathbf{v}arepsilonqref{1}-\mathbf{v}arepsilonqref{2} with $r\mathbf{g}eq 3$ ($2\betat\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}u\mathbf{g}eq 1$ for $r=3$) in bounded and periodic domains is showed in \cite{MTM5}. Recently, the authors in \cite{KWH1} proved that the strong solutions of 3D CBF equations in periodic domains with the absorption exponent $r\in[1,3]$ remain strong under small enough changes of initial condition and forcing function.
\fi
\subsection{Abstract formulation and mild solution}
The \emph{Helmholtz-Hodge projection}, denoted by $\mathscr{P}$, is a bounded linear operator from $\mathbb{L}^p(\mathbb{R}^d)$ to $\mathbb{J}_p:=\mathscr{P}\mathbb{L}^p(\mathbb{R}^d)$, $1<p<\infty$. Note that the space $\mathbb{J}_p$ is a separable Banach space with the $\mathbb{L}^p(\mathbb{R}^d)$-norm, denoted by $\|\cdot\|_p$, and the operator $\mathscr{P}$ is an orthogonal projection of $\mathbb{L}^2(\mathbb{R}^d)$ onto the subspace $\mathbb{H}:=\mathbb{J}_2$. Recall that $\mathscr{P}$ can be expressed in terms of the Riesz transforms (cf. \cite{MTSS} for more details). We use the notation $\mathcal{L}(\mathbb{H},\mathbb{J}_p)$ for the space of all bounded linear operators from $\mathbb{H}$ to $\mathbb{J}_p$. Applying the projection operator $\mathscr{P}$ to the system (\ref{1}), we obtain
\begin{equation}\label{3}
\left\{
\begin{aligned}
\frac{\mathrm{d}\mathbf{u}(t)}{\mathrm{d} t}+\mathrm{A}\mathbf{u}(t)+\mathrm{B}(\mathbf{u}(t))+\mathcal{C}(\mathbf{u}(t))&=\mathscr{P}\mathbf{f}(t), \ t\in(0,T),\\
\mathbf{u}(0)&=\mathbf{x},
\end{aligned}\right.
\end{equation}
where
\begin{align*}
\mathrm{A}\mathbf{u}&=-\mathscr{P}\Delta\mathbf{u},\ \textrm{ with domain }\mathrm{D}_p(\mathrm{A})=\mathrm{D}_p(\Delta)\cap\mathbb{J}_p,\\
\mathrm{B}(\mathbf{u})&=\mathrm{B}(\mathbf{u},\mathbf{u}),\ \textrm{ with }\ \mathrm{B}(\mathbf{u},\mathbf{v})=\mathscr{P}[(\mathbf{u}\cdot\nabla)\mathbf{v}]=\mathscr{P}[\nabla\cdot(\mathbf{u}\otimes\mathbf{v})],\\
\mathcal{C}(\mathbf{u})&=\mathscr{P}[|\mathbf{u}|^{r-1}\mathbf{u}],
\end{align*}
and $\mathbf{x}\in\mathbb{J}_p$. For $r\geq 1$, the operator $\mathcal{C}(\cdot)$ is Gateaux differentiable with the Gateaux derivative
\begin{align}\label{29}
\mathcal{C}'(\mathbf{u})\mathbf{v}&=\left\{\begin{array}{cl}
\mathscr{P}(\mathbf{v}), & \text{ for }r=1,\\
\left\{\begin{array}{cc}
\mathscr{P}(|\mathbf{u}|^{r-1}\mathbf{v})+(r-1)\mathscr{P}\left(\frac{\mathbf{u}}{|\mathbf{u}|^{3-r}}(\mathbf{u}\cdot\mathbf{v})\right), & \ \text{ if }\ \mathbf{u}\neq \mathbf{0},\\
\mathbf{0}, & \ \text{ if }\ \mathbf{u}=\mathbf{0},
\end{array}\right. & \text{ for } 1<r<3,\\
\mathscr{P}(|\mathbf{u}|^{r-1}\mathbf{v})+(r-1)\mathscr{P}(\mathbf{u}|\mathbf{u}|^{r-3}(\mathbf{u}\cdot\mathbf{v})), & \text{ for }r\geq 3,
\end{array}\right.
\end{align}
for all $\mathbf{u},\mathbf{v}\in\mathbb{L}^{p}(\mathbb{R}^d)$, for $p\in[2,\infty)$. It should be recalled that $\mathscr{P}\Delta=\Delta\mathscr{P}$ (cf. \cite{MTSS}), and hence $\mathrm{A}$ is essentially equal to $-\Delta$ and $e^{-t\mathrm{A}}$ is substantially the heat semigroup (Gauss-Weierstrass semigroup, \cite{Kato5}), given by $$(e^{-t\mathrm{A}}\mathbf{u})(x)=\int_{\mathbb{R}^d}\Psi(t,x-y)\mathbf{u}(y)\,\mathrm{d} y, \ \text{ where }\ \Psi(t,x)=\frac{1}{(4\pi t)^{\frac{d}{2}}}e^{-\frac{|x|^2}{4t}}, \ t>0,\ x\in\mathbb{R}^d,$$ for $\mathbf{u}\in\mathbb{L}^q(\mathbb{R}^d)$, $q\in[1,\infty)$. Thus, the operator system \eqref{3} can be transformed into a nonlinear integral equation as follows:
\begin{align}\label{4}
\mathbf{u}(t)=e^{-t\mathrm{A}}\mathbf{x}-\int_0^te^{-(t-s)\mathrm{A}}[\mathrm{B}(\mathbf{u}(s))+t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athcal{C}(\mathbf{u}(s))]\/t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athrm{d}\/ s+\int_0^te^{-(t-s)\mathrm{A}}t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athscr{P}\mathbf{f}(s)\/t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athrm{d}\/ s,
\mathbf{v}arepsilonnd{align}
for all $t\in[0,T]$. For a given $\mathbf{x}\in\mathbb{J}_p$ and $\mathbf{f}\int\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athrm{L}^1(0,T;\mathbb{J}_p)$, a function $\mathbf{u}\in\mathrm{C}([0,T];\mathbb{J}_p),$ for $t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}axt\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athopen{}t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athclose\bgroup\originalleft\{d,\mathbf{f}rac{d(r-1)}{2}\aftergroup\mathbf{v}arepsilongroup\originalright\}<p<\infty$ satisfying \mathbf{v}arepsilonqref{4} is called a \mathbf{v}arepsilonmph{mild solution} to the system \mathbf{v}arepsilonqref{3}.
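For the reader's convenience, we recall the formal derivation behind \eqref{4} (a variation of constants argument): if $\mathbf{u}(\cdot)$ is a sufficiently regular solution of \eqref{3}, then, for $0\leq s\leq t$,
\begin{align*}
\frac{\mathrm{d}}{\mathrm{d} s}\left[e^{-(t-s)\mathrm{A}}\mathbf{u}(s)\right]=e^{-(t-s)\mathrm{A}}\left[\mathrm{A}\mathbf{u}(s)+\frac{\mathrm{d}\mathbf{u}(s)}{\mathrm{d} s}\right]=e^{-(t-s)\mathrm{A}}\left[\mathscr{P}\mathbf{f}(s)-\mathrm{B}(\mathbf{u}(s))-\mathcal{C}(\mathbf{u}(s))\right],
\end{align*}
and an integration over $(0,t)$ yields \eqref{4}. The notion of mild solution requires only the integral identity \eqref{4}, not this formal computation.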
Since $e^{-t\mathrm{A}}$ is an analytic semigroup, we infer that $e^{-t\mathrm{A}}:\mathbb{L}^p\to\mathbb{L}^q$ is a bounded map whenever $1<p\leq q<\infty$ and $t>0$, and there exists a constant $C$, depending on $p$ and $q$, such that (see \cite{Kato5})
\begin{align}
\|e^{-t\mathrm{A}}\mathbf{g}\|_{q}&\leq Ct^{-\frac{d}{2}\left(\frac{1}{p}-\frac{1}{q}\right)}\|\mathbf{g}\|_p,\label{1.6}\\
\|\nabla e^{-t\mathrm{A}}\mathbf{g}\|_{q}&\leq Ct^{-\frac{1}{2}-\frac{d}{2}\left(\frac{1}{p}-\frac{1}{q}\right)}\|\mathbf{g}\|_p,\label{1.5}
\end{align}
for all $t\in(0,T]$ and $\mathbf{g}\in\mathbb{L}^p(\mathbb{R}^d)$. Using the estimates \eqref{1.6}-\eqref{1.5}, one can estimate $\|e^{-t\mathrm{A}}\mathrm{B}(\mathbf{u},\mathbf{v})\|_p$ as
\begin{align}\label{1.7}
\|e^{-t\mathrm{A}}\mathrm{B}(\mathbf{u},\mathbf{v})\|_p&\leq Ct^{-\left(\frac{1}{2}+\frac{d}{2p}\right)}\|\mathbf{u}\|_p\|\mathbf{v}\|_p,
\end{align}
for all $t\in(0,T]$ and $\mathbf{u},\mathbf{v}\in\mathbb{J}_p$. Furthermore, using the estimate \eqref{1.6}, we estimate $\|e^{-t\mathrm{A}}\mathcal{C}(\mathbf{u})\|_{p}$ and $\|e^{-t\mathrm{A}}\mathcal{C}'(\mathbf{u})\mathbf{v}\|_{p}$ as
\begin{align}
\|e^{-t\mathrm{A}}\mathcal{C}(\mathbf{u})\|_{p}&\leq Ct^{-\frac{d(r-1)}{2p}}\|\mathbf{u}\|_{p}^{r},\label{1.8}\\
\|e^{-t\mathrm{A}}\mathcal{C}'(\mathbf{u})\mathbf{v}\|_{p}&\leq Ct^{-\frac{d(r-1)}{2p}}\|\mathbf{u}\|_{p}^{r-1}\|\mathbf{v}\|_{p},\label{1.9}
\end{align}
for all $t\in(0,T]$ and $\mathbf{u},\mathbf{v}\in\mathbb{J}_p$. For the existence of local mild solutions in $\mathbb{L}^p$ to the 3D NSE in the whole space and in bounded domains, the interested reader is referred to \cite{FJR,FBW1,Kato5,YGTM}, etc.
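Heuristically (suppressing the boundedness of $\mathscr{P}$ on $\mathbb{L}^p$ and the commutation of the heat semigroup with $\nabla$ and $\mathscr{P}$), the exponents in \eqref{1.7}-\eqref{1.9} arise from \eqref{1.6}-\eqref{1.5} and H\"older's inequality. Writing $\mathrm{B}$ in divergence form,
\begin{align*}
\|e^{-t\mathrm{A}}\mathrm{B}(\mathbf{u},\mathbf{v})\|_p=\|e^{-t\mathrm{A}}\mathscr{P}[\nabla\cdot(\mathbf{u}\otimes\mathbf{v})]\|_p\leq C\|\nabla e^{-t\mathrm{A}}(\mathbf{u}\otimes\mathbf{v})\|_p\leq Ct^{-\frac{1}{2}-\frac{d}{2}\left(\frac{2}{p}-\frac{1}{p}\right)}\|\mathbf{u}\otimes\mathbf{v}\|_{p/2}\leq Ct^{-\left(\frac{1}{2}+\frac{d}{2p}\right)}\|\mathbf{u}\|_p\|\mathbf{v}\|_p,
\end{align*}
using \eqref{1.5} with the pair of exponents $(p/2,p)$, while \eqref{1.8}-\eqref{1.9} follow in the same way from \eqref{1.6} with the pair $(p/r,p)$, together with $\||\mathbf{u}|^{r-1}\mathbf{u}\|_{p/r}=\|\mathbf{u}\|_p^{r}$ and $\||\mathbf{u}|^{r-1}\mathbf{v}\|_{p/r}\leq\|\mathbf{u}\|_p^{r-1}\|\mathbf{v}\|_p$.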
\subsection{Stochastic CBF equations perturbed by L\'evy noise} Let $(\Omega,\mathscr{F},\mathbb{P})$ be a complete probability space equipped with an increasing family of sub-sigma fields $\{\mathscr{F}_t\}_{0\leq t\leq T}$ of $\mathscr{F}$ satisfying the usual conditions. Taking the external forcing to be a L\'evy noise, one can write the stochastic counterpart of the problem \eqref{3}, for $t\in(0,T)$, as
\begin{equation}\label{5}
\left\{
\begin{aligned}
\mathrm{d}\mathbf{u}(t)+[\mathrm{A}\mathbf{u}(t)+\mathrm{B}(\mathbf{u}(t))+\mathcal{C}(\mathbf{u}(t))]\,\mathrm{d} t&=\Phi\,\mathrm{d}\mathrm{W}(t)+\int_{\mathrm{Z}}\gamma(t-,z)\widetilde{\pi}(\mathrm{d} t,\mathrm{d} z), \\
\mathbf{u}(0)&=\mathbf{x}.
\end{aligned}\right.
\end{equation}
In \eqref{5}, $\mathrm{W}=\{\mathrm{W}(t)\}_{0\leq t\leq T}$ is a cylindrical Wiener process and, for an orthonormal basis $\{e_j(x)\}_{j=1}^{\infty}$ of $\mathbb{H}:=\mathbb{J}_2$, $\mathrm{W}(\cdot)$ can be represented as $\mathrm{W}(t)=\displaystyle{\sum_{j=1}^{\infty}}e_j(x)\beta_j(t)$, where $\{\beta_j(\cdot)\}_{j=1}^{\infty}$ is a sequence of mutually independent one-dimensional Brownian motions (\cite{DaZ}). The bounded linear operator
$\Phi:\mathbb{H}\to\mathbb{J}_p$, $p\in[2,\infty)$, is a $\gamma$-radonifying
operator in $\mathbb{J}_p$ such that
\begin{align*}
\Phi\,\mathrm{d}\mathrm{W}(t)=\sum_{j=1}^{\infty}\Phi e_j(x)\,\mathrm{d}\beta_j(t)=
\sum_{j=1}^{\infty}\int_{\mathbb{R}^d}\mathscr{K}(x,y)e_j(y)\,\mathrm{d} y\,\mathrm{d}\beta_j(t),
\end{align*}
where $\mathscr{K}(\cdot,\cdot)$ is the kernel of the operator
$\Phi$ (Theorem 2.2, \cite{BLH}). In particular, the operator $\Phi\in\gamma(\mathbb{H},\mathbb{J}_p)$
satisfies
$$\|\Phi\|_{\gamma(\mathbb{H},\mathbb{J}_p)}
\equiv\left\{\displaystyle{\int_{\mathbb{R}^d}}\left[\displaystyle{\int_{\mathbb{R}^d}}|\mathscr{K}(x,y)|^2\,\mathrm{d} y\right]^{p/2}\,\mathrm{d} x\right\}^{1/p}<+\infty,$$
where $\gamma(\mathbb{H},\mathbb{J}_p)$ is the space of all $\gamma$-radonifying
operators from $\mathbb{H}$ to $\mathbb{J}_p$.\footnote{Let $\mathbb{U}$ be a real separable Hilbert space and $\mathbb{X}$ be a Banach space. A bounded linear operator $R\in\mathcal{L}(\mathbb{U},\mathbb{X})$ is $\gamma$-radonifying provided that there exists a centered Gaussian probability measure $\nu$ on $\mathbb{X}$ such that $\int_{\mathbb{X}}\varphi(x)^2\,\mathrm{d}\nu(x)=\|R^*\varphi\|_{\mathbb{U}}^2$, for all $\varphi\in\mathbb{X}^*$. Such a measure, if it exists, is unique, and we set $\|R\|_{\gamma(\mathbb{U},\mathbb{X})}^2:=\int_{\mathbb{X}}\|x\|_{\mathbb{X}}^2\,\mathrm{d}\nu(x)$. We denote by $\gamma(\mathbb{U},\mathbb{X})$ the space of $\gamma$-radonifying operators; equipped with the norm $\|\cdot\|_{\gamma(\mathbb{U},\mathbb{X})}$, it is a separable Banach space.}
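We note, for orientation, a standard special case of this notion: when $\mathbb{X}$ is itself a Hilbert space, $R\in\mathcal{L}(\mathbb{U},\mathbb{X})$ is $\gamma$-radonifying if and only if it is Hilbert--Schmidt, with
$$\|R\|_{\gamma(\mathbb{U},\mathbb{X})}^2=\sum_{j=1}^{\infty}\|Re_j\|_{\mathbb{X}}^2$$
for any orthonormal basis $\{e_j\}_{j=1}^{\infty}$ of $\mathbb{U}$; the kernel condition on $\Phi$ displayed above plays the analogous role for $\mathbb{X}=\mathbb{J}_p$.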
Let us denote by $\mathrm{Z}$ a measurable subspace of some Hilbert space (for example,
a measurable subspace of $\mathbb{R}^d$, $\mathbb{L}^2(\mathbb{R}^d)$, etc.) and by $\lambda(\mathrm{d} z)$ a $\sigma$-finite L\'{e}vy measure on $\mathrm{Z}$ with an associated
Poisson random measure $\pi(\mathrm{d} t,\mathrm{d} z)$. We define $\widetilde{\pi}(\mathrm{d} t,\mathrm{d} z):=\pi(\mathrm{d} t,\mathrm{d} z)-\lambda(\mathrm{d} z)\,\mathrm{d} t$ as the compensated
Poisson random measure. The jump noise coefficient
$\gamma(t,z):=\gamma(t,z,x)$ is such that $\gamma:[0,T]\times\mathrm{Z}\times
\mathbb{J}_p\to\mathbb{J}_p$, $p\in[2,\infty)$, and in particular, $\gamma$ satisfies
$$\displaystyle{\int_0^T\int_{\mathrm{Z}}}\|\gamma(t,z)\|_p^2\,\lambda(\mathrm{d} z)\,\mathrm{d} t<+\infty.$$ The processes
$\mathrm{W}(\cdot)$ and $\pi(\cdot,\cdot)$ are mutually
independent. The existence and uniqueness of pathwise strong solutions to the stochastic CBF equations and related models perturbed by Gaussian as well as jump noises, in the whole space or in periodic domains, are available in the literature, and the interested reader is referred to \cite{MRXZ1,ZBGD,ZDRZ,MTM4,MTM6}, etc., and the references therein.
\iffalse
Let us now discuss some of the global solvability results for \eqref{1}-\eqref{2} and related models in the whole space or on a torus. The existence of a unique strong solution
\begin{align}\label{112}
\mathbf{u}\in\mathrm{L}^2(\Omega;\mathrm{L}^{\infty}(0,T;\mathbb{H}^1(\mathcal{O})))\cap\mathrm{L}^2(0,T;\mathbb{H}^2(\mathcal{O})),
\end{align}
with $\mathbb{P}$-a.s. continuous paths in $\mathbb{H}^1(\mathcal{O})$, for $\mathbf{u}_0\in\mathrm{L}^2(\Omega;\mathbb{H}^1(\mathcal{O}))$, where $\mathcal{O}=\mathbb{R}^3$ or a periodic domain, to the stochastic tamed 3D NSE perturbed by multiplicative Gaussian noise is obtained in \cite{MRXZ1}. Recently, \cite{ZBGD} improved these results for a slightly simplified system. The authors in \cite{WLMR} established the global existence and uniqueness of strong solutions for general stochastic nonlinear evolution equations with coefficients satisfying some local monotonicity and generalized coercivity conditions, subject to multiplicative Gaussian noise. In \cite{WL}, the author showed the existence and uniqueness of strong solutions for a large class of SPDEs perturbed by multiplicative Gaussian noise, where the coefficients satisfy local monotonicity and Lyapunov conditions, and he provided the stochastic tamed 3D Navier-Stokes equations as an example. The works described above established the existence and uniqueness of strong solutions in the regularity class given in \eqref{112}. The authors in \cite{HBAM} described the global solvability of the 3D Navier-Stokes equations in the whole space with a Brinkman-Forchheimer type term, subject to an anisotropic viscosity and a random perturbation of multiplicative Gaussian type. In the paper \cite{ZDRZ}, the authors showed the existence and uniqueness of a strong solution to the stochastic 3D tamed Navier-Stokes equations driven by multiplicative L\'evy noise with periodic boundary conditions, based on Galerkin's approximation and a kind of local monotonicity of the coefficients. The existence and uniqueness of strong solutions satisfying the energy equality (It\^o's formula) to the stochastic CBF equations perturbed by multiplicative Gaussian noise ($r>3$, for any $\mu$ and $\beta$; $r=3$ for $2\beta\mu\geq 1$) in bounded or periodic domains, with $\mathbf{u}_0\in\mathrm{L}^2(\Omega;\mathbb{L}^2(\mathcal{O}))$, is obtained in \cite{MTM4} with the regularity
\begin{align}\label{113}\mathbf{u}\in\mathrm{L}^2(\Omega;\mathrm{L}^{\infty}(0,T;\mathbb{L}^2(\mathcal{O}))\cap\mathrm{L}^2(0,T;\mathbb{H}_0^1(\mathcal{O})))\cap\mathrm{L}^{r+1}(\Omega;\mathrm{L}^{r+1}(0,T;\mathbb{L}^{r+1}(\mathcal{O}))),\end{align} with $\mathbb{P}$-a.s. paths in $\mathrm{C}([0,T];\mathbb{L}^2(\mathcal{O}))$. The author in \cite{MTM6} extended this work to pure multiplicative jump noise and established strong solutions in the class given in \eqref{113} satisfying the energy equality, with $\mathbb{P}$-a.s. c\`{a}dl\`{a}g paths in $\mathbb{L}^2(\mathcal{O})$, for all $t\in[0,T]$.
\fi
We transform the operator system \eqref{5} into a stochastic nonlinear integral equation as follows:
\begin{equation}\label{6}
\begin{aligned}
\mathbf{u}(t)&=e^{-t\mathrm{A}}\mathbf{x}-\int_0^te^{-(t-s)\mathrm{A}}[\mathrm{B}(\mathbf{u}(s))+\mathcal{C}(\mathbf{u}(s))]\,\mathrm{d} s+\int_0^te^{-(t-s)\mathrm{A}}\Phi\,\mathrm{d}\mathrm{W}(s)\\&\quad+\int_0^t\int_{\mathrm{Z}}e^{-(t-s)\mathrm{A}}\gamma(s-,z)\widetilde{\pi}(\mathrm{d} s,\mathrm{d} z),
\end{aligned}
\end{equation}
for all $t\in[0,T]$. The existence of pathwise mild solutions for the 2D and 3D NSE perturbed by Gaussian as well as jump noise is available in \cite{GDJZ,FRS,MTSS,JZZB1}, etc., and the references therein.
\subsection{Stochastic CBF equations perturbed by fractional Brownian noise} Let us now consider the stochastic CBF equations perturbed by fractional Brownian noise:
\begin{equation}\label{1.13}
\left\{
\begin{aligned}
\mathrm{d}\mathbf{u}(t)+[\mathrm{A}\mathbf{u}(t)+\mathrm{B}(\mathbf{u}(t))+\mathcal{C}(\mathbf{u}(t))]\,\mathrm{d} t&=\Phi\,\mathrm{d}\mathrm{W}^{H}(t), \\
\mathbf{u}(0)&=\mathbf{x},
\end{aligned}\right.
\end{equation}
where $\Phi\in\mathcal{L}(\mathbb{H},\mathbb{J}_p)$ and $\mathrm{W}^{H}(\cdot)$ is a cylindrical fractional Brownian motion with Hurst parameter $H\in\left(\max\left\{\frac{1}{2},\frac{d}{4}\right\},1\right)$, where $d=2,3$ (see Section \ref{sec4} for more details). One can transform the operator system \eqref{1.13} into a stochastic nonlinear integral equation as
\begin{equation}\label{1.14}
\begin{aligned}
\mathbf{u}(t)&=e^{-t\mathrm{A}}\mathbf{x}-\int_0^te^{-(t-s)\mathrm{A}}[\mathrm{B}(\mathbf{u}(s))+\mathcal{C}(\mathbf{u}(s))]\,\mathrm{d} s+\int_0^te^{-(t-s)\mathrm{A}}\Phi\,\mathrm{d}\mathrm{W}^{H}(s),
\end{aligned}
\end{equation}
for all $t\in[0,T]$. For the well-posedness of, and the existence of densities for, the 2D stochastic NSE perturbed by fractional Brownian noise, we refer the interested reader to \cite{LFPS,EHPA}, respectively.
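In particular, the admissible range of the Hurst parameter in \eqref{1.13} is
$$H\in\left(\tfrac{1}{2},1\right)\ \text{ for }\ d=2, \qquad H\in\left(\tfrac{3}{4},1\right)\ \text{ for }\ d=3,$$
since $\max\left\{\frac{1}{2},\frac{d}{4}\right\}=\frac{1}{2}$ for $d=2$ and $\frac{3}{4}$ for $d=3$.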
\subsection{Major objectives}
The purpose of this work is twofold.
\begin{enumerate}
\item [(i)] In the first part, we show the existence of a unique local mild solution to the deterministic CBF equations \eqref{3} in $\mathbb{L}^p$-spaces, for $\max\left\{d,\frac{d(r-1)}{2}\right\}<p<\infty$, $d\geq 2$ (Theorem \ref{th2.1}).
\item [(ii)] In the second part, we consider the stochastic counterpart of the problem considered in part (i).
\begin{itemize}\item [(a)] We first establish the existence and uniqueness of pathwise local mild solutions (up to a random time) to the stochastic CBF equations perturbed by additive L\'evy noise in $\mathbb{J}_p$, for $\max\left\{d,\frac{d(r-1)}{2}\right\}<p<\infty$, $d\geq 2$ (Theorems \ref{thm3.2} and \ref{thm3.3}). \item [(b)] Considering the noise to be a fractional Brownian motion, we show the existence and uniqueness of local pathwise mild solutions (up to a random time) to the stochastic CBF equations in $\mathbb{J}_p$, for $\max\left\{d,\frac{d(r-1)}{2}\right\}<p<\infty$ and $\max\left\{\frac{1}{2},\frac{d}{4}\right\}<H<1$, $d=2,3$ (Theorem \ref{thm3.4}). \item [(c)] Finally, we discuss the local solvability of the stochastic CBF equations perturbed by $\alpha$-regular Volterra processes in $\mathbb{J}_p$, for $\max\left\{d,\frac{d(r-1)}{2}\right\}<p<\infty$, $d=2,3$ (Remark \ref{rem4.3}).
\end{itemize}
\end{enumerate}
The main difficulty in establishing the existence of local mild solutions to the deterministic CBF equations \eqref{3} lies in estimating the nonlinear terms, which we overcome using the estimates \eqref{1.7}-\eqref{1.9}. In the stochastic case, along with these difficulties, additional complications arise from the presence of the noise term (the proper regularity of the stochastic convolution). In the case of L\'evy noise, we handle this obstacle by using the results obtained in \cite{JZZB}. For fractional Brownian noise and $\alpha$-regular Volterra processes, we overcome this hurdle by using the stochastic convolution results established in \cite{PCBM}. Thus, we achieve our goals by making use of the estimates \eqref{1.6}-\eqref{1.9} together with fixed point arguments (an iterative technique) or the contraction mapping principle. It can easily be seen that in the sub-critical and critical cases (that is, for $r\in[1,3]$) the condition on $p$ is $d<p<\infty$, which is the same as for the NSE (cf. \cite{FJR,FRS}, etc.), while in the super-critical case (that is, $r\in(3,\infty)$) the condition on $p$ becomes $\frac{d(r-1)}{2}<p<\infty$.
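Indeed, the two regimes simply reflect the elementary comparison
$$\max\left\{d,\frac{d(r-1)}{2}\right\}=\begin{cases} d, & 1\leq r\leq 3,\\[1mm] \dfrac{d(r-1)}{2}, & r>3,\end{cases}$$
since $\frac{d(r-1)}{2}\leq d$ if and only if $r\leq 3$.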
\section{Existence and Uniqueness for the Deterministic CBF Equations}\label{sec2}\setcounter{equation}{0}
In this section, we establish the existence and uniqueness of a local mild solution to the problem \eqref{3}. We use a fixed point argument (a simple iterative technique) to obtain the required result.
\iffalse
Let us first set
\begin{align}
\Sigma(K,T_*)=\left\{\mathbf{u}\in\mathrm{C}([0,T_*];\mathbb{J}_p):\|\mathbf{u}(t)\|_{p}\leq K, \ \text{ for all }\ t\in[0,T_*]\right\}.
\end{align}
Clearly, the space $\Sigma(K,T_*)$ equipped with the supremum topology is a complete metric space.
\begin{theorem}\label{thm2.1}
For $K>\|\mathbf{x}\|_{p}$ and $\mathbf{f}\in\mathrm{L}^1(0,T;\mathbb{J}_p)$, there exists a time $0<T_*<T$ such that \eqref{3} has a unique mild solution \eqref{4} in $\Sigma(K,T_*)$.
\end{theorem}
\begin{proof}
Let us take any $\mathbf{u}\in\Sigma(K,T_*)$ and define $\mathbf{z}(t)=\mathrm{G}(\mathbf{u})(t)$ by
\begin{align}
\mathbf{z}(t)=e^{-t\mathrm{A}}\mathbf{x}-\int_0^te^{-(t-s)\mathrm{A}}[\mathrm{B}(\mathbf{u}(s))+\mathcal{C}(\mathbf{u}(s))]\,\mathrm{d} s+\int_0^te^{-(t-s)\mathrm{A}}\mathscr{P}\mathbf{f}(s)\,\mathrm{d} s.
\end{align}
Firstly, we show that $\mathrm{G}:\Sigma(K,T_*)\to\Sigma(K,T_*)$. Making use of the estimates \eqref{1.6}-\eqref{1.9} and the fact that $e^{-t\mathrm{A}}$ is a contraction, we find
\begin{align}
\|\mathbf{z}(t)\|_{p}&\leq\|e^{-t\mathrm{A}}\mathbf{x}\|_{p}+\int_0^t\|e^{-(t-s)\mathrm{A}}\mathrm{B}(\mathbf{u}(s))\|_{p}\,\mathrm{d} s+\int_0^t\|e^{-(t-s)\mathrm{A}}\mathcal{C}(\mathbf{u}(s))\|_{p}\,\mathrm{d} s\nonumber\\&\quad+\int_0^t\|e^{-(t-s)\mathrm{A}}\mathscr{P}\mathbf{f}(s)\|_{p}\,\mathrm{d} s\nonumber\\&\leq \|\mathbf{x}\|_{p}+C\int_0^t(t-s)^{-\left(\frac{1}{2}+\frac{d}{2p}\right)}\|\mathbf{u}(s)\|_{p}^2\,\mathrm{d} s+C\int_0^t(t-s)^{-\frac{d(r-1)}{2p}}\|\mathbf{u}(s)\|_{p}^r\,\mathrm{d} s\nonumber\\&\quad+C\int_0^t\|\mathbf{f}(s)\|_{p}\,\mathrm{d} s\nonumber\\&\leq \left\{\|\mathbf{x}\|_{p}+C\int_0^t\|\mathbf{f}(s)\|_{p}\,\mathrm{d} s\right\}+Ct^{\frac{1}{2}-\frac{d}{2p}}\sup_{s\in[0,t]}\|\mathbf{u}(s)\|_{p}^2+Ct^{1-\frac{d(r-1)}{2p}}\sup_{s\in[0,t]}\|\mathbf{u}(s)\|_{p}^r\nonumber\\&\leq \left\{\|\mathbf{x}\|_{p}+C\int_0^{T_*}\|\mathbf{f}(s)\|_{p}\,\mathrm{d} s\right\}+C{T_*}^{\frac{1}{2}-\frac{d}{2p}}K^2+C{T_*}^{1-\frac{d(r-1)}{2p}}K^r,
\end{align}
for all $t\in[0,T_*]$. Now, since $K>\|\mathbf{x}\|_{p}$ and $\max\left\{d,\frac{d(r-1)}{2}\right\}<p<\infty$, one can choose $0<T_*<T$ in such a way that $\|\mathbf{z}(t)\|_{p}\leq K$ for all $t\in[0,T_*]$, provided
\begin{align*}
\left\{\|\mathbf{x}\|_{p}+C\int_0^{T_*}\|\mathbf{f}(s)\|_{p}\,\mathrm{d} s\right\}+C{T_*}^{\frac{1}{2}-\frac{d}{2p}}K^2+C{T_*}^{1-\frac{d(r-1)}{2p}}K^r\leq K.
\end{align*}
Therefore, $\mathbf{z}\in\Sigma(K,T_*)$.
Let us now show that $\mathrm{G}:\Sigma(K,T_*)\to\Sigma(K,T_*)$ is a contraction. We consider $\mathbf{u}_1,\mathbf{u}_2\in\Sigma(K,T_*)$ and set $\mathbf{z}_i(t)=\mathrm{G}(\mathbf{u}_i)(t)$, for all $t\in[0,T_*]$ and $i\in\{1,2\}$, and $\mathbf{z}=\mathbf{z}_1-\mathbf{z}_2$. Then $\mathbf{z}(\cdot)$ satisfies
\begin{align*}
\mathbf{z}(t)&=-\int_0^te^{-(t-s)\mathrm{A}}[\mathrm{B}(\mathbf{u}_1(s))-\mathrm{B}(\mathbf{u}_2(s))]\,\mathrm{d} s-\int_0^te^{-(t-s)\mathrm{A}}[\mathcal{C}(\mathbf{u}_1(s))-\mathcal{C}(\mathbf{u}_2(s))]\,\mathrm{d} s,
\end{align*}
for all $t\in[0,T_*]$. Using the bilinearity of $\mathrm{B}(\cdot,\cdot)$ and Taylor's formula (see Theorem 7.9.1, \cite{PGC}), we find
\begin{align}
\|\mathbf{z}(t)\|_{p}&\leq \int_0^t\|e^{-(t-s)\mathrm{A}}\mathrm{B}(\mathbf{u}_1(s)-\mathbf{u}_2(s),\mathbf{u}_1(s))\|_{p}\,\mathrm{d} s\nonumber\\&\quad+ \int_0^t\|e^{-(t-s)\mathrm{A}}\mathrm{B}(\mathbf{u}_2(s),\mathbf{u}_1(s)-\mathbf{u}_2(s))\|_{p}\,\mathrm{d} s\nonumber\\&\quad+\int_0^t\left\|e^{-(t-s)\mathrm{A}}\int_0^1\mathcal{C}'(\theta\mathbf{u}_1(s)+(1-\theta)\mathbf{u}_2(s))(\mathbf{u}_1(s)-\mathbf{u}_2(s))\,\mathrm{d}\theta\right\|_{p}\,\mathrm{d} s\nonumber\\&\leq C\int_0^t(t-s)^{-\left(\frac{1}{2}+\frac{d}{2p}\right)}(\|\mathbf{u}_1(s)\|_{p}+\|\mathbf{u}_2(s)\|_{p})\|\mathbf{u}_1(s)-\mathbf{u}_2(s)\|_{p}\,\mathrm{d} s\nonumber\\&\quad+C\int_0^t(t-s)^{-\frac{d(r-1)}{2p}}(\|\mathbf{u}_1(s)\|_{p}^{r-1}+\|\mathbf{u}_2(s)\|_{p}^{r-1})\|\mathbf{u}_1(s)-\mathbf{u}_2(s)\|_{p}\,\mathrm{d} s\nonumber\\&\leq C{t}^{\frac{1}{2}-\frac{d}{2p}}\sup_{s\in[0,t]}\left(\|\mathbf{u}_1(s)\|_{p}+\|\mathbf{u}_2(s)\|_{p}\right)\sup_{s\in[0,t]}\|\mathbf{u}_1(s)-\mathbf{u}_2(s)\|_{p}\nonumber\\&\quad+Ct^{1-\frac{d(r-1)}{2p}}\sup_{s\in[0,t]}\left(\|\mathbf{u}_1(s)\|_{p}^{r-1}+\|\mathbf{u}_2(s)\|_{p}^{r-1}\right)\sup_{s\in[0,t]}\|\mathbf{u}_1(s)-\mathbf{u}_2(s)\|_{p}\nonumber\\&\leq C\left({T_*}^{\frac{1}{2}-\frac{d}{2p}}K+{T_*}^{1-\frac{d(r-1)}{2p}}K^{r-1}\right)\sup_{t\in[0,T_*]}\|\mathbf{u}_1(t)-\mathbf{u}_2(t)\|_{p},
\end{align}
for all $t\in[0,T_*]$. For $\max\left\{d,\frac{d(r-1)}{2}\right\}<p<\infty$, one can choose $0<T_*<T$ in such a way that $$C\left({T_*}^{\frac{1}{2}-\frac{d}{2p}}K+{T_*}^{1-\frac{d(r-1)}{2p}}K^{r-1}\right)<1.$$ Hence, $\mathrm{G}$ is a strict contraction on $\Sigma(K,T_*)$, and an application of the contraction mapping principle yields the existence of a mild solution $\mathbf{u}(\cdot)$ to the problem \eqref{3}. Uniqueness follows from the representation \eqref{4}.
\end{proof}
\fi
\begin{theorem}\label{th2.1}
For $\max\left\{d,\frac{d(r-1)}{2}\right\}<p<\infty$, let $\mathbf{x}\in\mathbb{J}_p$ and $\mathbf{f}\in\mathrm{L}^1(0,T;\mathbb{J}_p)$ be given. Then, there exists a time $0<T_*<T$ such that \eqref{3} has a unique mild solution, given by \eqref{4}, in $\mathrm{C}([0,T_*];\mathbb{J}_p)$.
\end{theorem}
\begin{proof}
As discussed in \cite{FJR,Kato5}, etc., in order to prove the theorem we use an iterative technique. Let us set
\begin{align}
\mathbf{u}_0(t)&=e^{-t\mathrm{A}}\mathbf{x},\\
\mathbf{u}_{n+1}(t)&=\mathbf{u}_0(t)+\mathrm{G}(\mathbf{u}_n)(t), \ n=0,1,2,\ldots,
\end{align}
where $$\mathrm{G}(\mathbf{u})(t)=-\int_0^te^{-(t-s)\mathrm{A}}[\mathrm{B}(\mathbf{u}(s))+\mathcal{C}(\mathbf{u}(s))]\,\mathrm{d} s+\int_0^te^{-(t-s)\mathrm{A}}\mathscr{P}\mathbf{f}(s)\,\mathrm{d} s,$$ which is continuous for all $t\in[0,T]$. Since $e^{-t\mathrm{A}}$ is a contraction semigroup on $\mathbb{L}^p(\mathbb{R}^d)$, we first note that $$\|\mathbf{u}_0(t)\|_{p}=\|e^{-t\mathrm{A}}\mathbf{x}\|_{p}\leq \|\mathbf{x}\|_{p}.$$ Using the estimates \eqref{1.6}-\eqref{1.9}, we find
\begin{align}
\|\mathbf{u}_{n+1}(t)\|_{p}&\leq\|\mathbf{x}\|_{p}+\int_0^t\|e^{-(t-s)\mathrm{A}}\mathrm{B}(\mathbf{u}_n(s))\|_{p}\,\mathrm{d} s+\int_0^t\|e^{-(t-s)\mathrm{A}}\mathcal{C}(\mathbf{u}_n(s))\|_{p}\,\mathrm{d} s\nonumber\\&\quad+\int_0^t\|e^{-(t-s)\mathrm{A}}\mathscr{P}\mathbf{f}(s)\|_{p}\,\mathrm{d} s\nonumber\\&\leq \|\mathbf{x}\|_{p}+C\int_0^t(t-s)^{-\left(\frac{1}{2}+\frac{d}{2p}\right)}\|\mathbf{u}_n(s)\|_{p}^2\,\mathrm{d} s+C\int_0^t(t-s)^{-\frac{d(r-1)}{2p}}\|\mathbf{u}_n(s)\|_{p}^r\,\mathrm{d} s\nonumber\\&\quad+C\int_0^t\|\mathbf{f}(s)\|_{p}\,\mathrm{d} s\nonumber\\&\leq \left\{\|\mathbf{x}\|_{p}+C\int_0^t\|\mathbf{f}(s)\|_{p}\,\mathrm{d} s\right\}+Ct^{\frac{1}{2}-\frac{d}{2p}}\sup_{s\in[0,t]}\|\mathbf{u}_n(s)\|_{p}^2+Ct^{1-\frac{d(r-1)}{2p}}\sup_{s\in[0,t]}\|\mathbf{u}_n(s)\|_{p}^r\nonumber\\&\leq \left\{\|\mathbf{x}\|_{p}+C\int_0^{T}\|\mathbf{f}(s)\|_{p}\,\mathrm{d} s\right\}+C{T}^{\frac{1}{2}-\frac{d}{2p}}f_n^2+C{T}^{1-\frac{d(r-1)}{2p}}f_n^r,
\end{align}
for all $t\in[0,T]$, where $$f_n=\sup_{t\in[0,T]}\|\mathbf{u}_n(t)\|_{p},\ n\geq 1,\qquad f_0:=\|\mathbf{x}\|_{p}+C\int_0^{T}\|\mathbf{f}(s)\|_{p}\,\mathrm{d} s\ \left(\geq\sup_{t\in[0,T]}\|\mathbf{u}_0(t)\|_{p}\right).$$ From the above relation, it is immediate that
\begin{align}
f_{n+1}\leq f_0+C{T}^{\frac{1}{2}-\frac{d}{2p}}f_n^2+C{T}^{1-\frac{d(r-1)}{2p}}f_n^r,\ n=0,1,2,\ldots,
\end{align}
which is a nonlinear recurrence relation. One can easily show by induction that if $$\frac{1}{2}\min\left\{\left(\frac{1}{4C{T}^{\frac{1}{2}-\frac{d}{2p}}}\right),\left(\frac{1}{4C{T}^{1-\frac{d(r-1)}{2p}}}\right)^{\frac{1}{r-1}}\right\}>f_0,$$ then $$f_n\leq \min\left\{\left(\frac{1}{4C{T}^{\frac{1}{2}-\frac{d}{2p}}}\right),\left(\frac{1}{4C{T}^{1-\frac{d(r-1)}{2p}}}\right)^{\frac{1}{r-1}}\right\}=:K, \ \text{ for all } \ n=1,2,3,\ldots,$$ so that the sequence $\{f_n\}$ is uniformly bounded.
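For completeness, the induction step reads as follows: if $f_n\leq K$, then, by the choice of $K$, $C{T}^{\frac{1}{2}-\frac{d}{2p}}K\leq\frac{1}{4}$ and $C{T}^{1-\frac{d(r-1)}{2p}}K^{r-1}\leq\frac{1}{4}$, and therefore
\begin{align*}
f_{n+1}\leq f_0+\left(C{T}^{\frac{1}{2}-\frac{d}{2p}}K\right)K+\left(C{T}^{1-\frac{d(r-1)}{2p}}K^{r-1}\right)K<\frac{K}{2}+\frac{K}{4}+\frac{K}{4}=K;
\end{align*}
since $f_0<\frac{K}{2}\leq K$, the same computation also covers the base case $n=0$.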
Let us now consider
\begin{align}
\mathbf{v}_{n+2}(t)&= \mathbf{u}_{n+2}(t)-\mathbf{u}_{n+1}(t)\nonumber\\&= -\int_0^te^{-(t-s)\mathrm{A}}[\mathrm{B}(\mathbf{u}_{n+1}(s))-\mathrm{B}(\mathbf{u}_n(s))]\,\mathrm{d} s-\int_0^te^{-(t-s)\mathrm{A}}[\mathcal{C}(\mathbf{u}_{n+1}(s))-\mathcal{C}(\mathbf{u}_n(s))]\,\mathrm{d} s,
\end{align}
for all $t\in[0,T]$. Once again using the estimates \eqref{1.6}-\eqref{1.9}, we obtain
\begin{align}\label{210}
\|\mathbf{v}_{n+2}(t)\|_{p}&=\|\mathbf{u}_{n+2}(t)-\mathbf{u}_{n+1}(t)\|_{p}\nonumber\\&\leq \int_0^t\|e^{-(t-s)\mathrm{A}}\mathrm{B}(\mathbf{u}_{n+1}(s)-\mathbf{u}_{n}(s),\mathbf{u}_{n+1}(s))\|_{p}\,\mathrm{d} s\nonumber\\&\quad+ \int_0^t\|e^{-(t-s)\mathrm{A}}\mathrm{B}(\mathbf{u}_{n}(s),\mathbf{u}_{n+1}(s)-\mathbf{u}_{n}(s))\|_{p}\,\mathrm{d} s\nonumber\\&\quad+\int_0^t\left\|e^{-(t-s)\mathrm{A}}\int_0^1\mathcal{C}'(\theta\mathbf{u}_{n+1}(s)+(1-\theta)\mathbf{u}_n(s))(\mathbf{u}_{n+1}(s)-\mathbf{u}_n(s))\,\mathrm{d}\theta\right\|_{p}\,\mathrm{d} s\nonumber\\&\leq C\int_0^t(t-s)^{-\left(\frac{1}{2}+\frac{d}{2p}\right)}(\|\mathbf{u}_{n+1}(s)\|_{p}+\|\mathbf{u}_n(s)\|_{p})\|\mathbf{u}_{n+1}(s)-\mathbf{u}_n(s)\|_{p}\,\mathrm{d} s\nonumber\\&\quad+C\int_0^t(t-s)^{-\frac{d(r-1)}{2p}}(\|\mathbf{u}_{n+1}(s)\|_{p}^{r-1}+\|\mathbf{u}_n(s)\|_{p}^{r-1})\|\mathbf{u}_{n+1}(s)-\mathbf{u}_n(s)\|_{p}\,\mathrm{d} s\nonumber\\&\leq C{t}^{\frac{1}{2}-\frac{d}{2p}}\sup_{s\in[0,t]}\left(\|\mathbf{u}_{n+1}(s)\|_{p}+\|\mathbf{u}_{n}(s)\|_{p}\right)\sup_{s\in[0,t]}\|\mathbf{v}_{n+1}(s)\|_{p}\nonumber\\&\quad+Ct^{1-\frac{d(r-1)}{2p}}\sup_{s\in[0,t]}\left(\|\mathbf{u}_{n+1}(s)\|_{p}^{r-1}+\|\mathbf{u}_{n}(s)\|_{p}^{r-1}\right)\sup_{s\in[0,t]}\|\mathbf{v}_{n+1}(s)\|_{p},
\end{align}
for all $t\in[0,T]$. Therefore, we deduce that
\begin{align}\label{211}
\sup_{t\in[0,T]}\|\mathbf{v}_{n+2}(t)\|_{p}&\leq C\left(K{T}^{\frac{1}{2}-\frac{d}{2p}}+K^{r-1}T^{1-\frac{d(r-1)}{2p}}\right)\sup_{t\in[0,T]}\|\mathbf{v}_{n+1}(t)\|_{p}\nonumber\\&\leq C^{n+1}\left(K{T}^{\frac{1}{2}-\frac{d}{2p}}+K^{r-1}T^{1-\frac{d(r-1)}{2p}}\right)^{n+1}\sup_{t\in[0,T]}\|\mathbf{v}_1(t)\|_{p}\nonumber\\&\leq 2KC^{n+1}\left(K{T}^{\frac{1}{2}-\frac{d}{2p}}+K^{r-1}T^{1-\frac{d(r-1)}{2p}}\right)^{n+1}, \ n=0,1,2,\ldots.
\end{align}
Let us now consider the infinite series of the form
\begin{align}\label{212}\mathbf{u}_0(t)+\mathbf{v}_1(t)+\mathbf{v}_2(t)+\cdots+\mathbf{v}_n(t)+\cdots.\end{align} The $n^{\mathrm{th}}$ partial sum of the series is $\mathbf{u}_n(t)$, that is, \begin{align}\label{213}\mathbf{u}_n(t)=\mathbf{u}_0(t)+\sum_{m=0}^{n-1}\mathbf{v}_{m+1}(t).\end{align} Therefore, the sequence $\{\mathbf{u}_n(t)\}$ converges if and only if the series \eqref{212} converges. From the inequality \eqref{211}, we have
\begin{align}
&\sup_{t\in[0,T]}\|\mathbf{u}_0(t)\|_{p}+\sum_{m=0}^{\infty}\sup_{t\in[0,T]}\|\mathbf{v}_{m+1}(t)\|_{p}\nonumber\\&\leq \frac{K}{2}+\sum_{m=0}^{\infty}2KC^m\left(K{T}^{\frac{1}{2}-\frac{d}{2p}}+K^{r-1}T^{1-\frac{d(r-1)}{2p}}\right)^m\nonumber\\&=\frac{K}{2}+\frac{2K}{1-C\left(K{T}^{\frac{1}{2}-\frac{d}{2p}}+K^{r-1}T^{1-\frac{d(r-1)}{2p}}\right)}<+\infty,
\end{align}
provided $$C\left(K{T}^{\frac{1}{2}-\frac{d}{2p}}+K^{r-1}T^{1-\frac{d(r-1)}{2p}}\right)<1.$$ Thus, we can choose a time $0<T_*<T$ in such a way that the above condition is satisfied (with $T$ replaced by $T_*$). Therefore, the series \eqref{212} converges uniformly on $[0,T_*]$ and we denote the sum of the series by $\mathbf{u}(t)$. Then, the relation \eqref{213} provides $$\mathbf{u}(t)=\lim_{n\to\infty}\mathbf{u}_n(t).$$ The uniform convergence of $\mathbf{u}_n(t)$ to $\mathbf{u}(t)$ and the continuity of the operator $\mathrm{B}(\cdot)+\mathcal{C}(\cdot)$ give us $$\mathbf{u}(t)=\mathbf{u}_0(t)+\mathrm{G}(\mathbf{u})(t),$$ which is a mild solution to the problem \eqref{3} on the interval $[0,T_*]$. The continuity of the function $\mathbf{u}(\cdot)$ follows from the uniform convergence and the continuity of the sequence $\{\mathbf{u}_n(\cdot)\}_{n=0}^{\infty}$.
Let us now show the uniqueness. Let $\mathbf{u}_1(\cdot)$ and $\mathbf{u}_2(\cdot)$ be two local mild solutions of the problem \eqref{3}. Then $\mathbf{u}=\mathbf{u}_1-\mathbf{u}_2$ satisfies
\begin{align}
\mathbf{u}(t)=-\int_0^te^{-(t-s)\mathrm{A}}[\mathrm{B}(\mathbf{u}_1(s))-\mathrm{B}(\mathbf{u}_2(s))]\,\mathrm{d} s-\int_0^te^{-(t-s)\mathrm{A}}[\mathcal{C}(\mathbf{u}_1(s))-\mathcal{C}(\mathbf{u}_2(s))]\,\mathrm{d} s.
\end{align}
A calculation similar to \eqref{210} yields
\begin{align}
\|\mathbf{u}(t)\|_{p}&\leq C{T_*}^{\frac{1}{2}-\frac{d}{2p}}\sup_{s\in[0,T_*]}\left(\|\mathbf{u}_{1}(s)\|_{p}+\|\mathbf{u}_{2}(s)\|_{p}\right)\sup_{s\in[0,T_*]}\|\mathbf{u}(s)\|_{p}\nonumber\\&\quad+C{T_*}^{1-\frac{d(r-1)}{2p}}\sup_{s\in[0,T_*]}\left(\|\mathbf{u}_{1}(s)\|_{p}^{r-1}+\|\mathbf{u}_{2}(s)\|_{p}^{r-1}\right)\sup_{s\in[0,T_*]}\|\mathbf{u}(s)\|_{p}\nonumber\\&\leq C\left(K{T_*}^{\frac{1}{2}-\frac{d}{2p}}+K^{r-1}{T_*}^{1-\frac{d(r-1)}{2p}}\right)\sup_{s\in[0,T_*]}\|\mathbf{u}(s)\|_{p},
\end{align}
for all $t\in[0,T_*]$. One can choose $T_*$ such that $C\left(K{T_*}^{\frac{1}{2}-\frac{d}{2p}}+K^{r-1}{T_*}^{1-\frac{d(r-1)}{2p}}\right)<1$, and hence the uniqueness of $\mathbf{u}\in\mathrm{C}([0,T_*];\mathbb{J}_p)$ follows.
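Indeed, taking the supremum over $t\in[0,T_*]$ in the last inequality gives
$$\sup_{t\in[0,T_*]}\|\mathbf{u}(t)\|_{p}\leq C\left(K{T_*}^{\frac{1}{2}-\frac{d}{2p}}+K^{r-1}{T_*}^{1-\frac{d(r-1)}{2p}}\right)\sup_{t\in[0,T_*]}\|\mathbf{u}(t)\|_{p},$$
and since the prefactor is strictly less than $1$, we must have $\sup_{t\in[0,T_*]}\|\mathbf{u}(t)\|_{p}=0$, that is, $\mathbf{u}_1=\mathbf{u}_2$ on $[0,T_*]$.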
\end{proof}
\section{Existence and Uniqueness for the Stochastic CBF Equations}\label{sec3}\setcounter{equation}{0}
This section is devoted to establishing the existence and uniqueness of a mild solution, up to a random time, to the system \eqref{5}. We use the contraction mapping principle to obtain the required result.
\subsection{The linear problem}
For $p\in[2,\infty)$, we know that $e^{-t\mathrm{A}}$ is a $\mathrm{C}_0$-contraction
semigroup on $\mathbb{L}^p(\mathbb{R}^d)$, and that $\mathbb{L}^p(\mathbb{R}^d)$ is a martingale type 2 Banach space as well as a $2$-smooth Banach space.
\iffalse
\begin{theorem}\label{modn}
Let $e^{-t\mathrm{A}}$ be a $\mathrm{C}_0$-contraction semigroup on $\mathbb{L}^p(\mathbb{R}^d)$, for
$p\in[2,\infty)$. Then, there exists an $\mathbb{L}^p$-valued c\`{a}dl\`{a}g
modification of the stochastic convolution
\begin{align}
\mathbb{U}(t)=\displaystyle{\int_0^t}e^{-(t-s)\mathrm{A}}\Phi\,\mathrm{d}\mathrm{W}(s)+\displaystyle{\int_0^t\int_{\mathrm{Z}}}e^{-(t-s)\mathrm{A}}\gamma(s-,z)\widetilde{\pi}(\mathrm{d} s,\mathrm{d} z),
\end{align}
such that for every stopping time $\tau>0$, for every $T>0$
and $p\in[2,\infty)$, we have
\begin{align}\label{mmm}
\mathbb{E}\left[\sup_{0\leq t\leq T\wedge\tau}\|\mathbb{U}(t)\|_{p}^{2}\right]\leq
C\left(\|\Phi\|^2_{\gamma(\mathbb{H},\mathbb{J}_p)}T+\int_0^T\int_{\mathrm{Z}}\|\gamma(s,z)\|_{p}^2\,\lambda(\mathrm{d} z)\,\mathrm{d} s\right),
\end{align}
where $C>0$ is a constant independent of $\mathbb{U}(\cdot)$.
\end{theorem}
\begin{proof}
See Theorem 1.2, \cite{NJZ} and Theorem 1.1, \cite{BHZ}.
\end{proof}
\fi
Let us now consider the \emph{stochastic Stokes equation}:
\begin{equation}\label{se1}
\left.
\begin{aligned}
\mathrm{d}\mathbf{w}(t)+\mathrm{A}\mathbf{w}(t)\,\mathrm{d} t&=\Phi\,\mathrm{d}\mathrm{W}(t)+\int_{\mathrm{Z}}\gamma(t-,z)\widetilde{\pi}(\mathrm{d} t,\mathrm{d} z),\\
\mathbf{w}(0)&=\mathbf{0}.
\end{aligned}
\right\}
\end{equation}
Making use of Theorem 3.6, \cite{JZZB}, the unique solution of the problem \eqref{se1} with paths in
$\mathrm{L}^{\infty}(0,T;\mathbb{J}_p)$, $p\in[2,\infty)$, $\mathbb{P}$-a.s., can be
represented by the stochastic convolution
\begin{align}\label{se3}
\mathbf{w}(t)=\int_0^te^{-(t-s)\mathrm{A}}\Phi\,\mathrm{d}\mathrm{W}(s)+\int_0^t\int_{\mathrm{Z}}e^{-(t-s)\mathrm{A}}\gamma(s-,z)\widetilde{\pi}(\mathrm{d} s,\mathrm{d} z),
\end{align}
for all $t\in[0,T]$, and \eqref{se3} has a
c\`{a}dl\`{a}g modification such that
\begin{align}\label{se4}
\mathbb{E}\left[\sup_{0\leq t\leq T}\|\mathbf{w}(t)\|_p^{2}\right]\leq
C\left(\|\Phi\|^2_{\gamma(\mathbb{H},\mathbb{J}_p)}T+\int_0^T\int_{\mathrm{Z}}\|\gamma(t,z)\|_p^2\,\lambda(\mathrm{d} z)\,\mathrm{d} t\right),
\end{align}
and $\sup\limits_{0\leq t\leq T}\|\mathbf{w}(t)\|_p<\infty$, $\mathbb{P}$-a.s.
t\wedge\tau_Nubsection{The nonlinear problem} Let us now establish the existence of a local mild solution to the stochastic CBF system \mathbf{v}arepsilonqref{5}.
\begin{definition}\label{def3.1}
A $\mathbb{J}_p$-valued and $t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athscr{F}_{t}$-adapted stochastic process $\mathbf{u}:[0,T]t\wedge\tau_N^nimes t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athbb{R}^dt\wedge\tau_N^nimes \Omega
\aftergroup\mathbf{v}arepsilongroup\originalrightarrow t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athbb{R}$ with $t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athbb{P}$-a.s. c\`{a}dl\`{a}g trajectories for $t\in [0,T]$, is a \mathbf{v}arepsilonmph{mild solution} to the system (\mathrm{r}ef{5}), if for any $T>0$, $\mathbf{u}(t):=\mathbf{u}(t,\cdot,\cdot)$ satisfies the integral equation \mathbf{v}arepsilonqref{6}
$t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athbb{P}$-a.s., for each $t\in [0,T].$
\mathbf{v}arepsilonnd{definition}
Let us set $\mathbf{v}=\mathbf{u}-\mathbf{w}$. Then, $\mathbf{v} (\cdot)$ satisfies the following system $t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athbb{P}$-a.s.:
\begin{equation}\label{3.4}
t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athopen{}t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athclose\bgroup\originalleft\{
\begin{aligned}
\mathbf{f}rac{\/t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athrm{d}\/\mathbf{v}(t)}{\/t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athrm{d}\/ t}+[\mathrm{A}\mathbf{v}(t)+\mathrm{B}(\mathbf{v}(t)+\mathbf{w}(t))+t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athcal{C}(\mathbf{v}(t)+\mathbf{w}(t))]&=t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athbf{0}, \\
\mathbf{u}(0)&=\mathbf{x}.
\mathbf{v}arepsilonnd{aligned}\aftergroup\mathbf{v}arepsilongroup\originalright.
\mathbf{v}arepsilonnd{equation}
Note that for each fixed $\omega\in\Omega$, \mathbf{v}arepsilonqref{3.4} is a deterministic system. The operator system \mathbf{v}arepsilonqref{3.4} can be transformed into an nonlinear integral equation as
\begin{align}\label{3.5}
\mathbf{v}(t)=e^{-t\mathrm{A}}\mathbf{x}-\int_0^te^{-(t-s)\mathrm{A}}[\mathrm{B}(\mathbf{v}(s)+\mathbf{w}(s))+t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athcal{C}(\mathbf{v}(s)+\mathbf{w}(s))]\/t\wedge\tau_N^nextbf{W}_{t\wedge\tau_N^nheta_n}athrm{d}\/ s,
\mathbf{v}arepsilonnd{align}
for all $t\in[0,T]$. As in the case of the deterministic CBF equations, we obtain the existence of a unique local mild solution to the system \eqref{3.4} by using the contraction mapping principle in the space $\mathrm{C}([0,\widetilde{T}];\mathbb{J}_p)$, $\mathbb{P}$-a.s., for $\max\left\{d,\frac{d(r-1)}{2}\right\}<p<\infty$, where $0<\widetilde{T}<T$ is a random time. Let us set
\begin{align}
\Sigma(M,\widetilde{T})=\left\{\mathbf{v}\in\mathrm{C}([0,\widetilde{T}];\mathbb{J}_p):\|\mathbf{v}(t)\|_{p}\leq M, \ \mathbb{P}\text{-a.s., for all }t\in[0,\widetilde{T}]\right\}.
\end{align}
Clearly, the space $\Sigma(M,\widetilde{T})$ equipped with the supremum topology is a complete metric space.
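For orientation, \eqref{3.5} is the variation-of-constants (Duhamel) form of \eqref{3.4}: writing, in this paragraph only, $\mathrm{N}(s):=\mathrm{B}(\mathbf{v}(s)+\mathbf{w}(s))+\mathcal{C}(\mathbf{v}(s)+\mathbf{w}(s))$, a formal differentiation of \eqref{3.5} yields
\begin{align*}
\frac{\mathrm{d}\mathbf{v}(t)}{\mathrm{d} t}=-\mathrm{A}e^{-t\mathrm{A}}\mathbf{x}-\mathrm{N}(t)+\int_0^t\mathrm{A}e^{-(t-s)\mathrm{A}}\mathrm{N}(s)\,\mathrm{d} s=-\mathrm{A}\mathbf{v}(t)-\mathrm{N}(t),
\end{align*}
which is \eqref{3.4}, so the fixed-point formulation below is consistent with Definition \ref{def3.1}.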
\begin{theorem}\label{thm3.2}
For $\max\left\{d,\frac{d(r-1)}{2}\right\}<p<\infty$, let the $\mathscr{F}_0$-measurable initial data $\mathbf{x}\in\mathbb{J}_p$, $\mathbb{P}$-a.s., be given. For $M>\|\mathbf{x}\|_{p}$, there exists a random time $\widetilde{T}$ such that \eqref{3.4} has a unique mild solution in $\Sigma(M,\widetilde{T})$.
\end{theorem}
\begin{proof}
Let us take any $\mathbf{v}\in\Sigma(M,\widetilde{T})$ and define $\mathbf{y}(t)=\mathrm{F}(\mathbf{v})(t)$ by
\begin{align}
\mathbf{y}(t)=e^{-t\mathrm{A}}\mathbf{x}-\int_0^te^{-(t-s)\mathrm{A}}[\mathrm{B}(\mathbf{v}(s)+\mathbf{w}(s))+\mathcal{C}(\mathbf{v}(s)+\mathbf{w}(s))]\,\mathrm{d} s,
\end{align}
for all $t\in[0,\widetilde{T}]$. Let us first establish that $\mathrm{F}:\Sigma(M,\widetilde{T})\to\Sigma(M,\widetilde{T})$. Making use of the estimates \eqref{1.6}-\eqref{1.9}, we find
\begin{align}
\|\mathbf{y}(t)\|_{p}&\leq\|e^{-t\mathrm{A}}\mathbf{x}\|_{p}+\int_0^t\|e^{-(t-s)\mathrm{A}}[\mathrm{B}(\mathbf{v}(s)+\mathbf{w}(s))+\mathcal{C}(\mathbf{v}(s)+\mathbf{w}(s))]\|_{p}\,\mathrm{d} s\nonumber\\
&\leq \|\mathbf{x}\|_{p}+C\int_0^t(t-s)^{-\left(\frac{1}{2}+\frac{d}{2p}\right)}\|\mathbf{v}(s)+\mathbf{w}(s)\|_{p}^2\,\mathrm{d} s\nonumber\\
&\quad+C\int_0^t(t-s)^{-\frac{d(r-1)}{2p}}\|\mathbf{v}(s)+\mathbf{w}(s)\|_{p}^r\,\mathrm{d} s \nonumber\\
&\leq \|\mathbf{x}\|_{p}+Ct^{\frac{1}{2}-\frac{d}{2p}}\sup_{s\in[0,t]}\|\mathbf{v}(s)+\mathbf{w}(s)\|_{p}^2+Ct^{1-\frac{d(r-1)}{2p}}\sup_{s\in[0,t]}\|\mathbf{v}(s)+\mathbf{w}(s)\|_{p}^r\nonumber\\
&\leq \|\mathbf{x}\|_{p}+C{\widetilde{T}}^{\frac{1}{2}-\frac{d}{2p}}\left(\sup_{t\in[0,\widetilde{T}]}\|\mathbf{v}(t)\|_{p}^2+\sup_{t\in[0,\widetilde{T}]}\|\mathbf{w}(t)\|_{p}^2\right)\nonumber\\
&\quad+C{\widetilde{T}}^{1-\frac{d(r-1)}{2p}}\left(\sup_{t\in[0,\widetilde{T}]}\|\mathbf{v}(t)\|_{p}^r+\sup_{t\in[0,\widetilde{T}]}\|\mathbf{w}(t)\|_{p}^r\right)\nonumber\\
&\leq \|\mathbf{x}\|_{p}+C{\widetilde{T}}^{\frac{1}{2}-\frac{d}{2p}}(M^2+\mu_p^2)+C{\widetilde{T}}^{1-\frac{d(r-1)}{2p}}(M^r+\mu_p^r),
\end{align}
$\mathbb{P}$-a.s., for all $t\in[0,\widetilde{T}]$, where $$\mu_p=\sup_{t\in[0,T]}\|\mathbf{w}(t)\|_{p}.$$ Now, since $M>\|\mathbf{x}\|_{p}$, $\mathbb{P}$-a.s., and $\max\left\{d,\frac{d(r-1)}{2}\right\}<p<\infty$, one can choose $0<\widetilde{T}<T$ in such a way that $\|\mathbf{y}(t)\|_{p}\leq M$, for all $t\in[0,\widetilde{T}]$, provided
\begin{align*}
\|\mathbf{x}\|_{p}+C{\widetilde{T}}^{\frac{1}{2}-\frac{d}{2p}}(M^2+\mu_p^2)+C{\widetilde{T}}^{1-\frac{d(r-1)}{2p}}(M^r+\mu_p^r)\leq M.
\end{align*}
Therefore $\mathbf{y}\in\Sigma(M,\widetilde{T})$.
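For definiteness (this particular choice is ours, for illustration only; the argument requires nothing beyond the displayed inequality), one admissible random time is obtained by making each $\widetilde{T}$-dependent term at most $\frac{1}{2}(M-\|\mathbf{x}\|_{p})$, that is,
\begin{align*}
\widetilde{T}\leq\min\left\{\left(\frac{M-\|\mathbf{x}\|_{p}}{2C(M^2+\mu_p^2)}\right)^{\left(\frac{1}{2}-\frac{d}{2p}\right)^{-1}},\ \left(\frac{M-\|\mathbf{x}\|_{p}}{2C(M^r+\mu_p^r)}\right)^{\left(1-\frac{d(r-1)}{2p}\right)^{-1}},\ T\right\},
\end{align*}
where both exponents are positive precisely because $p>\max\left\{d,\frac{d(r-1)}{2}\right\}$, and $\widetilde{T}$ is random since $\mu_p$ is.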
Our next aim is to show that $\mathrm{F}:\Sigma(M,\widetilde{T})\to\Sigma(M,\widetilde{T})$ is a contraction. Let us consider $\mathbf{v}_1,\mathbf{v}_2\in\Sigma(M,\widetilde{T})$ and set $\mathbf{y}_i(t)=\mathrm{F}(\mathbf{v}_i)(t)$, for all $t\in[0,\widetilde{T}]$ and $i\in\{1,2\}$, and $\mathbf{y}=\mathbf{y}_1-\mathbf{y}_2$. Then $\mathbf{y}(\cdot)$ satisfies
\begin{align*}
\mathbf{y}(t)&=-\int_0^te^{-(t-s)\mathrm{A}}[\mathrm{B}(\mathbf{v}_1(s)+\mathbf{w}(s))-\mathrm{B}(\mathbf{v}_2(s)+\mathbf{w}(s))+\mathcal{C}(\mathbf{v}_1(s)+\mathbf{w}(s))-\mathcal{C}(\mathbf{v}_2(s)+\mathbf{w}(s))]\,\mathrm{d} s,
\end{align*}
$\mathbb{P}$-a.s., for all $t\in[0,\widetilde{T}]$. Once again using the bilinearity of $\mathrm{B}(\cdot)$ and Taylor's formula, we find
\begin{align}
\|\mathbf{y}(t)\|_{p}&\leq \int_0^t\|e^{-(t-s)\mathrm{A}}\mathrm{B}(\mathbf{v}_1(s)-\mathbf{v}_2(s),\mathbf{v}_1(s)+\mathbf{w}(s))\|_{p}\,\mathrm{d} s\nonumber\\
&\quad+ \int_0^t\|e^{-(t-s)\mathrm{A}}\mathrm{B}(\mathbf{v}_2(s)+\mathbf{w}(s),\mathbf{v}_1(s)-\mathbf{v}_2(s))\|_{p}\,\mathrm{d} s\nonumber\\
&\quad+\int_0^t\left\|e^{-(t-s)\mathrm{A}}\int_0^1\mathcal{C}'(\theta\mathbf{v}_1(s)+(1-\theta)\mathbf{v}_2(s)+\mathbf{w}(s))(\mathbf{v}_1(s)-\mathbf{v}_2(s))\,\mathrm{d}\theta\right\|_{p}\,\mathrm{d} s\nonumber\\
&\leq C\int_0^t(t-s)^{-\left(\frac{1}{2}+\frac{d}{2p}\right)}\|\mathbf{v}_1(s)+\mathbf{w}(s)\|_{p}\|\mathbf{v}_1(s)-\mathbf{v}_2(s)\|_{p}\,\mathrm{d} s\nonumber\\
&\quad+C\int_0^t(t-s)^{-\left(\frac{1}{2}+\frac{d}{2p}\right)}\|\mathbf{v}_2(s)+\mathbf{w}(s)\|_{p}\|\mathbf{v}_1(s)-\mathbf{v}_2(s)\|_{p}\,\mathrm{d} s\nonumber\\
&\quad+C\int_0^t(t-s)^{-\frac{d(r-1)}{2p}}\left(\|\mathbf{v}_1(s)\|_{p}+\|\mathbf{v}_2(s)\|_{p}+\|\mathbf{w}(s)\|_{p}\right)^{r-1}\|\mathbf{v}_1(s)-\mathbf{v}_2(s)\|_{p}\,\mathrm{d} s \nonumber\\
&\leq C{t}^{\frac{1}{2}-\frac{d}{2p}}\sup_{s\in[0,t]}\left(\|\mathbf{v}_1(s)\|_{p}+\|\mathbf{v}_2(s)\|_{p}+\|\mathbf{w}(s)\|_{p}\right)\sup_{s\in[0,t]}\|\mathbf{v}_1(s)-\mathbf{v}_2(s)\|_{p}\nonumber\\
&\quad+Ct^{1-\frac{d(r-1)}{2p}}\sup_{s\in[0,t]}\left(\|\mathbf{v}_1(s)\|_{p}^{r-1}+\|\mathbf{v}_2(s)\|_{p}^{r-1}+\|\mathbf{w}(s)\|_{p}^{r-1}\right)\sup_{s\in[0,t]}\|\mathbf{v}_1(s)-\mathbf{v}_2(s)\|_{p}\nonumber\\
&\leq C\left({\widetilde{T}}^{\frac{1}{2}-\frac{d}{2p}}(M+\mu_p)+{\widetilde{T}}^{1-\frac{d(r-1)}{2p}}(M^{r-1}+\mu_p^{r-1})\right)\sup_{t\in[0,\widetilde{T}]}\|\mathbf{v}_1(t)-\mathbf{v}_2(t)\|_{p},
\end{align}
for all $t\in[0,\widetilde{T}]$. For $\max\left\{d,\frac{d(r-1)}{2}\right\}<p<\infty$, one can choose $0<\widetilde{T}<T$ in such a way that $$C\left({\widetilde{T}}^{\frac{1}{2}-\frac{d}{2p}}(M+\mu_p)+{\widetilde{T}}^{1-\frac{d(r-1)}{2p}}(M^{r-1}+\mu_p^{r-1})\right)<1.$$ Hence, $\mathrm{F}$ is a strict contraction on $\Sigma(M,\widetilde{T})$, and an application of the contraction mapping principle provides the existence of a mild solution to the problem \eqref{3.4} up to a random time $0<\widetilde{T}<T$. Uniqueness follows from the representation \eqref{3.5}.
\end{proof}
Since $\mathbf{u}=\mathbf{v}+\mathbf{w}$, we immediately obtain the following theorem on the existence of a mild solution to the system \eqref{5}.
\begin{theorem}\label{thm3.3}
For $\max\left\{d,\frac{d(r-1)}{2}\right\}<p<\infty$, let the $\mathscr{F}_0$-measurable initial data $\mathbf{x}\in\mathbb{J}_p$, $\mathbb{P}$-a.s., be given. Then there exists a random time $0<\widetilde{T}<T$ such that \eqref{5} has a unique mild solution $\mathbf{u}\in\mathrm{L}^{\infty}(0,\widetilde{T};\mathbb{J}_p)$, $\mathbb{P}$-a.s., with a c\`{a}dl\`{a}g modification.
\end{theorem}
\section{Stochastic CBF equations subjected to fractional Brownian motion}\label{sec4}\setcounter{equation}{0} In this section, we obtain the existence and uniqueness of a local mild solution up to a random time for the stochastic CBF equations \eqref{1.13}, for $d=2,3$.
\subsection{Fractional Brownian motion}
The first study of fractional Brownian motion (fBm) within the Hilbertian framework is reported in \cite{ANK}. Owing to its many practical applications, the stochastic analysis of fBm has been developed intensively since the nineties. For a comprehensive study, the interested reader is referred to \cite{VPMT,DNu}. In this subsection, we give a brief description of fBm and its stochastic integral representation in separable Hilbert spaces (cf. Sections 4 and 5 of \cite{EIMR} for separable Banach spaces). Let us consider a time interval $[0,T]$, where $T$ is an arbitrary fixed time horizon.
\begin{definition}
A fractional Brownian motion (fBm) with Hurst parameter $H\in(0,1)$ is a centered Gaussian process $\mathrm{W}^H$ with covariance $$R_{H}(t,s):= \mathbb{E}\left[\mathrm{W}^{H}(t)\mathrm{W}^{H}(s)\right]=\frac{1}{2}\left(t^{2H}+s^{2H}-|t-s|^{2H}\right),$$ where $s,t\in[0,T]$.
\end{definition}
Note that if $H=\frac{1}{2}$, then $\mathrm{W}^{\frac{1}{2}}$ is the standard Brownian motion. It should be recalled that fBm is not a Markov process except in the case $H=\frac{1}{2}$. The fBm is the only $H$-self-similar Gaussian process (that is, for any constant $a > 0$, the processes $\{a^{-H}\mathrm{W}^{H}(at)\}_{0\leq t\leq T}$ and $\mathrm{W}^{H}=\{\mathrm{W}^{H}(t)\}_{0\leq t\leq T}$ have the same distribution) with stationary increments (Proposition 1.1, \cite{CAT}), $$\mathbb{E}\left[(\mathrm{W}^{H}(t)-\mathrm{W}^{H}(s))^2\right]=|t-s|^{2H}.$$ Furthermore, the process $\mathrm{W}^{H}$ admits the Wiener integral representation
\begin{align}\label{41}\mathrm{W}^{H}(t)=\int_0^tK_H(t,s)\,\mathrm{d}\mathrm{W}(s),\end{align} where $\mathrm{W} =\{\mathrm{W}(t)\}_{0\leq t\leq T}$ is a Wiener process and $K_H(\cdot,\cdot)$ is the kernel given by $$K_{H}(t,s)=d_{H}(t-s)^{H-\frac{1}{2}}+s^{H-\frac{1}{2}}\mathrm{F}\left(\frac{t}{s}\right),$$ where $d_H$ is a constant and $$\mathrm{F}(z)=d_H\left(\frac{1}{2}-H\right)\int_0^{z-1}\theta^{H-\frac{3}{2}}\left(1-(\theta+1)^{H-\frac{1}{2}}\right)\mathrm{d}\theta.$$ For $H>\frac{1}{2}$, the kernel $K_H(\cdot,\cdot)$ has the simpler expression $$K_H(t,s)=c_Hs^{\frac{1}{2}-H}\int_s^t(u-s)^{H-\frac{3}{2}}u^{H-\frac{1}{2}}\,\mathrm{d} u,$$ where $t>s$ and $c_H=\left(\frac{H(2H-1)}{\beta(2-2H,H-\frac{1}{2})}\right)^{\frac{1}{2}},$ $\beta(\cdot,\cdot)$ being the beta function. The fact that the process defined by \eqref{41} is an fBm follows from the equality $$\int_0^{t\wedge s}K_H(t,u)K_H(s,u)\,\mathrm{d} u=R_H(t,s).$$ Moreover, the kernel $K_H(\cdot,\cdot)$ satisfies $$\frac{\partial}{\partial t}K_H(t,s)=d_H\left(H-\frac{1}{2}\right)\left(\frac{s}{t}\right)^{\frac{1}{2}-H}(t-s)^{H-\frac{3}{2}}.$$ Note that the fBm is an $\alpha$-regular Volterra process for $\alpha=H-\frac{1}{2}$, where $H>\frac{1}{2}$ (see Remark 2.2, \cite{PCBM} for more details).
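As a quick consistency check of the stationary-increment identity quoted above (a one-line verification included only for the reader's convenience), the covariance $R_H$ gives
\begin{align*}
\mathbb{E}\left[(\mathrm{W}^{H}(t)-\mathrm{W}^{H}(s))^2\right]=R_H(t,t)+R_H(s,s)-2R_H(t,s)=t^{2H}+s^{2H}-\left(t^{2H}+s^{2H}-|t-s|^{2H}\right)=|t-s|^{2H}.
\end{align*}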
Let $\mathbb{U}$ be a separable Hilbert space with scalar product $(\cdot,\cdot)$. Let $\mathcal{E}_{H}$ denote the linear space of $\mathbb{U}$-valued step functions on $[0,T]$ of the form \begin{align}\label{42}\varphi(t)=\sum_{i=0}^{m-1}x_i\mathds{1}_{[t_{i},t_{i+1})}(t),\end{align} where $0=t_0<t_1<\cdots<t_m\leq T$, $m\in\mathbb{N}$, $x_i\in\mathbb{U}$. The space $\mathcal{E}_{H}$ is equipped with the inner product $$\left(\sum_{i=0}^{m-1}x_i\mathds{1}_{[0,t_i)},\sum_{j=0}^{n-1}y_j\mathds{1}_{[0,s_j)}\right)_{\mathcal{H}}=\sum_{i=0}^{m-1}\sum_{j=0}^{n-1}(x_i,y_j)R_{H}(t_i,s_j).$$ Note that $\mathcal{E}_H$ is a pre-Hilbert space and we denote the completion of $\mathcal{E}_H$ with respect to $(\cdot,\cdot)_{\mathcal{H}}$ by $\mathcal{H}$. For $\varphi\in\mathcal{E}_{H}$ of the form \eqref{42}, let us define its Wiener integral with respect to the fBm as $$\int_0^T\varphi(s)\,\mathrm{d}\mathrm{W}^{H}(s)=\sum_{i=0}^{m-1}x_i(\mathrm{W}^{H}(t_{i+1})-\mathrm{W}^{H}(t_i)).$$ It is clear that the mapping $\varphi=\sum_{i=0}^{m-1}x_i\mathds{1}_{[t_{i},t_{i+1})}\mapsto\int_0^T\varphi(s)\,\mathrm{d}\mathrm{W}^{H}(s)$ is an isometry between $\mathcal{E}_{H}$ and the linear space $\mathrm{span}\{\mathrm{W}^{H}(t):t\in[0,T]\}$, viewed as a subspace of $\mathrm{L}^2(\Omega;\mathbb{U})$, since $$\mathbb{E}\left[\left\|\int_0^T\varphi(s)\,\mathrm{d}\mathrm{W}^{H}(s)\right\|_{\mathbb{U}}^2\right]=\|\varphi\|_{\mathcal{H}}^2.$$ The image of an element $\varphi\in\mathcal{H}$ under this isometry is called the Wiener integral of $\varphi$ with respect to the fBm $\mathrm{W}^H$.
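A simple illustration of this isometry (given here purely as an example, in the scalar case $\mathbb{U}=\mathbb{R}$): for $\varphi=\mathds{1}_{[0,t]}$ one has
\begin{align*}
\int_0^T\mathds{1}_{[0,t]}(s)\,\mathrm{d}\mathrm{W}^{H}(s)=\mathrm{W}^{H}(t),\qquad \|\mathds{1}_{[0,t]}\|_{\mathcal{H}}^2=R_H(t,t)=t^{2H}=\mathbb{E}\left[\mathrm{W}^{H}(t)^2\right],
\end{align*}
so the two sides of the isometry agree, as they must.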
For $0<s<T$, we consider the operator $K_T^*:\mathcal{E}_H\to\mathrm{L}^2(0,T;\mathbb{U})$ defined by $$(K_T^*\varphi)(s)=K_H(T,s)\varphi(s)+\int_s^T(\varphi(r)-\varphi(s))\frac{\partial K_H}{\partial r}(r,s)\,\mathrm{d} r.$$ For $H>\frac{1}{2}$, the operator $K_T^*$ has the simpler expression
$$(K_T^*\varphi)(s)=\int_s^T\varphi(r)\frac{\partial K_H}{\partial r}(r,s)\,\mathrm{d} r.$$ The integrals appearing on the right-hand side are both Bochner integrals. Since the operator $K_T^*$ satisfies $(K_T^*\varphi,K_T^*\psi)_{\mathrm{L}^2(0,T;\mathbb{U})}=(\varphi,\psi)_{\mathcal{H}},\ \text{ for all }\ \varphi,\psi\in\mathcal{E}_H,$ it can be extended to an isometry between $\mathcal{H}$ and $\mathrm{L}^2(0,T;\mathbb{U})$, in the sense that $$\mathbb{E}\left[\left\|\int_0^T\varphi(s)\,\mathrm{d}\mathrm{W}^{H}(s)\right\|_{\mathbb{U}}^2\right]=\|K_T^*\varphi\|_{\mathrm{L}^2(0,T;\mathbb{U})}^2=\|\varphi\|_{\mathcal{H}}^2,\ \text{ for all }\ \varphi\in\mathcal{H}.$$ Hence we have the following connection with the Wiener process $\mathrm{W}$:
\begin{align}\label{43}\int_0^t\varphi(s)\,\mathrm{d}\mathrm{W}^{H}(s)=\int_0^t(K_t^*\varphi)(s)\,\mathrm{d}\mathrm{W}(s),\end{align} for every $t\in[0,T]$, and $\varphi\mathds{1}_{[0,t]}\in\mathcal{H}$ if and only if $K_t^*\varphi\in\mathrm{L}^2(0,T;\mathbb{U})$. Furthermore, if $\varphi,\psi\in\mathcal{H}$ are such that $\int_0^T\int_0^T|\varphi(t)||\psi(s)||t-s|^{2H-2}\,\mathrm{d} s\,\mathrm{d} t<\infty,$ then their scalar product in $\mathcal{H}$ is given by \begin{align*}(\varphi,\psi)_{\mathcal{H}}=\int_0^T\int_0^T\varphi(t)\psi(s)|t-s|^{2H-2}\,\mathrm{d} s\,\mathrm{d} t.\end{align*}
In general, careful justification is needed for the existence of the right-hand side of \eqref{43} (cf. Section 5.1, \cite{DNu}). As we are discussing Wiener integrals over the Hilbert space $\mathbb{U}$, we point out that if $\varphi\in\mathrm{L}^2(0,T;\mathbb{U})$ is a deterministic function, then the relation \eqref{43} holds, and the right-hand side is well defined in $\mathrm{L}^2(\Omega;\mathbb{U})$ provided $K_t^*\varphi\in\mathrm{L}^2(0,T;\mathbb{U})$.
\subsection{Cylindrical fractional Brownian motion} For a Hilbert space $\mathbb{U}$, let us now define the standard cylindrical fractional Brownian motion in $\mathbb{U}$ as the formal series (cf. \cite{TED,STCAT})
\begin{align}\label{44}
\mathrm{W}^{H}(t)=\sum_{n=1}^{\infty}e_n\mathrm{W}_n^{H}(t),
\end{align}
where $\{e_n\}_{n=1}^{\infty}$ is a complete orthonormal basis in $\mathbb{U}$ and each $\mathrm{W}_n^{H}$ is a one-dimensional fBm. It should be noted that the series \eqref{44} does not converge in $\mathrm{L}^2(\Omega;\mathbb{U}),$ and thus $\mathrm{W}^{H}(t)$ is not a well-defined $\mathbb{U}$-valued random variable. However, one can consider a Hilbert space $\mathbb{U}_1$ such that $\mathbb{U}\subset\mathbb{U}_1$ and the linear embedding is a Hilbert-Schmidt operator; then the series \eqref{44} defines a $\mathbb{U}_1$-valued Gaussian random variable, and $\{\mathrm{W}^{H}(t)\}_{t\in[0,T]}$ is a $\mathbb{U}_1$-valued cylindrical fBm.
Let $\mathbb{Y}$ be another real separable Hilbert space and let $\mathcal{L}_2(\mathbb{U},\mathbb{Y})$ denote the space of Hilbert-Schmidt operators from $\mathbb{U}$ to $\mathbb{Y}$. As discussed in \cite{DaZ}, it is possible to define a stochastic integral of the form
\begin{align}\label{45}
\int_0^T\varphi(t)\,\mathrm{d}\mathrm{W}^{H}(t),
\end{align}
where $\varphi:[0,T]\to\mathcal{L}(\mathbb{U},\mathbb{Y})$, and the integral \eqref{45} is a $\mathbb{Y}$-valued random variable which is independent of the choice of $\mathbb{U}_1$. Let $\varphi$ be a deterministic function with values in $\mathcal{L}_2(\mathbb{U},\mathbb{Y})$ satisfying the following conditions (a simple illustration is given after the list):
\begin{enumerate}
\item [(i)] for each $x\in\mathbb{U}$, $\varphi(\cdot)x\in\mathrm{L}^p(0,T;\mathbb{Y})$, for $p>\frac{1}{H}$;
\item [(ii)] $\int_0^T\int_0^T\|\varphi(s)\|_{\mathcal{L}_2(\mathbb{U},\mathbb{Y})}\|\varphi(t)\|_{\mathcal{L}_2(\mathbb{U},\mathbb{Y})}|s-t|^{2H-2}\,\mathrm{d} s\,\mathrm{d} t<\infty.$
\end{enumerate}
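To make conditions (i)-(ii) concrete, consider (purely as an illustration, with notation used only here) a constant function $\varphi(t)\equiv\Phi_0\in\mathcal{L}_2(\mathbb{U},\mathbb{Y})$. Then (i) holds trivially, and for $H\in(\frac{1}{2},1)$ condition (ii) reduces to
\begin{align*}
\|\Phi_0\|_{\mathcal{L}_2(\mathbb{U},\mathbb{Y})}^2\int_0^T\int_0^T|s-t|^{2H-2}\,\mathrm{d} s\,\mathrm{d} t=\frac{T^{2H}}{H(2H-1)}\|\Phi_0\|_{\mathcal{L}_2(\mathbb{U},\mathbb{Y})}^2<\infty.
\end{align*}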
Then the stochastic integral \eqref{45} can be expressed as
\begin{align}\label{46}
\int_0^T\varphi(t)\,\mathrm{d}\mathrm{W}^{H}(t):=\sum_{n=1}^{\infty}\int_0^T\varphi(s)e_n\,\mathrm{d}\mathrm{W}_n^{H}(s)=\sum_{n=1}^{\infty}\int_0^T(K_T^*(\varphi e_n))(s)\,\mathrm{d}\mathrm{W}_n(s),
\end{align}
where $\mathrm{W}_n$ is the standard Brownian motion connected to the fBm $\mathrm{W}_n^{H}$ by the representation formula \eqref{41}. Since $H\in(\frac{1}{2},1)$ implies $\varphi e_n\in\mathrm{L}^2(0,T;\mathbb{Y})$, for each $n\in\mathbb{N}$, the terms of the series \eqref{46} are well defined. Moreover, the random variables $\left\{\int_0^T\varphi(s)e_n\,\mathrm{d}\mathrm{W}_n^{H}(s)\right\}_{n=1}^{\infty}$ are mutually independent Gaussian random variables (cf. \cite{TED}).
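As an elementary illustration of \eqref{46} (our own example, with notation introduced only for this paragraph): if $\varphi(s)\equiv\Phi_0$ is constant in time and diagonal, $\Phi_0e_n=\lambda_nf_n$ with $\{f_n\}_{n=1}^\infty$ orthonormal in $\mathbb{Y}$, then
\begin{align*}
\int_0^T\Phi_0\,\mathrm{d}\mathrm{W}^{H}(t)=\sum_{n=1}^{\infty}\lambda_nf_n\mathrm{W}_n^{H}(T),\qquad
\mathbb{E}\left[\left\|\int_0^T\Phi_0\,\mathrm{d}\mathrm{W}^{H}(t)\right\|_{\mathbb{Y}}^2\right]=T^{2H}\sum_{n=1}^{\infty}\lambda_n^2=T^{2H}\|\Phi_0\|_{\mathcal{L}_2(\mathbb{U},\mathbb{Y})}^2,
\end{align*}
which is finite precisely because $\Phi_0$ is Hilbert-Schmidt (the second equality uses the mutual independence of the $\mathrm{W}_n^{H}$ noted above).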
For cylindrical fractional Brownian motions in a separable Banach space $\mathbb{Y}$, the interested reader is referred to Sections 4 and 5 of \cite{EIMR}. For stochastic integrals in $\mathbb{Y}$, a series expansion similar to \eqref{46} is available, where the Hilbert-Schmidt operators from $\mathbb{U}$ to $\mathbb{Y}$ are replaced by $\gamma$-radonifying operators from $\mathbb{U}$ to $\mathbb{Y}$ (see \cite{EIMR} for more details). One can refer to \cite{JCOC,MMEM} for the local solvability in $\mathbb{L}^p$-spaces of models such as the semilinear heat equation and the Hardy-H\'enon parabolic equations perturbed by fBm.
\subsection{SCBF equations perturbed by fractional Brownian motion}
We consider $\mathbb{U}=\mathbb{H}=\mathbb{J}_2$, $\{e_j\}_{j=1}^{\infty}$ as the complete orthonormal basis of $\mathbb{J}_2$, and we take $d=2,3$. Next, we consider the following \emph{stochastic Stokes equation} perturbed by fractional Brownian noise:
\begin{equation}\label{47}
\left.
\begin{aligned}
\mathrm{d}\mathbf{w}(t)+\mathrm{A}\mathbf{w}(t)\,\mathrm{d} t&=\Phi\,\mathrm{d}\mathrm{W}^{H}(t),\\
\mathbf{w}(0)&=\mathbf{0},
\end{aligned}
\right\}
\end{equation}
where $\Phi\in\mathcal{L}(\mathbb{H},\mathbb{J}_p)$ and $\mathrm{W}^{H}=\{\mathrm{W}^{H}(t)\}_{t\in[0,T]}$ is a cylindrical fractional Brownian motion. Since the operator $\mathrm{A}$ generates an analytic semigroup on $\mathbb{J}_p$, standard estimates on the Green's function give (cf. \cite{PCBM}) \begin{align}\label{4.7}\|\mathrm{S}(t)\Phi\|_{\gamma(\mathbb{H},\mathbb{J}_p)}\leq Ct^{-\frac{d}{4}},\ \text{ for }\ t>0.\end{align} Using Corollary 4.1 of \cite{PCBM} (see also Remark 4.2 and Section 5.2 there), under the assumption $$\max\left\{\frac{1}{2},\frac{d}{4}\right\}<H<1,$$ the unique solution of the problem \eqref{47}, with paths in
$\mathrm{C}([0,T];\mathbb{J}_p)$, $p\in[2,\infty),$ $\mathbb{P}$-a.s., can be
represented by the stochastic convolution
\begin{align}\label{48}
\mathbf{w}(t)=\int_0^te^{-(t-s)\mathrm{A}}\Phi\,\mathrm{d}\mathrm{W}^{H}(s),
\end{align}
for all $t\in[0,T]$, and it has a modification such that
\begin{align}\label{49}
\sup_{t\in[0,T]}\left\|\int_0^te^{-(t-s)\mathrm{A}}\Phi\,\mathrm{d}\mathrm{W}^{H}(s)\right\|_{p}<\infty, \ \mathbb{P}\text{-a.s.}
\end{align}
Then the following theorem can be established in a similar way as Theorems \ref{thm3.2} and \ref{thm3.3}.
\begin{theorem}\label{thm3.4}
For $\max\left\{d,\frac{d(r-1)}{2}\right\}<p<\infty$ and $\max\left\{\frac{1}{2},\frac{d}{4}\right\}<H<1,$ $d=2,3,$ let the $\mathscr{F}_0$-measurable initial data $\mathbf{x}\in\mathbb{J}_p$, $\mathbb{P}$-a.s., be given. Then there exists a random time $\widehat{T}$ such that \eqref{1.13} has a unique mild solution $\mathbf{u}\in\mathrm{C}([0,\widehat{T}];\mathbb{J}_p)$, $\mathbb{P}$-a.s., satisfying \eqref{1.14}.
\end{theorem}
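Explicitly, the assumption on the Hurst parameter in Theorem \ref{thm3.4} reads
\begin{align*}
d=2:\quad \max\left\{\tfrac{1}{2},\tfrac{2}{4}\right\}=\tfrac{1}{2}<H<1,
\qquad
d=3:\quad \max\left\{\tfrac{1}{2},\tfrac{3}{4}\right\}=\tfrac{3}{4}<H<1,
\end{align*}
so that in three dimensions only the smoother fractional noises, that is, those with $H>\frac{3}{4}$, are covered.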
\begin{remark}\label{rem4.3}
One can also consider the stochastic CBF equations perturbed by an $\alpha$-regular Volterra process,
\begin{equation}\label{50}
\left\{
\begin{aligned}
\mathrm{d}\mathbf{u}(t)+[\mathrm{A}\mathbf{u}(t)+\mathrm{B}(\mathbf{u}(t))+\mathcal{C}(\mathbf{u}(t))]\,\mathrm{d} t&=\Phi\,\mathrm{d}\mathcal{B}(t), \\
\mathbf{u}(0)&=\mathbf{x},
\end{aligned}\right.
\end{equation}
where $\Phi\in\mathcal{L}(\mathbb{H},\mathbb{J}_p)$ satisfies
\eqref{4.7} and $\mathcal{B}$ is an infinite-dimensional $\alpha$-regular cylindrical Volterra process with $\alpha\in(0,\frac{1}{2})$, which belongs to a finite Wiener chaos (see \cite{PCBM} for more details on $\alpha$-regular Volterra processes). Then, for $$\alpha>\frac{d}{4}-\frac{1}{2},\ d=2,3,$$ the process $$ \mathbf{w}(t)=\int_0^te^{-(t-s)\mathrm{A}}\Phi\,\mathrm{d}\mathcal{B}(s)$$ has a modification in $\mathrm{C}([0,T];\mathbb{J}_p)$, $p\in[\frac{2}{1+2\alpha},\infty),$ $\mathbb{P}$-a.s. Thus a result similar to Theorem \ref{thm3.4} can be obtained for the system \eqref{50} as well, namely the existence and uniqueness of a mild solution $$\mathbf{u}(t)=e^{-t\mathrm{A}}\mathbf{x}-\int_0^te^{-(t-s)\mathrm{A}}[\mathrm{B}(\mathbf{u}(s))+\mathcal{C}(\mathbf{u}(s))]\,\mathrm{d} s+\int_0^te^{-(t-s)\mathrm{A}}\Phi\,\mathrm{d}\mathcal{B}(s),$$ for $t\in[0,\overline{T}]$, where $0<\overline{T}<T$ is a random time, with a $\mathbb{P}$-a.s. continuous modification with trajectories in $\mathbb{J}_p$, for $\max\left\{d,\frac{d(r-1)}{2}\right\}<p<\infty$.
\end{remark}
\vskip 0.2 cm
\noindent\textbf{Conclusions and future plans:} The existence and uniqueness of a local mild solution in $\mathbb{L}^p(\mathbb{R}^d)$ with $\max\left\{d,\frac{d(r-1)}{2}\right\}<p<\infty$ for the deterministic and stochastic CBF equations in $\mathbb{R}^d$ (for various kinds of noise) is established in this work. The critical case $p=\max\left\{d,\frac{d(r-1)}{2}\right\}$ is an interesting problem and will be addressed in a future work (for related results, see \cite{Kato5} for the deterministic NSE and \cite{MTSS} for the stochastic NSE).
\vskip 0.2 cm
\medskip\noindent
{\bf Acknowledgments:} M. T. Mohan would like to thank the Department of Science and Technology (DST), India for Innovation in Science Pursuit for Inspired Research (INSPIRE) Faculty Award (IFA17-MA110).
\vskip 0.2 cm
\noindent\textbf{Conflict of interest:} The author has no conflicts of interest to declare that are relevant to the content of this article.
\begin{thebibliography}{99}
\bibitem{BLH}
\newblock Z. Brze\'{z}niak and H. Long,
\newblock {A note on $\gamma$-radonifying and summing operators},
\newblock \emph{Stochastic Analysis}, Banach Center Publications, Institute of Mathematics, Polish Academy of Sciences, Warszawa, \textbf{105} (2015), 43--57.
\bibitem{ZBGD} Z. Brze\'zniak and G. Dhariwal, Stochastic tamed Navier-Stokes equations on $\mathbb{R}^3$: the existence and the uniqueness of solutions and the existence of an invariant measure, \emph{Journal of Mathematical Fluid Mechanics}, {\bf 22}, Article number: 23 (2020).
\bibitem{BHZ}
\newblock Z. Brze\'{z}niak, E. Hausenblas and J. Zhu,
\newblock {Maximal inequality for stochastic convolutions driven by compensated Poisson random measures in Banach spaces},
\newblock \emph{Ann. Inst. Henri Poincar\'e Probab. Stat.}, \textbf{53} (2017), 937--956.
\bibitem{ZCQJ} Z. Cai and Q. Jiu, Weak and strong solutions for the incompressible Navier-Stokes equations with damping, \emph{Journal of Mathematical Analysis and Applications}, {\bf 343} (2008), 799--809.
\bibitem{JCOC} J. Clarke and C. Olivera, Local $L^p$-solution for semilinear heat equation with fractional noise, \url{https://arxiv.org/abs/1902.06084}.
\bibitem{PCBM} P. Coupek, B. Maslowski and M. Ondrejat, $L^p$-valued stochastic convolution integral driven by Volterra noise, \emph{Stoch. Dyn.}, {\bf 18}(6) (2018), 1850048.
\bibitem{DaZ}
\newblock G. Da Prato and J. Zabczyk,
\newblock \emph{Stochastic Equations in Infinite Dimensions},
\newblock Cambridge University Press, 1992.
\bibitem{GDJZ}
\newblock G. Da Prato and J. Zabczyk,
\newblock \emph{Ergodicity for Infinite Dimensional Systems},
\newblock London Mathematical Society Lecture Notes, {\bf 229}, Cambridge
University Press, 1996.
\bibitem{ZDRZ} Z. Dong and R. Zhang, 3D tamed Navier-Stokes equations driven by multiplicative L\'evy noise: Existence, uniqueness and large deviations, \url{https://arxiv.org/pdf/1810.08868.pdf}.
\bibitem{TED} T. E. Duncan, B. Pasik-Duncan, and B. Maslowski, Fractional Brownian motion and stochastic equations in Hilbert spaces, \emph{Stoch. Dyn.}, {\bf 2} (2002), 225--250.
\bibitem{FJR}
\newblock E. B. Fabes, B. F. Jones and N. M. Riviere,
\newblock {The initial value problem for the Navier-Stokes equations with data in $\mathbb{L}^p$},
\newblock \emph{Archive for Rational Mechanics and Analysis}, \textbf{45} (1972), 222--240.
\bibitem{LFPS} L. Fang, P. Sundar and F. G. Viens, Two-dimensional stochastic Navier-Stokes equations with fractional Brownian noise, \emph{Random Oper. Stoch. Equ.}, {\bf 21}(2) (2013), 135--158.
\bibitem{FRS}
\newblock B. P. W. Fernando, B. R\"{u}diger and S. S. Sritharan,
\newblock {Mild solutions of stochastic Navier-Stokes equation with jump noise in $\mathbb{L}^p$-spaces},
\newblock \emph{Mathematische Nachrichten}, \textbf{288} (2015), 1615--1621.
\bibitem{KWH} K. W. Hajduk and J. C. Robinson, Energy equality for the 3D critical convective Brinkman-Forchheimer equations, \emph{Journal of Differential Equations}, {\bf 263} (2017), 7141--7161.
\bibitem{EHPA} E. Hausenblas and P. A. Razafimandimby, Existence of a density of the 2-dimensional stochastic Navier-Stokes equation driven by L\'evy processes or fractional Brownian motion, \emph{Stochastic Process. Appl.}, {\bf 130}(7) (2020), 4174--4205.
\bibitem{Kato5}
\newblock T. Kato,
\newblock {Strong $\mathbb{L}^p$-solutions of the Navier-Stokes equation in $\mathbb{R}^m$, with applications to weak solutions},
\newblock \emph{Mathematische Zeitschrift}, \textbf{187} (1984), 471--480.
\bibitem{YGTM} Y. Giga and T. Miyakawa, Solutions in $\mathbb{L}^r$ of the Navier-Stokes initial value problem, \emph{Arch. Ration. Mech. Anal.}, {\bf 89}(3) (1985), 267--281.
\bibitem{EIMR} E. Issoglio and M. Riedle,
Cylindrical fractional Brownian motion in Banach spaces,
\emph{Stochastic Process. Appl.}, {\bf 124}(11) (2014), 3507--3534.
\bibitem{ANK} A. N. Kolmogorov, Wienersche Spiralen und einige andere interessante Kurven im Hilbertschen Raum, \emph{C. R. (Doklady). Acad. URSS (N.S.)}, {\bf 26} (1940), 115--118.
\bibitem{MMEM} M. Majdoub and E. Mliki, Well-posedness for Hardy-H\'enon parabolic equations with fractional Brownian noise, \emph{Analysis and Mathematical Physics}, {\bf 11}:20 (2021).
\bibitem{PAM} P. A. Markowich, E. S. Titi and S. Trabelsi, Continuous data assimilation for the three-dimensional Brinkman-Forchheimer-extended Darcy model, \emph{Nonlinearity}, {\bf 29}(4) (2016), 1292--1328.
\bibitem{MTSS} M. T. Mohan and S. S. Sritharan, $\mathbb{L}^p$-solutions of the stochastic Navier-Stokes equations subject to L\'evy noise with $\mathbb{L}^m(\mathbb{R}^m)$-initial data, \emph{Evol. Equ. Control Theory}, {\bf 6}(3) (2017), 409--425.
\bibitem{MTM5} M. T. Mohan, On convective Brinkman-Forchheimer equations, \emph{Submitted}.
\bibitem{MTM4} M. T. Mohan, Stochastic convective Brinkman-Forchheimer equations, \emph{Submitted}, \url{https://arxiv.org/abs/2007.09376}.
\bibitem{MTM6} M. T. Mohan, Well-posedness and asymptotic behavior of the stochastic convective Brinkman-Forchheimer equations perturbed by pure jump noise, \emph{Submitted}, \url{https://arxiv.org/abs/2008.08577}.
\bibitem{DNu} D. Nualart, \emph{The Malliavin Calculus and Related Topics}, 2nd Ed., Probability and Its Applications (New York), Springer, Berlin, 2006.
\bibitem{VPMT} V. Pipiras and M. Taqqu, Integration questions related to the fractional Brownian motion, \emph{Probab. Theory Relat. Fields}, {\bf 118}(2) (2001), 251--281.
\bibitem{MRXZ1} M. R\"ockner and X. Zhang, Stochastic tamed 3D Navier-Stokes equation: existence, uniqueness and ergodicity, \emph{Probability Theory and Related Fields}, {\bf 145} (2009), 211--267.
\bibitem{STCAT} S. Tindel, C. A. Tudor, and F. Viens, Stochastic evolution equations with fractional Brownian motion, \emph{Probab. Theory Related Fields}, {\bf 127}(2) (2003), 186--204.
\bibitem{CAT} C. A. Tudor, \emph{Analysis of Variations for Self-similar Processes, A Stochastic Calculus Approach}, Springer International Publishing, Switzerland, 2013.
\bibitem{FBW1} F. B. Weissler, The Navier-Stokes initial value problem in $\mathbb{L}^p$, \emph{Arch. Ration. Mech. Anal.}, {\bf 74} (1980), 219--230.
\bibitem{JZZB} J. Zhu, Z. Brze\'zniak, and W. Liu, Maximal inequalities and exponential estimates for stochastic convolutions driven by L\'evy-type processes in Banach spaces with application to stochastic quasi-geostrophic equations,
\emph{SIAM J. Math. Anal.}, {\bf 51}(3) (2019), 2121--2167.
\bibitem{JZZB1} J. Zhu, Z. Brze\'zniak, and W. Liu,
$\mathbb{L}^p$-solutions for stochastic Navier-Stokes equations with jump noise,
\emph{Statist. Probab. Lett.}, {\bf 155} (2019), 108563, 9 pp.
\end{thebibliography}
\end{document}
\begin{document}
\preprint{APS/123-QED}
\title{Quantum computing hardware in the cloud:\\ Should a computational chemist care?}
\author{Alessandro Rossi}
\email{[email protected]}
\affiliation{
Department of Physics, SUPA, University of Strathclyde, Glasgow G4 0NG, United Kingdom}
\affiliation{National Physical Laboratory, Hampton Road, Teddington TW11 0LW, United Kingdom
}
\author{Paul G. Baity}
\affiliation{
James Watt School of Engineering, University of Glasgow, Glasgow G12 8LT, United Kingdom
}
\author{Vera M. Sch\"{a}fer}
\affiliation{
Department of Physics, University of Oxford, Clarendon Laboratory, Parks Road, Oxford OX1 3PU, United Kingdom
}
\author{Martin Weides}
\affiliation{
James Watt School of Engineering, University of Glasgow, Glasgow G12 8LT, United Kingdom
}
\date{\today}
\begin{abstract}
Within the last decade much progress has been made in the experimental realisation of quantum computing hardware based on a variety of physical systems. Rapid progress has been fuelled by the conviction that sufficiently powerful quantum machines will herald enormous computational advantages in many fields, including chemical research. A quantum computer capable of simulating the electronic structures of complex molecules would be a game changer for the design of new drugs and materials. Given the potential implications of this technology, there is a need within the chemistry community to keep abreast of the latest developments as well as to become involved in experimentation with quantum prototypes. To facilitate this, here we review the types of quantum computing hardware that have been made available to the public through cloud services. We focus on three architectures, namely superconductors, trapped ions and semiconductors. For each one we summarise the basic physical operations, requirements and performance. We discuss to what extent each system has been used for molecular chemistry problems and highlight the most pressing hardware issues to be solved for a chemistry-relevant quantum advantage to eventually emerge.
\end{abstract}
\maketitle
\section{\label{sec:intro}Introduction}
This year marks exactly 40 years since Richard Feynman famously said~\cite{Feynman}: “Nature isn't classical, dammit, and if you want to make a simulation of nature, you'd better make it quantum mechanical, and by golly it's a wonderful problem, because it doesn't look so easy”. On the one hand, the visionary physicist anticipated the possibility (and the inherent difficulty) of building a new type of computing apparatus operating according to the laws of quantum mechanics. On the other hand, he had immediately identified one of its most useful areas of application, i.e. simulations of chemical and physical systems.\\\indent
Computational chemists will indeed benefit from future quantum computers for calculations of molecular energies to within chemical accuracy, defined to be the target accuracy necessary to estimate chemical reaction rates at room temperature ($\approx 1$~kcal/mol)~\cite{elfving2020quantum}. Fully-fledged, error-free quantum systems will enable predictions and simulations that are not possible today in terms of both accuracy and speed. This could have a revolutionary impact on the design of drugs, catalysts and materials by allowing computational methods to replace lengthy and expensive experimental procedures. Unfortunately, we are still in the infancy of the development of quantum computing technology, and a machine that provides a quantum advantage in molecular chemistry over classical super-computers has not emerged yet. However, the progress in handling increasingly complex molecular and material chemistry has been relentless. Small-scale quantum machines developed by academic or corporate research centres were initially used to simulate simple diatomic or triatomic molecules made up of just H and He atoms~\cite{Colless,Shen,OMalley2016}. Recently, more powerful quantum computers have been used to simulate larger compounds containing N, Li and Be atoms~\cite{Arute2020_1,Kandala2017,Kandala2019}. Although these studies do not show a clear advantage in using quantum computing over the conventional computational methods that have been used for their validation, they do indicate that hurdles are being tackled and viable ways forward are becoming available.\\\indent
The major impediments that currently stifle quantum computers are limits to the number of computational units and computational errors. The units of quantum information are called quantum bits (qubits) in analogy with the binary bits of classical computers. Quantum algorithms for chemical calculations use qubit-based Hamiltonians to map molecular many-body Hamiltonians and evaluate the system wavefunction through repeated sampling of the qubit register states
~\cite{Lanyon2010,Aspuru-Guzik1704,RevModPhys.92.015003}. One particular algorithm, namely the variational quantum eigensolver (VQE)~\cite{peruzzo2014}, has acquired prominence because it alleviates the computational burden on today's limited quantum machines by using a classical co-processor to support the calculation. To date, the most advanced VQE simulations have mapped just 24 molecular orbitals onto 12 qubits~\cite{Arute2020_1}, a relatively easy feat for traditional computers. In order to calculate the energy ground state of more complex systems with chemical accuracy, it is expected that the number of qubits available will need to increase by orders of magnitude. A recent estimate~\cite{elfving2020quantum} indicates that more than 1500 orbitals are required for a VQE calculation that could outperform classical super computers.\\\indent
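As a rough orientation (a back-of-the-envelope illustration on our part, not a figure taken from the cited works), in the widely used Jordan--Wigner encoding each spin-orbital is assigned to one qubit, so a basis of $N_{\mathrm{so}}$ spin-orbitals requires of the order of
\begin{equation*}
N_{\mathrm{qubits}}\simeq N_{\mathrm{so}}=2\,N_{\mathrm{spatial\ orbitals}}
\end{equation*}
qubits before any symmetry-based reductions (more compact encodings and symmetry reductions can lower this count, as in the 24-orbital/12-qubit example above). On this crude accounting, the $\sim$1500 orbitals mentioned above would translate into thousands of logical qubits.\\\indent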
The other hurdle to consider is that qubits are error-prone due to noise-limited phase coherence, with inherent challenges in reading and writing their states properly (qubit fidelity). Ultimately, there is a limit to the number and duration of operations (qubit gates) that a quantum computer can carry out before error propagation leads to computational failure. Quantum error correction (QEC) schemes to correct these errors have been identified~\cite{Fowler2012,Ofek2016}.
The main drawback is that QEC leads to hardware aggravation, given that several physical qubits are required to realise a single error-corrected ``logical'' qubit. Some estimates based on realistic qubit noise levels conclude that the ratio of physical to logical qubits to reach fault tolerant machines could be at least $1$,$000:1$~\cite{martinis2015}.\\\indent
It is, therefore, evident that, to approach quantum chemistry simulations in a meaningful way, quantum computers with millions of physical qubits will be required, if one has to accurately map thousands of spin-orbitals. By contrast, today's quantum computers rely on a small number of noisy qubits (less than 100 at present) because the ability to manufacture, interconnect and error-correct qubits on larger scales is not yet sufficiently developed. This is why quantum machines are presently dubbed NISQ (Noisy Intermediate-Scale Quantum)~\cite{Preskill2018quantumcomputingin}. An important figure of merit for NISQ systems is called quantum volume (QV)~\cite{Cross2019}, which combines in one convenient metric the number of qubits available, how extensively they are interconnected, and their gate fidelity. A larger QV indicates that more complex quantum algorithms can be successfully run. This metric clearly shows that, to increase the computational power, it is not sufficient to build machines with more qubits if these remain affected by high levels of noise. Hence, the challenge of improving quantum computing power is a coordinated effort in scaling up qubits, making them as interconnected as possible, and reducing the error rates.\\\indent
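For orientation, the definition of Ref.~\cite{Cross2019} can be summarised (in slightly simplified form, omitting the precise statistical test on the circuit outputs) as
\begin{equation*}
\log_2 \mathrm{QV} = \max_{n}\, \min\bigl(n,\, d(n)\bigr),
\end{equation*}
where $d(n)$ is the largest depth of random ``square'' model circuits on $n$ qubits that the machine still executes with sufficiently high success probability; a QV of $2^k$ thus certifies that circuits of width and depth $k$ run reliably.\\\indent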
NISQ computers come in a variety of hardware implementations. Different from classical computers for which the Central Processing Unit (CPU) is invariably made with silicon integrated technology, Quantum Processing Units (QPU) can also be realised with superconductor microchips, ions or neutral atoms trapped in a vacuum, and on-chip photonic waveguides. Different technologies present different trade-offs in terms of number of qubits, phase coherence time, qubit fidelity, connectivity etc. Here, we are going to focus on a specific subset of quantum hardware types. Specifically, we will look at digital programmable QPUs, as opposed to adiabatic or analog systems~\cite{albash2018,Hauke_2020}. Among these, we shall discuss computers available to the general public through cloud services. On the one hand, being available to the public, and not just to specialised quantum developers, indicates these systems have reached superior maturity. On the other hand, we feel that a description of how these systems operate at hardware level will benefit the reader who may have to navigate through offers and subscription packages to identify the most relevant service for the computational chemistry application of interest. This may indeed become a daunting task without prior knowledge given the pace with which these services are becoming available and compete to acquire large customer bases. Global corporations offering cloud access include Google, IBM, Microsoft and Amazon. We shall limit our discussion to three types of hardware in the cloud: superconductor-, ion trap- and silicon-based quantum computers. For each one of these systems we discuss how qubits are physically embodied, initialised, read and manipulated. We will describe the operational requirements and the main performance parameters of each implementation. We will provide some use cases relevant to quantum chemical simulations to exemplify the usefulness of different machines in relevant contexts. The remainder of this Article is organised as follows. Superconductor devices are described in Section~\ref{sec:super}, ion trap systems in Section~\ref{sec:ion}, and a silicon processor in Section~\ref{sec:silicon}. These technologies are compared in Section~\ref{sec:disc}, and finally an outlook for future developments is discussed in Section~\ref{sec:conc}.
\section{\label{sec:super}Superconducting quantum computers}
Superconducting (SC) circuits are the most widely used systems for quantum computing. Many industry leaders, such as Google, IBM, and Rigetti, use superconducting quantum circuits to realize their quantum computers. Qubits implemented on superconducting devices fulfil the requirements \cite{Divincenzo2000} for scalable quantum computing, and therefore micron-sized quantum circuits and associated integrated-circuit processing techniques can be scaled up when implemented using superconducting quantum technologies. Whereas trapped ion and silicon devices control and read (sub-)atom scale components as their quantum systems, in SC circuits information is encoded into a \emph{macroscopic} quantum state of the condensate of paired electrons (so-called Cooper pairs), which collectively participate in a charged superfluid state with a wave function $\Psi(\vec{r},t) = |\Psi(\vec{r},t)|e^{i\phi(\vec{r},t)}$ \cite{BCS, Tinkham}. Here, the wave function parameters $|\Psi(\vec{r},t)|^2$ and $\phi(\vec{r},t)$ describe the density of Cooper pairs and complex phase of the condensate as a function of position $\vec{r}$ and time $t$.
Superconducting qubits, such as the one shown in Fig.~\ref{fig:transmon}(a), consist of islands of superconducting material, such as aluminium, connected by one or more Josephson junctions \cite{Tinkham}, which are nm-thin insulating barriers made from e.g. aluminium oxide.
\begin{figure}[]
\includegraphics[scale=0.5]{transmonfigure_3.png}
\caption{(a) Concentric transmon qubit design from Ref.~\cite{Stehli2020} and (inset) its equivalent circuit diagram. Two superconducting islands (green and blue) are shunted by a small Josephson junction bridge (orange). The qubit state is read out using a coplanar waveguide resonator (red). This readout resonator is inductively coupled to a signal line (black). (b) The states of the transmon qubit are determined by the sinusoidal potential (black solid line) of the Josephson junction. Solved in the phase basis ($\Delta\phi$), the eigenenergies (solid colored lines) can be approximated by a harmonic oscillator (dashed lines, respective colors) whose degeneracies are lifted by first-order corrections from the capacitive charging energy on the junction \cite{Koch2007, Wilkinson2018, Krantz2019}. (c) Diagram of the Bloch sphere. The ground $\ket{0}$ and first excited $\ket{1}$ states are used to define the qubit's logical state $\ket{\psi}$, which is a linear combination of $\ket{0}$ and $\ket{1}$ with respective complex amplitudes $\alpha$ and $\beta$. $\ket{\psi}$ can be manipulated by voltage pulses and gating operations and read out by projection onto a specified measurement basis.}
\label{fig:transmon}
\end{figure}
The current $I$ passing through the Josephson junction depends on the phase difference $\Delta\phi$ between the superconductors at either side of the junction by the relation $I = I_0 \sin(\Delta\phi)$, where $I_0$ is the largest supercurrent supported by the junction. When a voltage difference $V$ occurs across the junction, $\Delta\phi$ changes as $\frac{d\Delta\phi}{dt}=2eV/\hbar$ \cite{Josephson, Tinkham}, where $e$ is the electron charge and $\hbar$ is the reduced Planck's constant. This time dependence leads to non-linear resonance behavior with quantized states that are determined by flux, charge, and phase degrees of freedom \cite{Clarke2008}.
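Although not spelled out above, one short step makes the non-linearity explicit: differentiating the current--phase relation and inserting the voltage--phase relation gives
\begin{equation*}
\frac{dI}{dt} = I_0 \cos(\Delta\phi)\,\frac{d\Delta\phi}{dt} = \frac{2 e I_0 \cos(\Delta\phi)}{\hbar}\,V
\quad\Longrightarrow\quad
V = L_J\,\frac{dI}{dt}, \qquad L_J = \frac{\hbar}{2 e I_0 \cos(\Delta\phi)},
\end{equation*}
i.e. the junction behaves as an inductor whose inductance depends on the phase difference across it. This phase-dependent (non-linear) inductance is what makes the oscillator levels unequally spaced and hence addressable as a qubit.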
The effective circuit diagram of a superconducting qubit is shown in the inset of Fig.~\ref{fig:transmon}(a) and can be described by the Hamiltonian \cite{Koch2007,Krantz2019}
\begin{equation}
H=4E_C (\Delta n)^2-E_J \cos(\Delta\phi), \label{transmonhamiltonian}
\end{equation}
where $E_J$ is the energy of the current passing through the junction and $E_C$ is the capacitive charging energy between the two superconducting islands.
Quantum states are usually determined in either the basis of $\Delta \phi$, as shown in Fig.~\ref{fig:transmon}(b), or capacitive charge number $\Delta n$, depending on the relative strengths of $E_J$ and $E_C$. Similar to the conjugate variables of position and momentum, $\Delta \phi$ and $\Delta n$ have a non-zero commutation $[\Delta \phi, \Delta n] = i$ \cite{Krantz2019} and uncertainty relation $\sigma_{\Delta\phi}\sigma_{\Delta n} \gtrsim 1$ \cite{Tinkham}. In either basis, quantum states are approximated by a harmonic oscillator in which the degeneracy is lifted by the non-dominant energy term. This lifting of the degeneracy allows for the differentiation of qubit states.
One of the benefits of superconducting qubits is the ability to engineer a wide range of operational parameters by tuning the parameters $E_J$ and $E_C$ through intentional design choices. Perhaps the most widely used design choice is to have $E_J/E_C \sim 10^2$. This is the so-called transmon qubit design \cite{Koch2007}, which has been widely used by both academic and industry leaders to realize quantum computers. This ratio of $E_J/E_C$ creates an exponential cutoff for charge fluctuations, leading to longer lifetimes. Since $E_J$ is large compared to $E_C$, the quantum eigenstates are determined in the $\Delta \phi$ basis as shown in Fig.~\ref{fig:transmon}(b). The eigenenergies have an approximate $\sqrt{8E_J E_C}$ separation, while first-order corrections on the scale of $E_C$ create the essential anharmonicity between energy levels that is required for two-state control \cite{Wilkinson2018}. Therefore the transmon's $E_J/E_C$ ratio is large enough to reduce charge noise but small enough to prevent excitation beyond the first excited state.
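As a concrete illustration of these scaling relations, the spectrum of Eq.~(\ref{transmonhamiltonian}) can be computed numerically in the charge basis, where the $\cos(\Delta\phi)$ term couples neighbouring charge states with amplitude $-E_J/2$. The sketch below uses arbitrary but representative parameter values (not those of any specific device) and checks the approximations $E_1-E_0\approx\sqrt{8E_JE_C}-E_C$ and anharmonicity $\approx -E_C$:
\begin{verbatim}
import numpy as np

E_C = 0.25   # charging energy, GHz (illustrative value)
E_J = 25.0   # Josephson energy, GHz, so E_J/E_C = 100 (transmon regime)
N = 30       # charge-basis truncation: n runs over [-N, N]

n = np.arange(-N, N + 1)
H = np.diag(4.0 * E_C * n**2)                      # charging term 4*E_C*n^2
H = H - (E_J / 2.0) * (np.eye(2 * N + 1, k=1)      # -E_J*cos(phi) couples
                       + np.eye(2 * N + 1, k=-1))  # neighbouring charge states

E = np.linalg.eigvalsh(H)
f01 = E[1] - E[0]
f12 = E[2] - E[1]
print("f01     :", f01, " ~", np.sqrt(8 * E_J * E_C) - E_C)
print("anharm. :", f12 - f01, " ~", -E_C)
\end{verbatim}
With these values the printed transition frequency is close to $\sqrt{8E_JE_C}-E_C \approx 6.8$~GHz and the anharmonicity close to $-E_C$, consistent with the discussion above.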
\subsection{Qubit initialization \& readout}
The qubit is a quantum mechanical two-level system with logical states $\ket{0}$ and $\ket{1}$, in analogy to a classical bit. Without any external or thermal excitation, the superconducting qubit state $\ket{\psi}$ relaxes into the $\ket{0}$ state. Under a resonant drive $\ket{\psi}$ will oscillate between $\ket{0}$ and $\ket{1}$ as a superposition on the surface of the Bloch sphere, shown in Fig.~\ref{fig:transmon}(c). The measured period of these so-called Rabi oscillations is used to calibrate the applied microwave drives for qubit control. Reading the state of the qubit requires projecting $\ket{\psi}$ onto the quantization axis. Information about the probability distribution along other directions is obtained by fast rotations of the axis in question onto the quantization axis and subsequent measurement. Fast, in this context, means that the pulse length is short compared to the respective decoherence times. By measuring in quick succession, the qubit state can be inferred from the probabilities of the measurement results.
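For reference, the textbook two-level result behind this calibration is that a resonant drive of Rabi frequency $\Omega$ acting on a qubit prepared in $\ket{0}$ gives an excited-state population
\begin{equation*}
P_{\ket{1}}(t) = \sin^2\!\left(\frac{\Omega t}{2}\right),
\end{equation*}
so a pulse of duration $t_{\pi}=\pi/\Omega$ (a ``$\pi$-pulse'') flips the qubit and a pulse of half that duration prepares an equal superposition; fitting the measured oscillation period fixes $\Omega$ and hence the pulse lengths used for gates.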
Superconducting qubit states are usually determined using dispersive readout \cite{Krantz2019}, where $\ket{\psi}$ is not measured directly but is inferred from measurements of a coupled photon resonator. The interaction between the resonator and qubit shifts the effective frequency of the resonator by an amount dependent on the projection of $\ket{\psi}$. Therefore, the qubit state can be inferred by measurements of the resonator frequency. However, the resonator frequency, $\omega_{r}$, must be detuned from the qubit frequency, $\omega_{01}$, to prevent measurements from interfering with the qubit state. The detuning frequency, $\Delta = \omega_{01} - \omega_{r}$, is greater than the coupling rate, $g$, between the qubit and resonator to ensure that energy is not coherently exchanged between the qubit and resonator. This condition prevents a measurement from affecting subsequent measurements (quantum non-demolition). This control scheme does have a drawback: since the qubit is coupled to the resonator, noise within the resonator can cause arbitrary phase decoherence in the qubit. Therefore, measurement signals used to probe the resonator frequency must be attenuated and filtered to reduce noise and ensure qubit fidelity.
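In the standard two-level dispersive approximation ($g \ll |\Delta|$), the state-dependent pull of the resonator described above takes the simple form
\begin{equation*}
\omega_r \;\longrightarrow\; \omega_r \pm \chi, \qquad \chi \simeq \frac{g^2}{\Delta},
\end{equation*}
so reading out the qubit amounts to resolving a frequency shift of $2\chi$ in the reflected or transmitted probe tone; for a weakly anharmonic transmon the shift is further reduced by a factor involving the anharmonicity \cite{Koch2007,Krantz2019}.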
\subsection{Qubit manipulation}
The microwave tones used for qubit manipulation are referred to as \emph{gates} or \emph{pulses}. Qubit manipulation is achieved with a heterodyning technique, where the pulse signal is generated by a mixer, modulating a baseband signal of a local oscillator operating close to the desired frequency, with an envelope at lower frequency. The envelope is generated by fast digital-to-analog converters, which generate both components of the manipulation or readout pulses in the respective baseband. For readout the returning microwave signal from the readout resonator gets down-converted with the same local oscillator used for the up-conversion, yielding the demodulated baseband signal. After low pass filtering to suppress leakage of the carrier frequency and further amplification, the signal is digitized by an analog-to-digital converter card. Fourier transformation of the incoming signal for both quadratures gives the complex scattering parameter and in the case of dispersive readout, the state of the qubit.
Single qubit gates correspond to rotations of a Bloch vector about some axis of the Bloch sphere while multi-qubit gates take two or more qubits as input to manipulate at least one qubit state. Multi-qubit gates require entangling two or more qubits together, while a series of pulses is applied to one or multiple qubits \cite{Kjaergaard2020}. Since the qubits are entangled, the readout of one qubit can be manipulated via a second, ancillary qubit. An example is the Controlled NOT gate (or CNOT gate), where a target qubit state is flipped if and only if a second, control qubit state is $\ket{1}$. Quantum logic gates are the fundamental basic quantum circuit building block, operating on a small number of qubits (usually one or two). However, it should be noted that due to the planar structure of superconducting circuitry, connectivity between qubits is currently limited to nearest-neighbor interactions. This imposes constraints on gating capabilities, as operations between non-neighboring qubits cannot be performed. There are current aims to realize 3D-integrated superconducting circuits \cite{Rosenberg2017,Yost2020}, which will allow additional connections for beyond-nearest-neighbor interactions, overcoming the current limitations.
\subsection{Operational conditions \& performance indicators}
Like other quantum systems, calculations are limited by the longitudinal and transverse relaxation times, $T_1$ and $T_2$. With current technology, decoherence rates below 1 MHz can be achieved \cite{Krantz2019}, allowing for the creation and manipulation of single or multiple quantum excitations in superconducting qubits with fast (nanosecond) control. Improvements to qubit lifetimes have been achieved primarily through qubit design, improvements in fabrication quality, and material selection. For current systems, lifetimes are long enough to ensure computational fidelity. Indeed, for the very best SC QPUs in the cloud, 1- and 2-qubit gate fidelities exceed 99\% with qubit readout errors in the range of 2-3\%.
Regardless of design, qubits must be operated well under the superconducting transition temperature $T_c$. Furthermore, since SC qubits are strongly coupled to their environment and readout circuitry, thermal and electromagnetic noise should be reduced as much as possible. Therefore, qubits are usually measured and operated at $T=10$~mK in dilution refrigerators with magnetically shielded environments. As mentioned previously, measurement lines are also typically thermalized and attenuated to reduce noise. The need for cryogenic environments currently imposes a limitation on the size of SC quantum computers, since each measurement line leaks heat into the system and reduces the cooling power of the refrigerator. This limitation can be overcome by implementation of cryogenic processing and multiplexing of classical signals. Two potential platform solutions are the cryogenic complementary metal-oxide-semiconducting (Cryo-CMOS) \cite{Homulle2016,Charbon2019,Pauka2021} and rapid single flux quantum (RSFQ) \cite{Likharev1991} hardware platforms, which can serve as low-temperature interfaces between classical and quantum systems.
Another limit is the speed at which qubits can be operated. At high frequencies, superconductivity breaks down as single electrons are excited out of the superfluid \cite{Tinkham}. The presence of these quasiparticles leads to dissipation and decoherence, and thus qubits are typically designed to operate at frequencies $\omega_{01}\ll k_B T_c/\hbar$. For aluminium with $T_c = 1$~K, qubits are typically designed to operate at $\omega_{01} < 20$~GHz. Additionally, while the macroscopic nature of superconducting qubits allows for customization of qubit parameters, this benefit comes with a drawback in producing identical qubits, as small deviations in fabrication uniformity can be difficult to control.
\subsection{Use case}
\label{sec:super:use}
Superconducting quantum circuits have been used to simulate many physical systems.
Spin systems have been a particular focus for quantum simulation through both analog \cite{Guo2019,Xu2020,Fitzpatrick2017,Harris2018} and digital \cite{Salathe2015,Smith2019} methods. However, with regard to digital simulations, a recent study \cite{Smith2019} performed on an IBM QPU has concluded that the current state of SC quantum computers is too error-limited to produce dependable quantitative results for larger (six spins or more) systems.
Chemical binding energies of molecules have been calculated using VQEs
\cite{OMalley2016, Kandala2017, McCaskey2019, Karalekas2020} implemented on SC circuits. The VQE method has had relatively good success in determining binding energies of H$_2$, LiH, BeH$_2$, NaH, KH, and RbH. Due to the circuitry scale, these studies consider only a limited number of basis states (e.g. spin orbitals), allowing for a comparison to the exact, diagonalized solutions. In this context, the calculated results are in good agreement with theoretical expectations. More recently, binding energies of hydrogen chains up to H$_{12}$ have been modeled using Google's Sycamore QPU \cite{Arute2020_1}. However, it should be noted that several postprocessing techniques were required to mitigate errors in the raw results and achieve quantitative chemical accuracy for bonding energies. This work also simulated diazene (H$_2$N$_2$) isomerization energies for converting cis-diazene to trans-diazene, marking the first time a chemical transition has been modeled on a quantum computer. Therefore, despite the limitations from noise and basis size, digital simulations on SC QPUs show promise for chemical simulations.
\section{\label{sec:ion}Quantum computing with trapped ions}
Trapped ions \cite{Wineland1998,Haffner2008} were one of the first platforms proposed for building a quantum computer as they form a natural representation of an ideal qubit: all ions are identical by nature, their high degree of isolation from the environment leads to excellent coherence times and interaction with radio-frequency (rf) and laser light allows for high-fidelity gate operations.
Qubits are encoded in the electronic states of individual ions trapped by electric fields in an rf Paul trap.
Two-dimensional traps can be micro-fabricated on silicon chips, called surface traps, and can contain multiple trapping and interaction zones as well as integrated microwave and laser access \cite{Chiaverini2005,Revelle2020,Mehta2020,Niffenegger2020}.
Interaction of the electronic states of neighbouring ions is negligibly small \cite{Kotler2014}, but ions are strongly coupled via their motion which can be exploited to create entanglement between different ions necessary for multi-qubit gates \cite{Monroe1995}.
Ions are confined in long chains, within which all ions can interact with each other.
Chains can be split and merged, and ions can be moved across the chip between different zones, providing large flexibility of connections \cite{Blakestad2011,Bowler2012,Walther2012}.
Many different elements are used as ion species, but all ions are typically singly-charged and have a single remaining valence electron.
Popular choices of ion are $\mathrm{Yb}^+$, $\mathrm{Ca}^+$ and $\mathrm{Be}^+$ \cite{Debnath2016b,Baldwin2020,Ballance2016,Erhard2019,Gaebler2016}.
Qubit states can either both be encoded in ground-state levels (hyperfine- and Zeeman-qubits \cite{Harty2014,Ballance2015}) with transition frequencies in the rf range, or with the excited state encoded in a meta-stable state (e.g. $\mathrm{D}_{5/2}$) leading to optical transition frequencies \cite{Benhelm2008}.
Different properties of the atomic species affect the qubit performance.
For example, some hyperfine qubits are robust to magnetic field noise, which is the main source of decoherence in trapped ion qubits, and therefore have greatly enhanced coherence times \cite{Harty2014,Wang2020}.
Other important factors are the existence of low-lying D manifolds, which can assist readout but cause errors due to scattering in laser gate operations; the ions' mass where lighter ions allow faster gates; excited state lifetimes for optical qubits; and transition frequencies depending on the availability of suitable lasers.\\\indent
Scaling up devices from tens to thousands or millions of qubits is arguably the biggest challenge in realising a quantum computer.
The trapped ion community pursues several paths towards scalability.
In the quantum charge-coupled-device (QCCD) architecture \cite{Kielpinski2002a,Wineland1998} ion chains are broken up into smaller groups in individual zones, instead of forming a single long string.
For scalability beyond a single chip proposals include connecting separate traps via photonic links \cite{Monroe2013a,Nigmatullin2016a,Stephenson2020} and shuttling of ions across arrays of chips \cite{Lekitsch2017}.
Another important ingredient for scalability is the simultaneous use of different ion species, which allows sympathetic cooling of ions without affecting the electronic state of the logic and memory qubits \cite{Kielpinski2000} and better spectral isolation for ion-photon entanglement.
Strings of ions can be split, merged and shuttled between different zones with negligible effect on the spin state and coherence, but a slight increase in ion temperature \cite{Blakestad2011,Bowler2012,Walther2012}.
While ion traps can be operated at room temperature, their performance is enhanced at cryogenic temperatures due to a reduction in heating rate and an increase in ion lifetime.
Cooling down to $\sim 10\,\mathrm{K}$ with liquid helium cryostats suffices for this purpose.\\\indent
Trapped ions have the longest coherence times of all contending platforms for building a quantum computer. Even though their individual operations are slower than in solid state systems, they still possess a superior ratio of gate operation time to coherence time, which ultimately results in record single- and two-qubit gate fidelity.
While technology and infrastructure for solid-state systems is more mature than laser technology due to developments made for classical computer chips, rapid progress in the stability, miniaturisation, and integration of laser and ion trap systems has been achieved in the last few years due to the influx of resources and increase in demand.
Trapped ion quantum computers also benefit from the absence of noisy direct environments which are present in solid state systems, and the high degree of connectivity and flexibility of connections in trapped ion systems.
Remaining challenges are to reduce gate errors for larger numbers of qubits, which tend to increase with the number of ions, and to improve automatisation, robustness and crosstalk for building larger devices.
Further research is also required in trap fabrication, as one of the major gate error sources stems from anomalous heating of the ion crystals, thought to be caused by surface effects on the ion trap electrodes \cite{Hite2017,Sedlacek2017,Boldin2018,Noel2019}.
\subsection{Qubit readout, initialisation and cooling}
Qubits are read out via state-dependent fluorescence detection.
All ion species used for quantum computing have a short-lived excited state that predominantly decays back into the qubit ground state manifold.
For optical qubits and some hyperfine qubits the qubit frequency is sufficiently large that the fluorescence laser only couples to one of the qubit states, the `bright' state.
Together with selection rules preventing decay from the excited state into the opposite `dark' qubit state, this allows direct fluorescence readout.
For qubits without direct state selectivity of the fluorescence laser, the dark state is transferred into a `shelf' state that does not couple to the fluorescence laser and the excited state.
Ion-position resolved fluorescence can be detected with arrays of photomultiplier tubes or avalanche photodiodes, on an electron-multiplying charge coupled device camera \cite{Myerson2008}, or with superconducting nanowire single-photon detectors integrated into the trap chip \cite{Todaro2020}.
Fluorescence can be collected over a fixed time-bin and analysed with threshold or maximum likelihood algorithms, or with real-time analysis and adaptive readout duration.
With sufficiently low background counts and high photon collection and detection efficiency, real-time analysis achieves the same fidelities as fixed-time threshold analysis, but is considerably faster \cite{Noek2013b,Todaro2020}.
Qubit initialisation is performed via optical pumping, using the same excited states as for fluorescence readout.
Either frequency or polarisation selectivity is used to ensure that population is excited out of all ground states apart from the target initial state.
Different states can be prepared by applying a sequence of single qubit operations after optical pumping.
For optimum gate fidelities ion crystals need to be cooled close to their motional ground states, which is performed with laser cooling.
Typically ions are continuously Doppler cooled during idle time.
Before an experiment, resolved-sideband cooling (RSBC) is used to further cool relevant motional modes to an average motional mode occupation of $\bar{\mathrm{n}}\lesssim 0.1$.
Alternatively electromagnetically-induced transparency cooling can be used to cool all modes simultaneously \cite{Lechner2016}.
While considerably faster than RSBC, especially for larger ion strings, the final temperature reached is slightly higher.
\subsection{Qubit manipulation}
Single qubit gates can be driven directly using rf in Zeeman- and hyperfine-qubits, or using a narrow-linewidth laser to drive the quadrupole transition in optical qubits.
Alternatively a pair of lasers which are far detuned from the excited state and have the qubit frequency as their frequency difference can be used to drive qubit rotations via two-photon Raman transitions.
Rotations around the z-axis can be performed trivially by propagating the phase of all future operations.
The phase is defined by a direct digital synthesis frequency source that is either applied directly on the ions as rf or controls the frequency, amplitude and phase of the laser beams via an acousto-optic modulator (AOM).
Rf operations couple only very weakly to the motion due to their low photon energy and can already be performed at Doppler-cooled temperatures at very high fidelities \cite{Harty2014}.
They also have superior phase stability compared to lasers and can easily be integrated into surface traps, but are harder to address onto single ions.
Multi-qubit gates create entanglement between different qubits and require the ions' motion as a bus of interaction between the ions.
There are different schemes for entangling gates, with the most established ones being the closely related M\o lmer-S\o rensen (MS) gate \cite{Sorensen1999} and the $\sigma_z$ geometric phase (ZGP) gate~\cite{Leibfried2003}.
Both create a spin-dependent force on the ions; the MS gate in the $\ket{+}$, $\ket{-}$ basis and the ZGP gate in the $\ket{\uparrow}$, $\ket{\downarrow}$ basis.
This force leads to motional excitation and displacement for one spin parity combination (e.g. $\ket{\uparrow\downarrow}$) but not the other (e.g. $\ket{\uparrow\uparrow}$).
Displaced spin states acquire a phase which ultimately leads to the entanglement.
The propagator of a two-qubit gate with these schemes is $\mathrm{diag}(1,i,i,1)$, which corresponds to a controlled-PHASE gate.
This gate can be transformed into a CNOT gate via additional single-qubit operations (a short numerical check is given at the end of this subsection).
Both gate mechanisms are first-order insensitive to the ion temperature, which makes them more robust and is an important factor in the high fidelities achieved.
ZGP gates cannot be performed directly on the low-decoherence clock qubits, but are insensitive to the absolute magnetic field offset.
Two-qubit gates have been performed both with lasers \cite{Benhelm2008,Ballance2016,Gaebler2016,Baldwin2020b} and rf \cite{Ospelkaus2011,Harty2016,Weidt2016,Zarantonello2019,Srinivas2021} as well as between ions of different elements \cite{Ballance2015,Tan2015,Hughes2020,Bruzewicz2019}.
Due to the weak motional coupling rf multi-qubit gates are considerably slower than laser gates.
Gates can be performed globally on all ions in a string simultaneously or addressed locally to a specific subset of ions \cite{Erhard2019,Debnath2016b}.
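The equivalence stated above between the $\mathrm{diag}(1,i,i,1)$ propagator, a controlled-PHASE gate and (after single-qubit operations) a CNOT can be verified numerically. In the minimal numpy check below, the particular single-qubit corrections chosen ($S^{\dagger}$ phase gates on both ions and a Hadamard on the target) are one convenient choice for illustration, not necessarily the sequence used in the cited experiments:
\begin{verbatim}
import numpy as np

MS  = np.diag([1, 1j, 1j, 1])          # two-qubit propagator quoted above
Sdg = np.diag([1, -1j])                # single-qubit S-dagger (phase) gate
Had = np.array([[1, 1], [1, -1]]) / np.sqrt(2)   # Hadamard
I2  = np.eye(2)

CZ = MS @ np.kron(Sdg, Sdg)            # strip single-qubit phases -> controlled-Z
CNOT = np.kron(I2, Had) @ CZ @ np.kron(I2, Had)  # Hadamards on the target qubit

print(np.round(CZ.real))               # diag(1, 1, 1, -1): a controlled-PHASE gate
print(np.round(CNOT.real))             # the familiar CNOT truth table
\end{verbatim}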
\subsection{Performance indicators}
Coherence times in trapped ions are $T_2^*=50\,\mathrm{s}$ on magnetic-field insensitive clock qubits \cite{Harty2014} and reach over an hour by employing dynamical decoupling sequences \cite{Wang2020}.
State-preparation and measurement errors are $\varepsilon <1\cdot 10^{-3}$, with a mean duration of $46\ensuremath{\,\mathrm{\mu s}}$ \cite{Todaro2020,Noek2013b,Myerson2008}, where $\varepsilon\equiv 1-\frac{\mathcal{F}}{100}$ for fidelity $\mathcal{F}$. Depending on the method and target fidelity, readout times in these systems may vary between tens and hundreds of $\ensuremath{\,\mathrm{\mu s}}$.
Single qubit gates have been performed with errors $\varepsilon = 1.0(3)\cdot 10^{-6}$ for an rf $\pi/2$-pulse of $12\ensuremath{\,\mathrm{\mu s}}$ pulse duration on a single ion \cite{Harty2014}.
Fast single qubit gates can be implemented with a pulsed laser trading off fidelity against speed, achieving $\varepsilon = 7\cdot 10^{-3}$ for $t_{\pi/2}=40\,\mathrm{ps}$ \cite{Campbell2010}.
Two-qubit gate errors are $\varepsilon = 8(4)\cdot 10^{-4}$ at a gate time of $t_g=30\ensuremath{\,\mathrm{\mu s}}$ \cite{Gaebler2016,Ballance2016,Baldwin2020} for laser gates, and $\varepsilon$ between $0$ and $1.7\cdot 10^{-3}$ at a gate time of $t_g=740\ensuremath{\,\mathrm{\mu s}}$ for microwave gates \cite{Srinivas2021}.
The fastest two-qubit gates achieved $\varepsilon = 2.2(3)\cdot 10^{-3}$ in $t_{g}=1.6\ensuremath{\,\mathrm{\mu s}}$ \cite{Schafer2018}.
\subsection{Use case}
Various algorithms have been implemented on trapped ion systems, including Shor's algorithm and Grover's search algorithm \cite{Monz2015b,Figgatt2017}, demonstrations of error correction \cite{Negnevitsky2018,Egan2020}, analogue quantum simulations, such as the simulation of many-body dynamical phase transitions \cite{Zhang2017a} exceeding the capabilities of classical computers, as well as several VQE demonstrations \cite{Hempel2018,Nam2020,Foss-Feig2020}, for example estimating the ground state energy of $\mathrm{H_2}$, LiH and $\mathrm{H_2O}$.
\begin{figure}
\centering
\includegraphics[width=8cm]{ionq_comp.png}
\caption{\textbf{IonQ quantum computer based on a chain of trapped ions:}
A high-numerical aperture lens allows both individual addressing and readout of the ions.
A multi-channel AOM is used to modulate the amplitude, frequency and phase of the individual laser beams.
Inset: The qubits are encoded in the hyperfine ground states $\ket{\uparrow}={}^2\mathrm{S}_{1/2},\mathrm{F}=1$ and $\ket{\downarrow}={}^2\mathrm{S}_{1/2},\mathrm{F}=0$ of the trapped $^{171}\mathrm{Yb}^+$ ions.
Gate operations are performed via a two-photon Raman process, coupling to the excited $^2\mathrm{P}$ states (purple and orange beams).
Figure adapted from \cite{Nam2020}.}
\label{fig:ionq_qc}
\end{figure}
Fig.\ \ref{fig:ionq_qc} shows the ion trap quantum computer of IonQ, which is commercially accessible and was used to perform VQE on three individually addressable qubits encoded in a string of \ensuremath{^{171}\mathrm{Yb}^+\,} ions to estimate the ground state energy of the water molecule \cite{Nam2020}.
The quantum circuit implementation for the energy-evaluation was optimised to take advantage of the asymmetric state measurement fidelities of the \ensuremath{\ket{\uparrow}}\, and \ensuremath{\ket{\downarrow}}\, states, and the higher fidelity ($\varepsilon_{\phi=\pi/100}\lesssim 4\cdot 10^{-3}$) of partially entangling gates $\mathrm{XX(\phi)}$ ($\phi<\pi/2$) compared to full entangling gates $\mathrm{XX(\pi/2)}$ ($\varepsilon\lesssim 4\cdot 10^{-2}$). The longest implemented circuit comprised 6 CNOT two-qubit operations.
An energy uncertainty close to the chemical accuracy of $1.6\,\mathrm{mHa}$ was achieved (albeit in a minimal basis set), without a need for implementing error mitigation techniques such as Richardson extrapolation \cite{Richardson1927}.
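For completeness, the $1.6\,\mathrm{mHa}$ figure is simply the chemical-accuracy target of $\approx 1$~kcal/mol expressed in atomic units: with the standard conversion $1\,\mathrm{Ha}\approx 627.5\,\mathrm{kcal/mol}$ one has
\begin{equation*}
1\,\mathrm{kcal/mol} \;\approx\; \frac{1}{627.5}\,\mathrm{Ha} \;\approx\; 1.6\,\mathrm{mHa}.
\end{equation*}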
\section{\label{sec:silicon}Silicon quantum computer}
Today’s digital age is enabled by the relentless progress and optimisation of semiconductor materials and technology. From an industrial standpoint, the use of well-established nanofabrication techniques for the development of quantum machines would be economically attractive to achieve large-scale systems. As discussed, some of these manufacturing techniques are already applied to superconducting and ion trap quantum platforms, and are expected to become central for the development of silicon-based systems, offering the prospect of integrating millions of qubits on chips at affordable manufacturing costs, akin to classical commercial electronics. Besides this technological motivation, silicon is a particularly suitable material for spin-based quantum devices from a performance viewpoint. Through isotopic purification, the only isotope bearing a nuclear spin ($^{29}$Si) in natural silicon can be nearly completely removed, making the silicon crystal a quasi-spin-noise-free environment for the qubit. This results in silicon spin-qubits having the longest coherence time among solid-state implementations.\\\indent
Besides silicon, there exists a large variety of semiconductor systems currently under investigation for quantum computing applications~\cite{Atature2018,chatterjee2020semiconductor,Lieven2019,Bluhm}. The main differences lie in the type of material (e.g. natural or purified silicon, synthetic diamond, silicon carbide, heterostructures such as GaAs/AlGaAs, Si/SiGe or Ge/SiGe), the operational conditions (ranging from room temperature down to millikelvin temperature), the way each qubit is spatially confined within the material (e.g. gate-defined quantum dots, etched nanowires, atomic-size crystallographic defects, implanted dopant impurities), the way the qubit state is read out (e.g. electrical readout via charge sensors, or optical readout through photoluminescence), and the way the qubit state is manipulated (e.g. electron spin resonance via magnetic field pulsing, electric dipole spin resonance via electric field pulsing). Despite such diversity, a common denominator in most platforms is the choice of electron/hole spins as the two-level system embodying the qubits. The paradigmatic encoding is represented by a single spin in a static magnetic field with its two Zeeman-split energy levels representing the states $\ket{0}$ and $\ket{1}$. Other implementations that have been explored include two-electron singlet-triplet qubits, three-electron charge-spin hybrid qubits and three-electron exchange-only qubits. Such a rich ecosystem gives rise to significant performance variations among qubit implementations. The trade-offs can be many, including the robustness to specific noise sources and the ease of operation. The coherence times can range from a few tens of nanoseconds in GaAs/AlGaAs quantum dots to a few seconds in silicon dopants, and the single-qubit gate time can vary between sub-nanosecond and hundreds of nanoseconds in Si/SiGe quantum dots and silicon dopants, respectively.\\\indent
In this Section, we are going to focus our attention on a particular type of semiconductor qubit system, which has been deployed for the realisation of the first spin-based quantum computer in the Cloud: SPIN-2QPU~\cite{Last2020}, developed at QuTech (a collaboration between TUDelft and TNO). It consists of two single electron spin qubits in a double quantum dot (DQD) that is electrostatically defined by metallic gate electrodes deposited on top of an isotopically purified Si/SiGe heterostructure, as illustrated in Fig.~\ref{fig:si_device}~(a) and Fig.~\ref{fig:si_device}~(b). \\\indent
Similar to the other quantum processors discussed previously, spin-based machines must meet certain functional criteria. These include reliable initialisation to a known state, high fidelity projective readout of the final state, and qubit manipulation through high-quality single- and two-qubit gates. Let us see how SPIN-2QPU satisfies these criteria.
\subsection{\label{sec:init}Qubit initialisation \& readout}
The readout of the qubit state is ultimately a measurement of the electron spin orientation. However, the magnetic moment of a single spin is exceedingly small and its direct detection quite difficult. By contrast, the detection of small displacements of single charges is routinely carried out in semiconductor devices. To this end, SPIN-2QPU uses a single-electron transistor (SET) capacitively coupled to the DQD, as shown in Fig.~\ref{fig:si_device}(b). Whenever a single electron leaves/enters the DQD, the SET produces a discrete jump in the value of its electric current caused by a change in its operation point.\\\indent
\begin{figure}[]
\includegraphics[scale=0.45]{fig_Si_device.pdf}
\caption{(a) Schematic cross-sectional view of a DQD device used to control two spin qubits. Top metal gates (yellow areas) are used to tune the conduction band profile (dashed line) in the Si layer and isolate two electron spins. An n-type doped region of semiconductor (pink shaded area) is used as an electron reservoir tunnel-coupled to the left QD. A layer of cobalt (blue box) is deposited on top of the gate layers to generate a controlled magnetic field gradient across the DQD. (b) SEM micrograph of the DQD in (a). The aluminum gates are patterned with electron-beam lithography. The two qubits are formed under the gates highlighted with red and blue circles. The SET detector is formed under the gates highlighted in yellow. Gates that accumulate the electron reservoirs for the DQD and the SET are connected to Ohmic contacts and highlighted by crossed squares. Dashed lines indicate the region where the micromagnet is deposited. (c) Schematic diagram of energy levels for the left QD and the electron reservoir during the readout pulse sequence. Energy levels in the QD are Zeeman-split according to spin polarization. (d) Pulsing sequence (top) for the spin readout and normalised SET signal for spin-up (middle) and spin-down (bottom) qubit states. Panels (a) and (b) are adapted from Ref.~\cite{Last2020}. Panels (c) and (d) are adapted from Ref.~\cite{Yang2013}.}
\label{fig:si_device}
\end{figure}
Reading out the spin state is, therefore, a matter of making a so-called spin-to-charge conversion, whereby the electron is allowed to tunnel in or out of the DQD in a way that depends on its spin state, equivalent to whether the qubit is in state $\ket{0}$ or $\ket{1}$. As shown in Fig.~\ref{fig:si_device}(c), the selection rule is energy-based. A single spin in one of the dots is capacitively coupled to the SET and tunnel coupled to a reservoir. After spin manipulation, the dot's energy level is tuned with a gate voltage pulse such that the Fermi reservoir lies between the two Zeeman-split spin states. If the electron is in state $\ket{\downarrow}$, it does not have enough energy to leave the dot, and there is no SET current change due to a lack of charge rearrangement. For a state $\ket{\uparrow}$, the electron can tunnel out of the quantum dot and into the reservoir, leading to a change in SET current until a new electron tunnels in and re-initializes the qubit to its ground state. Current traces for these two alternative scenarios are shown in Fig.~\ref{fig:si_device}(d). Note that in this system initialization can be seen as a by-product of readout, given that an electron with a known spin, i.e. $\ket{\downarrow}$, always resides in the dot at the end of the sequence.
\subsection{\label{sec:manip}Qubit manipulation}
Analogously to other qubit realisations, a spin-qubit requires independent rotations about the axes of the Bloch sphere (single-qubit gate), as well as rotations that are dependent on the state of another qubit (two-qubit gate), in order to form a set of universal quantum gates. Through a two-qubit gate, entangled states can be created when one of the two qubits starts in a superposition of states.\\\indent
SPIN-2QPU carries out single-qubit gate operations through electric dipole spin resonance (EDSR). It consists of a microwave modulated electric pulse delivered through a gate electrode that oscillates the electron wavefunction. This has the effect of rotating the electron spin whenever the electron experiences a time-varying magnetic field resonant with its Zeeman splitting. This requires the presence of a synthetic spin-orbit field obtained through a local magnetic field gradient in the DQD, which is engineered by depositing a cobalt micromagnet on top of the device gate layer (see Fig.~\ref{fig:si_device}(a)). The amplitude of the EDSR pulse controls the spin vector’s rotation frequency around the Bloch sphere, its phase controls the rotation axis, and its duration controls the rotation angle. The frequency of the pulse allows one to select which qubit is manipulated, given that each electron experiences a slightly different magnetic field due to the different position within the DQD.\\\indent
SPIN-2QPU carries out two-qubit gate control via modulation of the exchange interaction. The idea is to quickly turn on the tunnel coupling between two neighbouring spins by applying a gate-voltage pulse that lowers the tunnel barrier between their corresponding quantum dots, so that the electron wavefunctions overlap. Such overlap leads to an exchange interaction between the spins, which can be exploited for conditional gate operations.
\subsection{\label{sec:oper}Operational conditions \& performance indicators}
The readout protocol is effective if the qubit energy levels are separated by at least a few times the thermal energy. This is ultimately the reason why SPIN-2QPU and similar semiconductor-based quantum systems need to be operated at dilution refrigerator temperature ($T$) and in the presence of an external static magnetic field ($B$). Typical conditions require $B\approx$~1~T and $T\approx$~50~mK. The duration of the readout sequence is ultimately determined by the tunnelling rate between the DQD and the reservoir, as well as by the bandwidth of the SET detector. SPIN-2QPU’s readout duration is $\approx$~300~$\mu$s per qubit and its readout fidelity is approximately 85\%.\\\indent
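A quick order-of-magnitude check, using textbook constants and the representative operating values quoted above (and assuming an electron $g$-factor of $\approx 2$, as appropriate for conduction electrons in silicon), confirms that the Zeeman splitting comfortably exceeds the thermal energy:
\begin{verbatim}
# Readout condition: Zeeman splitting >> thermal energy k_B*T
mu_B = 9.274e-24    # Bohr magneton, J/T
k_B  = 1.381e-23    # Boltzmann constant, J/K
g, B, T = 2.0, 1.0, 0.050   # g-factor, field in T, temperature in K

E_Z = g * mu_B * B
print(E_Z / k_B)            # Zeeman splitting expressed in kelvin, ~1.3 K
print(E_Z / (k_B * T))      # ~27 thermal energies at 50 mK
\end{verbatim}
At 4~K, by contrast, the same splitting would amount to only about a third of the thermal energy, which is why millikelvin operation (or much larger fields) is needed for this energy-selective readout.\\\indent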
Given a single-qubit gate duration of approximately $250$~ns and a phase coherence time of at least 6~$\mu$s, SPIN-2QPU achieves single-qubit fidelities in excess of 99.0\%. As for 2-qubit operations, the only allowed native gate is CZ. Hence, other gates like CNOT and SWAP have to be decomposed into CZ operations in combination with single-qubit rotations. This comes at the expense of fidelity and operational time. A detailed benchmark for CZ is ongoing. Preliminary data show gate duration of around 150 ns and fidelity in excess of 90\%, but this latter figure is likely to be a conservative underestimate at this stage.
\subsection{\label{sec:use}Use case}
At present, we are not aware of VQE simulations carried out with SPIN-2QPU or any other semiconductor qubit system, possibly due to the limited qubit count. By contrast, 2D arrays of semiconductor QDs have been used for analog simulations of magnetic and insulating materials by spatially engineering Hamiltonians onto the array~\cite{Hensgens2017,Dehollain2020}. It is, however, useful to report that it has been possible to run digital algorithms of different kinds (Deutsch–Jozsa and Grover) on the SiGe processor that QuTech used to prototype SPIN2-QPU~\cite{Watson2018}. This ultimately casts a positive light on future uses of semiconductor machines in computational chemistry.
\begin{table*}[t]
\begin{center}
\begin{tabular}{ |p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}| }
\hline
\textbf{Manufacturer} & \textbf{Platform} & \textbf{Cloud access} &\textbf{Max \# qubits} &\textbf{ Gate fidelity (1-qubit, 2-qubit) }&\textbf{Max QV} &\textbf{Simulated molecules}\\
\hline
IBM & Superconducting &IBM Quantum Experience (Open access) & 15 (Melbourne)& 99.97\%, 99.16\% (Santiago) & 32 (Santiago) &H$_2$, LiH, BeH$_2$, NaH, KH, RbH\\\hline
IonQ& Trapped Ions & Microsoft Azure or Amazon Braket & 11 & 99.50\%, 97.50\% & not published & H$_2$O \\\hline
QuTech & Silicon & Quantum Inspire & 2 (Spin2-QPU) & $\approx$~99\%, $\approx$~90\% & not published & none\\\hline
Google& Superconducting & Google Quantum AI &53 (Sycamore)& 99.85\%,
99.35\% & not published & H$_2$N$_2$, H$_6$, H$_8$, H$_{10}$, H$_{12}$ \\\hline
Rigetti & Superconducting & Rigetti Quantum Cloud & 31 (Aspen-8) & 99.8\%, 95.9\% & 8 (Aspen-4) & NaH, H$_2$\\\hline
Honeywell& Trapped Ions & Microsoft Azure or Amazon Braket &10 (H1)&99.97\%, 99.5\% &128 & none \\\hline
\end{tabular}
\caption{Quantum computing hardware in the cloud (a non-exhaustive selection). Wherever more than one QPU is available, the relevant machine is indicated within brackets. The ``Simulated molecules'' column denotes experiments run with any quantum machine from the relevant manufacturer, not necessarily one of those listed. IBM hardware considered is limited to Open Access services. Google cloud services are limited to emulators at present, although the reported chemical simulations have been performed with proprietary physical hardware.}
\label{tab:1}
\end{center}
\end{table*}
\section{\label{sec:disc}Discussion}
In recent months significant attention has been drawn to superconducting quantum hardware because a team at Google achieved a much anticipated milestone, namely quantum supremacy~\cite{Arute2019}. By quantum supremacy, it is meant that a quantum computer is able to produce the solution to a computational problem that would be otherwise impossible in a reasonable time with a classical machine. Google scientists achieved this with a 53-transmon-qubit processor (Table~\ref{tab:1}) by showing efficient sampling of random quantum circuits. While this result is of primary importance for the field as a whole, the problem tackled did not bear any relevance to molecular chemistry. Therefore, with regard to this type of problem, a quantum advantage is yet to be demonstrated. However, in a more recent study~\cite{Arute2020_1}, the Google team used the same quantum processor for chemical simulations, as discussed in Section~\ref{sec:super:use}. They demonstrated the most complex ground state simulation to date with as many as 24 spin-orbitals mapped onto 12 qubits. Although these calculations are relatively straightforward with a conventional supercomputer, they represent a significant advance of the state-of-the-art in quantum computing power, as the number of qubits used and orbitals simulated in prior experiments was no more than six~\cite{Kandala2017}. While Google's quantum hardware is scheduled to be deployed onto cloud services imminently, there is already a variety of tools made available by Google scientists to experiment with emulated hardware tailored for applications in molecular chemistry\cite{mcclean2019openfermion}. As for superconducting hardware readily available in the cloud, one has to currently turn to IBM or Rigetti, see Table~\ref{tab:1}. IBM has about a dozen QPUs in the cloud, arguably the most extensive offer yet. Just through its Open Access service, the community can access eight machines with qubit counts ranging from 1 to 15 and QV ranging from 8 to 32. The most powerful QPUs with qubit counts up to 65 and QV up to 32 are available for business clients via Premium Access. A recent breakthrough has led to QV=64 for a new 27-qubit system not yet available in the cloud. IBM scientists were among the pioneers in exploiting QPUs for molecular chemistry applications (see Table~\ref{tab:1})~\cite{Kandala2017}. More recently, they have also shown that improved simulation accuracy can be obtained by adopting error mitigation techniques at algorithmic level~\cite{Kandala2019}. This is important because it can be used to enhance the computational power of a processor without any hardware modification.\\\indent
Quantum machines based on trapped ions have progressed very quickly in the past year alone. While devices used for digital quantum computing typically have a lower qubit count than their superconducting counterparts, analogue quantum simulation has been performed on strings containing up to 53 qubits \cite{Zhang2017a} and single qubit operations have been performed in devices containing up to 79 qubits \cite{Wright2019}.
Due to superior gate fidelity and qubit-to-qubit connectivity, the quantum volume of ion trap processors outperforms that of superconducting devices even with smaller numbers of qubits.
Recently, corporate research teams at IonQ and Honeywell have made QPUs available through the wider cloud services of Amazon and Microsoft, see Table~\ref{tab:1}. Honeywell's QPU shows the largest volume to date, i.e. QV=128. Both Honeywell and IonQ have recently announced the imminent launch of upgraded QPUs with significantly enhanced QV values.
Trapped ion machines have also been used for molecular chemistry simulations\cite{Hempel2018,Nam2020,Shen2017}. The most complex molecular simulation performed to date with trapped ions is the evaluation of the binding energy of the water molecule with a 3-qubit QPU from IonQ~\cite{Nam2020}, as discussed in Section~\ref{sec:ion}.\\\indent
The 2-qubit silicon quantum processor made by QuTech is the only spin-based system in the cloud. The service through which it is accessible, the platform~\href{http://www.quantum-inspire.com}{Quantum Inspire}, also provides a more powerful alternative based on a 5-qubit superconducting QPU. Silicon SPIN2-QPU has been the latest to be deployed (April 2020) and is not yet fully characterised, hence only approximate fidelities are quoted in Table~\ref{tab:1}. Although no chemical simulations have been attempted yet, one should expect that the semiconductor community will soon fill this gap. The modest qubit count should not be an insurmountable impediment if one considers that early 2-qubit QPUs were successfully used to simulate diatomic molecules~\cite{OMalley2016,Colless,Shen}. Undoubtedly, Si-based machines have yet to cover much ground before becoming realistic competitors of the other two major platforms. For example, high-fidelity single- and two-qubit gates have only recently been achieved and are not yet on par with those of the other hardware platforms~\cite{chatterjee2020semiconductor}. Furthermore, qubit variability due to atomic level defects in the material and its interfaces is an issue that currently hampers scalability. Nonetheless, the interest around these devices is justified by the fact that in principle they can be manufactured with industrial CMOS technology, and have the smallest qubit footprint~\cite{gonzalezzalba2020scaling}. This bodes well for future upgrades of such systems towards the million-qubit-machines needed for useful applications. Finally, note that there exists another type of silicon QPU based on photonic technology (as opposed to spins) with two systems accessible via cloud services~\cite{Xan, Bristol}.
\section{\label{sec:conc}Conclusion and Outlook}
A lot of theoretical and experimental ground has been covered since the early 80s, when Feynman proposed to use controllable quantum devices for computational problems in chemistry and physics. There are now dozens of small-scale quantum computers in the cloud and many more in academic and corporate laboratories worldwide. The electronic structures of simple molecules ranging from diatomic systems to chains of a dozen atoms have been determined with several QPU incarnations.\\\indent
In this Article, we have discussed the hardware of the most popular types of quantum computers, for which we have summarised the main techniques for physical encoding, manipulation and readout of quantum information. We have paid particular attention to the machines that the reader could easily access via cloud services, i.e. superconducting-, trapped ion- and silicon-based processors. For these, we have described the main performance specifications and operational conditions. Our target has been to highlight to what extent these early prototypes have been employed for chemistry simulations. The underlying message is that, despite relentless progress, none of the machines built thus far is yet advantageous to a chemist, if compared to classical computational methods. What needs to happen to change this?\\\indent
In order to achieve a sizable quantum advantage in computational chemistry with NISQ machines, the coordinated efforts between quantum hardware and quantum algorithm developers will need to continue if not intensify. Hardware improvements in terms of qubit count, qubit connectivity, quantum gate speed and fidelity, as well as overall QPU volume will be a central focus for years to come. These advances will be essential to bring quantum simulation run-times down to practical length-scales~\cite{elfving2020quantum}. However, recent breakthroughs~\cite{Kandala2019, Arute2020_1} have also shown that tailoring algorithms to the specific quantum hardware available in combination with error mitigation techniques could be important for accurate chemical computation on near-term machines. Particularly, restrictions to realisable gates inherent to NISQ processors could be bypassed with ad-hoc compilation methods.\\\indent
Beyond the NISQ era, i.e. without today's limitations due to noise, there will be the possibility of taking full advantage of the computational speed-up of quantum systems. QEC protocols will have to be reliably implemented to produce such step change. During this transition, a risk to be avoided will be that today's capability restrictions, rather than being lifted altogether, will be merely transferred from the quantum layer onto the classical control layer~\cite{martinis2020information}. There are two complementary considerations to this potential problem. Firstly, QEC will require fast feedback between measurement and control, and communication latency may become an issue. If there is a sizable physical distance between the quantum hardware and the classical control hardware, which is likely for cryogenic QPUs, delays in the communication lines may pose a synchronization challenge if they become of the same order as the gate time. Secondly, a computational bottleneck may occur in handling error correction cycles for large number of physical qubits. For example, a QPU with a million qubits corrected with cycles of 1~$\mu$s will require classical information processing at a bandwidth of 1 Tbit/s. If both latency and bandwidth issues are to be solved at once, novel ultra-low-power cryogenic RAM and CPU may need to be developed, so that they could sit near or within the same chip of a cryogenic QPU without generating detrimental heat loads~\cite{gonzalezzalba2020scaling}.
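The bandwidth figure quoted above follows from a simple count, under the rough simplifying assumption of one syndrome bit extracted per physical qubit per correction cycle:
\begin{verbatim}
# Back-of-envelope check of the classical readout bandwidth for QEC
n_physical_qubits = 1e6          # target machine size discussed in the text
cycle_time = 1e-6                # s, one error-correction cycle
bits_per_qubit_per_cycle = 1     # assumed: one syndrome bit per qubit per cycle

bandwidth = n_physical_qubits * bits_per_qubit_per_cycle / cycle_time
print(bandwidth / 1e12, "Tbit/s")   # ~1 Tbit/s of classical data to process
\end{verbatim}
Handling this data stream in real time, and feeding corrections back within a cycle, is precisely what drives the need for tightly integrated classical control electronics.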
\\\indent
We believe that the challenges described do not represent a fundamental roadblock towards large-scale fault-tolerant quantum computing. However, they do pose significant engineering hurdles that will require synergies between quantum and electronic engineers, as well as quantum software developers and end users. We hope that this Article will trigger the curiosity of theoretical and quantum chemists to try out the available cloud machines, get involved in the ongoing conversation and, eventually, steer quantum systems development to the benefit of their scientific agenda.\\
\begin{acknowledgments}
We wish to thank N. Samkharadze for useful discussions. AR acknowledges the support of the UK Government Department for Business, Energy and Industrial Strategy through the UK National Quantum Technologies Programme, as well as support from a UKRI Future Leaders Fellowship (MR/T041110/1). VMS acknowledges funding from Christ Church College, Oxford. PGB and MW acknowledge support from the EPSRC Hub in Quantum Computing and Simulation (EP/T001062/1).
\end{acknowledgments}
\end{document}
\begin{document}
\title{Sections of the Hodge bundle over Ekedahl-Oort strata of Shimura varieties of Hodge type}
\author{Jean-Stefan Koskivirta}
\maketitle
\abstract{We construct canonical non-vanishing global sections of powers of the Hodge bundle on each Ekedahl-Oort stratum of a Hodge type Shimura variety. In particular we recover the quasi-affineness of the Ekedahl-Oort strata. In the projective case, this gives a very short proof of non-emptiness of Ekedahl-Oort strata. It follows that the Newton strata are also nonempty, by a result of S.~Nie. We also deduce the fact that the $\mu$-ordinary locus is determined by the Ekedahl-Oort strata of its image under any embedding.}
\section*{Introduction}
The Siegel modular variety $\mathcal{A}_{g,N}$ arises as a moduli space of $g$-dimensional principally polarized abelian varieties with level-$N$ structure. More generally, Shimura varieties of PEL-type classify abelian varieties endowed with a polarization, an action of a semisimple algebra, and a level structure. These varieties satisfy nice properties due to their nature as moduli spaces. For example, the special fiber of a PEL-Shimura variety at a place of good reduction carries different stratifications (Ekedahl-Oort, Newton, $p$-rank, etc.).
Shimura varieties which can be embedded into a Siegel modular variety are called of Hodge type; in particular, all PEL-Shimura varieties are of Hodge type. In general, however, Hodge type Shimura varieties do not have an interpretation as a moduli space. For Hodge type varieties, one can still define the Ekedahl-Oort stratification using the stack of $G$-zips, introduced by Wedhorn, Moonen, Pink and Ziegler in \cite{MoWd}, \cite{PWZ1} and \cite{PWZ2}. More precisely, let $({\bf G},\mu)$ be a Hodge type Shimura datum and let $S_K$ denote the special fiber of the associated Shimura variety at a place of good reduction. Zhang has constructed in \cite{Zhang_EO} a $G$-zip over $S_K$ and has shown that the corresponding morphism
$$\zeta\colon S_K \to \mathop{\text{$G$-{\tt Zip}}}\nolimits^{\mu}$$
is smooth. By definition, the Ekedahl-Oort strata of $S_{K}$ are the fibers of $\zeta$. In particular, Zhang proves that the $\mu$-ordinary Ekedahl-Oort stratum is open and dense. It is also possible to define the Newton stratification on $S_K$ group-theoretically. Wortmann has shown in \cite{Wort_MuOrd} that the open Newton stratum coincides with the $\mu$-ordinary locus.
Fix an embedding $\iota:S_K\to \mathcal{A}_{g,N}$ of a Hodge type Shimura variety $S_K$ into a Siegel modular variety. Denote by ${\mathscr A} \to S_K$ the pull-back of the universal abelian scheme on $\mathcal{A}_{g,N}$ via $\iota$. Let $e:S_K\to {\mathscr A}$ denote the identity section of ${\mathscr A}$. Then the Hodge bundle is by definition
$$\omega_S:=\det(e^*\Omega_{{\mathscr A}/S_K}).$$
This line bundle is ample on $S_K$. In an upcoming paper by the author and T.Wedhorn, it is proved that this line bundle admits a canonical global section, a generalized Hasse invariant, which vanishes exactly outside the $\mu$-ordinary locus (\cite{KoWd} Theorem 4.12). In this paper we construct sections of $\omega_S$ on each Ekedahl-Oort stratum. Here are our main results:
\begin{thm1} \label{thm1}
Let $S^w$ be a nonempty Ekedahl-Oort stratum. There exists $N_w \geq 1$ such that for all $d\geq 1$, there exists a (canonical) non-vanishing section in the space
$$H^{0}(S^w,\omega_S^{\otimes N_w d}).$$
\end{thm1}
This section is canonical, in the sense that it is a pullback of a non-vanishing global section on a certain substack of the stack of $G$-zips. Therefore it only depends on the choice of the embedding into a Siegel Shimura variety. This non-vanishing section induces an isomorphism ${\mathcal O}_{S^w}\to \omega_S^{\otimes N_w d}$. In other words, Theorem 1 says that the line bundle $\omega_S$ is torsion on $S^w$. This implies that ${\mathcal O}_{S^w}$ is ample on $S^w$, so we recover the following result:
\begin{corollary1}
The Ekedahl-Oort strata are quasi-affine.
\end{corollary1}
As pointed out by Ulrich Görtz and Chia-Fu Yu, this result can also be deduced immediately from the quasi-affineness of the Ekedahl-Oort strata of the Siegel Shimura variety: Each stratum $S^w$ is locally closed in the preimage of the corresponding Siegel Ekedahl-Oort stratum, and is therefore quasi-affine. Let us also mention that by a result of Wedhorn and Yaroslav, the inclusion $S^w\to S$ is an affine morphism (\cite{WdYa} Theorem 4.1). From the quasi-affineness of Ekedahl-Oort strata, we deduce the following:
\begin{corollary2}\label{nonemp}
Let $S_K$ be a Hodge type Shimura variety. Assume that $S_K$ is projective. Then all Ekedahl-Oort and Newton strata are nonempty.
\end{corollary2}
For PEL-Shimura varieties, all Ekedahl-Oort strata are known to be nonempty, by a result of Viehmann and Wedhorn in \cite{ViWd}. For more general Hodge type Shimura varieties, nonemptiness is expected to hold, even though no proof has been given so far.
Here is an idea of the proof of Corollary 2. The ampleness of the Hodge bundle and the existence of the Hasse invariants imply that an Ekedahl-Oort stratum of positive dimension cannot be projective. Using an inductive argument, we deduce that the superspecial locus (the Ekedahl-Oort stratum of dimension zero) is nonempty, and then the result is a consequence of the flatness of the map $\zeta$. Theorem 1 also has the following consequence:
\begin{corollary3} \label{mu}
Let $S^\mu$ be the $\mu$-ordinary Ekedahl-Oort stratum, and let $S_0^\mu$ be the Ekedahl-Oort stratum of the Siegel modular variety $\mathcal{A}_{g,N}$ containing the image of $S^\mu$ by the embedding $\iota:S_K\to \mathcal{A}_{g,N}$. Then one has the equality:
$$S^\mu=\iota^{-1}(S_0^\mu).$$
\end{corollary3}
For example, assume $S$ is a PEL Shimura variety parametrizing tuples $(A,\lambda,\iota,\bar{\nu})$ where $A$ is an abelian variety, $\lambda$ a polarization, $\iota$ an action of a $\mathbb{Z}_{(p)}$-algebra, and $\bar{\nu}$ a level structure. Then the $\mu$-ordinary locus of $S$ is entirely determined by the isomorphism class of the $p$-torsion $A[p]$, forgetting the rest of the structure.
We now give an overview of the paper. In the first section we recall the parametrization of Ekedahl-Oort strata using the stack of $G$-zips and the map $\zeta$. Then in the second part we state some general facts about the Picard group of a quotient stack and the space of global sections of a line bundle, which we apply to the stack of $G$-zips. In the third part we construct Hasse invariants on each Ekedahl-Oort stratum. Finally, we prove the corollaries of Theorem 1 in the last subsection.
\section{Parametrization of Ekedahl-Oort strata}
\subsection{Shimura varieties of Hodge-type}
We follow the general setup of \cite{KoWd} (4.1). Let $({\bf G},{\bf X})$ be a Shimura datum of Hodge type as in \cite{De_VarSh}. We denote by $[\mu]$ the ${\bf G}(\mathbb{C})$-conjugacy class of the component of $h_{\mathbb{C}}\colon \prod_{\Gal(\mathbb{C}/\mathbb{R})}\mathbb{G}_{m,\mathbb{C}} \to {\bf G}_{\mathbb{C}}$ corresponding to $\id \in \Gal(\mathbb{C}/\mathbb{R})$. Let $E$ be the reflex field, i.e.\ the field of definition of $[\mu]$.
We fix an embedding $\iota:({\bf G},{\bf X})\to (\GSp(V),S^{\pm})$ of Shimura data, where $V = (V,\psi)$ is a symplectic space over $\mathbb{Q}$ and $S^{\pm}$ the double Siegel half space. Let $p$ be a prime of good reduction, ${\mathcal G}$ a reductive $\mathbb{Z}_{(p)}$-model of ${\bf G}$, and $K_p := {\mathcal G}(\mathbb{Z}_{p})$ the hyperspecial subgroup of ${\bf G}(\mathbb{Q}_p)$. We denote by $G$ the special fiber of ${\mathcal G}$.
Choose a place $v$ of the reflex field $E$ of $({\bf G},{\bf X})$ over $p$. Let $K = K_pK^p \subset {\bf G}(\mathbb{A}_f)$ be a compact open subgroup. We assume $K^p$ sufficiently small, so that an integral canonical model ${\mathcal S}_K({\bf G},{\bf X})$ over $O_{E,v}$ exists for the Shimura variety attached to $({\bf G},{\bf X})$ (see \cite{Ki_Integral} and \cite{Vasiu_Integral}). We denote by $S := S_K({\bf G},{\bf X})$ the special fiber. It is a smooth quasi-projective scheme over the residue field $\kappa := \kappa(v)$. Let $k$ denote the algebraic closure of $\kappa$.
The embedding $\iota$ admits a model over $\mathbb{Z}_{(p)}$. More precisely, there is a $\mathbb{Z}_{(p)}$-lattice $\Lambda$ of $V$ such that $\iota$ is induced by an embedding ${\mathcal G} \to \GL(\Lambda)$ (\cite{Ki_Integral}~Lemma~(2.3.1)). We may assume that $\psi$ induces a perfect $\mathbb{Z}_{(p)}$-pairing on $\Lambda$ (\cite{Ki_Points}~(1.3.3)). We obtain an embedding
\begin{equation}\label{EqEmbGroup}
\iota\colon {\mathcal G} \hookrightarrow \GSp(\Lambda)
\end{equation}
over $\mathbb{Z}_{(p)}$ such that ${\mathcal G}$ is the scheme theoretic stabilizer of a finite set $s$ of tensors in $\Lambda^{\otimes}$. We then can extend the embedding of Shimura varieties in characteristic zero to an embedding
\begin{equation}\label{emb}
{\mathcal S}_K({\bf G},{\bf X})\hookrightarrow {\mathcal S}_{\tilde{K}}(\GSp(\Lambda),S^{\pm}) \otimes_{\mathbb{Z}_{(p)}} O_{E,v}
\end{equation}
where $\tilde{K} := \tilde{K}_p\tilde{K}^p$, $\tilde{K}_p := \GSp(\Lambda)(\mathbb{Z}_p)$, and $\tilde{K}^p \subset \GSp(\mathbb{A}^p_f)$ is a certain open compact subgroup. Let $\tilde{\mathscr A} \to {\mathcal S}_{\tilde{K}}(\GSp(\Lambda),S^{\pm})$ be the universal abelian scheme and let ${\mathscr A}$ be its pullback to ${\mathcal S} := {\mathcal S}_K({\bf G},{\bf X})$ via the embedding \eqref{emb}. We define:
\[
\omega := \det(e^*\Omega^1_{{\mathscr A}/{\mathcal S}})
\]
where $e$ is the identity section of ${\mathscr A}$, and we call it the \emph{Hodge line bundle on ${\mathcal S}$}. The line bundle $\omega$ is ample (\cite{KoWd} Proposition 4.1). We denote by $\omega_S$ the pullback of $\omega$ to the special fiber.
\subsection{The stack of $G$-zips}
The conjugacy class $[\mu^{-1}]$ extends to a conjugacy class over $O_{E_v}$. As ${\mathcal G}$ is quasi-split, there exists a representative defined over $O_{E_v}$. We denote by $\chi$ the reduction over $\kappa$ of this representative. Let $P_{\pm} = P_{\pm}(\chi)$ be the pair of opposite parabolic subgroups of $G_{\kappa}$ attached to the cocharacter $\chi$, with common Levi subgroup $L$ (the centralizer of $\chi$). Then $(G,P_+,\sigma(P_-),\varphi)$ is an algebraic zip datum in the sense of \cite{PWZ1}~10.1, where $\sigma(-)$ denotes the pullback under absolute Frobenius $\sigma\colon x \mapsto x^p$ and where $\varphi\colon L \to \sigma(L)$ is the relative Frobenius. We set $P:=P_+$, $Q:=\sigma(P_-)$ and $M:=\sigma(L)$, so that $M$ is the Levi subgroup of $Q$ containing $T$.
We may assume, possibly after conjugating $\chi$ over $\kappa$, that there is a Borel pair $(T,B)$ defined over $\mathbb{F}_p$ such that $B_-\subset P$, and therefore $B\subset Q$ (see \cite{KoWd} Lemma 4.2).
Let $(X,\Phi,X{}^{\vee},\Phi{}^{\vee},\Delta)$ be the based root datum of $(G,B,T)$. Denote by $W = W(G,T) := N_{G}(T)/T$ the Weyl group and by $I \subset W$ the set of simple reflections defined by $B$. The subsets of $I$ correspond bijectively to the parabolic subgroups containing $B$, which are called \emph{standard}. For $J \subset I$, denote by $Q_{J}$ the corresponding standard parabolic and by $M_{J}$ the unique Levi subgroup of $Q_{J}$ containing $T$. We have an inclusion $W_J := W(M_{J},T)\hookrightarrow W(G,T)$
such that $J = W_J \cap I$. Every parabolic subgroup $P'$ of $G$ is conjugate to a unique standard parabolic subgroup $Q_J$ and $J \subset I$ is called the \emph{type of $P'$}.
For $x\in P$, we denote by $\overline{x}$ the image of $x$ in $P/R_{u}(P) = L$, and similarly for the image of $y \in Q$ in $Q/R_u(Q) = M$. The associated \emph{zip group} is defined by
\[
E := \set{(x,y)\in P\times Q}{\varphi(\overline{x})=\overline{y}}
\]
and $E$ acts on $G$ by $(x,y)\cdot g:= xgy^{-1}$. Note that $\dim(E) = \dim(G)$. By \cite{PWZ1}~Proposition~7.3, there are finitely many $E$-orbits in $G$, which are parametrized as follows. Let $J \subseteq I$ and $K \subseteq I$ be the type of $P$ and $Q$, respectively. For every $w \in W$ we choose a representative $\dot w \in \Norm_G(T)$ such that $(w_1w_2)^{\cdot} = \dot w_1\dot w_2$ whenever $\ell(w_1w_2) = \ell(w_1) + \ell(w_2)$. Let $w_{0,J} \in W_J$ and $w_0 \in W$ be the longest elements and set $g_0 := (w_{0} w_{0,J})^{\cdot}$. By \cite{PWZ1}~Theorem~5.12 and Theorem~7.5 we obtain a bijection
\begin{equation}\label{EqEOrbits}
\leftexp{J}{W} \liso \{\text{$E$-orbits on $G$}\}, \qquad w \mapsto O^w := E\cdot (g_0\dot{w})
\end{equation}
such that $\dim O^w = \ell(w) + \dim(P)$.
\subsection{Ekedahl-Oort strata}
The algebraic quotient stack over $\kappa$
\begin{equation}\label{EqDefGZipStack}
\mathop{\text{$G$-{\tt Zip}}}\nolimits^{\chi} := [E \backslash G_{\kappa}]
\end{equation}
is called the \emph{stack of $G$-zips}. The underlying topological space of $\mathop{\text{$G$-{\tt Zip}}}\nolimits^{\chi}$ is homeomorphic to $\leftexp{J}{W}$ endowed with the order topology with respect to a certain partial order $\preceq$; see \cite{PWZ1}~Definition~6.1 for the precise definition.
Zhang has constructed in \cite{Zhang_EO} a $G$-zip of type $\chi$ over $S_K := S_K({\bf G},{\bf X})$ and he has shown in loc.~cit.\ that the corresponding morphism $S_K \to \mathop{\text{$G$-{\tt Zip}}}\nolimits^{\chi}$ is smooth. In this paper, we prefer to use the construction given by Wortmann in \cite{Wort_MuOrd}~\S5 and we obtain again a smooth morphism
\begin{equation}\label{EqDefineZeta}
\zeta := \zeta_{G}\colon S_K \lto \mathop{\text{$G$-{\tt Zip}}}\nolimits^{\chi}.
\end{equation}
The Ekedahl-Oort strata of $S_{K}$ are defined as the fibers of $\zeta$. For $w\in \leftexp{J}{W}$, we denote by $S^w:=\zeta^{-1}(w)$ the corresponding stratum endowed with the reduced scheme structure as a locally closed subset of $S_K$. Then $S^w$ is smooth by \cite{WdYa} and if nonempty, has dimension $\ell(w)$. In the case of PEL Shimura varieties, every Ekedahl-Oort stratum is nonempty (\cite{ViWd}~Theorem~10.1). The map (\ref{EqDefineZeta}) restricts to a smooth map of stacks:
\begin{equation}\label{Zetaw}
\zeta \colon S^w \lto [E \backslash O^w].
\end{equation}
\section{Equivariant Picard group}
\subsection{$G$-linearizations}
In this section we consider an arbitrary smooth algebraic group $G$ over $k$ acting on a $k$-variety $X$. If $\pi:L\to X$ is a line bundle, a $G$-linearization of $L$ is a map
$$G\times L\to L$$
defining an action of $G$ on $L$, satisfying the conditions:
\begin{enumerate}[(i)]
\item The map $\pi$ is $G$-equivariant.
\item The action of $G$ on $L$ is linear on the fibers.
\end{enumerate}
We denote by $\Pic^{G}(X)$ the group of isomorphism classes of $G$-linearized line bundles on $X$. The image of the natural map $\Pic^{G}(X) \to \Pic(X)$ is the subgroup of $G$-linearizable line bundles, and is denoted by $\Pic_{G}(X)$. The group $\Pic^{G}(X)$ can be identified with the Picard group of the quotient stack $\left[G \backslash X\right]$.
We define ${\mathcal E}(X):=\frac{\mathbb{G}_m(X)}{k^\times}$. If $X$ is an irreducible variety over $k$, then ${\mathcal E}(X)$ is a free abelian group of finite type.
\begin{prop}\label{exseqgen}
Let $G$ be a smooth algebraic group, and $X$ an irreducible $G$-variety. Then
there is an exact sequence:
\[
1 \to k^{\times} \to \mathbb{G}_m(X)^G \to {\mathcal E}(X) \to X^{*}(G) \to \Pic^{G}(X) \to \Pic(X)
\]
\end{prop}
\begin{proof}
See \cite{KKV}, Proposition 2.3 and Lemma~2.2. The assumption that $k$ is of characteristic $0$ is not needed in the proof.
\end{proof}
The map $X^*(G)\to \Pic^{G}(X)$, $\lambda \mapsto {\mathscr L}(\lambda)$ is defined as follows. A character $\lambda \in X^{*}(G)$ induces a $G$-linearization of the trivial line bundle $\mathbb{A}^1_k \times X$ on $X$ given by $(g,x,s)\mapsto(g\cdot x,\lambda(g)s)$ for all $g\in G$, $x\in X$, $s\in \mathbb{A}^1_k$.
\begin{proposition} \label{charpic}
Let $H \subset G$ be algebraic groups. Then there is a natural isomorphism:
\[
\Pic^G(G/H)\simeq X^*(H).
\]
\end{proposition}
\begin{proof}
One has $\Pic^G(G/H)\simeq \Pic([G\backslash (G/H)])\simeq \Pic([1/H])\simeq \Pic^H(1)\simeq X^*(H)$.
\end{proof}
\subsection{The space of global sections}
\begin{prop}
Let $G$ be an algebraic group and let $X$ be an irreducible $G$-variety containing an open $G$-orbit $U$. Denote by $\pi:X\to\left[X/G\right]$
the projection. Let $\mathscr{L}$ be a line bundle on the stack $\left[X/G\right]$ and write $L=\pi^{*}\mathscr{L}$. Then:
\end{prop}
\begin{enumerate}
\item[\textit{(i)}] \textit{$H^{0}\left(\left[X/G\right],\mathscr{L}\right)$ identifies with $H^{0}\left(X,L\right)^{G}$. In particular, for $\lambda \in X^*(G)$ one has:}
$$H^{0}\left(\left[X/G\right],{\mathscr L}(\lambda)\right)=\{f:X\to k, f(g\cdot x)=\lambda(g)f(x), \ \forall g\in G,x\in X \}.$$
\item[\textit{(ii)}] \textit{The $k$-vector space $H^{0}\left(\left[X/G\right],\mathscr{L}\right)$
has dimension at most one.}
\item[\textit{(iii)}] \textit{If $H^{0}\left(\left[X/G\right],\mathscr{L}\right)\neq0$ then $\mathscr{L}$
restricts to the trivial line bundle on $\left[U/G\right]$.}
\item[\textit{(iv)}] \textit{If $\mathscr{L}$ is trivial, $H^{0}\left(\left[X/G\right],\mathscr{L}\right)=k$.}
\end{enumerate}
\begin{proof}
See \cite{KoWd} Proposition 1.18.
\end{proof}
\section{Hasse invariants on Ekedahl-Oort strata}
\subsection{Construction}
In this section we construct a canonical non-vanishing section of the Hodge bundle on each Ekedahl-Oort stratum of $S_K$. We do not know if this section extends to the closure, and if it does, what its vanishing locus would be. Since we expect this to be true, we abusively call this section a Hasse invariant of the stratum. In the particular case of the $\mu$-ordinary stratum, it was proved in \cite{KoWd}, Theorem 4.12, that this canonical section does extend to the whole Shimura variety (and even to its minimal compactification), and that the non-vanishing locus is exactly the $\mu$-ordinary stratum.
Let $G$ be a reductive group over $\mathbb{F}_p$, and $\mu:\mathbb{G}_{m,k}\to G_k$ a minuscule cocharacter. Denote by $(G,P,Q,\varphi)$ the associated zip datum, and by $E$ the attached zip group.
\begin{theorem}\label{H0}
For every $E$-orbit $C\subset G$, the Picard group $\Pic^E(C)$ is finite. Denote by $N_C$ its exponent. Then for all $d\geq 1$ and all $\lambda \in X^*(E)$, the space of global sections
$$H^{0}\left(\left[E \backslash C\right],{\mathscr L}(\lambda)^{\otimes N_C d}\right)$$
is one-dimensional.
\end{theorem}
\begin{proof}
We apply Proposition \ref{exseqgen} to the $E$-variety $C$. Clearly $\mathbb{G}_m(C)^E=k^\times$. Hence we get an exact sequence:
\begin{equation} \label{exseqproof}
1 \to {\mathcal E}(C) \to X^{*}(E) \to \Pic^E(C) \to \Pic(C)
\end{equation}
Let $x$ be an arbitrary element of $C$. The map $E\to C$, $e\mapsto e\cdot x$ identifies $C$ with the quotient $E/A_x$, where $A_x$ is the scheme-theoretic stabilizer of $x$ in $E$. We have an exact sequence
$$1\to A_{x,{\rm red}}\to A_x \to A_x /A_{x,{\rm red}} \to 1$$
where $A_x /A_{x,{\rm red}}$ is a finite group scheme. Hence we have an exact sequence
$$0\to X^*(A_x /A_{x,{\rm red}}) \to X^*(A_x)\to X^*(A_{x,{\rm red}}).$$
By \cite{PWZ1} Theorem 8.1, the group $A_{x,{\rm red}}$ has the form $U_x \rtimes H_x$ where $U_x$ is unipotent and $H_x$ finite. We deduce that $X^*(A_{x,{\rm red}})$ is finite, and hence so is $X^*(A_x)$. It follows from Proposition \ref{charpic} that $\Pic^E(C)$ is finite. Let $N_C$ be its exponent.
For all $d\geq 1$, the character $N_C d \lambda$ maps to zero in $\Pic^E(C)$. Therefore there exists a function $f\in {\mathcal E}(C)$ mapping to $N_C d \lambda$. By definition, this is a non-vanishing function on $C$ satisfying the relation $f(e\cdot x)=\lambda(e)^{N_C d}f(x), \ \forall e\in E,x\in C$, so it is a nonzero global section of ${\mathscr L}(\lambda)^{\otimes N_C d}$. This concludes the proof.
\end{proof}
\begin{remark}\label{rmkN}
For a fixed character $\lambda \in X^*(E)$, let $N_C(\lambda)$ be the order of ${\mathscr L}(\lambda)$ in $\Pic^E(C)$. The set of integers $r$ such that $H^{0}\left(\left[E \backslash C\right],{\mathscr L}(\lambda)^{\otimes r}\right)\neq 0$ is the subgroup of $\mathbb{Z}$ generated by $N_C(\lambda)$.
\end{remark}
The first projection $E\to P$ induces an isomorphism $X^*(E)=X^*(P)$. A character $\lambda\in X^*(E)=X^*(P)$ is said to be \emph{ample} if the associated line bundle on $G/P$ is ample, see Definition 3.2 in \cite{KoWd}. This defines a cone in $X^*(E)$. The following result is a reformulation of Theorem 3.8 in loc. cit.
\begin{theorem}\label{mainthm}
Let $U\subset G$ denote the open $E$-orbit of $G$. Let $\lambda \in X^*(E)$ be an ample character. Then one has
$$H^{0}\left(\left[E \backslash U\right],{\mathscr L}(\lambda)^{\otimes n}\right)=H^{0}\left(\left[E \backslash G\right],{\mathscr L}(\lambda)^{\otimes n}\right)$$
for all $n\geq 1$. For $n=N_Ud$, $d\geq 1$, this space is one-dimensional and any nonzero element induces a function $G\to k$ which vanishes exactly on the complement of $U$.
\end{theorem}
\begin{proof}
The natural pull-back map $H^{0}\left(\left[E \backslash G\right],{\mathscr L}(\lambda)^{\otimes n}\right) \to H^{0}\left(\left[E \backslash U\right],{\mathscr L}(\lambda)^{\otimes n}\right)$ is clearly injective. Since $\Pic^E(U)$ is finite, we have an isomorphism
$${\mathcal E}(U)_\mathbb{Q} \simeq X^*(E)_\mathbb{Q}.$$
The space $H^{0}\left(\left[E \backslash U\right],{\mathscr L}(\lambda)^{\otimes n}\right)$ is one-dimensional if and only if the function in ${\mathcal E}(U)_\mathbb{Q}$ corresponding to $n \lambda$ is in ${\mathcal E}(U) \subset {\mathcal E}(U)_\mathbb{Q}$. In this case the function extends to a regular function on $G$ which vanishes exactly outside $U$, by \cite{KoWd} Theorem 3.8.
\end{proof}
Now let us return to the notations of section 1. For an element $w\in \leftexp{J}{W}$, denote by $N_w$ the integer associated to the $E$-orbit $O^w$ as in Theorem \ref{H0}. The map (\ref{Zetaw}) induces by pullback a map
$$H^{0}\left(\left[E \backslash O^w\right],{\mathscr L}(\lambda)^{\otimes N_w d}\right)\to H^{0}\left(S^w,\zeta^*{\mathscr L}(\lambda)^{\otimes N_w d}\right)$$
As explained in \cite{KoWd} 4.6 and in the proof of Theorem 4.12, there is a character $\lambda_{\omega_S}$ of $E$ such that
$$\zeta^*{\mathscr L}(\lambda_{\omega_S})=\omega_S.$$
Thus we get a non-vanishing section $H_w$ of $\omega_S^{\otimes N_w d}$ over $S^w$ (well-defined up to a scalar), which proves Theorem 1. Note that this construction depends only on the choice of the Siegel embedding. Therefore we call $H_w$ a canonical Hasse invariant for $S^w$. It is a difficult question whether or not $H_w$ extends to the closure $\overline{S^w}$. In an upcoming article, we will show this for Hilbert-Blumenthal Shimura varieties and some unitary cases.
\subsection{Functoriality}
Let $f:G_1 \to G_2$ be a morphism of connected reductive groups over $\mathbb{F}_p$. Let $\mu_1:\mathbb{G}_{m,k}\to G_{1,k}$ be a minuscule cocharacter, and set $\mu_2:=f\circ \mu_1$. For $i=1,2$, denote by $(G_i,P_i,Q_i,\varphi)$ the zip datum attached to $\mu_i$. Denote by $E_1$ and $E_2$ respectively the corresponding zip groups. The map $f$ induces naturally a map $E_1\to E_2$, which we denote again by $f$. We get a map of stacks:
$$[E_1\backslash G_{1,k}]\longrightarrow [E_2 \backslash G_{2,k}].$$
Let $C_1$ be an $E_1$-orbit in $G_{1,k}$ and let $C_2$ be the $E_2$-orbit containing $f(C_1)$. Let $\lambda\in X^*(E_2)$ be a character of $E_2$ and denote by $N_1(\lambda \circ f)$ and $N_2(\lambda)$ the integers attached to the pairs $(C_1,\lambda \circ f)$ and $(C_2,\lambda)$ as in Remark \ref{rmkN}. We get a map:
$$\tilde{f}:H^{0} \left( \left[E_2 \backslash C_2\right],{\mathscr L}(\lambda)^{\otimes N_2(\lambda)} \right) \to H^{0} \left(\left[E_1 \backslash C_1\right],{\mathscr L}(\lambda\circ f)^{\otimes N_2(\lambda)} \right)$$
One sees readily that this map is injective. Since the space on the left has dimension one, we deduce that it is an isomorphism. In particular the integer $N_1(\lambda \circ f)$ divides $N_2(\lambda)$.
Assume now that $f$ is an embedding and that $C_1$ is the open $E_1$-orbit in $G_1$. Define again $C_2$ to be the $E_2$-orbit containing $f(C_1)$. Let $\lambda \in X^*(E_2)$ be an ample character. Then $\lambda \circ f$ is again ample (Remark 3.5 in loc. cit.). We deduce the following isomorphism:
$$H^{0} \left( \left[E_2\backslash C_2\right],{\mathscr L}(\lambda)^{\otimes N_2(\lambda)} \right) \simeq H^{0} \left(\left[E_1 \backslash G_1\right],{\mathscr L}(\lambda\circ f)^{\otimes N_2(\lambda)} \right).$$
Any nonzero element $H$ in this space induces a function $H:G_1\to k$ which vanishes exactly outside $C_1$ by Theorem \ref{mainthm}. But by definition it does not vanish on the preimage of $C_2$, so we get the following:
\begin{proposition}\label{orbs}
Assume that $f$ is an embedding. Let $C_1$ denote the open $E_1$-orbit in $G_1$, and let $C_2$ be the $E_2$-orbit containing $f(C_1)$. Then we have the following:
$$C_1=f^{-1}(C_2).$$
\end{proposition}
\section{Consequences}
\subsection{Nonemptiness of Ekedahl-Oort strata for projective Shimura varieties of Hodge-type}
In this paragraph we assume that $S$ is a projective variety. We show that all Ekedahl-Oort strata must be nonempty. Since the map $\zeta$ is open, it suffices to prove that the superspecial locus is nonempty, that is, the unique Ekedahl-Oort stratum of dimension zero.
\begin{lemma} \label{amplesec}
Assume that $S$ is projective. Let $S^w$ be an Ekedahl-Oort stratum of positive dimension. Then $S^w$ is not closed.
\end{lemma}
\begin{proof}
Since $S^w$ is quasi-affine, it cannot be projective unless it is zero-dimensional. If $S^w$ were closed in $S$, it would be projective, as $S$ is; this contradicts $\dim S^w \geq 1$, so $S^w$ is not closed.
\end{proof}
We deduce that for any nonempty Ekedahl-Oort stratum of dimension $d \geq 1$, there is a nonempty Ekedahl-Oort stratum of dimension $ < d$ in its closure. Using this argument
recursively (beginning at the $\mu$-ordinary stratum, which is nonempty), we deduce that there is a nonempty Ekedahl-Oort stratum of dimension $0$, which must be the superspecial one. This concludes the proof.
\begin{remark}\
\begin{enumerate}[(a)]
\item This proof will work for any Shimura variety of Hodge-type, provided that the closure of an Ekedahl-Oort stratum in the minimal compactification $S_K^{\rm min}$ intersects the boundary in a closed subset of codimension $\geq 2$.
\item It follows from \cite{Ni} that every Newton stratum contains a fundamental Ekedahl-Oort stratum. The argument is completely group-theoretic, and hence applies also to Hodge type Shimura varieties. We deduce that all Newton strata of a projective Shimura variety of Hodge type are nonempty as well.
\end{enumerate}
\end{remark}
\subsection{Embeddings of Shimura varieties}
We set $\overline{\Lambda}:=\Lambda \otimes_{\mathbb{Z}_p} \mathbb{F}_p$, endowed with the symplectic form induced by $\psi$. We denote by $G_0$ the group $\GSp(\bar\Lambda)$. As explained in \cite{KoWd} 4.5, the embedding (\ref{EqEmbGroup}) induces a commutative diagram:
$$\xymatrix@1@M3pt@C6pc{
S_K \ar[d] \ar[r]^-{\zeta} & \mathop{\text{$G$-{\tt Zip}}}\nolimits^{\chi} \ar[d]^-\iota \\
S_{\tilde{K}}({\rm GSp}(\Lambda),S^{\pm})_\kappa \ar[r]^-{\zeta_0} & G_0-{\tt Zip}^{\iota\circ\chi}
}$$
where $S_{\tilde{K}}({\rm GSp}(\Lambda),S^{\pm})$ denotes the special fiber of the Siegel modular variety ${\mathcal S}_{\tilde{K}}(\GSp(\Lambda),S^{\pm})$ and $\zeta_0$ the corresponding zip map. See diagram (4.9) in loc. cit. for details. The cocharacter $\iota\circ \chi$ of $G_0$ gives rise to a zip datum $(G_0,P_0,Q_0,\varphi)$ and we get a map between the quotient stacks:
$$[E\backslash G]\longrightarrow [E_0 \backslash G_0].$$
Denote by $U$ the open $E$-orbit in $G$ and by $U_0$ the $E_0$-orbit containing $\iota(U)$. We deduce from Proposition \ref{orbs} that $U=\iota^{-1}(U_0)$, and the following result follows:
\begin{corollary3}
Let $S^\mu$ be the $\mu$-ordinary Ekedahl-Oort stratum, and let $S_0^\mu$ be the Ekedahl-Oort stratum of the Siegel modular variety $S_{\tilde{K}}({\rm GSp}(\Lambda),S^{\pm})$ containing the image of $S^\mu$ by the embedding $\iota:S_K\to S_{\tilde{K}}({\rm GSp}(\Lambda),S^{\pm})$. Then one has the equality:
$$S^\mu=\iota^{-1}(S_0^\mu).$$
In particular the $\mu$-ordinary locus $S^\mu$ is entirely determined in $S$ by the isomorphism class of the $p$-divisible group $A[p^\infty]$ (or by the group scheme $A[p]$), where $A$ is the abelian variety over $k$ attached to a $k$-point in $S_K$.
\end{corollary3}
\end{document}
\begin{document}
\title{Dynamics of molecular rotors in bulk superfluid helium}
\author{Alexander~A.~Milner$^{1}$, V.~A.~Apkarian$^{2}$ , and Valery~Milner$^{1}$}
\affiliation{$^{1}$Department of Physics \& Astronomy, The University of British Columbia, Vancouver, Canada}
\affiliation{$^{2}$Department of Chemistry, University of California, Irvine, California 92697, United States}
\date{\today}
\begin{abstract}
Molecules immersed in liquid helium are excellent probes of superfluidity. Their electronic, vibrational and rotational dynamics provide valuable clues about the superfluid at the nanoscale. Here we report on the experimental study of the laser-induced rotation of helium dimers inside the superfluid \Hefour{} bath at variable temperature. The coherent rotational dynamics of \Hetwo{} is initiated in a controlled way by ultrashort laser pulses, and tracked by means of time-resolved laser-induced fluorescence. We detect the decay of rotational coherence on the nanosecond timescale and investigate the effects of temperature on the decoherence rate. The observed temperature dependence suggests a non-equilibrium evolution of the quantum bath, accompanied by the emission of a wave of second sound. The method offers new ways of studying superfluidity with molecular nano-probes under variable thermodynamic conditions.
\end{abstract}
\maketitle
Elementary excitations in liquid helium (LHe) and its superfluid phase (He\,II) have been studied predominantly with neutron scattering \cite{Glyde2017}, as well as by observing the dynamics of embedded atoms and molecules \cite{Toennies1998}. Due to the vanishingly small solubility of impurities in LHe, the use of molecular probes has been largely limited to studies in helium nanodroplets that can be doped by injection of foreign species in pick-up cells \cite{Toennies2004, Stienkemeier2006, Slenczka2022}. A wealth of information has been extracted from such studies about the coupling between the molecular electronic, vibrational and rotational degrees of freedom and the quantum bath, be it through frequency \cite{Grebenev1998, Choi2006, Lehnig2009} or time domain \cite{Nielsen2022} measurements. As the microscopic analog of the Andronikashvili experiment, which used a torsion balance to verify the phenomenological two-fluid model of He\,II, molecular rotors have been most informative: The change in the moment of inertia and centrifugal distortion constant of an embedded molecule serves as a gauge of the dragged normal fraction, and nearly free rotation is taken as the signature of a frictionless superfluid bath \cite{Shepperson2017, Chatterley2020, Cherepanov2021, Qiang2022}.
Despite their elegance, nanodroplets suffer from a serious limitation: their thermodynamic state is fixed to a single point on the temperature-pressure $(T,P)$ plane because of the evaporative cooling used in their production. Yet to investigate the inherently macroscopic two-fluid model of He\,II, it is essential to carry out measurements as a function of thermodynamic variables. This can be accomplished by resorting to the helium dimers in the lowest metastable triplet state (\Astate{}), known as \Hetwo{} excimers, as liquid helium's native molecular probes \cite{Surko1968, Dennis1969, Hill1971}. With a lifetime on the order of a few seconds \cite{Keto1974, Benderskii1999, McKinsey2003}, the excimers are ideally suited for time-resolved studies of their interaction with the quantum environment.
Similarly to solvated electrons, \Hetwo{} excimers form in $\sim\SI{14}{\AA}$-diameter cavities (or ``bubbles'') that expel the superfluid around the molecule \cite{Dennis1969, Eloranta2001, Eloranta2002}. Electronic transitions in \Hetwo{} have been used to drive damped bubble oscillations, whose dependence on temperature and pressure was shown to track the normal fraction, thus establishing that the two-fluid model extends down to the molecular scale \cite{Benderskii2002}. Rotational lines in the fluorescence spectra, albeit unresolved but with the envelope similar to that in the gas phase, indicated free rotation of \Hetwo{} inside the bubble \cite{Dennis1969, Hill1971}. However, large inhomogeneous broadening \cite{Eltsov1995, Rellergert2008} due to bubble shape fluctuations \cite{Guo2020} prohibited the spectroscopic analysis of the excimer's rotational dynamics. The observed slow time dependence of the broadened absorption lineshape indicated the characteristic timescale for the rotational cooling of a few milliseconds \cite{Eltsov1995, Eltsov1998}, but offered no information on the (potentially much faster) decay of rotational coherence and, therefore, on the finer details of the molecular interaction with He\,II{}. With no direct access to molecular rotation in \textit{bulk superfluid}, the microscopic Andronikashvili experiment under controlled thermodynamic conditions remained unrealized.
In the time-domain study presented here, we prepare coherent rotational wave packets in \Hetwo{}, and investigate their decoherence with femtosecond resolution in the superfluid quantum bath at variable temperature. After producing $a$-state excimers with intense pump pulses \cite{Benderskii1999, McKinsey2003}, we excite molecular rotation by a linearly polarized fs ``kick'' pulse, and then follow it in time with a delayed probe (see Supplemental Material for details \cite{Supplement}). Two-photon probe absorption promotes the molecule to a fluorescent $d$ state (\Dstate{}), which decays to \Bstate{} by emitting a photon at $\approx \SI{640}{nm}$ \cite{Benderskii1999, Rellergert2008, Guo2014, Gao2015}. Owing to the anisotropic absorption cross-section, the difference between the laser-induced fluorescence (LIF) signals corresponding to two orthogonal probe polarizations (known, and hereafter referred to, as ``linear dichroism'' \LDlif{} \cite{Supplement}) reflects the ensemble-averaged alignment of molecular axes. As the latter rotate with respect to the probe polarization, the \LDlif{} signal becomes modulated at the frequency of molecular rotation, offering a direct measure of rotational coherence.
\begin{figure}
\caption{(\textbf{a}
\label{fig-LIF_LD}
\end{figure}
An example of the \LDlif{(t)} signal, recorded as a function of the kick-probe delay is shown in Fig.~\ref{fig-LIF_LD}(\textbf{a}). The main oscillation frequency of $\SI{2.28(2)}{THz}$ corresponds to the energy difference $\Delta E_{1,3}/h=\SI{2.27}{THz}$ between the $N=1$ and $N=3$ rotational states of the ground vibrational level ($v=0$) of the \Astate{} manifold. The observed oscillations are the result of the quantum coherence between the $N=1$ and $N=3$ states induced by the kick pulse (hence, labeled as $LD_{1,3}$). Owing to this coherence, the two-photon $a\rightarrow d$ absorption channels originated from these two states and sharing the same rotational level in the upper \Dstate{} manifold, interfere as schematically illustrated by the diagram in Fig.~\ref{fig-LIF_LD}. The interference leads to the time-dependent total absorption, and hence the $d\rightarrow b$ fluorescence intensity, oscillating at the frequency $\nu _{1,3}=\Delta E_{1,3}/h$.
The apparent slow amplitude modulation in Fig.~\ref{fig-LIF_LD}(\textbf{a}) is due to the frequency beating between multiple vibrational states with slightly different rotational constants. The Fourier transform of the \LDlif{(t)} signal is plotted in the inset to Fig.~\ref{fig-LIF_LD}(\textbf{b}), showing the ro-vibrational splitting of the $LD_{1,3}$ rotational line. Since the frequency bandwidth of our pulses ($\approx \SI{14}{THz}$ FWHM) is smaller than the excimer's vibrational frequency ($\SI{54}{THz}$ \cite{NIST}), the vibrational excitation is inherent in the energetic process of the pump-induced \Hetwo{} formation \cite{Keto1972}. Clearly, the vibrational relaxation is far from complete \SI{1}{ms} after the pump pulse (at the arrival time of the kick-probe pulse pair), in agreement with the previously determined vibrational decay time of order of \SI{100}{ms} \cite{Eltsov1995}. Applying the known gas-phase molecular parameters \cite{Focsa1998, Semeria2018} results in a good fit of the observed ro-vibrational spectrum (dashed black curve), indicating that within the experimental uncertainty of $\approx\SI{10}{GHz}$, the rotational constants in the three vibrational states are unaffected by the liquid environment.
The Fourier transform of a delay scan with lower frequency resolution but a higher frequency range reveals the second excited rotational line in the \LDlif{} spectrum, corresponding to the laser-induced coherence between the $N=3$ and $N=5$ rotational levels [$LD_{3,5}$ in Fig.~\ref{fig-LIF_LD}(\textbf{c})]. Similar to $LD_{1,3}$, the frequency of the second rotational peak $\nu_{3,5}=\SI{4.10(2)}{THz}$ agrees well with the energy difference between the $N=5$ and $N=3$ rotational levels of the $a$ state in the gas phase (\SI{4.08}{THz}).
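These two splittings are consistent with a single rigid-rotor constant. Assuming the gas-phase expression $E_N=hBN(N+1)$ for the \Astate{} rotational energies (a check of our own, using only the frequencies quoted above),
\[
\nu_{1,3}=\frac{E_3-E_1}{h}=10B, \qquad \nu_{3,5}=\frac{E_5-E_3}{h}=18B,
\]
so $\nu_{1,3}\approx\SI{2.27}{THz}$ gives $B\approx\SI{0.227}{THz}$ ($\approx\SI{7.6}{cm^{-1}}$) and predicts $\nu_{3,5}=18B\approx\SI{4.09}{THz}$, in line with the measured \SI{4.10(2)}{THz}.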
Unlike the case of vibrational excitation, transferring the rotational population from the ground $N=1$ to the excited $N=3$ and $N=5$ states requires two-photon Raman frequencies well within the bandwidth of our kick pulses. One may then ask whether the $LD$ lines originate from the rotationally hot molecules created by the pump pulse, which have not yet decayed to the ground rotational state, or whether they stem from the molecules coherently excited by the kick pulse. To answer this question, we measured the ratio of the second-to-first rotational peak amplitudes, $LD_{3,5}/LD_{1,3}$, as a function of the pulse energy. The results are shown by green squares in Fig.~\ref{fig-LD_time}(\textbf{a}). The quick drop in the relative amplitude of the second peak with decreasing pulse intensity indicates that the degree of rotational excitation is largely controlled by the kick pulse.
To further support this conclusion, we carried out numerical calculations of the expected ratio between the two $LD$ peaks by solving the Schr\"{o}dinger equation in the rigid-rotor approximation (see Supplemental Material for details \cite{Supplement}). In Fig.~\ref{fig-LD_time}(\textbf{a}) we plot the ratio $LD_{3,5}/LD_{1,3}$ calculated for the experimentally used kick energies. The fit provides us with the rotational populations of the $N=3$ and $N=5$ levels prior to the arrival of the kick pulse, which are respectively 0.5\% and 0.05\% (upper confidence limits of $\sim$5\% and $\sim$0.2\%). This suggests that the majority of \Hetwo{} dimers have relaxed to the ground rotational $N=1$ state \SI{1}{ms} after their creation by the pump pulse, indicating a rotational decay constant much shorter than that found in earlier studies ($\approx \SI{15}{ms}$ \cite{Eltsov1998}). The numerical calculations also show the major re-distribution of, and hence the possibility to control, the rotational population by the kick pulse. With the energy of the latter at \SI{3.5}{\mu J} ($\approx \SI{5e11}{W/cm^2}$), more than 15\% of molecules are occupying $N=3$, and almost 2\% are at $N=5$ [in thermal equilibrium, the former (latter) would correspond to rotational temperatures of \SI{43}{K} (\SI{64}{K})].
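A minimal numerical sketch (our own illustration, not the authors' analysis code) of the thermal-equilibrium comparison quoted in brackets above: taking $B\approx\SI{0.227}{THz}$ from the observed splitting and assuming that only odd $N$ are populated, with Boltzmann weights $p_N\propto(2N+1)\exp[-hBN(N+1)/k_BT]$, one indeed finds $\sim$15\% in $N=3$ at \SI{43}{K} and $\sim$2\% in $N=5$ at \SI{64}{K}:
\begin{verbatim}
import numpy as np

h, kB = 6.626e-34, 1.381e-23   # J*s, J/K
B = 0.227e12                   # Hz, from the observed 10*B ~ 2.27 THz splitting

def odd_N_populations(T, N_max=21):
    # Boltzmann weights over the odd rotational levels N = 1, 3, 5, ...
    N = np.arange(1, N_max + 1, 2)
    w = (2 * N + 1) * np.exp(-h * B * N * (N + 1) / (kB * T))
    return N, w / w.sum()

for T in (43.0, 64.0):
    N, p = odd_N_populations(T)
    print(f"T = {T:.0f} K: p(N=3) = {p[N == 3][0]:.3f}, p(N=5) = {p[N == 5][0]:.4f}")
\end{verbatim}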
\begin{figure}
\caption{(\textbf{a}
\label{fig-LD_time}
\end{figure}
While the analysis presented in Fig.~\ref{fig-LD_time}(\textbf{a}) offers a method of determining the decay of rotational \textit{population}, our approach also provides a way of measuring the decay of rotational \textit{coherence}. Both peaks in the linear dichroism spectrum exhibit strong time dependence, shown for $LD_{1,3}(t)$ in Fig.~\ref{fig-LD_time}(\textbf{b}). Here, a fine scan from $t$ to $(t+\SI{20}{ps})$ has been carried out to calculate the amplitude of the $LD_{1,3}$ peak at each (coarse) value of $t$ between 0 and \SI{1.65}{ns}. The oscillatory behavior is a consequence of the spin-rotational and spin-spin interactions, which split each rotational $N$-state into three $J=\left\{N,N\pm1\right\}$ states \cite{Lichten1974} (see the level diagram in Fig.~\ref{fig-LIF_LD}).
To verify this conclusion, we modeled the expected signal numerically as
\begin{equation}\label{eq-LDsignal}
LD_{1,3}(t)=\sum_{k=1}^{5} c^k_{1,3} \cos(2\pi \nu^k_{1,3} t) \times e^{-t/\tau_{1,3}},
\end{equation}
where $\nu ^k_{1,3}$ are the frequencies of the five transitions allowed by the selection rules (see Supplemental Material for details \cite{Supplement}), and calculated using the known accurate values for the spin-rotational and spin-spin coupling strength in the ground state of \Hetwo{} \cite{Focsa1998, Semeria2018}. Being on the scale of $\approx\SI{2}{GHz}$, the splitting is significantly smaller than the kick bandwidth, justifying our assumption that all coherences are created with the same phase. On the other hand, coefficients $c^k_{1,3}$ account for the differences in the two-photon $J$-dependent matrix elements between different absorption pathways. Here, we used these coefficients as free fitting parameters, leaving the comparison to their \textit{ab initio} values to future theoretical analysis.
Our assumption of a single decay constant $\tau _{1,3}$ in Eq.~\ref{eq-LDsignal} is justified by the quality of the fit in Fig.~\ref{fig-LD_time}(\textbf{b}). From the fit, we extract the coherence lifetime $\tau_{1,3}=\SI{1.0(5)}{ns}$, during which the molecule completes more than a thousand full rotations. The corresponding rotational linewidth of $\approx \SI{0.3}{GHz}$ is significantly narrower than the scan-length limited lines in Figs.~\ref{fig-LIF_LD}(\textbf{b,c}). We note that $v>0$ vibrational branches, not included in the fit, add fast oscillations around the plotted curve without changing the optimal fit parameters. At this time, we were unable to apply the same numerical analysis to the much weaker second rotational line ($LD_{3,5}$). Improving the signal quality and comparing the two decays is the objective of current investigation.
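To make the structure of this fit explicit, the following is a minimal sketch (our own illustration, not the code used for Fig.~\ref{fig-LD_time}); the five frequencies are placeholders standing in for the values $\nu^k_{1,3}$ fixed by the spin-rotational and spin-spin constants, and the data are synthetic:
\begin{verbatim}
import numpy as np
from scipy.optimize import curve_fit

# Placeholder frequencies in THz; in the actual analysis these are fixed
# by the known coupling constants of He2*, not fitted.
nu = np.array([2.276, 2.278, 2.280, 2.282, 2.284])

def ld_model(t, c1, c2, c3, c4, c5, tau):
    # Eq. (1): fixed-frequency cosines times a single exponential decay.
    c = np.array([c1, c2, c3, c4, c5])
    osc = np.cos(2 * np.pi * nu[:, None] * t[None, :])   # t in ps, nu in THz
    return (c[:, None] * osc).sum(axis=0) * np.exp(-t / tau)

# Synthetic trace standing in for the measured LD_{1,3}(t), 0 to 1.65 ns.
t = np.arange(0.0, 1650.0, 0.05)                          # ps
rng = np.random.default_rng(0)
data = ld_model(t, 1.0, 0.6, 0.4, 0.3, 0.2, 1000.0) + 0.05 * rng.normal(size=t.size)

popt, _ = curve_fit(ld_model, t, data, p0=[1, 0.5, 0.5, 0.5, 0.5, 800.0])
print(f"fitted coherence lifetime: {popt[-1]:.0f} ps")
\end{verbatim}
The single decay constant enters only through the common exponential factor, mirroring the assumption discussed above.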
One of the main advantages of studying molecular dynamics in bulk liquid helium is the ability to vary the temperature and pressure of the superfluid, probing the macroscopic nature of superfluidity. Here, we explored the temperature dependence of the rotational coherence between $N=1$ and $N=3$ rotational levels, reflected by the amplitude of the $LD_{1,3}$ peak in the dichroism spectrum. The experimental result, measured at a fixed kick-probe delay of \SI{850}{ps}, is shown by red circles in Fig.~\ref{fig-LD_temperature}. A clear decrease of $LD_{1,3}$ with temperature increasing towards the lambda point is a signature of the apparent interaction between the liquid and the laser-induced coherent rotation of helium dimers.
Unlike the $T$-dependent change in the total fluorescence signal \cite{Supplement}, the observed rotational decoherence cannot be attributed to bimolecular collisions. Indeed, from the known diffusion constant of the \Hetwo{} molecules in our temperature range ($\lesssim\SI[parse-numbers=false]{10^{-3}}{cm^2s^{-1}}$ \cite{McKinsey2005}), their average displacement on the time scale of our experiment is about \SI{10}{nm}, which is significantly smaller than the inter-molecular separation of $>\SI{300}{nm}$ for the experimentally determined molecular density of \SI{2e13}{cm^{-3}} (see Supplemental Material for details \cite{Supplement}).
On the other hand, scattering of thermal quasiparticles (i.e. the normal component of the liquid) on molecular rotors could be responsible for the observed temperature dependence of \LDlif{}. In a simple kinematic picture, where the (quasi)stationary dimers are colliding with He atoms moving with the velocity of first sound $u_1(T)$, one can write:
\begin{eqnarray}
\hspace{-5mm} LD_{1,3}(t,T)&=& LD^{(0)}_{1,3} \times \exp\left[-\gamma_{1,3}(T)\,t \right] \label{eq-decayRotons1} \\ &=&LD^{(0)}_{1,3} \times \exp \left[-N^{eq}_{n}(T)\,\sigma_{1,3}(T)\,u_1(T)\,t\right], \label{eq-decayRotons2}
\end{eqnarray}
where $\gamma_{1,3}$ is the decoherence rate, $N^{eq}_n$ is the equilibrium atom number density of the normal fluid, and $\sigma_{1,3}$ is the scattering cross-section. Given the unknown $T$ dependence of $\sigma_{1,3}$, we used it as a single temperature-independent fitting parameter. The best fit, shown with the thick dashed blue curve in Fig.~\ref{fig-LD_temperature}, captures the overall trend in the data, but fails to reproduce the non-exponential flattening of the curve at lower temperatures. It also yields a decoherence rate (thin dashed blue curve) that is significantly lower than the $\gamma_{1,3} \approx \SI{1}{GHz}$ observed in our scans of the kick-probe delay at $T=\SI{1.95}{K}$ discussed earlier (Fig.~\ref{fig-LD_time}).
\begin{figure}
\caption{Dependence of the amplitude of the first rotational peak ($LD_{1,3}
\label{fig-LD_temperature}
\end{figure}
While further theoretical investigation of $\sigma_{1,3} (T)$ may help reconcile our data with the collisional model of Eq.~(\ref{eq-decayRotons2}), we note that it is based on the assumption of a thermal equilibrium between molecular rotors and the surrounding liquid. This assumption justified the use of the time-independent normal fraction $N^{eq}_n(T)$. However, the impulsive excitation of \Hetwo{} by intense kick pulses may also create a non-equilibrium state, in which molecular rotors find themselves surrounded by a microscopic local volume of ``hot'' liquid. The sudden imbalance of entropy may trigger a coherent pulse of second sound, initiating a flow of the normal component away from the molecule, and a corresponding counterflow of superfluid towards it. Consider for simplicity a Gaussian pulse of width $w$, traveling with the speed of second sound $u_2(T)$, and describing the non-equilibrium density of the normal fraction at the location of the molecule:
\begin{equation}\label{eq-u2Pulse}
N^{neq}_n(T,t)=N\times \exp \left[- \bigl( u_2(T)\, t \bigr)^2/ w^2 \right],
\end{equation}
where $N$ is the total density of the liquid -- all of it in the normal phase at time zero. Substituting $N^{eq}_n(T)$ in Eq.~(\ref{eq-decayRotons2}) by this time-dependent $N^{neq}_n(T,t)$, and fitting it to the data in Fig.~\ref{fig-LD_temperature} using $\sigma_{1,3}$ and $w$ as free parameters, results in the thick solid red line. In contrast to the equilibrium model, the decoherence rate (thin solid red line) mediated by the wave of second sound is much more consistent with our findings from the delay scan at \SI{1.95}{K}.
The non-equilibrium picture also better reproduces the flattening of the $LD_{1,3}(T)$ data between 1.4 and \SI{1.8}{K}. The local minimum of $\gamma _{1,3}$ in this temperature window stems from the corresponding local maximum in the speed of second sound \cite{Donnelly2009}. The faster the entropy pulse, the faster the counterflow of the frictionless superfluid component towards the molecular rotor, and the slower its rotational decoherence. As the speed of second sound decreases, with $T$ increasing beyond \SI{1.8}{K}, the heat wave created by the kick pulse travels a shorter distance away from the molecular rotor in a given amount of time. The correspondingly slower influx of the superfluid component results in faster decoherence and a lower signal amplitude. We note that thermal diffusion, which becomes faster with increasing $T$, would result in the opposite dependence of our signal on temperature, thus making it inadequate for explaining our experimental findings.
The decoherence cross-section $\sigma_{1,3}=\SI{2.5e-2}{\AA^2}$, extracted from the fit to the non-equilibrium model, appears to be four orders of magnitude smaller than the geometric cross-section of the \Hetwo{} bubble. This indicates very weak coupling between the normal fluid and the spherical \Astate{} state, and explains why, within our experimental uncertainty, the rotational constants of the excimer in different vibrational and electronic spin states seem to be unaffected by the surrounding superfluid. The \SI{22}{nm} width of the second sound pulse, provided by the fit, is larger than the distance of \SI{17}{nm} covered by the pulse in \SI{850}{ps}, which justifies the proposed far-from-equilibrium scenario. The latter could also explain the relatively large scatter of experimental data in Fig.~\ref{fig-LD_temperature}, which we could not trace to any source of instrumental noise.
In summary, we report the first experimental observation of the laser-induced coherent molecular rotation in bulk superfluid liquid helium. Our time-resolved method enables us to detect and study various rotational dynamics in three different time windows: (i) we characterize the degree of rotational cooling on the millisecond timescale; (ii) probe the ro-vibrational, spin-rotational and spin-spin dynamics on the picosecond timescale; and (iii) investigate the decay of rotational coherence on the nanosecond timescale.
By measuring the temperature dependence of the coherent rotational signal, we identify two possible decoherence mechanisms of qualitatively different nature: one mediated by the normal component of the helium bath in thermal equilibrium with the rotating molecule, and another one based on non-equilibrium dynamics of the superfluid, governed by the wave of second sound. We note that such non-equilibrium response of He\,II{} to the sudden injection of energy by an intense ultrashort laser pulse has recently been observed in the ultrafast dynamics of rotons \cite{Milner2023a}. Since rotons are predominant collective excitations of the normal component at $T\gtrsim \SI{1}{K}$ \cite{McKinsey2005}, one may also expect the latter to interact with a suddenly initiated rotation of a molecular probe in a non-equilibrium fashion. Work is underway to further investigate the molecule-superfluid interaction under variable temperature and pressure, to better differentiate between the proposed equilibrium and non-equilibrium models.
We also demonstrate the ability to vary the degree of rotational excitation, which offers a method of measuring the anisotropic polarizability of the helium dimer. Finally, new information about the rotational relaxation of \Hetwo{} in He\,II{} may also help improve the methods of LIF-based molecular tagging \cite{McKinsey2005, Rellergert2008, Guo2009} in the studies of the counterflow \cite{Guo2010} and quantum turbulence \cite{Zmeev2013, Guo2014, Gao2015} in superfluids, as well as open new avenues for studying the microscopic implications of superfluidity with molecular nano-probes.
\begin{figure*}
\caption{Scheme of the experimental setup. Pump (lower green), kick (upper red) and probe (middle blue) pulses (all at $\lambda \approx \SI{800}
\label{fig-Setup}
\end{figure*}
\section*{Acknowledgments}
We would like to thank Dr. Jussi Eloranta and Dr. Wei Guo for many helpful discussions.
\begin{thebibliography}{50}
\makeatletter
\providecommand \@ifxundefined [1]{
\@ifx{#1\undefined}
}
\providecommand \@ifnum [1]{
\ifnum #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \@ifx [1]{
\ifx #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \natexlab [1]{#1}
\providecommand \enquote [1]{``#1''}
\providecommand \bibnamefont [1]{#1}
\providecommand \bibfnamefont [1]{#1}
\providecommand \citenamefont [1]{#1}
\providecommand \href@noop [0]{\@secondoftwo}
\providecommand \href [0]{\begingroup \@sanitize@url \@href}
\providecommand \@href[1]{\@@startlink{#1}\@@href}
\providecommand \@@href[1]{\endgroup#1\@@endlink}
\providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode
`\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax}
\providecommand \@@startlink[1]{}
\providecommand \@@endlink[0]{}
\providecommand \url [0]{\begingroup\@sanitize@url \@url }
\providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }}
\providecommand \urlprefix [0]{URL }
\providecommand \Eprint [0]{\href }
\providecommand \doibase [0]{https://doi.org/}
\providecommand \selectlanguage [0]{\@gobble}
\providecommand \bibinfo [0]{\@secondoftwo}
\providecommand \bibfield [0]{\@secondoftwo}
\providecommand \translation [1]{[#1]}
\providecommand \BibitemOpen [0]{}
\providecommand \bibitemStop [0]{}
\providecommand \bibitemNoStop [0]{.\EOS\space}
\providecommand \EOS [0]{\spacefactor3000\relax}
\providecommand \BibitemShut [1]{\csname bibitem#1\endcsname}
\let\auto@bib@innerbib\@empty
\bibitem [{\citenamefont {Glyde}(2017)}]{Glyde2017}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {H.~R.}\ \bibnamefont
{Glyde}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Rep.
Prog. Phys.}\ }\textbf {\bibinfo {volume} {81}},\ \bibinfo {pages} {014501}
(\bibinfo {year} {2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Toennies}\ and\ \citenamefont
{Vilesov}(1998)}]{Toennies1998}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~P.}\ \bibnamefont
{Toennies}}\ and\ \bibinfo {author} {\bibfnamefont {A.~F.}\ \bibnamefont
{Vilesov}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Annual Review of Physical Chemistry}\ }\textbf {\bibinfo {volume} {49}},\
\bibinfo {pages} {1} (\bibinfo {year} {1998})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Toennies}\ and\ \citenamefont
{Vilesov}(2004)}]{Toennies2004}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~P.}\ \bibnamefont
{Toennies}}\ and\ \bibinfo {author} {\bibfnamefont {A.~F.}\ \bibnamefont
{Vilesov}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Angewandte Chemie International Edition}\ }\textbf {\bibinfo {volume}
{43}},\ \bibinfo {pages} {2622} (\bibinfo {year} {2004})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Stienkemeier}\ and\ \citenamefont
{Lehmann}(2006)}]{Stienkemeier2006}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {F.}~\bibnamefont
{Stienkemeier}}\ and\ \bibinfo {author} {\bibfnamefont {K.~K.}\ \bibnamefont
{Lehmann}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Journal of Physics B: Atomic, Molecular and Optical Physics}\ }\textbf
{\bibinfo {volume} {39}},\ \bibinfo {pages} {R127} (\bibinfo {year}
{2006})}\BibitemShut {NoStop}
\bibitem [{Sle(2022)}]{Slenczka2022}
\BibitemOpen
\href@noop {} {\emph {\bibinfo {title} {Molecules in Superfluid Helium
Nanodroplets: Spectroscopy, Structure, and Dynamics}}},\ Topics in Applied
Physics\ (\bibinfo {publisher} {Springer},\ \bibinfo {address} {Cham},\
\bibinfo {year} {2022})\BibitemShut {NoStop}
\bibitem [{\citenamefont {Grebenev}\ \emph {et~al.}(1998)\citenamefont
{Grebenev}, \citenamefont {Toennies},\ and\ \citenamefont
{Vilesov}}]{Grebenev1998}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Grebenev}}, \bibinfo {author} {\bibfnamefont {J.~P.}\ \bibnamefont
{Toennies}},\ and\ \bibinfo {author} {\bibfnamefont {A.~F.}\ \bibnamefont
{Vilesov}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Science}\ }\textbf {\bibinfo {volume} {279}},\ \bibinfo {pages} {2083}
(\bibinfo {year} {1998})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Choi}\ \emph {et~al.}(2006)\citenamefont {Choi},
\citenamefont {Douberly}, \citenamefont {Falconer}, \citenamefont {Lewis},
\citenamefont {Lindsay}, \citenamefont {Merritt}, \citenamefont {Stiles},\
and\ \citenamefont {Miller}}]{Choi2006}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.~Y.}\ \bibnamefont
{Choi}}, \bibinfo {author} {\bibfnamefont {G.~E.}\ \bibnamefont {Douberly}},
\bibinfo {author} {\bibfnamefont {T.~M.}\ \bibnamefont {Falconer}}, \bibinfo
{author} {\bibfnamefont {W.~K.}\ \bibnamefont {Lewis}}, \bibinfo {author}
{\bibfnamefont {C.~M.}\ \bibnamefont {Lindsay}}, \bibinfo {author}
{\bibfnamefont {J.~M.}\ \bibnamefont {Merritt}}, \bibinfo {author}
{\bibfnamefont {P.~L.}\ \bibnamefont {Stiles}},\ and\ \bibinfo {author}
{\bibfnamefont {R.~E.}\ \bibnamefont {Miller}},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {International Reviews in Physical Chemistry}\
}\textbf {\bibinfo {volume} {25}},\ \bibinfo {pages} {15} (\bibinfo {year}
{2006})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Lehnig}\ \emph {et~al.}(2009)\citenamefont {Lehnig},
\citenamefont {Raston},\ and\ \citenamefont {J\"{a}ger}}]{Lehnig2009}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont
{Lehnig}}, \bibinfo {author} {\bibfnamefont {P.~L.}\ \bibnamefont {Raston}},\
and\ \bibinfo {author} {\bibfnamefont {W.}~\bibnamefont {J\"{a}ger}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Faraday Discussions}\ }\textbf {\bibinfo {volume} {142}},\ \bibinfo {pages} {297}
(\bibinfo {year} {2009})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Nielsen}\ \emph {et~al.}(2022)\citenamefont
{Nielsen}, \citenamefont {Pentlehner}, \citenamefont {Christiansen},
\citenamefont {Shepperson}, \citenamefont {S{\o}ndergaard}, \citenamefont
{Chatterley}, \citenamefont {Pickering}, \citenamefont {Schouder},
\citenamefont {Mu\~{n}oz}, \citenamefont {Kranabetter},\ and\ \citenamefont
{Stapelfeldt}}]{Nielsen2022}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~H.}\ \bibnamefont
{Nielsen}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Pentlehner}},
\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Christiansen}}, \bibinfo
{author} {\bibfnamefont {B.}~\bibnamefont {Shepperson}}, \bibinfo {author}
{\bibfnamefont {A.~A.}\ \bibnamefont {S{\o}ndergaard}}, \bibinfo {author}
{\bibfnamefont {A.~S.}\ \bibnamefont {Chatterley}}, \bibinfo {author}
{\bibfnamefont {J.~D.}\ \bibnamefont {Pickering}}, \bibinfo {author}
{\bibfnamefont {C.~A.}\ \bibnamefont {Schouder}}, \bibinfo {author}
{\bibfnamefont {A.~V.}\ \bibnamefont {Mu\~{n}oz}}, \bibinfo {author}
{\bibfnamefont {L.}~\bibnamefont {Kranabetter}},\ and\ \bibinfo {author}
{\bibfnamefont {H.}~\bibnamefont {Stapelfeldt}},\ }in\ \href@noop {} {\emph
{\bibinfo {booktitle} {Molecules in Superfluid Helium Nanodroplets:
Spectroscopy, Structure, and Dynamics}}},\ \bibinfo {editor} {edited by\
\bibinfo {editor} {\bibfnamefont {A.}~\bibnamefont {Slenczka}}\ and\ \bibinfo
{editor} {\bibfnamefont {J.~P.}\ \bibnamefont {Toennies}}}\ (\bibinfo
{publisher} {Springer International Publishing},\ \bibinfo {address} {Cham},\
\bibinfo {year} {2022})\ pp.\ \bibinfo {pages} {381--445}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Shepperson}\ \emph {et~al.}(2017)\citenamefont
{Shepperson}, \citenamefont {S{\o}ndergaard}, \citenamefont {Christiansen},
\citenamefont {Kaczmarczyk}, \citenamefont {Zillich}, \citenamefont
{Lemeshko},\ and\ \citenamefont {Stapelfeldt}}]{Shepperson2017}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont
{Shepperson}}, \bibinfo {author} {\bibfnamefont {A.~A.}\ \bibnamefont
{S{\o}ndergaard}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont
{Christiansen}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Kaczmarczyk}}, \bibinfo {author} {\bibfnamefont {R.~E.}\ \bibnamefont
{Zillich}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Lemeshko}},\
and\ \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Stapelfeldt}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\
}\textbf {\bibinfo {volume} {118}},\ \bibinfo {pages} {203203} (\bibinfo
{year} {2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Chatterley}\ \emph {et~al.}(2020)\citenamefont
{Chatterley}, \citenamefont {Christiansen}, \citenamefont {Schouder},
\citenamefont {J{\o}rgensen}, \citenamefont {Shepperson}, \citenamefont
{Cherepanov}, \citenamefont {Bighin}, \citenamefont {Zillich}, \citenamefont
{Lemeshko},\ and\ \citenamefont {Stapelfeldt}}]{Chatterley2020}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~S.}\ \bibnamefont
{Chatterley}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont
{Christiansen}}, \bibinfo {author} {\bibfnamefont {C.~A.}\ \bibnamefont
{Schouder}}, \bibinfo {author} {\bibfnamefont {A.~V.}\ \bibnamefont
{J{\o}rgensen}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont
{Shepperson}}, \bibinfo {author} {\bibfnamefont {I.~N.}\ \bibnamefont
{Cherepanov}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Bighin}},
\bibinfo {author} {\bibfnamefont {R.~E.}\ \bibnamefont {Zillich}}, \bibinfo
{author} {\bibfnamefont {M.}~\bibnamefont {Lemeshko}},\ and\ \bibinfo
{author} {\bibfnamefont {H.}~\bibnamefont {Stapelfeldt}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf
{\bibinfo {volume} {125}},\ \bibinfo {pages} {013001} (\bibinfo {year}
{2020})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Cherepanov}\ \emph {et~al.}(2021)\citenamefont
{Cherepanov}, \citenamefont {Bighin}, \citenamefont {Schouder}, \citenamefont
{Chatterley}, \citenamefont {Albrechtsen}, \citenamefont {Muñoz},
\citenamefont {Christiansen}, \citenamefont {Stapelfeldt},\ and\
\citenamefont {Lemeshko}}]{Cherepanov2021}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {I.~N.}\ \bibnamefont
{Cherepanov}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Bighin}},
\bibinfo {author} {\bibfnamefont {C.~A.}\ \bibnamefont {Schouder}}, \bibinfo
{author} {\bibfnamefont {A.~S.}\ \bibnamefont {Chatterley}}, \bibinfo
{author} {\bibfnamefont {S.~H.}\ \bibnamefont {Albrechtsen}}, \bibinfo
{author} {\bibfnamefont {A.~V.}\ \bibnamefont {Mu\~{n}oz}}, \bibinfo {author}
{\bibfnamefont {L.}~\bibnamefont {Christiansen}}, \bibinfo {author}
{\bibfnamefont {H.}~\bibnamefont {Stapelfeldt}},\ and\ \bibinfo {author}
{\bibfnamefont {M.}~\bibnamefont {Lemeshko}},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume}
{104}},\ \bibinfo {pages} {L061303} (\bibinfo {year} {2021})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Qiang}\ \emph {et~al.}(2022)\citenamefont {Qiang},
\citenamefont {Zhou}, \citenamefont {Lu}, \citenamefont {Lin}, \citenamefont
{Ma}, \citenamefont {Pan}, \citenamefont {Lu}, \citenamefont {Jiang},
\citenamefont {Sun}, \citenamefont {Zhang}, \citenamefont {Li}, \citenamefont
{Gong}, \citenamefont {Averbukh}, \citenamefont {Prior}, \citenamefont
{Schouder}, \citenamefont {Stapelfeldt}, \citenamefont {Cherepanov},
\citenamefont {Lemeshko}, \citenamefont {J\"{a}ger},\ and\ \citenamefont
{Wu}}]{Qiang2022}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Qiang}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Zhou}},
\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Lu}}, \bibinfo {author}
{\bibfnamefont {K.}~\bibnamefont {Lin}}, \bibinfo {author} {\bibfnamefont
{Y.}~\bibnamefont {Ma}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Pan}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Lu}}, \bibinfo
{author} {\bibfnamefont {W.}~\bibnamefont {Jiang}}, \bibinfo {author}
{\bibfnamefont {F.}~\bibnamefont {Sun}}, \bibinfo {author} {\bibfnamefont
{W.}~\bibnamefont {Zhang}}, \bibinfo {author} {\bibfnamefont
{H.}~\bibnamefont {Li}}, \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont
{Gong}}, \bibinfo {author} {\bibfnamefont {I.~S.}\ \bibnamefont {Averbukh}},
\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Prior}}, \bibinfo
{author} {\bibfnamefont {C.~A.}\ \bibnamefont {Schouder}}, \bibinfo {author}
{\bibfnamefont {H.}~\bibnamefont {Stapelfeldt}}, \bibinfo {author}
{\bibfnamefont {I.~N.}\ \bibnamefont {Cherepanov}}, \bibinfo {author}
{\bibfnamefont {M.}~\bibnamefont {Lemeshko}}, \bibinfo {author}
{\bibfnamefont {W.}~\bibnamefont {J\"{a}ger}},\ and\ \bibinfo {author}
{\bibfnamefont {J.}~\bibnamefont {Wu}},\ }\href@noop {} {\bibfield {journal}
{\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {128}},\
\bibinfo {pages} {243201} (\bibinfo {year} {2022})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Surko}\ and\ \citenamefont {Reif}(1968)}]{Surko1968}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {C.~M.}\ \bibnamefont
{Surko}}\ and\ \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Reif}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev.}\
}\textbf {\bibinfo {volume} {175}},\ \bibinfo {pages} {229} (\bibinfo {year}
{1968})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Dennis}\ \emph {et~al.}(1969)\citenamefont {Dennis},
\citenamefont {Durbin}, \citenamefont {Fitzsimmons}, \citenamefont {Heybey},\
and\ \citenamefont {Walters}}]{Dennis1969}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {W.~S.}\ \bibnamefont
{Dennis}}, \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Durbin}},
\bibinfo {author} {\bibfnamefont {W.~A.}\ \bibnamefont {Fitzsimmons}},
\bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Heybey}},\ and\ \bibinfo
{author} {\bibfnamefont {G.~K.}\ \bibnamefont {Walters}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf
{\bibinfo {volume} {23}},\ \bibinfo {pages} {1083} (\bibinfo {year}
{1969})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Hill}\ \emph {et~al.}(1971)\citenamefont {Hill},
\citenamefont {Heybey},\ and\ \citenamefont {Walters}}]{Hill1971}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~C.}\ \bibnamefont
{Hill}}, \bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Heybey}},\ and\
\bibinfo {author} {\bibfnamefont {G.~K.}\ \bibnamefont {Walters}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\
}\textbf {\bibinfo {volume} {26}},\ \bibinfo {pages} {1213} (\bibinfo {year}
{1971})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Keto}\ \emph {et~al.}(1974)\citenamefont {Keto},
\citenamefont {Soley}, \citenamefont {Stockton},\ and\ \citenamefont
{Fitzsimmons}}]{Keto1974}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~W.}\ \bibnamefont
{Keto}}, \bibinfo {author} {\bibfnamefont {F.~J.}\ \bibnamefont {Soley}},
\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Stockton}},\ and\
\bibinfo {author} {\bibfnamefont {W.~A.}\ \bibnamefont {Fitzsimmons}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\
}\textbf {\bibinfo {volume} {10}},\ \bibinfo {pages} {872} (\bibinfo {year}
{1974})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Benderskii}\ \emph {et~al.}(1999)\citenamefont
{Benderskii}, \citenamefont {Zadoyan}, \citenamefont {Schwentner},\ and\
\citenamefont {Apkarian}}]{Benderskii1999}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~V.}\ \bibnamefont
{Benderskii}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Zadoyan}},
\bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Schwentner}},\ and\
\bibinfo {author} {\bibfnamefont {V.~A.}\ \bibnamefont {Apkarian}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {J. Chem. Phys.}\
}\textbf {\bibinfo {volume} {110}},\ \bibinfo {pages} {1542} (\bibinfo {year}
{1999})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {McKinsey}\ \emph {et~al.}(2003)\citenamefont
{McKinsey}, \citenamefont {Brome}, \citenamefont {Dzhosyuk}, \citenamefont
{Golub}, \citenamefont {Habicht}, \citenamefont {Huffman}, \citenamefont
{Korobkina}, \citenamefont {Lamoreaux}, \citenamefont {Mattoni},
\citenamefont {Thompson}, \citenamefont {Yang},\ and\ \citenamefont
{Doyle}}]{McKinsey2003}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.~N.}\ \bibnamefont
{McKinsey}}, \bibinfo {author} {\bibfnamefont {C.~R.}\ \bibnamefont {Brome}},
\bibinfo {author} {\bibfnamefont {S.~N.}\ \bibnamefont {Dzhosyuk}}, \bibinfo
{author} {\bibfnamefont {R.}~\bibnamefont {Golub}}, \bibinfo {author}
{\bibfnamefont {K.}~\bibnamefont {Habicht}}, \bibinfo {author} {\bibfnamefont
{P.~R.}\ \bibnamefont {Huffman}}, \bibinfo {author} {\bibfnamefont
{E.}~\bibnamefont {Korobkina}}, \bibinfo {author} {\bibfnamefont {S.~K.}\
\bibnamefont {Lamoreaux}}, \bibinfo {author} {\bibfnamefont {C.~E.~H.}\
\bibnamefont {Mattoni}}, \bibinfo {author} {\bibfnamefont {A.~K.}\
\bibnamefont {Thompson}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont
{Yang}},\ and\ \bibinfo {author} {\bibfnamefont {J.~M.}\ \bibnamefont
{Doyle}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {67}},\ \bibinfo {pages} {062716}
(\bibinfo {year} {2003})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Eloranta}\ and\ \citenamefont
{Apkarian}(2001)}]{Eloranta2001}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Eloranta}}\ and\ \bibinfo {author} {\bibfnamefont {V.~A.}\ \bibnamefont
{Apkarian}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {J.
Chem. Phys.}\ }\textbf {\bibinfo {volume} {115}},\ \bibinfo {pages} {752}
(\bibinfo {year} {2001})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Eloranta}\ \emph {et~al.}(2002)\citenamefont
{Eloranta}, \citenamefont {Schwentner},\ and\ \citenamefont
{Apkarian}}]{Eloranta2002}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Eloranta}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont
{Schwentner}},\ and\ \bibinfo {author} {\bibfnamefont {V.~A.}\ \bibnamefont
{Apkarian}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {J.
Chem. Phys.}\ }\textbf {\bibinfo {volume} {116}},\ \bibinfo {pages} {4039}
(\bibinfo {year} {2002})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Benderskii}\ \emph {et~al.}(2002)\citenamefont
{Benderskii}, \citenamefont {Eloranta}, \citenamefont {Zadoyan},\ and\
\citenamefont {Apkarian}}]{Benderskii2002}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~V.}\ \bibnamefont
{Benderskii}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Eloranta}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Zadoyan}},\
and\ \bibinfo {author} {\bibfnamefont {V.~A.}\ \bibnamefont {Apkarian}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {J. Chem. Phys.}\
}\textbf {\bibinfo {volume} {117}},\ \bibinfo {pages} {1201} (\bibinfo {year}
{2002})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Eltsov}\ \emph {et~al.}(1995)\citenamefont {Eltsov},
\citenamefont {Parshin},\ and\ \citenamefont {Todoshchenko}}]{Eltsov1995}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {V.~B.}\ \bibnamefont
{Eltsov}}, \bibinfo {author} {\bibfnamefont {A.~Y.}\ \bibnamefont
{Parshin}},\ and\ \bibinfo {author} {\bibfnamefont {I.~A.}\ \bibnamefont
{Todoshchenko}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{JETP}\ }\textbf {\bibinfo {volume} {81}},\ \bibinfo {pages} {909} (\bibinfo
{year} {1995})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Rellergert}\ \emph {et~al.}(2008)\citenamefont
{Rellergert}, \citenamefont {Cahn}, \citenamefont {Garvan}, \citenamefont
{Hanson}, \citenamefont {Lippincott}, \citenamefont {Nikkel},\ and\
\citenamefont {McKinsey}}]{Rellergert2008}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {W.~G.}\ \bibnamefont
{Rellergert}}, \bibinfo {author} {\bibfnamefont {S.~B.}\ \bibnamefont
{Cahn}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Garvan}},
\bibinfo {author} {\bibfnamefont {J.~C.}\ \bibnamefont {Hanson}}, \bibinfo
{author} {\bibfnamefont {W.~H.}\ \bibnamefont {Lippincott}}, \bibinfo
{author} {\bibfnamefont {J.~A.}\ \bibnamefont {Nikkel}},\ and\ \bibinfo
{author} {\bibfnamefont {D.~N.}\ \bibnamefont {McKinsey}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf
{\bibinfo {volume} {100}},\ \bibinfo {pages} {025301} (\bibinfo {year}
{2008})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Guo}\ and\ \citenamefont {Golov}(2020)}]{Guo2020}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {W.}~\bibnamefont
{Guo}}\ and\ \bibinfo {author} {\bibfnamefont {A.~I.}\ \bibnamefont
{Golov}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. B}\ }\textbf {\bibinfo {volume} {101}},\ \bibinfo {pages} {064515}
(\bibinfo {year} {2020})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Eltsov}\ \emph {et~al.}(1998)\citenamefont {Eltsov},
\citenamefont {Dzhosyuk}, \citenamefont {Parshin},\ and\ \citenamefont
{Todoshchenko}}]{Eltsov1998}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {V.~B.}\ \bibnamefont
{Eltsov}}, \bibinfo {author} {\bibfnamefont {S.~N.}\ \bibnamefont
{Dzhosyuk}}, \bibinfo {author} {\bibfnamefont {A.~Y.}\ \bibnamefont
{Parshin}},\ and\ \bibinfo {author} {\bibfnamefont {I.~A.}\ \bibnamefont
{Todoshchenko}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{J. Low Temp. Phys.}\ }\textbf {\bibinfo {volume} {110}},\ \bibinfo {pages}
{219} (\bibinfo {year} {1998})}\BibitemShut {NoStop}
\bibitem [{Sup()}]{Supplement}
\BibitemOpen
\href@noop {} {\bibinfo {journal} {Supplementary material}\ }\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Guo}\ \emph {et~al.}(2014)\citenamefont {Guo},
\citenamefont {La~Mantia}, \citenamefont {Lathrop},\ and\ \citenamefont
{Van~Sciver}}]{Guo2014}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont
{W.}~\bibnamefont {Guo}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{La~Mantia}}, \bibinfo {author} {\bibfnamefont {D.~P.}\ \bibnamefont
{Lathrop}},\ and\ \bibinfo {author} {\bibfnamefont {S.~W.}\ \bibnamefont
{Van~Sciver}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{PNAS}\ }\textbf {\bibinfo {volume} {111}},\ \bibinfo {pages} {4653}
(\bibinfo {year} {2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Gao}\ \emph {et~al.}(2015)\citenamefont {Gao},
\citenamefont {Marakov}, \citenamefont {Guo}, \citenamefont {Pawlowski},
\citenamefont {Van~Sciver}, \citenamefont {Ihas}, \citenamefont {McKinsey},\
and\ \citenamefont {Vinen}}]{Gao2015}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Gao}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Marakov}},
\bibinfo {author} {\bibfnamefont {W.}~\bibnamefont {Guo}}, \bibinfo {author}
{\bibfnamefont {B.~T.}\ \bibnamefont {Pawlowski}}, \bibinfo {author}
{\bibfnamefont {S.~W.}\ \bibnamefont {Van~Sciver}}, \bibinfo {author}
{\bibfnamefont {G.~G.}\ \bibnamefont {Ihas}}, \bibinfo {author}
{\bibfnamefont {D.~N.}\ \bibnamefont {McKinsey}},\ and\ \bibinfo {author}
{\bibfnamefont {W.~F.}\ \bibnamefont {Vinen}},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Rev. Sci. Instrum.}\ }\textbf {\bibinfo
{volume} {86}},\ \bibinfo {pages} {093904} (\bibinfo {year}
{2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Huber}\ and\ \citenamefont {Herzberg}(2012)}]{NIST}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont
{Huber}}\ and\ \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Herzberg}},\ }\href@noop {} {\emph {\bibinfo {title} {NIST Chemistry
WebBook, NIST Standard Reference Database}}},\ \bibinfo {series} {Constants
of Diatomic Molecules}, Vol.~\bibinfo {volume} {69}\ (\bibinfo {publisher}
{National Institute of Standards and Technology},\ \bibinfo {address}
{Gaithersburg MD},\ \bibinfo {year} {2012})\BibitemShut {NoStop}
\bibitem [{\citenamefont {Keto}\ \emph {et~al.}(1972)\citenamefont {Keto},
\citenamefont {Stockton},\ and\ \citenamefont {Fitzsimmons}}]{Keto1972}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~W.}\ \bibnamefont
{Keto}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Stockton}},\
and\ \bibinfo {author} {\bibfnamefont {W.~A.}\ \bibnamefont {Fitzsimmons}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\
}\textbf {\bibinfo {volume} {28}},\ \bibinfo {pages} {792} (\bibinfo {year}
{1972})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Focsa}\ \emph {et~al.}(1998)\citenamefont {Focsa},
\citenamefont {Bernath},\ and\ \citenamefont {Colin}}]{Focsa1998}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {C.}~\bibnamefont
{Focsa}}, \bibinfo {author} {\bibfnamefont {P.~F.}\ \bibnamefont {Bernath}},\
and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Colin}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {J. Mol.
Spectrosc.}\ }\textbf {\bibinfo {volume} {191}},\ \bibinfo {pages} {209}
(\bibinfo {year} {1998})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Semeria}\ \emph {et~al.}(2018)\citenamefont
{Semeria}, \citenamefont {Jansen}, \citenamefont {Clausen}, \citenamefont
{Agner}, \citenamefont {Schmutz},\ and\ \citenamefont {Merkt}}]{Semeria2018}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont
{Semeria}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Jansen}},
\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Clausen}}, \bibinfo
{author} {\bibfnamefont {J.~A.}\ \bibnamefont {Agner}}, \bibinfo {author}
{\bibfnamefont {H.}~\bibnamefont {Schmutz}},\ and\ \bibinfo {author}
{\bibfnamefont {F.}~\bibnamefont {Merkt}},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume}
{98}},\ \bibinfo {pages} {062518} (\bibinfo {year} {2018})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Lichten}\ \emph {et~al.}(1974)\citenamefont
{Lichten}, \citenamefont {McCusker},\ and\ \citenamefont
{Vierima}}]{Lichten1974}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {W.}~\bibnamefont
{Lichten}}, \bibinfo {author} {\bibfnamefont {M.~V.}\ \bibnamefont
{McCusker}},\ and\ \bibinfo {author} {\bibfnamefont {T.~L.}\ \bibnamefont
{Vierima}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {J.
Chem. Phys.}\ }\textbf {\bibinfo {volume} {61}},\ \bibinfo {pages} {2200}
(\bibinfo {year} {1974})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {McKinsey}\ \emph {et~al.}(2005)\citenamefont
{McKinsey}, \citenamefont {Lippincott}, \citenamefont {Nikkel},\ and\
\citenamefont {Rellergert}}]{McKinsey2005}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.~N.}\ \bibnamefont
{McKinsey}}, \bibinfo {author} {\bibfnamefont {W.~H.}\ \bibnamefont
{Lippincott}}, \bibinfo {author} {\bibfnamefont {J.~A.}\ \bibnamefont
{Nikkel}},\ and\ \bibinfo {author} {\bibfnamefont {W.~G.}\ \bibnamefont
{Rellergert}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {95}},\ \bibinfo {pages}
{111101} (\bibinfo {year} {2005})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Donnelly}(2009)}]{Donnelly2009}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {R.~J.}\ \bibnamefont
{Donnelly}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Phys. Today}\ }\textbf {\bibinfo {volume} {62}},\ \bibinfo {pages} {34}
(\bibinfo {year} {2009})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Milner}\ \emph {et~al.}(2023)\citenamefont {Milner},
\citenamefont {Stamp},\ and\ \citenamefont {Milner}}]{Milner2023a}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Milner}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Stamp}},\ and\
\bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Milner}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {PNAS, in press}\ } (\bibinfo
{year} {2023})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Guo}\ \emph {et~al.}(2009)\citenamefont {Guo},
\citenamefont {Wright}, \citenamefont {Cahn}, \citenamefont {Nikkel},\ and\
\citenamefont {McKinsey}}]{Guo2009}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {W.}~\bibnamefont
{Guo}}, \bibinfo {author} {\bibfnamefont {J.~D.}\ \bibnamefont {Wright}},
\bibinfo {author} {\bibfnamefont {S.~B.}\ \bibnamefont {Cahn}}, \bibinfo
{author} {\bibfnamefont {J.~A.}\ \bibnamefont {Nikkel}},\ and\ \bibinfo
{author} {\bibfnamefont {D.~N.}\ \bibnamefont {McKinsey}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf
{\bibinfo {volume} {102}},\ \bibinfo {pages} {235301} (\bibinfo {year}
{2009})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Guo}\ \emph {et~al.}(2010)\citenamefont {Guo},
\citenamefont {Cahn}, \citenamefont {Nikkel}, \citenamefont {Vinen},\ and\
\citenamefont {McKinsey}}]{Guo2010}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {W.}~\bibnamefont
{Guo}}, \bibinfo {author} {\bibfnamefont {S.~B.}\ \bibnamefont {Cahn}},
\bibinfo {author} {\bibfnamefont {J.~A.}\ \bibnamefont {Nikkel}}, \bibinfo
{author} {\bibfnamefont {W.~F.}\ \bibnamefont {Vinen}},\ and\ \bibinfo
{author} {\bibfnamefont {D.~N.}\ \bibnamefont {McKinsey}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf
{\bibinfo {volume} {105}},\ \bibinfo {pages} {045301} (\bibinfo {year}
{2010})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Zmeev}\ \emph {et~al.}(2013)\citenamefont {Zmeev},
\citenamefont {Pakpour}, \citenamefont {Walmsley}, \citenamefont {Golov},
\citenamefont {Guo}, \citenamefont {McKinsey}, \citenamefont {Ihas},
\citenamefont {McClintock}, \citenamefont {Fisher},\ and\ \citenamefont
{Vinen}}]{Zmeev2013}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.~E.}\ \bibnamefont
{Zmeev}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Pakpour}},
\bibinfo {author} {\bibfnamefont {P.~M.}\ \bibnamefont {Walmsley}}, \bibinfo
{author} {\bibfnamefont {A.~I.}\ \bibnamefont {Golov}}, \bibinfo {author}
{\bibfnamefont {W.}~\bibnamefont {Guo}}, \bibinfo {author} {\bibfnamefont
{D.~N.}\ \bibnamefont {McKinsey}}, \bibinfo {author} {\bibfnamefont {G.~G.}\
\bibnamefont {Ihas}}, \bibinfo {author} {\bibfnamefont {P.~V.~E.}\
\bibnamefont {McClintock}}, \bibinfo {author} {\bibfnamefont {S.~N.}\
\bibnamefont {Fisher}},\ and\ \bibinfo {author} {\bibfnamefont {W.~F.}\
\bibnamefont {Vinen}},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {110}},\ \bibinfo
{pages} {175303} (\bibinfo {year} {2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Renard}\ \emph {et~al.}(2003)\citenamefont {Renard},
\citenamefont {Renard}, \citenamefont {Gu\'{e}rin}, \citenamefont {Pashayan},
\citenamefont {Lavorel}, \citenamefont {Faucher},\ and\ \citenamefont
{Jauslin}}]{Renard2003}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {V.}~\bibnamefont
{Renard}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Renard}},
\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Gu\'{e}rin}}, \bibinfo
{author} {\bibfnamefont {Y.~T.}\ \bibnamefont {Pashayan}}, \bibinfo {author}
{\bibfnamefont {B.}~\bibnamefont {Lavorel}}, \bibinfo {author} {\bibfnamefont
{O.}~\bibnamefont {Faucher}},\ and\ \bibinfo {author} {\bibfnamefont {H.~R.}\
\bibnamefont {Jauslin}},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {90}},\ \bibinfo
{pages} {153601} (\bibinfo {year} {2003})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Mendoza-Luna}\ \emph {et~al.}(2016)\citenamefont
{Mendoza-Luna}, \citenamefont {Shiltagh}, \citenamefont {Watkins},
\citenamefont {Bonifaci}, \citenamefont {Aitken},\ and\ \citenamefont {von
Haeften}}]{Mendoza2016}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {L.~G.}\ \bibnamefont
{Mendoza-Luna}}, \bibinfo {author} {\bibfnamefont {N.~M.~K.}\ \bibnamefont
{Shiltagh}}, \bibinfo {author} {\bibfnamefont {M.~J.}\ \bibnamefont
{Watkins}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Bonifaci}},
\bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Aitken}},\ and\ \bibinfo
{author} {\bibfnamefont {K.}~\bibnamefont {von Haeften}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {J. Phys. Chem. Lett.}\ }\textbf
{\bibinfo {volume} {7}},\ \bibinfo {pages} {4666} (\bibinfo {year}
{2016})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Hereford}(1972)}]{Hereford1972}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {F.~L.}\ \bibnamefont
{Hereford}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {29}},\ \bibinfo {pages}
{1722} (\bibinfo {year} {1972})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Andersen}\ \emph {et~al.}(1996)\citenamefont
{Andersen}, \citenamefont {Bossy}, \citenamefont {Cook}, \citenamefont
{Randl},\ and\ \citenamefont {Ragazzoni}}]{Andersen1996}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {K.~H.}\ \bibnamefont
{Andersen}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Bossy}},
\bibinfo {author} {\bibfnamefont {J.~C.}\ \bibnamefont {Cook}}, \bibinfo
{author} {\bibfnamefont {O.~G.}\ \bibnamefont {Randl}},\ and\ \bibinfo
{author} {\bibfnamefont {J.~L.}\ \bibnamefont {Ragazzoni}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf
{\bibinfo {volume} {77}},\ \bibinfo {pages} {4043} (\bibinfo {year}
{1996})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Bedell}\ \emph {et~al.}(1982)\citenamefont {Bedell},
\citenamefont {Pines},\ and\ \citenamefont {Fomin}}]{Bedell1982}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont
{Bedell}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Pines}},\ and\
\bibinfo {author} {\bibfnamefont {I.}~\bibnamefont {Fomin}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {J. Low Temp. Phys.}\ }\textbf
{\bibinfo {volume} {48}},\ \bibinfo {pages} {417} (\bibinfo {year}
{1982})}\BibitemShut {NoStop}
\bibitem [{Note1()}]{Note1}
\BibitemOpen
\bibinfo {note} {This observation is consistent with the earlier study of
Benderskii et~al.~\cite {Benderskii1999}}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Auzinsh}\ and\ \citenamefont
{Ferber}(1995)}]{Auzinsh1995}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Auzinsh}}\ and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont
{Ferber}},\ }\href@noop {} {\emph {\bibinfo {title} {Optical Polarization of
Molecules}}},\ Cambridge Monographs on Atomic, Molecular and Chemical
Physics\ (\bibinfo {publisher} {Cambridge University Press},\ \bibinfo
{address} {Cambridge},\ \bibinfo {year} {1995})\BibitemShut {NoStop}
\bibitem [{Note2()}]{Note2}
\BibitemOpen
\bibinfo {note} {The approach is similar to the polarization-based studies of
the laser-induced rotation of gas-phase molecules \cite
{Renard2003}}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Flo{\ss}}\ and\ \citenamefont
{Averbukh}(2012)}]{Floss2012}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Flo{\ss}}}\ and\ \bibinfo {author} {\bibfnamefont {I.~{\relax Sh}.}\
\bibnamefont {Averbukh}},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {86}},\ \bibinfo
{pages} {021401} (\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{Note3()}]{Note3}
\BibitemOpen
\bibinfo {note} {J.~Eloranta, private communication. Calculated using the
method of Coupled Cluster with Single and Double substitutions, and the basis
set from Ref.~\protect \citenum {Eloranta2001}}\BibitemShut {NoStop}
\end{thebibliography}
\section*{Supplemental Material}
\subsection*{Generation of \Hetwo{} excimers solvated in He\,II{}}
Our experiments are performed in a custom-built helium cryostat (Fig.~\ref{fig-Setup}). By pumping on the helium, the temperature of the liquid can be varied between $\approx\SI{1.4}{K}$ and $\SI{4.2}{K}$, while the pressure above the surface is at the saturated vapor pressure (SVP). Three laser pulses -- pump, kick and probe -- are delivered to the cryostat at the repetition rate of \SI{1}{kHz}, and are focused in LHe with a \SI{250}{mm}-focal length lens. Extracted from the same ultrafast Ti:Sapph laser system, they share the same central wavelength of $\SI{798}{nm}$ and bandwidth of $\SI{30}{nm}$ (full width at half maximum, FWHM), but differ in pulse length, energy, and the time of arrival. The kick-probe pulse pair is delayed from the pump by $\approx\SI{1}{ms}$, whereas the delay within the pair can be scanned up to \SI{1.2}{ns} with fs accuracy.
Pump pulses, stretched to $\approx \SI{2}{ps}$ and carrying $\SI{80}{\mu J}$ per pulse, are used to create helium excimers. Their peak intensity of \SI{4e11}{W/cm^2} is significantly below the breakdown threshold $I_\text{break}\approx\SI{5e13}{W/cm^2}$, determined in previous studies \cite{Benderskii1999, Gao2015}. We note, however, that sub-breakdown intensities do not guarantee the production of the desired ``bubble phase'', i.e. an ensemble of isolated \Hetwo{} molecules, each solvated in the liquid in its own bubble. To illustrate this, Fig.~\ref{fig-LIF}(\textbf{a}) shows the observed fluorescence spectrum corresponding to pump intensities of \SI{1.7e13}{W/cm^2} (lower red curve) and \SI{6.8e12}{W/cm^2} (upper blue curve). Even though both intensities are below $I_\text{break}$, the gas-phase-like narrow rotational lines in the lower trace indicate that the molecules are created in macroscopic \textit{gas pockets} \cite{Mendoza2016}. We found the transition between the bubble and gas phases to occur at $I_\text{gas}\approx \SI{5e12}{W/cm^2}$, which dictated our choice of all pulse intensities well below this threshold value.
\begin{figure}
\caption{(\textbf{a}) Fluorescence spectra recorded at pump intensities of \SI{1.7e13}{W/cm^2} (lower red curve) and \SI{6.8e12}{W/cm^2} (upper blue curve). (\textbf{b}) Temperature dependence of the total LIF signal (dark dots) and the fit to Eq.~(\ref{eq-density}) (red solid line).}
\label{fig-LIF}
\end{figure}
To further verify the important aspect of preparing the excimers in the solvated bubble state, we investigated the influence of the LHe temperature on the total LIF signal. The latter is proportional to the time-dependent \Hetwo{} number density $N(t)$. The time dependence is governed by the bimolecular annihilation reaction, $\mathrm{He}_2^* + \mathrm{He}_2^* \rightarrow \mathrm{He}_2^{**} + 2\mathrm{He}$ \cite{Keto1974, Benderskii1999}:
\begin{equation}\label{eq-density}
N(t)=N_0/\left[1+K(T) N_0 t\right].
\end{equation}
The dependence on temperature enters through the reaction rate $K(T)$. In the bubble phase, $K(T)$ is determined by the diffusion of \Hetwo{} molecules in the liquid due to their scattering on thermal rotons \cite{Keto1972, Hereford1972}. As the roton energy $\Delta (T)$ decreases with increasing temperature \cite{Andersen1996}, the equilibrium density of rotons grows proportionally to $\sqrt{T} \exp\left[ -\Delta (T)/T \right]$ \cite{Bedell1982}, causing the scattering length and the diffusion coefficient to decrease. Slower diffusion at higher $T$ results in a lower annihilation rate, and a correspondingly larger number of excimers at any given time.
Dark dots in panel (\textbf{b}) of Fig.~\ref{fig-LIF} show our measured temperature dependence of the fluorescence intensity, induced by a fs probe pulse (pulse length of \SI{70}{fs} FWHM, intensity of \SI{1.3e12}{W/cm^2}) following the ps pump pulse after a fixed delay of $\approx \SI{1}{ms}$. The red solid line is a fit to Eq.~(\ref{eq-density}), with the known roton energy $\Delta (T)$ from the neutron scattering experiments \cite{Andersen1996}. An excellent fit to the roton-mediated diffusion model confirms the production of \Hetwo{} molecules properly solvated in liquid helium. Note that in the gas phase, one would expect a very different rate of bimolecular decay, owing to the increasing gas pressure, and hence higher annihilation rate, with increasing temperature. From the fit in Fig.~\ref{fig-LIF}(\textbf{b}), the initial molecular number density ($N_{0}$, the only fitting parameter) is \SI{1.9(1)e13}{cm^{-3}}.
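The structure of this fit can be illustrated with a short sketch. The code below is not the analysis used for Fig.~\ref{fig-LIF}(\textbf{b}); it assumes, following the discussion above, a roton-limited rate of the form $K(T) \propto T^{-1/2}\exp\left[\Delta/T\right]$ with a constant gap $\Delta \approx \SI{8.6}{K}$ (the actual analysis uses the temperature-dependent $\Delta(T)$ from neutron scattering), and the data and numerical values are placeholders.
\begin{verbatim}
# Illustrative sketch only: fitting a fixed-delay LIF signal
# S(T) ~ N0 / (1 + K(T) N0 t_delay) with K(T) ~ T^(-1/2) exp(+Delta/T).
# The constant gap Delta and all numbers below are placeholder assumptions.
import numpy as np
from scipy.optimize import curve_fit

DELTA = 8.6  # K, assumed constant roton gap

def signal(T, scale, b):
    # scale absorbs N0 and detection efficiency; b absorbs K0 * N0 * t_delay
    k_shape = np.exp(DELTA / T) / np.sqrt(T)
    return scale / (1.0 + b * k_shape)

# synthetic "measurements" standing in for the dark dots of the figure
T_data = np.linspace(1.4, 2.1, 15)
rng = np.random.default_rng(0)
S_data = signal(T_data, 1.0, 2.0e-3) * (1 + 0.03 * rng.standard_normal(15))

popt, pcov = curve_fit(signal, T_data, S_data, p0=[1.0, 1.0e-3])
print("fitted scale = %.3f, fitted b = %.2e" % tuple(popt))
\end{verbatim}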
\subsection*{Rotational excitation of \Hetwo{} excimers}
To excite the rotation of helium excimers we send the linearly polarized femtosecond kick pulse prior to the probe (upper red in Fig.~\ref{fig-Setup}). Utilizing the \SI{1}{kHz} repetition rate of our laser system, we make the kick-probe pair trail the pump pulse by about \SI{1}{ms}. At that time, the number of \Hetwo{} molecules that have survived the bimolecular reaction is still quite high, yet given the experimentally observed decay of the pump-induced fluorescence on the timescale of a few tens of nanoseconds \footnote{This observation is consistent with the earlier study of Benderskii et~al.~\cite{Benderskii1999}}, all these molecules have already decayed to their lowest metastable electronic state \Astate{}. On the other hand, the rotational relaxation is expected to occur on the scale of a few milliseconds \cite{Eltsov1998}. As we demonstrate in this work, even after \SI{1}{ms} most of the molecules have also relaxed to the ground rotational state corresponding to the angular momentum (excluding electronic spin) $N=1$ (due to the nuclear spin statistics, only odd values of $N$ are allowed in \Astate{} \cite{Focsa1998}).
\subsection*{Detection of \Hetwo{} rotation}
Our method of detecting molecular rotation is based on the anisotropic absorption cross-section, common to linear molecules \cite{Auzinsh1995}. Two probe photons with a wavelength of \SI{800}{nm} promote the excimer from the ground $a$ to the excited $d$ state \cite{Benderskii2002, Rellergert2008} with an absorption rate dependent on the angle between the molecular axis and the vector of the probe polarization. The difference in the absorption of two orthogonally polarized probe pulses (known as ``linear dichroism'', $LD$) corresponds to the anisotropy of the ensemble-averaged distribution of molecular axes, whereas its time dependence reflects the rotational dynamics of the molecules \footnote{The approach is similar to the polarization-based studies of the laser-induced rotation of gas-phase molecules \cite{Renard2003}}. Since we detect this linear dichroism via the induced fluorescence on the $d\rightarrow b$ transition, we refer to it as \LDlif{}.
We keep the probe polarization constant and modulate the polarization direction of the kick pulses between $0^{\circ}$ and $90^{\circ}$ with a Pockels cell ($PC$ in Fig.~\ref{fig-Setup}). The \LDlif{} signal is defined as:
\begin{equation}\label{eq-LD_LIF}
LD_\textsc{lif}=\frac{I_\textsc{lif}^\parallel - I_\textsc{lif}^\perp}{(I_\textsc{lif}^\parallel + I_\textsc{lif}^\perp)/2},
\end{equation}
where $I_\textsc{lif}^{\parallel,\perp}$ is the fluorescence intensity recorded (by a photo-multiplier tube) with the kick polarization respectively parallel or perpendicular to the fixed probe polarization. We use a Boxcar integrator to gate the fluorescence signal around the arrival time of the kick-probe pulse pair. A lock-in amplifier is employed to retrieve the dichroism from the LIF intensity as the signal component at the polarization modulation frequency $\omega _{m} \approx\SI{200}{Hz}$. To eliminate possible instrumental artifacts due to our detection geometry, we use a half-wave plate ($\lambda /2$ in Fig.~\ref{fig-Setup}) to rotate all polarization vectors by $45^{\circ}$ with respect to the excitation-observation plane. The angle of the plate is fine-tuned to bring the \LDlif{} signal to zero when the probe pulse is blocked. We note that due to the undesired \textit{kick}-induced fluorescence, inadvertently entering the denominator in Eq.~\ref{eq-LD_LIF}, we are currently unable to extract reliable absolute values of \LDlif{}.
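As an illustration of the demodulation step only (not the actual acquisition code), the sketch below recovers \LDlif{} from a synthetic pulse train whose kick polarization is toggled block-wise, as by the Pockels cell; the toggle period, intensities and noise level are placeholder assumptions, and the modulation frequency is taken as \SI{250}{Hz} so that it aligns with the \SI{1}{kHz} shot grid.
\begin{verbatim}
# Illustrative sketch with synthetic numbers (not the acquisition code):
# recovering LD_LIF from shots whose kick polarization is toggled
# between parallel (+1) and perpendicular (-1) to the probe.
import numpy as np

shots_per_half = 2        # toggle every 2 shots -> f_m = 250 Hz at 1 kHz
n_shots = 20000
rng = np.random.default_rng(1)

pol = np.where((np.arange(n_shots) // shots_per_half) % 2 == 0, 1, -1)

I_par, I_perp = 1.05, 0.95                      # assumed gated LIF levels
I = np.where(pol > 0, I_par, I_perp) + 0.05 * rng.standard_normal(n_shots)

# square-wave demodulation: <I*pol> = (I_par - I_perp)/2,
#                           <I>     = (I_par + I_perp)/2
LD_lif = np.mean(I * pol) / (np.mean(I) / 2)    # cf. the definition above
print("recovered LD_LIF = %.3f (expected %.3f)"
      % (LD_lif, (I_par - I_perp) / ((I_par + I_perp) / 2)))
\end{verbatim}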
\subsection*{Calculating the effect of a fs rotational kick}
Consider a rotational state $\psi(t)=\sum_{J,M} c_{J,M} \exp(-iE_J t) \ket{J,M}$ interacting with a laser field ${\cal E}_\text{kick}(t)$. Here, $J$ and $M$ are the molecular total angular momentum (including electronic spin) and its projection on the vector of kick polarization, whereas $c_{J,M}$ and $E_J$ are the amplitude and energy of the corresponding eigenstate. The interaction potential is given by \cite{Floss2012}
\begin{equation}\label{eq-potential}
V(t)=-\frac{1}{4}\Delta\alpha \cos^2(\theta)\, {\cal E}^2_\text{kick}(t),
\end{equation}
where $\Delta \alpha =\SI{35.1}{\AA^3}$ is the difference between the molecular polarizability along and perpendicular to the molecular axis \footnote{J.~Eloranta, private communication. Calculated using the method of Coupled Cluster with Single and Double substitutions, and the basis set from Ref.~\citenum{Eloranta2001}}, and $\theta $ is the angle between the molecular axis and the laser field polarization. We numerically solved the Schr\"{o}dinger equation in the rigid-rotor approximation, assuming that the kick length is much shorter than the period of the molecular rotation. For simplicity, here we also neglect the effect of the electronic spin (which is discussed in the next section) and take $J\equiv N$ and $E_J\equiv E_N$.
A linearly polarized kick field leaves the molecule in a coherent superposition of states with $\Delta N=\pm2$ and $\Delta M=0$. The numerical solution of the Schr\"{o}dinger equation provides us with the complex amplitudes $c_{N,M}$ of those states right after the kick. The observed $LD_{N,N+2}$ signal, oscillating at the frequency $\Delta E_{N,N+2}$, is proportional to the real part of the product $c_{N,M}c^*_{N+2,M}$, summed over all independent $M$ channels.
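A minimal sketch of this type of calculation is given below. It is not the code used for the results in this work: it works in the delta-kick (impulsive) limit, neglects the electronic spin as in the discussion above, builds $\cos^2\theta$ from the standard rigid-rotor matrix elements of $\cos\theta$, and treats the dimensionless kick strength $P = (\Delta\alpha/4\hbar)\int {\cal E}^2_\text{kick}(t)\,dt$ as an assumed input parameter.
\begin{verbatim}
# Illustrative sketch: impulsive kick of a rigid rotor starting in N = 1
# (spin neglected, Delta M = 0, kick strength P is an assumed parameter).
import numpy as np
from scipy.linalg import expm

def cos_theta_matrix(M, Nmax):
    # <N'|cos(theta)|N> for fixed M, N = |M| .. Nmax (tridiagonal)
    Ns = np.arange(abs(M), Nmax + 1)
    C = np.zeros((len(Ns), len(Ns)))
    for i, N in enumerate(Ns[:-1]):
        C[i, i + 1] = C[i + 1, i] = np.sqrt(
            ((N + 1) ** 2 - M ** 2) / ((2 * N + 1) * (2 * N + 3)))
    return Ns, C

def kick(M, P, Nmax=40):
    # U = exp(-(i/hbar) * int V dt) = exp(+i P cos^2(theta))
    Ns, C = cos_theta_matrix(M, Nmax)
    U = expm(1j * P * (C @ C))
    psi0 = np.zeros(len(Ns), dtype=complex)
    psi0[np.where(Ns == 1)[0][0]] = 1.0   # initial state N = 1 (odd N only)
    return Ns, U @ psi0

P = 2.0                                   # assumed kick strength
coh = 0.0
for M in (-1, 0, 1):                      # equally weighted M channels
    Ns, c = kick(M, P)
    i1, i3 = np.where(Ns == 1)[0][0], np.where(Ns == 3)[0][0]
    coh += (c[i1] * np.conj(c[i3])).real / 3.0
print("N=1 <-> N=3 coherence (proportional to LD_13): %+.4f" % coh)
\end{verbatim}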
\subsection*{Spin-rotational/spin-spin splitting of rotational lines}
Consider the first rotational line $LD_{1,3}$, corresponding to the coherent superposition of $N=1$ (split into $J_1=\left\{0,1,2\right\}$) and $N=3$ (split into $J_3=\left\{2,3,4\right\}$), created by the kick pulse. The absorption of two probe photons on the $a\rightarrow d$ transition must obey the selection rule $\Delta J=0,2$. Therefore, there are five pairs $(J_1,J_3)$ with $J_3 \in \left\{J_1,J_1\pm2,J_1\pm4\right\}$ producing the $LD$ signal at the approximate frequency $\nu^k_{1,3}\approx\SI{2.27}{THz}$: $(0,2),(0,4),(1,3),(2,2)$ and $(2,4)$. Beating of these five frequencies results in the observed oscillations of $LD_{1,3}$ with a minimum around \SI{500}{ps} (see Fig.~\ref{fig-LD_time}).
\end{document}
\begin{document}
\title{P-positions in Modular Extensions to Nim}
\author{Tanya Khovanova \and Karan Sarkar}
\maketitle
\large
\begin{abstract}
In this paper, we consider a modular extension to the game of Nim, which we call $m$-Modular Nim, and explore its optimal strategy. In $m$-Modular Nim, a player can either make a standard Nim move or remove a multiple of $m$ tokens in total. We develop a winning strategy for all $m$ with $2$ heaps and for odd $m$ with any number of heaps.
\end{abstract}
\section{Introduction}
Nim forms the foundation of the mathematical study of two-player strategy games. In his landmark $1901$ paper, \textit{Nim, a game with a complete mathematical theory}, Charles L. Bouton provided a solution to the game of Nim, essentially founding the field of Combinatorial Game Theory \cite{Nim}.
Since Bouton's discovery, many extensions and variants of Nim have been explored, among them Wythoff's Game, Poker Nim and Kayles. These variations often yield winning strategies that bear little resemblance to that of Nim \cite{WinningWays}.
Interestingly, very few if any of these variations use moves predicated upon modular congruence. In this paper, we explore a modular extension to Nim, which we call $m$-Modular Nim, in which moves predicated upon modular congruence are added to the traditional Nim moves.
We start this paper with preliminaries in Section~\ref{prelim}. In Section~\ref{mModularNim}, we introduce the game of $m$-Modular Nim, which is similar to Nim but, in addition to the usual Nim moves, allows a player to remove a positive multiple of $m$ tokens in total from the position.
Section~\ref{2HeapOdd} considers $2$ heap Modular Nim for odd modular bases. Starting with an example for $m = 3$, we prove that the number of P-positions is finite and equal to $m$ for odd $m$. In Section~\ref{2HeapEven}, we expand our result to even values of $m$ by observing a self-similar structure in the set of P-positions. In Section~\ref{sec:explicit} we describe the P-positions explicitly.
In Section~\ref{sec:manyheaps} we describe P-positions in $m$-Modular Nim for any number of heaps and odd $m$.
\section{Preliminaries}\label{prelim}
We will be investigating the broad field of Combinatorial Game Theory (CGT). Roughly speaking, CGT concerns the study of winning strategies in two-player perfect information games. Our exploration of this large topic begins with some basic yet essential definitions \cite{WinningWays}.
\begin{definition}An \emph{impartial combinatorial game} is a two-player game where each player has both the same moves available at each and every point in the game and a complete set of information about the game and the potential moves. \end{definition}
This implies that no randomness such as rolling dice can exist.
\begin{definition}In \emph{normal play}, the first player unable to move is declared the loser. \end{definition}
\begin{definition} We will call a position a \emph{terminal position} if no moves may be made from it. \end{definition}
In general, impartial combinatorial games are analyzed using the notion of P-positions and N-positions. This system of notation allows for games to be solved from the bottom up.
\begin{definition}A \emph{P-position} is a position from which the \emph{previous} player will win given perfect play. The set of P-positions is denoted as $\mathcal{P}$. \end{definition}
We can observe that all terminal positions are P-positions.
\begin{definition}An \emph{N-position} is a position from which the \emph{next} player will win given perfect play. The set of N-positions is denoted as $\mathcal{N}$.\end{definition}
Any position in the game is either a P-position or an N-position. All the moves from any P-position lead to an N-position. On the other hand, from any N-position, there exists some move to a P-position. These observations motivate the following theorem \cite{LessonsInPlay}.
\begin{theorem}Suppose that the positions of a finite impartial game can be partitioned into disjoint sets $\mathcal{A}$ and $\mathcal{B}$ such that:
\begin{enumerate}
\item Every move from a position in $\mathcal{A}$ is to a position in $\mathcal{B}$.
\item Every position in $\mathcal{B}$ has at least one move to a position in $\mathcal{A}$.
\item All terminal positions are elements of $\mathcal{A}$.
\end{enumerate}
Then $\mathcal{A} = \mathcal{P}$ and $\mathcal{B} = \mathcal{N}$.
\end{theorem}
\subsection{Nim}
Nim is the most fundamental impartial combinatorial game. It is one of the earliest \emph{take-away games} and is a testament to the complexity that can arise from simple rules \cite{LessonsInPlay}.
\begin{definition}In the game of \emph{Nim}, each position consists of a set of heaps of tokens. In a move, a player must remove a positive number of tokens from a single heap. \end{definition}
In a Nim-like game with $h$ heaps, we denote positions as ordered $h$-tuples. To describe the set of P-positions in Nim, we need to define the bitwise XOR operation.
\begin{definition} The \textit{bitwise XOR} of two numbers is calculated by writing both numbers in binary and adding them without carrying over. We will use the $\oplus$ symbol to denote the bitwise XOR operation.
\end{definition}
The set of P-positions in Nim is well understood and summarized by the following theorem \cite{Nim}.
\begin{theorem}[Bouton's Theorem]In Nim, $(a_1, \ldots, a_n) \in \mathcal{P}$ if and only if \[\bigoplus_{i=1}^n a_i = 0.\] \end{theorem}
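Bouton's theorem is easy to confirm by brute force on small boards. The following sketch (ours, for illustration only) classifies positions by exhaustive game-tree search and compares the result with the XOR criterion.
\begin{verbatim}
# Brute-force check of Bouton's theorem on small Nim positions.
from functools import lru_cache
from itertools import product

@lru_cache(maxsize=None)
def is_p_position(heaps):
    # P-position iff every move leads to an N-position (terminal => P)
    for i, h in enumerate(heaps):
        for take in range(1, h + 1):
            if is_p_position(heaps[:i] + (h - take,) + heaps[i + 1:]):
                return False
    return True

def xor_all(heaps):
    x = 0
    for h in heaps:
        x ^= h
    return x

assert all((xor_all(p) == 0) == is_p_position(p)
           for p in product(range(8), repeat=3))
print("Bouton's theorem holds for all 3-heap positions with heaps <= 7")
\end{verbatim}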
\section{$m$-Modular Nim}\label{mModularNim}
We will now introduce a natural extension to Nim which we will subsequently discuss in detail. Our game involves loosening the restrictions on Nim moves with conditions based on modular congruence.
\begin{definition}In the game of \emph{$m$-Modular Nim}, each position consists of a set of heaps of tokens, like in Nim. However, we have two types of moves:
\begin{enumerate}[Type I.]
\item Remove a positive number of tokens from a single heap.
\item Remove $km$ tokens total where $k$ is a positive integer.
\end{enumerate}
\end{definition}
In our analysis of $m$-Modular Nim, we introduce some additional notation and positional functions for convenience.
\begin{definition}Let the \emph{heap-sum} of a position $A$ be the total number of tokens. We denote it as $|A|$. \end{definition}
We introduce a partial order on the set of positions to allow ourselves to speak more concisely about important concepts.
\begin{definition}If $A = \left(a_1, a_2, a_3, \ldots, a_k\right)$ and $B = \left(b_1, b_2, b_3, \ldots, b_k\right)$ are positions in $m$-Modular Nim such that $a_i \geq b_i$ for all integers $1 \leq i \leq k$, we say that $A$ \emph{dominates} $B$. Position $A$ \emph{strictly dominates} $B$ if $A$ dominates $B$ and $A$ is not equal to $B$, that is, there exists $i$ such that $a_i > b_i$. We denote domination as $A \succeq B$ and strict domination as $A \succ B$.
Moreover if all members of set $\mathcal{S}$ dominate all members of set $\mathcal{T}$, we say that $\mathcal{S}$ dominates $\mathcal{T}$ or $\mathcal{S} \succeq \mathcal{T}$. Similarly, if all members of $\mathcal{S}$ strictly dominate all members of $\mathcal{T}$, we say that $\mathcal{S}$ strictly dominates $\mathcal{T}$ or $\mathcal{S} \succ \mathcal{T}$. \end{definition}
If a P-position $A$ dominates a P-position $B$, one might expect that there exists an optimal game in which $A$ occurs as a position before $B$. This is true for a $2$-heap game, but is not true for a game with more heaps.
For example, consider a $3$-heap game of $4$-Modular Nim. The P-position $(1,2,2)$ dominates the P-position $(0,1,1)$, but the latter position cannot be reached from the former in any optimal play.
\begin{lemma}\label{DominateTypeII}
A Type II move from position $A$ to position $B$ exists if and only if $|A| \equiv |B| \pmod{m}$ and $A \succ B$. \end{lemma}
\begin{proof}
If a move from position $A$ to position $B$ exists then $A \succ B$. If in addition, this is a Type II move, the total number of tokens is decreased by a multiple of $m$ implying $|A| \equiv |B| \pmod{m}$.
On the other hand, suppose $|A| \equiv |B| \pmod{m}$ and $A \succ B$. Let $A = (a_1,a_2,\ldots,a_n)$ and $B = (b_1,b_2,\ldots,b_n)$. From the $i^\text{th}$ heap in $A$, take away $a_i - b_i$ tokens. Because $A \succ B$, we have that $a_i - b_i \geq 0$, implying that the move is well-defined. Moreover, the total number of tokens removed is divisible by $m$ as $|A| \equiv |B| \pmod{m}$, and it is positive because $A \succ B$; hence it is a positive multiple of $m$ and the move is a legal Type II move.
\end{proof}
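The P-positions studied in the following sections can be checked by brute force for small boards. The sketch below (ours, illustrative only) generates Type II moves using the characterization of Lemma~\ref{DominateTypeII}.
\begin{verbatim}
# Brute-force P-position search for m-Modular Nim on a bounded board.
from functools import lru_cache
from itertools import product

def p_positions(m, heaps, cap):
    @lru_cache(maxsize=None)
    def is_p(pos):
        return not any(is_p(nxt) for nxt in moves(pos))

    def moves(pos):
        # Type I: remove a positive number of tokens from one heap
        for i, h in enumerate(pos):
            for new in range(h):
                yield pos[:i] + (new,) + pos[i + 1:]
        # Type II: any strictly dominated position with the same
        # heap-sum modulo m (cf. the lemma above)
        for nxt in product(*(range(h + 1) for h in pos)):
            if sum(nxt) < sum(pos) and (sum(pos) - sum(nxt)) % m == 0:
                yield nxt

    return {p for p in product(range(cap + 1), repeat=heaps) if is_p(p)}

print(sorted(p_positions(3, 2, 12)))  # [(0, 0), (1, 1), (2, 2)]
print(sorted(p_positions(4, 3, 6)))   # contains (0, 1, 1) and (1, 2, 2)
\end{verbatim}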
\section{$2$ Heap $m$-Modular Nim for Odd $m$}\label{2HeapOdd}
Rather than dealing with any number of heaps, we will start with $2$ heaps and odd $m$.
\subsection{An Example: $m=3$.}
\begin{example}[$m = 3$]
The P-positions of $3$-Modular Nim are the ordered pairs:
\begin{center}
$(0,0),$ \\
$(1,1),$ \\
$(2,2).$
\end{center}
Let the specified set be $\mathcal{S}$. We can manually verify that no two members of $\mathcal{S}$ are connected by a legal move. Thus, it suffices to show that from any position $(a,b) \notin \mathcal{S}$ there is a move to an element of $\mathcal{S}$.
\begin{enumerate}
\item Suppose that $\min(a,b) < 3$, then a Type I move must exist.
\item On the other hand, if $\min(a,b) \geq 3$, then $(a,b) \succ \mathcal{S}$. By Lemma \ref{DominateTypeII}, a Type II move must exist because all residue classes modulo $3$ are covered by $\mathcal{S}$.
\end{enumerate}
\end{example}
Figure~\ref{2HeapMod3} displays the P-positions in 3-Modular Nim on a coordinate grid.
\begin{figure}
\caption{P-positions in $3$-Modular Nim.}
\label{2HeapMod3}
\end{figure}
This pattern of P-positions for $m = 3$ suggests that a similar structure exists for other values of $m$. This motivates a generalization to all odd integers.
\begin{theorem}\label{Odd2HeapPPosition}For odd $m$, a position of $m$-Modular Nim with $2$ heaps is a P-position if and only if it is of the form $(i,i)$ for an integer $i$ with $0 \leq i < m$. \end{theorem}
\begin{proof}
Let the set of specified positions be $\mathcal{S}$. We first prove that no move exists between any of the specified positions. Because no distinct positions have a heap size in common, no Type I move exists. Suppose that a Type II move exists between distinct positions $(i,i)$ and $(j,j)$ where $0 \leq i,j < m$. By Lemma~\ref{DominateTypeII}, we must have that:
\[2i \equiv 2j \pmod{m}. \]
Because $\gcd(2,m) = 1$, we may divide both sides by 2:
\[i \equiv j \pmod{m}. \]
Because $0 \leq i,j < m$, we have that $i = j$, contradicting the assumption of distinctness.
Now we must prove that for any position $(a,b) \notin \mathcal{S}$ there is a move to a position in $\mathcal{S}$.
\begin{enumerate}
\item Suppose that $\min(a,b) < m$, then a Type I move must exist.
\item On the other hand, if $\min(a,b) \geq m$, then $(a,b) \succ \mathcal{S}$. Therefore, a Type II move must exist because all residue classes modulo $m$ are covered by $\mathcal{S}$.
\end{enumerate}
\end{proof}
\section{$2$ Heap $m$-Modular Nim for Any $m$}\label{2HeapEven}
\subsection{Another Example: $m=6$.}
We consider an example of $m$-Modular Nim, where $m$ is an even integer: $m = 6$.
\begin{example}[$m = 6$] \label{m=6}
We claim that the set of P-positions for $m = 6$ is the following set:
\begin{center}
$(0,0)$ \\
$(1,1)$ \\
$(2,2)$ \\[4pt]
$(3,4) \qquad (4,3)$ \\
$(5,6) \qquad (6,5)$ \\
$(7,8) \qquad (8,7).$
\end{center}
Let the specified set be $\mathcal{S}$. As before, we can manually verify that no Type I or Type II moves connect any two members of $\mathcal{S}$.
To show that $\mathcal{S}$ is the set of all P-positions, we must now show that for any position $(a,b) \notin \mathcal{S}$ there is a move to an element of $\mathcal{S}$.
\begin{enumerate}
\item Suppose that $\min(a,b) \leq 8$; by the symmetry of $\mathcal{S}$ we may assume $a \leq b$. If the unique member of $\mathcal{S}$ with first coordinate $a$ is dominated by $(a,b)$, we may reach it with a Type I move on the second heap. The only positions not covered in this way are $(3,3)$, $(5,5)$ and $(7,7)$, which admit Type II moves to $(0,0)$, $(2,2)$ and $(1,1)$ respectively.
\item On the other hand, if $\min(a,b) > 8$, we have that $(a,b) \succ \mathcal{S}$. Because $\mathcal{S}$ contains positions with each possible total heap-sum modulo $6$, there must exist a Type II move.
\end{enumerate}
\end{example}
We can see three distinct groups of P-positions for $m = 6$. The first group has both heaps equal in size and coincides with the P-positions of $3$-Modular Nim. The second and third groups can be viewed as the P-positions of $3$-Modular Nim scaled by a factor of two and then shifted by $(3,4)$ and $(4,3)$ respectively.
This idea is further elucidated by Figure~\ref{2HeapMod6}. The red sections indicate the locations of the second and third groups, which are scaled and shifted replicas of the pattern in Figure~\ref{2HeapMod3}. This nesting structure is essential in finding a formula for the set of P-positions.
\begin{figure}
\caption{P-positions in $6$-Modular Nim.}
\label{2HeapMod6}
\end{figure}
To make this nesting pattern clearer, we display the set of P-positions in $12$-Modular Nim in Figure~\ref{2HeapMod12}. The red section indicates one of the two embedded scaled and shifted replicas of the P-positions in $6$-Modular Nim shown in Figure~\ref{2HeapMod6}, whereas the green section shows a doubly nested copy of the P-positions in $3$-Modular Nim. Note that because we are chiefly concerned with the recursive structure, the individual labels have been removed.
\begin{figure}
\caption{P-positions in $12$-Modular Nim.}
\label{2HeapMod12}
\end{figure}
\subsection{Potential P-positions}\label{sec:potentialpositions}
In order to formalize the previous notion of nesting, we will recursively define the positions that we later prove to be P-positions for any value of $m$.
\begin{definition}
\[\mathcal{A}_m = \left \{
\begin{array}{ll}
\hspace{5pt} (i,i) \text{ where } 0 \leq i < m & \hspace{112pt} : m \text{ is odd} \\
\hspace{5pt} (i,i) \text{ where } 0 \leq i < \frac{m}{2} & \hspace{112pt}: m \text{ is even.}
\end{array}
\right. \]
We will refer to any member of $\mathcal{A}_m$ as a \emph{trunk position}.
\end{definition}
Note that the P-positions in $6$-Modular Nim (from Example \ref{m=6}) that appear before the ``splitting'' are trunk positions.
\begin{definition}
\[ \mathcal{B}_m = \left \{
\begin{array}{ll}
\hspace{5pt} \varnothing &: m \text{ is odd} \\
\begin{array}{l}\left(2a + \frac{m}{2} + 1, 2b + \frac{m}{2}\right) \\ \left(2a + \frac{m}{2}, 2b + \frac{m}{2} + 1\right) \end{array}
\text{ where } (a,b) \in \mathcal{B}_{\frac{m}{2}}\cup \mathcal{A}_{\frac{m}{2}} &: m \text{ is even.}
\end{array}
\right. \]
We will refer to any member of $\mathcal{B}_m$ as a \emph{branch position}.
\end{definition}
Note that the P-positions in $6$-Modular Nim (from Example \ref{m=6}) that appear after the ``splitting'' are branch positions. Also note that both positions in $\mathcal{B}_m $ generated from $(a,b)$ have the same sum of coordinates.
We seek to prove that the set of P-positions in $m$-Modular Nim is $\mathcal{Q}_{m} = \mathcal{A}_{m} \cup \mathcal{B}_{m}$.
\begin{definition} We call the elements of $\mathcal{Q}_{m}$ \emph{potential positions}. \end{definition}
Note that the recursion that builds $\mathcal{Q}_{2m}$ from $\mathcal{Q}_{m}$ allows us to provide a recursion for the number of elements in $\mathcal{Q}_{m}$:
\[|\mathcal{Q}_{2m}| = m+ 2|\mathcal{Q}_{m}|.
\]
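The recursive definitions translate directly into a short computation. The following minimal sketch (ours, in Python; the function names are only illustrative) builds the trunk, branch, and potential positions and checks the cardinality recursion above for a few moduli.
\begin{verbatim}
# Build the potential positions Q_m = A_m (trunk) union B_m (branches)
# following the recursive definitions above.
def trunk(m):
    # A_m: (i, i) for 0 <= i < m if m is odd, and 0 <= i < m/2 if m is even
    bound = m if m % 2 == 1 else m // 2
    return {(i, i) for i in range(bound)}

def branches(m):
    # B_m: empty for odd m; for even m, generated from Q_{m/2}
    if m % 2 == 1:
        return set()
    half = m // 2
    prev = trunk(half) | branches(half)          # Q_{m/2}
    out = set()
    for (a, b) in prev:
        out.add((2 * a + half + 1, 2 * b + half))
        out.add((2 * a + half, 2 * b + half + 1))
    return out

def potential_positions(m):
    return trunk(m) | branches(m)                # Q_m

# |Q_6| = 9 and |Q_{2m}| = m + 2 |Q_m|.
assert len(potential_positions(6)) == 9
for m in (3, 6, 12):
    assert len(potential_positions(2 * m)) == m + 2 * len(potential_positions(m))
\end{verbatim}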
Because of the recursive doubling involved in generating trunk and branch positions, it is natural to consider the number of times that this doubling can occur. In other words, we wish to count the number of times $2$ divides $m$.
\begin{definition}Define the \emph{$2$-adic order} of $m$ as the largest integer $n$ such that $2^n$ divides $m$. We denote this arithmetic function by $\nu_2(m)$ \cite{NumberTheory}.\end{definition}
Now we prove a series of preliminary results.
The following lemma shows that a number may appear in a potential position in a particular coordinate at most once.
\begin{lemma} \label{2HeapTypeI}
If $(a,b)$ and $(a,c) \in \mathcal{Q}_k$, then $b = c$. \end{lemma}
\begin{proof}
We proceed by induction on the $2$-adic order of $k$. For the base case, suppose that $k$ is odd. From the definition of $\mathcal{Q}_k$, where $k$ is odd, no two distinct positions share a heap size, because all positions are of the form $(i,i)$ where $0 \leq i < k$.
For the inductive step, suppose that the lemma is true for $\mathcal{Q}_k$. We wish to prove that if $(a,b)$ and $(a,c) \in \mathcal{Q}_{2k}$, then $b = c$. We will split this into cases:
\begin{enumerate}
\item Suppose that both $(a,b)$ and $(a,c)$ are trunk positions. No two trunk positions share a heap size because they are of the form $(i,i)$ for different $i$.
\item Suppose one of the positions $(a,b)$ and $(a,c)$ is a trunk position and the other is a branch position. Every coordinate of a trunk position in $\mathcal{Q}_{2k}$ is less than $k$, while every coordinate of a branch position is at least $k$. Therefore, a branch position and a trunk position cannot share a coordinate.
\item Suppose that both $(a,b)$ and $(a,c)$ are elements of $\mathcal{B}_{2k}$. If $a$ has the same parity as $k$, then $a = 2a^\prime + k$. Thus, we may write $b = 2b^\prime + k + 1$ and $c = 2c^\prime + k + 1$ where $\left(a^\prime, b^\prime\right)$ and $\left(a^\prime, c^\prime\right)$ are in $\mathcal{Q}_{k}$. By the inductive hypothesis, $b^\prime = c^\prime$. Therefore, $b = c$.
A symmetric argument works when $a$ and $k$ have opposite parities.
\end{enumerate}
Thus, we are done by induction.
\end{proof}
We can strengthen Lemma~\ref{2HeapTypeI} by showing that the set of integers allowed to be a coordinate of a potential position only consists of consecutive numbers.
\begin{lemma}\label{2HeapPPositionCount}
The integers between $0$ and $k\left(\frac{\nu_2(k)}{2} + 1\right) -1$ inclusive are precisely the first coordinates appearing in the set of potential positions $\mathcal{Q}_k$. By symmetry, the same is true for the second coordinates.\end{lemma}
\begin{proof}
As before, we induct on the $2$-adic order of $k$. Our base case is the odd integers. In Theorem \ref{Odd2HeapPPosition}, we defined the set of potential positions for odd integers $k$ to be all $(i,i)$ where $0 \leq i < k$. This concludes the base case, as $\nu_2(k)=0$.
For the inductive step, consider $\mathcal{Q}_{2k}$. For trunk positions, the first coordinate ranges over all numbers from $0$ to $k - 1$ inclusive. For branch positions, it ranges over all numbers of the form $2a+k$ and $2a+k+1$, where $a$ is a first coordinate of an element of $\mathcal{Q}_k$.
By induction, $a$ is between 0 and $k\left(\frac{\nu_2(k)}{2} + 1\right) -1$ inclusive. Thus the new branch positions for $2k$ have first coordinates in the range between $k$ and
\begin{align*}
2\left(k\left(\frac{\nu_2(k)}{2} + 1\right) -1 \right) + k + 1 \, &= \, 2k\left(\frac{\nu_2(k)}{2} + 1\right) + k - 1 \\
&= \, 2k\left(\frac{\nu_2(k)}{2} + 1 + \frac{1}{2} \right) -1 \\
&= \, 2k\left(\frac{\nu_2(2k)}{2} + 1 \right) -1.
\end{align*}
Moreover, as $a$ runs over consecutive integers, the numbers $2a+k$ and $2a+k+1$ together cover every integer in this range. Therefore, an integer is between $0$ and $2k\left(\frac{\nu_2(2k)}{2} + 1\right) -1$ inclusive if and only if it appears as a first coordinate in the set of potential positions $\mathcal{Q}_{2k}$, completing the induction.
\end{proof}
With Lemma~\ref{2HeapTypeI} and Lemma~\ref{2HeapPPositionCount}, we can now explicitly determine the number of potential positions.
\begin{corollary}\label{thm:totalcount}
\[|\mathcal{Q}_m| = m\left(\frac{\nu_2(m)}{2} + 1\right).\] \end{corollary}
The following lemma shows that two potential positions that share a remainder modulo $m$ have the same sum.
\begin{lemma}\label{2HeapTypeII} If $(a,b)$ and $(c,d) \in \mathcal{Q}_k$ and $a + b \equiv c + d \pmod{k}$, then $a + b = c + d$. \end{lemma}
\begin{proof}
We proceed by induction on $\nu_2(k)$. For the base case, suppose that $k$ is odd. All potential positions are of the
form $(i,i)$ where $0 \leq i < k$. Let $(a,b) = (i,i)$ and $(c,d) = (j,j)$. We can rewrite our condition as:
\[2i \equiv 2j \pmod{k}. \]
Because $\gcd(2,k) = 1$, we may divide both sides by $2$:
\[i \equiv j \pmod{k}. \]
Because $0 \leq i,j < k$, we have that $i = j$. Thus, $a + b = c + d$ when $k$ is odd.
For the inductive hypothesis, assume the lemma is true for $k$. We seek to now prove the corresponding statement for $2k$. We will divide this into cases:
\begin{enumerate}
\item Suppose that both $(a,b)$ and $(c,d)$ are trunk positions in $\mathcal{Q}_{2k}$. Because trunk positions are of the form $(i,i)$ where $0 \leq i < k$, we have that $(a,b) = (i,i)$ and $(c,d) = (j,j)$ where $0 \leq i,j < k$. We can rewrite our condition as:
\[2i \equiv 2j \pmod{2k}. \]
Because $\gcd(2,2k) = 2$, we may divide both sides by $2$ only if we also divide the modulus by $2$:
\[i \equiv j \pmod{k}. \]
Because $0 \leq i,j < k$, this implies that $i = j$, finishing this case.
\item Suppose one of $(a,b)$ and $(c,d)$ is a trunk position and the other is a branch position in $\mathcal{Q}_{2k}$. The heap-sum of a trunk position is even and the heap-sum of a branch position is odd. Therefore, they cannot have the same remainder modulo an even number $2k$.
\item Suppose that both $(a,b)$ and $(c,d)$ are branch positions in $\mathcal{Q}_{2k}$. Therefore, $(a,b)$ is equal to $(2a^\prime + k, 2b^\prime + k + 1)$ or $(2a^\prime + k + 1, 2b^\prime + k)$ where $\left(a^\prime, b^\prime\right)$ is a position in $\mathcal{Q}_{k}$. In either case,
\[a + b = 2a^\prime + 2b^\prime + 2k + 1.\]
Similarly,
\[c + d = 2c^\prime + 2d^\prime + 2k + 1,\]
with $\left(c^\prime, d^\prime\right)$ in $\mathcal{Q}_{k}$. We may now rewrite our given condition
\[a + b \equiv c + d \pmod{2k}\]
as
\[2a^\prime + 2b^\prime \equiv 2c^\prime + 2d^\prime \pmod{2k}.\]
Because $\gcd(2,2k) = 2$, we may divide both sides by $2$ only if we also divide the modulus by $2$:
\[ a^\prime + b^\prime \equiv c^\prime + d^\prime \pmod{k}. \]
By the inductive hypothesis, $a^\prime + b^\prime = c^\prime + d^\prime$. Therefore, $a + b = c + d$.
\end{enumerate}
Thus, we are done by induction.
\end{proof}
\begin{corollary}\label{cor:trunkremainders}
The heap-sums of trunk positions in $\mathcal{Q}_k$ have distinct remainders modulo $k$.
\end{corollary}
We are now ready to prove that every position not in $\mathcal{Q}_m$ has a move to a position in $\mathcal{Q}_m$ in the game of $m$-Modular Nim.
\begin{lemma}\label{MovesFromNotQtoQ}Every position not in $\mathcal{Q}_m$ has a move to an element of $\mathcal{Q}_m$ in the game of $m$-modular Nim.\end{lemma}
\begin{proof}
Let $(a,b) \notin \mathcal{Q}_k$ be any position, where $k$ denotes the modulus. We will, as usual, induct on the $2$-adic order of $k$. Our base case is when $k$ is odd. By Theorem~\ref{Odd2HeapPPosition}, $\mathcal{Q}_k$ is the complete set of P-positions in $k$-Modular Nim. Therefore, all other positions are N-positions, and by the definition of an N-position, every N-position has a move to a P-position.
For the inductive step, assume that this lemma is true for $\mathcal{Q}_k$ in the game of $k$-Modular Nim. We want to show the corresponding statement for $\mathcal{Q}_{2k}$ in the game of $2k$-Modular Nim. Again we will use casework. Assume without loss of generality that $a \leq b$.
\begin{enumerate}
\item Suppose that $0 \leq a < k$. Then, we have $(a,a) \in \mathcal{Q}_{2k}$. Therefore, there is a Type I move from $(a,b)$ to $(a,a)$, a member of $\mathcal{Q}_{2k}$.
\item Suppose that $k \leq a \leq b$ and $a \equiv b \pmod{2}$. The heap-sums of trunk positions in $\mathcal{Q}_{2k}$ form a set of $k$ even integers. By Corollary~\ref{cor:trunkremainders} there exists a trunk position with any given even remainder modulo $2k$. Therefore, there exists a trunk position with the same even remainder modulo $2k$ as $a+b$. As $(a,b)$ dominates $(k,k)$, it also strictly dominates any trunk position. Therefore by Lemma \ref{DominateTypeII}, there exists a Type II move from $(a,b)$ to the trunk position with the same remainder.
\item Suppose that $k \leq a \leq b$ and $a \equiv k \pmod{2}$ while $b \equiv k + 1 \pmod{2}$. Consider the position:
\[ \left(a^\prime, b^\prime\right) = \left(\frac{a - k}{2}, \frac{b - k - 1}{2} \right) \]
Note that $\left(a^\prime, b^\prime\right)$ is not an element of $\mathcal{Q}_k$, as $\left(2a^\prime + k, 2b^\prime + k + 1\right) = (a,b)$ is not an element of $\mathcal{Q}_{2k}$. Therefore, by the inductive hypothesis there exists a position $\left(q_1,q_2\right) \in \mathcal{Q}_{k}$ that can be reached by a move from $\left(a^\prime, b^\prime\right)$ in the game of $k$-Modular Nim.
Thus, there exists a move from $(a,b) = \left(2a^\prime + k, 2b^\prime + k + 1\right)$ to $\left(2q_1 + k,2q_2 + k + 1\right)$ in the game of $2k$-Modular Nim: removing twice as many tokens from each heap turns a Type I move into a Type I move, and a Type II move removing a multiple of $k$ tokens into a Type II move removing a multiple of $2k$ tokens. Note that $\left(2q_1 + k,2q_2 + k + 1\right)$ is an element of $\mathcal{Q}_{2k}$. This finishes this case.
\item Suppose that $k \leq a \leq b$ and $a \equiv k + 1\pmod{2}$ while $b \equiv k \pmod{2}$. We are done by a similar symmetric argument to the previous case.
\end{enumerate}
This completes the induction. \end{proof}
These results enable us to determine a recursive definition of P-positions in $m$-Modular Nim with $2$ heaps.
\begin{theorem}If $\mathcal{P}_m$ is the set of P-positions for $m$-Modular Nim with $2$ heaps, then $\mathcal{P}_m = \mathcal{Q}_m$. \end{theorem}
\begin{proof}We must first prove that no move exists between any two elements of $\mathcal{Q}_m$. Let $(a,b)$ and $(c,d)$ be distinct elements of $\mathcal{Q}_m$. If a Type I move exists between $(a,b)$ and $(c,d)$, we may assume without loss of generality that $a = c$. Thus $(a,b)$ and $(a,d)$ are distinct elements of $\mathcal{Q}_m$. However, Lemma \ref{2HeapTypeI} then gives $b = d$, which contradicts distinctness.
Now suppose that a Type II move exists. By Lemma \ref{DominateTypeII}, we have that $a + b \equiv c + d \pmod{m}$, and hence Lemma \ref{2HeapTypeII} gives $a + b = c + d$. However, every move strictly decreases the heap-sum, so no such move exists.
Moreover by Lemma \ref{MovesFromNotQtoQ}, every position not in $\mathcal{Q}_m$ has a move to an element of $\mathcal{Q}_m$. Therefore, $\mathcal{P}_m = \mathcal{Q}_m$. \end{proof}
We are now ready to describe the set of P-positions explicitly.
\section{Explicit Description of P-positions}\label{sec:explicit}
Let $m = k \cdot 2^n$ where $k$ is odd. The P-positions in the game of $m$-Modular Nim are built recursively from the positions of $k$-Modular Nim, by using the recursion described in Section~\ref{sec:potentialpositions}. The recursion is used $n$ times.
The recursion procedure is similar for all $m$ with the same 2-adic order. Figure~\ref{fig:8mod} depicts P-positions for 8-Modular Nim to emphasize the branching.
\begin{figure}
\caption{P-positions for 8-Modular Nim.}
\label{fig:8mod}
\end{figure}
\begin{definition}\label{Branch Level}The \textit{$i$-level branch} is the set of P-positions that are generated from the trunk of $\mathcal{Q}_{k\cdot 2^{n-i}}$ using $i$ splitting procedures as described in Section~\ref{sec:potentialpositions}.\end{definition}
The P-positions in the $i$-level branch are of the form
\[(2^ia + f_im + b_1, 2^ia + f_im + b_2),\]
where $f_i$ is the coefficient by which $m$ is multiplied after completing $i$ splitting procedures. Moreover, we have $0 \leq a < k \cdot 2^{n-i-1}$, for $i < n$, and $0 \leq a < k$, for $i=n$.
Let us trace through the recursion to find $f_i$. The $i$-level branch is recursively generated from the trunk of $\mathcal{Q}_{\frac{m}{2^i}}$. After the first splitting the additive constant is $\frac{m}{2^i}$. After the next splitting it is \[2\cdot \frac{m}{2^i} + \frac{m}{2^{i-1}}=2 \cdot \frac{m}{2^{i-1}}.\] Repeating again we get \[2\cdot \frac{m}{2^{i-2}} + \frac{m}{2^{i-2}}=3 \cdot \frac{m}{2^{i-2}}.\] Continuing in this way, after all $i$ splittings the constant equals $i\,\frac{m}{2}$; that is, $f_i = \frac{i}{2}$.
We can also calculate that the largest possible value for $b_1$ is $2^i-1$ and the smallest is $0$; the same is true for $b_2$. This yields the following lemma.
\begin{lemma}
The P-positions within the $i$-level branch correspond to all possible values for $0 \leq b_1 < 2^i$ and $b_2 = 2^i-1-b_1$.
\end{lemma}
\begin{proof}
Fix $a$. The P-positions within the $i$-level branch corresponding to this $a$ all have the same heap-sum, they have pairwise distinct first coordinates, and there are $2^i$ of them. Since $0 \leq b_1 \leq 2^i-1$, the value of $b_1$ must therefore run over exactly $0,1,\ldots,2^i-1$, and the common heap-sum forces $b_2 = 2^i-1-b_1$.\end{proof}
Combining all the results together, we deduce the following theorem.
\begin{theorem}
The P-positions of $m$-Modular Nim are of the form
\[\left(2^ia + \frac{im}{2} + b, 2^ia + \frac{im}{2} + 2^i-1-b\right),\]
where $0 \leq i \leq n$ and $0 \leq b < 2^i$. In addition, $0 \leq a < k \cdot 2^{n-i-1}$, for $i < n$, and $0 \leq a < k$, for $i = n$.
\end{theorem}
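As a consistency check, summing the number of positions over the levels recovers Corollary~\ref{thm:totalcount}: for $i<n$ the $i$-level branch contains $k\,2^{n-i-1}\cdot 2^{i} = k\,2^{n-1}$ positions, and the $n$-level branch contains $k\cdot 2^{n}$, so in total
\[
n\,k\,2^{n-1} + k\, 2^{n} \, = \, m\left(\frac{\nu_2(m)}{2}+1\right).
\]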
Note that the 0-level branch corresponds to the trunk and, for $n \geq 1$, the formula correctly produces positions of the form:
\[(a,a)\]
for $0 \leq a < k \cdot 2^{n-1} = \frac{m}{2}$ (when $m$ is odd, that is $n = 0$, the trunk consists of $(a,a)$ for $0 \leq a < m$). Similarly, the 1-level branch positions are of the form:
\[\left(2a + \frac{m}{2} + 1, 2a + \frac{m}{2}\right) \text{ and } \left(2a + \frac{m}{2}, 2a + \frac{m}{2}+1\right),\]
for $0 \leq a < k \cdot 2^{n-2}$ when $n \geq 2$, and $0 \leq a < k$ when $n = 1$.
We see that, in the list of all P-positions, the coordinates of positions with larger heap-sums lie above the coordinates of positions with smaller heap-sums.
\begin{corollary}
Any P-position with a greater sum dominates every P-position with a lower sum.
\end{corollary}
In addition, for $i < n$, on level $i$ the first coordinate ranges from $f_im=i\frac{m}{2}$ to \[f_im+\frac{m}{2}-1=(i+1)\frac{m}{2}-1 = f_{i+1}m- 1.\]
This further confirms Lemma~\ref{2HeapPPositionCount} in that the numbers in the first and second coordinates of P-positions are consecutive. We can also see that the largest number in the range is $2^nk+n\frac{m}{2}-1$, which, not surprisingly, matches Corollary~\ref{thm:totalcount}.
The description above allows us to calculate the second coordinate of the P-position, given the first coordinate.
\begin{lemma}
Let $m = k \cdot 2^n$ with $k$ odd. Suppose the first coordinate of an $m$-Modular Nim P-position is $x$, and set $i = \min\left(\left\lfloor \frac{2x}{m}\right\rfloor, n\right)$. Then the second coordinate is
\[ 2^{i+1} \left\lfloor \frac{x- i\frac{m}{2}}{2^{i}}\right\rfloor + i m + 2^{i}-1 -x.\]
\end{lemma}
\begin{proof}
Suppose the first coordinate is $x$. For $i < n$ the $i$-level branch occupies the first coordinates in $\left[i\frac{m}{2}, (i+1)\frac{m}{2}\right)$, so there the level is $\left\lfloor \frac{2x}{m}\right\rfloor$; the $n$-level branch occupies $\left[n\frac{m}{2}, n\frac{m}{2}+m\right)$, where $\left\lfloor \frac{2x}{m}\right\rfloor$ equals $n$ or $n+1$. In all cases the level is $i = \min\left(\left\lfloor \frac{2x}{m}\right\rfloor, n\right)$. We can then calculate $a$ in the formula as $\left\lfloor \frac{x-im/2}{2^i}\right\rfloor$. Therefore, the second coordinate is:
\[ 2^{i+1}a + im + 2^i-1 -x.\]\end{proof}
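The closed form is easy to check numerically. The following sketch (ours, in Python; the function name is only illustrative) implements the formula of the lemma and verifies it on a few P-positions of $6$- and $12$-Modular Nim computed from the recursive definition of $\mathcal{Q}_m$.
\begin{verbatim}
def second_coordinate(x, m):
    # Given the first coordinate x of a P-position of m-Modular Nim,
    # recover the second coordinate via the closed form above.
    n = (m & -m).bit_length() - 1      # nu_2(m), the 2-adic order of m
    i = min((2 * x) // m, n)           # branch level containing x
    a = (x - i * m // 2) // 2 ** i
    return 2 ** (i + 1) * a + i * m + 2 ** i - 1 - x

assert second_coordinate(0, 6) == 0     # trunk position (0, 0)
assert second_coordinate(3, 6) == 4     # branch position (3, 4)
assert second_coordinate(6, 6) == 5     # branch position (6, 5)
assert second_coordinate(18, 12) == 17  # branch position (18, 17)
\end{verbatim}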
\section{$m$-Modular Nim for Odd $m$ and any Number of Heaps}\label{sec:manyheaps}
We will now generalize the results from Theorem~\ref{Odd2HeapPPosition} to any number of heaps.
\begin{theorem}Let $a = (a_1,a_2,\ldots,a_n)$ be a position in $m$-Modular Nim with $n$ heaps, where $m$ is odd. It is a P-position if and only if:
\begin{enumerate}
\item $|a| < 2m$
\item $\oplus_i a_i = 0$.
\end{enumerate}\end{theorem}
\begin{proof}
Let $\mathcal{Q}$ denote the specified set. We will first show that no move exists between any two positions in $\mathcal{Q}$. By Bouton's Theorem, no Type I move exists between two positions with zero bitwise XOR.
Because the heap-sum of all members of $\mathcal{Q}$ is less than $2m$, any Type II move must subtract exactly $m$ tokens total. However, all members of $\mathcal{Q}$ have an even heap-sum as the bitwise XOR of their heap sizes is $0$. Because $m$ is odd, no two elements of $\mathcal{Q}$ can be connected by a Type II move.
We must now show that every position $p=(p_1,p_2,\ldots,p_n)$ not in $\mathcal{Q}$ has a move to an element of $\mathcal{Q}$.
\begin{enumerate}
\item Suppose that $p$ is not a P-position in Nim and there is a Nim-move from $p$ to a Nim P-position $p^\prime$ such that $|p^\prime| < 2m$. Then $p^\prime \in \mathcal{Q}$ and we have found our move.
\item Suppose that $p$ is not a P-position in Nim and there is a Nim-move from $p$ to a Nim P-position $q$ such that $|q| \geq 2m$. There exists an integer $k \geq 1$ such that $0 \leq |p| -km < 2m$ and $|p| -km$ is even: the two values of $k$ for which $|p|-km$ lies in $[0,2m)$ differ by one, and since $m$ is odd the corresponding values of $|p|-km$ have opposite parities. Moreover, for any Nim P-position $q$ with $|q| \geq 2$ there is a Nim P-position dominated by $q$ whose heap-sum is $|q|-2$: if $q$ has two odd heaps, remove one token from each of them; otherwise all nonempty heaps are even, and removing one token from each of two nonempty heaps of minimal $2$-adic order preserves the zero bitwise XOR. Iterating, for every even heap-sum less than $|q|$ there exists a Nim P-position dominated by $q$ with that heap-sum. In particular, there exists a Nim P-position $p^\prime$ such that $|p^\prime| = |p| -km$ and $q$ dominates $p^\prime$. Therefore, $p$ dominates $p^\prime$, and $p^\prime$ is reachable from $p$ by a Type II move.
\item Suppose that $p$ is a P-position in Nim, and $|p| \geq 2m$. A similar argument to the one above shows that there exists a P-position of Nim $p^\prime \in \mathcal{Q}$ that is reachable from $p$ via a Type II move.
\end{enumerate} \end{proof}
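For example, with $m=3$ and three heaps, $(0,2,2)$ is a P-position, whereas the Nim P-position $(1,2,3)$ is not: its heap-sum equals $2m$, and the Type II move removing all six tokens reaches the P-position $(0,0,0)$.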
\end{document}
\begin{document}
\title[]{On semi-Markov processes and their Kolmogorov's integro-differential equations}
\author[E. Orsingher]{Enzo Orsingher$^{1}$}
\author[C. Ricciuti]{Costantino Ricciuti$^{1,\text{\textsection}}$}
\address[1]{Dipartimento di Scienze Statistiche, Sapienza - Universit\`{a} di Roma}
\author[B. Toaldo]{Bruno Toaldo$^{2}$}
\address[2]{Dipartimento di Matematica e Applicazioni ``Renato Caccioppoli'' - Universit\`{a} degli Studi di Napoli Federico II}
\email[\textsection Corresponding Author]{[email protected]}
\keywords{Semi-Markov processes, time-changed processes, subordinators, non-homogeneous subordinators, integro-differential equations, Bernstein functions, fractional equations}
\date{\today}
\subjclass[2010]{60K15, 60J25, 60G51}
\begin{abstract}
Semi-Markov processes are a generalization of Markov processes since the exponential distribution of time intervals is replaced with an arbitrary distribution. This paper provides an integro-differential form of the Kolmogorov's backward equations for a large class of homogeneous semi-Markov processes, having the form of an abstract Volterra integro-differential equation. An equivalent evolutionary (differential) form of the equations is also provided. Fractional equations in the time variable are a particular case of our analysis. Weak limits of semi-Markov processes are also considered and their corresponding integro-differential Kolmogorov's equations are identified.
\end{abstract}
\maketitle
\tableofcontents
\section{Introduction}
Semi-Markov processes were introduced by L\'evy \cite{levysemi} and Smith \cite{smith} in order to overcome the limitation imposed by the exponential distribution of the corresponding time intervals. In these papers jump semi-Markov processes were considered, i.e., jump processes with a waiting time between jumps which is not necessarily given by an exponential random variable. This is the immediate generalization of Markov chains since the Markov property is the typical consequence of the lack of memory of the exponential distribution. The general theory of semi-Markov processes was then developed by Pyke \cite{pykefinite, pykeinfinite}. The first generalization of the Kolmogorov's equations to the semi-Markov case was given in Feller \cite{fellersemi}. In this paper the author provided an integral form for the backward equation when a semi-Markov process runs on a countable state-space, but from the discussion in \cite{fellersemi} it is clear that the generalization of such equations to an arbitrary state space was not far off. In successive years, indeed, the theory of semi-Markov processes was further developed (e.g. Cinlar \cite{cinlarsemi, cinlar}, Gihman and Skorohod \cite{gihman}, Jacod \cite{jacod}). Recent developments of the theory can be found in Harlamov \cite{harlamov} and in Korolyuk and Swishchuk \cite{koro}, in which semi-Markov processes are discussed in full generality.
A large class of semi-Markov processes can be equivalently constructed as time-changed Markov processes. This fact is well formalized in Kurtz \cite{kurtz} in the case where the waiting times between jumps have finite mean. A more general approach is proposed in Kaspi and Maisonneuve \cite{kaspi} by assuming a Markov additive process $(A_t, D_t)$ and defining $X(t) = A(L(t))$ where $L(t)$ is the hitting time process of $D_t$.
In the present paper we indeed consider semi-Markov processes which can be obtained as time-changed Markov processes. In recent years Baeumer and Meerschaert \cite{fracCauchy}, Meerschaert and Scheffler \cite{meertri}, and Meerschaert et al. \cite{meerpoisson} considered Markov processes time-changed via an independent inverse of an $\alpha$-stable subordinator and showed that these processes are governed by time-fractional equations, which are very popular in applications (e.g. \cite{meerbook, metzler} for a review of possible applications or \cite{hairer} for a recent development; see \cite{grot1, grot2} for a different probabilistic approach related to the fractional diffusion equation). When the same thing is done with a more general independent inverse subordinator, the equations become more general integro-differential equations (Kochubei \cite{kochu}, Kolokoltsov \cite{kololast, KoloCTRW, kolokoltsov}, Toaldo \cite{toaldopota, toaldodo}) or pseudo-differential in the time variable \cite{meertri}. These processes can often be viewed as semi-Markov processes, as discussed in Meerschaert and Straka \cite{meerstra}. Hence this suggests that there is a strong relationship between integro-differential equations and the Kolmogorov's equations of semi-Markov processes. In the present paper such a relationship is clearly established in a very general framework. The Kolmogorov's equations are first investigated when the semi-Markov processes have stepped paths. Such equations turn out to be Volterra integro-differential equations. Our framework also includes the case in which the Markov process is not independent of the random time process. This leads to variable-order Volterra integro-differential equations having the form
\begin{align}
\frac{d}{dt} \int_0^t q(s, \cdot) \, k(t-s,\cdot) \, ds \, - k(t,\cdot) q(0,\cdot)\, = \, \l G q(t)\r (\cdot)
\label{volterraintro}
\end{align}
for $q:[0, \infty) \mapsto \mathfrak{B}$ where $\mathfrak{B}$ is a Banach space, $k(t, \cdot)$ is a suitable convolution kernel and $G$ is the generator of the Markov process. When the kernel $k$ does not depend on the vector variable of the Banach space, the regularity of solutions to this equation has recently been investigated in \cite{caffarelli}. Further, weak limits of stepped semi-Markov processes are also studied and the corresponding Kolmogorov's equations are determined.
Time-fractional equations can always be viewed as particular, and interesting, cases of the equations studied in this paper.
\section{Preliminaries}
\br{We collect in this section some technical information which will be used throughout the paper.}
\subsection{Complete monotonicity and Bernstein functions} We recall here some basic facts on Bernstein functions and complete monotonicity. We refer to \cite{librobern} for such information. A function $f:(0, \infty) \mapsto \mathbb{R}$ is said to be a Bernstein function if it is of class $C^\infty$, $f(\lambda) \geq 0$ and $(-1)^{n-1}f^{(n)}(\lambda) \geq 0$ for all $n \in \mathbb{N}$ and $\lambda >0$ (\cite[Definition 3.1]{librobern}). For a non-negative $C^\infty$ function $f$ to be a Bernstein function it is necessary and sufficient that $f^\prime$ is completely monotone. A function $g:(0, \infty) \mapsto \mathbb{R}$ is completely monotone if it is of class $C^\infty$ and such that $(-1)^{n}g^{(n)}(\lambda) \geq 0$ for all $n \in \mathbb{N} \cup \ll 0 \rr$ and $\lambda >0$; hence (e.g. \cite[Theorem 1.4]{librobern}) it can be written as the Laplace transform of a unique measure $K$ on $[0, \infty)$, i.e., for all $\lambda >0$,
\begin{align}
g(\lambda) \, = \, \int_{[0, \infty)}e^{-\lambda s}K(ds).
\end{align}
A consequence of the above facts is that a function $f$ is a Bernstein function if, and only if, it can be written in the form \cite[Theorem 3.2]{librobern}
\begin{align}
f(\lambda) \, = \, a+b\lambda + \int_0^\infty \l 1-e^{-\lambda s} \r \nu(ds)
\end{align}
where $a$ and $b$ are non-negative constants and $\nu(\cdot)$ is a measure on $(0, \infty)$ satisfying the integrability condition $\int_0^\infty \l s \wedge 1 \r \nu(ds) < \infty$. A subclass of Bernstein functions, playing a central role in this paper, is the class of complete Bernstein functions. A Bernstein function is said to be complete if the corresponding L\'evy measure has a completely monotone density with respect to the Lebesgue measure \cite[Definition 6.1]{librobern}. Furthermore, a Bernstein function $f \neq 0$ is complete if, and only if, it can be represented as $f = 1/h$ where $h$ is a (non-negative) Stieltjes function, i.e., $h$ admits the representation
\begin{align}
h(\lambda) \, = \, \frac{a}{\lambda}+b+\int_0^\infty \frac{1}{s+\lambda} m(ds)
\end{align}
where $m$ is a measure on $(0, \infty)$ such that
\begin{align}
\int_0^\infty \frac{1}{1+s}m(ds) < \infty.
\end{align}
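A basic example is the fractional power $f(\lambda) = \lambda^\alpha$, $\alpha \in (0,1)$, which is a complete Bernstein function with $a = b = 0$: indeed
\begin{align}
\lambda^\alpha \, = \, \frac{\alpha}{\Gamma(1-\alpha)} \int_0^\infty \l 1-e^{-\lambda s} \r s^{-\alpha -1}\, ds,
\end{align}
and the L\'evy density $s \mapsto \frac{\alpha}{\Gamma (1-\alpha)} s^{-\alpha-1}$ is completely monotone. Consistently, $1/f(\lambda) = \lambda^{-\alpha}$ is a Stieltjes function, since
\begin{align}
\lambda^{-\alpha} \, = \, \frac{\sin \pi \alpha}{\pi} \int_0^\infty \frac{s^{-\alpha}}{s+\lambda}\, ds.
\end{align}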
\subsection{Subordinators and non-homogeneous subordinators}
\label{nonhomosub}
A subordinator is a non-decreasing L\'evy process. Every Bernstein function is the Laplace exponent of a subordinator \cite[Theorem 1.2]{bertoins}. Hence with the symbol $\sigma^f(t)$ we denote the subordinator with transition probabilities $\mu_t(ds)$ such that
\begin{align}
\int_0^\infty e^{-\lambda s} \mu_t(ds) \, = \, \mathds{E}e^{-\lambda \sigma^f(t)} \, = \, e^{-tf(\lambda)}.
\end{align}
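For instance, the choice $f(\lambda) = \lambda^\alpha$, $\alpha \in (0,1)$, corresponds to the $\alpha$-stable subordinator, for which $\mathds{E}e^{-\lambda \sigma^f(t)} = e^{-t\lambda^\alpha}$.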
A non-homogeneous subordinator, say $\sigma^\Pi (t)$, $t \geq 0$, is a non-decreasing additive process in the sense of Sato \cite[Definition 1.6]{satolevy}. Hence it is a non-decreasing process with independent increments, stochastically continuous and with a.s. c\`{a}dl\`{a}g paths and such that $\sigma^\Pi (0) = 0$ a.s. Such a process can be constructed as proposed in \cite[Sec. 2]{orsrictoapota}, i.e.,
\begin{align}
\sigma^\Pi (t) \, : = \, b(t) + \sum_{0 \leq s \leq t} \mathpzc{e}(s)
\label{sumpoisson}
\end{align}
where $\mathpzc{e}(s)$ is a Poisson point process in $\mathbb{R}^+$ whose characteristic measure $\nu(dx, dt)$ on $(0, \infty) \times [0, \infty)$ satisfies the integrability condition
\begin{align}
\int_{(0, \infty)\times[0,t]} (x\wedge 1) \nu(dx, dw) \, < \, \infty.
\end{align}
Hence if $\nu(dx, dt) = v(dx) dt$, where $dt$ is the Lebesgue measure, the construction of a subordinator is obtained. In what follows we will always assume, as in \cite{orsrictoapota}, that $\nu(ds, \cdot)$ is absolutely continuous with respect to the Lebesgue measure and we denote the corresponding density by $\nu(ds, t)$, i.e., $\nu(ds, t) dt : = \nu(ds, dt)$. Furthermore we will assume that the process has no drift, hence in \eqref{sumpoisson} we have $b(t) =0$, for all $t \geq 0$. Therefore the expected number of jumps of size in $dx$ occurring up to time $t$ is given by
\begin{align}
\phi(dx, t) \, : = \, \int_0^t \nu(dx, s) ds.
\end{align}
By \cite[Eqs (2.6) and (2.7)]{orsrictoapota} we have
\begin{align}
\mathds{E} e^{-\lambda \sigma^\Pi (t)} \, = \, e^{-\Pi (\lambda, t)}
\end{align}
where
\begin{align}
\lambda \mapsto \Pi (\lambda, t) \, : = \, \int_0^\infty \l 1-e^{-\lambda s} \r \phi(ds, t)
\end{align}
or equivalently, by \cite[Eq. (2.12)]{orsrictoapota},
\begin{align}
\Pi (\lambda, t) \, = \, \int_0^t f(\lambda, s) ds
\end{align}
where $\lambda \mapsto f(\lambda, s)$ is, for each $s$, the Bernstein function
\begin{align}
f(\lambda, s) \, = \, \int_0^\infty \l 1-e^{-\lambda w} \r \nu(dw, s).
\label{29}
\end{align}
We will further assume throughout the paper that $\nu$ is such that $\sup_s f(\lambda, s) < \infty$ and that $s \mapsto f(\lambda, s)$ is continuous.
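For example, taking
\begin{align}
\nu(dw, s) \, = \, \frac{\alpha(s)}{\Gamma(1-\alpha(s))} w^{-\alpha(s)-1}\, dw
\end{align}
for a continuous function $s \mapsto \alpha(s)$ with values in $(0,1)$ yields $f(\lambda, s) = \lambda^{\alpha(s)}$, so that $\sup_s f(\lambda, s) \leq \max (1, \lambda) < \infty$ and $s \mapsto f(\lambda, s)$ is continuous, in accordance with the above assumptions.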
\section{Stepped semi-Markov processes and time-changed Markov processes}
\subsection{Stepped semi-Markov processes}
\label{defsemi}
Let $\l \mathcal{S}, \mathfrak{S} \r$ be a state space where $\mathfrak{S}$ is the $\sigma$-algebra on $\mathcal{S}$ generated by the discrete topology induced by the metric
\begin{align}
\rho (x,y) \, = \, \begin{cases} 1, \qquad & x \neq y, \\ 0, & x=y. \end{cases}
\end{align}
We consider on $\mathcal{S}$ right-continuous processes $X(t)$, $t \geq 0$, whose paths are stepped functions. Hence the paths are functions $t \mapsto x(t)$, such that for any $t \geq 0$ there exists a $\delta >0$ such that for all $h \in (0, \delta)$ it is true that $x(t)=x(t+h)$, i.e. the functions $t \mapsto x(t)$ are right-continuous in the discrete topology, and further they have a finite number of discontinuities on any finite interval (of time). Processes with these paths are semi-Markov processes in the sense of Gihman and Skorohod (see \cite[Chapter 3]{gihman} and also \cite[Chapter 3, Section 12, p. 76]{harlamov}) if the couple $\l X(t), \gamma^X(t) \r$, where
\begin{align}
\gamma^X(t) \, := \, t - \l 0 \vee \sup \ll s \leq t : X(s) \neq X(t) \rr \r,
\end{align}
is a strict Markov process. A process with these properties can be constructed as follows (see \cite[Chapter 3, Section 3]{gihman}). Assume a family of probability spaces $\l \Omega, \mathcal{F}, P^x \r$, $x \in \mathcal{S}$. Let $X_n$ be a discrete-time Markov chain on $\mathcal{S}$ under $P^x$ such that $P^x \l X_0 = x \r =1$ and define for all $B \in \mathfrak{S}$, the transition probabilities $B \mapsto h(x,B) = P^x \l X_1 \in B \r$. In the case $\mathcal{S}$ is countable we denote by $\l H \r_{ij} = h_{ij}$ the transition matrix of $X_n$. Let $\eta_i$ be a sequence of i.i.d. r.v.'s jointly independent from $X_n$ under any $P^x$, each of which is uniformly distributed on $[0,1]$. Let $F_{x,y}$ be a c.d.f. of a non-negative r.v. for any $x,y \in \mathcal{S}$ and define a function $\varphi_{x,y}:[0,1] \mapsto [0, \infty)$ such that $\varphi_{x,y} (\eta_i)$ has distribution $F_{x,y}$ for any $i$.
Then let $J_i = \varphi_{X_i,X_{i+1}} (\eta_i)$ and define
\begin{align}
X(t) \, = \, X_n, \qquad \sum_{i=0}^{n-1} J_i \leq t < \sum_{i=0}^{n}J_i.
\label{defx}
\end{align}
This is equivalent to saying that for any $x,y,z \in \mathcal{S}$
\begin{align}
P^x \l J_n \leq t \mid X_n = z, X_{n+1}=y \r \, = \, P^z \l J_0 \leq t \mid X_1 = y \r \, = \, F_{z,y}(t).
\end{align}
We denote $\overline{F}_{x,y}(t) := 1-F_{x,y}(t)$. When $F_{x,y}$ has a density with respect to the Lebesgue measure, we denote it by $\mathpzc{f}_{x,y}(t)$. It is clear that if $\overline{F}_{x,y}(t) = e^{-\theta(x) t}$ then we have that \eqref{defx} defines a continuous-time Markov process. In this case we use the symbol $\mathcal{J}_i$ to denote such exponential r.v.'s, the symbol $M(t)$, $t \geq 0$, to denote the corresponding Markov process and $t \mapsto m(t)$ for the sample paths of $M(t)$.
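The construction \eqref{defx} is straightforward to simulate. The following minimal sketch (ours, in Python; the function names are only illustrative) draws the embedded chain from $h(x,\cdot)$ and the holding times directly from $F_{x,y}$, which is equivalent to drawing them through the uniform r.v.'s $\eta_i$ and the functions $\varphi_{x,y}$.
\begin{verbatim}
import random

def simulate_semi_markov(x0, sample_next, sample_holding, horizon):
    # Jump times T_n and states X_n of the stepped process X(t) on [0, horizon]:
    # sample_next(x) draws X_{n+1} from h(x, .);
    # sample_holding(x, y) draws a holding time with c.d.f. F_{x,y}.
    times, states = [0.0], [x0]
    t, x = 0.0, x0
    while t <= horizon:
        y = sample_next(x)
        t += sample_holding(x, y)
        times.append(t)
        states.append(y)
        x = y
    return times, states

# Two states with heavy-tailed (infinite-mean) Pareto holding times, so the
# waiting times are not exponential and X(t) is genuinely semi-Markov.
next_state = lambda x: 1 - x
holding = lambda x, y: random.paretovariate(0.8)
T, X = simulate_semi_markov(0, next_state, holding, horizon=10.0)
\end{verbatim}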
Now let $T_n = \sum_{i=0}^{n-1} J_i$ and define
\begin{align}
N^J(t) \, := \, \max \ll n \in \mathbb{N}: T_n \leq t \rr.
\label{ren}
\end{align}
By using \eqref{ren} we can say that \eqref{defx} is equivalent to define
\begin{align}
X(t) \, := \, X_{N^J(t)}.
\label{26}
\end{align}
It is clear that for $M(t)$ the process $N^J$ reduces to a birth process. In this case we denote the epochs of the birth process with the symbol $\mathcal{T}_n$. From the discussion above it is therefore clear that $P^x \l \mathcal{T}_{n+1}- \mathcal{T}_n > t \mid X_n = y \r = e^{-\theta(y)t}$.
Finally, note that in view of \cite[Chapter 3, Section 3, Lemma 2]{gihman} the process $X(t)$ satisfies the defining properties of semi-Markov processes, i.e., the couple $\l X(t), \gamma^X(t) \r$, where $\gamma^X (t) := t-T_{N^J(t)}$, is a strict (homogeneous) Markov process.
Hence it is clear that our semi-Markov processes are time-homogeneous in the sense that
\begin{align}
P^x \l X(t+\tau) \in B \mid X(\tau) = y, \gamma^X(\tau) =s \r \, = \, P^y \l X(t) \in B \mid \gamma^X (0) = s \r,
\label{homo}
\end{align}
for any $t,\tau \geq 0$, $0 \leq s \leq \tau$, $x,y \in \mathcal{S}$ and $B \in \mathfrak{S}$.
\subsection{Construction as time-changed Markov processes}
\label{sectimech}
The aim of this section is to make precise the following heuristic remarks.
The results below show when a homogeneous semi-Markov process is given by a random time change of a Markov process and which type of time-change is allowed. Suppose that at the random times $\l \mathcal{T}_i \r_{i \in \mathbb{N}}$ a Markov process $M(t^\prime)$ jumps in the states $\l X_i \r_{i \in \mathbb{N}}$ and a semi-Markov process $X(t)$ jumps in the same states $\l X_i \r_{i \in \mathbb{N}}$ at the random times $(T_i)_{i \in \mathbb{N}}$. If $t = g(t^\prime)$ we have that $T_i = g (\mathcal{T}_i)$, in distribution, and $X(t) = M(t^\prime) = M(g^{-1}(t))$. This can happen for a suitably defined strictly increasing function $t \mapsto g(t)$ which must be meant as either a deterministic or as a random function. To determine which function $g$ is allowed, consider that the r.v.'s $g \l \mathcal{T}_{i+1}\r- g \l \mathcal{T}_i \r$ must be independent r.v.'s. \brr{Note also that the behaviour of the function $g$ can be dependent on the position of the process, i.e., the increment $g(t)-g(s)$ depends on the r.v.'s $M(w),s \leq w \leq t $. \brbr{This means that the (infinitesimal) increment $g(s+ds)-g(s)$ is given by the increment of a different function $\sigma^x(s+ds)-\sigma^x(s)$, conditionally on $M(s) = x$. Furthermore since $M(t)$ is a stepped process, we can write
\begin{align}
g(t) = g(\mathcal{T}_i)+\sigma^{X_i}(t)-\sigma^{X_i}(\mathcal{T}_i), \qquad \mathcal{T}_i \leq t < \mathcal{T}_{i+1},
\end{align}
since the position $X_i$ reached at $\mathcal{T}_i$ is maintained up to time $\mathcal{T}_{i+1}$}.}
\brbr{The holding times $T_{i+1}-T_i$ are the r.v.'s
\begin{align}
g(\mathcal{T}_{i+1}-) - g(\mathcal{T}_i) = \sigma^{X_i}(\mathcal{T}_{i+1}-)-\sigma^{X_i}(\mathcal{T}_i),
\label{39}
\end{align}
and to preserve time-homogeneity we may require that the distribution of $T_{i+1}-T_i$ does not depend on $i$, the number of jumps. Hence if the increment satisfies $\sigma^{X_i}(t)-\sigma^{X_i}(s)=\sigma^{X_i}(t-s)$, in distribution, and if $\sigma^{X_i} (t)=\sigma^{X_i}(t-)$, then the distribution of $T_{i+1}-T_i$, which is the distribution of \eqref{39}, is independent of $i$ since $\mathcal{T}_{i+1}-\mathcal{T}_i$ is an exponential r.v. with parameter $\theta (x)$ (conditionally on $X_i=x)$.} This suggests choosing $\sigma^{X_i}(t)$ as a strictly increasing process with stationary and independent increments, i.e., a strictly increasing subordinator. Since each process $\sigma^{X_i}(t)$ is strictly increasing and left-invertible, $g$ is strictly increasing and left-invertible and therefore
\begin{align}
X(t) = X_n, \qquad g(\mathcal{T}_n) \leq t < g(\mathcal{T}_{n+1}),
\end{align}
is equivalent to
\begin{align}
X(t) = X_n, \qquad \mathcal{T}_n \leq g^{-1}(t) < \mathcal{T}_{n+1}.
\end{align}
In particular since $g$ is strictly increasing then the left-inverse $g^{-1}$ is the hitting-time process of $g$.
This heuristically shows that in this case $X(t)$ and $M(g^{-1}(t))$ are the same process and since in the previous remarks the increments of $g$ depend on the position $X_i$ (and on any $\tau_i$) then $g(t)$ is dependent on $M(t)$. This will be made precise below.
We show here that processes $X(t)$ defined as in Section \ref{defsemi} can be constructed under suitable assumptions as a particular time-change of $M$.
Consider a family of Bernstein functions $\ll f(\lambda, x,y) \rr_{(x,y) \in \mathcal{S}\times \mathcal{S}}$ having representation
\begin{align}
f(\lambda, x,y ) \, = \, \int_0^\infty \l 1-e^{-\lambda s} \r \nu(ds, x,y),
\end{align}
and let $\nu((0, \infty),x,y)=\infty$ for each $(x,y) \in \mathcal{S}\times \mathcal{S}$.
Let $\ll \sigma^{(x,y)}(t) \rr_{(x,y) \in \mathcal{S}\times \mathcal{S}}$, $t \geq 0$, be a family of subordinators with Laplace exponent $f(\lambda, x,y)$. Then consider $\ll L^{(x,y)} (t) \rr_{(x,y) \in \mathcal{S}\times \mathcal{S}}$ which is a family of inverses of $\sigma^{(x,y)}$.
Now consider a non-homogeneous subordinator defined as in \eqref{sumpoisson} with $b(t) =0$ for all $t \geq 0$ and with characteristic measure
\begin{align}
v(ds,t) \, = \, \sum_{k=0}^\infty \nu(ds, x_k, x_{k+1}) \, \mathds{1}_{\left[ \tau_k,\tau_{k+1} \right]}(t)
\label{lev}
\end{align}
where \br{the symbols $x_k$ and $\tau_k$ indicate the realization of $X_k$ and $\mathcal{T}_k$} so that
\begin{align}
t \mapsto m(t) = x_k, \qquad \tau_k \leq t < \tau_{k+1}.
\label{mpiccolodef}
\end{align}
Equivalently if we denote by $t \mapsto n(t)$ the sample paths of the counting process $N(t)$ associated with $M(t)$, we can rewrite \eqref{mpiccolodef} as
\begin{align}
m(t) = x_{n(t)}.
\end{align}
This is to say that each fixed path $t \mapsto m(t)$ of $M(t)$, whose discontinuities (jumps) are in the points $t = \tau_k$, defines a different measure $v(ds, t)dt$.
Hence the increments of $\sigma^\Pi(t)$ are given by
\begin{align}
\sigma^\Pi(t) \, = \, \sigma^\Pi (\mathcal{T}_n)+ \sigma^{(X_n, X_{n+1})}(t-\mathcal{T}_n), \qquad \mathcal{T}_n \leq t < \mathcal{T}_{n+1}.
\label{incre}
\end{align}
The process $\sigma^\Pi(t)$ is therefore a different non-homogeneous subordinator for any fixed path of $m(t)$ with Laplace exponent $\Pi (\lambda, t)$ which is determined by the path of $M(t)$. However the r.v. $\sigma^\Pi(t)$ depends only on the events needed at time $t$, i.e., we have
\begin{align}
\mathds{E}^x\left[e^{-\lambda \sigma^\Pi (t)} \mid \tau_0, \cdots, \tau_{n(t)}, x_0, \cdots, x_{n(t)+1} \right] \notag \, = \, e^{-\Pi (\lambda, t)}
\end{align}
where
\begin{align}
\Pi (\lambda, t) \, = \,& \int_0^t \int_0^\infty \l 1-e^{-\lambda s} \r v(ds,y)dy \ \notag \\
= \, & \int_0^t \int_0^\infty \l 1-e^{-\lambda s} \r \sum_{k=0}^{n(t)} \nu(ds,x_k,x_{k+1}) \, \mathds{1}_{\left[ \tau_k,\tau_{k+1} \right]}(y)dy \notag \\
= \, & \sum_{k=0}^{n(t)-1} (\tau_{k+1}-\tau_{k} ) f(\lambda, x_k, x_{k+1}) + (t-\tau_{n(t)})f(\lambda, x_{n(t)}, x_{n(t)+1}),
\label{lapl}
\end{align}
where $\tau_0=0$ and, under $P^x$, $x_0=x$.
Then we define
\begin{align}
L^\Pi (t) \, := \, \inf \ll s \geq 0 : \sigma^\Pi (s) \geq t \rr.
\end{align}
\br{Since it is assumed that $\nu((0, \infty), x, y) = \infty$ for any $(x,y)$ then $v((0, \infty), t) = \infty$ for any $t$ and thus the process $\sigma^\Pi (t)$ is strictly increasing on any finite interval of time \cite[Proposition 2.2]{orsrictoapota}. Hence $L^\Pi \l \sigma^\Pi (t) \r = t$, a.s.}
The following Theorem is the precise statement of the heuristical discussion at the beginning of this section.
\begin{te}
\label{tetimech}
Let $X(t)$ be a stepped semi-Markov process (as in Section \ref{defsemi}). Assume $\nu((0, \infty),x,y)=\infty$ for any $(x,y) \in \mathcal{S} \times \mathcal{S}$. The following assertions are equivalent.
\begin{enumerate}
\item For any $(x,y) \in \mathcal{S} \times \mathcal{S}$ and $z \in \mathcal{S}$, $\overline{F}_{x,y}(t) = \mathds{E}^ze^{-\theta(x)L^{(x,y)}(t)}$.
\item $X(t)$ and $M \l L^\Pi (t) \r$ are the same process.
\end{enumerate}
\end{te}
\begin{proof}
The process $\sigma^\Pi(t)$ is strictly increasing on any finite interval (\cite[Proposition 2.2]{orsrictoapota}), and therefore $L^\Pi(t)$ has continuous sample paths. Since $X(t)$ and $M \l L^\Pi(t) \r$ have the same embedded chain, to prove that $X(t)$ coincides with $M(L^\Pi(t))$ it is sufficient to prove that $M(L^\Pi(t))$ has the same waiting times between jumps as $X(t)$. Note that
\begin{align}
M \l L^\Pi(t) \r \, = \, X_n, \qquad \mathcal{T}_n \leq L^\Pi(t) < \mathcal{T}_{n+1},
\end{align}
and hence by using \cite[Lemma 2.1]{meerpoisson} we have that the epochs of $M(L^\Pi(t))$ occur at the random times $\sigma^\Pi (\mathcal{T}_n-)$. But it is true that, a.s., $\sigma^\Pi(\mathcal{T}_n-) = \sigma^\Pi (\mathcal{T}_n)$ (\cite[Theorem 2.1]{orsrictoapota}) and hence we have by \eqref{incre} that $\sigma^\Pi(\mathcal{T}_{n+1}) - \sigma^\Pi (\mathcal{T}_n) = \sigma^{(X_n,X_{n+1})} \l \mathcal{J}_n \r$ (we recall that $\mathcal{J}_n = \mathcal{T}_{n+1}-\mathcal{T}_n$). The $\mathcal{J}_n$ are the holding times of the process $M(t)$ and hence, conditionally on $X_n=y$, they are independent exponential r.v.'s with parameter $\theta(y)$. Hence we can compute the Laplace transform of the r.v. $\sigma^{(X_n,X_{n+1})} \l \mathcal{J}_n \r$ as
\begin{align}
\mathds{E}^x \l e^{-\lambda \sigma^{(X_n, X_{n+1})} \l \mathcal{J}_n \r} \mid X_n = y, X_{n+1}=z \r \, = \, & \mathds{E}^x e^{-\mathcal{J}_nf(\lambda, y,z)} \notag \\ = \, & \int_0^\infty \theta(y) e^{-\theta(y) w} e^{-wf(\lambda, y,z)} dw \notag \\
= \, & \frac{\theta(y)}{\theta(y)+f(\lambda, y,z)}.
\end{align}
Since by \cite[Corollary 3.5]{meertri} we have that
\begin{align}
\int_0^\infty e^{-\lambda t} \mathds{E}^x e^{-\theta(y) L^{(y,z)}(t) } dt \, = \,\lambda^{-1} \frac{f(\lambda, y, z)}{\theta(y)+f(\lambda, y,z)},
\label{319}
\end{align}
then the corresponding density has Laplace transform
\begin{align}
\int_0^\infty e^{-\lambda t} \mathpzc{f}_{y,z}(dt) \, = \, \frac{\theta(y)}{\theta(y)+f(\lambda, y,z)}.
\label{3200}
\end{align}
Therefore, for $\lambda >0$,
\begin{align}
\int_0^\infty e^{-\lambda t}\mathpzc{f}_{y,z} (dt) =\mathds{E}^x \l e^{-\lambda \sigma^{(X_n, X_{n+1})} \l \mathcal{J}_n \r} \mid X_n = y, X_{n+1}=z \r
\end{align}
and therefore the proof is complete.
\end{proof}
\subsection{Complete monotonicity of the survival function}
From the discussion above we know that a stepped semi-Markov process can be viewed as a random time-change (independent or dependent) of a Markov process. The existence of a time-change relationship between Markov and semi-Markov processes is also discussed, for example, in \cite{jacod} and \cite{kurtz}. We provide here a condition which guarantees the existence of a time-change relationship between $M$ and $X$ in the form described in Theorem \ref{tetimech}. This condition also identifies the process $\sigma^\Pi(t)$ when no information is given. Hence the following result is also a tool to identify the \br{random time process} related to a semi-Markov process.
\begin{te}
\label{tecm}
Let $K_{x,y}(\cdot)$, $(x,y) \in \mathcal{S} \times \mathcal{S}$, be a family of measures on $[0, \infty)$ which satisfies $K_{x,y}[0, \infty) =1$ and $\int_0^\infty s K_{x,y}(ds) = \infty$. The following assertions are equivalent.
\begin{enumerate}
\item For any $(x,y) \in \mathcal{S} \times \mathcal{S}$ it is true that the functions $t \mapsto \overline{F}_{x,y}(t)$ are completely monotone functions with respect to the measures $K_{x,y}(\cdot)$.
\item There exists a process $L^\Pi (t)$ such that $X(t)$ and $M \l L^\Pi (t) \r$ are the same process defined by setting in \eqref{lapl}
\begin{align}
\Pi(\lambda, t) \, = \, \sum_{k=0}^{n(t)-1}(\tau_{k+1}-\tau_k) f(\lambda, x_k, x_{k+1})+(t-\tau_{n(t)}) f(\lambda, x_{n(t)}, x_{n(t)+1}),
\label{reprf}
\end{align}
with
\begin{align}
f(\lambda, x,y) \, = \,\br{\theta(x)} \frac{\lambda \widetilde{\overline{F}}_{x,y}(\lambda)}{1-\lambda \widetilde{\overline{F}}_{x,y}(\lambda)} \, = \, \br{\theta(x)}\int_0^\infty \l 1-e^{-\lambda s} \r \nu(ds,x,y)
\label{320}
\end{align}
which are unbounded complete Bernstein functions for any $(x,y) \in \mathcal{S} \times \mathcal{S}$.
\end{enumerate}
\end{te}
\begin{proof}
\begin{enumerate}
\item[ 1) $\to$ 2)] By using Theorem \ref{tetimech} it is sufficient to prove that any completely monotone function which is also the survival function of a non-negative r.v. (with a Lebesgue density diverging at zero) can be considered as the moment generating function of an inverse subordinator $L^f$ for some $f$. In other words we prove that if $t \mapsto \overline{F}_{x,y}(t)$ are completely monotone functions for any $(x,y)$, then it must be true that
\begin{align}
\overline{F}_{x,y}(t) \, = \, \mathds{E}^ze^{-\theta(x) L^{(x,y)}(t)}
\end{align}
for some inverse $L^{(x,y)}(t)$ and hence by Theorem \ref{tetimech} we have $X(t) = M (L^\Pi(t))$ where $\Pi$ is defined in \eqref{lapl}.
After this we prove that the representation in \eqref{reprf} is true.
Observe that the dependence of $f$ on $(x,y)$ is unnecessary in this proof: it is indeed sufficient to prove that every completely monotone function can be written as the moment generating function of an inverse subordinator $L^f$ for some Bernstein function $f$. Then letting $(x,y)$ vary is the same thing as considering a different function $f$. Hence in what follows we omit the dependence of $\overline F_{x,y}$ from $(x,y)$. \br{In the same spirit} we also fix the parameter $\theta(x) = \theta$.
Since $t \mapsto \overline{F}(t)$ is completely monotone then
\begin{align}
\overline{F}(t) = \int_0^\infty e^{-ts} K(ds)
\label{cmg}
\end{align}
for some measure $K(\cdot)$ with $K(0, \infty) =1$. Hence
\begin{align}
\widetilde{\overline{F}}(\lambda) = \int_0^\infty \frac{1}{s+\lambda} K(ds)
\label{gsti}
\end{align}
which is a Stieltjes function if
\begin{align}
\int_0^\infty \frac{1}{s+1} K(ds) < \infty.
\label{convst}
\end{align}
But \eqref{convst} is true since $K[0, \infty) = 1$.
Hence by Lemma \ref{te22} we know that there exists a complete Bernstein function $f(\lambda)$ such that
\begin{align}
\widetilde{\overline{F}}(\lambda) = \frac{f(\lambda)}{\lambda} \frac{1}{\br{\theta}+f(\lambda)}.
\label{423}
\end{align}
Use \cite[Corollary 3.5]{meertri} to say that
\begin{align}
\int_0^\infty e^{-\lambda t} \mathds{E}^ze^{-\br{\theta}L^f(t)} dt \, = \, \frac{f(\lambda)}{\lambda} \frac{1}{\br{\theta}+f(\lambda)}
\end{align}
and hence since $t \mapsto \mathds{E}e^{-\br{\theta} L^f(t)}$ is completely monotone by Lemma \ref{tecommon} and obviously continuous we have that $\overline{F}(t) = \mathds{E}^xe^{-\br{\theta}L^f(t)}$ for any $t \geq 0$.
Now we prove that the representation for $f$ is true.
Rearrange \eqref{423} and use Lemma \ref{te22} to say that
\begin{align}
f(\lambda) \, = \,\br{\theta} \frac{\lambda \widetilde{\overline{F}}(\lambda)}{1-\lambda \widetilde{\overline{F}}(\lambda)}
\label{431}
\end{align}
is a complete Bernstein function.
Hence we have
\begin{align}
f(\lambda) = \br{\theta}\l a+b\lambda + \int_0^\infty \l 1-e^{-\lambda s} \r \nu(ds)\r,
\end{align}
where \br{the L\'evy measure $\nu(ds)$ has a completely monotone density $s \mapsto \nu(s)$ since $f$ is complete. Now we prove that $a=0$, $b=0$ and $\nu(0, \infty) = \infty$. By using \eqref{gsti} it is easy to verify that $\lim_{\lambda \to \infty}f(\lambda) = \infty$: indeed note that}
\begin{align}
\lim_{\lambda \to \infty} \lambda \widetilde{\overline{F}}(\lambda)\, = \, & \lim_{\lambda \to \infty} \int_0^\infty \frac{\lambda}{\lambda +s} K(ds) \notag \\
= \, & \lim_{\lambda \to \infty} \int_0^\infty \l 1-\frac{s}{\lambda + s}\r K(ds)
\label{335}
\end{align}
and since $\lambda \mapsto \l 1-s (\lambda +s)^{-1} \r$ is \br{bounded} and monotone we can move the limit inside the integral to say that $\lim_{\lambda \to \infty} \lambda \widetilde{\overline{F}}(\lambda) = 1$. We can conclude now that $\lim_{\lambda \to \infty } f(\lambda) = \infty$. Therefore it must be true, by \cite[Corollary 3.7, Item (v)]{librobern}, that $\nu(0, \infty) = \infty$ or $b>0$ (or both).
Now we compute $b$. Note that by \cite[p. 16, item (iv)]{librobern} we know that
\begin{align}
b \, = \, \lim_{\lambda \to \infty} \lambda^{-1} f(\lambda)
\end{align}
and by using \eqref{431} we have that
\begin{align}
b \, = \, & \lim_{\lambda \to \infty} \frac{\widetilde{\overline{F}}(\lambda)}{1-\lambda \widetilde{\overline{F}}(\lambda)} \notag \\
= \, & \lim_{\lambda \to \infty} \frac{\int_0^\infty (s+\lambda)^{-1} K(ds)}{1-\lambda \int_0^\infty \l s+\lambda\r^{-1} K(ds) }.
\label{limitforb}
\end{align}
However since $K(0, \infty) =1$ we note that the denominator is given by
\begin{align}
1-\lambda \widetilde{\overline{F}}(\lambda) \, = \, & 1- \lambda \int_0^\infty \l s+\lambda\r^{-1} K(ds) \notag \\
= \, & \int_0^\infty \l 1- \frac{\lambda}{s+\lambda} \r K(ds) \notag \\
= \, & \int_0^\infty \frac{1}{s+\lambda} sK(ds).
\label{dens}
\end{align}
Let $k(ds): = sK(ds)$; then the limit \eqref{limitforb} is
\begin{align}
b \, = \, &\lim_{\lambda \to \infty} \frac{\int_0^\infty (\lambda + s)^{-1} s^{-1}k(ds) }{\int_0^\infty (\lambda + s)^{-1} k(ds)} \notag \\
= \, & \lim_{\lambda \to \infty} \frac{\int_0^\infty \lambda^{-1} \l 1 +\lambda^{-1} s \r^{-1}s^{-1}k(ds)}{ \int_0^\infty \lambda^{-1} \l 1+\lambda^{-1} s \r^{-1} k(ds)} \notag \\
= \, & \lim_{\lambda \to \infty} \frac{\int_0^\infty \l 1 +\lambda^{-1} s \r^{-1}s^{-1}k(ds)}{ \int_0^\infty \l 1+\lambda^{-1} s \r^{-1} k(ds)}.
\label{227}
\end{align}
The function $\lambda \mapsto \l 1 +\lambda^{-1} s \r^{-1}$ is, for any fixed $s>0$, \br{bounded} and monotone. Hence in \eqref{227} we can move the limit inside the integral to get
\begin{align}
b \, = \, \frac{\int_0^\infty s^{-1}k(ds)}{ \int_0^\infty k(ds)} \, = \, \frac{1}{k(0, \infty)}.
\label{b1suk}
\end{align}
Hence we have by the assumptions that $b=0$ since $k(0, \infty) = \int_0^\infty sK(ds) = \infty$.
The fact that $a=0$ is true since $a = f(0)$, which can be verified by using again the representation \eqref{335}. This completes the proof.
\item[2) $\to$ 1)] Now we prove the converse statement. If $X(t)$ and $M \l L^\Pi(t) \r$ are the same process then it must be true by Theorem \ref{tetimech} that $\overline{F}_{x,y}(t) = \mathds{E}^ze^{-\theta(x) L^{(x,y)}(t)}$ for any $(x,y)$ and that the $L^{(x,y)}$ are inverses of subordinators with Laplace exponents $f(\lambda, x,y)$ given in \eqref{320}. Further, since the $f(\lambda, x, y)$ are complete Bernstein functions, we have by Lemma \ref{tecommon} that $t \mapsto \overline{F}_{x,y}(t)$ is completely monotone with respect to a measure $K$. The fact that $K[0, \infty)=1$ is obvious since $L^{(x,y)}(0) = 0$ a.s. The fact that $\int_0^\infty sK(ds) = \infty$ can be verified by repeating the computation leading to \eqref{b1suk}.
\end{enumerate}
\end{proof}
\begin{os} \normalfont
Note that the condition assumed in Theorem \ref{tecm}
\begin{align}
\int_0^\infty sK_{x,y}(ds) = \infty
\end{align}
together with the complete monotonicity of $t \mapsto \overline{F}_{x,y}(t)$ \br{implies that the corresponding r.v.'s have densities $\mathpzc{f}_{x,y}(\cdot)$ which are singular at zero.}
\end{os}
\begin{os} \normalfont
Suppose that the measures $K_{x,y} (\cdot)$ have the form of the so-called Lamperti distribution \cite{lamperti}, for $\alpha(x,y) \in (0,1)$,
\begin{align}
K_{x,y}(ds) \, = \, \frac{\sin \pi \alpha(x,y)}{\pi} \frac{s^{\alpha(x,y) -1}}{s^{2\alpha(x,y)}+2s^{\alpha(x,y)} \cos \pi \alpha(x,y) +1}ds.
\end{align}
We have \cite{gross}
\begin{align}
\int_0^\infty e^{-s t} \frac{\sin \pi \alpha(x,y)}{\pi} \frac{s^{\alpha(x,y) -1}}{s^{2\alpha(x,y)}+2s^{\alpha(x,y)} \cos \pi \alpha(x,y) +1}ds \, = \, E_{\alpha(x,y)}\l - t^{\alpha(x,y)} \r
\end{align}
where
\begin{align}
E_{\alpha(x,y)}\l w \r \, : = \, \sum_{k=0}^\infty \frac{w^k}{\Gamma (\alpha(x,y) k+1)}
\end{align}
is the Mittag-Leffler function. Then since (e.g. \cite[eq. (3.4)]{meerbounded})
\begin{align}
\int_0^\infty e^{-\lambda t} E_{\alpha(x,y)}\l - t^{\alpha(x,y)} \r dt \, = \, \frac{\lambda^{\alpha(x,y)-1}}{1+\lambda^{\alpha(x,y)}}
\end{align}
the representation \eqref{reprf} yields
\begin{align}
\Pi (\lambda, t) \, = \, \sum_{k=0}^{n^J(t)-1} \l \tau_{k+1}-\tau_k \r \lambda^{\alpha(x_k, x_{k+1})} + (t-\tau_{n^J(t)}) \lambda^{\alpha(x_{n^J(t)}, x_{n^J(t)+1})}.
\end{align}
Hence the \br{associated subordinators representing the increments of $\sigma^\Pi$ are $\alpha(x,y)$-stable subordinators conditionally on $X_0=x, \cdots, X_{n+1}=x_{n+1}, \mathcal{T}_1=\tau_1, \cdots, \mathcal{T}_n=\tau_n$}.
\end{os}
\section{The Kolmogorov's equations}
When a semi-Markov process $X$ is given by the time change of a Markov process $M$ by means of an independent inverse subordinator, the corresponding Kolmogorov's backward equation has been written down in several different ways. In particular, \cite{meertri} showed that if the scaling limit of a (suitably defined) continuous-time random walk is of the form $M \l L^f(t) \r$, where $M$ is a L\'evy process generated by $A$, then its pdf $p(x, t)$ solves the governing equation
\begin{align}
\mathbb{C}_f \l \partial_t \r p(x, t) \, = \, Ap(x, t)
\end{align}
where
\begin{align}
\mathbb{C}_f \l \partial_t \r u(t) \, : = \, \mathcal{L}^{-1} \left[ f(\lambda) \widetilde u(\lambda) - \lambda^{-1}f(\lambda) u(0) \right] (t).
\label{meerinv}
\end{align}
In \cite{toaldopota} the author introduced the operator
\begin{align}
\mathcal{D}^f_t q(t) : = \frac{d}{dt} \int_0^t q(s) \, \bar{\nu}(t-s) ds
\end{align}
where
\begin{align}
\bar{\nu}(t) : = a+ \nu(t, \infty)
\end{align}
which can be regularized by subtracting an initial condition (see also \cite[Remark 4.8]{meertri}) as
\begin{align}
\mathfrak{D}_t^f q(t)\, := \, & \mathcal{D}^f_tq(t)- \bar{\nu}(t) q(0).
\label{caputotype}
\end{align}
The operators \eqref{caputotype} and \eqref{meerinv} turn out to be the same operator at least for exponentially bounded continuously differentiable functions (see the discussion following \cite[formula (2.18)]{meertoa}).
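For example, in the stable case $f(\lambda) = \lambda^{\alpha}$, $\alpha \in (0,1)$, one has $a=0$ and $\bar{\nu}(t) = \nu(t, \infty) = t^{-\alpha}/\Gamma(1-\alpha)$, so that
\begin{align}
\mathcal{D}^f_t q(t) \, = \, \frac{d}{dt} \int_0^t q(s) \, \frac{(t-s)^{-\alpha}}{\Gamma(1-\alpha)}\, ds
\end{align}
is the Riemann--Liouville fractional derivative of order $\alpha$, and \eqref{caputotype} reduces to the Caputo (regularized Riemann--Liouville) derivative of order $\alpha$.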
Suppose $P_t$ is a semigroup of linear operators on a Banach space $\mathfrak{B}$ corresponding to a Markov process $M$, i.e.,
\begin{align}
\l P_tu \r (x) \, := \, \mathds{E}^xu(M(t))
\end{align}
and then define
\begin{align}
(\Pi_t u)(x)\, = \, \mathds{E}^x u \l M \l L^f(t) \r \r
\label{timenaif}
\end{align}
where $L^f$ is an independent inverse subordinator.
Using Bochner integrals \eqref{timenaif} is equivalent to
\begin{align}
\Pi_tu \, = \, \int_0^\infty P_su \; l(ds,t)
\end{align}
where $l(ds, t) := P^x \l L^f(t) \in ds \r$. In this case we can use \cite[Theorem 5.1]{toaldopota} to say that $t \mapsto \Pi_tu$ solves
\begin{align}
\mathfrak{D}_t^f q(t) \, = \, Aq(t) \qquad q(0) = u \in \text{Dom}(A).
\label{eqold}
\end{align}
When $f^\star(\lambda):= \lambda /f(\lambda)$ is again a Bernstein function it is proved in \cite{meertoa} (under the additional assumption that $A$ is self-adjoint on a Hilbert space) that equation \eqref{eqold} can be rewritten, for $t>0$, in the evolutionary form
\begin{align}
\frac{d}{dt}q(t) \, = \, \mathcal{D}_t^{f^\star} Aq(t), \qquad q(0) = u,
\label{k2}
\end{align}
where $\mathcal{D}_t^{f^\star}$ is related to $f^\star$ in the same way $\mathcal{D}^f$ is related to $f$. Hence when $M(L^f(t))$ is a semi-Markov process then \eqref{eqold} and \eqref{k2} play the same role as the classical backward Kolmogorov's equation of Markov processes. In \cite{magda} an evolutionary form analogous to \eqref{k2} is proposed. In what follows we provide two integro-differential forms of the Kolmogorov's backward equation for the general class of semi-Markov processes considered above.
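Before doing so we recall the prototypical fractional case: for $f(\lambda) = \lambda^\alpha$ one has $f^\star(\lambda) = \lambda^{1-\alpha}$, which is again a Bernstein function, and \eqref{eqold} and \eqref{k2} become, respectively, the time-fractional equation $\mathfrak{D}^f_t q(t) = Aq(t)$ with the Caputo derivative of order $\alpha$ and the evolutionary form $\frac{d}{dt} q(t) = \mathcal{D}^{f^\star}_t A q(t)$ with the Riemann--Liouville derivative of order $1-\alpha$.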
\subsection{The backward equation}
Let $C_b (\mathcal{S})$ be the space of bounded functions on $\mathcal{S}$ equipped with the sup-norm $\left\| \cdot \right\|_\infty$ and note that these functions are also continuous since $\mathcal{S}$ is equipped with the discrete topology. We define the Markov semigroup $P_t$ acting on functions $u \in C_b (\mathcal{S})$ associated with the Markov process $M(t)$ as
\begin{align}
(P_tu)(x) : = \mathds{E}^x u(M(t)).
\end{align}
The generator of $P_t$ is the operator $G$ given by
\begin{align}
(G u)(\cdot) \, := \, \theta(\cdot)\int_{\mathcal{S}} \l u(y)-u(\cdot) \r h(\cdot,dy)
\label{defG}
\end{align}
which is a bounded operator with respect to $\left\| \cdot \right\|_\infty$ and hence $P_t$ is strongly continuous on $C_b \l \mathcal{S} \r$ (e.g. \cite[Theorem 1.2, p. 2]{pazy}).
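On a finite state space the generator \eqref{defG} is the matrix $G = \mathrm{diag}(\theta)\,(H - I)$, where $H$ is the transition matrix of the embedded chain, and the action of $P_t = e^{tG}$ can be checked numerically. The following minimal sketch (ours, in Python, with illustrative rates and transition matrix) verifies $\frac{d}{dt}P_tu = GP_tu$ by a finite difference.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

theta = np.array([1.0, 2.0, 0.5])              # holding rates theta(x)
H = np.array([[0.0, 0.7, 0.3],                 # embedded chain h(x, dy)
              [0.5, 0.0, 0.5],
              [0.2, 0.8, 0.0]])
G = np.diag(theta) @ (H - np.eye(3))           # generator as a matrix

u = np.array([1.0, 0.0, 0.0])
t, h = 0.75, 1e-6
q_t = expm(t * G) @ u                          # q(t) = P_t u = E^x u(M(t))
q_th = expm((t + h) * G) @ u

# Finite-difference check of the backward equation d/dt q(t) = G q(t).
assert np.allclose((q_th - q_t) / h, G @ q_t, atol=1e-4)
\end{verbatim}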
Hence the mapping $t \mapsto q(t) : = P_tu$ is $C^1([0, \infty), C_b(\mathcal{S}))$ and is the unique classical solution to the Cauchy problem (e.g. \cite[Proposition 6.2]{engelnagel})
\begin{align}
\frac{d}{dt}q(t) = Gq(t), \qquad q(0) = u,
\label{mbackcon}
\end{align}
for any $u \in C_b (\mathcal{S})$. In the forthcoming theorems we propose two equivalent very general forms for the Kolmogorov's equations of stepped semi-Markov processes. Hence define the operator $\Pi_t^s$, on the space $C_b(\mathcal{S})$ of continuous and bounded functions on $\mathcal{S}$, as
\begin{align}
(\Pi_t^s u) (x):= \mathds{E}^x \left[ u(X(t)) \mid \gamma^X(0) = s \right] \, = \, \mathds{E}^y \left[ u(X(t+\tau)) \mid X(\tau) =x , \gamma^X(\tau) =s \right]
\end{align}
for any $y \in \mathcal{S}$ and $t, \tau \geq 0$, $0 \leq s\leq \tau$. Therefore, in order to study integro-differential equations we need some properties of the operator $\Pi_t^s$ and of the mapping $t \mapsto \Pi_t^su$. In particular we will write the Kolmogorov's equation of the process in two integro-differential forms, and hence we will then consider the case $s=0$, i.e., the mapping
\begin{align}
t \mapsto q(t) := \Pi_t u := \Pi_t^0u.
\end{align}
We have the following two results.
\begin{prop}
Let $u \in C_b\l \mathcal{S}\r$. Then $\Pi_t^su \in C_b \l \mathcal{S} \r$ for any $t \geq 0$.
\end{prop}
\begin{proof}
Consider the semigroup of operators associated with the strict Markov process $\l X(t), \gamma^X(t) \r$, i.e., the operators
\begin{align}
(T_t h)(x,s) : = \mathds{E}^{x} \left[ h(X(t), \gamma^X(t)) \mid \gamma^X (0) = s\right]
\label{semcoppia}
\end{align}
on the space $C_b(\mathcal{S} \times \mathbb{R}^+)$. Continuity of functions $h(x,s)$ on $\mathcal{S} \times \mathbb{R}^+$ simply means continuity in $s$ since $\mathcal{S}$ is equipped with the discrete topology. By \cite[Theorem 1, p. 239]{gihman} we know that $T_t$ maps $C_b \l \mathcal{S} \times \mathbb{R}^+ \r$ into itself. Now set $h(x,y) = u(x)$ for any $y \geq 0$ and note that
\begin{align}
(T_t h)(x,s) \, = \,& \int_{\mathcal{S}\times \mathbb{R}^+} u(y) P^{x} \l X(t) \in dy, \gamma^X (t) \in dw \mid \gamma^X (0) =s \r \notag \\
= \, & \int_{\mathcal{S}} u(y) P^{x} \l X(t) \in dy \mid \gamma^X (0) =s \r \notag \\
= \, &(\Pi_t^s u)(x).
\label{4166}
\end{align}
Since $T_th \in C_b \l \mathcal{S} \times \mathbb{R}^+ \r$ we have by \eqref{4166} that $\Pi_t^s u \in C_b \l \mathcal{S} \r $.
\end{proof}
\begin{prop}
\label{tediff}
Let $u \in C_b(\mathcal{S})$.
If $u$ is such that
\begin{align}
\lim_{t \to 0+} \frac{1}{t}\l \Pi_t^s u-u \r
\end{align}
exists as strong limit in $C_b \l \mathcal{S} \times [0, \infty) \r$ and if $t \mapsto F_{x,y}(t)$ is continuous for any $(x,y) \in \mathcal{S} \times \mathcal{S}$, then the mapping $t \mapsto \Pi_t^su$ is $C^1 \l [0, \infty), C_b (\mathcal{S}) \r$.
\end{prop}
\begin{proof}
Consider again the semigroup of operators associated with the strict Markov process $\l X(t), \gamma^X(t) \r$, i.e., the operators defined in \eqref{semcoppia}, on the space $C_b(\mathcal{S} \times \mathbb{R}^+)$. Recall again that continuity of functions $h(x,s)$ on $\mathcal{S} \times \mathbb{R}^+$ simply means continuity in $s$ since $\mathcal{S}$ is equipped with the discrete topology. The family $\l T_t \r_{t \geq 0}$ forms a (strongly continuous) $C_b(\mathcal{S}\times \mathbb{R}^+)$-Feller semigroup (see \cite[formula (33) p. 238 and Theorem 1 p. 239]{gihman}). Denote by $G^T$ the generator of $T_t$. Now set $h(x,y) = u(x)$ for any $y \in \mathbb{R}^+$. We have by \eqref{4166}
\begin{align}
(T_t h)(x,s) \, = \, (\Pi_t^su )(x).
\end{align}
Since $T_t$ is a strongly continuous Feller semigroup we know that the mapping $t \mapsto T_th$ is $C^1$ for any $h \in C_b(\mathcal{S} \times \mathbb{R}^+)$ in the domain of $G^T$, and hence it is sufficient to prove that $h(x,s):=u(x)$ belongs to $C_b (\mathcal{S} \times \mathbb{R}^+)$ and is in the domain of $G^T$. The fact that $h(x,s) = u(x) \in C_b(\mathcal{S} \times \mathbb{R}^+)$ is easy to see since we assume that $u(x) \in C_b(\mathcal{S})$. Now we prove that $h(x,s) = u(x) \in \text{Dom}(G^T)$. This can be shown by considering, as $t \to 0$,
\begin{align}
\left\| \frac{1}{t} \l T_th-h \r \right\| \, = \, \left\| \frac{1}{t} \l \Pi_t^s u-u\r \right\|
\label{limit}
\end{align}
and the limit on the right-hand side of \eqref{limit} exists by the hypotheses for any $s$.
\end{proof}
The following theorem provides the first version of the Kolmogorov equation for semi-Markov processes in the form of an integro-differential Volterra equation. We remark that from now on it will always be true that the c.d.f. $F_{x,y}$ is independent of $y$.
\begin{te}
\label{teclassiceq}
Let $u$ be as in Proposition \ref{tediff}. Assume that for each $x \in \mathcal{S}$ it is true that $\overline{F}_x(t) = \mathds{E}^y e^{-\theta(x)L^x(t)} $ for some $f(\lambda, x)$, with $\nu((0, \infty),x) = \infty$ and $s \mapsto \bar{\nu}(s,x)$ continuous for any $x$.
The mapping $t \mapsto q(t):=\Pi_tu$ satisfies the initial value problem
\begin{align}
\mathfrak{D}_t^{\bm{\cdot}} q(t) \, = \, G q(t), \qquad q(0) = u,
\label{classiceqgen}
\end{align}
where
\begin{align}
\l \mathfrak{D}_t^{\bm{\cdot}} q(t) \r \, (\cdot) \, : = \, \frac{d}{dt} \int_0^t q(s, \cdot) \, \bar{\nu}(t-s, \cdot) \, ds \,- \, \bar{\nu}(t, \cdot)q(0, \cdot)
\end{align}
with $q(s, \cdot):= (\Pi_s u)(\cdot)$ and $G$ is defined in \eqref{defG}.
\end{te}
\begin{proof}
Under our assumptions on $L^x (t)$ (section \ref{sectimech}) the functions $t \mapsto \mathds{E}e^{-\theta(x)L^{x}(t)}$ are continuous (see the proof of \cite[Theorem 3.1]{meertri}). Hence the conditions of Proposition \ref{tediff} are satisfied and $t \mapsto q(t)$ is continuously differentiable. This fact, together with the continuity of $s \mapsto \bar{\nu}(s, \cdot)$, ensures that the mapping $t \mapsto \mathfrak{D}_t^{\bm{\cdot}}q(t)$ is well defined.
By \cite[eq. (1.52), p. 20]{koro} we know that the mapping $q(t)$ satisfies
\begin{align}
(\Pi_t u)(x) = \overline{F}_x (t) u(x) + \int_0^t \int_{\mathcal{S}} (\Pi_{t-s}u )(y) \, h(x,dy) \, \mathpzc{f}_x(s)ds.
\label{koroswi}
\end{align}
Let $(\mathpzc{R}_\lambda u)(x) := \int_0^\infty e^{-\lambda t} (\Pi_t u)(x) dt$. By \eqref{319}, \eqref{3200} and the convolution theorem for Laplace transform we can take Laplace transform in \eqref{koroswi} to write
\begin{align}
(\mathpzc{R}_\lambda u)(x) = \frac{f(\lambda,x)}{\lambda} \frac{1}{\theta(x)+f(\lambda, x)}u(x) \, + \, \frac{\theta(x)}{\theta(x)+f(\lambda, x)} \int_\mathcal{S} (\mathpzc{R}_\lambda u) (y) h(x,dy).
\label{laplrlambda}
\end{align}
By rearranging \eqref{laplrlambda} we get
\begin{align}
f(\lambda, x) (\mathpzc{R}_\lambda u) (x) - \lambda^{-1}f(\lambda, x) u(x) \, = \, G (\mathpzc{R}_\lambda u)(x).
\label{rearranged}
\end{align}
It is easy to check that \eqref{rearranged} is the Laplace transform of \eqref{classiceqgen} by using again the convolution theorem and the fact that
\begin{align}
\int_0^\infty e^{-\lambda t} \bar{\nu}(t,x) \, dt \, = \, \lambda^{-1} f(\lambda, x).
\end{align}
\end{proof}
\begin{os} \normalfont
\label{remfrac}
We remark that the operator $\mathfrak{D}_t^{\bm{\cdot}}g(t)$ acting on functions $g:[0, \infty) \mapsto C_b(\mathcal{S})$ can be interpreted as a generalized ``variable-order'' fractional derivative. Suppose that for any $x$
\begin{align}
\overline{F}_x(t) \, = \, E_{\alpha(x)}(-\theta(x) t^{\alpha(x)}),
\label{mittsf}
\end{align}
where
\begin{align}
E_\alpha(z) : = \sum_{k=0}^\infty \frac{z^k}{\Gamma(\alpha k+1)}
\end{align}
is the Mittag-Leffler function. We obtain from \eqref{mittsf} the Laplace transform
\begin{align}
\widetilde{\overline{F}}_x(\lambda) \, = \, \lambda^{\alpha(x)-1} \frac{1}{\theta(x)+\lambda^{\alpha(x)}}.
\end{align}
Furthermore, since \eqref{mittsf} is completely monotone with first derivative diverging at zero (e.g. \cite[Section 3.1]{mainardimittag}), we can apply Theorem \ref{tecm} to say that $\overline{F}_x(t) = \mathds{E}^ze^{-\theta(x)L^{(x)}(t)}$, where the $L^{(x)}(t)$ form a family of inverses of $\alpha(x)$-stable subordinators, i.e., the representation \eqref{reprf} yields
\begin{align}
f(\lambda,x) \, = \, \lambda^{\alpha(x)}.
\end{align}
This implies $\bar{\nu}(s, x) = s^{-\alpha(x)}/\Gamma(1-\alpha(x))$ and hence
\begin{align}
\l \mathfrak{D}_t^{\bm{\cdot}} g(t) \r (\cdot) \, = \,& \frac{1}{\Gamma (1-\alpha (\cdot))} \frac{d}{dt} \int_0^t g(s,\cdot) \, (t-s)^{-\alpha(\cdot)}ds - \frac{t^{-\alpha(\cdot)}}{\Gamma(1-\alpha(\cdot))}g(0,\cdot)\label{fracvarord} \\
=: \, & \l \frac{d^{\alpha(\cdot)}}{dt^{\alpha(\cdot)}}g(t) \r (\cdot)\notag.
\end{align}
In formula \eqref{fracvarord} we recognize the fractional derivative in the sense of Caputo-Dzherbashyan (e.g. \cite[eq. (6.1.39)]{kilbas}).
\end{os}
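As a quick consistency check (purely illustrative), in the stable case of Remark \ref{remfrac} the Laplace transform identity used at the end of the proof of Theorem \ref{teclassiceq} can be verified directly: for $f(\lambda, x) = \lambda^{\alpha(x)}$,
\begin{align*}
\int_0^\infty e^{-\lambda t} \, \bar{\nu}(t,x) \, dt \, = \, \int_0^\infty e^{-\lambda t} \, \frac{t^{-\alpha(x)}}{\Gamma(1-\alpha(x))} \, dt \, = \, \lambda^{\alpha(x)-1} \, = \, \lambda^{-1} f(\lambda, x).
\end{align*}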
We now show that also equation \eqref{k2} can be extended to our framework. Since we do not assume in this paper that our subordinators are special (as was assumed in \cite{meertoa}), we write here an equation along the same lines as \eqref{k2}, valid for general subordinators.
We consider here the operator $\mathcal{D}_t^\star g(t)$, acting on functions $g:[0, \infty) \mapsto C_b(\mathcal{S})$, given by
\begin{align}
\l \mathcal{D}_t^\star g(t) \r (\cdot) \, : = \, \frac{d}{dt} \int_0^t g(s, \cdot) \, u^f(t-s,\cdot) \, ds
\label{cappotmeas}
\end{align}
where $u^f(s,\cdot)$ is the potential density of a subordinator with Laplace exponent $f(\lambda, \cdot)$, $\cdot \in \mathcal{S}$. Of course it is not necessarily true that such a density exists. For example if $s \mapsto \bar{\nu}(s,\cdot)$ is absolutely continuous and $\nu((0, \infty), \cdot) = \infty$ then the density exists since the distribution of $\sigma^{(\cdot)} (t)$ has a density (by \cite[Theorem 27.7]{satolevy}) which we denote by $s \mapsto \mu(s, t)$ and hence we have
\begin{align}
u^f(s,\cdot) \, ds \, = \, \mathds{E}^x \int_0^\infty \mathds{1}_{\left[ \sigma^{(\cdot)}(t) \in ds \right]} dt \, = \, ds\int_0^\infty \mu (s, t) \, dt.
\end{align}
Using \eqref{cappotmeas} we have the following evolutionary form for the equation \eqref{classiceqgen}.
\begin{te}
\label{teeqevol}
Let $u$ be as in Proposition \ref{tediff}. Assume that for any $x \in \mathcal{S}$ it is true that $\overline{F}_{x}(t) =\mathds{E}^{y}e^{-\theta(x)L^{(x)}(t)}$ for some $f(\lambda, x)$ with $\nu((0, \infty),x) = \infty$, and assume that there exists a continuous potential density $s \mapsto u^f(s,x)$.
Then $t \mapsto q(t)$ satisfies the evolutionary Cauchy problem
\begin{align}
\frac{d}{dt} q(t) \, = \,\mathcal{D}_t^\star \;G q(t), \qquad q(0) = u.
\label{evoleq}
\end{align}
\end{te}
\begin{proof}
Of course the operator $\mathcal{D}_t^\star q(t)$ is well defined since $t \mapsto q(t)$ is continuously differentiable and $s \mapsto u^f(s, \cdot)$ is continuous. Hence to prove this theorem it is sufficient to rearrange \eqref{rearranged} multiplying both sides by $\lambda /f(\lambda, x)$ to write
\begin{align}
\lambda (\mathpzc{R}_\lambda u)(x) - u (x) \, = \, \frac{\lambda}{f(\lambda, x)} G (\mathpzc{R}_\lambda u) (x).
\label{427}
\end{align}
By the convolution theorem for the Laplace transform and since the Laplace transform of the potential density $u^f(s, \cdot)$ is
\begin{align}
\int_0^\infty e^{-\lambda s} u^f(s, \cdot) \, ds \, = \, \frac{1}{f(\lambda, \cdot)},
\end{align}
it is easy to see that \eqref{427} is the Laplace transform of \eqref{evoleq}.
\end{proof}
\begin{os} \normalfont
Suppose we are in the situation of Remark \ref{remfrac}. Then the potential densities corresponding to the $\alpha(x)$-stable subordinators are
\begin{align}
u^f(s, \cdot) \, = \, \frac{s^{\alpha(\cdot)-1}}{\Gamma(\alpha(\cdot))}.
\end{align}
Hence the operator $\mathcal{D}^\star $ becomes
\begin{align}
\l \mathcal{D}_t^\star q(t) \r (\cdot) \, = \, \frac{d^{1-\alpha(\cdot)}}{dt^{1-\alpha(\cdot)}}q(t, \cdot)
\end{align}
but the fractional derivative must now be understood in the Riemann-Liouville sense (e.g. \cite[p. 69]{kilbas}).
\end{os}
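Similarly, in this situation the Laplace transform of the potential density used in the proof of Theorem \ref{teeqevol} can be checked directly (again only as an illustration):
\begin{align*}
\int_0^\infty e^{-\lambda s} \, u^f(s, \cdot) \, ds \, = \, \int_0^\infty e^{-\lambda s} \, \frac{s^{\alpha(\cdot)-1}}{\Gamma(\alpha(\cdot))} \, ds \, = \, \lambda^{-\alpha(\cdot)} \, = \, \frac{1}{f(\lambda, \cdot)},
\end{align*}
for $f(\lambda, \cdot) = \lambda^{\alpha(\cdot)}$.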
\begin{ex}[Time-changed Poisson process]
\label{expoieq}
Suppose that the r.v.'s $J_i$ have distribution $P^x (J_i > t \mid X_n = z, X_{n+1} = y) = e^{-\theta t}$ and that $X_n$ is a discrete Markov chain in $\mathbb{R}^d$ with transition probabilities $h(x,dy) = \delta_{x+l}(dy)$ where $\delta_z(\cdot)$ is the Dirac point mass at $z \in \mathbb{R}^d$. Then define
\begin{align}
N^l(t) \, = \, X_n, \qquad \sum_{i=0}^n J_i \leq t < \sum_{i=0}^{n+1} J_i.
\end{align}
Hence $N^l(t)$ is a Poisson process in $\mathbb{R}^d$ with jump height $l \in \mathbb{R}^d$.
Now let $\mathpzc{J}_i$ be independent r.v.'s with distribution
\begin{align}
P^x \l \mathpzc{J}_n > t \mid X_n=z, X_{n+1} = y \r \, = \, \overline{F}_z(t)
\end{align}
and define
\begin{align}
\mathpzc{N}^l(t) \, = \, X_n, \qquad \sum_{i=0}^n \mathpzc{J}_i \leq t < \sum_{i=0}^{n+1} \mathpzc{J}_i.
\end{align}
Hence $\mathpzc{N}^l (t)$ is a semi-Markov process in the sense of Gihman and Skorohod defined as in Section \ref{defsemi}.
Now assume that $t \mapsto \overline{F}_y(t)$ is completely monotone. Hence by Theorem \ref{tecm} we know that there exists a process $L^\Pi$ defined as in section \ref{sectimech} by setting for any $x \in \mathbb{R}^d$,
\begin{align}
f(\lambda, x) = \frac{\lambda\widetilde{\overline{F}}_x (\lambda)}{1-\lambda \widetilde {\overline{F}}_x(\lambda)},
\label{easyto}
\end{align}
such that $\mathpzc{N}^l(t)$ and $N^l \l L^\Pi (t) \r $ are the same process. Furthermore, since by Theorem \ref{tecm} the function $\lambda \mapsto f(\lambda, x)$ is, for any $x \in \mathbb{R}^d$, a complete Bernstein function, we know that the L\'evy measures $\nu(\cdot,x)$ have a completely monotone density and hence also the tails $s \mapsto \bar{\nu}(s,x)$ are completely monotone. Moreover, the potential densities $s \mapsto u^f(s, x)$ exist and are completely monotone \cite[Remark 10.6]{librobern} for any $x \in \mathbb{R}^d$. Hence all the continuity properties are satisfied and we can apply Theorems \ref{teclassiceq} and \ref{teeqevol} to say that the mapping $t \mapsto q(t,x): = \mathds{E}^xu(\mathpzc{N}^l(t))$, for $u \in C_b \l \mathbb{R}^d \r$ satisfying the condition of Proposition \ref{tediff}, solves
\begin{align}
\frac{d}{dt} \int_0^t q(s,x) \, \bar{\nu}(t-s,x)ds-\bar{\nu}(t,x)q(0,x) \, = \, \frac{\theta}{|l|} \l q(t,x+l)-q(t,x)\r,
\end{align}
under $q(0,x) = u(x)$, as well as
\begin{align}
\frac{d}{dt} q(t,x) \, = \,\frac{\theta}{|l|} \frac{d}{dt} \int_0^t \l q(s,x+l)-q(s,x) \r \, u^f(t-s,x) \, ds,
\end{align}
under $q(0,x) = u(x)$.
See also Garra et al. \cite{garra} for a similar equation related to a state-dependent Poisson model.
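For a concrete instance (given here only as an illustration), suppose in addition that $\overline{F}_x(t) = E_{\alpha(x)}(-\theta t^{\alpha(x)})$ as in Remark \ref{remfrac}, so that $\widetilde{\overline{F}}_x(\lambda) = \lambda^{\alpha(x)-1}/\l \theta+\lambda^{\alpha(x)} \r$. Then \eqref{easyto} gives
\begin{align*}
f(\lambda, x) \, = \, \frac{\lambda \widetilde{\overline{F}}_x(\lambda)}{1-\lambda \widetilde{\overline{F}}_x(\lambda)} \, = \, \frac{\lambda^{\alpha(x)}/\l \theta+\lambda^{\alpha(x)} \r}{\theta/\l \theta+\lambda^{\alpha(x)} \r} \, = \, \frac{\lambda^{\alpha(x)}}{\theta},
\end{align*}
so that $\bar{\nu}(s,x) = s^{-\alpha(x)}/\l \theta \, \Gamma(1-\alpha(x)) \r$, $u^f(s,x) = \theta \, s^{\alpha(x)-1}/\Gamma(\alpha(x))$, and the two equations above become fractional difference-differential equations of variable order, i.e., a state-dependent fractional Poisson-type model.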
\end{ex}
\section{General semi-Markov processes as limits of stepped semi-Markov processes}
In the present section we study weak limits of the semi-Markov processes defined above. Denoting by $M^c$ a stepped Markov process depending on a parameter $c$, and by $X^c(t)=M^c (L^\Pi(t))$ the semi-Markov process obtained by the time-change with a dependent random time $L^\Pi$, we study the limit as $c\to 0$ of the single-time distribution of $X^c$. Formally, assuming that the generator $G^c$ of the semigroup $P_t^c$ associated with $M^c$ converges to a generator $G$ on a suitable function space, we are able to study the convergence of $\Pi_t^cu$ for $u \in C_b (\mathcal{S})$.
Then by assigning to the parameter $c$ different meanings we can reexamine some classical results on the convergence of Markov processes, in our semi-Markov framework.
It is a known fact that any L\'evy process is the limit as $c\to 0$ of a suitable sequence of compound Poisson processes, say $M^c$, where $c$ represents the lower bound for the jump sizes; an analogous result holds for L\'evy-type processes in the sense of \cite[p. 366]{kolokoltsov}, i.e., jump processes whose L\'evy measures not only depend on the jump size, but also on the current position. Moreover, we recall that some stepped Markov processes are continuous time random walks \br{in $\mathbb{R}^d$ jumping on a lattice of size $c$}. By letting $c\to 0$ (and simultaneously scaling the intensity of jumps) \br{one obtains processes whose paths are no longer stepped functions}. Following this procedure, the Brownian motion is known to be obtained as a limit of symmetric random walks.
Hence we study here the semi-Markov analogue of these facts, where a sequence of semi-Markov processes $X^c$ satisfies the assumptions in Theorem \ref{tetimech} or \ref{tecm} and hence $M^c$ is time-changed by a dependent process $L^\Pi$.
An interesting case is the one leading to a diffusive limit. Very recently, indeed, fractional diffusion equations have been derived, exhibiting a variable order fractional derivative (see \cite{baestra, checgore, fedofalco}). These are models of anomalous diffusion in inhomogeneous media. In the heuristic discussion in \cite{checgore}, it is supposed that each lattice site has a trapping effect which leads to a sub-diffusive dynamics; in particular, the distribution of the sojourn time is here assumed to depend on the site itself, and this leads to a state dependent anomalous exponent. The study of this matter seems to be still at an early stage. Our results specialized to this situation provide a probabilistic derivation of the variable order diffusion equation.
The results below also extend the analysis given in \cite{kurtz}, where asymptotic results on semi-Markov processes were discussed by considering only state-dependent holding times with finite expectation. Our processes $M^c$ can have holding times with infinite mean and this turns out to be the most interesting situation.
In particular, Section \ref{seclimit} is devoted to the case in which the waiting times $J_i$ have infinite mean; this is obtained by requiring that the Bernstein functions $f(\lambda, x)$ defining the exponent $\Pi$ are regularly varying at zero. By giving $c$ the meaning of a scale parameter, the result in Theorem \ref{telimit} below provides a further probabilistic derivation of the (variable-order) $\alpha(x)$ diffusion equation (together with other related results). This is done by means of a local scaling of the lattice size, in which the scaling factor is given by the regularly varying function $f(c, x)$ depending on the current position $x$: passing to the limit, this gives rise to the fractional operator of state-dependent order.
\subsection{Limit of stepped semi-Markov processes}
\label{seclimfacile}
Here is the first general statement. The following theorem assumes the convergence of the generator $G^c$ to a generator $G^0$ and studies the convergence of the corresponding semi-Markov process $X^c(t)$ constructed as in Section \ref{sectimech}.
\begin{te}
\label{telimfacile}
Let $M^c$ be a family of Markov processes associated with the semigroups $P_t^c$ on the space $C_b \l \mathcal{S} \r$ having generators $G^c$ which can be written in the form $(G^cu)(\cdot):=\theta^c(\cdot) \int_{\mathcal{S}} \l u(y)-u(\cdot) \r h^c(\cdot,dy)$. Let $G^0$ be a (dissipative) generator of a semigroup on the Banach space $C_0(\mathcal{S})$, where $C_0(\mathcal{S})$ is the space of continuous functions vanishing at infinity, and assume that $G^cu \to G^0u$ for $u \in \text{Dom}(G^0)$. Let $X^c(t)$ be a family of stepped semi-Markov processes defined as in Section \ref{defsemi}, each one of which satisfies the assumptions of Theorem \ref{tetimech} with respect to the Markov process $M^c$, and hence denote
\begin{align}
\l \Pi_t^c u \r (x) : = \mathds{E}^x u(X^c(t)) \, = \, \mathds{E}^x u \l M^c \l L^\Pi (t) \r \r.
\end{align}
Let $\mathpzc{R}^c_\lambda u:= \int_0^\infty e^{-\lambda t} \Pi_t^cu \,dt$. Then we have that the limit
\begin{align}
\mathpzc{R}^0_\lambda u : = \lim_{c \to 0} \mathpzc{R}_\lambda^cu
\end{align}
exists for any $u \in C_0(\mathcal{S})$ and $ \frac{\lambda}{f(\lambda, \cdot)}\l f(\lambda, \cdot)I - G^0 \r \mathpzc{R}_\lambda^0= I$.
\end{te}
\begin{proof}
First observe that by rearranging \eqref{rearranged} we have for any $u \in C_b \l \mathcal{S} \r$ that
\begin{align}
\frac{\lambda}{f(\lambda, \cdot)} \l f(\lambda, \cdot) - G^c \r \mathpzc{R}_\lambda^cu \, = \, u.
\end{align}
We show that the elements $\mathpzc{R}_\lambda^{1/n}u$ for $n \in \mathbb{N}$ form a Cauchy sequence.
Let $w \in \text{Dom}(G^0)$ and set $h_n:= \frac{\lambda}{f(\lambda, \cdot)} \l f(\lambda, \cdot) I- G^{1/n} \r w$; then we have that
\begin{align}
h_n \to h= \frac{\lambda}{f(\lambda, \cdot)} \l f(\lambda, \cdot) - G^0 \r w.
\end{align}
Now note that
\begin{align}
\mathpzc{R}^{1/n}_\lambda h-\mathpzc{R}^{1/m}_\lambda h \, = \, \mathpzc{R}^{1/m}_\lambda (h_m-h) + \mathpzc{R}^{1/n}_\lambda (h-h_n) + \l \mathpzc{R}^{1/n}_\lambda h_n - \mathpzc{R}^{1/m}_\lambda h_m \r.
\label{decres}
\end{align}
Since for any $n \in \mathbb{N}$ and $\lambda >0$ we have that $|| \mathpzc{R}_\lambda^{1/n}|| \leq 1/\Re\lambda$, the first two terms go to zero as $m,n \to \infty$ while the last term in \eqref{decres} is clearly zero. Therefore the limit exists for functions
\begin{align}
u \in \frac{\lambda}{f(\lambda, \cdot)} \l f(\lambda, \cdot) -G^0 \r \text{Dom}(G^0) = \l \lambda - \frac{\lambda}{f(\lambda, \cdot)} G^0 \r \text{Dom}(G^0).
\end{align}
Since $G^0$ is a (dissipative) generator of a contraction semigroup we have (e.g. \cite[Thm 3.4.5]{abhn}) $\l \lambda -G^0 \r \text{Dom}(G^0) = C_0(\mathcal{S})$.
To conclude the proof observe that
\begin{align}
\frac{\lambda}{f(\lambda, \cdot)} \l f(\lambda, \cdot) - G^0 \r \mathpzc{R}_\lambda^0 w \, = \, & \lim_{n \to \infty} \frac{\lambda}{f(\lambda, \cdot)} \l f(\lambda, \cdot) - G^{1/n} \r \mathpzc{R}_\lambda^{1/n} w \, = \, w.
\end{align}
\end{proof}
\begin{os} \normalfont
We remark that the limit $\mathpzc{R}_\lambda^0u$ obtained in Theorem \ref{telimfacile} satisfies, for $u \in \text{Dom}(G^0)$,
\begin{align}
f(\lambda, \cdot)\mathpzc{R}_\lambda^0u- \lambda^{-1}f(\lambda, \cdot) u = G^0 \mathpzc{R}^0_\lambda u.
\label{inve}
\end{align}
Hence if one further assumes that the family $q^c(t):=\l \Pi_t^cu \r_{c \geq 0}$ has a limit, then Theorem \ref{telimfacile} implies the convergence of $q^c(t)$ to the solution of the variable order equation
\begin{align}
\mathfrak{D}_t^{\bm{\cdot}}q^0(t) = G^0 q^0(t), \qquad q^0(0) = u \in \text{Dom}(G^0).
\end{align}
obtained by inverting ($\lambda \mapsto t$) the resolvent equation \eqref{inve}.
The equation \eqref{inve} plays the role of the Kolmogorov's equation for a limit process when such a process exists. This process indeed exists and still is a semi-Markov process, for example, when $X^c$ is a continuous time random walk with state space in $\mathbb{R}^d$ defined as in \cite[Section 2]{meerstra} and hence $c$ represents here a scale parameter. In this case the limit process is a semi-Markov process in the sense of Gihman and Skorohod (see Harlamov \cite[section 3.12, p. 76]{harlamov}), i.e., the limit process $X^0(t)$ is such that $\l X^0(t), \gamma^0(t) \r$ is a Markov process.
\end{os}
\subsubsection{Convergence to Brownian motion and L\'evy type processes} \label{convbm} Suppose that the embedded chain $X_n$ runs on $\mathbb{R}^d$ with transition probabilities given by
\begin{align}
h(x,dy) \, = \, \frac{1}{2d} \sum_{i=1}^d \l \delta_{x+e_i}(dy)+\delta_{x-e_i}(dy) \r.
\label{ciao}
\end{align}
Now if we set the transition probabilities of $X_n^c$ as in \eqref{ciao} with jump height $c > 0$ we get
\begin{align}
h^c(x, dy) \, = \, \frac{1}{2d} \sum_{i=1}^d \l \delta_{x+ce_i}(dy)+\delta_{x-ce_i}(dy) \r.
\label{ciaoc}
\end{align}
Now suppose that the distribution of the waiting times $J_i$ is exponential with parameter $1/c^2$. By collecting all pieces together we obtain the corresponding Markov process $M^c(t)$ generated by
\begin{align}
\l G^cu \r (x) \, = \, & \frac{1}{2dc^2} \sum_{i=1}^d \l u(x+ce_i)+u(x-ce_i)-2u(x) \r.
\end{align}
By letting $c \to 0$ we have, for functions in $C^2 \l \mathbb{R}^d \r$, that $G^cu \to \frac{1}{2} \Delta u$, which is the generator of the strongly continuous heat semigroup on $C_0(\mathcal{S})$. Now suppose that the semi-Markov process $X^c(t)$ has the same embedded chain $X_n^c$ and has waiting times $J_i^c$ between jumps as in Theorem \ref{tetimech}, with survival function $\overline{F}_x(t) = \mathds{E}^z e^{-(1/c^2)L^{(x)}(t)}$, so that
Theorem \ref{telimfacile} holds and yields
\begin{align}
\int_0^\infty e^{-\lambda t} \mathds{E}^x u(X^c (t)) dt \to \widetilde{g}^0(\lambda,x)
\end{align}
where $\widetilde{g}^0 (\lambda,x)$ is the Laplace transform of the solution to
\begin{align}
\mathfrak{D}_t (x) g^0(t,x) \, = \, \frac{1}{2} \Delta g^0(t, x),
\end{align}
where
\begin{align}
\mathfrak{D}_t (x) g^0(t,x) \, = \, \frac{d}{dt} \int_0^t g^0(s, x) \, \bar{\nu}(t-s,x) \, ds \, - \, \bar{\nu}(t,x) g^0(0,x).
\end{align}
If instead the transition probabilities of the chain $X_n^c$ are
\begin{align}
h^c(x, d(y-x)) \, = \, \frac{\nu(x, d(y-x))}{\nu(x, \mathbb{R}^d \backslash B_c(0) )} \mathds{1}_{ \left[ \mathbb{R}^d \backslash B_c(0) \right]}(y-x)
\end{align}
where $B_c(0) = \ll x \in \mathbb{R}^d : |x| \leq c \rr$ and $\mathcal{B}\l \mathbb{R}^d \r \ni B \mapsto \nu(x, B)$ is a $\sigma$-finite measure on $\mathbb{R}^d$ such that for any $x$
\begin{align}
\int_{\mathbb{R}^d} \l |y-x|^2 \wedge 1 \r \nu(x, d(y-x)) < \infty
\end{align}
and if the parameter of the exponential r.v. is $\theta^c(x) = \nu(x, \mathbb{R}^d \backslash B_c(0) )$ we obtain a Markov process $M^c(t)$ generated by
\begin{align}
\l G^cu \r (x) \, = \, \int_{\mathbb{R}^d\backslash B_c(0)} \l u(y) -u(x) \r \nu(x, d(y-x))
\end{align}
and by letting $c \to 0$ we obtain the L\'evy type generator \cite[formula (8.51), p. 366]{kolokoltsov}
\begin{align}
\l G^0 u \r (x) \, = \, \int_{\mathbb{R}^d} \l u(y) -u(x) \r \nu(x, d(y-x)).
\end{align}
with domain given by a subset of $C_b(\mathcal{S})$.
Hence by using again Theorem \ref{telimfacile} we have that a semi-Markov process with the same embedded chain and whose waiting times $J_i^c$ have c.d.f.
\begin{align}
1-\overline{F}_x(t) \, = \, 1-\mathds{E}e^{-\theta^c(x) L^{(x)}(t)}
\end{align}
is such that
\begin{align}
\int_0^\infty e^{-\lambda t} \mathds{E}^xu(X^c(t))dt \to \widetilde{g}^0(\lambda,x)
\end{align}
where $\widetilde{g}^0(\lambda,x)$ is the Laplace transform of the solution to
\begin{align}
\mathfrak{D}_t(x) g(t, x) \, = \, \int_{\mathbb{R}^d} \l g(t,y) -g(t,x) \r \nu(x, d(y-x)).
\end{align}
\subsection{Convergence to the fractional equation}
\label{seclimit}
In this section we focus on the situation in which the waiting times $J_i$ have infinite mean. This is obtained here by assuming that the Bernstein functions $f(\lambda, x)$ defining the exponent $\Pi$ are regularly varying at zero with index $\alpha(x) \in (0,1)$. We recall that a function $f(\lambda)$ varies regularly at zero if $g(\lambda):=f(1/\lambda)$ varies regularly at infinity, i.e.,
\begin{align}
\lim_{\lambda \to \infty} \frac{g(c\lambda)}{g(\lambda)} = c^\alpha, \qquad \alpha \geq 0.
\end{align}
By Karamata's characterization theorem we know that every regularly varying function can be written in the form $g(\lambda) = \lambda^\alpha L(\lambda)$, where $L(\lambda)$ is a slowly varying function, i.e., $L$ is such that
\begin{align}
\lim_{\lambda \to \infty} \frac{L(c\lambda)}{L(\lambda)} = 1.
\end{align}
This assumption on the Bernstein function $f(\lambda, x)$ implies by \cite[formula (2.32)]{meertoa} that
\begin{align}
\int_0^\infty \overline{F}_x(t) \, dt \, = \, \int_0^\infty \mathds{E}^ze^{-\theta(x)L^{(x)}(t)} \, dt \, = \, \infty.
\end{align}
Hence the waiting times $J_i$ with c.d.f. $1-\overline{F}_x(t)$ are such that $\mathds{E}J_i = \infty$.
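A simple class of examples (listed only for illustration) is given by $f(\lambda, x) = \lambda^{\alpha(x)}$ or, more generally, by
\begin{align*}
f(\lambda, x) \, = \, \lambda^{\alpha(x)} + \lambda^{\beta(x)}, \qquad 0<\alpha(x)<\beta(x)<1,
\end{align*}
which is a complete Bernstein function with $\nu((0,\infty),x) = \infty$ and satisfies $f(c\lambda, x)/f(c,x) \to \lambda^{\alpha(x)}$ as $c \to 0$, and hence is regularly varying at zero with index $\alpha(x)$.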
In the following theorem we assume again that the (suitably rescaled) generator $G^c$ converges to a generator $G^0$. Under the additional assumption of regular variation described above we study the limit of the corresponding semi-Markov processes.
\begin{te}
\label{telimit}
Let $M^c$ be a family of Markov processes associated with the semigroups $P_{t}^c$ on the space $C_b \l \mathcal{S} \r$ having generators $G^c$ given by $(G^cu)(\cdot):=\theta^c(\cdot) \int_{\mathcal{S}} \l u(y)-u(\cdot) \r h^c(\cdot,dy)$. Let $G^0$ be a (dissipative) generator of a semigroup on the Banach space $C_0(\mathcal{S})$ equipped with the sup-norm and assume that $c^{-1}G^cu \to G^0u$ for $u \in \text{Dom}(G^0)$. Let $f(\lambda, x)$ be regularly varying at zero with index $\alpha(x) \in (0,1)$ for any $x \in \mathcal{S}$. Now denote by $P_t^{f(c,\cdot)}$ the semigroup generated by $G^{f(c,\cdot)}u:= \theta^{f(c, \cdot)}(\cdot)\int_{\mathcal{S}}\l u(y) - u(\cdot) \r h^{f(c, \cdot)}(\cdot,dy)$ and denote the corresponding Markov process by $M^{f(c,\cdot)}$. Let $\l X^c(t) \r_{c \geq 0}$ be a family of stepped semi-Markov processes defined as in Section \ref{defsemi}, each one of which satisfies the assumptions of Theorem \ref{tetimech} with respect to the Markov process $M^{f(c,x)}$, and hence denote
\begin{align}
\l \Pi_t^cu \r (x) := \mathds{E}^x u\l X^c(t) \r \, = \, \mathds{E}^xu \l M^{f(c,x)} \l L^\Pi (t) \r \r.
\end{align}
Let $\mathpzc{R}_\lambda^cu:=\int_0^\infty e^{-\lambda t} \Pi_{t/c}^cu\, dt$. Then we have that the limit
\begin{align}
\mathpzc{R}^0_\lambda u:=\lim_{c \to 0}\mathpzc{R}^{c}_\lambda u
\end{align}
exists for any $u \in C_0(\mathcal{S})$ and $ \lambda^{1-\alpha(\cdot)}\l \lambda^{\alpha(\cdot)}I-G^0 \r\mathpzc{R}_\lambda^0= I$.
\end{te}
\begin{proof}
First observe that
\begin{align}
\mathpzc{R}_\lambda^c u \, = \, c\int_0^\infty e^{-\lambda ct} \Pi_t^cu \, dt
\end{align}
and hence as in the proof of Theorem \ref{telimfacile} we find, by rearranging \eqref{rearranged}, that for any $u \in C_b \l \mathcal{S} \r$
\begin{align}
\frac{\lambda}{f(c\lambda, \cdot)} \l f(c\lambda, \cdot) - G^{f(c, \cdot)} \r \mathpzc{R}_\lambda^cu \, = \, u.
\end{align}
Let $w \in \text{Dom}(G^0)$ and use the fact that $f(\lambda, x)$ is regularly varying at zero to say that $f(\lambda, x) = \lambda^{\alpha(x)}L(\lambda, x)$ where $\lambda \mapsto L(\lambda, x)$ is slowly varying. Hence, as $c \to 0$,
\begin{align}
\lim_{c \to 0}\frac{\lambda}{f(c\lambda, \cdot)} \l f(c\lambda, \cdot) - G^{f(c, \cdot)} \r w \, = \, &\lim_{c \to 0} \frac{\lambda^{1-\alpha(\cdot)}}{c^{\alpha (\cdot)}L(\lambda c, \cdot)} \l (\lambda c)^{\alpha(\cdot)}L(\lambda c, \cdot)-G^{f(c, \cdot)} \r w \notag \\
= \,&\lim_{c \to 0} \frac{\lambda^{1-\alpha(\cdot)}}{f(c, \cdot)} \l (\lambda )^{\alpha(\cdot)}f( c, \cdot)-G^{f(c, \cdot)} \r w \notag \\
= \, &\lambda^{1-\alpha(\cdot)} \lim_{c \to 0} \l \lambda^{\alpha (\cdot)} - \frac{1}{f(c, \cdot)} G^{f(c, \cdot)} \r w \notag \\
= \, & \lambda^{1-\alpha(\cdot)} \l \lambda^{\alpha (\cdot)} - G^0 \r w.
\label{525}
\end{align}
The elements $\mathpzc{R}_\lambda^{1/n}u$ for $n \in \mathbb{N}$ form a Cauchy sequence. This can be proved as in the proof of Theorem \ref{telimfacile}.
Let $h_n:= \frac{\lambda}{f(\lambda/n, \cdot)} \l f(\lambda/n, \cdot) I- G^{f \l 1/n, \cdot \r} \r w$ then we have by \eqref{525}
\begin{align}
h_n \to h= \lambda^{1-\alpha(\cdot)}\l \lambda^{\alpha (\cdot)} - G^0 \r w.
\end{align}
Now note that
\begin{align}
\mathpzc{R}^{1/n}_\lambda h-\mathpzc{R}^{1/m}_\lambda h \, = \, \mathpzc{R}^{1/m}_\lambda (h_m-h) + \mathpzc{R}^{1/n}_\lambda (h-h_n) + \l \mathpzc{R}^{1/n}_\lambda h_n - \mathpzc{R}^{1/m}_\lambda h_m \r.
\label{527}
\end{align}
Since for any $n \in \mathbb{N}$ and $\lambda >0$ we have that $|| \mathpzc{R}_\lambda^{1/n}|| \leq 1/\Re\lambda$, the first two terms go to zero as $m,n \to \infty$ while the last term in \eqref{527} is clearly zero. Therefore the limit exists for functions
\begin{align}
u \in \lambda^{1-\alpha(\cdot)}\l \lambda^{\alpha (\cdot)} - G^0 \r \text{Dom}(G^0) = \l \lambda - \lambda^{1-\alpha(\cdot)} G^0 \r \text{Dom}(G^0).
\end{align}
Since $G^0$ is dissipative we have (e.g. \cite[Corollary 3.4.6]{abhn}) $\l \lambda -G^0 \r \text{Dom}(G^0) = C_0(\mathcal{S})$.
To conclude the proof observe that
\begin{align}
\lambda^{1-\alpha(\cdot)} \l \lambda^{\alpha (\cdot)} - G^0 \r \mathpzc{R}_\lambda^0 w \, = \, & \lim_{n \to \infty} \frac{\lambda}{f(\lambda/n,\cdot)} \l f(\lambda/n, \cdot) - G^{f\l 1/n, \cdot \r} \r \mathpzc{R}_\lambda^{1/n} w \, = \, w.
\end{align}
\end{proof}
\begin{os} \normalfont
Note that the limit $\mathpzc{R}_\lambda^0u$ obtained in Theorem \ref{telimit} satisfies, for $u \in \text{Dom}(G^0)$
\begin{align}
\lambda^{\alpha (\cdot)}\mathpzc{R}_\lambda^0u- \lambda^{\alpha (\cdot)-1} u = G^0 \mathpzc{R}^0_\lambda u.
\label{inverfrac}
\end{align}
By inverting Laplace transform in \eqref{inverfrac} $(\lambda \mapsto t)$ we get the variable order fractional equation
\begin{align}
\frac{d^{\alpha(\cdot)}}{dt^{\alpha(\cdot)}} q^0(t) = G^0 q^0(t), \qquad q^0(0) = u.
\label{fracvareq}
\end{align}
Hence if we additionally assume that the family $q^c(t):=\l \Pi_t^cu \r_{c \geq 0}$ has a limit then Theorem \ref{telimit} implies the convergence of $q^c(t)$ to the solution of the fractional variable order equation \eqref{fracvareq}.
As stated in \cite{kurtz}, the asymptotic behaviour of a semi-Markov process is Markovian when the holding times have finite mean. Here the limit process cannot be Markovian since it is governed by a fractional equation. Hence the asymptotic Markovian behaviour is lost due to such heavy-tailed waiting times.
\end{os}
\subsubsection{The limit of the Poisson process and the inverse stable subordinator}
Consider the strongly continuous Poisson semigroup on the space $C_b \l \mathbb{R}^d \r$ defined as
\begin{align}
(P_t u)(x) \, = \, \sum_{j=0}^\infty u(x+lj) \frac{(\theta t)^j}{j!}e^{-\theta t}
\end{align}
for $l \in \mathbb{R}^d$ with $|l|=1$. This corresponds to the Poisson process, say $N^l(t)$, with intensity $\theta$ and jump height $l \in \mathbb{R}^d$ introduced in Example \ref{expoieq}. Now let $P_t^c$ be the family of Poisson semigroups
\begin{align}
(P_t^cu)(x) \, = \, \sum_{j=0}^\infty u(x+clj) \frac{(\theta t)^j}{j!}e^{-\theta t}
\end{align}
which corresponds to the Poisson process $N^{cl}$ with jump height $cl$, $l \in \mathbb{R}^d$, $c >0$ and consider the operator $P_{t/c}^c$ which is still a Poisson semigroup and has generator
\begin{align}
(G^c u)(x) \, : = \, \theta \frac{u(x+cl)-u(x)}{c}.
\end{align}
Hence by letting $c \to 0$ we have that
\begin{align}
G^c u \to G^0 u \, = \, \theta \nabla_l u, \qquad u \in C^1_l(\mathbb{R}^d).
\end{align}
Let $X_n^c$ be a Markov chain with transition probabilities $h^{f(c,x)}(x,dy) = \delta_{x+f(c,x)l}(dy)$ and consider the semigroup $P_t^{f(c,x)}$ generated by
\begin{align}
(G^{f(c,x)}u)(x) \, = \, \theta \l u(x+f(c,x)l)-u(x)\r
\end{align}
and let $N^{f(c,x)l}$ be the associated Markov process. Now let $\mathpzc{J}_i$ be independent r.v.'s with distribution
\begin{align}
\overline{F}_y(t) \, = \, P^x \l \mathpzc{J}_i >t \mid X_i = y, X_{i+1} = z \r
\end{align}
and define
\begin{align}
\mathpzc{N}^c(t) \, = \, X_n^c, \qquad \sum_{i=0}^n \mathpzc{J}_i \leq t < \sum_{i=0}^{n+1} \mathpzc{J}_i,
\end{align}
and denote
\begin{align}
(\Pi_t^cu)(x) \, := \, \mathds{E}^x u \l \mathpzc{N}^c(t) \r \, = \, \mathds{E}^x u \l N^{f(c,x)l} \l L^\Pi (t) \r \r
\end{align}
and assume that $\widetilde{\overline{F}}_x(\lambda)$ varies regularly at zero with index $\alpha(x)-1$ for $\alpha(x) \in (0,1)$, for any $x \in \mathbb{R}^d$. Use this and \eqref{easyto} to check that $\lambda \mapsto f(\lambda, x)$ varies regularly at zero with index $\alpha(x) \in (0,1)$ for any $x \in \mathbb{R}^d$.
Now apply Theorem \ref{telimit} to say that for any $x \in \mathcal{S}$,
\begin{align}
\int_0^\infty e^{-\lambda t} (\Pi_{t/c}^c u)(x) \, dt \, = \, \int_0^\infty e^{-\lambda t} \, \mathds{E}^x u \l \mathpzc{N}^c(t/c) \r \, dt \, \stackrel{c \to 0}{\longrightarrow} \, \widetilde{q}^0 (x,\lambda)
\label{526}
\end{align}
where $\widetilde{q}^0 (x,\lambda)$ is the Laplace transform of the solution to
\begin{align}
\frac{d^{\alpha(\cdot)}}{dt^{\alpha(\cdot)}} q^0(t) \, = \, \theta \nabla_l q^0(t), \qquad q^0(0) = u \in C^1_l (\mathbb{R}^d).
\label{fractrasl}
\end{align}
\subsubsection{Convergence to the fractional diffusion equation and to L\'evy type processes}
Let $X_n$ be the embedded chain with transition probabilities given in \eqref{ciaoc}. Assume that the waiting times of the Markov process $M^c(t)$ having $X_n$ as embedded chain have exponential distribution with parameter $\theta^c(x)=1/c$. Hence the Markov process $M^c(t)$ has generator
\begin{align}
\l G^cu \r (x) \, = \, & \frac{1}{2dc^2} \sum_{i=1}^d \l u(x+ce_i)+u(x-ce_i)-2u(x) \r.
\end{align}
Hence we have as in Section \ref{convbm} that $G^cu \to (1/2)\Delta u$ as $ c \to 0$ for $u \in C^2(\mathbb{R}^d)$ which is the generator of the strongly continuous heat semigroup on $C_0(\mathcal{S})$. Now consider the process $M^{f(c,\cdot)}(t)$ which is obtained by setting the transition probabilities of the embedded chain $X_n$
\begin{align}
h^{f(c,x)}(x, dy) \, = \, \frac{1}{2d} \sum_{i=1}^d \l \delta_{x+f(c,x)e_i}(dy) + \delta_{x-f(c,x)e_i}(dy) \r
\end{align}
and the parameters of the exponential r.v.'s equal to $\theta^c(x)=1/f(c,x)$. Assume that $f(\lambda, x)$ varies regularly at zero with index $\alpha (x) \in (0,1)$ and define $X^c(t)$ as a semi-Markov process which satisfies the condition in Theorem \ref{tetimech}. Hence we can apply Theorem \ref{telimit} to say that
\begin{align}
\int_0^\infty e^{-\lambda t} \mathds{E}^xu(X^c(t/c)) dt \to \widetilde{g}^0 (\lambda, x)
\end{align}
where $\widetilde{g}^0(\lambda, x)$ is the Laplace transform of the solution to
\begin{align}
\frac{d^{\alpha (\cdot)}}{dt^{\alpha(\cdot)}}g^0(t) \, = \, \frac{1}{2} \Delta g^0(t), \qquad g^0(0) = u \in C^2 \l \mathcal{S} \r.
\end{align}
Consider again the transition probabilities of the chain $X_n^c$ as in Section \ref{convbm} given by
\begin{align}
h^c(x, d(y-x)) \, = \, \frac{\nu(x, d(y-x))}{\nu(x, \mathbb{R}^d \backslash B_c(0) )} \mathds{1}_{ \left[ \mathbb{R}^d \backslash B_c(0) \right]}(y-x)
\end{align}
which yields to a Markov process $M^c(t)$ with semigroup $P_t^c$ converging to the L\'evy type semigroup generated by
\begin{align}
\l G^0 u \r (x) \, = \, \int_{\mathbb{R}^d} \l u(y) -u(x) \r \nu(x, d(y-x)).
\end{align}
In order to apply Theorem \ref{telimit} we need to consider the Markov process $M^{f(c,x)}(t)$ having transition probabilities
\begin{align}
h^{f(c,x)}(x, d(y-x)) \, = \, \frac{\nu(x, d(y-x))}{\nu\l x, \mathbb{R}^d \backslash B_{f(c,x)}(0) \r} \mathds{1}_{ \left[ \mathbb{R}^d \backslash B_{f(c,x)}(0) \right]}(y-x)
\end{align}
where $B_{f(c,x)}(0) = \ll y \in \mathbb{R}^d : |y| \leq f(c,x) \rr$. Now let $X^c(t)$ be a family of semi-Markov processes satisfying the hypotheses of Theorem \ref{tetimech} with respect to $M^{f(c,x)}(t)$ and use Theorem \ref{telimit} to say that
\begin{align}
\int_0^\infty e^{-\lambda t} \mathds{E}^xu(X^c(t)) dt \, \to \, \widetilde{g}^0(\lambda,x)
\end{align}
as $c \to 0$, where $\widetilde{g}^0(\lambda,x)$ is the Laplace transform of $g^0(t,x)$ which satisfies
\begin{align}
\frac{d^{\alpha(x)}}{dt^{\alpha(x)}} g(t, x) \, = \, \int_{\mathbb{R}^d} \l g(t,y) -g(t,x) \r \nu(x, d(y-x)).
\end{align}
\section{Some remarks on countable state spaces}
For the sake of intuition we present here some results of the previous sections when the state space $\mathcal{S}$ is countable. This situation is often useful in applications (e.g. \cite{jannsen} and also \cite{G15, raberto} for some recent examples). The results here are a consequence of the ones obtained above and hence we present only the points in which the discussion becomes simpler. Since the state space is countable we define the transition matrix $\l P_t \r_{ij} := P^i \l M(t) = j \r$, which defines a semigroup of linear operators acting on vectors $\underline{u}$, generated by
\begin{align}
G \,: = \, \Theta \l H - 1 \r,
\label{genM}
\end{align}
where $\Theta = \text{diag}(\theta_1, \theta_2, \cdots)$ and $H$ is the transition matrix of the embedded Markov chain $X_n$.
Hence we have that
\begin{align}
\frac{d}{dt} P_t \underline{u} = G P_t\underline{u} = P_t G\underline{u}.
\label{keq}
\end{align}
Consider now the matrix $\Pi_t$ given by
\begin{align}
\pi_{i,j}(t) \, := \l \Pi_t \r_{ij} :=\, P^i \l X(t) = j \mid \gamma^X(0) = 0 \r
\end{align}
which is equivalent, for any $x \in \mathcal{S}$, to
\begin{align}
\l \Pi_t \r_{ij} = P^x \l X(t+\tau) = j \mid X(\tau) = i, \gamma^X(\tau) = 0 \r
\end{align}
in view of homogeneity \eqref{homo}. The backward equation has the following form.
\begin{coro}
\label{coroclasseq}
Let $\l \Pi_t \r_{ij} = \pi_{i,j}(t)= P^i \l X(t) = j \r$ and consider as initial datum a vector $\underline{u} = \left[ u(1), u(2), \dots, u(i), \dots \right]^\prime$, where $u \in C_b \l \mathcal{S} \r$ satisfies the assumptions in Proposition \ref{tediff}. The mapping $ t \mapsto \Pi_t\underline{u}$ solves, for $t \geq 0$,
\begin{align}
\mathfrak{D}_t^\cdot q(t) \, = \, Gq(t), \qquad q(0) = \underline{u}.
\label{classiceq}
\end{align}
\end{coro}
\begin{proof}
This is a Corollary of Theorem \ref{teclassiceq}. We provide here a heuristic proof. Use \cite[Chapter 10, formula (5.5)]{cinlar} to say that $\pi_{i,j}(t)$ satisfies the backward renewal equation
\begin{align}
\pi_{i,j}(t) \, = \, \overline{F}_{i}(t) \delta_{ij} \, + \, \sum_{k \in \mathcal{S}} \int_0^t h_{ik} \, \mathpzc{f}_i (s) \, \pi_{k,j}(t-s) \, ds
\label{410}
\end{align}
where $\mathpzc{f}_i$ is a density of $F_i$. Since $\overline{F}_i(t) = \mathds{E}^x e^{-\theta_iL^i(t)}$ we recall that
\begin{align}
\mathcal{L} \left[ \overline{F}_i(\cdot) \right] (\lambda)\, = \, \frac{f(\lambda, i)}{\lambda} \frac{1}{\theta_i+f(\lambda, i)}
\end{align}
and therefore by the convolution theorem for Laplace transform we can take the Laplace transform in \eqref{410} to write
\begin{align}
\widetilde{\pi}_{i,j}(\lambda) \, = \, \frac{f(\lambda, i)}{\lambda} \frac{1}{\theta_i+f(\lambda, i)} \delta_{ij}+ \sum_{k \in \mathcal{S}} \frac{\theta_i}{\theta_i+f(\lambda, i)} h_{ik}\widetilde{\pi}_{k,j}(\lambda).
\label{rearr}
\end{align}
By rearranging \eqref{rearr} we can write
\begin{align}
f(\lambda, i) \widetilde{\pi}_{i,j}(\lambda) - \lambda^{-1}f(\lambda, i) \pi_{i,j}(0) \, = \, -\theta_i \widetilde{\pi}_{i,j}(\lambda) + \sum_{k \in \mathcal{S}} \theta_i h_{ik} \widetilde{\pi}_{k,j}(\lambda).
\label{rearr2}
\end{align}
By using \cite[Proposition 2.7 and Lemma 2.5]{toaldopota} we can invert the Laplace transform and we have that
\begin{align}
\frac{d}{dt} \int_0^t \pi_{i,j} (s) \, \bar{\nu}(t-s,i) ds \, - \delta_{ij} \bar{\nu}(t,i) \, = \, -\theta_i \pi_{i,j}(t) + \sum_{k \in \mathcal{S}} \theta_i h_{ik} \pi_{k,j}(t).
\label{414}
\end{align}
Let $g_{ij}$ be the elements of the matrix $G$. By definition \eqref{genM} we have that
\begin{align}
g_{ij} \, = \, \theta_i \l h_{ij}-\delta_{ij} \r
\label{elg}
\end{align}
and hence \eqref{414} reduces to
\begin{align}
\frac{d}{dt} \int_0^t \pi_{i,j} (s) \, \bar{\nu}(t-s,i) ds \,- \delta_{ij} \bar{\nu}(t,i) \, = \, \sum_{k \in \mathcal{S}} g_{ik} \, \pi_{k,j}(t).
\end{align}
\end{proof}
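As an illustration (in the spirit of Remark \ref{remfrac}), if $\overline{F}_i(t) = E_{\alpha_i}(-\theta_i t^{\alpha_i})$ with $\alpha_i \in (0,1)$, then $f(\lambda, i) = \lambda^{\alpha_i}$ and \eqref{classiceq} becomes the system of variable-order fractional equations
\begin{align*}
\frac{d^{\alpha_i}}{dt^{\alpha_i}} \pi_{i,j}(t) \, = \, \sum_{k \in \mathcal{S}} g_{ik} \, \pi_{k,j}(t), \qquad \pi_{i,j}(0) \, = \, \delta_{ij},
\end{align*}
where the fractional derivative is the one appearing in \eqref{fracvarord}.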
The version of the equation in Theorem \ref{teeqevol} in a countable space is given in the following Corollary.
\begin{coro}
Under the same assumptions as in Corollary \ref{coroclasseq} we have that $q(t)$ satisfies the evolutionary Cauchy problem
\begin{align}
\frac{d}{dt} q(t) \, = \, \mathcal{D}_t^\star \, G \, q(t), \qquad q(0) = \underline{u}.
\end{align}
\end{coro}
\begin{proof}
This is a consequence of Theorem \ref{teeqevol}. To give a heuristic proof as in Corollary \ref{coroclasseq} it is sufficient to rearrange \eqref{rearr2} as
\begin{align}
\lambda \widetilde{\pi}_{i,j}(\lambda) - \pi_{i,j}(0) \, = \, \frac{\lambda}{f(\lambda, i)}\l -\theta_i \widetilde{\pi}_{i,j}(\lambda) + \sum_{k \in \mathcal{S}} \theta_i h_{ik} \widetilde{\pi}_{k,j}(\lambda)\r.
\end{align}
By inverting the Laplace transform and using \eqref{elg} we obtain
\begin{align}
\frac{d}{dt}\pi_{i,j}(t) \, = \, \frac{d}{dt} \int_0^t \sum_{k \in \mathcal{S}} g_{ik} \, \pi_{k,j}(s) u^f(t-s,i)ds.
\end{align}
\end{proof}
\section{Auxiliary results}
\br{We collect here two technical results used in the paper.}
\begin{lem}
\label{tecommon}
Let $\sigma^f$ be a strictly increasing subordinator with Laplace exponent $f(\lambda)$ and let $L^f(t)$ be the hitting time of $\sigma^f(t)$. The function $t \mapsto \mathds{E}^xe^{-\br{\theta}L^f(t)}$, $\theta >0$, is completely monotone if and only if $f(\lambda)$ is a complete Bernstein function.
\end{lem}
\begin{proof}
Since we assume that $\sigma^f(t)$ is strictly increasing it must be true that $b>0$ and $\nu(0, \infty) < \infty$, or $b=0$ and $\nu(0, \infty) = \infty$, or $b>0$ and $\nu(0, \infty) = \infty$.
In \cite[Theorem 2.1]{meertoa} it is proved that if $b=0$ and $\nu(0, \infty)=\infty$ the function $t \mapsto \mathds{E}^xe^{-\br{\theta}L^f(t)}$ is completely monotone if and only if the tail $t \mapsto \bar{\nu}(t)$ is a completely monotone function. This proves the result when $b=0$ and $\nu(0, \infty)=\infty$ since the complete monotonicity of $t \mapsto \bar{\nu}(t)$ implies that the L\'evy measure $\nu(\cdot)$ has a completely monotone density and therefore $\lambda \mapsto f(\lambda)$ is a complete Bernstein function (e.g. the discussion in \cite[p. 91]{pottheory}). When $b>0$ and $\nu(0, \infty) < \infty$ the proof is very similar and we record here only the basic facts.
First we prove the direct statement. Since $t \mapsto \sigma^f (t)$ is strictly increasing, the paths $t \mapsto L^f(t)$ are a.s. continuous. Hence $L^f(t)$ is continuous a.s. and therefore also in distribution. The function $t \mapsto \mathds{E}^xe^{-\br{\theta}L^f(t)}$ is therefore continuous by \cite[Theorem 4, p. 431]{Feller} and has Laplace transform \cite[Corollary 3.5]{meertri}
\begin{align}
\int_0^\infty e^{-\lambda t} \mathds{E}^xe^{-\br{\theta}L^f(t)}dt \, = \, \frac{f(\lambda)}{\lambda} \frac{1}{\br{\theta}+f(\lambda)}.
\label{unlapl}
\end{align}
Since
\begin{align}
\lambda \mapsto \varphi (\lambda) = \frac{\lambda}{\br{\theta}+\lambda}
\end{align}
is a complete Bernstein function then by \cite[Theorem 7.6]{librobern}
\begin{align}
\lambda \mapsto \l \varphi \circ f \r (\lambda) \, = \, \frac{f(\lambda)}{\br{\theta}+f(\lambda)}
\end{align}
is a complete Bernstein function. Therefore there exists a triple $\l \mathpzc{a}, \mathpzc{b}, \mathpzc{v} \r$ such that
\begin{align}
\lambda \mapsto \l \varphi \circ f \r (\lambda) \, = \, \mathpzc{a} + \mathpzc{b}\lambda + \int_0^\infty \l 1-e^{-\lambda s} \r \mathpzc{v}(ds)
\end{align}
where the L\'evy measure $\mathpzc{v}(\cdot)$ has a completely monotone density and the tail $t \mapsto \bar{\mathpzc{v}}(t) = \mathpzc{a}+ \mathpzc{v}(t, \infty)$ is a completely monotone function \cite[p. 91]{pottheory}. After an integration by parts we can also write
\begin{align}
\lambda \mapsto \frac{1}{\lambda} \l \varphi \circ f \r (\lambda) \, = \, \int_0^\infty e^{-\lambda s} \l \mathpzc{b}\lambda+ \bar{\mathpzc{v}}(s) \r ds
\end{align}
and since $s \mapsto \mathpzc{b}\lambda + \bar{\mathpzc{v}}(s)$ is completely monotone (and obviously continuous) we can use \eqref{unlapl} and the uniqueness of the Laplace transform to say that $t \mapsto \mathds{E}^xe^{-\br{\theta}L^f(t)}$ is completely monotone. Now we prove the converse statement and therefore assume that
\begin{align}
t \mapsto \mathds{E}^xe^{-\br{\theta}L^f(t)} \, = \, \int_0^\infty e^{-st} \mathpzc{m}(ds)
\label{convstat}
\end{align}
for some measure $\mathpzc{m}(\cdot)$. Then by using \eqref{unlapl} and \eqref{convstat} we can write
\begin{align}
\frac{f(\lambda)}{\lambda} \frac{1}{\br{\theta}+f(\lambda)} \, = \, \int_0^\infty e^{-\lambda t} \mathds{E}^xe^{-\br{\theta}L^f(t)} \, dt \, = \, \int_0^\infty \frac{1}{\lambda + s} \mathpzc{m}(ds)
\end{align}
and thus $\lambda \mapsto \lambda^{-1}f(\lambda)\big/ \l \br{\theta}+f(\lambda) \r$ is a Stieltjes function \cite[Definition 2.1]{librobern} if
\begin{align}
\int_0^\infty \frac{1}{1+s} \mathpzc{m}(ds) \, < \, \infty.
\label{416}
\end{align}
To verify \eqref{416}, observe that the subordinator $\sigma^f(t)$ is a.s. increasing (and non-negative) and therefore, a.s., $L^f(0) =0$. Hence $\mathds{E}^xe^{-\br{\theta}L^f(0)}=1$. Applying this to \eqref{convstat} allows us to write $\int_0^\infty \mathpzc{m}(ds) < \infty$ and thus \eqref{416} is true.
Then $\lambda \mapsto f(\lambda) / (\theta+f(\lambda))$ is a complete Bernstein function by \cite[Theorem 6.2]{librobern} and therefore $\lambda \mapsto (\theta/f(\lambda))+1$ is a Stieltjes function by \cite[Theorem 7.3]{librobern} (and therefore also $1/f(\lambda)$ is). Another application of \cite[Theorem 7.3]{librobern} ensures that $\lambda \mapsto f(\lambda)$ is a complete Bernstein function. This concludes the proof.
\end{proof}
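A standard example (only for illustration) is $f(\lambda) = \lambda^\alpha$, $\alpha \in (0,1)$, which is a complete Bernstein function: in this case, consistently with the Lemma,
\begin{align*}
\mathds{E}^x e^{-\theta L^f(t)} \, = \, E_\alpha(-\theta t^\alpha),
\end{align*}
which is completely monotone in $t$ (see, e.g., \cite[Section 3.1]{mainardimittag}).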
\begin{lem}
\label{te22}
For every non-negative Stieltjes function $\mathfrak{G} (\lambda)$ with representation
\begin{align}
\mathfrak{G} (\lambda) \, = \, c\lambda^{-1}+\int_0^\infty \frac{1}{s+\lambda} \mathfrak{K}(ds) \quad \text{with} \quad \mathfrak{K}(0, \infty) \leq 1-c
\label{222}
\end{align}
for $0 \leq c < 1$, there exists a complete Bernstein function $f$ such that
\begin{align}
\mathfrak{G} \l \lambda \r \, = \, \frac{1}{\lambda} \frac{f(\lambda)}{\br{\theta}+f(\lambda)},
\label{215}
\end{align}
\br{for some constant $\theta >0$.}
\end{lem}
\begin{proof}
By rearranging \eqref{215} we note that in order to prove \eqref{215} we can prove that
\begin{align}
f(\lambda)= \br{\theta}\frac{\lambda \mathfrak{G}(\lambda)}{1- \lambda \mathfrak{G}(\lambda)}
\end{align}
is a complete Bernstein function for any Stieltjes function $\mathfrak{G}$ having representation \eqref{222}. \br{Since here $\theta$ is just a multiplicative constant we can prove the Lemma for $\theta =1$. Indeed if $f(\lambda)$ is a complete Bernstein function then also $\theta^{-1}f(\lambda)$ is, for any $\theta >0$.}
Since $\mathfrak{G}(\lambda)$ is a Stieltjes function we have that $\lambda \mathfrak{G}(\lambda)$ is a complete Bernstein function by \cite[Theorem 6.2]{librobern}. Then $1/( \lambda \mathfrak{G}(\lambda)) $ is Stieltjes by \cite[Theorem 7.3]{librobern} and therefore
\begin{align}
\frac{1}{\lambda \mathfrak{G}(\lambda)} \, = \, \frac{\mathfrak{a}}{\lambda} + \mathfrak{b} + \int_0^\infty \frac{1}{s+\lambda} \mathfrak{m}(ds)
\end{align}
for some constant $\mathfrak{a},\mathfrak{b}$ and a measure $\mathfrak{m}(\cdot)$. Note that
\begin{align}
\mathfrak{b} \, = \, \lim_{\lambda \to \infty} \frac{1}{\lambda \mathfrak{G}(\lambda)}
\end{align}
but
\begin{align}
\lim_{\lambda \to \infty} \lambda \mathfrak{G}(\lambda) \, = \, c+ \lim_{\lambda \to \infty} \int_0^\infty \l 1-\frac{s}{s+\lambda}\r \mathfrak{K}(ds) \, = \, c+ \mathfrak{K}(0, \infty)
\label{218}
\end{align}
where we used the monotone convergence Theorem to move the limit inside the integral. Hence $\mathfrak{b}\geq 1$. Therefore the function
\begin{align}
\frac{1}{\lambda \mathfrak{G}(\lambda)}-1
\end{align}
is a (non-negative) Stieltjes function and thus by \cite[Theorem 7.3]{librobern} there exists a complete Bernstein function $f(\lambda)$ such that
\begin{align}
\frac{1}{f(\lambda)} \, = \, \frac{1}{\lambda \mathfrak{G}(\lambda)}-1
\end{align}
and this proves that the function
\begin{align}
\lambda \mapsto f(\lambda) \, = \, \frac{\lambda \mathfrak{G}(\lambda)}{1-\lambda \mathfrak{G}(\lambda)}
\label{140}
\end{align}
is a complete Bernstein function.
\end{proof}
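For instance (an illustrative special case), taking $\mathfrak{G}(\lambda) = \lambda^{\alpha-1}/\l \theta + \lambda^\alpha \r$, $\alpha \in (0,1)$, which is the Laplace transform of the completely monotone function $E_\alpha(-\theta t^\alpha)$ and hence a Stieltjes function, one finds
\begin{align*}
f(\lambda) \, = \, \theta \, \frac{\lambda \mathfrak{G}(\lambda)}{1-\lambda \mathfrak{G}(\lambda)} \, = \, \theta \, \frac{\lambda^{\alpha}/\l \theta+\lambda^{\alpha} \r}{\theta/\l \theta+\lambda^{\alpha} \r} \, = \, \lambda^\alpha,
\end{align*}
which is indeed a complete Bernstein function.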
\end{document}
\begin{document}
\begin{center}
{\Large{\bf Stochastic nonlinear Schr\"odinger equations
in the defocusing mass and energy critical cases}}
{\large{\bf Deng Zhang}}
\footnote[2]{Department of Mathematics,
Shanghai Jiao Tong University, 200240 Shanghai, China. \\
Email: [email protected] }
\end{center}
\begin{quote}
\noindent {\small{\bf Abstract.}
We study the stochastic nonlinear Schr\"odinger equations with linear multiplicative noise,
particularly in the defocusing mass-critical and energy-critical cases.
For general initial data,
we prove the global existence and uniqueness of solutions in both cases.
When the quadratic variation of noise is globally bounded,
we also obtain the rescaled scattering behavior of solutions in the
spaces $L^2$, $H^1$ as well as the pseudo-conformal space.
Moreover,
the Stroock-Varadhan type theorem for the topological support of
solutions is also obtained
in the Strichartz and local smoothing spaces. }
{\it \bf Keywords}: Critical space,
global well-posedness,
scattering,
Stochastic nonlinear Schr\"odinger equation,
support theorem.
\\
{\bf 2010 Mathematics Subject Classification: } 60H15, 35Q55, 35J10.
\end{quote}
\vfill
\section{Introduction} \label{Sec-Intro}
This work is devoted to stochastic nonlinear Schr\"odinger equations with linear multiplicative noise
in the defocusing mass-critical and energy-critical cases;
it is a continuation of a series of works \cite{BRZ14,BRZ16,HRZ18}.
Precisely, we consider
\begin{equation} \label{equa-x}
\begin{split}
idX&= \Delta Xdt + \lambda F(X) dt -i\mu Xdt + i\sum\limits_{k=1}^N X G_k d\beta_k(t)
, \\
X(0)&=X_0.
\end{split}
\end{equation}
Here,
the nonlinearity $F(X)= |X|^{{\alpha}-1}X$,
${\alpha}>1$,
${\lambda} =-1$ (resp. ${\lambda}=1$) corresponds to the defocusing (resp. focusing) case,
$\beta_k$ are standard real-valued
Brownian motions on a probability space $(\Omega, \mathscr{F},
\mathbb{P})$ with normal filtration
$(\mathscr{F}_t)_{t\geq 0}$,
and $G_k(t,x)=g_k(t)\phi_k(x)$,
where $g_k$ are real-valued predictable processes satisfying
$g_k \in L^2_{loc}({\mathbb{R}}^+; {\mathbb{R}})$ ${\mathbb{P}}$-a.s.,
and $\phi_k\in C^{\infty}({\mathbb{R}}^d; \mathbb{C})$,
$d\geq 1$.
For simplicity, we assume $N<{\infty}$,
but the arguments in this paper extend also to the case where
$N={\infty}$ under appropriate summable conditions on $G_k$.
Moreover,
the term
$\mu$ is of the form
$$\mu(t,x) = \frac12 \sum_{k=1}^N |G_k(t,x)|^2. $$
In particular,
in the {\it conservative case} where ${\rm Re} \, G_k = 0$,
$1\leq k\leq N$,
$-i\mu Xdt + i\sum_{k=1}^N X G_k d\beta_k(t)$
is indeed the Stratonovich differential,
and, via It\^o's formula,
the mass is pathwise conserved: $|X(t)|_2^2 =|X_0|_2^2$, $t\in [0,T]$.
Hence, for the normalized initial state $|X_0|_2=1$,
the quantum system evolves on the unit ball of $L^2$
and verifies the conservation of probability.
See, e.g., \cite{BCIR94, BCIRG95} for applications
in molecular aggregates with thermal fluctuations.
The {\it non-conservative case} (i.e.,
${\rm Re} \, G_k \neq 0$ for some $1\leq k\leq N$)
plays an important role in the application to open quantum systems \cite{BG09};
one of the main features is that
$t \mapsto |X(t)|_2^2$
is a continuous martingale.
This fact
implies the conservation of the mean squared norm ${\mathbb{E}} |X(t)|_2^2$, $t\in [0,T]$,
and enables one to define the ``physical'' probability law
\begin{align*}
\widehat{\mathbb{P}}^T_{X_0} (d \omega)
:= (\mathbb{E}_{\mathbb{P}} [|X_0|_2^2])^{-1} |X(T,\omega)|_2^2\ \mathbb{P} (d \omega)
\end{align*}
of the events occurring in $[0,T]$.
We refer to \cite{BG09} for more details.
For more physical applications, e.g. nonlinear optics,
Bose-Einstein condensation and
the Gross-Pitaevskii equation,
we refer to \cite{SS99}.
For stochastic nonlinear Schr\"odinger equations,
most results in the literature center around the subcritical case.
The first global well-posedness results
were proved by de Bouard and Debussche \cite{BD99,BD03},
by using the theory of radonifying operators.
Later,
the compact manifold case was studied by
Brze\'{z}niak and Millet \cite{BM14},
where more general stochastic Strichartz estimates were proved.
See also \cite{BHW17, BHW18}.
Recently, the global well-posedness of \eqref{equa-x}
for the full range of subcritical exponents was proved in \cite{BRZ14,BRZ16};
the new method introduced there is the rescaling approach,
which can be viewed as a Doss-Sussmann type transformation in Hilbert spaces.
We also refer to \cite{H16} for the global well-posedness
in the full mass-subcritical case with quite general multiplicative noise.
See also \cite{BRZ16, BRZ18,CG15,Z17}.
The critical case is much more subtle
and, to the best of our knowledge,
very few results are known for stochastic nonlinear Schr\"odinger equations.
See, e.g., \cite{BRZ14,BRZ16,BD99,BD03,H16} for the local well-posedness results.
The main difference between the subcritical and critical cases is,
that the maximal existence time of solutions depends only on the $L^2$- or $H^1$-norm of the initial data
in the subcritical case,
while on the whole profile in the critical case.
Hence,
the standard energy method works well for the global well-posedness in the subcritical case;
however, it fails in the critical case.
In contrast,
the critical case in the deterministic case
has been extensively studied in literature.
In the defocusing mass- and energy-critical cases,
it was conjectured that deterministic solutions exist globally and even scatter at infinity.
This conjecture was first proved,
via the energy induction method,
by Bourgain in the seminal work \cite{B99}
for the energy-critical case with radial initial data in dimensions three and four.
Later, for general initial data and dimensions,
it was proved
by the I-team \cite{CKSTT08}, Ryckman and Visan \cite{RV07}
and Visan \cite{V07},
based on the energy induction method and
interaction Morawetz estimates.
See also the concentration compactness method introduced in
\cite{KM06}.
Recently,
this conjecture in the mass-critical case
was proved by Dodson \cite{D12,D16.1,D16.2} for general initial data,
where the key ingredients are long-time Strichartz estimates.
See also \cite{KTV14, T06}.
However,
it is quite hard to obtain these estimates in the stochastic case.
Actually,
the presence of noise in \eqref{equa-x}
destroys the symmetries of the equation and the conservation laws (e.g. of the mass and the Hamiltonian);
the corresponding It\^o formulas
consist of several stochastic integrals,
with which it is very difficult to obtain sharp estimates
as in the deterministic case.
Moreover,
even if a Banach space $\mathscr{X}$ is compactly embedded into another one $\mathscr{Y}$,
one does not generally have the compact embedding of $L^p(\Omega; \mathscr{X})$
into $L^p(\Omega; \mathscr{Y})$, $p\geq 1$.
Hence, the global existence of stochastic solutions in the mass- and energy-critical cases
with general initial data
has been an open problem.
See the recent progress \cite{FX18.1,FX18.2}
for the global well-posedness
in the conservative mass-critical case for dimension $d=1$.
(See also Remark \ref{Rem-Com-FANXU} below.)
In this paper, we prove the global well-posedness
of \eqref{equa-x}
in the mass-critical case
for all dimensions $d\geq 1$.
Moreover, in the energy-critical case,
we prove the global well-posedness
for dimensions $3\leq d\leq 6$,
and we also prove conditional global well-posedness results
for high dimensions $d>6$,
assuming an {\it a-priori} bound of the energy.
Thus, together with the previous work \cite{BRZ14,BRZ16}
and the {\it a-priori} bound of the energy in the energy-critical case with $d>6$,
the global existence and uniqueness of solutions to \eqref{equa-x}
are obtained
for the full range of subcritical and critical exponents of the nonlinearity in the defocusing case.
We would also like to mention that
these results apply to the non-conservative case as well,
which is important in the physical context \cite{BG09}.
The proof presented below is
different from that in \cite{FX18.1,FX18.2}
and is
based on a new application of the rescaling approach.
It is also based on the work \cite{CKSTT08, D12,D16.1,D16.2, RV07,V07} mentioned above
and on the stability results for
nonlinear Schr\"odinger equations with lower order perturbations
(see Theorems \ref{Thm-Sta-L2}, \ref{Thm-Sta-H1-dlow} and \ref{Thm-Sta-H1} below),
which are also of independent interest.
Another main interest of this paper lies in the large time behavior of global solutions to \eqref{equa-x}.
As mentioned above,
in the defocusing case
deterministic global solutions scatter at infinity,
namely, they behave asymptotically like linear solutions.
However, the situation becomes quite delicate in the stochastic case
because of the rapid fluctuations of the noise at large times.
Very recently, in \cite{HRZ18},
the rescaled scattering behavior of global solutions to \eqref{equa-x}
was proved for the energy-subcritical exponents
${\alpha} \in [\max\{2, 1+\frac 4d\}, 1+\frac{4}{d-2})$,
$3\leq d\leq 6$,
and
it was also proved that
the non-conservative noise has the effect of improving scattering with high probability,
even in the regime where deterministic solutions fail to scatter.
The energy-critical case is also studied there;
however, the analysis relies on the {\it a-priori} assumption of global existence of solutions,
which is another motivation for the present work.
When the quadratic variation of the noise is globally bounded,
building on \cite{HRZ18},
we prove the rescaled scattering behavior of
global solutions
to \eqref{equa-x}
in the spaces $L^2$, $H^1$ and the pseudo-conformal space, respectively.
These results are new in the $L^2$ case
and also improve those of \cite{HRZ18} in the $H^1$ and pseudo-conformal spaces.
Finally,
we give a characterization of the support of the law of
global solutions to \eqref{equa-x},
in both the mass-critical and energy-critical cases.
We prove that
the law of stochastic solutions is supported on the closure of the set of all deterministic controlled trajectories
in the Strichartz and local smoothing spaces (see Theorem \ref{Thm-Supp} below).
We would like to mention that,
for each deterministic controlled trajectory,
the global well-posedness
can be proved by stability results as in \cite{TV05,TVZ07}.
So, if the support theorem is {\it a-priori} assumed to hold,
the related stochastic trajectories should also exist globally.
This observation, in fact, provided an intuitive point of view on the global well-posedness
of stochastic solutions
at the beginning of this work. \\
{\bf Notation.}
For $z\in {\mathbb{C}}$,
we set $F(z):=|z|^{{\alpha}-1}z$
with ${\alpha}=1+\frac 4d$ or ${\alpha}=1+\frac{4}{d-2}$
in the mass-critical or energy-critical case, respectively.
We denote by $F_z$ and $F_{\overline{z}}$
the usual complex derivatives
$F_z= \frac 12 (\frac{{\partial} F}{{\partial} x} - i \frac{{\partial} F}{{\partial} y})$,
$F_{\overline z}= \frac 12 (\frac{{\partial} F}{{\partial} x} + i \frac{{\partial} F}{{\partial} y})$.
For any $x=(x_1,\cdots,x_d) \in {\mathbb{R}}^d$
and multi-index ${\alpha}=({\alpha}_1,\cdots, {\alpha}_d)$,
we use the notations
$|{\alpha}|= \sum_{j=1}^d {\alpha}_j$,
$\left<x\right>=(1+|x|^2)^{1/2}$,
${\partial}_j = \frac{{\partial}}{{\partial}{x_j}}$,
$\partial_x^{\alpha}=\partial_{x_1}^{{\alpha}_1}\cdots \partial_{x_d}^{{\alpha}_d}$,
$\left<\nabla\right>=(I-\Delta)^{1/2}$.
Let $\mathscr{S}$ denote the space of rapidly decreasing (Schwartz) functions
and $\mathscr{S}'$ the dual space of $\mathscr{S}$.
For any $f\in \mathscr{S}$, $\mathscr{F}(f)$
is the Fourier transform of $f$,
i.e. $\mathscr{F}(f)(\xi) = \int e^{-ix\cdot \xi} f(x)dx$.
Given $1\leq p \leq {\infty}$, $s\geq 0$,
$L^p = L^p({\mathbb{R}}^d)$ is the space of $p$-integrable complex functions with the norm $|\cdot|_{L^p}$,
$W^{s,p}= \left<D\right>^{-s}L^p({\mathbb{R}}^d)$ is the usual Sobolev space
with the norm $\|\cdot\|_{W^{s,p}}$.
In particular, we write $|\cdot|_2 = |\cdot|_{L^2}$,
$|\cdot|_{H^1}= \|\cdot\|_{W^{1,2}}$.
For any Banach space $\mathcal{X}$
and any interval $I \subseteq {\mathbb{R}}^+$,
$L^p(I; \mathcal{X})$ is the space of $p$-integrable $\mathcal{X}$-valued functions
with the norm $\|\cdot\|_{L^p(I; \mathcal{X})}$,
and $C(I;\mathcal{X})$ is the space of continuous
$\mathcal{X}$-valued functions with the supremum norm in $t$.
Moreover,
for two Banach spaces $\mathcal{X}, \mathcal{Y}$,
the norm of $\mathcal{X} \cap \mathcal{Y}$ is
$\|\cdot\|_{\mathcal{X}} + \|\cdot\|_{\mathcal{Y}}$,
and
$\mathcal{X} + \mathcal{Y}$ is equipped with norm
$\|u\|_{\mathcal{X} + \mathcal{Y}} =\inf\{\|u_1\|_\mathcal{X} + \|u_2\|_\mathcal{Y}: u=u_1 + u_2, u_1 \in \mathcal{X}, u_2 \in \mathcal{Y}\}$.
A pair $(p,q)$ is called a Strichartz pair
if $\frac 2 q = d(\frac 12 - \frac 1p)$,
$(p,q)\in [2,{\infty}]\times[2,{\infty}]$
and $(d,p,q)\not = (2,{\infty},2)$.
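For instance (a few standard examples, recorded here only for the reader's convenience), one checks directly from the relation $\frac 2q = d(\frac 12-\frac 1p)$ that
\begin{align*}
(p,q)=(2,{\infty}), \qquad
(p,q)=\Big(\tfrac{2d}{d-2},\,2\Big)\ (d\geq 3), \qquad
(p,q)=\Big(2+\tfrac 4d,\,2+\tfrac 4d\Big)
\end{align*}
are Strichartz pairs; the last ``diagonal'' pair is the one defining the mass-critical space $V(I)$ below.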
For any interval $I\subseteq{\mathbb{R}}^+$,
define the Strichartz spaces by
\begin{align*}
S^0(I):= \bigcap\limits_{(p,q):Strichartz\ pair} L^q(I; L^p), \ \
N^0(I):= \bigcup\limits_{(p,q):Strichartz\ pair} L^{q'}(I; L^{p'}).
{\epsilon}nd{align*}
Similarly, let
$S^1(I) = \{u\in \mathscr{S}': \|u\|_{S^0(I)} + \|\nabla u\|_{S^0(I)} <{\infty}\}$,
and
$N^1(I) = \{u\in \mathscr{S}': \|u\|_{N^0(I)} + \|\nabla u\|_{N^0(I)} <{\infty}\}$.
In particular,
the Strichartz spaces $V(I)= L^{2+\frac 4d}(I; L^{2+\frac 4d})$,
$W(I) = L^{\frac{2(d+2)}{d-2}}(I; L^{\frac{2d(d+2)}{d^2+4}})$
and ${\mathbb{W}}(I) = L^{\frac{2(d+2)}{d-2}}(I; W^{1, \frac{2d(d+2)}{d^2+4}})$
will be frequently used
in the mass- and energy-critical cases.
We use the exotic Strichartz spaces
$X^0(I)$, $\mathbb{X}(I)$ and ${\mathbb{Y}}(I)$
with the norms
\begin{align*}
\|u\|_{X^0(I)}
= \|u\|_{L^{\frac{d(d+2)}{2(d-2)}}(I;L^{\frac{2d^2(d+2)}{(d+4)(d-2)^2}})},& \ \
\|u\|_{{\mathbb{X}}(I)}
:= \| \left<\nabla \right>^{\frac{4}{d+2}} u\|_{L^{\frac{d(d+2)}{2(d-2)}}(I;L^{\frac{2d^2(d+2)}{d^3-4d+16}})}, \\
\|u\|_{{\mathbb{Y}}(I)}
:= &\|\left<\nabla \right>^{\frac{4}{d+2}} u \|_{L^{{\frac{d}{2}}}(I; L^{\frac{2d^2(d+2)}{d^3+4d^2+4d-16}})},
\end{align*}
which are the inhomogeneous versions of the exotic Strichartz spaces in \cite{KV13}.
We also use the local smoothing spaces defined,
for ${\alpha}, \beta \in \mathbb{R}$, by
$$L^2(I;H^{\alpha}_{\beta})=\{u\in \mathscr{S}': \int_{I} \int \left<x\right>^{2\beta}|\left<\nabla\right>^{{\alpha}} u(t,x)|^2 dxdt <{\infty} \}.$$
Throughout this paper,
we use $C(\cdots)$ for various constants that may
change from line to line.
\section{Formulations of main results}
Let us start with the definition of solutions to equa\-tionref{equa-x}.
\begin{definition}\label{def-x}
Fix $T>0$. An $L^2$-(resp., $H^1$-)solution to equa\-tionref{equa-x} is an $L^2$-(resp., $H^1$-)valued continuous
$(\mathscr{F}_t)$-adapted process $X=X(t)$, $t\in[0,T],$ such that $|X|^{\alpha}\in L^1([0,T], H^{-1})$
and it satisfies ${\mathbb{P}}$-a.s.,
\begin{align}\label{equa-x'}
X(t) =& X_0 - \int^t_0 (i{\Delta}elta X(s) +\mu X(s) + \lambda i F(X(s)) )ds \noindent onumber \\
& + \sum\limits_{k=1}^N\int ^t_0 X(s)G_k(s) d \beta_k(s),\ \forall t\in [0,T],
{\epsilon}nd{align}
as an It\^o equation in $H^{-2}$ (resp. $H^{-1}$).
{\epsilon}nd{definition}
We assume the asymptotically flat condition
as in \cite{BRZ14,BRZ16,HRZ18}.
\begin{itemize}
\item[{\rm(H0)}]
For each $1\leq k\leq N$,
$G_k(t,x)= g_k(t)\phi_k(x)$,
where $g_k$ are real-valued predictable processes,
$g_k \in L^{\infty}(\Omega\times [0,T])$,
$0<T<{\infty}$,
and
$\phi_k \in C^{\infty}({\mathbb{R}}^d,\mathbb{C})$ satisfies that for any multi-index ${\gamma}$,
${\gamma}\not =0$,
\begin{align} \label{asymflat}
\limsup\limits_{|x|\to {\infty}} |x|^2 |\partial_x^{\gamma} \phi_k(x)| = 0.
\end{align}
\end{itemize}
\begin{remark}
The condition \eqref{asymflat} is
slightly stronger than $(1.3)$ in \cite{HRZ18},
mainly for convenience in performing the pseudo-differential calculus.
Moreover, the smoothness assumption can be weakened to $\phi_k \in C^n$
for $n$ large enough.
\end{remark}
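As a simple illustration of \eqref{asymflat} (an example added here for concreteness; it is not required anywhere in the sequel), every Schwartz function satisfies the condition, and so does, e.g., $\phi_k(x)=\left<x\right>^{-3}$: for any multi-index ${\gamma}\not=0$,
\begin{align*}
|\partial_x^{\gamma} \left<x\right>^{-3}| \leq C({\gamma}) \left<x\right>^{-3-|{\gamma}|} \leq C({\gamma}) \left<x\right>^{-4},
\qquad\text{so}\qquad
|x|^2\, |\partial_x^{\gamma} \left<x\right>^{-3}| \leq C({\gamma})\, |x|^2 \left<x\right>^{-4} \to 0
\end{align*}
as $|x|\to{\infty}$.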
We have the local well-posedness results
in the mass- and energy-critical cases.
\begin{theorem} \label{Thm-LWP} ({\it Local well-posedness})
Consider \eqref{equa-x} in the mass-(resp., energy-)critical case,
i.e., ${\alpha}=1+4/d$, $d\geq 1$
(resp., ${\alpha}=1+4/(d-2)$, $d\geq 3$).
Assume $(H0)$.
Then, for each $X_0 \in L^2$ (resp. $X_0\in H^1$),
there exists a unique $L^2$-(resp., $H^1$-)solution $X$ to \eqref{equa-x} on $[0,\tau^*)$,
where the maximal existence time $\tau^*$ is an $\{\mathcal{F}_t\}$-stopping time,
such that $\mathbb{P}$-a.s. for any $t\in (0,\tau^*)$
and any Strichartz pair $(\rho, {\gamma})$,
\begin{align}
X|_{[0,t]} & \in C([0,t]; L^2) \cap L^{\gamma}(0,t;L^\rho) \label{esti-X-LpLq}\\
(resp.,\ X|_{[0,t]} &\in C([0,t];H^1) \cap L^{\gamma}(0,t;W^{1,\rho}) ). \label{esti-X-LpW1q}
{\epsilon}nd{align}
Moreover,
$X$ exists globally ${\mathbb{P}}$-a.s. if for any $0<T<{\infty}$,
\begin{align}
& \|X(\omega)\|_{L^{2+\frac 4 d}(0,\tau^* \wedge T; L^{2+\frac 4d})} < {\infty},\ \ {\mathbb{P}}-a.s. \label{gloexist-L2} \\
(resp.,\ & \|X(\omega)\|_{L^{\frac{2(d+2)}{d-2}}(0,\tau^*\wedge T; L^{\frac{2(d+2)}{d-2}})} < {\infty},\ \ {\mathbb{P}}-a.s.). \label{gloexist-H1}
{\epsilon}nd{align}
{\epsilon}nd{theorem}
The proofs are similar to
those of \cite[Proposition $5.1$]{BRZ14} and \cite[Theorem $2.1$]{BRZ16},
and the last assertion concerning the global existence follows from the
blow-up alternative results as in \cite{BRZ14,BRZ16}.
The main result of this paper is formulated below,
concerning the global existence and uniqueness of solutions to equa\-tionref{equa-x} in the critical cases.
\begin{theorem} \label{Thm-GWP} (Global Well-Posedness)
$(i)$ Consider equa\-tionref{equa-x} in the defocusing mass-critical case,
i.e., ${\lambda}=-1$, ${\alpha} = 1+ 4/d$, $d{\gamma}eq 1$. Assume $(H0)$.
Then,
for each $X_0\in L^2$ and $0<T<{\infty}$,
there exists a unique $L^2$-solution to equa\-tionref{equa-x} on $[0,T]$,
satisfying that for any $p{\gamma}eq 1$,
\begin{align} \label{thm-L2-L2}
{\mathbb{E}} \|X\|^p_{C([0,T];L^2)} \leq C(p,T) <{\infty},
{\epsilon}nd{align}
and for any Strichartz pair $({\gamma},\rho)$,
\begin{align} \label{thm-L2-Lpq}
X\in L^{\gamma}(0,T; L^\rho) \cap L^2(0,T; H^\frac12_{-1}),\ \ \mathbb{P}-a.s..
{\epsilon}nd{align}
$(ii)$ Consider equa\-tionref{equa-x} in the defocusing energy-critical case,
i.e., ${\lambda}=-1$, ${\alpha} = 1+ 4/(d-2)$, $d{\gamma}eq 3$. Assume $(H0)$.
In the high dimensional case where $d>6$,
assume additionally that for each $0<T<{\infty}$,
\begin{align} \label{bdd-E-assum-d6}
E_T:= \sup\limits_{0\leq t<\tau^* \wedge T} |X(t)|_{H^1} \leq C(T) <{\infty},\ \ {\mathbb{P}}-a.s..
{\epsilon}nd{align}
Then,
for each $X_0\in H^1$ and $0<T<{\infty}$,
there exists a unique $H^1$-solution to equa\-tionref{equa-x} on $[0,T]$,
satisfying that for any $p{\gamma}eq 1$,
\begin{align} \label{thm-H1-H1}
{\mathbb{E}} \|X\|^p_{C([0,T];H^1)} + {\mathbb{E}} \|X\|^p_{L^{\frac{2d}{d-2}}(0,T;L^{\frac{2d}{d-2}})} \leq C(p,T)<{\infty},
{\epsilon}nd{align}
and for any Strichartz pair $({\gamma},\rho)$,
\begin{align} \label{thm-H1-LpWq}
X\in L^{\gamma}(0,T; W^{1,\rho}) \cap L^2(0,T; H^\frac 32_{-1}),\ \ \mathbb{P}-a.s..
{\epsilon}nd{align}
{\epsilon}nd{theorem}
\begin{remark}
We also have the stability results in both mass- and energy-critical cases,
see Theorems \ref{Thm-Sta-L2}, \ref{Thm-Sta-H1-dlow} and \ref{Thm-Sta-H1} below.
{\epsilon}nd{remark}
\begin{remark} \label{Rem-globdd-E-d6}
It is possible to obtain the pathwise bound \eqref{bdd-E-assum-d6}
by using the It\^o formula \eqref{Ito-H} below for the Hamiltonian.
The formula \eqref{Ito-H} can be derived directly by a formal computation;
however, its rigorous proof in the high dimensional case where $d>6$ is technically unclear.
See also Remark \ref{Rem-globdd-E-d6-proof} below.
\end{remark}
\begin{remark} \label{Rem-Com-FANXU}
We would like to mention that
the global well-posedness of stochastic nonlinear Schr\"odinger equations
has recently been proved in \cite{FX18.1,FX18.2} in the mass-critical case
in dimension $d=1$
in the {\it conservative case},
under a different spatial decay assumption on the noise.
The results in \cite{FX18.1,FX18.2} also hold in the case
where one has a uniform pathwise control of the mass
(see \cite[Remark 1.7]{FX18.1}).
In Theorem \ref{Thm-GWP} above,
we prove the global well-posedness of \eqref{equa-x}
in the mass-critical case for all dimensions $d\geq 1$.
In addition,
Theorem \ref{Thm-GWP} also applies to the
{\it non-conservative case},
which is important in the physical context \cite{BG09}.
Furthermore,
Theorem \ref{Thm-GWP} proves the global well-posedness
(resp. conditional global well-posedness)
in the energy-critical case for dimensions $3\leq d\leq 6$
(resp. $d>6$),
which is not discussed in \cite{FX18.1,FX18.2}.
Below we also prove scattering
and the Stroock-Varadhan type support theorem for equa\-tionref{equa-x},
see Theorems \ref{Thm-Sca} and \ref{Thm-Supp}.
{\epsilon}nd{remark}
We can also enhance the estimates equa\-tionref{thm-L2-Lpq} and equa\-tionref{thm-H1-LpWq}
to the whole time regime,
provided that $g_k\in L^2({\mathbb{R}}^+)$, $1\leq k\leq N$, a.s.. Namely, we have
\begin{theorem} \label{Thm-S0S1-Global}
Consider the situations in Theorem \ref{Thm-GWP} $(i)$ (resp. $(ii)$).
Assume additionally that $g_k\in L^2({\mathbb{R}}^+)$, $1\leq k\leq N$, a.s..
Then, for each $X_0\in L^2$ (resp. $X_0\in H^1$),
the solution $X$ to equa\-tionref{equa-x}
satisfies that for any Strichartz pair $(\rho, {\gamma})$,
\begin{align}
&X\in L^{\gamma}({\mathbb{R}}^+; L^{\rho}) \cap L^2({\mathbb{R}}^+; H^\frac 32_{-1}), \ \ {\mathbb{P}}-a.s. \label{globbd-L2-Lpq} \\
(resp.\ & X\in L^{\gamma}({\mathbb{R}}^+; W^{1,\rho}) \cap L^2({\mathbb{R}}^+; H^\frac 32_{-1})\ \ {\mathbb{P}}-a.s..). \label{globdd-H1-LpWq}
{\epsilon}nd{align}
{\epsilon}nd{theorem}
Next, we study the scattering behavior of global solutions to \eqref{equa-x} at infinity.
Besides $L^2$ and $H^1$,
we also work with the pseudo-conformal space, i.e.,
$\Sigma:=\{f\in H^1: |\cdot|f(\cdot) \in L^2\}$,
in which case
we assume that the time functions $g_k$ in $(H0)$
have appropriate integrability and
decay at infinity, as in \cite{HRZ18}.
\begin{itemize}
\item[{\rm(H1)}] For each $1\leq k\leq N$,
\begin{align} \label{AF-3}
\limsup\limits_{|x|\to {\infty}} |x|^3 |\partial_x^{\gamma} \phi_k(x)| = 0,\ \ 1\leq |{\gamma}|\leq 3,
\end{align}
${\rm ess\,sup}_{\Omega} \int_0^{\infty}(1+t^4)g_k^2(t) dt <{\infty}$,
$1\leq k\leq N$,
and for ${\mathbb{P}}$-a.e. $\omega \in \Omega$,
\begin{align} \label{ILog}
\lim\limits_{t\nearrow 1} (1-t)^{-3} \left(\int_{\frac{t}{1-t}}^{\infty} g_k^2 (\omega, s)ds \ln \ln \left({\int_{\frac{t}{1-t}}^{\infty} g_k^2 (\omega, s)ds}\right)^{-1} \right)^{\frac 12} =0.
\end{align}
\end{itemize}
\begin{remark}
As mentioned in \cite[Remark 1.4]{HRZ18},
the $L^{\infty}(\Omega)$ condition on $\int_0^{\infty} (1+t^4)g_k^2(t)dt$
can be weakened by some suitable exponential integrability.
{\epsilon}nd{remark}
In order to formulate the scattering results,
we shall use the rescaling function
\begin{align} \label{vf*}
{\varphi}_*(t)=-\sum_{k=1}^N \int_t^{\infty} G_k(s) d\beta_k(s) + \frac 12 \sum_{k=1}^N \int_t^{\infty} \left(|G_k(s)|^2+G^2_k(s) \right)ds,
{\epsilon}nd{align}
Note that,
${\varphi}_* \in C({\mathbb{R}}^+; W^{1,{\infty}})$
if $g_k\in L^2({\mathbb{R}}^+)$, $1\leq k\leq N$, a.s..
Then, letting
\begin{align} \label{z*}
z_*(t):= e^{-{\varphi}_*(t)} X(t),
{\epsilon}nd{align}
we have
\begin{align} \label{equa-RNLS-Sca}
i {\partial}artial_t z_* = e^{-{\varphi}_*(t)}{\Delta}elta (e^{{\varphi}_*(t)}z_*) - e^{({\alpha}-1) {\rm Re} \, {\varphi}_*(t)} F(z_*),
{\epsilon}nd{align}
with $ z_*(0) = X_0$. Here,
\begin{align} \label{Op-A*}
e^{-{\varphi}_*(t)}{\Delta}elta (e^{{\varphi}_*(t)}z_*) = ({\Delta}elta + b_*(t) \cdot \noindent a + c_*(t))z_*
{\epsilon}nd{align}
with the coefficients of lower order perturbations
\begin{align}
b_*(t) =& -2 \sum\limits_{k=1}^N \int_t^{\infty} \nabla G_k(s) d\beta_k(s) + 2 \int_t^{\infty} \nabla \widehat{\mu}(s)ds, \label{b*}\\
c_*(t) =& \sum\limits_{j=1}^d \left( \sum\limits_{k=1}^N \int_t^{\infty} \partial_j G_k(s) d\beta_k(s) - \int_t^{\infty} \partial_j\widehat{\mu}(s)ds\right)^2 \nonumber \\
&-\sum\limits_{k=1}^N \int_t^{\infty} \Delta G_k(s) d\beta_k(s) + \int_t^{\infty} \Delta \widehat{\mu}(s)ds, \label{c*}
\end{align}
and
\begin{align} \label{mu}
\widehat{\mu}(s,x) = \frac 12 \sum\limits_{k=1}^{N} (|G_k(s,x)|^2 + G_k(s,x)^2)
= \sum\limits_{k=1}^N ({\rm Re} \, G_k) G_k (s,x).
{\epsilon}nd{align}
It is also convenient to use the notations $U_*(t,s)$ (resp. $U(t,s)$),
$s,t\geq 0$, for the evolution operators
corresponding to the random equation \eqref{equa-RNLS-Sca}
(resp. \eqref{equa-RNLS} with $\sigma=0$)
in the homogeneous case $F\equiv 0$.
We are now ready to state the scattering result.
\begin{theorem} \label{Thm-Sca} ({\it Scattering})
$(i)$
Consider the defocusing mass-critical case,
i.e., ${\lambda}=-1$, ${\alpha} = 1+ 4/d$, $d{\gamma}eq 1$.
Assume $(H0)$ and that $g_k\in L^2({\mathbb{R}}^+)$, $1\leq k\leq N$, a.s..
Then, for each $X_0\in L^2$,
the global $L^2$-solution $X$ to equa\-tionref{equa-x} scatters at infinity, i.e.,
${\mathbb{P}}$-a.s. there exist $v_+, u_+\in L^2$ such that
\begin{align} \label{Sca-L2.1}
e^{it{\Delta}elta}e^{-{\varphi}_*(t)} X(t) \to v_+,\ \ in\ L^2,\ as\ t\to {\infty},
{\epsilon}nd{align}
and
\begin{align} \label{Sca-L2.2}
U_*(0,t) e^{-{\varphi}_*(t)} X(t) \to u_+, \ \ in\ L^2, \ as\ t\to {\infty}.
{\epsilon}nd{align}
$(ii)$
Consider the defocusing energy-critical case,
i.e., ${\lambda}=-1$, ${\alpha} = 1+ 4/(d-2)$, $d{\gamma}eq 3$.
Assume $(H0)$ and that $g_k\in L^2({\mathbb{R}}^+)$, $1\leq k\leq N$, a.s..
In the high dimensional case where $d>6$,
assume additionally that
\begin{align} \label{globdd-E-assum-d6}
E_{\infty}: = \sup\limits_{0\leq t<{\infty}} |X(t)|_{H^1} \leq C <{\infty},\ \ a.s..
{\epsilon}nd{align}
Then, for each $X_0\in H^1$,
the global $H^1$-solution satisfies the asymptotics equa\-tionref{Sca-L2.1} and equa\-tionref{Sca-L2.2}
with $H^1$ replacing $L^2$.
$(iii)$
Consider the situations as in the defocusing energy-critical case in $(ii)$,
$d{\gamma}eq 3$.
Then, for each $X_0\in \Sigma$,
the asymptotic equa\-tionref{Sca-L2.1} holds with $\Sigma$ replacing $L^2$.
{\epsilon}nd{theorem}
\begin{remark}
Unlike in the deterministic case,
the scattering behavior of stochastic solutions to equa\-tionref{equa-x}
is closely related to the rescaling function $e^{-{\varphi}_*}$,
which, actually, encodes the information of noise in equa\-tionref{equa-x}.
We would like also to mention that,
the rescaling function here is different from that in the proof of
global well-posedness in Theorem \ref{Thm-GWP}
(see equa\-tionref{vf} below).
{\epsilon}nd{remark}
\begin{remark}
As in the $H^1$ case in Theorem \ref{Thm-GWP} $(ii)$,
it is possible to obtain the global pathwise bound equa\-tionref{globdd-E-assum-d6}
from the Hamiltonian equa\-tionref{Ito-H} below,
by using similar arguments
as in the proof of \cite[(1.7)]{HRZ18}.
However, the rigorous derivation of equa\-tionref{Ito-H} is technically unclear.
{\epsilon}nd{remark}
Next,
we characterize the topological support of the law of global solutions to \eqref{equa-x},
in both the mass-critical and energy-critical cases.
The support theorem for diffusions was initiated in the seminal papers
\cite{SV72, SV72.2}
and has been extensively studied in the literature.
We refer to \cite{G05}
and \cite{G07} for stochastic nonlinear Schr\"odinger equations
with additive noise and with fractional noise, respectively.
See also \cite{MS94,MS94.2} and references therein.
Let $\mathscr{H}$ denote the Cameron-Martin space associated with
the Brownian motions $\beta=(\beta_1,\cdots,\beta_N)$,
i.e., $\mathscr{H}=\{h \in H^1(0,T; {\mathbb{R}}^N): h(0)=0\}$.
For any $h=(h_1,\cdots,h_N)\in \mathscr{H}$,
let $X(\beta+h)$ be the solution to equa\-tionref{equa-x} with the driven process $\beta+h$ replacing
the Brownian motion $\beta$.
Moreover, let $S(h)$ denote the solution to the controlled equation below
\begin{align} \label{equa-Sh}
i dS(h) &= \Delta S(h) dt + {\lambda} F(S(h)) dt - i \widehat{\mu} S(h) dt + i\sum\limits_{k=1}^N S(h) G_k \dot{h}_k dt, \\
S(h)(0) &= X_0, \nonumber
\end{align}
where $\widehat{\mu}$ is as in \eqref{mu},
and $\dot{h}_k$ is the derivative of $h_k$.
We also use the notation ${\rm supp} ({\mathbb{P}} \circ X^{-1})$
for the topological support of law of solutions to equa\-tionref{equa-x}.
\begin{theorem} \label{Thm-Supp} (Support Theorem)
$(i)$ Consider the defocusing mass-critical case,
i.e., ${\lambda}=-1$, ${\alpha} = 1+ 4/d$, $d{\gamma}eq 1$.
Assume $(H0)$ and that
$g_k$ are deterministic and continuous, $1\leq k\leq N$.
Let $X$ be the global $L^2$-solution to equa\-tionref{equa-x}
corresponding to $X(0)=X_0\in L^2$.
Then,
the support ${\rm supp} ({\mathbb{P}} \circ X^{-1})$
in the spaces $S^0(0,T)$ and $L^2(0,T; H^{\frac 12}_{-1})$
is the closure of the set $\{S(h), h\in \mathscr{H}\}$.
$(ii)$ Consider the defocusing energy-critical case,
i.e., ${\lambda}=-1$, ${\alpha} = 1+ 4/(d-2)$, $3\leq d\leq 6$.
Assume $(H0)$ and that $g_k$ are deterministic and continuous, $1\leq k\leq N$.
Let $X$ be the global $H^1$-solution to equa\-tionref{equa-x}
with $X(0)=X_0\in H^1$.
Then,
the support ${\rm supp} ({\mathbb{P}} \circ X^{-1})$
in the spaces $S^1(0,T)$ and $L^2(0,T; H^{\frac 32}_{-1})$
is the closure of the set $\{S(h), h\in \mathscr{H}\}$.
{\epsilon}nd{theorem}
\begin{remark}
Theorem \ref{Thm-Supp} applies in particular to the stochastic nonlinear Schr\"odinger equations
in \cite{BRZ14,BRZ16}, where $g_k \equiv 1$, $1\leq k\leq N$.
{\epsilon}nd{remark}
\begin{remark}
We also expect the support theorem to hold in the energy-critical case in high dimensions $d>6$;
however, the present stability result, Theorem \ref{Thm-Sta-H1}, cannot be applied there,
due to the smallness condition on the time function $g$ in \eqref{Sta-H1-ve} below.
{\epsilon}nd{remark}
\begin{remark}
Equation equa\-tionref{equa-Sh} can be viewed as
a subcritical (linear) perturbation of the nonlinear Schr\"odinger equation equa\-tionref{equa-NLS} below.
This observation helps to obtain the global well-posedness of equa\-tionref{equa-Sh}
by using the stability results in \cite{KV13, TV05, TVZ07}.
So,
if the support theorem is assumed {\it a-priori} to hold,
then, intuitively, the stochastic solution $X$ itself should also exist globally.
This viewpoint, actually, offers an intuition for the
global well-posedness of equa\-tionref{equa-x} in critical cases.
{\epsilon}nd{remark}
The proof of Theorem \ref{Thm-GWP}
is mainly based on a new application of the rescaling approach and on stability theory.
In order to prove the global existence of solutions to \eqref{equa-x},
in view of Theorem \ref{Thm-LWP},
we only need to obtain global bounds on the
$L^{2+\frac 4 d}(0,\tau^*; L^{2+\frac 4d})$- and
$L^{\frac{2(d+2)}{d-2}}(0,\tau^*; L^{\frac{2(d+2)}{d-2}})$-norms
of solutions
in the mass- and energy-critical cases, respectively.
Such estimates were obtained in the deterministic case by using
the energy induction method
or the concentration-compactness method,
combined with
the conservation laws
(e.g., of the mass and the Hamiltonian)
and interaction Morawetz estimates.
However,
the presence of Brownian motions in \eqref{equa-x} destroys the conservation laws;
the related It\^o formulas
contain several stochastic integrals
(see \eqref{Ito-L2}, \eqref{Ito-H} below),
which makes it quite hard to obtain estimates as in the deterministic case.
Proceeding differently,
we perform a series of rescaling transformations on
a random partition (depending on the growth of noise) of any bounded time interval.
On each small time piece,
we compare the resulting random equation with
the standard nonlinear Schr\"odinger equation with the same initial data,
by using the stability results
(see Theorems \ref{Thm-Sta-L2},
\ref{Thm-Sta-H1-dlow} and \ref{Thm-Sta-H1} below)
and the work of \cite{CKSTT08,RV07,V07} and \cite{D12,D16.1,D16.2}
in the deterministic defocusing mass- and energy-critical cases, respectively
(see Theorems \ref{Thm-L2GWP-Det} and \ref{Thm-H1GWP-Det} below).
Then,
by virtue of the global pathwise bounds of
mass and energy in the defocusing case,
we are able to put together all finitely many bounds
in the previous step to
obtain the desirable global bounds.
For the reader's convenience,
let us explain the procedure above more precisely
on a random time interval $[\sigma, \sigma+\tau]$,
where $\sigma$ and $\sigma+\tau$ are $(\mathscr{F}_t)$-stopping times.
We use the rescaling transformation
\begin{align} \label{vsigma}
v_\sigma(t) := e^{-{\varphi}_\sigma(t)} X(\sigma+t),\ \ t\in[0,\tau],
\end{align}
where
\begin{align} \label{vf}
{\varphi}_\sigma(t,x)
:= \sum\limits_{k=1}^N \int_\sigma^{\sigma+ t} G_k(s,x) d\beta_k(s)
- \int_\sigma^{\sigma+t} \widehat{\mu}(s,x) ds
\end{align}
with $\widehat{\mu}$ as in \eqref{mu}.
The rescaling transformation
can be regarded as
a Doss--Sussmann type transformation in Hilbert space.
See, e.g., \cite{BR15}
for applications of the rescaling approach
to general stochastic partial differential equations with coercive structure,
\cite{BRZ18} for the application to optimal bilinear control problems,
and \cite{BRZ17, Z17}
for other quite general stochastic dispersive equations.
The nice feature is that it reveals the structure of the stochastic equation \eqref{equa-x}
by reducing it to the random equation with lower order perturbations below:
\begin{align} \label{equa-RNLS}
i{\partial}_t v_\sigma
=& e^{-{\varphi}_\sigma} \Delta (e^{{\varphi}_\sigma} v_\sigma) - e^{({\alpha}-1){\rm Re} \, {\varphi}_\sigma} F(v_\sigma), \\
v_\sigma(0)=& X(\sigma), \nonumber
\end{align}
where
\begin{align} \label{Op-A}
e^{-{\varphi}_\sigma}\Delta(e^{{\varphi}_\sigma} v_\sigma)
= (\Delta + b_\sigma(t) \cdot \nabla + c_\sigma(t))v_\sigma,
\end{align}
and the coefficients
\begin{align}
b_\sigma(t)
=& 2 \nabla {\varphi}_\sigma(t)
= 2 \sum\limits_{k=1}^N \int_\sigma^{\sigma+t} \nabla G_k(s) d\beta_k(s)
- 2 \int_\sigma^{\sigma+t} \nabla \widehat{\mu}(s) ds, \label{b} \\
c_\sigma(t)
=&\Delta {\varphi}_\sigma + \sum\limits_{j=1}^d ({\partial}_j {\varphi}_\sigma)^2 \nonumber \\
=& \sum\limits_{j=1}^d
\left( \sum\limits_{k=1}^N \int_\sigma^{\sigma+t} {\partial}_j G_k(s) d\beta_k(s)
-\int_\sigma^{\sigma+t} {\partial}_j \widehat{\mu}(s) ds \right)^2 \nonumber \\
& + \sum\limits_{k=1}^N \int_\sigma^{\sigma+t} \Delta G_k(s) d\beta_k(s)
- \int_\sigma^{\sigma+t} \Delta \widehat{\mu}(s)ds. \label{c}
\end{align}
The result below connects equations \eqref{equa-x} and \eqref{equa-RNLS},
and generalizes the case where $\sigma \equiv 0$ considered in \cite{BRZ14}-\cite{BRZ18}.
The proof is postponed to the Appendix.
\begin{theorem} \label{Thm-Rescale-sigma}
Consider the situations in Theorem \ref{Thm-LWP}.
Let $X$ be the $L^2$-(resp. $H^1$-)solution to \eqref{equa-x} on $[0,\tau^*)$ with $X(0)=X_0 \in L^2$ (resp. $X_0\in H^1$),
where $\tau^*$ is the maximal existence time.
Let $v_\sigma$ be as in \eqref{vsigma},
where
$\sigma$ is any $(\mathscr{F}_t)$-stopping time
satisfying $0\leq \sigma <\tau^*$.
Then,
$v_\sigma$ satisfies \eqref{equa-RNLS}
on $[0,\tau^*-\sigma)$ in the space $H^{-2}$ (resp. $H^{-1}$) almost surely.
\end{theorem}
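Let us also indicate, purely formally, why the transformation \eqref{vsigma} removes the stochastic integral (this is only a heuristic sketch under the notation above; the rigorous statement is exactly Theorem \ref{Thm-Rescale-sigma}, proved in the Appendix). The It\^o product rule gives
\begin{align*}
dv_\sigma = \big(d\, e^{-{\varphi}_\sigma}\big)\, X(\sigma+\cdot) + e^{-{\varphi}_\sigma}\, dX(\sigma+\cdot) + d\big[ e^{-{\varphi}_\sigma}, X(\sigma+\cdot)\big].
\end{align*}
By \eqref{vf}, the martingale part of $d\, e^{-{\varphi}_\sigma}$ is $-e^{-{\varphi}_\sigma}\sum_{k=1}^N G_k\, d\beta_k$, while by \eqref{equa-x'} the martingale part of $dX$ is $\sum_{k=1}^N X G_k\, d\beta_k$; hence the two stochastic differentials cancel in the sum above, and the remaining It\^o correction terms are absorbed by the compensators $\widehat{\mu}$ in \eqref{vf} and $\mu$ in \eqref{equa-x}, leaving the pathwise (It\^o-integral-free) equation \eqref{equa-RNLS}.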
The key observation here is
that the amplitude of the lower order perturbations
depends only on the trajectories of the noises $\{\beta_k(t), \sigma\leq t\leq \sigma+\tau\}$.
This fact inspires us to view Equation \eqref{equa-RNLS},
when the random interval is short enough,
as a small perturbation of the nonlinear Schr\"odinger equation
\begin{align} \label{equa-NLS}
i{\partial}_t \widetilde{u}
=& {\Delta}elta \widetilde{u} - F(\widetilde{u}), \\
\widetilde{u}(0)=& v_\sigma(0)= X(\sigma). \noindent onumber
{\epsilon}nd{align}
Now,
it becomes clear that a stability-type result will accomplish the comparison procedure above.
It should be mentioned that,
because of the lower order perturbations,
we need to prove stability results for equations of the same form as \eqref{equa-RNLS}.
For this reason, we reformulate \eqref{equa-NLS} as
\begin{align} \label{equa-NLS*}
i {\partial}_t \widetilde{u}
= e^{-{\varphi}_\sigma}\Delta(e^{{\varphi}_\sigma}\widetilde{u})
- e^{({\alpha}-1){\rm Re} \, {\varphi}_{\sigma}} F(\widetilde{u}) + e
\end{align}
with the error term
\begin{align} \label{Error-NLS}
e = -(b_\sigma(t) \cdot \nabla + c_\sigma(t))\widetilde{u}
- (1-e^{({\alpha}-1){\rm Re} \, {\varphi}_\sigma}) F(\widetilde{u}),
\end{align}
where the coefficients $b_\sigma$, $c_\sigma$ are as in \eqref{b} and \eqref{c}, respectively.
The proof of the stability results in Theorems \ref{Thm-Sta-L2},
\ref{Thm-Sta-H1-dlow} and \ref{Thm-Sta-H1} below
is mainly inspired by the work \cite{KV13,TV05,TVZ07}.
However,
it relies heavily on the Strichartz estimates for the Laplacian with lower order perturbations
(see Theorem \ref{Thm-Stri}).
Moreover, another important role here is played by the local smoothing spaces,
which enable us to control the lower order perturbations arising from the operator
$e^{-{\varphi}_\sigma}\Delta(e^{{\varphi}_\sigma} \cdot)$,
for which a pseudo-differential calculus is performed.
Finally,
the proof of scattering in Theorem \ref{Thm-Sca} is based on the
very recent work \cite{HRZ18},
and the proof of the Stroock-Varadhan type support theorem
(i.e., Theorem \ref{Thm-Supp})
is inspired by the work \cite{MS94}.
In both cases,
we shall construct appropriate rescaling transformations,
related to the structure of our problems.
See, e.g., equa\-tionref{vf*},
and equa\-tionref{res-zn-Sbetan}, equa\-tionref{res-yn-Xbetan} below.
We also emphasize that,
again the key ingredients are the
stability results in Theorems \ref{Thm-Sta-L2},
\ref{Thm-Sta-H1-dlow} and \ref{Thm-Sta-H1}
for the Laplacian with lower order perturbations. \\
The remainder of this paper is structured as follows.
In Section \ref{Sec-Pre},
we present the preliminaries used in this paper,
including the pseudo-differential operators,
the Strichartz and local smoothing estimates and
the exotic Strichartz spaces.
Then, we prove the stability results in both mass- and energy-critical cases in Section \ref{Sec-Sta}.
Sections \ref{Sec-GWP}, \ref{Sec-Sca} and \ref{Sec-Supp}
are mainly devoted to the proof of Theorems \ref{Thm-GWP}, \ref{Thm-Sca} and \ref{Thm-Supp}, respectively.
Finally, some technical proofs are postponed to the Appendix, i.e. Section \ref{Sec-App}.
\section{Preliminaries} \label{Sec-Pre}
This section collects some preliminaries used in this paper.
\subsection{Pseudo-differential operators}
We recall some basic facts of pseudo-differential operators.
For more details see \cite{K81,T00,Z17} and references therein.
We say that
$a\in C^{\infty}({\mathbb{R}}^d \times {\mathbb{R}}^d)$ is a symbol of class $S^m$
if for any multi-indices ${\alpha},\beta \in \mathbb{N}^d$,
$|\partial^{\alpha}_\xi\partial^\beta_x a(x,\xi)| \leq C_{{\alpha},\beta} \left<\xi\right>^{m-|{\alpha}|}$.
The semi-norms $|a|_{S^m}^{(l)}$ are defined by
$$|a|_{S^m}^{(l)} = \max_{|{\alpha}+\beta|\leq l} \sup_{{\mathbb{R}}^{2d}}
\{ |\partial^{\alpha}_\xi \partial^\beta_x a(x,\xi)|\left<\xi\right>^{-(m-|{\alpha}|)}\},\ \ l\in \mathbb{N}. $$
Let $\Psi_a$ denote the pseudo-differential operator
associated with the symbol $a(x,\xi)$, i.e.,
\begin{align*}
\Psi_a v(x) = (2\pi)^{-d} \int e^{ix \cdot \xi} a(x,\xi) \mathscr{F}(v)(\xi) d\xi,\ \ v\in \mathscr{S}.
\end{align*}
In this case, we write $\Psi_a \in S^m$ when no confusion arises.
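For instance (standard examples, listed only to fix ideas with the notation above): the symbol $a(x,\xi)=\left<\xi\right>^s$ belongs to $S^s$, with $\Psi_{\left<\xi\right>^s}=\left<\nabla\right>^s$; the symbol $a(x,\xi)=|\xi|^2$ belongs to $S^2$ and, with the convention for $\mathscr{F}$ above,
\begin{align*}
\Psi_{|\xi|^2} v(x) = (2\pi)^{-d}\int e^{ix\cdot\xi}\,|\xi|^2\, \mathscr{F}(v)(\xi)\, d\xi = -\Delta v(x);
\end{align*}
and any $b\in C^{\infty}_b({\mathbb{R}}^d)$, viewed as a symbol independent of $\xi$, belongs to $S^0$, with $\Psi_b$ the operator of multiplication by $b$.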
\begin{lemma} \label{Lem-Err}
Let $a_i\in S^{m_i}$, $i=1,2$. Then, $\Psi_{a_1} \circ \Psi_{a_2} = \Psi_a \in S^{m_1+m_2}$
with
\begin{align*}
a(x,\xi)
= (2\pi)^{-d} \iint e^{-iy\cdot \eta} a_1(x,\xi+\eta) a_2(x+y,\xi) dy d\eta .
\end{align*}
\end{lemma}
Note that, for $a\in S^{m_1}$ and $b\in S^{m_2}$, the commutator $i[\Psi_{a}, \Psi_{b}]:= i(\Psi_{a} \Psi_{b} - \Psi_{b} \Psi_{a})$
is an operator with symbol in $S^{m_1+m_2-1}$, and its principal symbol is the Poisson bracket
\begin{align*}
H_{a}b := \{a,b\} = \sum\limits_{j=1}^d \partial_{\xi_j}a\, \partial_{x_j}b - \partial_{\xi_j}b\, \partial_{x_j}a.
\end{align*}
One can also expand the composition of two pseudo-differential operators
into any finite order
and estimate the remainder.
See Lemmas $3.1$ and $3.2$ in \cite{Z17}.
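As a simple illustration of the Poisson bracket (a two-line computation included only for concreteness), take $a(x,\xi)=|\xi|^2$, so that $\Psi_a=-\Delta$, and $b=b(x)\in C_b^{\infty}({\mathbb{R}}^d)\subseteq S^0$. Then
\begin{align*}
\{|\xi|^2, b\} = \sum\limits_{j=1}^d 2\xi_j\, \partial_{x_j} b = 2\xi\cdot \nabla b,
\qquad
i[-\Delta, b]u = -i(\Delta b)u - 2i\,\nabla b\cdot \nabla u,
\end{align*}
and indeed $2\xi\cdot\nabla b$ is the (first order) principal symbol of $i[-\Delta,b]$, while the remaining term $-i(\Delta b)$ is of order zero.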
\begin{lemma} \label{Lem-L2-Bdd}
Let $a\in S^0$, $p\in (1,{\infty})$.
Then, for some $C>0$ and $l \in \mathbb{N}$,
\begin{align} \label{pdo-l2}
\|\Psi_a \|_{\mathcal{L}(L^p)} \leq C |a|_{S^0}^{(l)} .
{\epsilon}nd{align}
{\epsilon}nd{lemma}
\subsection{Strichartz and local smoothing estimates} \label{Subsec-Stri}
We first present the Strichartz and local smoothing estimates below.
\begin{theorem} \label{Thm-Stri}
Let $ I=[t_0,T]\subseteq {\mathbb{R}}^+$.
Consider the equation
\begin{align} \label{equa-stri}
& i{\partial}_t u = e^{-\Phi} \Delta(e^\Phi u) + f.
\end{align}
Here,
the function
$\Phi =\Phi(t,x)$ is continuous in $t$ for each $x\in {\mathbb{R}}^d$, $d\geq 1$,
and satisfies, for each multi-index ${\gamma}$,
\begin{align} \label{psi-Stri}
\sup\limits_{t\in I} |{\partial}_x^{\gamma} \Phi(t,x)| \leq C({\gamma}) \sup\limits_{t\in I}g(t) \left<x\right>^{-2}
\end{align}
for some positive and continuous function $g$.
Then, for any $u(t_0)\in L^2$
and $f\in N^0(I) + L^2{(I; H^{-\frac 12}_1)}$,
the solution $u$ to equa\-tionref{equa-stri} satisfies
\begin{align} \label{L2-Stri}
\|u\|_{S^0(I) \cap L^2{(I; H^{\frac12}_{-1})}}\leq
C_T (|u(t_0)|_{2}+\|f\|_{N^0(I) + L^2{(I; H^{-\frac 12}_1)}}).
{\epsilon}nd{align}
Moreover, if in addition $u(t_0)\in H^1$, $d{\gamma}eq 3$,
$f\in N^1(I)+ L^2{(I; H^{\frac 12}_1)}$,
then
\begin{align} \label{H1-Stri}
\|u\|_{S^1(I) \cap L^2{(I; H^\frac 32_{-1})}}
\leq&
C_T \big(|u(t_0)|_{H^1}+\|f\|_{N^1(I)+ L^2{(I; H^{\frac 12}_1)}}).
{\epsilon}nd{align}
{\epsilon}nd{theorem}
Throughout the paper, we use the notation $C_T$ for the
constant in the Strichartz estimates above.
We may assume $C_T \geq 1$ without loss of generality.
\begin{remark}
Estimates equa\-tionref{L2-Stri} and equa\-tionref{H1-Stri} are the so called {\it local-in-time} estimates,
in that the constant $C_T$ depends on time.
Quantitative estimates and $L^p(\Omega)$-integrability
of $C_T$ have been obtained in \cite{Z17}
for quite general stochastic dispersive equations,
including stochastic Schr\"odinger equations with variable coefficients
as well as the stochastic Airy equation.
See also \cite{MMT08} for more general situations
where Hamiltonian flows associated to Schr\"odinger operators are trapped.
{\epsilon}nd{remark}
{\bf Proof.}
Estimate \eqref{L2-Stri} can be proved similarly as in \cite[Theorem $2.11$]{Z17}.
See also Remark $2.14$ in \cite{Z17}.
Actually,
the asymptotically flat condition \eqref{psi-Stri} guarantees that the
lower order perturbations arising in the operator $e^{-\Phi}\Delta( e^{\Phi} \cdot)$
can be controlled, via the G{\aa}rding inequality,
by the commutator $i[\Psi_h,\Delta]$
for some appropriate symbol $h\in S^0$
(see the proof of \cite[Theorem 4.1]{Z17}).
We refer to \cite{Z17} for more details.
See also \cite[Lemma 4.1]{BRZ14} and \cite[Lemma 2.7]{BRZ16} for the
special case where $\Phi$ is as in \eqref{vf} with $\sigma \equiv 0$.
Regarding \eqref{H1-Stri},
applying the operator $\left<\nabla\right>$ to both sides of \eqref{equa-stri} we get
\begin{align} \label{equa-nau}
i {\partial}_t (\left<\nabla\right> u) = e^{-\Phi}\Delta(e^\Phi (\left<\nabla\right>u))
+ [\left<\nabla\right>, b\cdot \nabla+c] u + \left<\nabla\right>f,
\end{align}
where the coefficients are $b=2\nabla \Phi$ and
$c= \Delta \Phi+ \sum_{j=1}^d ({\partial}_j\Phi)^2$.
We regard equa\-tionref{equa-nau} as the equation for the unknown $\left<\noindent a\right>u$.
Then, equa\-tionref{L2-Stri} yields
\begin{align} \label{H1-stri.0}
\|u\|_{S^1(I) \cap L^2{(I; H^{\frac 32}_{-1})}}\leq
C_T& \bigg(|u(t_0)|_{H^1}
+\|[\left<\noindent a\right>, b\cdot \noindent a+c] u\|_{L^2(I; H^{-\frac 12}_1)} \noindent onumber \\
&\qquad +\|f\|_{N^1(I) + L^2{(I; H^{\frac 12}_1)}} \bigg).
{\epsilon}nd{align}
Note that,
for the commutator $[\left<\nabla\right>, b\cdot \nabla+c]$,
\begin{align*}
\left<x\right>\left<\nabla\right>^{-\frac 12} [\left<\nabla\right>, b\cdot \nabla+c] = \Psi_p \left<x\right>^{-1}\left<\nabla\right>^{\frac 12},
\end{align*}
where $\Psi_p:= \left<x\right>\left<\nabla\right>^{-\frac 12} [\left<\nabla\right>, b\cdot \nabla+c] \left<\nabla\right>^{-\frac 12} \left<x\right>$
is a pseudo-differential operator of order $0$
with semi-norms depending on $\sup_{t\in I} g(t)$.
By Lemma \ref{Lem-L2-Bdd} and \eqref{L2-Stri},
\begin{align} \label{esti-bc-Stri}
\|[\left<\nabla\right>, b\cdot \nabla+c] u\|_{L^2(I; H^{-\frac 12}_1)}
\leq& C \sup\limits_{t\in I} g(t) \|u\|_{L^2(I; H^{\frac 12}_{-1})} \nonumber \\
\leq& CC_T \sup\limits_{t\in I} g(t) \|f\|_{N^0(I)+L^2(I; H^{-\frac 12}_{1})}.
\end{align}
Since, for $d\geq 3$,
$\left<x\right>^2$ is a weight of Muckenhoupt class $A_2$
(see, e.g., \cite[Lemma 2.3 (iv)]{FS97}),
by virtue of the boundedness of the multiplier $m(\xi)=\left<\xi\right>^{-1}$
on the weighted space $L^2(\left<x\right>^2dx)$ (see, e.g., \cite{KW79, K80}),
we have the embedding
$H_1^{\frac 12} \hookrightarrow H_1^{-\frac 12}$
and so $L^2(I; H^{\frac 12}_{1}) \hookrightarrow L^2(I; H^{-\frac 12}_{1})$.
Therefore, taking into account $N^1(I) \hookrightarrow N^0(I)$
and plugging \eqref{esti-bc-Stri} into \eqref{H1-stri.0},
we obtain \eqref{H1-Stri}. The proof is complete.
$\square$ \\
It is known that {\it global-in-time} Strichartz and local smoothing estimates
(i.e., the constant $C_T$ is independent of $T$)
hold for the free Schr\"odinger group $\{e^{-it{\Delta}elta}\}$.
See, e.g., \cite{B08, KV13, MMT08} and references therein.
This is also true for
the operator $-i e^{-\Phi}{\Delta}elta(e^{\Phi} \cdot)$
when $g$ satisfies some smallness condition,
which is crucial in the study of scattering in Section \ref{Sec-Sca} below.
Precisely, we have
\begin{theorem} \label{Thm-Stri*}
Consider the situations as in Theorem \ref{Thm-Stri}.
Assume $(H0)$.
Assume additionally that for some $T_*>0$,
$\sup_{t{\gamma}eq T_*} g(t) \leq {\varepsilon}$ with ${\varepsilon}$ sufficiently small.
Then,
the estimates equa\-tionref{L2-Stri} and
equa\-tionref{H1-Stri} also hold
with some constant $C$ independent of $t_0$ and $T$,
and $u(t_0)$ can be replaced by the final datum $u(T)$.
{\epsilon}nd{theorem}
\begin{remark}
Similar estimates were proved in \cite[Corollary 5.3]{HRZ18},
with $L^2(I;H^\frac{1}{2}_{-1})$ and $L^2(I;H^{-\frac{1}{2}}_{1})$
replaced by the local smoothing spaces $LS(I)$ and $LS'(I)$ introduced in \cite{MMT08}, respectively.
We refer to \cite{MMT08} for more general situations.
{\epsilon}nd{remark}
{\bf Proof.}
Below we mainly consider the $L^2$ case.
The proof is quite similar to that of \cite[Corollary 5.3 (i)]{HRZ18}.
Actually, we have from Equation equa\-tionref{equa-stri} that
\begin{align*}
i {\partial}_t u = {\Delta}elta u + (b\cdot \noindent a + c) u + f,
{\epsilon}nd{align*}
where $b,c$ are as in the proof of Theorem \ref{Thm-Stri}.
We assume $T_*< T$ without loss of generality.
First, on the time regime $[t_0, T_*]$,
using equa\-tionref{L2-Stri} we have
\begin{align} \label{esti-uS0-Stri.0}
\|u\|_{S^0(t_0, T_*) \cap L^2(t_0, T_*; H^{\frac 12}_{-1})}
\leq C_{T_*} |u(t_0)|_2
+ C_{T_*} \|f\|_{N^0(t_0, T_*) + L^2(t_0, T_*; H^{-\frac 12}_{1})}
{\epsilon}nd{align}
Moreover, on the regime $[T_*,T]$,
using the global-in-time Strichartz and local smoothing estimates for
the free Schr\"odinger group $\{e^{-it\Delta}\}$ we get
\begin{align} \label{esti-uS0-Stri.1}
&\|u\|_{S^0(T_*, T) \cap L^2(T_*, T; H^{\frac 12}_{-1})} \noindent onumber \\
\leq& C |u(T_*)|_2 + C \|(b\cdot \noindent a + c) u\|_{L^2(T_*, T; H^{-\frac 12}_{1})}
+ C \|f\|_{N^0(T_*, T) + L^2(T_*, T; H^{-\frac 12}_{1})} \\
\leq& C |u(t_0)|_2 + C \|(b\cdot \noindent a + c) u\|_{L^2(T_*, T; H^{-\frac 12}_{1})}
+ (C_{T_*}+C) \|f\|_{N^0(T_*, T) + L^2(T_*, T; H^{-\frac 12}_{1})}, \noindent onumber
{\epsilon}nd{align}
where $C$ is independent of $t_0$ and $T$,
and in the last step we also used equa\-tionref{esti-uS0-Stri.0} to bound $|u(T_*)|_2$.
Note that, similarly to equa\-tionref{esti-bc-Stri},
\begin{align*}
\|(b\cdot \noindent a + c) u\|_{L^2(T_*, T; H^{-\frac 12}_{1})}
\leq C \sup\limits_{t\in I} g(t) \|u\|_{L^2(T_*, T; H^{\frac 12}_{-1})}
\leq C {\varepsilon} \|u\|_{L^2(T_*, T; H^{\frac 12}_{-1})}.
{\epsilon}nd{align*}
Plugging this into equa\-tionref{esti-uS0-Stri.1}
we obtain that for ${\varepsilon}$ small enough,
\begin{align} \label{esti-uS0-Stri.2}
\|u\|_{S^0(T_*, T) \cap L^2(T_*, T; H^{\frac 12}_{-1})}
\leq C |u(t_0)|_2
+ C \|f\|_{N^0(T_*, T) + L^2(T_*, T; H^{-\frac 12}_{1})},
{\epsilon}nd{align}
Now, combining equa\-tionref{esti-uS0-Stri.0} and equa\-tionref{esti-uS0-Stri.2} together
we obtain equa\-tionref{L2-Stri} with the constant uniformly bounded on the whole time regime ${\mathbb{R}}^+$.
The $H^1$ case can be proved similarly.
Moreover, one can use similar arguments as in the proof of \cite[Corollary 5.3 (iii)]{HRZ18}
to replace $u(t_0)$ in equa\-tionref{L2-Stri} and equa\-tionref{H1-Stri} with the final datum $u(T)$.
The proof is complete.
$\square$ \\
To conclude this subsection,
we collect some estimates in the Strichartz spaces $V(I)$, $W(I)$ and ${\mathbb{W}}(I)$,
where $I$ is any interval in ${\mathbb{R}}^+$.
These estimates will be frequently used throughout this paper.
Precisely, we have
\begin{align} \label{ineq-V}
& \| |u|^{\frac 4d} v\|_{L^{\frac{2(d+2)}{d+4}}(I\times {\mathbb{R}}^d)}
\leq \|u\|_{V(I)}^{\frac 4d} \|v\|_{V(I)}, \\
& \||u|^{\frac{4}{d-2}} v\|_{L^2(I; L^{\frac{2d}{d+2}})}
\leq \|u\|^{\frac{4}{d-2}}_{L^{\frac{2(d+2)}{d-2}}(I; L^{\frac{2(d+2)}{d-2}})}
\|v\|_{W(I)}
\leq \|u\|^{\frac{4}{d-2}}_{{\mathbb{W}}(I)}
\|v\|_{W(I)}, \label{ineq-W.2}
{\epsilon}nd{align}
and if $3\leq d\leq 6$,
\begin{align}
\||u|^{\frac{6-d}{d-2}} v \nabla w\|_{L^2(I; L^{\frac{2d}{d+2}})}
\leq \|u\|^{\frac{6-d}{d-2}}_{{\mathbb{W}}(I)} \|v\|_{{\mathbb{W}}(I)}
\|w\|_{{\mathbb{W}}(I)}. \label{ineq-W.3}
\end{align}
Estimates equa\-tionref{ineq-V}-equa\-tionref{ineq-W.3} can be proved by using the H\"older inequality
and the Sobolev embedding
\begin{align} \label{Sob-W1-Lpq}
{\mathbb{W}}{(I)} \hookrightarrow L^{\frac{2(d+2)}{d-2}}(I; L^{\frac{2(d+2)}{d-2}}).
\end{align}
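For the reader's convenience, we record the elementary exponent bookkeeping behind \eqref{ineq-V} and \eqref{Sob-W1-Lpq} (a routine check, spelled out here only because these exponents are used repeatedly below). For \eqref{ineq-V}, H\"older's inequality in $(t,x)$ with $V(I)=L^{2+\frac 4d}(I;L^{2+\frac 4d})$ rests on the relation
\begin{align*}
\frac{d+4}{2(d+2)} = \frac 4d\cdot\frac{d}{2(d+2)} + \frac{d}{2(d+2)},
\qquad\text{i.e.}\qquad
\frac{1}{\tfrac{2(d+2)}{d+4}} = \frac 4d\cdot\frac{1}{2+\tfrac 4d} + \frac{1}{2+\tfrac 4d}.
\end{align*}
For \eqref{Sob-W1-Lpq}, the Sobolev embedding $W^{1,p}({\mathbb{R}}^d)\hookrightarrow L^q({\mathbb{R}}^d)$ with $\frac 1q=\frac 1p-\frac 1d$ and $p=\frac{2d(d+2)}{d^2+4}$ gives
\begin{align*}
\frac 1q = \frac{d^2+4}{2d(d+2)} - \frac 1d = \frac{d^2+4-2(d+2)}{2d(d+2)} = \frac{d-2}{2(d+2)},
\qquad\text{i.e.}\qquad q=\frac{2(d+2)}{d-2}.
\end{align*}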
\subsection{Exotic Strichartz estimates} \label{Subsec-Exotic}
The exotic Strichartz spaces are introduced primarily
to treat the lack of Lipschitz continuity of the derivatives of the nonlinearity,
particularly in dimensions larger than six.
Indeed,
for the nonlinearity $F(u)=|u|^{\frac{4}{d-2}} u$, $u\in {\mathbb{C}}$,
we have (see \cite[(1.3), (1.4)]{TVZ07})
\begin{align}
& |F_z(u)| + |F_{\overline{z}}(u)|
\leq C |u|^{\frac{4}{d-2}}, \label{Fz.1}\\
& |F_z(u) - F_z(v)| + |F_{\overline{z}}(u) - F_{\overline{z}}(v)|
\leq \left\{
\begin{array}{ll}
C|u-v|^{\frac{4}{d-2}} , & \hbox{if $d>6$;} \\
C|u-v|(|u|^{\frac{6-d}{d-2}} + |v|^{\frac{6-d}{d-2}}), & \hbox{if $3\leq d\leq 6$.}
{\epsilon}nd{array}
\right. \label{Fz.2}
{\epsilon}nd{align}
The space ${\mathbb{X}}(0,\tau)$ allows one to take only $\frac{4}{d+2}$ derivatives of the nonlinearity,
instead of a full derivative;
note that for $d>6$ the exponent $\frac{4}{d-2}<1$, so \eqref{Fz.2} yields only H\"older (rather than Lipschitz) continuity of the derivatives.
Below we recall some important estimates in the exotic Strichartz spaces for $d\geq 3$,
which are mainly proved in \cite{KV13} in the homogeneous case;
the arguments there apply also to the inhomogeneous case
considered in this paper.
\begin{lemma} \label{Lem-X0-bbX-S1}
For any compact time interval $I \subseteq {\mathbb{R}}^+$,
\begin{align}
& \|u\|_{X^0(I)} \leq C \|u\|_{{\mathbb{X}}(I)} \leq C \|u\|_{S^1(I)}. \label{X0-bbX-S1}\\
& \|u\|_{{\mathbb{X}}(I)} \leq C \|u\|^{\frac{1}{d+2}}_{L^\frac{2(d+2)}{(d-2)}(I \times {\mathbb{R}}^d)}
\| u\|_{S^1(I)}^{\frac{d+1}{d+2}}
\leq C\|u\|^{\frac{1}{d+2}}_{{\mathbb{W}}(I)}
\| u\|_{S^1(I)}^{\frac{d+1}{d+2}}. \label{bbX-LpS0}
{\epsilon}nd{align}
and for some $0<c\leq 1$,
\begin{align} \label{Lp-bbXS1}
\|u\|_{L^\frac{2(d+2)}{(d-2)}(I\times {\mathbb{R}}^d)}
\leq \|u\|^c_{{\mathbb{X}}(I)} \|u\|_{S^1(I)}^{1-c}.
{\epsilon}nd{align}
{\epsilon}nd{lemma}
The proof is similar to that of \cite[Lemma 3.11]{KV13}.
\begin{lemma} \label{Lem-bbX-bbYLSN1}
Let $I=[t_0,T]$ be any compact interval in ${\mathbb{R}}^+$. We have
\begin{align}
\|e^{-i(\cdot -t_0) {\Delta}elta }u_0\|_{{\mathbb{X}}(I)} \leq& C |u_0|_{H^1}, \label{bbX-H1} \\
\bigg \|\int_{t_0}^\cdot e^{-i(\cdot-t_0){\Delta}elta} f(s) ds \bigg \|_{{\mathbb{X}}(I)}
\leq & C \|f\|_{{\mathbb{Y}}(I)+L^2(I;H^\frac 12_1)+N^1(I)}. \label{bbX-bbYLSN1}
{\epsilon}nd{align}
{\epsilon}nd{lemma}
{\bf Proof.}
Estimate equa\-tionref{bbX-H1} follows from equa\-tionref{X0-bbX-S1}
and the homogenous Strichartz estimates.
For equa\-tionref{bbX-bbYLSN1},
similar arguments as in the proof of \cite[Lemma 3.10]{KV13} yield
\begin{align*}
\bigg \|\int_{t_0}^\cdot e^{-i(\cdot-t_0){\Delta}elta} f(s) ds \bigg \|_{{\mathbb{X}}(I)}
\leq C \|f\|_{{\mathbb{Y}}(I)}.
{\epsilon}nd{align*}
Moreover, using equa\-tionref{X0-bbX-S1} and Strichartz estimates we have
\begin{align*}
\bigg \|\int_{t_0}^\cdot e^{-i(\cdot-t_0){\Delta}elta} f(s) ds \bigg \|_{{\mathbb{X}}(I)}
\leq C \bigg \|\int_{t_0}^\cdot e^{-i(\cdot-t_0){\Delta}elta} f(s) ds \bigg \|_{S^1(I)}
\leq C \|f\|_{ L^2(I;H^\frac 12_1)+N^1(I)}.
{\epsilon}nd{align*}
Combining the estimates above together we prove equa\-tionref{bbX-bbYLSN1}.
$\square$
\begin{lemma} \label{Lem-F-bbY-bbX}
For any compact time interval $I \subseteq {\mathbb{R}}^+$,
\begin{align}
\|F(u)\|_{{\mathbb{Y}}(I)} \leq& C \|u\|_{{\mathbb{X}}(I)}^{\frac{d+2}{d-2}}. \label{F-bbY-bbX}
{\epsilon}nd{align}
Moreover,
\begin{align} \label{Fz-bbY-S1bbX}
\|F_z(u+v)w\|_{{\mathbb{Y}}(I)}
\leq & C(\|u\|_{{\mathbb{X}}(I)}^\frac{8}{d^2-4} \|u\|_{S^1(I)}^\frac{4d}{d^2-4}
+ \|v\|_{{\mathbb{X}}(I)}^\frac{8}{d^2-4} \|v\|_{S^1(I)}^\frac{4d}{d^2-4} )
\|w\|_{{\mathbb{X}}(I)},
{\epsilon}nd{align}
and similar estimate also holds for $\|F_{\overline{z}}(u+v)w\|_{{\mathbb{Y}}(I)} $.
{\epsilon}nd{lemma}
The proof is similar to that of \cite[Lemma 3.12]{KV13}.
\section{Stability} \label{Sec-Sta}
This section is devoted to the stability results in the mass- and energy-critical cases,
which are crucial for the proof of global well-posedness in the next section.
We begin with
the easier mass-critical case.
\subsection{Mass-critical case} \label{Subsec-Sta-L2}
The main result of this subsection is formulated below.
\begin{theorem} \label{Thm-Sta-L2} ({\it Mass-Critical Stability Result}).
Fix $I=[t_0,T]\subseteq {\mathbb{R}}^+$.
Let $v$ be the solution to
\begin{align} \label{equa-v-p}
i{\partial}_t v = e^{-\Phi}\Delta(e^{\Phi} v) - e^{\frac 4d {\rm Re} \, \Phi}F(v),
\end{align}
where $\Phi$ satisfies \eqref{psi-Stri}, $d\geq 1$,
and let $\widetilde{v}$ solve the perturbed equation
\begin{align} \label{equa-wtv-p}
i{\partial}_t \widetilde{v} = e^{-\Phi}\Delta(e^{\Phi} \widetilde{v}) - e^{\frac 4d {\rm Re} \, \Phi}F(\widetilde{v}) + e
\end{align}
for some function $e$.
Assume that
\begin{align} \label{Sta-L2-V}
\|\widetilde{v}\|_{C(I; L^2)} \leq M, \ \
|v(t_0) - \widetilde{v}(t_0)|_{2} \leq M', \ \
\|\widetilde{v}\|_{V(I)} \leq L
{\epsilon}nd{align}
for some positive constants $M,M'$ and $L$.
Assume also the smallness conditions
\begin{align} \label{Sta-L2-ve}
\| U(\cdot, t_0)(v(t_0)-\widetilde{v}(t_0)) \|_{V(I)} \leq {\varepsilon}, \ \
\| e \|_{L^2(I; H^{-\frac 12}_1) + {N}^0(I)} \leq {\varepsilon}
{\epsilon}nd{align}
for some $0<{\varepsilon}\leq {\varepsilon}_*$,
where ${\varepsilon}_* = {\varepsilon}_*(C_T, D_T, M,M',L)>0$ is a small constant,
$C_T$ is the Strichartz constant in Theorem \ref{Thm-Stri},
$D_T = \|e^{\frac 4d {\rm Re} \,\Phi}\|_{C(I;L^{\infty})}$.
Then,
\begin{align}
& \|v-\widetilde{v}\|_{V(I)} \leq C(C_T, D_T, M,M',L) {\varepsilon} , \label{Sta-L2.1}\\
& \|v-\widetilde{v}\|_{S^0(I) \cap L^2(I; H^\frac 12_{-1})} \leq C(C_T, D_T, M,M',L)M', \label{Sta-L2.2} \\
& \|v\|_{S^0(I)\cap L^2(I; H^\frac 12_{-1})} \leq C(C_T, D_T, M, M', L). \label{Sta-L2.3}
{\epsilon}nd{align}
We can take ${\varepsilon}_*(C_T,D_T, M, M', L)$
(resp. $C(C_T,D_T, M, M', L)$)
to be decreasing (resp. nondecreasing) with respect to each argument.
{\epsilon}nd{theorem}
Theorem \ref{Thm-Sta-L2} states that
if the difference between the two initial data
and the error term are small enough in
appropriate spaces,
then the two corresponding solutions
stay close to each other in the mass-critical space $V(t_0,T)$.
\begin{remark} \label{Rem-glob-Sta-L2}
Theorem \ref{Thm-Sta-L2} also holds if $\Phi$
is replaced by ${\varphi}_*$ as in equa\-tionref{vf*}.
In this case,
since the Strichartz constants are independent of time and ${\varphi}_*\in L^{\infty}({\mathbb{R}}^+; L^{\infty})$,
the constants in equa\-tionref{Sta-L2.1}-equa\-tionref{Sta-L2.3}
are independent of time,
i.e., they depend only on $M'$, $M$ and $L$.
This fact will be important in the study of scattering in Section \ref{Sec-Sca} later.
{\epsilon}nd{remark}
In order to prove Theorem \ref{Thm-Sta-L2},
we first prove the short-time perturbation result below.
\begin{proposition} \label{Pro-ShortP-L2} ({\it Mass-Critical Short-time Perturbation}).
Let $I=[t_0, T]\subseteq{\mathbb{R}}^+$
and
$v$, $\widetilde{v}$ be the solutions to Equations
equa\-tionref{equa-v-p} and equa\-tionref{equa-wtv-p}, respectively.
Assume that,
\begin{align} \label{Short-L2-l2}
\|\widetilde{v}\|_{C(I; L^2)} \leq M, \ \
|v(t_0) - \widetilde{v}(t_0)|_{2} \leq M'
{\epsilon}nd{align}
for some positive constants $M,M'$.
Assume also the smallness conditions
\begin{align} \label{Short-L2-ve}
\|\widetilde{v}\|_{V(I)} \leq \delta, \ \
\| U(\cdot, t_0)(v(t_0)-\widetilde{v}(t_0)) \|_{V(I)} \leq {\varepsilon}, \ \
\| e \|_{L^2(I; H^{- \frac 12}_{1}) + {N}^0(I)} \leq {\varepsilon}
\end{align}
for some $0<{\varepsilon}\leq \delta$,
where $\delta = \delta(C_T, D_T, M,M')>0$ is a small constant,
and $C_T, D_T$ are as in Theorem \ref{Thm-Sta-L2}.
Then, we have
\begin{align}
& \|v-\widetilde{v}\|_{V(I)} \leq C(C_T, D_T) {\varepsilon}, \label{Short-L2.1} \\
& \|v-\widetilde{v}\|_{S^0(I)\cap L^2(I; H^\frac 12_{-1})} \leq C(C_T, D_T) M', \label{Short-L2.2} \\
& \|v\|_{S^0(I)\cap L^2(I; H^\frac 12_{-1})} \leq C(C_T, D_T) (M+M'), \label{Short-L2.3} \\
& \|e^{\frac 4d {\rm Re} \, \Phi}(F(v) - F(\widetilde{v})) \|_{{N}^0(I)}
\leq C(C_T, D_T) {\varepsilon}. \label{Short-L2.4}
{\epsilon}nd{align}
{\epsilon}nd{proposition}
{\bf Proof.}
The proof is similar to that of \cite[Lemma $3.4$]{TVZ07};
however, it is
based on Theorem \ref{Thm-Stri},
i.e.,
the Strichartz estimates for the Laplacian with lower order perturbations.
Let $z:= v-\widetilde{v}$.
In view of the equations \eqref{equa-v-p} and \eqref{equa-wtv-p}, we have
\begin{align} \label{equa-z-p}
i{\partial}_t z =& e^{-\Phi}\Delta(e^{\Phi} z) - e^{\frac 4d {\rm Re} \, \Phi} (F(z+\widetilde{v}) - F(\widetilde{v})) - e, \\
z(t_0)=& v(t_0) - \widetilde{v}(t_0), \nonumber
\end{align}
or equivalently,
\begin{align} \label{equa-z-p*}
z(t) = U(t,t_0)z(t_0)
+ \int_{t_0}^t U(t,s) (ie^{\frac 4d {\rm Re} \, \Phi} (F(z+\widetilde{v}) - F(\widetilde{v})) + i e) ds.
\end{align}
Set $S(I):= \|e^{\frac 4d {\rm Re} \, \Phi} (F(z+\widetilde{v}) - F(\widetilde{v}))\|_{ N^0(I)}$.
By \eqref{ineq-V} and \eqref{Short-L2-ve},
\begin{align} \label{esti-S-L2}
S(I) \le& \|e^{\frac 4d {\rm Re} \, \Phi} (F(z+\widetilde{v}) - F(\widetilde{v})) \|_{L^{\frac{2(d+2)}{d+4}}(I \times {\mathbb{R}}^d)} \nonumber \\
\leq& CD_T (\|\widetilde{v}\|_{V(I)}^{\frac 4d} \|z\|_{V(I)} + \|z\|_{V(I)}^{1+\frac 4d}) \nonumber \\
\leq& C D_T (\delta^{\frac 4d} \|z\|_{V(I)} + \|z\|_{V(I)}^{1+\frac 4d}).
\end{align}
Moreover,
applying Theorem \ref{Thm-Stri} to \eqref{equa-z-p}
and using \eqref{Short-L2-ve} we have
\begin{align} \label{esti-S-L2*}
\|z\|_{V(I)}
\leq& C_T (\|U(\cdot, t_0)z(t_0)\|_{V(I)} + S(I) + \|e\|_{N^0(I) + L^2(I; H^{- \frac 12}_{1})}) \nonumber \\
\leq& C_T (2{\varepsilon} + S(I)).
\end{align}
Then, plugging \eqref{esti-S-L2} into \eqref{esti-S-L2*} we obtain
\begin{align*}
\|z\|_{V(I)}
\leq C_T (2{\varepsilon} + C D_T \delta^{\frac 4d} \|z\|_{V(I)} + CD_T \|z\|_{V(I)}^{1+\frac 4d}).
\end{align*}
Thus, in view of Lemma $6.1$ in \cite{BRZ18},
for $\delta = \delta(C_T,D_T)$ small enough such that
$CC_T D_T \delta^{\frac 4d} \leq \frac 12$
and $4C_T \delta < (1-\frac 1{\alpha})(2{\alpha} CC_TD_T)^{-\frac{1}{{\alpha}-1}}$
with ${\alpha}= 1+ \frac 4d$, we obtain
\begin{align} \label{esti-z-V}
\|z\|_{V(I)} \leq (d+4) C_T {\varepsilon},
\end{align}
which along with \eqref{esti-S-L2} implies \eqref{Short-L2.1} and \eqref{Short-L2.4}.
Now, applying Theorem \ref{Thm-Stri} to \eqref{equa-z-p} again
and using \eqref{Short-L2-l2}, \eqref{Short-L2.4} we have
\begin{align} \label{esti-z-S0.0}
\|z\|_{S^0(I) \cap L^2(I;H^\frac 12_{-1})}
\leq& C_T (|z(t_0)|_2 + S(I) + \|e\|_{{N}^0(I) + L^2(I;H^{-\frac 12}_1) } ) \nonumber \\
\leq& C_T (M' + C(C_T, D_T){\varepsilon} +{\varepsilon}) \nonumber \\
\leq& 2C_T M',
\end{align}
if $\delta= \delta(C_T,D_T,M')$ is such that
$(C(C_T, D_T)+1)\delta \leq M' $.
Thus, \eqref{Short-L2.2} follows.
Similarly,
by Equation \eqref{equa-wtv-p} and conditions
\eqref{Short-L2-l2} and \eqref{Short-L2-ve},
taking an even smaller
$\delta = \delta(C_T, D_T, M,M')$ such that
$D_T \delta^{1+\frac 4d} + \delta \leq M$,
we have
\begin{align} \label{esti-wtv-S0.0}
\|\widetilde{v}\|_{S^0(I)\cap L^2(I;H^\frac 12_{-1})}
\leq& C_T (|\widetilde{v}({t_0})|_2 + D_T\|\widetilde{v}\|^{1+\frac 4d}_{V(I)} + \|e\|_{N^0(I)+ L^2(I; H^{- \frac 12}_{1})}) \nonumber \\
\leq& C_T (M+D_T \delta^{1+\frac 4d} + \delta) \nonumber \\
\leq& 2 C_T M,
\end{align}
which along with \eqref{esti-z-S0.0} implies that
\begin{align} \label{esti-z-S0}
\|v\|_{{S}^0(I)\cap L^2(I;H^\frac 12_{-1})}
\leq \|z\|_{{S}^0(I)\cap L^2(I;H^\frac 12_{-1})} + \|\widetilde{v}\|_{{S}^0(I)\cap L^2(I;H^\frac 12_{-1})}
\leq 2 C_T (M' + M),
\end{align}
thereby yielding \eqref{Short-L2.3}.
The proof is complete.
$\square$ \\
{\bf Proof of Theorem \ref{Thm-Sta-L2}.}
First fix $\delta = \delta(C_T, D_T, M, 2C_TM')$,
where $\delta$ is as in Proposition \ref{Pro-ShortP-L2}.
We divide $[t_0,T]$ into finitely many
small pieces $I_j= [t_j,t_{j+1}]$, $0\leq j\leq l$,
such that
$t_{l+1}=T$,
$\|\widetilde{v}\|_{V(t_j,t_{j+1})} =\delta$,
$0\leq j\leq l-1$,
and
$\|\widetilde{v}\|_{V(t_l,t_{l+1})} \leq \delta$.
Then, $l\leq (L/\delta)^{2+\frac 4d}<{\infty}$.
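This bound on $l$ follows from an additivity argument, assuming (as the exponent $2+\frac 4d=\frac{2(d+2)}{d}$ suggests) that $V(I)$ denotes the diagonal Strichartz space $L^{\frac{2(d+2)}{d}}(I\times {\mathbb{R}}^d)$: since the pieces are disjoint,
\begin{align*}
l\, \delta^{2+\frac 4d}
\leq \sum_{j=0}^{l-1} \|\widetilde{v}\|_{V(t_j,t_{j+1})}^{\frac{2(d+2)}{d}}
= \|\widetilde{v}\|_{V(t_0,t_l)}^{\frac{2(d+2)}{d}}
\leq L^{2+\frac 4d},
\end{align*}
where $L$ is the bound on $\|\widetilde{v}\|_{V(t_0,T)}$ in the hypotheses of Theorem \ref{Thm-Sta-L2}.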
Let
$C(0) = C(C_T, D_T)$,
$C(j+1) = \max\{ C(0)C_T^2 (\sum_{k=0}^j C(k)+ 2), C(0)(1+2C_T)\}$,
$0\leq j\leq l-1$,
where $C(C_T, D_T)$ is the constant in \eqref{Short-L2.1}-\eqref{Short-L2.4}.
Choose ${\varepsilon}_*= {\varepsilon}_*(C_T, D_T, M,M',L)$ sufficiently small such that
\begin{align} \label{ve*-L2}
(\sum\limits_{k=0}^l C(k) +1) {\varepsilon}_* \leq M',\ \
C_T^2 (\sum\limits_{k=0}^l C(k) +2) {\varepsilon}_* \leq \delta.
\end{align}
Below we use inductive arguments to prove for any $0\leq j\leq l$,
\begin{align}
& \|v-\widetilde{v}\|_{V(I_j)} \leq C(j){\varepsilon}, \label{Sta-L2.1-proof}\\
& \|v-\widetilde{v}\|_{S^0(I_j)\cap L^2(I_j;H^\frac 12_{-1})} \leq C(j) M',\label{Sta-L2.2-proof}\\
& \|v\|_{S^0(I_j)\cap L^2(I_j;H^\frac 12_{-1})} \leq C(j)(M+M'),\label{Sta-L2.3-proof}\\
& \|e^{\frac 4d {\rm Re} \, \Phi} (F(v) - F(\widetilde{v}))\|_{N^0(I_j)} \leq C(j) {\varepsilon}. \label{Sta-L2.4-proof}
\end{align}
Proposition \ref{Pro-ShortP-L2} yields that
the estimates above hold for $j=0$.
Suppose that \eqref{Sta-L2.1-proof}-\eqref{Sta-L2.4-proof} are also valid for each $0\leq k\leq j<l$.
We shall apply Proposition \ref{Pro-ShortP-L2} to show that they also hold for the case where $j+1$ replaces $j$.
For this purpose,
by Theorem \ref{Thm-Stri}, \eqref{Sta-L2-V} and the inductive assumptions,
\begin{align*}
|v(t_{j+1}) - \widetilde{v}(t_{j+1})|_2
\leq& C_T (|v(t_0) - \widetilde{v}(t_0)|_2 + S(t_0, t_{j+1})
+ \|e\|_{L^2(t_0,t_{j+1}; H^{-\frac 12}_1) + N^0(t_0, t_{j+1})}) \\
\leq& C_T (M' + \sum\limits_{k=0}^jC(k){\varepsilon} +{\varepsilon} )
\leq 2 C_T M',
\end{align*}
where
$S(t_0, t_{j+1})$ is as in the proof of Proposition \ref{Pro-ShortP-L2},
and the last step is due to \eqref{ve*-L2}.
Moreover, by Theorem \ref{Thm-Stri},
\begin{align*}
\|U(\cdot, t_{j+1})(v(t_{j+1}) - \widetilde{v}(t_{j+1}))\|_{V(I_{j+1})}
\leq& C_T |v(t_{j+1}) - \widetilde{v}(t_{j+1})|_2 \\
\leq& C_T \|v - \widetilde{v} \|_{C([t_j,t_{j+1}]; L^2)}.
\end{align*}
Then, applying Theorem \ref{Thm-Stri} to \eqref{equa-z-p} again we have
\begin{align*}
&\|U(\cdot, t_{j+1})(v(t_{j+1}) - \widetilde{v}(t_{j+1}))\|_{V(I_{j+1})} \\
\leq& C_T \|U(\cdot, t_0)(v(t_0)-\widetilde{v}(t_0))\|_{C([t_0,t_{j+1}]; L^2)}
+ C^2_T \|e^{\frac 4d {\rm Re} \, \Phi} (F(v) - F(\widetilde{v})) \|_{N^0(t_0,t_{j+1})} \\
& + C_T^2 \|e\|_{N^0(t_0, t_{j+1}) + L^2(t_0,t_{j+1}; H^{-\frac 12}_1)} \\
\leq& C_T {\varepsilon} + C_T^2 (\sum\limits_{k=0}^j C(k) {\varepsilon} + {\varepsilon})
\leq \delta,
\end{align*}
where the last step is again due to \eqref{ve*-L2}.
Thus, the conditions \eqref{Short-L2-l2} and \eqref{Short-L2-ve} of Proposition \ref{Pro-ShortP-L2} are satisfied
with $2C_TM'$ and
$C_T^2 (\sum_{k=0}^j C(k) +2 ){\varepsilon}$
replacing $M'$ and ${\varepsilon}$, respectively.
Proposition \ref{Pro-ShortP-L2} yields that
estimates \eqref{Sta-L2.1-proof}-\eqref{Sta-L2.4-proof} are valid with $j+1$ replacing $j$.
Therefore, induction
yields that
\eqref{Sta-L2.1-proof}-\eqref{Sta-L2.4-proof} are valid for all $0\leq j\leq l$,
thereby proving Theorem \ref{Thm-Sta-L2}.
The proof is complete.
$\square$
\subsection{Energy-critical case} \label{Subsec-Sta-H1}
The main results of this subsection are Theorems \ref{Thm-Sta-H1-dlow}
and \ref{Thm-Sta-H1} below.
The delicate problem here is that
the derivatives of the nonlinearity in \eqref{equa-x}
are Lipschitz when $3\leq d\leq 6$,
but only H\"older continuous in the high-dimensional case $d>6$.
In the latter case,
more delicate arguments involving the exotic Strichartz spaces
as well as local smoothing spaces will be used.
We start with the easier case $3\leq d\leq 6$.
\subsubsection{The case when $3\leq d\leq 6$}
In this case,
the stability result is quite similar to the previous mass-critical case.
\begin{theorem} \label{Thm-Sta-H1-dlow} ({\it Energy-critical Stability Result when $3\leq d\leq 6$}).
Consider any bounded compact interval $I=[t_0,T]\subseteq {\mathbb{R}}^+$.
Let $w$ be the solution to
\begin{align} \label{equa-w-p}
i{\partial}_t w = e^{-\Phi}\Delta(e^{\Phi} w) - e^{\frac{4}{d-2} {\rm Re} \, \Phi} F(w)
\end{align}
with $\Phi$ satisfying \eqref{psi-Stri}, $3\leq d\leq 6$,
and $\widetilde{w}$ solve the perturbed equation
\begin{align} \label{equa-wtw-p}
i{\partial}_t \widetilde{w} = e^{-\Phi}\Delta(e^{\Phi} \widetilde{w}) - e^{\frac{4}{d-2}{\rm Re} \,\Phi} F(\widetilde{w}) + e
\end{align}
for some function $e$.
Assume that
\begin{align} \label{Sta-H1-V-dlow}
\|\widetilde{w}\|_{C(I;H^1)} \leq E,\ \
|w(t_0) - \widetilde{w}(t_0)|_{H^1} \leq E', \ \
\|\widetilde{w}\|_{{\mathbb{W}}(I)} \leq L
{\epsilon}nd{align}
for some positive constants $E,E'$ and $L$.
Assume also the smallness conditions
\begin{align} \label{Sta-L2-ve-dlow}
\| U(\cdot, t_0)(w(t_0)-\widetilde{w}(t_0)) \|_{{\mathbb{W}}(I)} \leq {\varepsilon}, \ \
\| e \|_{{N}^1(I) + L^2(I; H^{\frac 12}_1)} \leq {\varepsilon}
\end{align}
for some $0<{\varepsilon}\leq {\varepsilon}_*$,
where ${\varepsilon}_* = {\varepsilon}_*(C_T, D'_T, E,E',L)>0$ is a small constant,
$C_T$ is the Strichartz constant in Theorem \ref{Thm-Stri}
and $D'_T = \|e^{\frac{4}{d-2} {\rm Re} \, \Phi} \|_{C(I; W^{1,{\infty}})}$.
Then,
\begin{align}
& \|w-\widetilde{w}\|_{{\mathbb{W}}(I)} \leq C(C_T, D'_T, E,E',L) {\varepsilon} , \label{Sta-H1.1-dlow}\\
& \|w-\widetilde{w}\|_{S^1(I) \cap L^2(I;H^\frac 32_{-1})} \leq C(C_T, D'_T, E,E',L)E', \label{Sta-H1.2-dlow} \\
& \|w\|_{S^1(I)\cap L^2(I;H^\frac 32_{-1})} \leq C(C_T, D'_T, E,E', L). \label{Sta-H1.3-dlow}
{\epsilon}nd{align}
The constants ${\varepsilon}_*(C_T, D'_T, E,E', L)$ and $C(C_T, D'_T, E,E', L)$
can be taken to be decreasing and nondecreasing with respect to each argument,
respectively.
{\epsilon}nd{theorem}
As in the mass-critical case,
Theorem \ref{Thm-Sta-H1-dlow} follows from the
short-time perturbation result below.
\begin{proposition} \label{Pro-ShortP-H1-dlow} ({\it Energy-Critical Short-time Perturbation when $3\leq d\leq 6$}).
Let $I=[ t_0,T]$, $w$, $\widetilde{w}$ be as in Theorem \ref{Thm-Sta-H1-dlow},
$3\leq d\leq 6$.
Assume that
\begin{align} \label{Short-H1-h1-dlow}
\|\widetilde{w}\|_{C(I; H^1)} \leq E, \ \
|w(t_0) - \widetilde{w}(t_0)|_{H^1} \leq E'
{\epsilon}nd{align}
for some positive constants $E,E'$.
Assume also the smallness conditions
\begin{align} \label{Short-H1-ve-dlow}
\|\widetilde{w}\|_{{\mathbb{W}}(I)} \leq {\delta}lta, \ \
\| U(\cdot, t_0)(w(t_0)-\widetilde{w}(t_0)) \|_{{\mathbb{W}}(I)} \leq {\varepsilon}, \ \
\| e \|_{{N}^1(I) + L^2(I; H^{ \frac 12}_{1})} \leq {\varepsilon}
{\epsilon}nd{align}
for some $0<{\varepsilon}\leq {\delta}lta$,
where ${\delta}lta = {\delta}lta(C_T, D'_T, E,E')>0$ is a small constant,
and $C_T, D'_T$ are as in Theorem \ref{Thm-Sta-H1-dlow}.
Then, we have
\begin{align}
& \|w-\widetilde{w}\|_{{\mathbb{W}}(I)} \leq C(C_T, D'_T) {\varepsilon}, \label{Short-H1.1-dlow} \\
& \|w-\widetilde{w}\|_{S^1(I)\cap L^2(I;H^\frac 32_{-1})} \leq C(C_T, D'_T) E', \label{Short-H1.2-dlow} \\
& \|w\|_{S^1(I)\cap L^2(I;H^\frac 32_{-1})} \leq C(C_T, D'_T) (E+E'), \label{Short-H1.3-dlow} \\
& \|e^{\frac{4}{d-2} {\rm Re} \,\Phi}(F(w)-F(\widetilde{w})) \|_{{N}^1(I)}
\leq C(C_T, D'_T) {\varepsilon}. \label{Short-H1.4-dlow}
{\epsilon}nd{align}
{\epsilon}nd{proposition}
{\bf Proof.}
Set $z:= w-\widetilde{w}$
and $S(I) := \|e^{\frac {4}{d-2} {\rm Re} \,\Phi}(F(\widetilde{w}+z)-F(\widetilde{w}))\|_{N^1(I)}$.
Then,
\begin{align*}
S(I) \leq
D'_T (\|F(z+\widetilde{w}) - F(\widetilde{w}) \|_{L^2(I; L^{\frac{2d}{d+2}})}
+ \| \nabla (F(z+\widetilde{w}) - F(\widetilde{w})) \|_{L^2(I; L^{\frac{2d}{d+2}})}).
{\epsilon}nd{align*}
By equa\-tionref{ineq-W.2}, equa\-tionref{Fz.1} and equa\-tionref{Short-H1-ve-dlow},
\begin{align*}
\|F(z+\widetilde{w}) - F(\widetilde{w}) \|_{L^2(I; L^{\frac{2d}{d+2}})}
\leq& C(\|z\|_{{\mathbb{W}}(I)}^{\frac{4}{d-2}} + \|\widetilde{w}\|_{{\mathbb{W}}(I)}^{\frac{4}{d-2}}) \|z\|_{{\mathbb{W}}(I)} \\
\leq& C {\delta}lta^{\frac{4}{d-2}} \|z\|_{{\mathbb{W}}(I)} + C \|z\|^{\frac{d+2}{d-2}}_{{\mathbb{W}}(I)}.
{\epsilon}nd{align*}
Moreover, by \eqref{Fz.2} we have (see, e.g., \cite[(3.20)]{HRZ18})
\begin{align*}
|\nabla(F(z+\widetilde{w})-F(\widetilde{w}))|
\leq C (|\nabla \widetilde{w}| |z|^{\frac{4}{d-2}}
+ |\widetilde{w}|^{\frac{4}{d-2}} |\nabla z| + |z|^{\frac{4}{d-2}} |\nabla z|
+ |\nabla \widetilde{w}||\widetilde{w}|^{\frac{6-d}{d-2}} |z|).
\end{align*}
Taking into account \eqref{ineq-W.2}, \eqref{ineq-W.3} and \eqref{Short-H1-ve-dlow} we get
\begin{align*}
\|\nabla(F(z+\widetilde{w}) - F(\widetilde{w})) \|_{L^2(I; L^{\frac{2d}{d+2}})}
\leq& C(\|\widetilde{w}\|_{{\mathbb{W}}(I)} \|z\|_{{\mathbb{W}}(I)}^{\frac{4}{d-2}}
+ \|\widetilde{w}\|_{{\mathbb{W}}(I)}^{\frac{4}{d-2}} \|z\|_{{\mathbb{W}}(I)}
+ \|z\|^{\frac{d+2}{d-2}}_{{\mathbb{W}}(I)} ) \\
\leq& C( \delta \|z\|_{{\mathbb{W}}(I)}^{\frac{4}{d-2}}
+ \delta^{\frac{4}{d-2}} \|z\|_{{\mathbb{W}}(I)}
+ \|z\|^{\frac{d+2}{d-2}}_{{\mathbb{W}}(I)} ).
\end{align*}
Thus,
combining the estimates above we obtain
\begin{align*}
S(I) \leq CD_T'
( \delta^{\frac{4}{d-2}} \|z\|_{{\mathbb{W}}(I)}
+ \delta \|z\|_{{\mathbb{W}}(I)}^{\frac{4}{d-2}}
+ \|z\|^{\frac{d+2}{d-2}}_{{\mathbb{W}}(I)}).
\end{align*}
Since $1\leq \frac{4}{d-2} \leq \frac{d+2}{d-2}$ when $3\leq d\leq 6$,
and hence $ \|z\|_{{\mathbb{W}}(I)}^{\frac{4}{d-2}} \leq \|z\|_{{\mathbb{W}}(I)}+ \|z\|^{\frac{d+2}{d-2}}_{{\mathbb{W}}(I)}$,
we arrive at
\begin{align} \label{esti-S-Short-H1-dlow}
S(I) \leq 2CD_T'
( \delta \|z\|_{{\mathbb{W}}(I)}
+ \|z\|^{\frac{d+2}{d-2}}_{{\mathbb{W}}(I)}).
\end{align}
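(The elementary inequality used for the middle term is the following worked step: for $x\geq 0$ and exponents $1\leq p\leq q$ one has $x^{p}\leq x + x^{q}$, since $x^p\leq x$ when $x\leq 1$ and $x^p\leq x^q$ when $x\geq 1$; it is applied with $x=\|z\|_{{\mathbb{W}}(I)}$, $p=\frac{4}{d-2}$ and $q=\frac{d+2}{d-2}$, and the remaining terms are absorbed using the implicit normalization $\delta^{\frac{4}{d-2}}\leq \delta\leq 1$.)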
Moreover,
similarly to \eqref{equa-z-p*}, we have
\begin{align} \label{equa-z-p-H1-dlow}
z(t) = U(t,t_0)z(t_0)
+ \int_{t_0}^t U(t,s)(ie^{\frac{4}{d-2}{\rm Re} \,\Phi}(F(z+\widetilde{w})- F(\widetilde{w})) +i e )ds.
\end{align}
Applying Theorem \ref{Thm-Stri} and using \eqref{Short-H1-ve-dlow}
we have
\begin{align*}
\|z\|_{{\mathbb{W}}(I)}
\leq C_T (\|U(\cdot, t_0) z(t_0)\|_{{\mathbb{W}}(I)} + S(I) + \|e\|_{L^2(I; H^{ \frac 12}_{1}) + {N}^1(I)} )
\leq C_T (2{\varepsilon} + S(I)).
{\epsilon}nd{align*}
Thus, plugging equa\-tionref{esti-S-Short-H1-dlow} into the estimate above we obtain
\begin{align*}
\|z\|_{{\mathbb{W}}(I)}
\leq& 2C_T ({\varepsilon} + CD_T'
{\delta}lta \|z\|_{{\mathbb{W}}(I)}
+ CD_T'\|z\|^{\frac{d+2}{d-2}}_{{\mathbb{W}}(I)}) .
{\epsilon}nd{align*}
Taking ${\delta}lta={\delta}lta(C_T, D'_T)$ very small such that
$ 2C C_T D_T' {\delta}lta \leq \frac 12$ we come to
\begin{align*}
\|z\|_{{\mathbb{W}}(I)}
\leq& 4C_T {\varepsilon} + 4 CC_TD_T'\|z\|^{\frac{d+2}{d-2}}_{{\mathbb{W}}(I)} .
{\epsilon}nd{align*}
Then, by virtue of \cite[Lemma A.1]{BRZ18},
taking ${\delta}lta= {\delta}lta (C_T,D'_T)$ smaller
such that $ 4C_T {\delta}lta < (1-\frac 1 {\alpha})( 4{\alpha} CC_TD_T')^{-\frac{1}{{\alpha}-1}}$
with ${\alpha}=1+\frac{4}{d-2}$
we obtain
\begin{align} \label{esti-z-Short-H1-dlow.1}
\|z\|_{{\mathbb{W}}(I)} \leq\frac{ 4 {\alpha}}{{\alpha}-1} C_T {\varepsilon},
{\epsilon}nd{align}
which together with equa\-tionref{esti-S-Short-H1-dlow} implies equa\-tionref{Short-H1.1-dlow} and equa\-tionref{Short-H1.4-dlow}.
For equa\-tionref{Short-H1.2-dlow},
applying Theorem \ref{Thm-Stri} to equa\-tionref{equa-z-p-H1-dlow}
and using equa\-tionref{Short-H1-ve-dlow}, equa\-tionref{Short-H1.4-dlow}
we have
\begin{align} \label{esti-wtw-Short-H1-dlow}
\|z\|_{S^1(I)\cap L^2(I;H^\frac 32_{-1})}
\leq& C_T (|z(t_0)|_{H^1} + S(I) + \|e\|_{{N}^1(I) + L^2(I; H^{ \frac 12}_{1})} ) \noindent onumber \\
\leq& C_T (E'+ C(C_T, D'_T){\varepsilon} + {\varepsilon} ),
{\epsilon}nd{align}
which implies equa\-tionref{Short-H1.2-dlow},
provided ${\delta}lta$ is smaller
such that $ C(C_T,D_T'){\delta}lta + {\delta}lta \leq E'$.
Similarly, by equa\-tionref{equa-wtw-p},
\begin{align} \label{esti-z-S1-Short-H1-dlow}
\|\widetilde{w}\|_{S^1(I)\cap L^2(I;H^\frac 32_{-1})}
\leq& C_T (|\widetilde{w}(t_0)|_{H^1} + \|e^{\frac{4}{d-2} {\rm Re} \,\Phi} F(\widetilde{w})\|_{N^1(I)} + \|e\|_{{N}^1(I)+L^2(I; H^{ \frac 12}_{1})} ) \noindent onumber \\
\leq& C_T (|\widetilde{w}(t_0)|_{H^1} + D'_T \|\widetilde{w}\|_{{\mathbb{W}}(I)}^{\frac{d+2}{d-2}} + \|e\|_{{N}^1(I)+L^2(I; H^{ \frac 12}_{1})}) \noindent onumber \\
\leq& C_T (E+ D'_T {\delta}lta^{\frac{d+2}{d-2}} + {\varepsilon} ) \noindent onumber \\
\leq& 2C_T E,
{\epsilon}nd{align}
if we take ${\delta}lta$ even smaller such that $D'_T {\delta}lta^{\frac{d+2}{d-2}} + {\delta}lta \leq E$.
Therefore, we obtain equa\-tionref{Short-H1.3-dlow} from equa\-tionref{Short-H1.2-dlow} and equa\-tionref{esti-z-S1-Short-H1-dlow}
and so finish the proof.
$\square$
Once Proposition \ref{Pro-ShortP-H1-dlow} is obtained,
we can use the same partition argument as in the proof of Theorem \ref{Thm-Sta-L2}
to prove Theorem \ref{Thm-Sta-H1-dlow}.
The details are omitted for simplicity.
\subsubsection{The case when $d>6$}
\begin{theorem} \label{Thm-Sta-H1} ({\it Energy-Critical Stability Result when $d>6$}).
Consider any bounded compact interval $I=[t_0,T]\subseteq {\mathbb{R}}^+$.
Let $w, \widetilde{w}$ solve the equations equa\-tionref{equa-w-p} and equa\-tionref{equa-wtw-p}, respectively,
and $\Phi$ satisfy equa\-tionref{psi-Stri},
$d>6$.
Assume that,
\begin{align} \label{Sta-H1-W}
\|\widetilde{w}\|_{C(I; H^1)} \leq E, \ \
\|\widetilde{w}\|_{{\mathbb{W}}(I) \cap L^2(I;H^\frac 32_{-1})} \leq L
{\epsilon}nd{align}
for some positive constants $E$ and $L$.
Assume also the smallness conditions
\begin{align} \label{Sta-H1-ve}
\|g\|_{C(I;{\mathbb{R}}^+)} \leq {\varepsilon},\ \
|w(t_0) - \widetilde{w}(t_0)|_{H^1} \leq {\varepsilon}, \ \
\| e \|_{ N^1(I)+ L^2(I; H^{\frac 12 }_1)} \leq {\varepsilon}
{\epsilon}nd{align}
for some $0<{\varepsilon}\leq {\varepsilon}_*$,
where
$g$ is the time function as in equa\-tionref{psi-Stri},
${\varepsilon}_* = {\varepsilon}_* (C_T, D'_T, E,L)>0$ is a small constant,
and $C_T, D'_T$ are as in Theorem \ref{Thm-Sta-H1-dlow}.
Then, for some $c = c(C_T,D'_T,E,L)>0$,
\begin{align}
& \|w-\widetilde{w}\|_{L^{\frac{2(d+2)}{d-2}}(I\times {\mathbb{R}}^d)} \leq C(C_T, D'_T, E,L) {\varepsilon}^c, \label{Sta-H1.1} \\
& \|w-\widetilde{w}\|_{{S}^1(I)\cap L^2(I;H^\frac 32_{-1})} \leq C(C_T, D'_T, E,L){\varepsilon}^c, \label{Sta-H1.2} \\
& \|w\|_{{S}^1(I)\cap L^2(I;H^\frac 32_{-1})} \leq C(C_T, D'_T, E, L). \label{Sta-H1.3}
{\epsilon}nd{align}
We can take the constants ${\varepsilon}_*(C_T,D'_T,E,L)$ and $ C(C_T, D'_T, E, L)$
to be decreasing and nondecreasing with respect to each argument, respectively.
{\epsilon}nd{theorem}
\begin{remark}
Unlike in the case where $3\leq d\leq 6$,
the smallness condition on $g$
is imposed in equa\-tionref{Sta-H1-ve} mainly to control the lower order perturbations
(see equa\-tionref{esti-J1-H1-p} below).
One may remove this restriction on $g$,
if the estimate equa\-tionref{bbX-bbYLSN1} still holds
with $L^2(I;H^{\frac 12}_1)$ replaced by
$L^2(0,\tau; H^{-\frac 12 + \frac{4}{d+2}}_1)$,
which, however, is unclear.
{\epsilon}nd{remark}
We first prove
the short-time perturbation result below.
\begin{proposition} \label{Pro-ShortP-H1} ({\it Energy-Critical Short-time Perturbations when $d>6$}).
Let $I=[t_0,T]$, $w, \widetilde{w}$ and $g$ be as in Theorem \ref{Thm-Sta-H1}, $d>6$.
Assume that
\begin{align} \label{Short-wtW-H1}
\|\widetilde{w}\|_{C(I; H^1)} \leq E
{\epsilon}nd{align}
for some positive constant $E$.
Assume also the smallness conditions
\begin{align}
&\|\widetilde{w}\|_{{\mathbb{W}}(I)\cap L^2(I; H^{\frac 32}_{-1})} \leq {\delta}lta, \label{Short-H1-ve.0}\\
\|g\|_{C([t_0,T];{\mathbb{R}}^+)} \leq {\varepsilon},\ \
& |w(t_0) - \widetilde{w}(t_0)|_{H^1} \leq {\varepsilon}, \ \
\| e \|_{L^2(I; H^{\frac 12 }_1) + N^1(I)} \leq {\varepsilon} \label{Short-H1-ve}
{\epsilon}nd{align}
for some $0<{\varepsilon}\leq {\delta}lta$,
where
${\delta}lta = {\delta}lta (C_T, D'_T, E)>0$ is a small constant
and $C_T, D'_T$ are as in Theorem \ref{Thm-Sta-H1}.
Then, we have
\begin{align}
& \|w-\widetilde{w}\|_{{{\mathbb{X}}(I)}} \leq C(C_T, D'_T, E) {\varepsilon} , \label{Short-H1.1} \\
& \|w-\widetilde{w}\|_{{S}^1(I)\cap L^2(I;H^\frac 32_{-1})} \leq C(C_T, D'_T, E) {\varepsilon}^{\frac{4}{d-2}}, \label{Short-H1.2} \\
& \|w\|_{{S}^1(I)\cap L^2(I;H^\frac 32_{-1})} \leq C(C_T, D'_T, E), \label{Short-H1.3} \\
& \|F(w) - F(\widetilde{w})\|_{{\mathbb{Y}}(I)} \leq C(C_T, D'_T, E) {\varepsilon}, \label{Short-H1.4} \\
& \| F(w) - F(\widetilde{w}) \|_{N^1(I)} \leq C(C_T, D'_T, E) {\varepsilon}^{\frac{4}{d-2}}. \label{Short-H1.5}
{\epsilon}nd{align}
where ${\mathbb{X}}(I)$, ${\mathbb{Y}}(I)$ are exotic Strichartz spaces as in Section \ref{Sec-Intro}.
{\epsilon}nd{proposition}
\begin{remark}
The exotic Strichartz space ${\mathbb{X}}(I)$ and ${\mathbb{Y}}(I)$
are used to
deal with the non-Lipschitzness of the derivatives of nonlinearity when $d>6$.
Moreover,
the local smoothing spaces are
introduced primarily
to treat the
lower order perturbations of the Laplacian
arising in the operator $e^{-\Phi}{\Delta}elta (e^{\Phi} \cdot)$.
{\epsilon}nd{remark}
In order to prove Proposition \ref{Pro-ShortP-H1},
we first prove Lemma \ref{Lem-Short-H1-rough} below.
\begin{lemma} \label{Lem-Short-H1-rough}
Consider the situations in Proposition \ref{Pro-ShortP-H1}.
We have that for ${\delta}lta= {\delta}lta(C_T, D'_T, E)$ small enough,
\begin{align}
& \|\widetilde{w}\|_{S^1(I)\cap L^2(I;H^\frac 32_{-1})} \leq C( C_T, D'_T, E), \label{esti-wtw-S1LS-bdd-Short-H1} \\
& \|w\|_{{\mathbb{W}}(I) \cap L^2(I; H^\frac 32_{-1})} \leq C(C_T, D'_T, E) {\delta}lta, \label{esti-w-bbWLS-da-Short-H1} \\
& \|w\|_{{\mathbb{X}}(I)} \leq C(C_T, D'_T, E) {\delta}lta^{\frac{1}{d+2}}. \label{esti-w-bbX-da-Short-H1}
{\epsilon}nd{align}
{\epsilon}nd{lemma}
\begin{remark}
Unlike in \cite{KV13} (and also \cite{TV05}),
it is more delicate here
to derive the smallness bound equa\-tionref{esti-w-bbX-da-Short-H1} of $w$ in the exotic Strichartz space ${\mathbb{X}}(I)$,
because of the lower order perturbations in Equation equa\-tionref{equa-w-p}.
Below we first prove the smallness bound equa\-tionref{esti-w-bbWLS-da-Short-H1} of $w$
in the local smoothing space $L^2(I; H^\frac 32_{-1})$,
with which we are able to control the lower order perturbations
and then obtain the estimate equa\-tionref{esti-w-bbX-da-Short-H1}.
{\epsilon}nd{remark}
{\bf Proof. }
We first prove equa\-tionref{esti-wtw-S1LS-bdd-Short-H1}.
Applying Theorem \ref{Thm-Stri} to equa\-tionref{equa-wtw-p} and
using equa\-tionref{ineq-W.2}, equa\-tionref{Short-wtW-H1} and equa\-tionref{Short-H1-ve.0} we have
\begin{align} \label{esti-wtw-S1.0}
\|\widetilde{w}\|_{S^1(I)}
\leq& C_T (|\widetilde{w}(t_0)|_{H^1}
+ \|e^{\frac{4}{d-2} {\rm Re} \, \Phi} F(\widetilde{w})\|_{L^2(I; W^{1,\frac{2d}{d+2}})}
+ \| e \|_{N^1(I)+L^2(I; H^{\frac 12 }_1)} ) \noindent onumber \\
\leq& C_T (E+ CD'_T\|\widetilde{w}\|_{{\mathbb{W}}(I)}^{\frac{d+2}{d-2}} +{\varepsilon}) \noindent onumber \\
\leq& C_T (E+ C D'_T {\delta}lta^{\frac{d+2}{d-2}} +{\varepsilon}),
{\epsilon}nd{align}
which yields equa\-tionref{esti-wtw-S1LS-bdd-Short-H1}
if ${\delta}lta = {\delta}lta(D'_T,E)$ is small enough such that
$CD'_T {\delta}lta^{\frac{d+2}{d-2}} + {\delta}lta \leq E$.
In order to prove \eqref{esti-w-bbWLS-da-Short-H1},
again applying Theorem \ref{Thm-Stri} to \eqref{equa-wtw-p}
and using the H\"older inequality \eqref{ineq-W.2} and \eqref{Short-H1-ve.0} we have
\begin{align*}
\|U(\cdot,t_0)\widetilde{w}(t_0)\|_{{\mathbb{W}}(I) \cap L^2(I; H^\frac 32_{-1}) }
\leq& \|\widetilde{w}\|_{{\mathbb{W}}(I) \cap L^2(I; H^\frac 32_{-1})}
+ C C_T D'_T \|\widetilde{w}\|_{{\mathbb{W}}(I)}^{\frac{d+2}{d-2}} \nonumber \\
& + C_T \|e\|_{N^1(I) + L^2(I; H^\frac 12_{1})} \nonumber \\
\leq& \delta + CC_T D'_T\delta^{\frac{d+2}{d-2}} + C_T {\varepsilon}.
\end{align*}
Moreover, by the homogeneous Strichartz estimates and \eqref{Short-H1-ve},
\begin{align*}
\|U(\cdot, t_0)(w(t_0)-\widetilde{w}(t_0))\|_{{\mathbb{W}}(I) \cap L^2(I; H^\frac 32_{-1}) }
\leq C|w(t_0)-\widetilde{w}(t_0)|_{H^1} \leq C {\varepsilon}.
\end{align*}
Thus, we obtain
\begin{align} \label{esti-w-homo-bbWLS}
\|U(\cdot, t_0) w(t_0)\|_{{\mathbb{W}}(I) \cap L^2(I; H^\frac 32_{-1}) }
\leq C_1(C_T, D'_T)\delta.
\end{align}
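In more detail, \eqref{esti-w-bbWLS-da-Short-H1}'s starting point \eqref{esti-w-homo-bbWLS} follows from the two preceding estimates by the triangle inequality, together with ${\varepsilon}\leq \delta$ from \eqref{Short-H1-ve} and the implicit normalization $\delta\leq 1$:
\begin{align*}
\|U(\cdot, t_0) w(t_0)\|_{{\mathbb{W}}(I) \cap L^2(I; H^\frac 32_{-1})}
\leq \delta + CC_T D'_T\delta^{\frac{d+2}{d-2}} + (C_T+C) {\varepsilon}
\leq C_1(C_T, D'_T)\delta.
\end{align*}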
Arguing as above
and using equa\-tionref{esti-w-homo-bbWLS}
we deduce from Equation equa\-tionref{equa-w-p} that
\begin{align*}
\|w\|_{{\mathbb{W}}(I) \cap L^2(I; H^\frac 32_{-1}) }
\leq& \| U(\cdot, t_0)w(t_0)\|_{{\mathbb{W}}(I) \cap L^2(I; H^\frac 32_{-1}) }
+ C C_T D'_T\|w\|_{{\mathbb{W}}(I)}^{\frac{d+2}{d-2}} \\
\leq& C_1(C_T, D'_T){\delta}lta
+ C C_T D'_T\|w\|_{{\mathbb{W}}(I)}^{\frac{d+2}{d-2}}.
{\epsilon}nd{align*}
Then, in view of \cite[Lemma A.1]{BRZ18},
taking ${\delta}lta={\delta}lta(C_T, D'_T)$ smaller
such that
$C_1(C_T, D'_T){\delta}lta < (1-\frac 1{\alpha})({\alpha} C C_T D'_T)^{-\frac{1}{{\alpha}-1}}$,
we obtain equa\-tionref{esti-w-bbWLS-da-Short-H1}.
It remains to prove equa\-tionref{esti-w-bbX-da-Short-H1}.
For this purpose,
we see from \eqref{equa-wtw-p} that
\begin{align*}
i{\partial}_t \widetilde{w} = \Delta \widetilde{w} + (b\cdot \nabla + c)\widetilde{w} - e^{\frac{4}{d-2} {\rm Re} \,\Phi} F(\widetilde{w}) +e,
\end{align*}
where $b = 2 \nabla \Phi$ and $c= \Delta \Phi + \sum_{j=1}^d ({\partial}_j \Phi)^2$.
This yields that
\begin{align} \label{esti-wtw-homo-bbX-p.0}
\|e^{-i(\cdot-t_0){\Delta}elta} \widetilde{w}(t_0) &\|_{{\mathbb{X}}(I)}
\leq \|\widetilde{w}\|_{{\mathbb{X}}(I)}
+ \bigg\|\int_0^\cdot e^{-i(\cdot-s){\Delta}elta}( b\cdot \noindent a+ c)\widetilde{w}ds \bigg\|_{{\mathbb{X}}(I)} \noindent onumber \\
& + \bigg\|\int_0^\cdot e^{-i(\cdot-s){\Delta}elta}e^{\frac{4}{d-2}{\rm Re} \, \Phi} F(w)ds \bigg\|_{{\mathbb{X}}(I)}
+ \bigg\|\int_0^\cdot e^{-i(\cdot-s){\Delta}elta} e(s) ds \bigg\|_{{\mathbb{X}}(I)} \noindent onumber \\
& =:K_0+ K_1 + K_2 +K_3.
{\epsilon}nd{align}
Note that, by equa\-tionref{bbX-LpS0},
\begin{align} \label{esti-K0-H1-p}
K_0
\leq C \|\widetilde{w}\|_{{\mathbb{W}}(I)}^{\frac{1}{d+2}} \|\widetilde{w}\|_{S^1(I)}^{\frac{d+1}{d+2}}.
{\epsilon}nd{align}
Moreover, \eqref{bbX-bbYLSN1} yields that
\begin{align*}
K_1 \leq C\|(b\cdot \nabla+c) \widetilde{w}\|_{L^2(I;H^\frac 12_{1})}.
\end{align*}
We see that
$\|(b\cdot \nabla+c) \widetilde{w}\|_{H^\frac 12_{1}}= |\Psi_q \left<x\right>^{-1} \left<\nabla\right>^{\frac 32} \widetilde{w}|_2$,
where $\Psi_q:=\left<x\right>\left<\nabla\right>^{\frac 12} (b\cdot \nabla+c) \left<\nabla\right>^{-\frac 32} \left<x\right> \in S^0$.
Then, using Lemma \ref{Lem-L2-Bdd} and \eqref{Short-H1-ve}
we have for some $l\geq 1$,
\begin{align*}
\|(b\cdot \nabla+c) \widetilde{w}\|_{H^\frac 12_{1}}
\leq C \sup\limits_{t\in I} |(ib(t)\cdot \xi + c(t))|_{S^1}^{(l)} |\left<x\right>^{-1} \left<\nabla\right>^{\frac 32} \widetilde{w}|_2
\leq C \delta \|\widetilde{w}\|_{H^{\frac 32}_{-1}}.
\end{align*}
This yields that
\begin{align} \label{esti-K1-H1-p}
K_1 \leq C \| (b\cdot \nabla+ c)\widetilde{w}\|_{L^2(I;H^\frac 12_{1})}
\leq C \delta \|\widetilde{w}\|_{L^2(I;H^\frac 32_{-1})}.
\end{align}
We also deduce from equa\-tionref{bbX-bbYLSN1} that
\begin{align*}
K_2 \leq C \|e^{\frac{4}{d-2}{\rm Re} \, \Phi} F(\widetilde{w})\|_{{\mathbb{Y}}(I)}.
{\epsilon}nd{align*}
The product rule for fractional derivatives (see, e.g., \cite[Chapter 2.1]{T00})
implies
\begin{align*}
\|e^{\frac{4}{d-2}{\rm Re} \,\Phi} F(\widetilde{w})\|_{{\mathbb{Y}}(I)}
\leq& C\|\left<\noindent a\right>^{\frac{4}{d+2}}e^{\frac{4}{d-2}{\rm Re} \, \Phi}\|_{C(I;L^{\infty})}
\|F(\widetilde{w})\|_{L^{\frac d2}(I; L^{\frac{2d^2(d+2)}{d^3+4d^2+4d-16}})} \\
& + C \|e^{\frac{4}{d-2}{\rm Re} \, \Phi}\|_{C(I;L^{\infty})}
\|\left<\noindent a\right>^{\frac{4}{d+2}} F(\widetilde{w})\|_{L^{\frac d2}(I; L^{\frac{2d^2(d+2)}{d^3+4d^2+4d-16}})} \\
\leq& C \|e^{\frac{4}{d-2}{\rm Re} \, \Phi}\|_{C(I;W^{1,{\infty}})} \|F(\widetilde{w})\|_{{\mathbb{Y}}(I)},
{\epsilon}nd{align*}
which along with equa\-tionref{bbX-LpS0}, equa\-tionref{F-bbY-bbX}
implies that
\begin{align} \label{esti-K2-H1-p}
K_2 \leq CD'_T \|\widetilde{w}\|_{{\mathbb{X}}(I)}^{\frac{d+2}{d-2}}
\leq CD'_T \|\widetilde{w}\|_{{\mathbb{W}}(I)}^{\frac{1}{d-2}} \|\widetilde{w}\|_{S^1(I)}^{\frac{d+1}{d-2}}.
{\epsilon}nd{align}
Regarding $K_3$, by equa\-tionref{bbX-bbYLSN1},
\begin{align} \label{esti-K3-H1-p}
K_3 \leq C \|e\|_{N^1(I) + L^2(I;H^\frac 12 _{1})} .
{\epsilon}nd{align}
Thus, plugging equa\-tionref{esti-K0-H1-p}-equa\-tionref{esti-K3-H1-p} into equa\-tionref{esti-wtw-homo-bbX-p.0}
and using equa\-tionref{Short-H1-ve.0}, equa\-tionref{Short-H1-ve} and equa\-tionref{esti-wtw-S1LS-bdd-Short-H1}
yield
\begin{align} \label{esti-wtw-homo-bbX-p}
\|e^{-i(\cdot-t_0){\Delta}elta} \widetilde{w}(t_0) \|_{{\mathbb{X}}(I)}
\leq& C\|\widetilde{w}\|^{\frac{1}{d+2}}_{{\mathbb{W}}(I)} \|\widetilde{w}\|^{\frac{d+1}{d+2}}_{S^1(I)}
+ C {\delta}lta \|\widetilde{w}\|_{L^2(I;H^\frac 32_{-1})} \noindent onumber \\
& + CD'_T \|\widetilde{w}\|_{{\mathbb{W}}(I)}^{\frac{1}{d-2}} \|\widetilde{w}\|_{S^1(I)}^{\frac{d+1}{d-2}}
+ C \|e\|_{N^1(I)+L^2(I;H^\frac 12 _{1})} \noindent onumber \\
\leq& C_2(C_T,D'_T, E){\delta}lta^{\frac{1}{d+2}}.
{\epsilon}nd{align}
Moreover,
by equa\-tionref{bbX-H1} and equa\-tionref{Short-H1-ve},
\begin{align*}
\|e^{-i(\cdot-t_0){\Delta}elta} (w(t_0)- \widetilde{w}(t_0)) \|_{{\mathbb{X}}(I)}
\leq C |w(t_0)-\widetilde{w}(t_0)|_{H^1}
\leq C {\varepsilon}.
{\epsilon}nd{align*}
Thus, we obtain
\begin{align*}
\|e^{-i(\cdot-t_0){\Delta}elta} w(t_0) \|_{{\mathbb{X}}(I)}
\leq C_3(C_T,D'_T, E){\delta}lta^{\frac{1}{d+2}}.
{\epsilon}nd{align*}
Now,
similarly as above,
we deduce from Equation equa\-tionref{equa-w-p} that
\begin{align*}
\|w \|_{{\mathbb{X}}(I)}
\leq& \|e^{-i(\cdot-t_0){\Delta}elta} w(t_0)\|_{{\mathbb{X}}(I)}
+ \bigg\|\int_0^\cdot e^{-i(\cdot-s){\Delta}elta} (b\cdot \noindent a+ c)w(s) ds \bigg\|_{{\mathbb{X}}(I)} \\
& + \bigg\|\int_0^\cdot e^{-i(\cdot-s){\Delta}elta}e^{\frac{4}{d-2}{\rm Re} \,\Phi} F(w(s))ds \bigg\|_{{\mathbb{X}}(I)} \\
\leq& C_3(C_T,D'_T,E){\delta}lta^{\frac{1}{d+2}} + C {\delta}lta \|w\|_{L^2(I;H^\frac 32_{-1})}
+ C D'_T \|w\|_{{\mathbb{X}}(I)}^{\frac{d+2}{d-2}}
+ C {\varepsilon} \\
\leq& C_4(C_T,D'_T,E){\delta}lta^{\frac{1}{d+2}} + CD'_T \|w\|_{{\mathbb{X}}(I)}^{\frac{d+2}{d-2}},
{\epsilon}nd{align*}
where the last step is due to equa\-tionref{esti-w-bbWLS-da-Short-H1}.
Therefore,
taking ${\delta}lta= {\delta}lta(C_T, D'_T, E)$ even smaller such that
$ C_4(C_T,D'_T,E) {\delta}lta^{\frac{1}{d+2}} < (1-\frac 1{\alpha})({\alpha} C D'_T)^{-\frac{1}{{\alpha}-1}}$
and using \cite[Lemma A.1]{BRZ18} we obtain equa\-tionref{esti-w-bbX-da-Short-H1}.
$\square$ \\
{\bf Proof of Proposition \ref{Pro-ShortP-H1}.}
We first estimate $\|w-\widetilde{w}\|_{{\mathbb{X}}(I)}$.
For this purpose, we note that $z:= w-\widetilde{w}$ satisfies the equation
\begin{align} \label{equa-z-p-H1}
i{\partial}_t z =& e^{-\Phi}\Delta(e^{\Phi} z) - e^{\frac{4}{d-2}{\rm Re} \, \Phi} (F(z+\widetilde{w}) - F(\widetilde{w})) -e \nonumber \\
=& \Delta z + (b\cdot \nabla + c)z - e^{\frac{4}{d-2}{\rm Re} \, \Phi} (F(z+\widetilde{w}) - F(\widetilde{w})) -e.
{\epsilon}nd{align}
This yields that
\begin{align*}
\|z\|_{{\mathbb{X}}(I)}
\leq& \|e^{-i(\cdot-t_0) {\Delta}elta}z(t_0)\|_{{\mathbb{X}}(I)}
+ \bigg \|\int_{t_0}^\cdot e^{-i(\cdot-s){\Delta}elta} (b\cdot \noindent a +c)zds \bigg \|_{{\mathbb{X}}(I)} \noindent onumber \\
& + \bigg \|\int_{t_0}^\cdot e^{-i(\cdot-s){\Delta}elta}e^{\frac{4}{d-2} {\rm Re} \, \Phi} (F(z+\widetilde{w}) - F(\widetilde{w})) ds \bigg \|_{{\mathbb{X}}(I)}
+ \bigg \|\int_{t_0}^\cdot e^{-i(\cdot-s){\Delta}elta} e(s) ds \bigg \|_{{\mathbb{X}}(I)} \\
=:& J_0 + J_1 + J_2 + J_3.
{\epsilon}nd{align*}
First, Theorem \ref{Thm-Stri*}, equa\-tionref{X0-bbX-S1} and equa\-tionref{Short-H1-ve} yield that
$$ J_0 \leq C\|e^{-i(\cdot-t_0) {\Delta}elta}z(t_0)\|_{S^1(I)} \leq C|z(t_0)|_{H^1} \leq C {\varepsilon}.$$
Moreover, similarly to equa\-tionref{esti-K1-H1-p},
using equa\-tionref{Short-H1-ve.0}, equa\-tionref{Short-H1-ve} and equa\-tionref{esti-w-bbWLS-da-Short-H1} we have
\begin{align} \label{esti-J1-H1-p}
J_1 \leq C \|(b\cdot \noindent a + c)z \|_{L^2(I;H^\frac 12_{1})}
\leq C {\varepsilon} \|z\|_{L^2(I;H^\frac 32_{-1})}
\leq C_1(C_T, D'_T, E) {\varepsilon}.
{\epsilon}nd{align}
We also use equa\-tionref{bbX-bbYLSN1} and equa\-tionref{Fz-bbY-S1bbX} to get that
\begin{align} \label{esti-J2-H1-p}
J_2 \leq& C \| e^{\frac{4}{d-2}{\rm Re} \, \Phi} (F(z+\widetilde{w}) - F(\widetilde{w}))\|_{{\mathbb{Y}}(I)} \noindent onumber \\
\leq& CD'_T \|(F(z+\widetilde{w}) - F(\widetilde{w}))\|_{{\mathbb{Y}}(I)} \noindent onumber \\
\leq& CD'_T (\|\widetilde{w}\|_{{\mathbb{X}}(I)}^\frac{8}{d^2-4} \|\widetilde{w}\|_{S^1(I)}^\frac{4d}{d^2-4}
+ \|z\|_{{\mathbb{X}}(I)}^\frac{8}{d^2-4} \|z\|_{S^1(I)}^\frac{4d}{d^2-4} )
\|z\|_{{\mathbb{X}}(I)}.
{\epsilon}nd{align}
Note that, by equa\-tionref{bbX-LpS0} and equa\-tionref{Short-H1-ve.0},
\begin{align} \label{esti-wtw-bbx-S1}
\|\widetilde{w}\|_{{\mathbb{X}}(I)}
\leq C\|\widetilde{w}\|_{{\mathbb{W}}(I)}^{\frac {1}{d+2}}
\|\widetilde{w}\|_{S^1(I)}^{\frac{d+1}{d+2}}
\leq C \delta^{\frac{1}{d+2}} \|\widetilde{w}\|_{S^1(I)}^{\frac{d+1}{d+2}} .
\end{align}
Plugging equa\-tionref{esti-wtw-bbx-S1} into equa\-tionref{esti-J2-H1-p} and
using equa\-tionref{esti-wtw-S1LS-bdd-Short-H1} we obtain
\begin{align*}
J_2 \leq& C_2(C_T,D_T',E) {\delta}lta^{\frac{8}{(d-2)(d+2)^2}} \|z\|_{{\mathbb{X}}(I)}
+ C D'_T \|z\|_{S^1(I)}^\frac{4d}{d^2-4} \|z\|^{1+\frac{8}{d^2-4}}_{{\mathbb{X}}(I)}.
{\epsilon}nd{align*}
Regarding $J_3$,
similarly to equa\-tionref{esti-K3-H1-p},
by equa\-tionref{bbX-bbYLSN1},
\begin{align*}
J_3 \leq C\|e\|_{ N^1(I)+ L^2(I;H^\frac 12_1)} \leq C{\varepsilon}.
{\epsilon}nd{align*}
Thus, combining the estimates of $J_i$ above, $i=0,1,2,3$, we obtain
that for ${\delta}lta = {\delta}lta(C_T,D'_T, E)$ small enough
such that
$ C_2(C_T,D_T',E) {\delta}lta^{\frac{8}{(d-2)(d+2)^2}} \leq \frac 12$,
\begin{align} \label{esti-z-bbX-StaH1}
\|z\|_{{\mathbb{X}}(I)}
\leq 2 (C_1(C_T,D'_T,E)+2C){\varepsilon}
+ 2 CD'_T \|z\|_{S^1(I)}^\frac{4d}{d^2-4} \|z\|^{1+\frac{8}{d^2-4}}_{{\mathbb{X}}(I)} .
{\epsilon}nd{align}
Below we estimate $\|z\|_{S^1(I)}$ and $\|z\|_{L^2(I;H^\frac 32_{-1})}$.
Arguing as in the proof of equa\-tionref{esti-wtw-S1.0},
applying Theorem \ref{Thm-Stri} to equa\-tionref{equa-z-p-H1}
and using equa\-tionref{ineq-W.2} and equa\-tionref{Short-H1-ve} we have
\begin{align} \label{esti-z-S1-Short-H1.0}
\|z\|_{S^1(I) \cap L^2(I; H^{\frac 32 }_{-1})}
\leq& C_T \|z(t_0)\|_{H^1}
+ C_T \|e^{\frac{4}{d-2} {\rm Re} \,\Phi} (F(z+\widetilde{w}) - F(\widetilde{w}))\|_{N^1(I)} \noindent onumber \\
& + C_T \| e \|_{N^1(I)+ L^2(I; H^{\frac 12 }_1)} \noindent onumber \\
\leq& 2C_T {\varepsilon} + C_T D'_T \|(F(z+\widetilde{w}) - F(\widetilde{w}))\|_{N^1(I)}.
{\epsilon}nd{align}
Note that, by H\"older's inequality equa\-tionref{ineq-W.2}, equa\-tionref{Short-H1-ve.0} and equa\-tionref{esti-w-bbWLS-da-Short-H1},
\begin{align} \label{esti-F-Short-H1}
\|F(z+\widetilde{w}) - F(\widetilde{w})\|_{N^0(I)}
\leq& C (\|\widetilde{w}\|^{\frac{4}{d-2}}_{{\mathbb{W}}(I)}
+ \|z\|^{\frac{4}{d-2}}_{{\mathbb{W}}(I)} )
\|z\|_{S^1(I)} \noindent onumber \\
\leq& C_3(C_T,D'_T,E){\delta}lta^{\frac{4}{d-2}}\|z\|_{S^1(I)}.
{\epsilon}nd{align}
Moreover,
arguing as in the proof of \cite[(3.67)]{KV13}
and using equa\-tionref{Short-H1-ve.0}, equa\-tionref{esti-w-bbX-da-Short-H1} we have
\begin{align} \label{esti-naF-Short-H1}
\|\nabla (F(z+\widetilde{w}) - F(\widetilde{w}))\|_{N^0(I)}
\leq& C( \|\nabla \widetilde{w}\|_{S^0(I)} \|z\|^{\frac{4}{d-2}}_{X^0(I)}
+ \|z+\widetilde{w}\|^{\frac{4}{d-2}}_{X^0(I)} \|\nabla z\|_{S^0(I)} ) \nonumber \\
\leq& C_4(C_T, D'_T,E) ( \|z\|^{\frac{4}{d-2}}_{{\mathbb{X}}(I)}
+ \delta^{\frac{4}{d^2-4}} \|z\|_{S^1(I)}) .
\end{align}
Thus, plugging equa\-tionref{esti-F-Short-H1}, equa\-tionref{esti-naF-Short-H1} into equa\-tionref{esti-z-S1-Short-H1.0}
we get
\begin{align*}
\|z\|_{S^1(I) \cap L^2(I;H^{\frac 32}_{-1})}
\leq& 2C_T {\varepsilon}
+C_5(C_T,D'_T,E) ( \|z\|_{{\mathbb{X}}(I)}^{\frac{4}{d-2}}
+ ({\delta}lta^{\frac{4}{d-2}}+{\delta}lta^{\frac{4}{d^2-4}}) \|z\|_{S^1(I)} ).
{\epsilon}nd{align*}
Taking ${\delta}lta = {\delta}lta(C_T, D'_T,E)$ small such that
$C_5(C_T,D'_T,E) ({\delta}lta^{\frac{4}{d-2}} + {\delta}lta^{\frac{4}{d^2-4}}) \leq \frac 12$
yields
\begin{align} \label{esti-z-S1-Short-H1}
\|z\|_{S^1(I)\cap L^2(I;H^{\frac 32}_{-1})}
\leq 4C_T {\varepsilon}
+ 2C_5(C_T,D'_T,E) \|z\|^{\frac{4}{d-2}}_{{\mathbb{X}}(I)} .
{\epsilon}nd{align}
Now,
plugging equa\-tionref{esti-z-S1-Short-H1} into equa\-tionref{esti-z-bbX-StaH1} we get that
if $c_1:= \frac{8}{d^2-4}$, $c_2:= \frac{24d-16}{(d-2)^2(d+2)}>0$,
\begin{align} \label{esti-z-bbX-StaH1.1}
\|z\|_{{\mathbb{X}}(I)}
\leq C_6(C_T, D'_T, E) {\varepsilon}
+ C_6(C_T, D'_T,E)
{\varepsilon}^{\frac{4d}{d^2-4}} (\|z\|_{{\mathbb{X}}(I)}^{1+ c_1}
+ \|z\|_{{\mathbb{X}}(I)}^{1+c_2}).
{\epsilon}nd{align}
Since $0<c_1<c_2$,
$\|z\|_{{\mathbb{X}}(I)}^{1+c_1} \leq \|z\|_{{\mathbb{X}}(I)} + \|z\|_{{\mathbb{X}}(I)}^{1+c_2}$,
we have
\begin{align*}
\|z\|_{{\mathbb{X}}(I)}
\leq C_6(C_T,D'_T,E)({\varepsilon} + {\varepsilon}^{\frac{4d}{d^2-4}} \|z\|_{{\mathbb{X}}(I)}
+ 2 \|z\|_{{\mathbb{X}}(I)}^{1+c_2}).
{\epsilon}nd{align*}
Then, taking ${\delta}lta$ very small such that
$C_6(C_T,D'_T,E) {\delta}lta^{\frac{4d}{d^2-4}} \leq \frac 12$,
we come to
\begin{align*}
\|z\|_{{\mathbb{X}}(I)}
\leq 2 C_6(C_T,D'_T,E) {\varepsilon} + 4C_6(C_T,D'_T,E)\|z\|_{{\mathbb{X}}(I)}^{1+c_2}.
{\epsilon}nd{align*}
Thus, taking ${\delta}lta$ even smaller such that
$2C_6{\delta}lta <(1-\frac 1{\alpha})(4{\alpha} C_6)^{-\frac{1}{{\alpha}-1}}$,
we apply \cite[Lemma A.1]{BRZ18}
to obtain equa\-tionref{Short-H1.1},
which along with equa\-tionref{esti-z-S1-Short-H1} implies equa\-tionref{Short-H1.2}.
Finally,
equa\-tionref{Short-H1.3} follows from equa\-tionref{Short-H1.2} and equa\-tionref{esti-wtw-S1LS-bdd-Short-H1},
equa\-tionref{Short-H1.4} can be proved by equa\-tionref{Short-H1.1} and similar estimates as in equa\-tionref{esti-J2-H1-p},
and equa\-tionref{Short-H1.5} follow from
equa\-tionref{Short-H1.1}, equa\-tionref{Short-H1.2} and equa\-tionref{esti-naF-Short-H1}.
Therefore, the proof is complete.
$\square$ \\
{\bf Proof of Theorem \ref{Thm-Sta-H1}.}
Let ${\delta}lta={\delta}lta(C_T,D'_T,E)$ be as in Proposition \ref{Pro-ShortP-H1}.
As in the proof of Theorem \ref{Thm-Sta-L2},
since $\|\widetilde{w}\|_{{\mathbb{W}}(I)} \leq L<{\infty}$,
we can divide $I$ into subintervals $I'_j=[t'_j,t'_{j+1}]$,
such that
$0\leq j\leq l'\leq (\frac{2L}{\delta})^{\frac{2(d+2)}{d-2}}<{\infty}$,
and $\|\widetilde{w}\|_{{\mathbb{W}}(I'_j)} \leq \frac{\delta}{2}$ for each $0\leq j\leq l'$.
Similarly, since $\|\widetilde{w}\|_{L^2(I;H^\frac 32_{-1})} \leq L<{\infty}$,
we have another finite partition $I''_{j}=[t''_j, t''_{j+1}]$,
so that
$0\leq j\leq l''\leq (\frac{2L}{\delta})^{2}$
and on each $I''_j$, $\|\widetilde{w}\|_{L^2(I''_j;H^\frac 32_{-1})} \leq \frac{\delta}{2}$.
Thus, letting $\{t_j\} = \{t'_j\} \cup \{t''_j\}$,
we obtain a partition $\{I_j=[t_j,t_{j+1}]\}_{j=0}^l$
satisfying
$l\leq (\frac{2L}{\delta})^{\frac{2(d+2)}{d-2}} + (\frac{2L}{\delta})^{2}$
and $\|\widetilde{w}\|_{{\mathbb{W}}(I_j) \cap L^2(I_j;H^\frac 32_{-1})} \leq \delta$.
Let $C(0): =C(C_T,D'_T,E)$,
$C(j+1) = C(0)(2C_T+C_T D'_T \sum_{k=0}^jC(k))$,
$0\leq j\leq l$,
where $C(C_T,D_T',E)$ is as in Proposition \ref{Pro-ShortP-H1}.
Choose ${\varepsilon}_*(C_T,D'_T,E,L)$
sufficiently small such that
\begin{align} \label{ve*-H1}
(2C_T + C_T D'_T \sum\limits_{k=0}^l C(k)) {\varepsilon}_*^{(\frac{4}{d-2})^{l+1}} \leq {\delta}lta.
{\epsilon}nd{align}
We claim that on each $I_j$, $0\leq j\leq l$,
estimates \eqref{Short-H1.1}-\eqref{Short-H1.5} hold with
$I$, $C(C_T,D'_T,E)$, ${\varepsilon}$
replaced by $I_j$, $C(j)$ and ${\varepsilon}^{(\frac{4}{d-2})^{j+1}}$, respectively.
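(Heuristically, the geometric exponent arises as follows: the smallness parameter available at the left endpoint of $I_{j+1}$ is the $H^1$-distance of the two solutions there, which by \eqref{Short-H1.2} and \eqref{Short-H1.5} on $I_j$ is of size ${\varepsilon}_j^{\frac{4}{d-2}}$ when ${\varepsilon}_j$ is the parameter used on $I_j$. Iterating ${\varepsilon}_{j+1}\sim {\varepsilon}_j^{\frac{4}{d-2}}$ with ${\varepsilon}_0={\varepsilon}$, and using that $0<{\varepsilon}<1$ and $\frac{4}{d-2}<1$ for $d>6$, the weaker exponents $(\frac{4}{d-2})^{j+1}$ in the claim suffice, up to the constants $C(j)$.)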
Actually,
Proposition \ref{Pro-ShortP-H1} implies that
the claim is true for $j=0$.
Suppose that it is valid for each $0\leq k\leq j<l$.
Then, on the next interval $I_{j+1}$,
applying Theorem \ref{Thm-Stri} to equa\-tionref{equa-z-p-H1}
and using the inductive assumptions and equa\-tionref{ve*-H1} we have
\begin{align*}
|w(t_{j+1})- \widetilde{w}(t_{j+1})|_{H^1}
\leq& C_T |w(t_{0})- \widetilde{w}(t_{0})|_{H^1}
+ C_T \|e^{\frac{4}{d-2}{\rm Re} \,\Phi} (F(w)- F(\widetilde{w}))\|_{N^1(t_0,t_{j+1})} \\
& + C_T \|e\|_{L^2(t_0, T;H^{\frac 12}_{1}) + N^1(t_0, T)} \\
\leq& 2 C_T {\varepsilon} + C_T D'_T \sum\limits_{k=0}^j C(k) {\varepsilon}^{(\frac{4}{d-2})^{k+1}}
\leq {\delta}lta.
{\epsilon}nd{align*}
Thus, Proposition \ref{Pro-ShortP-H1} yields that the claim holds on $I_{j+1}$.
Therefore,
using the inductive arguments we prove the claim on any $I_j$, $0\leq j\leq l$.
This yields equa\-tionref{Sta-H1.2}, equa\-tionref{Sta-H1.3} and that for some
$c=c(C_T,D'_T,E,L)>0$,
\begin{align*}
\|w-\widetilde{w}\|_{{\mathbb{X}}(I)}
\leq C'(C_T, D'_T,E) {\varepsilon}^c.
{\epsilon}nd{align*}
Finally, taking into account equa\-tionref{Lp-bbXS1} and equa\-tionref{Sta-H1.2},
we obtain for some $0<c'\leq 1$,
\begin{align*}
\|w-\widetilde{w}\|_{L^{\frac{2(d+2)}{d-2}}(I)}
\leq \|w-\widetilde{w}\|_{{\mathbb{X}}(I)}^{c'} \|w-\widetilde{w}\|_{S^1(I)}^{1-c'}
\leq C''(C_T, D'_T,E) {\varepsilon}^{c},
{\epsilon}nd{align*}
thereby yielding equa\-tionref{Sta-H1.1}.
The proof is complete.
$\square$
\section{Global well-posedness} \label{Sec-GWP}
This section is mainly devoted to the global well-posedness of equa\-tionref{equa-x} in the
mass and energy critical cases.
\subsection{Mass-critical case} \label{Subsec-GWP-L2}
We first recall the global well-posedness and scattering results
in the deterministic defocusing mass-critical case,
based on the work of Dodson \cite{D12, D16.1,D16.2}
\footnote{\cite{D12,D16.1, D16.2} study the equation $i{\partial}_t u= -{\Delta}elta u + |u|^{\frac 4 d} u$,
which can be easily transformed into equa\-tionref{equa-u-L2}
by reversing the time.
Hence, the results in \cite{D12,D16.1, D16.2} also hold for equa\-tionref{equa-u-L2}.}.
\begin{theorem} (\cite{D12,D16.1, D16.2}) \label{Thm-L2GWP-Det}
For any $u_0\in L^2$,
there exists a unique global $L^2$-solution $u$ to the equation
\begin{align} \label{equa-u-L2}
i{\partial}_t u =& {\Delta}elta u - |u|^{\frac{4}{d}} u, \\
u(0)=& u_0 \noindent onumber
{\epsilon}nd{align}
with $d{\gamma}eq 1$. Moreover,
\begin{align} \label{globdd-u-L2-Det}
\|u\|_{V({\mathbb{R}}) \cap L^2({\mathbb{R}}; H^\frac 12_{-1})} \leq B_0(|u_0|_2) <{\infty},
{\epsilon}nd{align}
where $B_0(|u_0|_2)$ depends continuously on $u_0$ in $L^2$,
and $u$ scatters at infinity,
i.e., there exist $u_{{\partial}m} \in L^2$ such that
\begin{align} \label{sca-det-L2}
| e^{it{\Delta}elta} u(t) - u_{{\partial}m}|_2 \to 0,\ as\ t\to {\partial}m{\infty}.
{\epsilon}nd{align}
{\epsilon}nd{theorem}
We remark that the bound of $\|u\|_{L^2({\mathbb{R}}; H^\frac 12_{-1})}$
in equa\-tionref{globdd-u-L2-Det} follows standardly from
Strichartz estimates and the bound of $\|u\|_{V({\mathbb{R}})}$,
and the continuity of $B_0(|u_0|_2)$ on $u_0$
is a consequence of the mass-critical stability result Lemma $3.6$ of \cite{TVZ07}.
We also need the following boundedness of $X$ in the space $L^2$.
\begin{lemma} \label{Lem-bdd-L2}
Assume that the conditions of Theorem \ref{Thm-GWP} $(i)$ hold.
Then, for each $X_0\in L^2$,
we have ${\mathbb{P}}$-a.s.
\begin{align} \label{Ito-L2}
|X(t)|_2^2 = |X_0|_2^2 + 2 \sum\limits_{k=1}^N\int_0^t \int {\rm Re} \, G_k(s) |X(s)|^2 dx d\beta_k(s),
\ \ 0\leq t<\tau^*,
{\epsilon}nd{align}
where $\tau^*$ is the maximal existence time as in Theorem \ref{Thm-LWP}.
Moreover, for any $0<T<{\infty}$, $p{\gamma}eq 1$,
\begin{align} \label{bdd-X-L2}
& {\mathbb{E}} \|X\|^p_{C([0,\tau^*\wedge T);L^2)} \leq C(p,T) <{\infty}.
{\epsilon}nd{align}
In particular,
\begin{align} \label{globdd-M}
M_T:= \sup\limits_{0\leq t<\tau^*\wedge T} |X(t)|_2 \leq C(T) <{\infty},\ \ a.s..
{\epsilon}nd{align}
{\epsilon}nd{lemma}
{\bf Proof.} The It\^o formula \eqref{Ito-L2} was obtained in \cite[(6.1)]{HRZ18}.
The proof of \eqref{bdd-X-L2}
is similar to that of \cite[Lemma $3.6$]{BRZ16},
and is based on the Burkholder-Davis-Gundy and Gronwall inequalities.
We omit the details here for simplicity.
$\square$ \\
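For the reader's orientation we sketch the omitted argument, under the additional assumption (not restated here) that the coefficients ${\rm Re}\, G_k$ are bounded on $[0,T]\times{\mathbb{R}}^d$; this is only a sketch of the scheme of \cite{BRZ16}, and the localization by stopping times needed to justify the computation is suppressed. Raising \eqref{Ito-L2} to the power $\frac p2$ (say $p\geq 4$), taking the supremum in time and applying the Burkholder-Davis-Gundy inequality to the martingale term, one gets for $0\leq T'\leq T$,
\begin{align*}
{\mathbb{E}} \sup_{0\leq t\leq T'\wedge\tau^*} |X(t)|_2^{p}
\leq C_p |X_0|_2^{p}
+ C_p\, {\mathbb{E}} \Big( \int_0^{T'\wedge\tau^*} \sum_{k=1}^N \Big(\int {\rm Re}\, G_k\, |X|^2\, dx\Big)^2 ds \Big)^{\frac p4}.
\end{align*}
Bounding $|\int {\rm Re}\, G_k |X|^2 dx|\leq \|{\rm Re}\, G_k\|_{L^{\infty}} |X|_2^2$ and using H\"older's inequality in time, the right-hand side is controlled by $C_p|X_0|_2^p + C(p,T)\int_0^{T'} {\mathbb{E}}\sup_{0\leq r\leq s\wedge\tau^*}|X(r)|_2^p\, ds$, and \eqref{bdd-X-L2} then follows from Gronwall's lemma.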
{\bf Proof of Theorem \ref{Thm-GWP} $(i)$.}
(Mass-Critical Case).
Let $X$ be the unique $L^2$-solution to equa\-tionref{equa-x}
on the maximal time interval $[0,\tau^*)$.
In order to prove the global existence of $X$,
we only need to prove the global bound equa\-tionref{gloexist-L2} for any $0<T<{\infty}$.
For this purpose, we proceed as follows:
we first consider a small (random) time interval $I_1$,
determined by the smallness condition equa\-tionref{Sta-L2-ve} of Theorem \ref{Thm-Sta-L2},
and we apply the rescaling transformation
and the stability result Theorem \ref{Thm-Sta-L2}
to obtain the bound of $V(I_1)$-norm of the resulting random solution
and so of the stochastic solution $X$.
Then, we construct consecutively small (random) subintervals $I_j$, $2\leq j\leq l$,
on which we obtain the bound of $\|X\|_{V(I_j)}$ by using
Theorem \ref{Thm-Rescale-sigma}
as well as Theorem \ref{Thm-Sta-L2}.
At last, by virtue of the global $L^2$ bound in Lemma \ref{Lem-bdd-L2},
we are able to show that the total number $l$ is finite almost surely,
thus we obtain the desirable global bound equa\-tionref{gloexist-L2}.
Let us start with the first step.
{\bf Step $1$.}
Set
$g(t):= \sum_{k=1}^N \big(|\int_0^t g_k(s) d\beta_k(s)| + \int_0^t g_k^2(s) ds\big)$,
$0\leq t\leq T$.
Let $\tau_1:= \inf\{ 0<t<T\wedge \tau^*: g(t) \geq {\varepsilon}_1(t)\} \wedge (T\wedge \tau^*)$
with
\begin{align} \label{ve1-L2-GWP}
{\varepsilon}_1(t) = \frac{{\varepsilon}_*(C_t,D_t, |X_0|_2)}{D_0(|X_0|_2)},
\end{align}
where
${\varepsilon}_*(C_t,D_t, |X_0|_2) := {\varepsilon}_*(C_t,D_t,|X_0|_2, 0, B_0(|X_0|_2))$
is as in Theorem \ref{Thm-Sta-L2},
and $D_0(|X_0|_2) = C_0(B_0(|X_0|_2) + (B_0(|X_0|_2))^{1+\frac 4d})$
with $C_0$ specified in \eqref{esti-e1-L2} below.
Let ${\varphi}$ be as in \eqref{vf} with $\sigma \equiv 0$.
By Theorem \ref{Thm-Rescale-sigma},
$v_1:= e^{-{\varphi}} X$
satisfies the random equation equa\-tionref{equa-RNLS}
with ${\alpha}= 1+\frac 4d$.
In order to obtain the bound of $\|v_1\|_{V(0,\tau_1)}$,
we compare $v_1$ with the solution $\widetilde{v}_1$ to equa\-tionref{equa-NLS}
(or, equivalently, equa\-tionref{equa-NLS*}) with
${\alpha}=1+\frac 4d$
and with the same initial datum, i.e.,
$\widetilde{v}_1(0) = v_1(0) = X_0$.
Then, Theorem \ref{Thm-L2GWP-Det} implies that
$\widetilde{v}_1$ exists globally and satisfies that
\begin{align} \label{esti-u1-L2}
\|\widetilde{v}_1\|_{V(0,\tau_1 ) \cap L^2(0,\tau_1; H^\frac 12_{-1})}
\leq B_0(|\widetilde{v}_1(0)|_2) = B_0(|X_0|_2) <{\infty}.
{\epsilon}nd{align}
Moreover,
in order to estimate the error term equa\-tionref{Error-NLS}, i.e.,
\begin{align*}
e_1 := -(b \cdot \nabla + c)\widetilde{v}_1
- (1- e^{\frac 4d {\rm Re} \, {\varphi}} ) F(\widetilde{v}_1),
\end{align*}
where $b$, $c$ are as in equa\-tionref{b} and equa\-tionref{c}
with $\sigmaequa\-tionuiv 0$, respectively,
we note that
\begin{align} \label{esti-e1-L2.0}
&\|e_1\|_{ N^0(0,\tau_1) + L^2(0,\tau_1; H^{-\frac 12}_1)} \noindent onumber \\
\leq& \|(b\cdot \noindent a+ c) \widetilde{v}_1\|_{L^2(0,\tau_1; H^{-\frac 12}_1)}
+ \| (1- e^{\frac 4d {\rm Re} \, {\varphi}} ) F(\widetilde{v}_1) \|_{L^{\frac{2(d+2)}{d+4}}((0,\tau_1)\times {\mathbb{R}}^d)}.
{\epsilon}nd{align}
To estimate the first term on the right-hand side above,
we see that, by \eqref{asymflat},
\begin{align*}
\left<x\right> \left<\nabla\right>^{-\frac 12} (b\cdot \nabla + c) \widetilde{v}_1
= \Psi_p \left<x\right>^{-1} \left<\nabla\right>^\frac 12 \widetilde{v}_1,
\end{align*}
where
$\Psi_p := \left<x\right> \left<\nabla\right>^{-\frac 12} (b\cdot \nabla + c) \left<\nabla\right>^{-\frac 12} \left<x\right>$
is a pseudo-differential operator of zero order.
Then, using Lemma \ref{Lem-L2-Bdd} we get for some $m\geq 1$,
\begin{align} \label{esti-e1-L2.1}
\|(b\cdot \nabla+ c) \widetilde{v}_1\|_{L^2(0,\tau_1; H^{-\frac 12}_1)}
\leq& C \sup\limits_{0\leq t\leq \tau_1} |p(t)|_{S^0}^{(m)}
\|\widetilde{v}_1 \|_{L^2(0,\tau_1; H^{\frac 12}_{-1})} \nonumber \\
\leq& C' \sup\limits_{0\leq t\leq\tau_1 } g(t)
\|\widetilde{v}_1 \|_{L^2(0,\tau_1; H^{\frac 12}_{-1})}.
\end{align}
Moreover, using \eqref{ineq-V}
and the inequality $|1-e^x|\leq e|x|$ for $|x|\leq 1$,
we have
\begin{align} \label{esti-e1-L2.2}
\|(1-e^{\frac 4d {\rm Re} \, {\varphi}}) F(\widetilde{v}_1)\|_{L^{\frac{2(d+2)}{d+4}}((0,\tau_1) \times {\mathbb{R}}^d)}
\leq C'' \sup\limits_{0\leq t\leq \tau_1} g(t)
\|\widetilde{v}_1\|^{1+ \frac 4d}_{V(0,\tau_1)}.
\end{align}
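(For completeness, the elementary inequality $|1-e^x|\leq e|x|$ for $|x|\leq 1$ follows from the mean value theorem: $|1-e^x| = e^{\xi}|x|$ for some $\xi$ between $0$ and $x$, and $e^{\xi}\leq e^{|x|}\leq e$.)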
Hence, plugging equa\-tionref{esti-e1-L2.1} and equa\-tionref{esti-e1-L2.2} into equa\-tionref{esti-e1-L2.0}
and using equa\-tionref{globdd-u-L2-Det} we arrive at
\begin{align} \label{esti-e1-L2}
\|e_1\|_{L^2(0,\tau_1; H^{-\frac 12}_1) + N^0(0,\tau_1)}
\leq& C \sup\limits_{0\leq t\leq \tau_1} g(t)
(\|\widetilde{v}_1\|_{L^2(0,\tau_1; H^{\frac 12}_{-1})}
+ \|\widetilde{v}_1\|^{1+\frac 4 d}_{V(0,\tau_1)}) \noindent onumber \\
\leq& C( B_0(|X_0|_2) + (B_0(|X_0|_2))^{1+\frac 4d}) {\varepsilon}_1 (\tau_1) \noindent onumber \\
\leq& {\varepsilon}_*(C_{\tau_1}, D_{\tau_1}, |X_0|_2),
{\epsilon}nd{align}
where $C_0 := \max\{C', C''\}$.
Thus, in view of Theorem \ref{Thm-Sta-L2},
we obtain
\begin{align} \label{esti-v1-L2*}
\|v_1\|_{V(0,\tau_1)} \leq C(C_{\tau_1}, D_{\tau_1}, |X_0|_2, 0, B_0(|X_0|_2))
=: C(C_{\tau_1}, D_{\tau_1}, |X_0|_2) ,
{\epsilon}nd{align}
which implies that
\begin{align}
\|X\|_{V(0,\tau_1)} \leq \|e^{{\varphi}}\|_{C([0,\tau_1];L^{\infty})} C(C_{\tau_1}, D_{\tau_1}, |X_0|_2).
{\epsilon}nd{align}
Thus, equa\-tionref{esti-v1-L2*} implies equa\-tionref{gloexist-L2} if $\tau_1 = T\wedge \tau^*$.
Otherwise, we come to the next step.
{\bf Step $2$.}
Set $\sigma_0:=0$,
$\sigma_1 := \tau_1$.
For $j{\gamma}eq 1$,
we define random times inductively:
\begin{align*}
& \tau_{j+1} := \inf\{t\in (0,(T\wedge \tau^*)-\sigma_j): g_{\sigma_{j}}(t) {\gamma}eq {\varepsilon}_{j+1}(t) \} \wedge (T\wedge \tau^* - \sigma_j), \\
& \sigma_{j}:= \sum_{k=1}^{j} \tau_k (\leq T\wedge \tau^*), \ \ l:=\inf\{j{\gamma}eq 1:\sigma_j = T\wedge \tau^*\}.
{\epsilon}nd{align*}
Here,
let $g_{\sigma_j}(t) := \sum_{k=1}^N \big(|\int_{\sigma_j}^{\sigma_{j}+t} g_k(s) d\beta_k(s)| + \int_{\sigma_j}^{\sigma_{j}+t} g^2_k(s) ds\big)$
and
\begin{align} \label{vej-L2-GWP}
{\varepsilon}_{j+1}(t):= \frac{{\varepsilon}_*(C_{\sigma_j+t}, D_{\sigma_j+t}, |X(\sigma_j)|_2)}{D_0(|X(\sigma_j)|_2)}
{\epsilon}nd{align}
with
${\varepsilon}_*(C_{\sigma_j+t}, D_{\sigma_j+t}, |X(\sigma_j)|_2)
:= {\varepsilon}_*(C_{\sigma_j+t}, D_{\sigma_j+t}, |X(\sigma_j)|_2, 0, B_0(|X(\sigma_j)|_2))$
as in Theorem \ref{Thm-Sta-L2},
and $D_0(|X(\sigma_j)|_2)$ is defined similarly to $D_0(|X_0|_2)$.
We see that
$\tau_{j+1}$ (resp. $\sigma_{j}$) are $\mathscr{G}_j(t):=\mathscr{F}(\sigma_j+t)$ (resp. $\mathscr{F}(t))$-stopping times, $0\leq t\leq T$.
We use the inductive arguments to obtain the bound of $\|X\|_{V(0,\sigma_j)}$ for any $1\leq j\leq l$.
Suppose that for each $1\leq k\leq j<l$,
\begin{align} \label{esti-v-Xj}
\|X\|_{V(0,\sigma_k)}
\leq \sum\limits_{i=0}^{k-1} \|e^{{\varphi}_{\sigma_i}}\|_{C([0,\tau_{i+1}];L^{\infty})}
C(C_{\sigma_{i+1}}, D_{\sigma_{i+1}}, |X(\sigma_i)|_2),
\end{align}
where ${\varphi}_{\sigma_i}$ is as in \eqref{vf} with $\sigma_i$ replacing $\sigma$,
and
$C(C_{\sigma_{i+1}}, D_{\sigma_{i+1}}, |X(\sigma_i)|_2) :=
C(C_{\sigma_{i+1}}, D_{\sigma_{i+1}}, |X(\sigma_i)|_2, 0, B_0(|X(\sigma_i)|_2))$
is as in Theorem \ref{Thm-Sta-L2}.
Below we show that equa\-tionref{esti-v-Xj} also holds when $k$ is replaced by $j+1$.
For this purpose,
we apply Theorem \ref{Thm-Rescale-sigma} to obtain that
\begin{align} \label{res-zj1}
v_{j+1}(t) := e^{-{\varphi}_{\sigma_j}(t)} X(\sigma_j +t), \ \ 0\leq t<(T\wedge \tau^*)-\sigma_j,
\end{align}
satisfies the equation
\begin{align} \label{equa-vj1-L2}
i{\partial}_t v_{j+1} &= e^{-{\varphi}_{\sigma_j}}\Delta (e^{{\varphi}_{\sigma_j}}v_{j+1})
- e^{\frac 4d {\rm Re} \, {\varphi}_{\sigma_j}} F(v_{j+1}), \\
v_{j+1}(0)&= X(\sigma_j). \nonumber
\end{align}
Similarly to Step $1$,
we compare \eqref{equa-vj1-L2} with the equation
\begin{align} \label{equa-wtu-j1.0}
i {\partial}_t \widetilde{v}_{j+1}
= \Delta \widetilde{v}_{j+1} - F(\widetilde{v}_{j+1}),
\end{align}
or equivalently,
\begin{align} \label{equa-wtu-j1}
i {\partial}_t \widetilde{v}_{j+1}
= e^{-{\varphi}_{\sigma_j}}\Delta (e^{{\varphi}_{\sigma_j}}\widetilde{v}_{j+1})
- e^{\frac 4d {\rm Re} \, {\varphi}_{\sigma_j}} F(\widetilde{v}_{j+1}) + e_{j+1},
\end{align}
with $\widetilde{v}_{j+1}(0) = X(\sigma_j)$
and
\begin{align} \label{err-ej1-L2}
e_{j+1}:= -(b_{\sigma_j}(t) \cdot \nabla + c_{\sigma_j}(t))\widetilde{v}_{j+1}
-(1- e^{\frac 4d {\rm Re} \, {\varphi}_{\sigma_j}(t)}) F(\widetilde{v}_{j+1}),
\end{align}
where $b_{\sigma_j}$ and $c_{\sigma_j}$ are as in \eqref{b} and \eqref{c}
with $\sigma_j$ replacing $\sigma$, respectively.
Again, Theorem \ref{Thm-L2GWP-Det} yields that
$\widetilde{v}_{j+1}$ exists globally and
\begin{align} \label{esti-wtu-Vj1}
\|\widetilde{v}_{j+1}\|_{V(0,\tau_{j+1})} \leq B_0 (|\widetilde{v}_{j+1}(0)|_2) = B_0 (|X(\sigma_j)|_2).
{\epsilon}nd{align}
This implies that,
similarly to equa\-tionref{esti-e1-L2},
\begin{align} \label{esti-ej1}
\|e_{j+1}\|_{N^0(0,\tau_{j+1} ) + L^2(0,\tau_{j+1}; H^{-\frac 12}_{1})}
\leq& C \sup\limits_{0\leq t\leq \tau_{j+1}} g_{\sigma_j}(t)
(\|\widetilde{v}_{j+1}\|_{L^2(0,\tau_{j+1} ; H^\frac 12_{-1})}
+ \|\widetilde{v}_{j+1}\|^{1+\frac 4 d}_{V(0,\tau_{j+1} )}) \noindent onumber \\
\leq& C(B_0(|X_{\sigma_j}|_2) + (B_0(|X_{\sigma_j}|_2))^{1+\frac 4d }) {\varepsilon}_{j+1}(\tau_{j+1}) \noindent onumber \\
\leq& {\varepsilon}_*(C_{\sigma_{j+1}}, D_{\sigma_{j+1}}, |X_{\sigma_j}|_2).
{\epsilon}nd{align}
Thus, by virtue of Theorem \ref{Thm-Sta-L2},
we obtain
\begin{align}
\|v_{j+1}\|_{V(0,\tau_{j+1})}
\leq C(C_{\sigma_{j+1}}, D_{\sigma_{j+1}}, |X(\sigma_j)|_2),
{\epsilon}nd{align}
and so
\begin{align} \label{esti-Xj1-L2}
\|X\|_{V(\sigma_j,\sigma_{j+1})}
\leq \|e^{{\varphi}_{\sigma_j}}\|_{C([0,\tau_{j+1}];L^{\infty})} C(C_{\sigma_{j+1}}, D_{\sigma_{j+1}}, |X(\sigma_j)|_2).
{\epsilon}nd{align}
This along with the inductive assumptions yields equa\-tionref{esti-v-Xj} with $j+1$ replacing $k$.
Thus, using the inductive arguments
we conclude that equa\-tionref{esti-v-Xj} holds for all $1\leq j\leq l$.
This yields that
\begin{align} \label{esti-X9-L2}
\|X \|_{V(0,\sigma_l)}
\leq \sum\limits_{k=0}^{l-1}
\|e^{{\varphi}_{\sigma_k}}\|_{C([0,T];L^{\infty})}
C(C_T,D_T, M_T),
{\epsilon}nd{align}
where
$C(C_T,D_T, M_T):=C(C_T,D_T, M_T,0,\sup_{0\leq x\leq M_T} B_0(x))$ is as in Theorem \ref{Thm-Sta-L2},
and $M_T$ is as in Lemma \ref{Lem-bdd-L2}.
{\bf Step $3$.}
We claim that
\begin{align} \label{sigma9-L2}
{\mathbb{P}} (l<{\infty}) =1.
{\epsilon}nd{align}
To this end, we use the contradiction argument.
Suppose that equa\-tionref{sigma9-L2} is not true.
We consider $\omega\in \{l={\infty}\}$.
For simplicity, we omit the argument $\omega$ below.
On one hand, by the definition of $\tau_{j+1}$,
\begin{align*}
g_{\sigma_j}(\tau_{j+1})
= \frac{{\varepsilon}_*(C_{\sigma_{j+1}}, D_{\sigma_{j+1}}, |X(\sigma_j)|_2) } {D_0(|X(\sigma_{j})|_2)}
{\gamma}eq \frac{{\varepsilon}_*(C_T, D_T, M_T) } {D_0(M_T)} >0,
{\epsilon}nd{align*}
where
${\varepsilon}_*(C_T, D_T, M_T) := {\varepsilon}_*(C_T, D_T, M_T, 0,\sup_{0\leq x\leq M_T} B_0(x))$, and
$D_0(M_T):= C_0 \sup_{0\leq x\leq M_T} ((B_0(x)) + (B_0(x))^{1+\frac 4d})$.
On the other hand,
since the processes $t \mapsto \int_0^t g_k(s) d\beta_k(s)$
and $t\mapsto \int_0^t g_k^2 ds$
are $(\frac 12 -\kappa)$-H\"older continuous
for any $\kappa<\frac 12$ and $1\leq k\leq N$,
we have for some positive $C(T)$ (depending on $\omega$)
$$ g_{\sigma_j} (\tau_{j+1})
\leq C(T) (\tau_{j+1})^{\frac 12 - \kappa},\ \ \forall j{\gamma}eq 1.
$$
Thus, we conclude that
$$
\tau_{j+1} {\gamma}eq \left(\frac{{\varepsilon}_*(C_T, D_T, M_T) } {C(T)D_0(M_T)}\right)^{\frac{2}{1-2\kappa}} > 0,\ \ \forall j{\gamma}eq 1.
$$
Thus, for $\omega\in \{l={\infty}\}$,
$$
\sigma_l(\omega)= \sum_{j=1}^{\infty} \tau_{j}(\omega) = {\infty},
$$
which contradicts the fact that $\sigma_l(\omega)\leq (T\wedge \tau^*)(\omega) \leq T<{\infty}$.
This yields equa\-tionref{sigma9-L2}, as claimed.
Now, since $\{l<{\infty}\} \subseteq \{\sigma_l = T\wedge \tau^*\}$,
combining equa\-tionref{esti-X9-L2} and equa\-tionref{sigma9-L2} together
we conclude that
\begin{align} \label{bdd-X-V}
\|X\|_{V(0,\tau^*\wedge T)}
\leq \sum\limits_{k=0}^{l-1}
\|e^{{\varphi}_{\sigma_k}}\|_{C([0,T];L^{\infty})}
C(C_T,D_T, M_T) <{\infty},\ \ a.s..
{\epsilon}nd{align}
This yields equa\-tionref{gloexist-L2}
and so the global well-posedness of equa\-tionref{equa-x}.
Finally,
the estimate equa\-tionref{thm-L2-L2} follows from equa\-tionref{bdd-X-L2},
and the estimate equa\-tionref{thm-L2-Lpq} can be proved standardly by equa\-tionref{bdd-X-V} and
Strichartz estimates.
Therefore, the proof of Theorem \ref{Thm-GWP} $(i)$ is complete.
$\square$
\subsection{Energy-critical case} \label{Subsec-GWP-H1}
We start with the global well-posedness and scattering results
in the deterministic defocusing energy-critical case,
mainly based on the work of I-team \cite{CKSTT08},
Ryckman and Visan \cite{RV07} and Visan \cite{V07}
\footnote{As in the mass-critical case,
although the equation studied in \cite{CKSTT08,RV07,V07}
is $i{\partial}_t u = -{\Delta}elta u + |u|^{\frac {4}{d-2}}u$,
the results there are also valid for equa\-tionref{equa-u-H1}
by reversing the time.}
\begin{theorem} \label{Thm-H1GWP-Det} (\cite{CKSTT08,RV07,V07})
For every $u_0 \in H^1$,
there exists a unique global $H^1$-solution $u$ to the equation
\begin{align} \label{equa-u-H1}
i{\partial}_t u =& {\Delta}elta u - |u|^{\frac{4}{d-2}} u, \\
u(0) =& u_0 \noindent onumber
{\epsilon}nd{align}
with $d{\gamma}eq 3$.
Moreover,
\begin{align} \label{globdd-u-H1-Det}
\|u\|_{S^1({\mathbb{R}}) \cap L^2({\mathbb{R}}; H^\frac 32_{-1})} \leq B_1(|u_0|_{H^1}) <{\infty},
{\epsilon}nd{align}
where $ B_1(|u_0|_{H^1})$ depends continuously on $u_0$ in $H^1$,
and $u$ scatters at infinity, i.e.,
there exist $u_{\partial}m \in H^1$ such that
\begin{align} \label{sca-det-H1}
|e^{it{\Delta}elta} u(t) - u_{\partial}m|_{H^1} \to 0,\ \ as\ t\to {\partial}m{\infty}.
{\epsilon}nd{align}
{\epsilon}nd{theorem}
As in the mass-critical case,
the bound of $\|u\|_{L^2({\mathbb{R}}; H^\frac 32_{-1})}$
follows in a standard way from Strichartz estimates
and the bound of $\|u\|_{S^1({\mathbb{R}})}$,
and the continuous dependence on $u_0$
follows from the energy-critical stability result Lemma $3.8$ of \cite{TVZ07}.
We also need the global energy estimates below.
\begin{lemma} \label{Lem-bdd-H1}
Assume that the conditions of Theorem \ref{Thm-GWP} $(ii)$ hold and $3\leq d\leq 6$.
Define the Hamiltonian of $X$ by
$H(X) :=\frac 12 |\noindent a X|_2^2 - \frac{{\lambda}}{{\alpha}+1} |X|^{{\alpha}+1}_{L^{{\alpha}+1}}$.
Then, for each $X_0\in H^1$, we have ${\mathbb{P}}$-a.s., for any $t\in (0,\tau^*)$,
\begin{align} \label{Ito-H}
H(X(t))
&= H(X_0) - \int_0^t {\rm Re} \, \int \nabla \overline{X} \cdot \nabla(\mu X) dx ds
+ \frac 12 \sum\limits_{k=1}^N \int_0^t \int |\nabla (G_k X)|^2 dx ds \nonumber \\
& - \frac{{\lambda}({\alpha}-1)}{2}\sum\limits_{k=1}^N \int_0^t \int ({\rm Re} \, G_k)^2 |X|^{{\alpha}+1} dx ds \\
&+ \sum\limits_{k=1}^N\int_0^t {\rm Re} \, \int \nabla \overline{X} \cdot \nabla (G_k X) dx d\beta_k(s)
-{\lambda} \sum\limits_{k=1}^N\int_0^t \int {\rm Re} \, G_k |X|^{{\alpha}+1} dx d\beta_k(s). \nonumber
\end{align}
Moreover,
in the defocusing case where ${\lambda} = -1$,
for any
$0<T<{\infty}$, $p{\gamma}eq 1$,
\begin{align} \label{bdd-X-H1}
{\mathbb{E}} \|X\|^p_{C([0,\tau^*\wedge T);H^1)} + {\mathbb{E}} \|X\|^p_{L^{\frac{2d}{d-2}}(0,\tau^* \wedge T;L^{\frac{2d}{d-2}})} \leq C(p,T) <{\infty},
{\epsilon}nd{align}
where $\tau^*$ is the maximal existence time as in Theorem \ref{Thm-LWP}.
In particular,
\begin{align} \label{globdd-E}
E_T:= \sup\limits_{0\leq t\leq \tau^*\wedge T} |X(t)|_{H^1} \leq C(T) <{\infty},\ \ a.s..
{\epsilon}nd{align}
{\epsilon}nd{lemma}
The proof is postponed to the Appendix.
\begin{remark} \label{Rem-globdd-E-d6-proof}
A similar formula was proved in \cite{BRZ16} in the energy-subcritical case
(i.e., ${\alpha}\in (1,1+\frac{4}{d-2})$, $d\geq 3$),
where an approximating procedure was used to derive the It\^o formula of
the corresponding potential energy $|X|^{{\alpha}+1}_{L^{{\alpha}+1}}$.
The arguments there
also apply to the energy-critical case where $3\leq d\leq 6$,
except that we need to apply the stability result Theorem \ref{Thm-Sta-H1-dlow} instead
in the approximating procedure.
\end{remark}
\begin{remark}
In the high-dimensional case where $d>6$,
the It\^o formula \eqref{Ito-H} can also be obtained
by a formal computation;
however, its rigorous derivation is technically unclear.
Indeed,
one cannot use the stability result Theorem \ref{Thm-Sta-H1} to derive \eqref{Ito-H}
as in the case $3\leq d\leq 6$.
The main reason is that in \eqref{Sta-H1-ve} the time function
$g$ is required to satisfy a smallness condition, with a constant
that is even smaller than the one in \eqref{Sta-H1.1} controlling
the difference of the two solutions.
Hence, the approximating procedure used for the case $3\leq d\leq 6$ in the Appendix
does not work.
\end{remark}
We are now ready to prove Theorem \ref{Thm-GWP} $(ii)$.
{\bf Proof of Theorem \ref{Thm-GWP} $(ii)$.} (Energy-Critical Case).
The arguments below are similar to those in the mass-critical case,
but are based on the more delicate stability results Theorems \ref{Thm-Sta-H1-dlow} and \ref{Thm-Sta-H1}.
Below we mainly treat the case $d>6$;
the case $3\leq d\leq 6$ is easier and can be proved similarly by using Theorem \ref{Thm-Sta-H1-dlow}.
Let $X$ be the unique $H^1$-solution to \eqref{equa-x} with ${\alpha}= 1+ \frac{4}{d-2}$
on the maximal time interval $[0,\tau^*)$.
In view of Theorem \ref{Thm-LWP},
we only need to prove \eqref{gloexist-H1} for
any $0<T<{\infty}$.
For this purpose,
we define $g$ as in Step $1$ in the proof of Theorem \ref{Thm-GWP} $(i)$
and let
$\tau_1:= \inf\{t\in (0,T\wedge \tau^*): g(t) \geq {\varepsilon}_1(t)\} \wedge (T\wedge \tau^*)$
with
\begin{align}
{\varepsilon}_1(t):= \frac{{\varepsilon}_*(C_t,D'_t, |X_0|_{H^1})}{D_1(|X_0|_{H^1})},
\end{align}
where
${\varepsilon}_*(C_t,D'_t,|X_0|_{H^1}) := {\varepsilon}_*(C_t,D'_t,\sqrt{2H(X_0)}, B_1(|X_0|_{H^1}))$ is as in Theorem \ref{Thm-Sta-H1},
and $D_1(|X_0|_{H^1}) =1+ C_1(B_1(|X_0|_{H^1}) + (B_1(|X_0|_{H^1}))^{1+\frac{4}{d-2}})$
with $C_1$ as in \eqref{esti-e1-H1} below.
We can take $g$ as the time function in Theorem \ref{Thm-Sta-H1}.
Hence,
$\sup_{0\leq t\leq \tau_1} g(t) \leq {\varepsilon}_*(C_{\tau_1},D'_{\tau_1}, |X_0|_{H^1})$,
and so the smallness condition on $g$ in \eqref{Sta-H1-ve} is satisfied on $[0,\tau_1]$.
Let ${\varphi}$ be as in \eqref{vf} with $\sigma \equiv 0$.
By Theorem \ref{Thm-Rescale-sigma},
$w_1:= e^{-{\varphi}} X$ satisfies \eqref{equa-RNLS} with
$\sigma\equiv 0$ and ${\alpha}= 1+\frac{4}{d-2}$.
Moreover,
let $\widetilde{w}_1$ be the solution to \eqref{equa-NLS}
with ${\alpha}= 1+\frac{4}{d-2}$ and $\widetilde{w}_1(0)= w_1(0) = X_0$.
Then, Theorem \ref{Thm-H1GWP-Det} implies that
\begin{align} \label{esti-u1-L2}
\|\widetilde{w}_1\|_{{\mathbb{W}}(0,\tau_1) \cap L^2(0,\tau_1; H^\frac 32_{-1})}
\leq B_1 (|\widetilde{w}_1(0)|_{H^1}) = B_1 (|X_0|_{H^1}) <{\infty},
\end{align}
and by the conservation law of the Hamiltonian (i.e., $H(\widetilde{w}_1(t))= H(\widetilde{w}_1(0))$),
\begin{align} \label{wtw-H1-tau1-GWP}
\|\widetilde{w}_1\|_{C([0,t];H^1)}
\leq \sqrt{2\sup\limits_{0\leq s\leq t} H(\widetilde{w}_1(s))}
= \sqrt{2H(X_0)},\ \ t\in [0,\tau_1].
\end{align}
For the error term
\begin{align*}
e_{1}
:= - (b \cdot \nabla + c)\widetilde{w}_{1}
- (1- e^{\frac{4}{d-2} {\rm Re} \, {\varphi}}) F(\widetilde{w}_{1}),
\end{align*}
where $b,c$ are as in \eqref{b} and \eqref{c} with $\sigma \equiv 0$, respectively,
using \eqref{ineq-W.2}
and similar arguments as in the proof of \eqref{esti-e1-L2.1}
we have
\begin{align} \label{esti-e1-H1}
&\|e_1\|_{N^1(0,\tau_1) + L^2(0,\tau_1; H^{\frac 12}_1)} \nonumber \\
\leq& \|(b \cdot \nabla + c)\widetilde{w}_{1}\|_{L^2(0,\tau_1; H^\frac 12_1)}
+ \|(1- e^{\frac{4}{d-2}{\rm Re} \, {\varphi}}) F(\widetilde{w}_{1}) \|_{L^2(0,\tau_1; W^{1,\frac{2d}{d+2}})} \nonumber \\
\leq& C \sup\limits_{0\leq t\leq \tau_1} g(t)
(\|\widetilde{w}_1\|_{L^2(0,\tau_1; H^\frac 32_{-1})}
+ \|\widetilde{w}_1\|^{\frac {d+2} {d-2}}_{{\mathbb{W}}(0,\tau_1)}) \nonumber \\
\leq& C_1 (B_1(|X_0|_{H^1}) + (B_1(|X_0|_{H^1}))^{1 + \frac{4}{d-2}}) {\varepsilon}_1(\tau_1) \nonumber \\
\leq& {\varepsilon}_*(C_{\tau_1}, D'_{\tau_1}, |X_0|_{H^1}).
\end{align}
Then,
applying Theorem \ref{Thm-Sta-H1}
we obtain
\begin{align} \label{esti-v1-H1}
\|w_1\|_{{\mathbb{W}}(0,\tau_1)} \leq C(C_{\tau_1}, D'_{\tau_1}, \sqrt{2H(X_0)}, B_1(|X_0|_{H^1})) =:C(C_{\tau_1}, D'_{\tau_1}, |X_0|_{H^1}),
\end{align}
which implies that
\begin{align} \label{bdd-X-bbw-tau1}
\|X\|_{{\mathbb{W}}(0,\tau_1)}
\leq \|e^{\varphi}\|_{C([0,\tau_1];W^{1,{\infty}})} C(C_{\tau_1}, D'_{\tau_1}, |X_0|_{H^1}).
\end{align}
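The passage from \eqref{esti-v1-H1} to \eqref{bdd-X-bbw-tau1} uses the fact that multiplication by $e^{{\varphi}}$ costs at most a $\|e^{{\varphi}}\|_{W^{1,{\infty}}}$ factor on each Strichartz component of the ${\mathbb{W}}$-norm; a minimal sketch of this product estimate, for a generic spatial norm $W^{1,p}$, reads
\begin{align*}
\|e^{{\varphi}} w\|_{W^{1,p}}
\leq \|e^{{\varphi}}\|_{L^{{\infty}}} \big( \|w\|_{L^p} + \|\nabla w\|_{L^p} \big)
+ \|(\nabla {\varphi})\, e^{{\varphi}}\|_{L^{{\infty}}} \|w\|_{L^p}
\leq C\, \|e^{{\varphi}}\|_{W^{1,{\infty}}}\, \|w\|_{W^{1,p}},
\end{align*}
applied here with $w=w_1(t)$ for each fixed $t\in[0,\tau_1]$.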
Thus, \eqref{gloexist-H1} follows from \eqref{bdd-X-bbw-tau1} if $\tau_1\geq \tau^*$.
Otherwise, we turn to the next step.
Let $\tau_j, \sigma_j, g_{\sigma_{j}}$ and $l$
be as in Step 2 in the proof of Theorem \ref{Thm-GWP} $(i)$,
but with
$${\varepsilon}_{j+1}(t) = \frac{{\varepsilon}_*(C_{\sigma_j+t},D'_{\sigma_j+t}, |X(\sigma_j)|_{H^1})}{D_1(|X(\sigma_j)|_{H^1})}, $$
where $D_1(|X(\sigma_j)|_{H^1})$ is defined similarly to $D_1(|X_0|_{H^1})$, and
${\varepsilon}_*(C_{\sigma_j+t},D'_{\sigma_j+t}, |X(\sigma_j)|_{H^1})
:= {\varepsilon}_*(C_{\sigma_j+t},D'_{\sigma_j+t}, \sqrt{2H(X(\sigma_j))}, B_1(|X(\sigma_j)|_{H^1}))$
is as in Theorem \ref{Thm-Sta-H1}.
We use an inductive argument to prove that for any $1\leq j\leq l$,
\begin{align} \label{bdd-X-bbw-H1}
\|X\|_{{\mathbb{W}}(0,\sigma_j)}
\leq \sum\limits_{k=0}^{j-1}
\|e^{{\varphi}_{\sigma_k}}\|_{C([0,\tau_{k+1}];W^{1,{\infty}})}
C(C_{\sigma_{k+1}},D'_{\sigma_{k+1}}, |X(\sigma_k)|_{H^1})<{\infty},
\end{align}
where ${\varphi}_{\sigma_k}$ are as in Theorem \ref{Thm-Rescale-sigma}
with $\sigma_k$ replacing $\sigma$,
and
$C(C_{\sigma_{k+1}},D'_{\sigma_{k+1}}, |X(\sigma_k)|_{H^1})$
$:= C(C_{\sigma_{k+1}},D'_{\sigma_{k+1}}, \sqrt{2H(X(\sigma_k))}, B_1(|X(\sigma_k)|_{H^1}))$
are as in Theorem \ref{Thm-Sta-H1}.
We see from \eqref{bdd-X-bbw-tau1} that \eqref{bdd-X-bbw-H1} holds for $j=1$.
Suppose that \eqref{bdd-X-bbw-H1} holds for each $1\leq k\leq j<l$.
In order to prove \eqref{bdd-X-bbw-H1} with $j+1$ replacing $j$,
we consider the rescaling transformation
$w_{j+1}(t) := e^{-{\varphi}_{\sigma_j}(t)} X(\sigma_j+t)$, $0\leq t < (T\wedge \tau^*)-\sigma_j$,
and apply Theorem \ref{Thm-Rescale-sigma} to obtain
\begin{align} \label{equa-wj1}
i {\partial}_t w_{j+1} &= e^{-{\varphi}_{\sigma_j}}\Delta (e^{{\varphi}_{\sigma_j}}w_{j+1})
- e^{\frac{4}{d-2} {\rm Re} \, {\varphi}_{\sigma_j}} F(w_{j+1}) , \\
w_{j+1}(0)&= X(\sigma_j). \nonumber
\end{align}
Then, we compare \eqref{equa-wj1} with the equation
\begin{align} \label{equa-wtw-j1.0-H1}
i{\partial}_t \widetilde{w}_{j+1}
= \Delta \widetilde{w}_{j+1} - F(\widetilde{w}_{j+1}),
\end{align}
or equivalently,
\begin{align} \label{equa-wtw-j1-H1}
i{\partial}_t \widetilde{w}_{j+1}
= e^{-{\varphi}_{\sigma_j}}\Delta (e^{{\varphi}_{\sigma_j}}\widetilde{w}_{j+1})
- e^{\frac{4}{d-2} {\rm Re} \, {\varphi}_{\sigma_j}} F(\widetilde{w}_{j+1}) + e_{j+1},
\end{align}
with $\widetilde{w}_{j+1}(0) =w_{j+1}(0) = X(\sigma_j)$
and the error term
\begin{align*}
e_{j+1}
= - (b_{\sigma_j}\cdot \nabla + c_{\sigma_j}) \widetilde{w}_{j+1}
- (1-e^{\frac{4}{d-2}{\rm Re} \, {\varphi}_{\sigma_j}}) F(\widetilde{w}_{j+1}).
\end{align*}
Theorem \ref{Thm-H1GWP-Det} yields that
$\widetilde{w}_{j+1}$ exists globally and satisfies
\begin{align}
\|\widetilde{w}_{j+1}\|_{{\mathbb{W}}(0,\tau_{j+1})} \leq B_1 (|\widetilde{w}_{j+1}(0)|_{H^1})
= B_1(|X(\sigma_j)|_{H^1}),
\end{align}
and, similarly to \eqref{wtw-H1-tau1-GWP},
\begin{align}
\|\widetilde{w}_{j+1}\|_{C([0,\tau_{j+1}];H^1)}
\leq \sqrt{2\sup\limits_{0\leq s\leq \tau_{j+1}} H(\widetilde{w}_{j+1}(s))}
= \sqrt{2H(X(\sigma_j))}.
\end{align}
Note that
we can take $g_{\sigma_j}$ as the time function in Theorem \ref{Thm-Sta-H1}.
Then, by the definition of $\tau_{j+1}$,
$\sup_{0\leq t\leq \tau_{j+1}} g_{\sigma_j}(t) \leq {\varepsilon}_*(C_{\sigma_{j+1}},D'_{\sigma_{j+1}},|X({\sigma_{j}})|_{H^1})$,
and so the smallness condition on $g_{\sigma_j}$ in Theorem \ref{Thm-Sta-H1} is satisfied on $[0,\tau_{j+1}]$.
Moreover, similarly to \eqref{esti-e1-H1},
\begin{align} \label{esti-ej1}
&\|e_{j+1}\|_{L^{2}(0,\tau_{j+1}; H^{\frac 12}_1) + N^1(0,\tau_{j+1})} \nonumber \\
\leq& C_1 \sup\limits_{0\leq t\leq \tau_{j+1}} g_{\sigma_j} (t)
(\|\widetilde{w}_{j+1}\|_{L^2(0,\tau_{j+1}; H^\frac 32 _{-1})}
+ \|\widetilde{w}_{j+1}\|^{\frac {d+2}{d-2}}_{{\mathbb{W}}(0,\tau_{j+1})}) \nonumber \\
\leq& C_1(B_1(|X(\sigma_j)|_{H^1}) + (B_1(|X(\sigma_j)|_{H^1}))^{\frac{d+2}{d-2}}) {\varepsilon}_{j+1}(\tau_{j+1}) \nonumber \\
\leq& {\varepsilon}_*(C_{\sigma_{j+1}},D'_{\sigma_{j+1}},|X(\sigma_j)|_{H^1}).
\end{align}
Thus, by virtue of Theorem \ref{Thm-Sta-H1}, we obtain
\begin{align}
\|w_{j+1}\|_{{\mathbb{W}}(0,\tau_{j+1} )}
\leq C(C_{\sigma_{j+1}},D'_{\sigma_{j+1}},|X(\sigma_j)|_{H^1}),
\end{align}
and so
\begin{align*}
\|X\|_{{\mathbb{W}}(\sigma_j, \sigma_{j+1})}
\leq \|e^{ {\varphi}_{\sigma_j}}\|_{C([0,\tau_{j+1}];W^{1,{\infty}})}
C(C_{\sigma_{j+1}},D'_{\sigma_{j+1}},|X(\sigma_j)|_{H^1})<{\infty},
\end{align*}
thereby yielding \eqref{bdd-X-bbw-H1} with $j+1$ replacing $j$.
Therefore, the inductive argument yields
\eqref{bdd-X-bbw-H1} for all $1\leq j\leq l$
and so
\begin{align} \label{bdd-X-W1}
\|X\|_{{\mathbb{W}}(0,\sigma_l)}
\leq& \sum\limits_{k=0}^{l-1}
\|e^{{\varphi}_{\sigma_k}}\|_{C([0,\tau_{k+1}];W^{1,{\infty}})}
C(C_{\sigma_{k+1}},D'_{\sigma_{k+1}}, |X(\sigma_k)|_{H^1}) \nonumber \\
\leq& \sum\limits_{k=0}^{l-1}
\|e^{{\varphi}_{\sigma_k}}\|_{C([0,\tau_{k+1}];W^{1,{\infty}})}
C(C_T,D'_T, C' E_T, \sup\limits_{0\leq x\leq E_T} B_1(x)),
\end{align}
where $E_T$ is as in \eqref{bdd-E-assum-d6},
and in the last step we also used the inequality
$\sup_{0\leq t<\tau^*} \sqrt{2H(X(t))} \leq C'E_T$,
which is implied by the Sobolev embedding.
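For clarity, we sketch this last step. In the defocusing case ${\lambda}=-1$ with ${\alpha}+1=\frac{2d}{d-2}$, recalling the Hamiltonian $H$ from Lemma \ref{Lem-bdd-H1} and using the Sobolev embedding $H^1 \hookrightarrow L^{\frac{2d}{d-2}}$, we have, on the time interval under consideration,
\begin{align*}
2H(X(t)) = |\nabla X(t)|_2^2 + \frac{d-2}{d}\, |X(t)|_{L^{\frac{2d}{d-2}}}^{\frac{2d}{d-2}}
\leq |X(t)|_{H^1}^2 + C |X(t)|_{H^1}^{\frac{2d}{d-2}}
\leq E_T^2 + C E_T^{\frac{2d}{d-2}},
\end{align*}
so that $\sup_{0\leq t<\tau^*}\sqrt{2H(X(t))} \leq C'E_T$ with a constant $C'=C'(E_T)$, which is finite almost surely.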
Since, by \eqref{bdd-E-assum-d6},
$E_T<{\infty}$, a.s.,
and for any $0\leq j\leq l-1$,
\begin{align*}
g_{\sigma_j}(\tau_{j+1})
=\frac{{\varepsilon}_*(C_{\sigma_{j+1}},D'_{\sigma_{j+1}},|X(\sigma_j)|_{H^1})}{D_1(|X(\sigma_j)|_{H^1})}
\geq \frac{{\varepsilon}_*(C_T,D_T',C'E_T, \sup\limits_{0\leq x\leq E_T}B_1(x))}{ \sup\limits_{0\leq x\leq E_T} D_1(x)}
>0,
\end{align*}
we can use similar arguments as in Step $3$
in the proof of Theorem \ref{Thm-GWP} $(i)$ to
deduce that $l<{\infty}$, a.s.,
which together with \eqref{bdd-X-W1}
yields the global bound \eqref{gloexist-H1},
thereby implying the global existence of the solution $X$ to \eqref{equa-x}.
Finally, the estimate \eqref{thm-H1-H1} follows from Lemma \ref{Lem-bdd-H1},
and \eqref{thm-H1-LpWq} follows from \eqref{bdd-X-W1} and the Strichartz estimates.
The proof of Theorem \ref{Thm-GWP} is complete.
$\square$
\section{Scattering} \label{Sec-Sca}
In this section we prove the scattering behavior of global solutions to \eqref{equa-x}.
The idea here is based on the very recent work \cite{HRZ18}.
More precisely, we use a new rescaling transformation \eqref{z*},
i.e., $z_*= e^{-{\varphi}_*}X$
with ${\varphi}_*$ as in \eqref{vf*},
and compare the resulting random equation \eqref{equa-RNLS-Sca}
with \eqref{equa-NLS},
but after some large time $T$, i.e.,
\begin{align} \label{equa-NLS-Sca}
i{\partial}_t u =& \Delta u- |u|^{{\alpha}-1}u, \\
u(T) =& z_*(T). \nonumber
\end{align}
Let us start with the mass-critical case.
\subsection{Mass-critical case}
First we extend the bounds \eqref{bdd-X-L2} and \eqref{globdd-M}
to the whole time interval,
under the condition that $g_k\in L^2({\mathbb{R}}^+)$, a.s..
\begin{lemma} \label{Lem-globdd-X-L2}
Consider the situations in Theorem \ref{Thm-Sca} $(i)$.
Then, for any $p{\gamma}eq 1$,
\begin{align} \label{globdd-EX-L2}
{\mathbb{E}} \sup\limits_{0\leq t<{\infty}} |X(t)|_2^p \leq C(p) <{\infty}.
{\epsilon}nd{align}
In particular, we have the global pathwise bound
\begin{align} \label{globdd-X-L2}
M_{\infty}:= \sup\limits_{0\leq t<{\infty}} |X(t)|_2 \leq C <{\infty},\ \ a.s..
{\epsilon}nd{align}
{\epsilon}nd{lemma}
{\bf Proof.} Estimate \eqref{globdd-EX-L2} can be proved by using
the It\^o formula \eqref{Ito-L2}
and similar arguments as in the proof of \cite[(1.7)]{HRZ18}.
We omit the details here for simplicity.
$\square$ \\
We also have the following uniform bound, with a constant independent of $T$.
\begin{lemma} \label{Lem-globdd-u-L2}
For $z_*(T) \in L^2$,
there exists a unique global $L^2$-solution $u$ (depending on $T$)
to equa\-tionref{equa-NLS-Sca} with ${\alpha}=1+\frac 4d$, $d{\gamma}eq 1$,
which scatters at infinity and satisfies
\begin{align} \label{globdd-u-Sca-L2}
\|u\|_{S^0(T,{\infty}) \cap L^2(T,{\infty};H^\frac 12_{-1})} \leq C <{\infty},\ \ a.s.,
{\epsilon}nd{align}
where $C$ is independent of $T$.
{\epsilon}nd{lemma}
{\bf Proof.}
For each $z_*(T)\in L^2$ fixed,
the global well-posedness and scattering follow from Theorem \ref{Thm-L2GWP-Det}.
Regarding equa\-tionref{globdd-u-Sca-L2},
applying the global-in-time Strichartz estimates in Theorem \ref{Thm-Stri*} to equa\-tionref{equa-NLS*}
and using the H\"older inequality equa\-tionref{ineq-V}
we have that for any $t>T$,
\begin{align*}
\|u\|_{L^2(T,t;H^\frac 12_{-1})} + \|u\|_{S^0(T,t)}
\leq C|u(T)|_2 + C \|u\|_{V(T,t)}^{1+\frac 4d}
{\epsilon}nd{align*}
with $C$ independent of $T$ and $t$.
Using \eqref{globdd-u-L2-Det}
and the fact that $u(T)=z_*(T)$, we get
\begin{align*}
\|u\|_{L^2(T,t;H^\frac 12_{-1})} + \|u\|_{S^0(T,t)}
\leq C|z_*(T)|_2
+ C (B_0(|z_*(T)|_2))^{1+\frac 4d}.
{\epsilon}nd{align*}
Since $g_k\in L^2({\mathbb{R}}^+)$, a.s.,
we have ${\varphi}_* \in C({\mathbb{R}}^+; L^{\infty})$,
and so
$|z_*(T)|_2 \leq C|X(T)|_2$ with
$C$ independent of $T$.
In view of the global bound equa\-tionref{globdd-X-L2},
we obtain
\begin{align*}
\|u\|_{L^2(T,t;H^\frac 12_{-1})} + \|u\|_{S^0(T,t)}
\leq CM_{\infty} + C \sup\limits_{0\leq x\leq M_{\infty}} (B_0(x))^{1+\frac 4d}
<{\infty}
{\epsilon}nd{align*}
with $C, M_{\infty}$ independent of $T$ and $t$.
Thus, letting $t\to {\infty}$, we obtain equa\-tionref{globdd-u-Sca-L2}.
$\square$
The following result
is crucial for the scattering in the mass-critical case.
\begin{lemma} \label{Lem-z*u-0-L2}
Consider the situations in Theorem \ref{Thm-Sca} $(i)$.
Let $u$ be the solution to equa\-tionref{equa-NLS-Sca}
with $u(T) = z_*(T)$.
Then, ${\mathbb{P}}$-a.s.
as $T \to {\infty}$,
\begin{align} \label{z*u-0-scaL2}
\|z_*- u\|_{S^0(T,{\infty}) \cap L^2(T,{\infty}; H^{\frac 12}_{-1})} \to 0.
{\epsilon}nd{align}
{\epsilon}nd{lemma}
{\bf Proof.}
We use the idea of comparison as in the proof of Theorem \ref{Thm-GWP} $(i)$.
Precisely,
we compare the solution $z_*$ to equa\-tionref{equa-RNLS-Sca}
with the solution $u$ to equa\-tionref{equa-NLS-Sca}.
For this purpose,
we rewrite equa\-tionref{equa-NLS-Sca} with ${\alpha}=1+\frac 4d$ as follows
\begin{align*}
i{\partial}_t u = e^{-{\varphi}_*}{\Delta}elta (e^{{\varphi}_*} u)
- e^{\frac 4d {\rm Re} \, {\varphi}_*} F(u) + e,
{\epsilon}nd{align*}
with the error term
\begin{align*}
e = -(b_*\cdot \nabla + c_*) u
- (1-e^{\frac 4d {\rm Re} \, {\varphi}_*}) F(u).
\end{align*}
Since $g_k\in L^2({\mathbb{R}}^+)$, a.s.,
$1\leq k\leq N$,
for any multi-index ${\gamma}$, as $T\to {\infty}$,
\begin{align} \label{part-vf*}
\sup\limits_{t\geq T} |{\partial}_x^{\gamma} {\varphi}_*(t,x)|
\leq C \left<x\right>^{-2} \sup\limits_{t\geq T} \sum\limits_{k=1}^N
\left( \bigg|\int_t^{\infty} g_k d\beta_k \bigg| + \int_t^{\infty} g_k^2 ds \right) \to 0.
\end{align}
Hence, for $T$ large enough,
Theorem \ref{Thm-Stri*} yields that the
global-in-time Strichartz and local smoothing estimates hold for the operator
$e^{-{\varphi}_*}\Delta(e^{{\varphi}_*} \cdot)$.
Note that, for any $t\geq T$,
\begin{align} \label{esti-u-scaL2.0}
\|e\|_{N^0(T,t) + L^2(T,t;H^{-\frac 12}_{1})}
\leq& \|(b_*\cdot \nabla + c_*) u\|_{L^2(T,t;H^{-\frac 12}_{1})} \nonumber \\
& + \|(e^{\frac 4d {\rm Re} \, {\varphi}_*}-1) F(u)\|_{L^{\frac{2d+4}{d+4}}((T,t)\times {\mathbb{R}}^d)}.
\end{align}
Since $g_k\in L^2({\mathbb{R}}^+)$, a.s.,
we have
\begin{align} \label{ve1-u-scaL2}
{\varepsilon}_1(T)
:= \sup\limits_{t\geq T} \sum\limits_{k=1}^N
\left( \bigg|\int_t^{\infty} g_k d\beta_k \bigg| + \int_t^{\infty} g_k^2 ds \right) \to 0, \ \ as\ T\to {\infty}, \ a.s..
\end{align}
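This convergence can be justified by the following brief sketch: since $\int_0^{{\infty}} g_k^2 ds<{\infty}$, a.s., the continuous local martingale $M_k(t):=\int_0^t g_k d\beta_k$ converges a.s. to a finite limit $M_k({\infty})$ as $t\to{\infty}$, and hence
\begin{align*}
\sup\limits_{t\geq T}\bigg|\int_t^{{\infty}} g_k d\beta_k\bigg|
= \sup\limits_{t\geq T}|M_k({\infty}) - M_k(t)| \to 0, \qquad
\sup\limits_{t\geq T}\int_t^{{\infty}} g_k^2 ds = \int_T^{{\infty}} g_k^2 ds \to 0,
\end{align*}
as $T\to{\infty}$, a.s., the second limit holding because the tail of a convergent integral vanishes.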
Then, estimating as in \eqref{esti-e1-L2.1}, we get
\begin{align} \label{esti-u-scaL2.1}
\|(b_*\cdot \nabla + c_*)u\|_{L^2(T,t;H^{-\frac 12}_{1})}
\leq C {\varepsilon}_1(T) \|u\|_{L^2(T,t;H^{\frac 12}_{-1})},
\end{align}
where $C$ is independent of $T$ and $t$.
Moreover,
using again that $g_k\in L^2({\mathbb{R}}^+)$, a.s.,
we deduce that
\begin{align} \label{ve2-u-scaL2}
{\varepsilon}_2 (T): = \sup\limits_{t\geq T} \|{\varphi}_*(t)\|_{W^{1,{\infty}}} \to 0,\ \ as\ T\to {\infty},\ a.s..
\end{align}
Then, using the inequality $|e^x-1|\leq e|x|$ for $|x|\leq 1$ and \eqref{ineq-V},
we get
\begin{align} \label{esti-u-scaL2.2}
\|(e^{\frac 4d {\rm Re} \, {\varphi}_*}-1) F(u) \|_{N^0(T,t)}
\leq C {\varepsilon}_2(T) \|u\|_{V(T,t)}^{1+\frac 4d}.
\end{align}
Plugging \eqref{esti-u-scaL2.1} and \eqref{esti-u-scaL2.2} into \eqref{esti-u-scaL2.0}
and using \eqref{globdd-u-Sca-L2} we obtain that
\begin{align} \label{esti-e-Sca-L2}
\|e\|_{N^0(T,t) + L^2(T,t;H^{-\frac 12}_{1}) }
\leq C {\varepsilon}(T) (\|u\|_{ L^2(T,t;H^{\frac 12}_{-1})} + \|u\|_{V(T,t)}^{1+\frac 4d})
\leq C {\varepsilon}(T),
\end{align}
where ${\varepsilon}(T): = \max\{{\varepsilon}_1(T), {\varepsilon}_2(T)\}$,
and $C$ is independent of $T$ and $t$,
due to \eqref{globdd-u-Sca-L2}.
Thus,
in view of Remark \ref{Rem-glob-Sta-L2}
and equa\-tionref{globdd-u-Sca-L2},
we obtain that for $T$ large enough,
\begin{align} \label{esti-z*-u-Sca-L2}
\|z_* - u\|_{S^0(T,t) \cap L^2(T,t;H^{\frac 12}_{-1})} \leq C {\varepsilon}(T),
{\epsilon}nd{align}
where $C$ is independent of $T$ and $t$.
(Note that, since $|z_*(T)-u(T)|_2 =0$,
we can choose $M'={\varepsilon}(T)$ when applying
the stability result.)
Therefore, letting $t\to {\infty}$ in equa\-tionref{esti-z*-u-Sca-L2}
and using equa\-tionref{ve1-u-scaL2}, equa\-tionref{ve2-u-scaL2}
we obtain equa\-tionref{z*u-0-scaL2}.
$\square$ \\
{\bf Proof of Theorem \ref{Thm-Sca} $(i)$} (Mass-Critical Case).
Let $u$ be as in Lemma \ref{Lem-globdd-u-L2}.
We have ${\mathbb{P}}$-a.s. for any $t_1, t_2 {\gamma}eq T$,
\begin{align*}
|e^{it_1{\Delta}elta} z_*(t_1) - e^{it_2{\Delta}elta} z_*(t_2) |_2
\leq& |e^{it_1{\Delta}elta} (z_*-u)(t_1) - e^{it_2{\Delta}elta} (z_*-u)(t_2) |_2 \\
& + |e^{it_1{\Delta}elta} u(t_1) - e^{it_2{\Delta}elta} u(t_2) |_2.
{\epsilon}nd{align*}
By Lemma \ref{Lem-globdd-u-L2}, the scattering of $u$ yields
\begin{align*}
|e^{it_1{\Delta}elta} u(t_1) - e^{it_2{\Delta}elta} u(t_2) |_2 \to 0,\ \ as\ t_1, t_2 \to {\infty}.
{\epsilon}nd{align*}
Hence, taking into account equa\-tionref{z*u-0-scaL2} we obtain
\begin{align*}
\limsup\limits_{t_1,t_2\to {\infty}}
|e^{it_1{\Delta}elta} z_*(t_1) - e^{it_2{\Delta}elta} z_*(t_2) |_2
\leq& \limsup\limits_{t_1,t_2\to {\infty}}|e^{it_1{\Delta}elta} (z_*-u)(t_1) - e^{it_2{\Delta}elta} (z_*-u)(t_2) |_2 \\
\leq& 2\|z_*-u\|_{C([T,{\infty});L^2)} \to 0, \ \ as\ T\to {\infty}, \ a.s..
{\epsilon}nd{align*}
This implies that $\{e^{it{\Delta}elta} z_*(t)\}$ is a Cauchy sequence in $L^2$,
thereby yielding equa\-tionref{Sca-L2.1}.
Next we prove equa\-tionref{Sca-L2.2}.
Recall that $U_*(t,s)$, $s,t{\gamma}eq 0$,
are the evolution operators related to the operators $e^{-{\varphi}_*}{\Delta}elta(e^{{\varphi}_*} \cdot)$, $t{\gamma}eq 0$.
Then, by Equation equa\-tionref{equa-RNLS-Sca},
\begin{align*}
z_*(t) = U_*(t,0)X_0
+ i \int_0^t U_*(t,s)e^{\frac 4d {\rm Re} \,{\varphi}_*} F(z_*) ds,\ \ t{\gamma}eq 0.
{\epsilon}nd{align*}
Since $U_*(0,t) U_*(t,s) = U_*(0,s)$ for $s{\gamma}eq 0$,
applying $U_*(0,t)$ to both sides we get
\begin{align*}
U_*(0,t)z_*(t)
=X_0 + i \int_0^t U_*(0,s) e^{\frac 4d {\rm Re} \,{\varphi}_*} F(z_*)ds,
{\epsilon}nd{align*}
which implies that for any $0<t_1<t_2<{\infty}$,
\begin{align} \label{equa-z*-Sca-L2}
U_*(0,t_2) z_*(t_2) - U_*(0,t_1)z_*(t_1)
=& i \int_{t_1}^{t_2} U_*(0,s) e^{\frac 4d {\rm Re} \,{\varphi}_*} F(z_*)ds \noindent onumber \\
=& U_*(0,t_2) \left(i \int_{t_1}^{t_2} U_*(t_2,s) e^{\frac 4d {\rm Re} \,{\varphi}_*} F(z_*) ds\right) \noindent onumber \\
=:& U_*(0,t_2) w(t_2).
\end{align}
Thus,
applying homogeneous Strichartz estimates in Theorem \ref{Thm-Stri*}
leads to
\begin{align} \label{V-t12.1}
| U_*(0,t_2) z_*(t_2) - U_*(0,t_1)z_*(t_1) |_2
\leq \|U_*(\cdot,t_2) w(t_2)\|_{C([0,t_2];L^2)}
\leq C|w(t_2)|_2,
{\epsilon}nd{align}
where $C$ is independent of $t_1, t_2$.
Moreover, since
$w(\cdot)$ satisfies equa\-tionref{equa-RNLS}
with the initial datum $w(t_1)=0$,
applying Theorem \ref{Thm-Stri*} again and using equa\-tionref{ineq-V} we obtain
\begin{align} \label{V-t12.2}
|w(t_2)|_2
\leq \|w\|_{C([t_1,t_2];L^2)}
\leq C \|e^{\frac 4d {\rm Re} \,{\varphi}_*} F(z_*) \|_{L^{\frac{2d+4}{d+4}}((t_1, t_2)\times {\mathbb{R}}^d)}
\leq C \|z_*\|^{1+\frac 4d}_{V(t_1,t_2)},
{\epsilon}nd{align}
where $C$ is independent of $t_1,t_2$,
due to the global-in-time Strichartz estimates
and that $e^{\frac 4d {\rm Re} \, {\varphi}_*} \in C({\mathbb{R}}^+; L^{\infty})$, a.s..
Moreover, taking into account equa\-tionref{globdd-u-Sca-L2} and equa\-tionref{z*u-0-scaL2} we have
\begin{align} \label{globdd-z*-sca-L2}
\|z_*\|_{V(T,{\infty})}
\leq \|u\|_{V(T,{\infty})} + \|z_*-u\|_{V(T,{\infty})} <{\infty},\ \ a.s..
{\epsilon}nd{align}
Thus, plugging equa\-tionref{V-t12.2} into equa\-tionref{V-t12.1}
and using the global bound equa\-tionref{globdd-z*-sca-L2}
we obtain
\begin{align*}
|U_*(0,t_2) z_*(t_2) - U_*(0,t_1)z_*(t_1)|_2 \leq C\|z_*\|_{V(t_1,t_2)}^{1+\frac 4d} \to 0,\ \ as\ t_1,t_2\to {\infty},\ a.s..
{\epsilon}nd{align*}
This implies that
$\{U_*(0,t)z_*(t)\}$ is a Cauchy sequence in $L^2$,
and so equa\-tionref{Sca-L2.2} follows.
The proof of Theorem \ref{Thm-Sca} $(i)$ is complete.
\subsection{Energy-critical and pseudo-conformal cases}
As in the mass-critical case, we have the global bound of
solutions $X$ and $u$ below.
\begin{lemma} (\cite{HRZ18}) \label{Lem-globdd-X-H1}
Consider the situations in Theorem \ref{Thm-Sca} $(ii)$
with $3\leq d\leq 6$.
Then, for any $p\geq 1$,
\begin{align} \label{globdd-EX-H1}
{\mathbb{E}} \sup\limits_{0\leq t<{\infty}} \big( |X(t)|_{H^1}^p + |X(t)|^p_{L^\frac{2d}{d-2}} \big) \leq C(p) <{\infty}.
\end{align}
In particular,
\begin{align} \label{globdd-X-H1}
E_{{\infty}}:= \sup\limits_{0\leq t<{\infty}} |X(t)|_{H^1} \leq C <{\infty},\ \ a.s..
{\epsilon}nd{align}
{\epsilon}nd{lemma}
\begin{lemma} \label{Lem-globdd-u-H1}
For $z_*(T) \in H^1$,
there exists a unique global $H^1$-solution $u$ (depending on $T$)
to equa\-tionref{equa-NLS-Sca} with ${\alpha}= 1+\frac{4}{d-2}$, $d{\gamma}eq 3$,
which scatters at infinity and satisfies
\begin{align} \label{globdd-u-Sca-H1}
\|u\|_{S^1(T, {\infty}) \cap L^2(T,{\infty}; H^{\frac 32}_{-1})} \leq C <{\infty},\ \ a.s.,
{\epsilon}nd{align}
where $C$ is independent of $T$.
{\epsilon}nd{lemma}
{\bf Proof.}
The proof is analogous to that of Lemma \ref{Lem-globdd-u-L2}.
First,
the global well-posedness and scattering in the space $H^1$
follows from Theorem \ref{Thm-H1GWP-Det}.
In order to prove equa\-tionref{globdd-u-Sca-H1},
applying Theorem \ref{Thm-Stri*} to equa\-tionref{equa-NLS-Sca}
and using equa\-tionref{ineq-W.2} and equa\-tionref{globdd-u-H1-Det} we obtain
that for any $t>T$,
\begin{align*}
\|u\|_{L^2(T,t; H^\frac 32_{-1})}
+ \|u\|_{S^1(T,t)}
\leq& C|u(T)|_{H^1} + C\|u\|_{{\mathbb{W}}(T,t)}^{\frac{d+2}{d-2}} \\
\leq& C|z_*(T)|_{H^1} + C(B_1(\|z_*(T)\|_{H^1}))^{\frac{d+2}{d-2}},
{\epsilon}nd{align*}
where $C$ is independent of $T$ and $t$.
Since
$|z_*(T)|_{H^1} \leq \|e^{{\varphi}_*}\|_{C({\mathbb{R}}^+; W^{1,{\infty}})} |X(T)|_{H^1}$,
using equa\-tionref{globdd-X-H1} and letting $t\to {\infty}$ we prove equa\-tionref{globdd-u-Sca-H1}.
$\square$
We have the crucial asymptotics of difference between the solutions $z_*$ and $u$.
\begin{lemma} \label{Lem-z*u-0-H1}
Consider the situations in Theorem \ref{Thm-Sca} $(ii)$.
Let $u$ be the solution to equa\-tionref{equa-NLS-Sca} with ${\alpha}= 1+\frac {4}{d-2}$,
$u(T) = z_*(T)$, $d{\gamma}eq 3$.
Then,
\begin{align} \label{z*u-0-scaH1}
\|z_*- u\|_{S^1(T,{\infty}) \cap L^2(T,{\infty}; H^\frac 32_{-1})} \to 0, \ \ as\ T\to {\infty},\ a.s..
{\epsilon}nd{align}
{\epsilon}nd{lemma}
{\bf Proof.}
The case $3\leq d\leq 6$ was proved in the recent work \cite{HRZ18}
under a weaker condition on $\phi_k$.
Below we mainly consider the high-dimensional case $d>6$.
We shall apply Theorem \ref{Thm-Sta-H1} to compare the solutions $z_*$ and $u$.
For this purpose, we reformulate Equation equa\-tionref{equa-NLS-Sca} as follows
\begin{align*}
i {\partial}_t u = e^{-{\varphi}_*}{\Delta}elta (e^{{\varphi}_*} u)
- e^{\frac{4}{d-2}{\rm Re} \, {\varphi}_*} F(u) + e,
{\epsilon}nd{align*}
with the error term
\begin{align*}
e = -(b_*\cdot \noindent a + c_*) u
- (1-e^{\frac{4}{d-2}{\rm Re} \, {\varphi}_*}) F(u).
{\epsilon}nd{align*}
We see that,
equa\-tionref{part-vf*} implies that for $T$ large enough,
\begin{align*}
\sup\limits_{t{\gamma}eq T} g(t)
:= \sup\limits_{t{\gamma}eq T} \sum\limits_{k=1}^N
\left(\bigg| \int_t^{\infty} g_k d\beta_k \bigg| + \int_t^{\infty} g_k^2 ds\right)
\leq {\varepsilon},
{\epsilon}nd{align*}
so the smallness condition on $g$ in Theorem \ref{Thm-Sta-H1}
is satisfied on $[T,t]$ for any $t>T$.
Regarding the error term,
similarly to \eqref{esti-e-Sca-L2},
\begin{align*}
\|e\|_{ N^1(T,t) + L^2(T,t;H^{\frac 32}_{-1})}
\leq C {\varepsilon}(T)
( \|u\|_{L^2(T,t;H^{\frac 32}_{-1})}
+ \|u\|^{\frac{d+2}{d-2}}_{{\mathbb{W}}(T,t)})
\leq C {\varepsilon}(T) \to 0,\ \ as\ T\to {\infty},
\end{align*}
where $C$ is independent of $T$ and $t$ due to \eqref{globdd-u-Sca-H1},
and ${\varepsilon}(T)$ is as in \eqref{esti-e-Sca-L2}.
Thus, by virtue of Theorem \ref{Thm-Sta-H1}
we deduce that
there exist $c, C>0$, independent of $T$ and $t$, such that
\begin{align*}
\|z_* - u \|_{S^1(T,t) \cap L^2(T,t;H^{\frac 32}_{-1})}
\leq C ({\varepsilon}(T))^c.
{\epsilon}nd{align*}
Therefore,
letting $t\to {\infty}$ and then taking $T \to {\infty}$ we obtain equa\-tionref{z*u-0-scaH1}.
$\square$ \\
Now, we are ready to prove Theorem \ref{Thm-Sca} $(ii)$.
{\bf Proof of Theorem \ref{Thm-Sca} $(ii)$.}
In the case where $3\leq d\leq 6$,
because of the global well-posedness of equa\-tionref{equa-x} and the estimate equa\-tionref{thm-H1-LpWq} in
Theorem \ref{Thm-GWP} $(ii)$,
Assumption $(H0')$ in \cite{HRZ18} in the energy-critical case is satisfied.
Thus, the asymptotics equa\-tionref{Sca-L2.1} and equa\-tionref{Sca-L2.2} with $H^1$ replacing $L^2$
follow from Theorem $1.4$ in the recent work \cite{HRZ18}.
Below we consider the case where $d>6$.
The proof is similar to that of the mass-critical case,
thanks to Lemmas \ref{Lem-globdd-X-H1}, \ref{Lem-globdd-u-H1} and \ref{Lem-z*u-0-H1}.
Actually, let $u$ be as in Lemma \ref{Lem-z*u-0-H1}. We have
for any $t_1,t_2 {\gamma}eq T$,
\begin{align*}
|e^{it_1{\Delta}elta} z_*(t_1) - e^{it_2{\Delta}elta} z_*(t_2) |_{H^1}
\leq |e^{it_1{\Delta}elta} u(t_1) - e^{it_2{\Delta}elta} u(t_2) |_{H^1}
+ 2 \| z_*-u \|_{C([T,{\infty});H^1)}.
{\epsilon}nd{align*}
Then, we first use the scattering of $u$ in Lemma \ref{Lem-globdd-u-H1} to
pass to the limits $t_1,t_2 \to {\infty}$,
and then we use Lemma \ref{Lem-z*u-0-H1} to take the limit $T\to {\infty}$.
It follows that
\begin{align*}
\limsup\limits_{t_1,t_2\to {\infty}}
|e^{it_1{\Delta}elta} z_*(t_1) - e^{it_2{\Delta}elta} z_*(t_2) |_{H^1}
\leq 2 \| z_*-u \|_{C([T,{\infty});H^1)} \to 0,\ as\ T\to {\infty},\ a.s..
{\epsilon}nd{align*}
This yields that $\{e^{it{\Delta}elta} z_*(t)\}$ is a Cauchy sequence in $H^1$,
thereby implying equa\-tionref{Sca-L2.1} with $H^1$ replacing $L^2$.
Moreover, applying Theorem \ref{Thm-Stri*} to equa\-tionref{equa-z*-Sca-L2}
with $\frac{4}{d-2}$ replacing $\frac 4d$ we get
\begin{align*}
|U_*(0,t_2)z_*(t_2) - U_*(0,t_1)z_*(t_1)|_{H^1}
\leq C \|e^{\frac{4}{d-2} {\rm Re} \, {\varphi}_*}F(z_*)\|_{N^1(t_1,t_2)}
\leq C \| z_*\|^{\frac{d+2}{d-2}}_{{\mathbb{W}}(t_1,t_2)} ,
{\epsilon}nd{align*}
where $C$ is independent of $t_1$ and $t_2$.
Then, taking into account the global pathwise bound of $z_*$
implied by \eqref{globdd-u-Sca-H1} and \eqref{z*u-0-scaH1},
we pass to the limits $t_1,t_2\to{\infty}$ to
obtain that the right-hand side above tends to $0$ almost surely.
This yields that
$\{U_*(0,t)z_*(t)\}$ is a Cauchy sequence in $H^1$,
thereby implying equa\-tionref{Sca-L2.2} with $H^1$ replacing $L^2$.
Therefore, the proof of Theorem \ref{Thm-Sca} $(ii)$ is complete.
$\square$\\
{\bf Proof of Theorem \ref{Thm-Sca} $(iii)$.}
In view of the global well-posedness of equa\-tionref{equa-x},
we see that Assumption $(H0')$ in \cite{HRZ18} in the energy-critical case is satisfied.
Thus, the asymptotic equa\-tionref{Sca-L2.1} with $\Sigma$ replacing $L^2$
follows from Theorem $1.3$ in the recent work \cite{HRZ18}.
$\square$
\begin{remark}
In $(H0')$ of \cite{HRZ18},
the assumption on the boundedness of $\||\cdot|X\|_{L^{\gamma}(0,T; L^\rho)}$
is redundant,
which can be deduced from Theorem $1.2$ and Lemma $2.1$ of \cite{HRZ18}.
{\epsilon}nd{remark}
We close this section with the proof of Theorem \ref{Thm-S0S1-Global}.
{\bf Proof of Theorem \ref{Thm-S0S1-Global}.}
In the mass-critical case,
the global bound equa\-tionref{globbd-L2-Lpq} follows from
equa\-tionref{thm-L2-Lpq} and Lemma \ref{Lem-globdd-u-L2}.
In the energy-critical case,
the global bound equa\-tionref{globdd-H1-LpWq}
is a consequence of equa\-tionref{thm-H1-LpWq} and
Lemma \ref{Lem-globdd-u-H1}.
$\square$
\section{Support theorem} \label{Sec-Supp}
In this section we prove Theorem \ref{Thm-Supp}
concerning the support theorem for equa\-tionref{equa-x}.
We combine the idea of \cite{MS94}
with the stability results in Section \ref{Sec-Sta}.
Recall that, for any $h=(h_1,\cdots,h_N)\in \mathscr{H}$
(i.e., the Cameron-Martin space),
$X(\beta+h)$ denotes the solution to equa\-tionref{equa-x}
with the driving processes $\beta_k+h_k$
replacing the Brownian motions $\beta_k$,
$1\leq k\leq N$,
$S(h)$ denotes the controlled solution to equa\-tionref{equa-Sh}.
The global existence and uniqueness of $X(\beta+h)$ and $S(h)$ can be proved similarly as in Section \ref{Sec-GWP}.
In view of Proposition $2.2$ in \cite{MS94},
we only need to prove that, for any ${\varepsilon} >0$,
\begin{align}
& \lim\limits_{n\to {\infty}} {\mathbb{P}} (\|S(\beta^n) - X(\beta)\|_{\mathcal{X}(0,T)}{\gamma}eq {\varepsilon}) = 0, \label{0-Sn-X-E}\\
& \lim\limits_{n\to {\infty}} {\mathbb{P}} (\|X(\beta^n-\beta+h) - S(h)\|_{\mathcal{X}(0,T)}{\gamma}eq {\varepsilon})=0, \label{0-Xnh-Sh-E}
{\epsilon}nd{align}
where
$\mathcal{X}(0,T) = S^0(0,T)\cap L^2(0,T; H^\frac 12_{-1})$
or
$\mathcal{X}(0,T) = S^1(0,T)\cap L^2(0,T; H^\frac 32_{-1})$
in the mass-critical or energy-critical case, respectively,
and $\beta^n$ are the adapted linear interpolations of the Brownian motions as in \cite{MS94}, defined by
\begin{align*}
\beta^n(t) = \beta(\overline{t}_n) + 2^n(t-\tilde{t}_n)\big(\beta(\tilde{t}_n) - \beta(\overline{t}_n)\big),
\end{align*}
where $\tilde{t}_n = \frac{k}{2^n}$ and $\overline{t}_n = \frac{k-1}{2^n} \vee 0$ if $\frac{k}{2^n} \leq t< \frac{k+1}{2^n}$.
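In particular, a direct computation from this definition shows that on each dyadic interval $\beta^n$ has the piecewise constant derivative
\begin{align*}
\dot{\beta}^n(t) = 2^n\big(\beta(\tilde{t}_n) - \beta(\overline{t}_n)\big)
= 2^n\Big(\beta\big(\tfrac{k}{2^n}\big) - \beta\big(\tfrac{k-1}{2^n}\vee 0\big)\Big),
\qquad \tfrac{k}{2^n}\leq t< \tfrac{k+1}{2^n},
\end{align*}
which depends only on increments of $\beta$ up to time $\frac{k}{2^n}\leq t$; this adaptedness is what is used in the proof of Lemma \ref{Lem-betan-beta} below.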
For this purpose,
we first prove the asymptotic result below.
\begin{lemma} \label{Lem-betan-beta}
Assume $g_k$ are deterministic and continuous, $1\leq k\leq N$.
Then,
\begin{align} \label{E-betan-beta-0}
{\mathbb{E}} \left(\sup\limits_{t\in [0,T]}
\bigg|\int_0^t g_k(s) \dot\beta_k^n(s) ds - \int_0^t g_k(s) d\beta_k(s) \bigg|^2 \right)
\to 0, \ \ as\ n\to {\infty}.
{\epsilon}nd{align}
{\epsilon}nd{lemma}
The proof is quite technical and is postponed to the Appendix. \\
{\bf Proof of Theorem \ref{Thm-Supp}.}
We mainly prove Theorem \ref{Thm-Supp} in the energy-critical case where $3\leq d\leq 6$.
The mass-critical case can be proved similarly,
based on the stability result Theorem \ref{Thm-Sta-L2}.
In order to obtain \eqref{0-Sn-X-E} and \eqref{0-Xnh-Sh-E},
it is equivalent to prove that, for any subsequence $\{n_j\}$,
there exists some subsequence $\{n_{j_k}\}$ of $\{n_j\}$
such that, as $k\to {\infty}$,
\begin{align}
& \lim\limits_{k\to {\infty}} \|S(\beta^{n_{j_k}}) - X(\beta)\|_{S^1(0,T) \cap L^2(0,T; H^\frac 32_{-1})} = 0,\ \ a.s., \label{0-Sn-X}\\
& \lim\limits_{k\to {\infty}} \|X(\beta^{n_{j_k}}-\beta+h) - S(h)\|_{S^1(0,T)\cap L^2(0,T; H^\frac 32_{-1})} =0,\ \ a.s.. \label{0-Xnh-Sh}
{\epsilon}nd{align}
Below, for simplicity,
we still denote the subsequence $\{n_j\}$ by $\{n\}$.
{\bf Proof of \eqref{0-Sn-X}.}
For this purpose,
for each $h\in H^1(0,T; {\mathbb{R}}^N)$,
we set
$$\psi(\beta+ h)(t)
:= \sum\limits_{k=1}^N \int_0^t G_k(s,x) d \beta_k(s)
+ \sum\limits_{k=1}^N \int_0^t G_k(s,x) \dot{h}_k(s) ds - \int_0^t \widehat{\mu}(s,x) ds, $$
and $\psi(h)$ is defined similarly.
Using the rescaling transformation
\begin{align} \label{res-zn-Sbetan}
z_n:=e^{-\psi(\beta^n)} S(\beta^n),
\end{align}
we have
\begin{align} \label{equa-zn-Supp}
i{\partial}_t z_n =& e^{-\psi(\beta^n)} \Delta(e^{\psi(\beta^n)} z_n)
-e^{\frac{4}{d-2} {\rm Re} \, \psi(\beta^n)} F(z_n), \\
z_n(0)=& X_0. \nonumber
\end{align}
Similarly,
$\widetilde{z}:= e^{-\psi(\beta)} X(\beta)$ satisfies the random equation
\begin{align}
i{\partial}_t \widetilde{z} =& e^{-\psi(\beta)} \Delta(e^{\psi(\beta)} \widetilde{z})
- e^{\frac {4}{d-2} {\rm Re} \, \psi(\beta)} F(\widetilde{z}), \label{equa-wtz-Supp} \\
\widetilde{z}(0)=& X_0, \nonumber
\end{align}
or equivalently,
\begin{align*}
i {\partial}_t \widetilde{z} =& e^{-\psi(\beta^n)} \Delta(e^{\psi(\beta^n)} \widetilde{z})
- e^{\frac {4}{d-2} {\rm Re} \, \psi(\beta^n)} F(\widetilde{z}) + e_n
\end{align*}
with the error term
\begin{align} \label{en-betan}
e_n =& ( (b(\psi(\beta))-b(\psi(\beta^n)))\cdot \nabla + (c(\psi(\beta))-c(\psi(\beta^n))) ) \widetilde{z} \nonumber \\
& - (e^{\frac{4}{d-2} {\rm Re} \, \psi(\beta)} - e^{\frac{4}{d-2} {\rm Re} \, \psi(\beta^n)}) F(\widetilde{z}),
\end{align}
where
$b(\psi(\beta)) = 2 \nabla \psi(\beta)$,
$c(\psi(\beta)) = \Delta\psi(\beta) + \sum_{j=1}^d ({\partial}_j \psi(\beta))^2$,
and $b(\psi(\beta^n))$, $c(\psi(\beta^n))$
are defined similarly.
Note that,
the global well-posedness and
the bound of $S^1(0,T)\cap L^2(0,T;H^{\frac 32}_{-1})$-norm of $\widetilde{z}$
can be proved similarly as in Section \ref{Sec-GWP}
by using the energy-critical stability result Theorem $1.4$ of \cite{TV05}.
Similar assertions also hold for $z_n$, $n{\gamma}eq 1$.
Let
\begin{align*}
& g^n(t): =\sum_{k=1}^N \left( \int_0^t g_k(s)\dot{\beta}^n_k(s) ds + \int_0^t g_k^2(s) ds \right),
\end{align*}
and define $g(t)$ similarly with $d\beta_k(s)$ replacing $\dot{\beta}^n_k(s) ds$.
Set
\begin{align*}
{\varepsilon}_n(t) := \sum_{k=1}^N \bigg| \int_0^t g_k \dot{\beta}_k^n(s) ds - \int_0^t g_k d\beta_k(s) \bigg|.
{\epsilon}nd{align*}
Lemma \ref{Lem-betan-beta} implies that,
for some subsequence of $\{n\}$ (still denoted by $\{n\}$),
${\mathbb{P}}$-a.s., as $n\to{\infty}$,
\begin{align} \label{gn-g-0}
\sup\limits_{0\leq t\leq T} \|\psi(\beta^n) - \psi(\beta)\|_{W^{1,{\infty}}}
\leq C\sup\limits_{0\leq t\leq T} |g^n(t) - g(t)|
\leq C \sup\limits_{0\leq t\leq T} {\varepsilon}_n(t) \to 0.
\end{align}
In particular, for some positive constant $C$ independent of $n$,
\begin{align} \label{bdd-psin-gn}
\sup\limits_{0\leq t\leq T} \|\psi(\beta^n)\|_{W^{1,{\infty}}} \leq C, \ \
\sup\limits_{n\geq 1} \sup\limits_{0\leq t\leq T} |g^n(t)|\leq C.
\end{align}
This along with Assumption $(H0)$ yields that for any multi-index ${\gamma}$,
\begin{align} \label{part-psibeta}
\sup\limits_{n\geq 1} \sup\limits_{0\leq t\leq T} |{\partial}_x^{\gamma} \psi(\beta^n)| \leq
C\left<x\right>^{-2} \sup\limits_{n\geq 1} \sup\limits_{0\leq t\leq T} g^n(t)
\leq C\left<x\right>^{-2}.
\end{align}
Then, Theorem \ref{Thm-Stri} yields that the Strichartz and local smoothing estimates hold
for the operator $e^{-\psi(\beta^n)} \Delta (e^{\psi(\beta^n)} \cdot )$,
and the corresponding Strichartz constants $C_T$
are uniformly bounded for all $n$.
Estimating as in \eqref{esti-e1-H1}
and using the global bounds of the
$L^2(0,T;H^{\frac 32}_{-1})$- and $S^1(0,T)$-norms of $\widetilde{z}$,
we obtain
\begin{align*}
\|e_n\|_{N^1(0,T) + L^2(0,T;H^{\frac 12}_{1})}
\leq C(T) \sup\limits_{0\leq t\leq T} {\varepsilon}_n(t)
( \|\widetilde{z}\|_{L^2(0,T;H^{\frac 32}_{-1})} + \|\widetilde{z}\|_{{\mathbb{W}}(0,T)}^{1+\frac{4}{d-2}})
\leq C'(T) \sup\limits_{0\leq t\leq T} {\varepsilon}_n(t) .
\end{align*}
We note that $C'(T)$ is independent of $n$,
due to the uniform bound \eqref{bdd-psin-gn}.
This along with equa\-tionref{gn-g-0} yields that
\begin{align} \label{e-b-bn-0}
\|e_n\|_{N^1(0,T) + L^2(0,T;H^{\frac 12}_{1})}
\to 0,\ \ as\ n\to{\infty},\ a.s..
{\epsilon}nd{align}
Then,
by virtue of Theorem \ref{Thm-Sta-H1-dlow}, we obtain
that ${\mathbb{P}}$-a.s., as $n\to {\infty}$,
\begin{align} \label{zn-z-0}
\| z_n - \widetilde{z} \|_{S^1(0,T) \cap L^2(0,T;H^{\frac 32}_{-1})}
\leq C(T) \sup\limits_{0\leq t\leq T} {\varepsilon}_n(t)
\to 0.
\end{align}
In particular, this yields the uniform bound
\begin{align*}
\sup\limits_{n{\gamma}eq 1} \|z_n\|_{S^1(0,T) \cap L^2(0,T; H^{\frac 32}_{-1})}
\leq C(T) <{\infty},\ \ a.s..
{\epsilon}nd{align*}
We claim that equa\-tionref{zn-z-0} implies equa\-tionref{0-Sn-X}.
Actually, we have
\begin{align} \label{Sbetan-Xbeta}
&\|S(\beta^n) - X(\beta)\|_{S^1(0,T) \cap L^2(0,T;H^{\frac 32}_{-1})} \nonumber \\
=& \|e^{\psi(\beta^n)} z_n - e^{\psi(\beta)} \widetilde{z}\|_{S^1(0,T) \cap L^2(0,T;H^{\frac 32}_{-1})}\\
\leq& \|e^{\psi(\beta^n)} (z_n-\widetilde{z})\|_{S^1(0,T) \cap L^2(0,T;H^{\frac 32}_{-1})}
+ \|(e^{\psi(\beta^n)} - e^{\psi(\beta)}) \widetilde{z}\|_{S^1(0,T) \cap L^2(0,T;H^{\frac 32}_{-1})} . \nonumber
\end{align}
In order to pass to the limit $n\to {\infty}$,
using \eqref{bdd-psin-gn} and the inequality
$|e^x-e^y| \leq C|e^y||x-y|$ for $|x|,|y|\leq \frac 12$,
we have,
as $n\to {\infty}$,
\begin{align} \label{zn-z-S1-0}
& \|e^{\psi(\beta^n)} (z_n-\widetilde{z})\|_{S^1(0,T) }
+ \|(e^{\psi(\beta^n)} - e^{\psi(\beta)}) \widetilde{z}\|_{S^1(0,T)} \nonumber \\
\leq& C(T) (\| z_n-\widetilde{z}\|_{S^1(0,T) }
+ \|\psi(\beta^n) - \psi(\beta)\|_{C([0,T]; W^{1,{\infty}})} ) \nonumber \\
\leq& C(T) \left(\| z_n-\widetilde{z}\|_{S^1(0,T) }
+ \sup\limits_{0\leq t\leq T} {\varepsilon}_n(t) \right)
\to 0,
\end{align}
where in the last step we used \eqref{gn-g-0} and \eqref{zn-z-0}.
Regarding the $L^2(0,T; H^{\frac 32}_{-1})$-norm in \eqref{Sbetan-Xbeta},
we deduce from \eqref{part-psibeta} that
$e^{\psi(\beta^n)} \in S^0$,
and so $\Psi_{p}:= \left<x\right>^{-1}\left<\nabla\right>^{\frac 32} e^{\psi(\beta^n)}\left<\nabla\right>^{-\frac 32} \left<x\right>$
is a pseudo-differential operator of order zero.
This along with Lemma \ref{Lem-L2-Bdd} yields
\begin{align*}
\|e^{\psi(\beta^n)} (z_n-\widetilde{z})\|_{L^2(0,T;H^{\frac 32}_{-1})}
=& \| \Psi_{p} \left<x\right>^{-1} \left<\nabla\right>^{\frac 32} (z_n-\widetilde{z})\|_{L^2(0,T;L^2)} \\
\leq& C \|z_n-\widetilde{z}\|_{L^2(0,T;H^{\frac 32}_{-1})}.
\end{align*}
Moreover,
using Assumption $(H0)$
and the inequality
$|e^x-e^y| \leq C|e^y||x-y|$ for $|x|,|y|\leq \frac 12$,
we have that,
for any multi-index ${\gamma}$,
\begin{align*}
|{\partial}_x^{\gamma} (e^{\psi(\beta^n)} - e^{\psi(\beta)})(t,x)|
\leq C(T) \left<x\right>^{-2} {\varepsilon}_n(t),
\end{align*}
where $C(T)$ is independent of $n$. Then, estimating as above we get
\begin{align*}
\|(e^{\psi(\beta^n)} - e^{\psi(\beta)}) \widetilde{z}\|_{L^2(0,T;H^{\frac 32}_{-1})}
\leq C(T)
\sup\limits_{0\leq t\leq T} {\varepsilon}_n(t)
\| \widetilde{z}\|_{L^2(0,T;H^{\frac 32}_{-1})}
\leq C(T)
\sup\limits_{0\leq t\leq T} {\varepsilon}_n(t) .
\end{align*}
Combining the estimates above, we conclude that,
${\mathbb{P}}$-a.s., as $n \to {\infty}$,
\begin{align} \label{zn-z-LS-0}
&\|e^{\psi(\beta^n)} (z_n-\widetilde{z})\|_{L^2(0,T;H^{\frac 32}_{-1})}
+ \|(e^{\psi(\beta^n)} - e^{\psi(\beta)}) \widetilde{z}\|_{L^2(0,T;H^{\frac 32}_{-1})} \nonumber \\
\leq& C (\|z_n-\widetilde{z}\|_{L^2(0,T;H^{\frac 32}_{-1})}
+ \sup\limits_{0\leq t\leq T} {\varepsilon}_n(t) )
\to 0.
\end{align}
Therefore,
plugging \eqref{zn-z-S1-0} and \eqref{zn-z-LS-0} into \eqref{Sbetan-Xbeta}
we prove \eqref{0-Sn-X}, as claimed. \\
{\bf Proof of \eqref{0-Xnh-Sh}.}
The proof is similar to the one above.
Now we use a new transformation
\begin{align} \label{res-yn-Xbetan}
y_n= e^{-\psi(\beta^n-\beta+h)} X(\beta^n-\beta+h)
\end{align}
to obtain
\begin{align} \label{equa-zn-bbnh}
i{\partial}_t y_n = e^{-\psi(\beta^n-\beta+h)} \Delta (e^{\psi(\beta^n-\beta+h)}y_n)
- e^{\frac{4}{d-2} {\rm Re} \, \psi(\beta^n-\beta+h)} F(y_n)
\end{align}
with $y_n(0)=X_0$.
Arguing as above,
we have the Strichartz and local smoothing estimates for
the operator $e^{-\psi(\beta^n-\beta+h)} \Delta (e^{\psi(\beta^n-\beta+h)} \cdot)$,
with the related Strichartz constants $C_T$
uniformly bounded in $n$.
Moreover, letting $\widetilde{y}:= e^{-\psi(h)} S(h)$ we have
\begin{align}
i {\partial}_t \widetilde{y} =& e^{-\psi(h)} \Delta (e^{\psi(h)} \widetilde{y}) - e^{\frac{4}{d-2}{\rm Re} \, \psi(h)} F(\widetilde{y}) \nonumber \\
=& e^{-\psi(\beta^n-\beta+h)} \Delta (e^{\psi(\beta^n-\beta+h)}\widetilde{y})
- e^{\frac{4}{d-2}{\rm Re} \, \psi(\beta^n-\beta+h)} F(\widetilde{y}) + e'_n
\end{align}
with $\widetilde{y}(0) = X_0$ and the error term
\begin{align} \label{en-supp.1}
e'_n =& (( b(\psi(h)) - b(\psi(\beta^n-\beta+h)) ) \cdot \nabla
+ ( c(\psi(h)) - c(\psi(\beta^n-\beta+h)) )) \widetilde{y} \nonumber \\
& -(e^{\frac{4}{d-2} {\rm Re} \, \psi(h)} - e^{\frac{4}{d-2} {\rm Re} \, \psi(\beta^n - \beta + h)}) F(\widetilde{y}),
\end{align}
where
$b(\psi(h))= 2\nabla \psi(h)$,
$c(\psi(h)) = \Delta \psi(h) + \sum_{j=1}^d({\partial}_j \psi(h))^2$,
and $b(\psi(\beta^n-\beta +h))$, $c(\psi(\beta^n-\beta+h))$ are defined similarly.
Similarly to \eqref{gn-g-0},
for some subsequence of $\{n\}$ (still denoted by $\{n\}$),
for any multi-index ${\gamma}$,
\begin{align*}
\sup\limits_{0\leq t\leq T} |{\partial}_x^{\gamma} (\psi(\beta^n-\beta+h)(t) - \psi(h)(t))|
\leq C \left<x\right>^{-2} \sup\limits_{0\leq t\leq T} {\varepsilon}_n(t),
\end{align*}
and
\begin{align*}
\sup\limits_{0\leq t\leq T} \|\psi(\beta^n-\beta+h)(t) - \psi(h)(t)\|_{W^{1,{\infty}}}
\leq C \sup\limits_{0\leq t\leq T} {\varepsilon}_n(t).
\end{align*}
Then,
estimating $e'_n$ in the same way as $e_n$ above,
we have that
\begin{align*}
\|e'_n\|_{N^1(0,T)+ L^2(0,T;H^{\frac 12}_{1})} \to 0,\ \ as\ n \to {\infty},\ a.s.,
\end{align*}
which along with Theorem \ref{Thm-Sta-H1-dlow} implies that ${\mathbb{P}}$-a.s.,
as $n\to {\infty}$,
\begin{align} \label{yn-wty-0}
\|y_n -\widetilde{y}\|_{S^1(0,T) \cap L^2(0,T;H^{\frac 32}_{-1})}
\to 0.
{\epsilon}nd{align}
In particular,
\begin{align*}
\sup\limits_{n{\gamma}eq 1} \|y_n\|_{S^1(0,T) \cap L^2(0,T; H^{\frac 32}_{-1})} \leq C(T) <{\infty},\ \ a.s..
{\epsilon}nd{align*}
Thus, arguing as below \eqref{zn-z-0}
and using \eqref{yn-wty-0} we obtain \eqref{0-Xnh-Sh}.
Therefore, the proof of Theorem \ref{Thm-Supp} is complete.
$\square$
\section{Appendix} \label{Sec-App}
{\bf Proof of Theorem \ref{Thm-Rescale-sigma}.}
The case where $\sigma \equiv 0$ can be proved similarly as in
\cite[Lemma 6.1]{BRZ14} and \cite[Lemma 2.4]{BRZ16}
in the $L^2$ and $H^1$ settings, respectively.
For the general case,
we prove the $L^2$ case below;
the $H^1$ case can be proved similarly.
Set ${\varphi}(t) := {\varphi}_0(t) = \sum_{k=1}^N \int_0^{t} G_k d\beta_k(s) - \int_0^t \widehat{\mu}\, ds$
and $v(t) := v_0(t)=e^{-{\varphi}(t)}X(t)$, $t\in[0,\tau^*)$.
For any $0\leq t<\tau^*-\sigma$, we have
\begin{align} \label{X-v}
X(t) = e^{{\varphi}(t)} v(t),\ \
X(\sigma+t) = e^{{\varphi}_\sigma(t)} v_\sigma(t),
{\epsilon}nd{align}
and
\begin{align} \label{vfsigma-vf}
{\varphi}(\sigma+t) - {\varphi}(\sigma) = {\varphi}_\sigma(t).
{\epsilon}nd{align}
It follows that
\begin{align} \label{vsigma-v}
v_\sigma(t) = e^{-{\varphi}_\sigma(t)} X(\sigma+t)
= e^{-({\varphi}(\sigma+t) - {\varphi}(\sigma) )} X(\sigma+t)
= e^{{\varphi}(\sigma)} v(\sigma+t).
{\epsilon}nd{align}
Then, similar arguments as in the proof of \cite[Lemma 6.1]{BRZ14} show that,
$v$ satisfies pathwisely the equation equa\-tionref{equa-RNLS} on $[0,\tau^*)$
in the space $H^{-2}$,
with $0$ replacing $\sigma$.
Hence, ${\mathbb{P}}$-a.s. for any $t\in[0,\tau^*-\sigma)$,
\begin{align*}
iv(\sigma+t)
= iv(\sigma) + \int_\sigma^{\sigma+t} e^{-{\varphi}(s)} {\Delta}elta (e^{{\varphi}(s)}v(s)) ds
- \int_\sigma^{\sigma+t} e^{({\alpha}-1){\rm Re} \, {\varphi}(s)} F(v(s)) ds,
{\epsilon}nd{align*}
where the equation is taken in $H^{-2}$.
Plugging this into equa\-tionref{vsigma-v} yields that
\begin{align} \label{vsigma-v.0}
iv_\sigma(t)
=& ie^{{\varphi}(\sigma)}v(\sigma)
+\int_\sigma^{\sigma+t} e^{{\varphi}(\sigma)-{\varphi}(s)} {\Delta}elta (e^{{\varphi}(s)}v(s)) ds \noindent onumber \\
& - \int_\sigma^{\sigma+t} e^{{\varphi}(\sigma)} e^{({\alpha}-1){\rm Re} \, {\varphi}(s)} F(v(s))ds \noindent onumber \\
=& iX(\sigma)
+ \int_0^{t} e^{{\varphi}(\sigma)-{\varphi}(\sigma+s)} {\Delta}elta (e^{{\varphi}(\sigma+s)}v(\sigma+s)) ds \noindent onumber \\
& - \int_0^{t} e^{{\varphi}(\sigma)} e^{({\alpha}-1){\rm Re} \, {\varphi}(\sigma+s)} F(v(\sigma+s)) ds.
{\epsilon}nd{align}
Note that, by \eqref{X-v} and \eqref{vfsigma-vf},
\begin{align*}
e^{{\varphi}(\sigma)-{\varphi}(\sigma+s)} \Delta (e^{{\varphi}(\sigma+s)}v(\sigma+s))
= e^{-{\varphi}_\sigma(s)} \Delta (X(\sigma+s))
= e^{-{\varphi}_\sigma(s)} \Delta (e^{{\varphi}_\sigma(s)}v_\sigma(s) ).
\end{align*}
Moreover,
\begin{align*}
e^{{\varphi}(\sigma)} e^{({\alpha}-1){\rm Re} \, {\varphi}(\sigma+s)} F(v(\sigma+s))
=& e^{{\varphi}(\sigma) - {\varphi}(\sigma+s)} F(X(\sigma+s)) \\
=& e^{-{\varphi}_\sigma(s)} F(e^{{\varphi}_\sigma(s)}v_\sigma(s)) \\
=& e^{({\alpha}-1){\rm Re} \, {\varphi}_\sigma(s)}F(v_{\sigma}(s)).
\end{align*}
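The last equality above is a consequence of the homogeneity of the nonlinearity $F(z)=|z|^{{\alpha}-1}z$; as a brief sketch,
\begin{align*}
F(e^{{\varphi}_\sigma(s)} v_\sigma(s)) = |e^{{\varphi}_\sigma(s)} v_\sigma(s)|^{{\alpha}-1} e^{{\varphi}_\sigma(s)} v_\sigma(s)
= e^{({\alpha}-1){\rm Re} \, {\varphi}_\sigma(s)}\, e^{{\varphi}_\sigma(s)} F(v_\sigma(s)),
\end{align*}
so that $e^{-{\varphi}_\sigma(s)} F(e^{{\varphi}_\sigma(s)}v_\sigma(s)) = e^{({\alpha}-1){\rm Re} \, {\varphi}_\sigma(s)}F(v_{\sigma}(s))$.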
Thus, plugging the two identities above into equa\-tionref{vsigma-v.0},
we obtain that ${\mathbb{P}}$-a.s. for any $0\leq t<\tau^*- \sigma$,
\begin{align*}
i v_\sigma(t)
= i X(\sigma) + \int_0^t e^{-{\varphi}_\sigma(s)} \Delta (e^{{\varphi}_\sigma(s)} v_\sigma(s)) ds
- \int_0^t e^{({\alpha}-1){\rm Re} \, {\varphi}_\sigma(s)} F(v_{\sigma}(s)) ds
\end{align*}
as an equation in $H^{-2}$,
which implies equa\-tionref{equa-RNLS},
thereby finishing the proof.
$\square$ \\
{\bf Proof of Lemma \ref{Lem-bdd-H1}.}
Below, we mainly prove the It\^o formula equa\-tionref{Ito-H}.
The estimate equa\-tionref{bdd-X-H1} can be obtained from equa\-tionref{Ito-H}
by using similar arguments as in the proof of \cite[(2.4)]{BRZ18},
involving the Burkholder-Davis-Gundy inequality and the Gronwall inequality.
In order to prove equa\-tionref{Ito-H},
we use the stability result to pass to the limit
in the approximating procedure
as in the proof of $(2.4)$ in \cite{BRZ16}.
More precisely,
we consider the solution $X_m$ to \eqref{equa-x},
with the nonlinearity $\Theta_m (|X|^{4/(d-2)}X)$ replacing $|X|^{4/(d-2)}X$,
where $\Theta_m f:= \mathscr{F}^{-1}(\theta(\frac{|\cdot|}{m})) \ast f$,
$\theta \in C_c^{\infty}$ is real-valued, nonnegative,
and $\theta(x)=1$ for $|x|\leq 1$,
$\theta(x)=0$ for $|x|\geq 2$.
Since the operators $\{\Theta_m\}$ are uniformly bounded in $L^p$ for any $1<p<{\infty}$
(see \cite[(3.2)]{BRZ16}),
arguing as in the proof of \cite[Theorem 3.1]{BRZ16},
we deduce that $X_m$, $m{\gamma}eq 1$, exist on the common
time regime $[0,\tau^*)$,
and
\begin{align*}
\sup_{m{\gamma}eq 1}\|X_m\|_{S^1(0,t) \cap L^2(0,t; H^{\frac 32}_{-1})} \leq C(t)<{\infty}, \ \ t\in (0,\tau^*),\ a.s..
{\epsilon}nd{align*}
Moreover, similar arguments as in the proof of \cite[(3.9)]{BRZ16} yield that
\begin{align*}
&H(X_m(t)) \noindent onumber \\
=& H(X_0) - \int_0^t {\rm Re} \, \int \noindent a \overline{X}_m \noindent a(\mu X_m) dx ds
+ \frac 12 \sum\limits_{k=1}^N \int_0^t |\noindent a (G_k X_m)|^2 dx ds \noindent onumber \\
& - \frac{{\lambda}({\alpha}-1)}{2} \sum\limits_{k=1}^N \int_0^t \int ({\rm Re} \, G_k)^2 |X_m|^{{\alpha}+1} dx ds \\
& - {\lambda} \int_0^t {\rm Re} \, \int i \noindent a ((\Theta_m -1) F(X_m) ) \noindent a \overline{X}_m dx ds \\
& + \sum\limits_{k=1}^N\int_0^t {\rm Re} \, \int \noindent a \overline{X}_m \noindent a (G_k X_m) dx d\beta_k(s)
-{\lambda} \sum\limits_{k=1}^N \int_0^t \int {\rm Re} \, G_k |X_m|^{{\alpha}+1} dx d\beta_k(s). \noindent onumber
{\epsilon}nd{align*}
Then, in order to pass to the limit $m\to {\infty}$,
we only need to show that, for $w_m :=e^{-{\varphi}} X_m$ and $w :=e^{-{\varphi}} X$
with ${\varphi}$ as in equa\-tionref{vf} with $\sigma equa\-tionuiv 0$,
\begin{align} \label{asym-ym-y}
w_m \to w,\ \ in\ S^1(0,t), \ \ as\ m\to {\infty},\ t\in (0,\tau^*),\ a.s..
{\epsilon}nd{align}
For this purpose,
we use the stability result in Section \ref{Sec-Sta}
to replace the subcritical arguments in \cite{BRZ16}.
Note that $w_m$ satisfies
\begin{align} \label{equa-ym}
i{\partial}_t w_m = e^{-{\varphi}}\Delta (e^{{\varphi}}w_m) - e^{-{\varphi}} \Theta_m(F(e^{\varphi} w_m))
\end{align}
with $w_m(0) = X_0$.
Moreover, $w$ satisfies \eqref{equa-w-p}
with ${\varphi}$ replacing $\psi$, i.e.,
\begin{align} \label{equa-z}
i{\partial}_t w = e^{-{\varphi}}\Delta (e^{{\varphi}}w) - e^{-{\varphi}} \Theta_m(F(e^{\varphi} w)) + e_m
\end{align}
with the error term
\begin{align*}
e_m = e^{-{\varphi}} ( \Theta_m(F(e^{\varphi} w))-F(e^{\varphi} w)).
\end{align*}
Since, for $p\in (1,{\infty})$,
$\Theta_m f \to f$ in $L^p$ (see \cite[(3.3)]{BRZ16}),
we have, for $t\in (0,\tau^*)$,
\begin{align*}
\|e_m\|_{N^{1}(0,t)}
\leq C(t) \| \Theta_m(F(e^{\varphi} w))-F(e^{\varphi} w)\|_{L^{2}(0,t; W^{1,\frac{2d}{d+2}})}
\to 0,\ m\to {\infty},
\end{align*}
where $C(t)$ is independent of $m$.
Therefore,
we deduce that
the asymptotic equa\-tionref{asym-ym-y} holds
by using the stability result similar to Theorem \ref{Thm-Sta-H1-dlow}
with the nonlinearity $e^{-{\varphi}}\Theta_m (F(e^{{\varphi}}w_m))$
replacing $e^{\frac{4}{d-2}{\rm Re} \, \Phi}F(w)$,
which can be proved similarly as in the proof of Theorem \ref{Thm-Sta-H1-dlow}.
Then,
we use equa\-tionref{asym-ym-y} to pass to the limit $m\to {\infty}$
in the It\^o formula of $H(X_m)$ to obtain equa\-tionref{Ito-H}.
The proof is complete.
$\square$\\
{\bf Proof of Lemma \ref{Lem-betan-beta}.}
Note that, for each $1\leq k\leq N$ fixed,
\begin{align} \label{gk-betanbeta}
\bigg|\int_0^t g_k(s) \dot\beta_k^n(s) ds - \int_0^t g_k(s) d\beta_k(s) \bigg|
\leq& \bigg|\int_{\frac{[2^nt]}{2^n}}^t g_k(s) d\beta_k(s)\bigg|
+ \bigg|\int_{\frac{[2^nt]}{2^n}}^t g_k(s) \dot{\beta}^n_k(s)ds \bigg| \nonumber \\
+& \bigg|\int_0^{\frac{[2^nt]}{2^n}} g_k(s) \dot\beta^n_k(s) ds
- \int_0^{\frac{[2^nt]}{2^n}} g_k(s) d\beta_k(s) \bigg| \nonumber \\
=:& J'_{n,1}(t) + J_{n,2}'(t) + J'_{n,3}(t).
\end{align}
Below we estimate $J'_{n,1}, J'_{n,2}, J'_{n,3}$ respectively.
First we prove that
\begin{align} \label{Jn1-0}
{\mathbb{E}} \sup\limits_{0\leq t\leq T} (J'_{n,1}(t))^2
\to 0,\ \ as\ n\to {\infty}.
{\epsilon}nd{align}
To this end, we set
$M_k(t):= \int_0^t g_k(s) d\beta_k(s)$.
Since $g_k\in C(0,T)$,
using the Burkholder-Davis-Gundy inequality
we have that for any $p{\gamma}eq 1$,
\begin{align*}
{\mathbb{E}}|M_k(t) - M_k(s)|^{2p} \leq C(p) |t-s|^p.
{\epsilon}nd{align*}
Then, in view of
Kolmogorov's continuity criterion
(see, e.g., \cite[Proposition 2.1]{MS94}), we get that for any
${\lambda}>0$ and ${\gamma}<\frac{p-1}{2p}$,
\begin{align*}
{\mathbb{P}} \left( \sup\limits_{t\neq s} \frac{|M_k(t) - M_k(s)|}{|t-s|^{\gamma}} >{\lambda} \right) \leq C {\lambda}^{-2p}.
\end{align*}
In particular, taking $p=3$ and ${\gamma}=\frac 14$, we arrive at
\begin{align*}
{\mathbb{P}}\left(\sup\limits_{0\leq t\leq T} \bigg|M_k(t) - M_k(\frac{[2^n t]}{2^n}) \bigg| >{\lambda} \right)
\leq C {\lambda}^{-6} 2^{-\frac 32 n}.
\end{align*}
This yields that
\begin{align*}
{\mathbb{E}} \sup\limits_{0\leq t\leq T} (J'_{n,1}(t))^2
=& 2 \int_0^{\infty} {\lambda} {\mathbb{P}} \left(\sup\limits_{0\leq t\leq T} |M_k(t) - M_k(\frac{[2^n t]}{2^n})| >{\lambda} \right) d{\lambda} \\
\leq& \frac 1{n^2} + 2C\, 2^{-\frac 32 n} \int_{\frac 1n}^{\infty}{\lambda}^{-5} d{\lambda}
= \frac 1{n^2} + \frac C2\, n^4 2^{-\frac 32n} \to 0,\ \ as\ n \to {\infty},
\end{align*}
which implies \eqref{Jn1-0}, as claimed.
Similarly, since $g_k\in C(0,T)$ and $0\leq t- \frac{[2^nt]}{2^n} \leq \frac{1}{2^n}$,
\begin{align*}
J'_{n,2}(t)
=& \bigg| 2^n \int_{\frac{[2^nt]}{2^n}}^t g_k(s) ds \left(\beta_k(\frac{[2^nt]}{2^n}) - \beta_k(\frac{[2^nt]-1}{2^n})\right)\bigg| \\
\leq& C \bigg|\beta_k(\frac{[2^nt]}{2^n}) - \beta_k(\frac{[2^nt]-1}{2^n})\bigg|.
\end{align*}
Arguing as above we have
\begin{align} \label{Jn2-0}
{\mathbb{E}} \sup\limits_{0\leq t\leq T} (J'_{n,2}(t))^2
\to 0,\ \ as\ n\to {\infty}.
\end{align}
It remains to prove that
\begin{align} \label{Jn3-0}
{\mathbb{E}} \sup\limits_{0\leq t\leq T} (J'_{n,3}(t))^2
\to 0,\ \ as\ n\to {\infty}.
\end{align}
For this purpose,
since
\begin{align*}
\int_{\frac{j-1}{2^n}}^{\frac{j}{2^n}} g_k(s) \dot{\beta}^n_k(s) ds
= \int_{\frac{j-2}{2^n}}^{\frac{j-1}{2^n}} \left(\int_{\frac{j-1}{2^n}}^{\frac{j}{2^n}} g_k(r)2^n dr \right) d\beta_k(s),
{\epsilon}nd{align*}
we have
\begin{align*}
J'_{n,3}(t)
=& \bigg| \sum\limits_{j=1}^{[2^nt]} \int_{\frac{j-2}{2^n}}^{\frac{j-1}{2^n}}
\left(\int_{\frac{j-1}{2^n}}^{\frac{j}{2^n}} g_k(r) 2^n dr\right) d\beta_k(s)
- \sum\limits_{j=1}^{[2^nt]} \int_{\frac{j-1}{2^n}}^{\frac{j}{2^n}} g_k(s) d\beta_k(s)\bigg| \\
=& \bigg|\sum\limits_{j=1}^{[2^nt]-1} \int_{\frac{j-1}{2^n}}^{\frac{j}{2^n}}
\left( \left(\int_{\frac{j}{2^n}}^{\frac{j+1}{2^n}} g_k(r) 2^n dr\right) -g_k(s)\right) d\beta_k(s)
- \int_{\frac{[2^nt]-1}{2^n}}^{\frac{[2^nt]}{2^n}} g_k(s) d\beta_k(s)\bigg|.
\end{align*}
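For clarity, the rewriting of $\int_{\frac{j-1}{2^n}}^{\frac{j}{2^n}} g_k(s)\dot{\beta}^n_k(s)\, ds$ as an It\^o integral, used above, can be sketched as follows: on $[\frac{j-1}{2^n},\frac{j}{2^n})$ the derivative $\dot{\beta}^n_k$ equals the constant $2^n(\beta_k(\frac{j-1}{2^n})-\beta_k(\frac{j-2}{2^n}))$, so that
\begin{align*}
\int_{\frac{j-1}{2^n}}^{\frac{j}{2^n}} g_k(s)\dot{\beta}^n_k(s) ds
= \bigg(\int_{\frac{j-1}{2^n}}^{\frac{j}{2^n}} g_k(r) 2^n dr\bigg)
\Big(\beta_k\big(\tfrac{j-1}{2^n}\big)-\beta_k\big(\tfrac{j-2}{2^n}\big)\Big)
= \int_{\frac{j-2}{2^n}}^{\frac{j-1}{2^n}} \bigg(\int_{\frac{j-1}{2^n}}^{\frac{j}{2^n}} g_k(r) 2^n dr\bigg) d\beta_k(s),
\end{align*}
where the last step simply writes the deterministic factor in front of the Brownian increment as an It\^o integral over the previous dyadic interval.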
Let
$ M_n(t):= \sum_{j=1}^{[2^nt]-1} \int_{\frac{j-1}{2^n}}^{\frac{j}{2^n}}
( (\int_{\frac{j}{2^n}}^{\frac{j+1}{2^n}} g_k(r) 2^n dr) -g_k(s)) d\beta_k(s)$.
We get
\begin{align} \label{Jn3.1}
J'_{n,3}(t) \leq |M_n(t)| + \bigg| \int_{\frac{[2^nt]-1}{2^n}}^{\frac{[2^nt]}{2^n}} g_k(s) d\beta_k(s) \bigg|.
\end{align}
Estimating as in the proof of \eqref{Jn1-0} we see that
\begin{align} \label{Jn3.2}
{\mathbb{E}} \sup\limits_{0\leq t\leq T} \bigg|\int_{\frac{[2^nt]-1}{2^n}}^{\frac{[2^nt]}{2^n}}g_k(s) d\beta_k(s) \bigg|^2
\to 0,\ \ as\ n\to {\infty}.
\end{align}
Moreover, since $g_k(s)$ is deterministic,
using the independence of increments of Brownian motions
we have that for each $n{\gamma}eq 1$,
$t \mapsto M_n(t)$ is a right-continuous martingale.
Then, using the maximal inequality and
the Burkholder-Davis-Gundy inequality we get
\begin{align} \label{esti-Mn}
{\mathbb{E}} \sup\limits_{0\leq t\leq T} |M_n(t)|^2
\leq& 4 {\mathbb{E}} |M_n(T)|^2 \nonumber \\
\leq& C \sum\limits_{j=1}^{[2^nT]-1} \int_{\frac{j-1}{2^n}}^{\frac{j}{2^n}}
\left( \left(\int_{\frac{j}{2^n}}^{\frac{j+1}{2^n}} g_k(r) 2^n dr\right) -g_k(s)\right)^2 ds.
\end{align}
For any ${\varepsilon}>0$,
by the uniform continuity of $g_k$ on $[0,T]$,
we have that for $n$ large enough,
$|g_k(r_1) - g_k(r_2)| \leq {\varepsilon}$
for any $|r_1-r_2| \leq 2^{1-n}$.
Then, by the mean-value theorem for integrals,
we get that for any $1\leq j\leq [2^nT]-1$,
\begin{align*}
\bigg| \left(\int_{\frac{j}{2^n}}^{\frac{j+1}{2^n}} g_k(r) 2^n dr\right) -g_k(s) \bigg|
\leq |g_k(s_{n,j}) - g_k(s)| \leq {\varepsilon},
\end{align*}
where $s_{n,j} \in (\frac{j}{2^n}, \frac{j+1}{2^n})$.
Thus the right-hand side of \eqref{esti-Mn} is bounded by
\begin{align*}
C \sum\limits_{j=1}^{[2^nT]-1} \int_{\frac{j-1}{2^n}}^{\frac{j}{2^n}} {\varepsilon}^2 ds
\leq C T {\varepsilon}^2.
\end{align*}
This implies that
\begin{align} \label{Jn3.3}
{\mathbb{E}} \sup\limits_{0\leq t\leq T} |M_n(t)|^2 \to 0,\ \ as\ n\to {\infty}.
\end{align}
Thus, we obtain \eqref{Jn3-0} from \eqref{Jn3.2} and \eqref{Jn3.3}.
Therefore, collecting \eqref{gk-betanbeta}, \eqref{Jn1-0}, \eqref{Jn2-0} and \eqref{Jn3-0} together,
we prove \eqref{E-betan-beta-0}.
$\square$\\
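The convergence established above can also be observed numerically. The following minimal Python sketch, given purely as an illustration and not as part of the argument, simulates a single Brownian path on a fine grid, builds the adapted piecewise-linear approximation with the delayed increments used above (with $\beta_k$ taken to vanish on $[-2^{-n},0]$, an assumption made here for the first subinterval), and prints the sup-norm distance between $\int_0^t g_k(s)\,d\beta^n_k(s)$ and the It\^o integral $\int_0^t g_k(s)\,d\beta_k(s)$ for the deterministic integrand $g_k(s)=\cos s$.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
T, N = 1.0, 2**14                        # fine reference grid
t = np.linspace(0.0, T, N + 1)
dB = rng.normal(0.0, np.sqrt(T / N), N)
B = np.concatenate(([0.0], np.cumsum(dB)))
g = np.cos(t)                            # the continuous integrand g_k

# reference Ito integral M(t) = int_0^t g_k dB on the fine grid
M = np.concatenate(([0.0], np.cumsum(g[:-1] * dB)))

for n in range(2, 9):
    h = T / 2**n
    # B at times -h, 0, h, ..., T, with B(-h) := 0
    Bg = np.interp(np.arange(-1, 2**n + 1) * h, t, B)
    slope = (Bg[1:] - Bg[:-1]) / h       # delayed slope of beta^n on [(j-1)h, jh]
    idx = np.minimum((t[:-1] / h).astype(int), 2**n - 1)
    Mn = np.concatenate(([0.0], np.cumsum(g[:-1] * slope[idx] * (T / N))))
    print(n, np.max(np.abs(Mn - M)))     # decreases as n grows
\end{verbatim}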
\begin{thebibliography}{99}
\bibitem{BCIR94}
O. Bang, P.L. Christiansen, F. If, K.O. Rasmussen,
Temperature effects in a nonlinear model of monolayer Scheibe aggregates.
{\it Phys. Rev. E} {\bf 49} (1994), 4627--4636.
\bibitem{BCIRG95}
O. Bang, P.L. Christiansen, F. If, K.O. Rasmussen, Y.B. Gaididei,
White noise in the two-dimensional nonlinear Schr\"odinger equation,
{\it Appl. Anal}. {\bf 57} (1995), no. 1-2, 3--15.
\bibitem{BR15}
V. Barbu, M. R\"ockner,
An operatorial approach to stochastic partial differential equations driven by linear multiplicative noise.
{\it J. Eur. Math. Soc.} {\bf 17} (2015), no. 7, 1789--1815.
\bibitem{BRZ14}
V. Barbu, M. R\"{o}ckner, D. Zhang, The stochastic nonlinear
Schr\"{o}dinger equations with multiplicative noise: the rescaling
approach, {\it J. Nonlinear Sci.} {\bf 24} (2014), no. 3, 383--409.
\bibitem{BRZ16}
V. Barbu, M. R\"{o}ckner, D. Zhang, Stochastic nonlinear Schr\"odinger equations. {\it Nonlinear Anal.} {\bf 136} (2016), 168--194.
\bibitem{BRZ17}
V. Barbu, M. R\"ockner, D. Zhang, The stochastic logarithmic Schr\"odinger equation.
{\it J. Math. Pures Appl.} {\bf 107} (2017), no. 2, 123--149.
\bibitem{BRZ18}
V. Barbu, M. R\"ockner, D. Zhang,
Optimal bilinear control of nonlinear stochastic Schr\"odinger equations driven by linear multiplicative noise.
{\it Ann. Probab.} {\bf 46} (2018), no. 4, 1957--1999.
\bibitem{BG09}
A. Barchielli, M. Gregoratti,
Quantum Trajectories and Measurements in Continuous Case.
The Diffusive Case,
{\it Lecture Notes Physics} {\bf 782}, Springer Verlag,
Berlin, 2009.
\bibitem{B99}
J. Bourgain,
Global wellposedness of defocusing critical nonlinear Schr\"odinger
equation in the radial case,
{\it J. Amer. Math. Soc.} {\bf 12} (1999), no. 1, 145--171.
\bibitem{BM14}
Z. Brz\'{e}zniak, A. Millet, On the stochastic Strichartz estimates and the stochastic nonlinear Schr\"{o}dinger equation on a compact Riemannian manifold.
{\it Potential Anal.} {\bf 41} (2014), no. 2, 269--315.
\bibitem{BHW17}
Z. Brz\'{e}zniak, F. Hornung, L. Weis,
Martingale solutions for the stochastic nonlinear Schr\"odinger equation in the energy space. {\texttt arXiv:1707.05610v1}
\bibitem{BHW18}
Z. Brz\'{e}zniak, F. Hornung, L. Weis,
Uniqueness of martingale solutions for the stochastic nonlinear Schr\"odinger equation on 3d compact manifolds.
{\texttt arXiv:1808.10619}
\bibitem{B08}
H. Chihara,
Resolvent estimates related with a class of dispersive equations.
{\it J. Fourier Anal. Appl.} {\bf 14} (2008), no. 2, 301--325.
\bibitem{CG15}
K. Chouk, M. Gubinelli,
Nonlinear PDEs with modulated dispersion I: Nonlinear Schr\"odinger equations.
{\it Comm. Partial Differential Equations} {\bf 40} (2015), no. 11, 2047--2081.
\bibitem{CKSTT08}
J. Colliander, M. Keel, G. Staffilani, H. Takaoka, T. Tao,
Global well-posedness and scattering for the energy-critical nonlinear Schr\"odinger equation in $\mathbb{R}^3$.
{\it Ann. of Math. (2)} {\bf 167} (2008), no. 3, 767--865.
\bibitem{BD99}
A. de Bouard, A. Debussche, A stochastic nonlinear Schr{\"o}dinger equation with multiplicative noise.
{\it Comm. Math. Phys.} {\bf 205} (1999),
161--181.
\bibitem{BD03}
A. de Bouard, A. Debussche, The stochastic nonlinear Schr\"{o}dinger equation in $H^1$. {\it Stoch. Anal. Appl.} {\bf 21} (2003), 97--126.
\bibitem{D12}
B. Dodson,
Global well-posedness and scattering for the defocusing, $L^2$-critical nonlinear Schr\"odinger equation when $d\geq 3$.
{\it J. Amer. Math. Soc.} {\bf 25} (2012), no. 2, 429--463.
\bibitem{D16.1}
B. Dodson, Global well-posedness and scattering for the defocusing, $L^2$-critical, nonlinear Schr\"odinger equation when $d=1$.
{\it Amer. J. Math.} {\bf 138} (2016), no. 2, 531--569.
\bibitem{D16.2}
B. Dodson, Global well-posedness and scattering for the defocusing, $L^2$-critical, nonlinear Schr\"odinger equation when $d=2$.
{\it Duke Math. J.} {\bf 165} (2016), no. 18, 3435--3516.
\bibitem{FX18.1}
C.J. Fan, W.J. Xu,
Global well-posedness for the defocusing mass-critical stochastic nonlinear Schr\"odinger equation on ${\mathbb{R}}$ at $L^2$ regularity,
\texttt{arXiv:1810.07925}
\bibitem{FX18.2}
C.J. Fan, W.J. Xu,
Subcritical approximations to stochastic defocusing mass-critical nonlinear Schr\"odinger equation on ${\mathbb{R}}$,
\texttt{arXiv: 1810.09407.}
\bibitem{FS97}
R. Farwig, H. Sohr,
Weighted Lq-theory for the Stokes resolvent in exterior domains.
{\it J. Math. Soc. Japan}. {\bf 49} (1997), no. 2, 251--288.
\bibitem{G05}
E. Gautier, Large deviations and support results for nonlinear Schr\"odinger equations with additive noise and applications.
{\it ESAIM Probab. Stat.} {\bf 9} (2005), 74--97.
\bibitem{G07}
E. Gautier, Stochastic nonlinear Schr\"odinger equations driven by a fractional noise well-posedness,
large deviations and support.
{\it Electron. J. Probab.} {\bf 12} (2007), no. 29, 848--861.
\bibitem{HRZ18}
S. Herr, M. R\"ockner, D. Zhang,
Scattering for stochastic nonlinear Schr\"odinger equations,
\texttt{arXiv:1804.10429v2}.
\bibitem{H16}
F. Hornung,
The nonlinear stochastic Schr\"odinger equation via stochastic Strichartz estimates.
{\it J. Evol. Equ.} {\bf 18} (2018), no. 3, 1085--1114.
\bibitem{KM06}
C.E. Kenig, F. Merle, Global well-posedness, scattering and blow-up for the energy-critical, focusing, non-linear Schr\"odinger equation in the radial case.
{\it Invent. Math.} {\bf 166} (2006), no. 3, 645--675.
\bibitem{KV13}
R. Killip, M. Visan,
Nonlinear Schr\"odinger equations at critical regularity. Evolution equations, 325-437,
{\it Clay Math. Proc.,} {\bf 17}, Amer. Math. Soc., Providence, RI, 2013.
\bibitem{KTV14}
H. Koch, D. Tataru, M. Visan,
Dispersive equations and nonlinear waves. Generalized Korteweg-de Vries, nonlinear Schr\"odinger, wave and Schr\"odinger maps.
{\it Oberwolfach Seminars}, {\bf 45}. Birkh\"auser/Springer, Basel, 2014.
\bibitem{K81}
H. Kumano-go,
Pseudodifferential operators. Translated from the Japanese by the author, R\'{e}mi Vaillancourt and Michihiro Nagase. MIT Press, Cambridge, Mass.-London, 1981.
\bibitem{KW79}
D.S. Kurtz, R.L. Wheeden, Results on weighted norm inequalities for multipliers.
{\it Trans. Amer. Math. Soc.} {\bf 255} (1979), 343--362.
\bibitem{K80}
D.S. Kurtz, Littlewood-Paley and multiplier theorems on weighted $L^{p}$ spaces.
{\it Trans. Amer. Math. Soc.} {\bf 259} (1980), no. 1, 235--254.
\bibitem{MMT08}
J. Marzuola, J. Metcalfe, D. Tataru, Strichartz estimates and local
smoothing estimates for asymptotically flat Schr\"{o}dinger
equations.
{\it J. Funct. Anal.} {\bf 255 } (2008), no. 6, 1479--1553.
\bibitem{MS94}
A. Millet, M. Sanz-Sol\'{e},
A simple proof of the support theorem for diffusion processes. S\'eminaire de Probabilit\'es, XXVIII, 36--48,
{\it Lecture Notes in Math.}, {\bf 1583}, Springer, Berlin, 1994.
\bibitem{MS94.2}
A. Millet, M. Sanz-Sol\'{e},
The support of the solution to a hyperbolic SPDE.
{\it Probab. Theory Related Fields} {\bf 98} (1994), no. 3, 361--387.
\bibitem{RV07}
E. Ryckman, M. Visan,
Global well-posedness and scattering for the defocusing energy-critical nonlinear Schr\"odinger equation in ${\mathbb{R}}^{1+4}$.
{\it Amer. J. Math.} {\bf 129} (2007), no. 1, 1--60.
\bibitem{SV72}
D.W. Stroock, S.R.S. Varadhan, On the support of diffusion processes with applications to the strong maximum principle.
{\it Proc. Sixth Berkeley Symp. Math. Statist. Probab}. {\bf 3}
333--359.
Univ. California Press, Berkeley, 1972.
\bibitem{SV72.2}
D. Stroock, S.R.S. Varadhan,
On degenerate elliptic-parabolic operators of second order and their associated diffusions.
{\it Comm. Pure Appl. Math}. {\bf 25} (1972), 651--713.
\bibitem{SS99}
C. Sulem, P.L. Sulem,
The nonlinear Schr\"odinger equation: self-focusing
and wave collapse.
{\it Applied Mathematical Sciences} {\bf 139}, Springer, New York,
1999.
\bibitem{T06}
T. Tao. Nonlinear Dispersive Equations. Local and Global Analysis,
{\it CBMS Regional Conference Series in Mathematics}, {\bf 106}, AMS (2006).
\bibitem{TV05}
T. Tao, M. Visan,
Stability of energy-critical nonlinear Schr\"odinger equations in high dimensions.
{\it Electron. J. Differential Equations} 2005, No. {\bf 118}, 28 pp.
\bibitem{TVZ07}
T. Tao, M. Visan, X.Y. Zhang,
The nonlinear Schr\"odinger equation with combined power-type nonlinearities.
{\it Comm. Partial Differential Equations} {\bf 32} (2007), no. 7-9, 1281-1343.
\bibitem{T00}
M.E. Taylor,
Tools for PDE.
Pseudodifferential operators, paradifferential operators, and layer potentials.
{\it Mathematical Surveys and Monographs},
{\bf 81}.
American Mathematical Society, Providence, RI, 2000. x+257 pp.
\bibitem{V07}
M. Visan, The defocusing energy-critical nonlinear Schr\"odinger equation in higher dimensions.
{\it Duke Math. J.} {\bf 138} (2007), no. 2, 281--374.
\bibitem{Z17}
D. Zhang, Strichartz and local smoothing estimates for stochastic dispersive equations. \texttt{arXiv:1709.03812}
\end{thebibliography}
\end{document}
|
\begin{document}
\definecolor{background-color}{gray}{0.98}
\newtheorem{dfn}{Definition}[section]
\newtheorem{ex}{Example}[section]
\newtheorem{subex}{Example}[subsection]
\newtheorem{cl}{Corollary}[section]
\newtheorem{propo}{Proposition}[section]
\newtheorem{theorem}{Theorem}[section]
\renewcommand{\arraystretch}{1.25}
\betaegin{frontmatter}
\title{Ro-vibrational energy analysis of Manning-Rosen and P\"oschl-Teller potentials with a new improved approximation in the
centrifugal term}
\author[address1]{Debraj Nath}
\ead{[email protected]}
\author[address2]{Amlan K.~Roy}
\ead{[email protected]}
\address[address1]{Department of Mathematics, Vivekananda College, Kolkata-700063, WB, India.}
\address[address2]{Department of Chemical Sciences, Indian Institute of Science Education and Research (IISER) Kolkata, Mohanpur-741246,
Nadia, WB, India.}
\betaegin{abstract}
Two physically important potentials (Manning-Rosen and P\"oschl-Teller) are considered for the ro-vibrational energy in diatomic molecules.
An improved new approximation is invoked for the centrifugal term, which is then used for their solution within the Nikiforov-Uvarov framework.
This employs a recently proposed scheme, which combines the two widely used Greene-Aldrich and Pekeris-type approximations. Thus, approximate
analytical expressions are derived for eigenvalues and eigenfunctions. The energies are examined with respect to two approximation parameters,
$\leftarrowm$ and $\nuu$. The original approximations are recovered for certain special values of these two parameters. This offers a simple effective
scheme for these and other relevant potentials in quantum mechanics.
\\ \\
\end{abstract}
\betaegin{keyword}
Manning-Rosen potential, P\"oschl-Teller potential, Nikiforov-Uvarov method, Greene-Aldrich approximation, Pekeris approximation,
ro-vibrational energy.
\end{keyword}
\end{frontmatter}
\sigmaection{Introduction}
The construction of a general, universal potential energy function for molecules has been at the forefront of research
activity in chemical, molecular and solid-state physics, as it carries the relevant information for a given molecule.
Such functions are needed as input in various areas including spectroscopy, molecule-molecule collisions, molecular simulation,
dynamical studies, thermodynamic properties, etc.
There are non-trivial challenges in developing such an energy-distance relationship, due to which its general
form still remains elusive. The publication of the famous exponential Morse potential about 90 years ago has inspired a broad
range of works along this direction having varying degrees of sophistication, accuracy and flexibility. A vast number of
such functions have been generated over the years by a number of researchers. Generally speaking, as the number of parameters
in the analytic potential function increases, the better the fit to the experimental data becomes.
The exponential Manning-Rosen (MR) potential \cite{Manning.Rosen},
\betaegin{equation}\leftarrowbel{pot.MR}
V=V_{MR}(r)=\frac{\hbar^2}{2\muu b^2} \left[\frac{\alpha(\alpha-1)e^{-2r/b}}{(1-e^{-r/b})^2}-\frac{Ae^{-r/b}}{1-e^{-r/b}}\right],
\end{equation}
has played the role of an important mathematical model in the context of molecular vibration and rotation. It has relevance
to a multitude of bound and resonance-state problems in physics. This expression contains two dimensionless parameters $A$
(signifying the strength) and $\alphalpha$ (constant), whereas the screening parameter $b$ (having a dimension of length) is
connected to the range of potential. Some interesting properties include (i) it is invariant under transformation $\alphalpha
\leftrightarrow \alphalpha -1$ (ii) this reduces to the celebrated short-range Hulth\'en potential for $\alphalpha=0$ or 1 (iii) a
relative minimum occurs for $r=b \ln \left[ 1+ \fracrac{2 \alphalpha (\alphalpha -1)}{A} \right]$, when $\alphalpha > 1$ and $A > 0$.
The Schr\"odinger equation for the so-called $s$ (non-zero $\ell$) states can be obtained \emph{exactly}, by a number of
elegant and attractive formalisms, such as direct factorization method, Feynman path-integral formalism, standard
function analysis leading to wave functions expressed in terms of hyper-geometric functions, a tridiagonal matrix
representation \cite{diaf2005,MR, chen2007}, etc. However, the same for $\ell \neq 0$ states is yet to be found in closed
analytic form, giving rise to a number of notable approximate schemes. Thus, arbitrary $\ell$ states were calculated by
means of an approximation of centrifugal term: $\fracrac{1}{r^2} \alphapprox \fracrac{1}{b^2} \fracrac{e^{-r/b}}{(1-e^{-r/b})^2}$ in
short range \cite{qiang2007}, similar along the lines of familiar Pekeris scheme. In the literature, other methods
are also available, such as, a super-symmetric shape invariance formalism along with a function analysis \cite{chen2009},
$1/r^2$ approximated by a term having 3 adjustable parameters \cite{MR3}, Nikiforov-Uvarov (NU) method with
an approximation of the $1/r^2$ term \cite{MR4.SMIkhdair}, path-integral formalism approached by a Duru-Kleinert method
\cite{diaf2011}. In \cite{hady2011}, the author used Laguerre and oscillator bases for tridiagonalization of relevant
Hamiltonian and a Gauss quadrature to calculate potential matrix elements. Other significant works are: J-matrix method
\cite{nasser2013} with a proper expansion for $1/r^2$, a numerical integration procedure in MATHEMATICA \cite{lucha1999},
generalized pseudospectral method \cite{MR9}, etc. While most works focused on bound states, scattering states
\cite{chen2007,nasser2013} were also considered. Moreover, properties such as oscillator strengths, multipole moments,
transition probabilities for certain states were reported in the literature \cite{nasser2013}. It has been probed in
higher dimension \cite{gu2011}.
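As a quick numerical illustration of property (iii) above, the following short Python sketch evaluates $V_{MR}$ for the parameter set used later in Fig.~\ref{Fig.1approximation.MR} ($\hbar=\mu=1$, $\alpha=1.5$, $b=1/0.05$, $A=2b$) and verifies that the numerically located minimum coincides with $r=b \ln \left[ 1+ \frac{2 \alpha (\alpha -1)}{A} \right]$; the parameter values are taken from that figure caption and serve only as an example.
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize_scalar

hbar = mu = 1.0
alpha, b = 1.5, 1.0 / 0.05
A = 2 * b                        # parameter choice of Fig. 1

def V_MR(r):
    s = np.exp(-r / b)
    return hbar**2 / (2 * mu * b**2) * (
        alpha * (alpha - 1) * s**2 / (1 - s)**2 - A * s / (1 - s))

r_formula = b * np.log(1 + 2 * alpha * (alpha - 1) / A)
r_numeric = minimize_scalar(V_MR, bounds=(1e-3, 10 * b), method="bounded").x
print(r_formula, r_numeric)      # the two values agree closely
\end{verbatim}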
Another important diatomic molecular potential is the P\"oschl-Teller (PT) \cite{Poschl.Teller} like model, given by,
\betaegin{equation}\leftarrowbel{pot.PT}
V=V_{PT}(r)=\frac{\xi_1-\xi_2\cosh(\alpha r)}{\sigmainh^2(\alpha r)},
\end{equation}
where $r$ denotes the internuclear distance, $\xi_1, \xi_2$ are two parameters controlling the potential well, and $\alphalpha$
governs the range of potential. Its bound states for arbitrary quantum numbers were expressed in terms of hyper-geometric
functions $_2 F_1 (a,b,c; z)$, by employing an approximation to $1/r^2$ \cite{PT}. One of its variants, the so-called
Scarf potential was analyzed by expanding the $1/r^2$ term around the minimum equilibrium point \cite{qiang2010}.
A comparison of Pekeris- as well as Greene-Aldrich-like approximations for the centrifugal term, in the context of ro-vibrational states,
has been presented lately \cite{PT2.epjp2020, horchani2020}. The scattering states have also been discussed \cite{pekeris.ap2, you2013}.
Thus it appears that there is a preponderance of approaches which involve a well-designed approximation for the centrifugal term. Broadly
speaking, two major routes have gained popularity over the years, namely the Pekeris- \cite{Pekeris,pekeris.badawi,pekeris.ap3,pekeris.ap4}
and Greene-Aldrich-type
approximations, along with their several variations \cite{greene}. The primary objective of this work is to
present an extension/modification of such schemes for the two potentials mentioned in Eqs.~\ref{pot.MR} and \ref{pot.PT}.
The new proposed scheme has been recently applied successfully for energy and thermodynamic analysis of Deng-Fan molecular potential \cite{nath2021}.
Towards this end, we will express the centrifugal term as a linear combination of two approximations, which (as the discussion below shows)
offers an accurate alternative approximation to the centrifugal term, both near the origin ($r=0$) and around any other point ($r=r_0$).
This new intuitive approximation is succinctly discussed as functions of $r$, $r_0$ and two approximating parameters, namely, $\leftarrowm$, $\nuu$.
The corresponding energy spectra generated from our current approximation are then critically compared with the available established
approximations for the centrifugal term, for some representative states. This is done for both MR and PT potentials.
The article is organized as follows. Section \ref{Sec.Exac}, at first, provides the formal solutions of Schr\"odinger equation within
the NU method. The eigenvalues and eigenfunctions for MR and PT within the various approximations of centrifugal term, are derived
in Secs.~\ref{Sec.Examples1} and \ref{Sec.Examples2} respectively. The associated energy spectra are analyzed in Sec.~\ref{Sec.Results}.
Moreover, the effect of the approximation parameters $\leftarrowm$ and $\nuu$ on the energy spectra of the MR and PT potentials is also examined there.
Finally, a few comments are made in Sec.~\ref{Sec.Conclusion}.
\sigmaection{Exact solution of the Schr\"odinger equation by Nikiforov-Uvarov Method}\leftarrowbel{Sec.Exac}
Let us consider the Schr\"odinger equation of a diatomic molecule,
\betaegin{equation}\leftarrowbel{Schro.Eq}
-\frac{\hbar^2}{2\muu}\nuablala^2\pisi+V({\betaf r})\pisi=E\pisi,
\end{equation}
in presence of MR ($V({\betaf r}) = V_{MR}(r))$ or PT ($V({\betaf r}) = V_{PT}(r))$ potential of Eq.~(\ref{pot.MR}) and
Eq.~(\ref{pot.PT}) respectively. Here,
\betaegin{equation}
\nuablala^2=\frac{1}{r^2}\frac{\piartial}{\piartial r}\left(r^2\frac{\piartial}{\piartial r}\right)+\frac{1}{r^2\sigmain\theta}\frac{\piartial}{\piartial \theta}\left(\sigmain\theta\frac{\piartial}{\piartial \theta}\right)+\frac{1}{r^2\sigmain^2\theta}\frac{\piartial^2}{\piartial \pihi^2},
\end{equation}
and $\muu$ is the reduced mass of a diatomic molecule, $r$ is the internuclear distance. The MR potential, $V_{MR}$ has a minimum at
$r=b\ln\left[1+\frac{2\alpha(\alpha-1)}{A}\right]$, whereas the same occurs at $r=\frac{1}{\alpha}\tanh^{-1}\left(\sigmaqrt{\frac{2\sigmaqrt{\xi_1^2-\xi_2^2}}{\xi_1+\sigmaqrt{\xi_1^2-\xi_2^2}}}\right)$ in case of
$V_{PT}$. Let
\betaegin{equation}
\pisi({\betaf r})=\frac{R(r)}{r}Y_l^m(\theta,\pihi),
\end{equation}
be the solution of Eq.~(\ref{Schro.Eq}). Then one obtains,
\betaegin{equation}\leftarrowbel{Eq.R}
\frac{d^2R}{dr^2}+\left[\frac{2\muu}{\hbar^2}E-\frac{2\muu }{\hbar^2}V(r)-\frac{l(l+1)}{r^2}\right]R=0,
\end{equation}
and
\betaegin{equation}\leftarrowbel{Ylm}
Y_{l}^{m}=\left[\frac{(2l+1)(l-|m|)!}{4\pii(l+|m|)!}\right]^{\frac{1}{2}}\,P_{l}^{|m|}(\cos\theta)\,e^{im\pihi},
\end{equation}
where $l(l+1)$ is the separation constant. To find the general solution of resulting radial Schr\"odinger equation obtained in
Eq.~(\ref{Eq.R}), we will use a transformation $s=e^{-r/b}$ for MR and $s=\tanh^2(\alpha r/2)$ for PT potentials respectively. Moreover,
we have considered two sets of bases, \emph{viz.},
(i) $\left\{1,\frac{e^{-r/b}}{1-e^{-r/b}},\frac{e^{-2r/b}}{(1-e^{-r/b})^2}\right\}$, for MR and
(ii) $\left\{1,\frac{1}{\sigmainh^2\alpha r},\frac{\cosh \alpha r}{\sigmainh^2\alpha r}\right\}$, for PT potential, for which the centrifugal term is approximated
in different forms, such as Greene-Aldrich \cite{greene} and Pekeris-type \cite{Pekeris}. Then the Schr\"odinger equation becomes a second-order
differential equation of the form given below,
\betaegin{equation}\leftarrowbel{Eq.NU.gen}
\frac{d^2R}{ds^2}+\frac{\widetilde{\tau}(s)}{\sigmaigma(s)}\frac{dR}{ds}+\frac{\widetilde{\sigmaigma}(s)}{\sigmaigma^2(s)}R=0,
\end{equation}
where $\widetilde{\tau}(s)$, $\sigmaigma(s)$, $\widetilde{\sigmaigma}(s)$ are polynomials in $s$ of degree one, two and two respectively.
If we let,
\betaegin{equation}
R(s)=\pihi_1(s)\pihi_2(s),
\end{equation}
be the solution of Eq.~(\ref{Eq.NU.gen}), then we obtain \cite{NU},
\betaegin{equation}\leftarrowbel{Eq.y}
\sigmaigma(s)\pihi_2^{''}(s)+\tau(s)\pihi'_2(s)+\nuu\, \pihi_2(s)=0,
\end{equation}
and
\betaegin{equation}
\betaegin{array}{l}
\pihi_1(s)=\deltaisplaystyle e^{\int\frac{\pii(s)}{\sigmaigma(s)}ds},
\end{array}
\end{equation}
where
\betaegin{equation}
\betaegin{array}{ll}\leftarrowbel{pi.sigma}
\pii(s)&=\frac{\sigmaigma'(s)-\widetilde{\tau}(s)}{2}\pim\sigmaqrt{\left(\frac{\sigmaigma'(s)-\widetilde{\tau}(s)}{2}\right)^2-\widetilde{\sigmaigma}(s)+k\,\sigmaigma(s)},\\
\tau(s)&=\widetilde{\tau}(s)+2\pii(s),~
\betaegin{array}r{\nuu}=k+\pii'(s),
\end{array}
\end{equation}
$\betaegin{array}r{\nuu}$ and $k$ are real constants. Since $\pii(s)$ is a polynomial in $s$, we have to find $k$ in such a way that, $\left(\frac{\sigmaigma'(s)-\widetilde{\tau}(s)}{2}\right)^2-\widetilde{\sigmaigma}(s)+k\,\sigmaigma(s)$ is a square of a polynomial in $s$. Then the solutions of
Eq.~(\ref{Eq.y}) are given by,
\betaegin{equation}
\pihi_{2,n_r}(s)=\frac{1}{\rho(s)}\frac{d^{n_r}}{ds^{n_r}}\left[\sigmaigma^{n_r}(s)\rho(s)\right],
\end{equation}
and eigenvalues are obtained as,
\betaegin{equation}\leftarrowbel{eigen.nu}
\nuu_{n_r}=-{n_r}\,\tau'(s)-\frac{{n_r}({n_r}-1)}{2}\sigmaigma^{''}(s),~{n_r}=0,1,2,\deltaots,
\end{equation}
where
\betaegin{equation}
\betaegin{array}{l}
\rho(s)=\deltaisplaystyle \left[\sigmaigma(s)\right]^{-1}\,e^{\int\frac{\tau(s)}{\sigmaigma(s)}ds}.
\end{array}
\end{equation}
\sigmaection{NU method for MR potential}\leftarrowbel{Sec.Examples1}
\sigmaubsection{Approximation 1: Greene-Aldrich-type}
Following \cite{qiang2007,chen2009,MR4.SMIkhdair,greene,wei2008,MR7.nc,MR8,qiang2009,MR10.qdot,MR11.thermal,MR12.scattering}, at first, let us consider,
\betaegin{equation}
\betaegin{array}{ll}\leftarrowbel{greene.app1}
\frac{1}{r^2}&\alphapprox f_1(r)=\frac{1}{b^2}\left(x_{11}+\frac{x_{21}e^{-r/b}}{1-e^{-r/b}}+\frac{x_{31}e^{-2r/b}}{(1-e^{-r/b})^2}\right),
\end{array}
\end{equation}
where
\betaegin{equation}
x_{11}= \frac{1}{12},x_{21}=1,x_{31}=1,
\end{equation}
when $ r/b\ll 1$. In the limiting case, $\lim\limits_{b\rightarrow \infty}\frac{1}{b^2}\left(\frac{1}{12}+\frac{s}{1-s}+\frac{s^2}{(1-s)^2}\right)=\frac{1}{r^2}$. Therefore, for a fixed $b$, Eq.~(\ref{greene.app1}) is a good approximation near $r=0$. It is to be mentioned that, for $x_{11}=0,x_{21}=x_{31}=1$, this approximation corresponds to that of \cite{qiang2007,wei2008}, whereas for $x_{11}=0,x_{21}=e^{1/b},x_{31}=1$, it refers to \cite{MR8,qiang2009}.
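The quality of this approximation near the origin is easily checked; the sketch below compares $1/r^2$ with $f_1(r)$ of Eq.~(\ref{greene.app1}) for a representative screening length, $b=1/0.05$ as in Fig.~\ref{Fig.1approximation.MR}. The agreement is excellent for $r$ up to about $b$ and deteriorates at larger $r$.
\begin{verbatim}
import numpy as np

b = 1.0 / 0.05
x11, x21, x31 = 1.0 / 12, 1.0, 1.0       # Greene-Aldrich-type coefficients

def f1(r):
    s = np.exp(-r / b)
    return (x11 + x21 * s / (1 - s) + x31 * s**2 / (1 - s)**2) / b**2

for r in [0.1, 1.0, 20.0, 100.0]:
    print(r, 1.0 / r**2, f1(r))          # close for small r/b, poorer beyond
\end{verbatim}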
\sigmaubsection{Approximation 2: Pekeris-type}
In another attempt, $\frac{1}{r^2}$ can be expressed \cite{Pekeris,MR3} around $r=r_0$, as,
\betaegin{equation}\leftarrowbel{pekeris.app2}
\frac{1}{r^2}\alphapprox f_2(r)=\frac{1}{b^2}\left(x_{12}+\frac{x_{22}e^{-r/b}}{1-e^{-r/b}}+\frac{x_{32}e^{-2r/b}}{(1-e^{-r/b})^2}\right),
\end{equation}
where
\betaegin{equation}
\betaegin{array}{ll}
x_{12}&=\deltaisplaystyle\frac{3-3u+u^2+\left(2u-6\right)s_0+\left(u+3\right)s_0^2}{u^4},\\
x_{22}&=\deltaisplaystyle\frac{2\left(1-s_0\right)^2}{u^4}\left(3+u+\frac{2u-3}{s_0}\right),\\
x_{32}&=-\deltaisplaystyle\frac{\left(1-s_0\right)^3}{u^4}\left(\frac{3+u}{s_0}+\frac{u-3}{s_0^2}\right),\\
s_0&=e^{-u},~u=\frac{r_0}{b}.
\end{array}
\end{equation}
In particular, if $r_0=b\ln\left[1+\frac{2\alpha(\alpha-1)}{A}\right]$, then the potential value $V_{MR}(r_0)$ is minimum, which is equal
to $-\frac{A^2\hbar^2}{8\muu b^2\alpha(\alpha-1)}$ for $A>0$ and $\alpha\in(-\infty,0)\cup(1,\infty)$.
\sigmaubsection{Approximation 3: Pekeris-type}
In \cite{shd.pla.2008,MR4.SMIkhdair}, the authors considered an approximation of the following form,
\betaegin{equation}\leftarrowbel{approx.app3}
\frac{1}{r^2}\alphapprox f_3(r)=\frac{1}{b^2}\left(x_{13}+\frac{x_{23}e^{-r/b}}{1-e^{-r/b}}+\frac{x_{33}e^{-2r/b}}{\left(1-e^{-r/b}\right)^2}\right),
\end{equation}
where
\betaegin{equation}
\betaegin{array}{ll}
x_{13}&=\frac{12\epsilonilon_1^2-4\epsilonilon_1(2A+3\epsilonilon_1)\log(\epsilonilon_2)+\epsilonilon_3^2\log(\epsilonilon_2)^2}{\epsilonilon_4^2\log(\epsilonilon_2)^4},\\
x_{23}&=\frac{8\epsilonilon_1^2\left[-6\epsilonilon_1+(3A+4\epsilonilon_1)\log(\epsilonilon_2)\right]}{A\epsilonilon_4^2\log(\epsilonilon_2)^4},\\
x_{33}&=-\frac{16\epsilonilon_1^3\left[-3\epsilonilon_1+\epsilonilon_3\log(\epsilonilon_2)\right]}{A^2\epsilonilon_4^2\log(\epsilonilon_2)^4},\\
\epsilonilon_1&=\alpha(\alpha-1),~\epsilonilon_2=1+\frac{2\alpha(\alpha-1)}{A},\\
\epsilonilon_3&= A\epsilonilon_2,~
\epsilonilon_4=b\epsilonilon_3.
\end{array}
\end{equation}
\sigmaubsection{Approximation 4: A linear combination of Greene-Aldrich and Pekeris-type}
Now, we propose a new approximation of the form \cite{nath2021},
\betaegin{equation}\leftarrowbel{app.convex}
\frac{1}{r^2}\alphapprox f_4(r,\leftarrowm,\nuu)=\frac{1}{b^2}\left(x_1+\frac{x_2e^{-r/b}}{1-e^{-r/b}}+\frac{x_3e^{-2r/b}}{(1-e^{-r/b})^2}\right),
\end{equation}
where
\betaegin{equation}
\betaegin{array}{l}
\left(\betaegin{array}{l}x_1\\x_2\\x_3\end{array}\right)=\left(\betaegin{array}{lll}x_{11} & x_{12}&x_{13}\\x_{21} & x_{22}&x_{23}\\x_{31} & x_{32}&x_{33}\end{array} \right)\left(\betaegin{array}{l}\nuu\leftarrowm\\\nuu(1-\leftarrowm)\\1-\nuu\end{array}\right),
\end{array}
\end{equation}
and $\leftarrowm,\nuu$ are dimensionless real constants. In Fig.~\ref{Fig.1approximation.MR}, the centrifugal term under different
approximations defined in Eqs.~(\ref{greene.app1}), (\ref{pekeris.app2}), (\ref{approx.app3}) and (\ref{app.convex}), is displayed. From this figure,
it is clear that, Eq.~(\ref{app.convex}) gives the best approximation amongst all. If $(\leftarrowm,\nuu)=(1,1)$, then Eq.~(\ref{app.convex})
implies Eq.~(\ref{greene.app1}); if $(\leftarrowm,\nuu)=(0,1)$, then it leads to Eq.~(\ref{pekeris.app2}); if $(\leftarrowm,\nuu)=(\leftarrowm,0)$, then it
implies Eq.~(\ref{approx.app3}). The approximation is suitable for negative values of $\leftarrowm$ at large $r$. Furthermore, it is
applicable for all values of $r$.
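The structure of Eq.~(\ref{app.convex}) is a simple matrix-vector product, and the limiting cases quoted above follow at once, as the minimal Python sketch below illustrates. Only the first column of the coefficient matrix (the Greene-Aldrich set) is filled in with its actual values; the second and third columns are placeholders, since the Pekeris-type entries $x_{i2}$ and $x_{i3}$ depend on $\alpha$, $A$ and $b$ through the expressions given above.
\begin{verbatim}
import numpy as np

def f4_coeffs(lam, nu, X):
    """Return (x1, x2, x3) = X @ (nu*lam, nu*(1-lam), 1-nu)."""
    return X @ np.array([nu * lam, nu * (1.0 - lam), 1.0 - nu])

X = np.array([[1.0 / 12, 0.2, 0.3],      # placeholder 2nd and 3rd columns
              [1.0,      0.4, 0.5],
              [1.0,      0.6, 0.7]])

print(f4_coeffs(1.0, 1.0, X))   # -> first column,  Eq. (greene.app1)
print(f4_coeffs(0.0, 1.0, X))   # -> second column, Eq. (pekeris.app2)
print(f4_coeffs(0.7, 0.0, X))   # -> third column,  Eq. (approx.app3), any lambda
\end{verbatim}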
\sigmaubsection{Solution of the MR potential}
Now, under the transformation, $s=e^{-r/b}$, Eq.(\ref{Eq.R}) becomes,
\betaegin{equation}
\betaegin{array}{l}\leftarrowbel{Eq.R.NU.MR}
\frac{d^2R}{ds^2}+\frac{1-s}{s(1-s)}\frac{dR}{ds}+\frac{-A_1s^2+Bs-C}{s^2(1-s)^2}R=0,
\end{array}
\end{equation}
where
\betaegin{equation}
\betaegin{array}{ll}
A_1 &=\betaegin{array}r{Epsilon}^2+A+\alpha(\alpha-1)+l(l+1)(x_3-x_2),\\
B& =2\betaegin{array}r{Epsilon}^2+A-l(l+1) x_2,\\
C& =\betaegin{array}r{Epsilon}^2=l(l+1) x_1-Epsilon,
Epsilon=\frac{2\muu b^2E}{\hbar^2}.
\end{array}
\end{equation}
According to NU method, we obtain a pair of $k$, as given by \cite{ikhdair2008},
\betaegin{equation}
\betaegin{array}{ll}\leftarrowbel{kpitaunu.MR}
k_{\pim}=A-l(l+1) x_2\pim\betaegin{array}r{Epsilon}\sigmaqrt{(2\alpha-1)^2+4l(l+1)x_3}.
\end{array}
\end{equation}
Applying Eq.~(\ref{kpitaunu.MR}) in Eq.~(\ref{pi.sigma}), we obtain,
\betaegin{equation}
\betaegin{array}{ll}\leftarrowbel{pi.tau.nu.MR}
\pii(s)
&=-\frac{s}{2}\pim\left\{\betaegin{array}{lll}\left(\betaegin{array}r{Epsilon}-\frac{1}{2}\sigmaqrt{(2\alpha-1)^2+4l(l+1) x_3}\right)s+\betaegin{array}r{Epsilon},&k=k_+,& k_+-B>0\\\left(\betaegin{array}r{Epsilon}+\frac{1}{2}\sigmaqrt{(2\alpha-1)^2+4l(l+1) x_3}\right)s+\betaegin{array}r{Epsilon},&k=k_-,& k_--B>0\\
\left(\betaegin{array}r{Epsilon}-\frac{1}{2}\sigmaqrt{(2\alpha-1)^2+4l(l+1) x_3}\right)s-\betaegin{array}r{Epsilon},&k=k_+,& k_+-B<0\\\left(\betaegin{array}r{Epsilon}+\frac{1}{2}\sigmaqrt{(2\alpha-1)^2+4l(l+1) x_3}\right)s-\betaegin{array}r{Epsilon},&k=k_-,& k_--B<0\end{array} \right\}.
\end{array}
\end{equation}
For the potential in Eq.~(\ref{pot.MR}), we have chosen $k=k_-$ with $k_--B<0$, and selected,
\betaegin{equation}\leftarrowbel{rho.phi}
\pii(s)=-\frac{s}{2}-\left(\betaegin{array}r{Epsilon}+\frac{1}{2}\sigmaqrt{(2\alpha-1)^2+4l(l+1) x_3}\right)s+\betaegin{array}r{Epsilon}.
\end{equation}
This gives,
\betaegin{equation}
\betaegin{array}{l}
\rho(s)=s^{2\betaegin{array}r{Epsilon}}(1-s)^{2L-1},\\
\pihi_1(s)=s^{\betaegin{array}r{Epsilon}}(1-s)^L,
\end{array}
\end{equation}
and
\betaegin{equation}
\betaegin{array}{ll}
\pihi_{2,n_r}&=({n_r})!\,P_{n_r}^{(2\betaegin{array}r{Epsilon},2L-1)}(1-2s),\\
&=\frac{\Gamma\left(n_r+2\betaegin{array}r{Epsilon}+1\right)}{\Gamma\left(2\betaegin{array}r{Epsilon}+1\right)}{}_2F_1\left(-n_r,n_r+2\betaegin{array}r{Epsilon}+2L;2\betaegin{array}r{Epsilon}+1;s\right),
\end{array}
\end{equation}
where
\betaegin{equation}
L=\frac{1}{2}+\frac{1}{2}\sigmaqrt{(2\alpha-1)^2+4l(l+1) x_3}.
\end{equation}
The eigenvalues, $E_{n_r,l}^{MR}$, will then be generated from the following relation,
\betaegin{equation}\leftarrowbel{deri.tau}
k+(2n_r+1)\pii'(s)=n_r^2.
\end{equation}
Finally, we obtain the ro-vibrational energy spectrum of MR potential, as a function of $(\hbar,\muu),(\alpha,b),(\leftarrowm,\nuu)$, as given below,
\betaegin{equation}\leftarrowbel{Energy.MR}
\betaegin{array}{ll}
E_{n_r,l}^{MR}
&=x_5-\frac{\hbar^2}{2\muu b^2}\left(\frac{x_4^2}{(n_r+L)^2}+\frac{(n_r+L)^2}{4}\right),
\end{array}
\end{equation}
where
\betaegin{equation}
\betaegin{array}{ll}
x_4=\frac{1}{2}\left[A+\alpha(\alpha-1)+l(l+1)(x_3-x_2)\right],x_5=\frac{\hbar^2}{2\muu b^2}(l(l+1) x_1+x_4).
\end{array}
\end{equation}
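As a quick consistency check of Eq.~(\ref{Energy.MR}), the Python sketch below evaluates it for the Greene-Aldrich set $(x_1,x_2,x_3)=(1/12,1,1)$ with $\hbar=\mu=1$, $\alpha=1.5$, $b=1/0.025$. The potential strength is taken as $A=2b$; this is an assumption made here (the caption of Table~\ref{Table1} does not quote $A$), following the choice of Fig.~\ref{Fig.1approximation.MR}. With it, the value obtained for $(n_r,l)=(1,1)$ reproduces the corresponding entry of Table~\ref{Table1}.
\begin{verbatim}
import numpy as np

hbar = mu = 1.0
alpha, b = 1.5, 1.0 / 0.025
A = 2 * b                          # assumed; not quoted in Table 1
x1, x2, x3 = 1.0 / 12, 1.0, 1.0    # Greene-Aldrich set of Eq. (greene.app1)

def E_MR(nr, l):
    L  = 0.5 + 0.5 * np.sqrt((2 * alpha - 1)**2 + 4 * l * (l + 1) * x3)
    x4 = 0.5 * (A + alpha * (alpha - 1) + l * (l + 1) * (x3 - x2))
    x5 = hbar**2 / (2 * mu * b**2) * (l * (l + 1) * x1 + x4)
    return x5 - hbar**2 / (2 * mu * b**2) * (x4**2 / (nr + L)**2
                                             + (nr + L)**2 / 4)

print(E_MR(1, 1))   # approximately -0.036913, cf. Table 1
\end{verbatim}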
Therefore, the radial wave function of MR potential can be expressed as \cite{Gradshteyn},
\betaegin{equation}
R_{n_r}^{MR}(r)=N_{{n_r},l}\,s^{\betaegin{array}r{Epsilon}}(1-s)^{L}\,{}_2F_1\left(-{n_r},{n_r}+2\betaegin{array}r{Epsilon}+2L;2\betaegin{array}r{Epsilon}+1;s\right),
\end{equation}
where
\betaegin{equation}
N_{{n_r},l}=\left[\frac{2\betaegin{array}r{Epsilon}(n_r+\betaegin{array}r{Epsilon}+L)\Gamma(n_r+2\betaegin{array}r{Epsilon}+1)\Gamma(n_r+2\betaegin{array}r{Epsilon}+2L)}{({n_r})!b(n_r+\betaegin{array}r{Epsilon})\Gamma(n_r+2L)\left[\Gamma(2\betaegin{array}r{Epsilon}+1)\right]^2}\right]^{\frac{1}{2}},
\end{equation}
is the normalization constant, to be obtained from,
\betaegin{equation}
\deltaisplaystyle\int|\pisi_{n_r,l,m}^{MR}({\betaf r})|^2\,d{\betaf r}=1,~ d{\betaf r}=r^2\sigmain\theta\,dr\,d\theta\,d\pihi.
\end{equation}
Finally, the explicit form of eigenfunctions of MR potential can be written as,
\betaegin{equation}
\pisi_{n_r,l,m}^{MR}({\betaf r})=\left[\frac{(2l+1)(l-|m|)!}{4\pii(l+|m|)!}\right]^{\frac{1}{2}}\frac{N_{{n_r},l}}{r}\,s^{\betaegin{array}r{Epsilon}}(1-s)^{L}\,{}_2F_1\left(-{n_r},{n_r}+2\betaegin{array}r{Epsilon}+2L;2\betaegin{array}r{Epsilon}+1;s\right)\,P_{l}^m(\cos\theta)\,e^{im\pihi},~s=e^{-r/b}.
\end{equation}
It is to be noted that the approximation (\ref{app.convex}) is well defined {\betaf for $\leftarrowm, \nuu$ satisfying,}
\betaegin{equation}
\betaegin{array}{l}\leftarrowbel{relation1}
(2\alpha-1)^2+4l(l+1)\left(x_{31}\nuu\leftarrowm+x_{32}\nuu(1-\leftarrowm)+x_{33}(1-\nuu)\right)>0,\\
x_{11}\nuu\leftarrowm+x_{12}\nuu(1-\leftarrowm)+x_{13}(1-\nuu)>0.
\end{array}
\end{equation}
\sigmaection{NU method for PT potential}\leftarrowbel{Sec.Examples2}
\sigmaubsection{Approximation 1: Greene-Aldrich type}
At first, we consider an approximation \cite{shd.ijmpa2008} of the form,
\betaegin{equation}\leftarrowbel{ap1}
\frac{1}{r^2}\alphapprox
f_1(r)=\alpha^2\left(x_{11}+\frac{x_{21}}{\sigmainh^2\alpha r}+\frac{x_{31}\cosh\alpha r}{\sigmainh^2\alpha r}\right),
\end{equation}
where
\betaegin{equation}
x_{11}=0,x_{21}=\frac{1}{2},x_{31}=\frac{1}{2}.
\end{equation}
\sigmaubsection{Approximation 2: Greene-Aldrich type}
Next let us examine another Greene-type approximation due to \cite{greene,PT2.epjp2020},
\betaegin{equation}\leftarrowbel{ap2}
\frac{1}{r^2}\alphapprox f_2(r)=
\alpha^2\left(x_{12}+\frac{x_{22}}{\sigmainh^2\alpha r}+\frac{x_{32}\cosh\alpha r}{\sigmainh^2\alpha r}\right),
\end{equation}
where
\betaegin{equation}
x_{12}=\frac{1}{12},x_{22}=\frac{1}{2},x_{32}=\frac{1}{2}.
\end{equation}
\sigmaubsection{Approximation 3: Pekeris-type}
The Pekeris-type approximation, around $r=r_0$ is given by \cite{PT2.epjp2020,Pekeris,pekeris.badawi,pekeris.ap2,pekeris.ap3,pekeris.ap4},
\betaegin{equation}\leftarrowbel{ap3}
\frac{1}{r^2}\alphapprox f_3(r)=
\alpha^2\left(x_{13}+\frac{x_{23}}{\sigmainh^2\alpha r}+\frac{x_{33}\cosh\alpha r}{\sigmainh^2\alpha r}\right),
\end{equation}
where
\betaegin{equation}
\betaegin{array}{ll}
x_{13}=\deltaisplaystyle\frac{1}{u^4}\left(3+u^2-3u\coth u\right),\\
x_{23}=\deltaisplaystyle\frac{1}{4u^4}\left(18+6\cosh(2u)-23u\coth u-u\cosh(3u) \ \muathrm{cosech} \ u\right),\\
x_{33}=\deltaisplaystyle\frac{1}{u^4}\left(4u+2u\cosh(2u)-3\sigmainh(2u)\right) \ \muathrm{cosech} \ u,\\
u=\alpha r_0.
\end{array}
\end{equation}
In particular, if $r_0=\frac{1}{\alpha}\tanh^{-1}\left(\sigmaqrt{\frac{2\sigmaqrt{\xi_1^2-\xi_2^2}}{\xi_1+\sigmaqrt{\xi_1^2-\xi_2^2}}}\right)$, then the potential has a
minimum at $r=r_0$.
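This location of the minimum is easily confirmed numerically, as in the short check below, using the parameter set of Fig.~\ref{Fig.2approximation.PT}, $\xi_1=4$, $\xi_2=2$, $\alpha=0.05$.
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize_scalar

xi1, xi2, alpha = 4.0, 2.0, 0.05

def V_PT(r):
    return (xi1 - xi2 * np.cosh(alpha * r)) / np.sinh(alpha * r)**2

w = np.sqrt(xi1**2 - xi2**2)
r0 = np.arctanh(np.sqrt(2 * w / (xi1 + w))) / alpha
r_num = minimize_scalar(V_PT, bounds=(1e-3, 200.0), method="bounded").x
print(r0, r_num)                 # both values are close to 39.8
\end{verbatim}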
\sigmaubsection{Approximation 4: A linear combination of Greene-Aldrich and Pekeris-type}
Now let us discuss a linear combination of three approximations in Eqs.~(\ref{ap1}), (\ref{ap2}), (\ref{ap3}), proposed lately \cite{nath2021},
\betaegin{equation}\leftarrowbel{ap4}
\frac{1}{r^2}\alphapprox f_4(r,\leftarrowm,\nuu)=
\alpha^2\left(x_{1}+\frac{x_{2}}{\sigmainh^2\alpha r}+\frac{x_{3}\cosh\alpha r}{\sigmainh^2\alpha r}\right),
\end{equation}
where
\betaegin{equation}
\left(\betaegin{array}{c}x_1\\x_2\\x_3\end{array}\right)=\left(\betaegin{array}{ccc}x_{11} & x_{12} & x_{13}\\
x_{21} & x_{22} & x_{23}\\
x_{31} & x_{32} & x_{33}\end{array}\right)\left(\betaegin{array}{l}\nuu\leftarrowm\\\nuu(1-\leftarrowm)\\1-\nuu\end{array}\right),
\end{equation}
and $\leftarrowm,\nuu$ are real constants. In Fig.~\ref{Fig.2approximation.PT}, the centrifugal term under different approximations, defined in
Eqs.~(\ref{ap1}), (\ref{ap2}),
(\ref{ap3}) and (\ref{ap4}) are displayed. It is quite clear that, the approximation of Eq.~(\ref{ap4}) is the best of all. When $(\leftarrowm,\nuu)=(1,1)$,
Eq.~(\ref{ap4}) implies Eq.~(\ref{ap1}); $(\leftarrowm,\nuu)=(0,1)$ means (\ref{ap2}); $(\leftarrowm,\nuu)=(\leftarrowm,0)$, leads to (\ref{ap3}). It may be noted that,
Eq.~(\ref{ap4}) works well for negative $\nuu$ in the large-$r$ region. The advantage is that, for arbitrary values of $r$, this approximation
provides the best representation amongst all, for suitably chosen values of $\leftarrowm$ and $\nuu$.
\sigmaubsection{Solution of the PT potential}
Under the transformation, $s=\tanh^2\frac{\alpha r}{2}$, the radial Schr\"odinger equation in (\ref{Eq.R}) becomes,
\betaegin{equation}
\betaegin{array}{l}\leftarrowbel{Eq.R.NU.PT}
\frac{d^2R}{ds^2}+\frac{1-3s}{s(1-s)}\frac{dR}{ds}+\frac{-As^2+Bs-C}{s^2(1-s)^2}R=0,
\end{array}
\end{equation}
where
\betaegin{equation}
\betaegin{array}{ll}
A &=\frac{\muu}{2\alpha^2\hbar^2}(\xi_1+\xi_2)+\frac{l(l+1)}{4}(x_3-x_2),\\
B &=\frac{2\muu}{\alpha^2\hbar^2}\left(E+\frac{\xi_1}{2}\right)+\frac{l(l+1)}{2}(x_2-2x_1),\\
C &=\frac{\muu}{2\alpha^2\hbar^2}(\xi_1-\xi_2)+\frac{l(l+1)}{4}(x_3+x_2). \\
\end{array}
\end{equation}
Similarly, we obtain a pair of $k$, which can be defined by,
\betaegin{equation}\leftarrowbel{kpitaunu}
k_{\pim}=B-2C\pim a\sigmaqrt{C},~a=\sigmaqrt{1+4(A-B+C)}.
\end{equation}
This produces,
\betaegin{equation}
\betaegin{array}{ll}\leftarrowbel{pi.tau.nu.PT}
\pii(s)&=\frac{s}{2}\pim\left\{\betaegin{array}{lll}\left|\sigmaqrt{C}-\frac{a}{2}\right|s+\sigmaqrt{C},&k=k_+,& k_+-B>0\\\left(\sigmaqrt{C}+\frac{a}{2}\right)s+\sigmaqrt{C},&k=k_-,& k_--B>0\\
\left|\sigmaqrt{C}-\frac{a}{2}\right|s-\sigmaqrt{C},&k=k_+,& k_+-B<0\\\left(\sigmaqrt{C}+\frac{a}{2}\right)s-\sigmaqrt{C},&k=k_-,& k_--B<0\end{array} \right\}.
\end{array}
\end{equation}
Since $\tau'(s)<0$, for PT potential, we choose $k=k_-=B-2C-a\sigmaqrt{C}$, where $k_--B<0$, and select $\pii(s)$ as,
\betaegin{equation}
\betaegin{array}{ll}
\pii(s)&=\left(\frac{1-a}{2}-\sigmaqrt{C}\right)s+\sigmaqrt{C}.
\end{array}
\end{equation}
Then, we find,
\betaegin{equation}
\betaegin{array}{l}\leftarrowbel{rho.phi.PT}
\tau(s)=1+2\sigmaqrt{C}-\left(2+a+2\sigmaqrt{C}\right)s,\\
\rho(s)=s^{2\sigmaqrt{C}}(1-s)^{a},\\
\pihi_1(s)=s^{\sigmaqrt{C}}(1-s)^{\frac{a-1}{2}},
\end{array}
\end{equation}
along with
\betaegin{equation}
\betaegin{array}{ll}
\pihi_{2,n_r}(s)&=s^{-2\sigmaqrt{C}}(1-s)^{-a}\frac{d^{n_r}}{ds^{n_r}}\left[s^{{n_r}+2\sigmaqrt{C}}(1-s)^{{n_r}+a}\right]=({n_r})!\,P_{n_r}^{(2\sigmaqrt{C},a)}(1-2s).
\end{array}
\end{equation}
Then the ro-vibrational energy $E_{n_r,l}^{PT}$ of PT potential can be obtained from the following relation,
\betaegin{equation}\leftarrowbel{deri.tau.PT}
k+(2{n_r}+1)\pii'(s)={n_r}^2+2{n_r}.
\end{equation}
Accordingly, one gets the ro-vibrational energy spectrum, as a function of $(\hbar,\muu),(\xi_1,\xi_2,\alpha),(\leftarrowm,\nuu),(n_r,l)$, as below,
\betaegin{equation}\leftarrowbel{Energy.PT}
E_{n_r,l}^{PT}=x_4-\frac{\alpha^2\hbar^2}{2\muu}(n_r+L)^2,
\end{equation}
where
\betaegin{equation}
\betaegin{array}{ll}
x_4&=-\frac{\xi_1}{2}+\frac{l(l+1)\alpha^2\hbar^2}{4\muu}\left(2x_1-x_2\right)+\frac{\alpha^2\hbar^2}{2\muu}\left[\frac{1}{4}+A+C\right],\\
L&=\frac{1}{2}+\sigmaqrt{C}-\sigmaqrt{1+A}.
\end{array}
\end{equation}
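As in the MR case, Eq.~(\ref{Energy.PT}) can be checked directly. The Python sketch below evaluates it for the first approximation set, $(x_1,x_2,x_3)=(0,1/2,1/2)$, with $\hbar=\mu=1$, $\xi_1=4$, $\xi_2=2$, $\alpha=0.05$; the value obtained for $(n_r,l)=(1,1)$ agrees with the first entry of Table~\ref{Table2}.
\begin{verbatim}
import numpy as np

hbar = mu = 1.0
xi1, xi2, alpha = 4.0, 2.0, 0.05
x1, x2, x3 = 0.0, 0.5, 0.5         # Approximation 1, Eq. (ap1)

def E_PT(nr, l):
    A = mu / (2 * alpha**2 * hbar**2) * (xi1 + xi2) + l * (l + 1) / 4 * (x3 - x2)
    C = mu / (2 * alpha**2 * hbar**2) * (xi1 - xi2) + l * (l + 1) / 4 * (x3 + x2)
    x4 = (-xi1 / 2 + l * (l + 1) * alpha**2 * hbar**2 / (4 * mu) * (2 * x1 - x2)
          + alpha**2 * hbar**2 / (2 * mu) * (0.25 + A + C))
    L = 0.5 + np.sqrt(C) - np.sqrt(1 + A)
    return x4 - alpha**2 * hbar**2 / (2 * mu) * (nr + L)**2

print(E_PT(1, 1))   # approximately -0.215609, cf. Table 2
\end{verbatim}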
Therefore, the explicit form of radial wave function can be found to be \cite{Gradshteyn},
\betaegin{equation}
R_{n_r}^{PT}(r)=N_{n_r,l}\,s^{\sigmaqrt{C}}(1-s)^{\frac{a-1}{2}}P_{n_r}^{(2\sigmaqrt{C},a)}(1-2s),
\end{equation}
where
\betaegin{equation}
\betaegin{array}{r}
N_{n_r,l}= \sigmaqrt{\alpha}\Big(\deltaisplaystyle\sigmaum\limits_{m=0}^{n_r}(-1)^{n_r-m}\betainom{n_r+\gamma}{m}\betainom{n_r+\delta}{n_r-m}\frac{\Gamma(n_r-m+\gamma+1)\Gamma(m+\delta+1)\Gamma(n_r+\gamma+\frac{3}{2})}{n_r!\Gamma(n_r+\gamma+\delta+2)\Gamma(n_r-m+\gamma+1)}\\
\deltaisplaystyle\times {}_3F_2(-n_r,2n_r+1+\gamma+\delta,m+1+\delta;n_r-m+\gamma+1,n_r+\gamma+\delta+2;1)\Big)^{-\frac{1}{2}}
\end{array}
\end{equation}
is the normalization constant determined from the following condition,
\betaegin{equation}
\deltaisplaystyle\int \left[R_{n_r}^{PT}(r)\right]^2\,dr=1,
\end{equation}
where
\betaegin{equation}
\gamma=2\sigmaqrt{C}-\frac{1}{2},~\delta=a-2.
\end{equation}
The required eigenfunctions of PT potential are finally expressed as,
\betaegin{equation}
\deltaisplaystyle\pisi_{n_r,l,m}^{PT}({\betaf r})=\left[\frac{(2l+1)(l-|m|)!}{4\pii(l+|m|)!}\right]^{\frac{1}{2}}\frac{N_{n_r,l}}{r}\,s^{\sigmaqrt{C}}(1-s)^{\frac{a-1}{2}}P_{n_r}^{(2\sigmaqrt{C},a)}(1-2s)\,P_{l}^m(\cos\theta)\,e^{im\pihi},~s=\tanh^2\left(\frac{\alpha r}{2}\right).
\end{equation}
It is to be noted that the approximation (\ref{ap4}) is well defined {\betaf for $\leftarrowm, \nuu$ satisfying,}
\betaegin{equation}
\betaegin{array}{l}\leftarrowbel{relation2}
2\muu(\xi_1-\xi_2)+\alpha^2\hbar^2l(l+1)\left[(x_{21}+x_{31})\nuu\leftarrowm+(x_{22}+x_{32})\nuu(1-\leftarrowm)+(x_{23}+x_{33})(1-\nuu)\right]>0,\\
(2x_{11}-x_{21}+x_{31})\nuu\leftarrowm+(2x_{12}-x_{22}+x_{32})\nuu(1-\leftarrowm)+(2x_{13}-x_{23}+x_{33})(1-\nuu)>0
\end{array}
\end{equation}
\sigmaection{Results and discussion}\leftarrowbel{Sec.Results}
In Table~\ref{Table1}, computed energies, $E_{n_r,l}^{MR}$ of the MR potential are given.
The performance of the proposed scheme is illustrated for eight representative states, for five different sets of the ($\leftarrowm$, $\nuu$) pair;
namely, (1,1), (0,1), (1,0), ($-$1.5,1), ($-$2.5,1), which include negative approximation parameters as well. It appears that for all the
states under consideration, the (1,0) set separates out from all the others, which remain in a family of their own. The energies corresponding
to the (1,1) and (0,1) sets in columns 3 and 4 match quite well with references \cite{MR8} and \cite{MR3}, as duly pointed out in the footnotes. {\betaf Additional reference energies are also provided from the numerical work of Lucha and Sch\"oberl \cite{lucha1999} and the generalized pseudospectral method
(GPS) \cite{MR9}}. Similar energies are presented for
PT potential in Table~\ref{Table2}, again for the same eight states as in the previous table. In addition to the first three positive $(\leftarrowm, \nuu)$ parameter
sets of Table~\ref{Table1}, in this case, we consider the negative sets (0.5, $-$1) and (0.5, $-$2). Once again, the energies of column 3, having the (1,1) parameter set,
compare well with those from \cite{shd.ijmpa2008}, as indicated in the footnote. {\betaf These energies are also compared with the accurate results from the GPS method, which has been very successful for a number of model and real systems \cite{MR9,gps,gps2}}. One finds that the current proposed approximation in
Eq.~(\ref{app.convex}) for MR potential fares better for negative $\leftarrowm$ whereas the same for PT potential in Eq.~(\ref{ap4}) works better
for negative $\nuu$.
In order to examine the effects of $\leftarrowm$ and $\nuu$, in Fig.~\ref{Fig.3MRP.Energy}, we have plotted the computed energies of MR potential
with respect to these two parameters. Four states corresponding to $(n_r, l)$ quantum numbers as (1,1), (1,2), (2,1) and (2,2) are displayed.
One sees that, for a given $\leftarrowm$, $E_{n_r,l}^{MR}$ is an increasing function of $\nuu$, and $E_{n_r,l}^{MR}\rightarrow 0$ as
$\nuu$ assumes larger values. Note that negative $\leftarrowm$ values are also considered. The energies marked with red, blue and magenta squares
refer to $(\leftarrowm,\nuu)=(1,1)$, (0,1) and \{(0,0), (1,0)\} respectively. These are in good harmony with the values presented in references
\cite{MR8} and \cite{MR3}. Analogous plots are offered for the PT potential in Fig.~\ref{Fig.4PTP.Energy}. On this
occasion, the energy increases as $\leftarrowm$ increases for a specific $\nuu<0$, while it decreases with increasing $\leftarrowm$ for a fixed $\nuu>0$.
The red, green and magenta squares in the diagram correspond to same $(\leftarrowm, \nuu)$ pairs of previous figure. They recover the energies
of \cite{shd.ijmpa2008} well. Also these energies are found to be in good agreement with \cite{pekeris.ap2}, for the parameter sets provided therein.
\sigmaection{Conclusions}\leftarrowbel{Sec.Conclusion}
In this article, we have introduced simple new approximations to the centrifugal term for both the MR and PT potentials. These are intuitively derived from a
linear combination of the commonly used Greene-Aldrich and Pekeris-type approximations. From this, the original approximations are recovered
for certain special values of the two approximating parameters $\leftarrowm$ and $\nuu$. Approximate analytical expressions are then presented for these two
potentials by the NU method. It is gratifying to note that the new approximations perform quite nicely throughout the whole range of $r$,
whereas the Greene-Aldrich and Pekeris schemes provide superior approximations near the origin $r=0$ and near $r=r_0$ (where the potential is minimum), respectively. Analytical expressions are
presented for eigenvalues and eigenfunctions.
An investigation of the controlling parameters $\leftarrowm$ and $\nuu$ on the energy spectra shows that $E_{n_r,l}^{MR}$ is an increasing function of $\nuu$ subject to the conditions (\ref{relation1}),
whereas $E_{n_r,l}^{PT}$ is an increasing function of $\leftarrowm$ for $\nuu<0$ and a decreasing function of $\leftarrowm$ for $\nuu>0$, subject to the conditions (\ref{relation2}).
For some special cases $(\leftarrowm,\nuu)=(1,1)$, $(\leftarrowm,\nuu)=(0,1)$, and $(\leftarrowm,\nuu)=(\leftarrowm,0)$, energies of MR and PT potentials compare quite favorably
with available literature results. It may be worthwhile to study the performance and efficacy of this approach for other related potentials of
physical and chemical interest. Also its relevance in the thermodynamic studies may be pursued.
\sigmaection*{Acknowledgement}
AKR gratefully acknowledges financial support from MATRICS, DST-SERB, New Delhi (sanction order: MTR/2019/000012). We thank the
anonymous referee for constructive comments and suggestions. \\
\betaegin{thebibliography}{99}
\betaibitem{Manning.Rosen} M.~F.~Manning and N.~Rosen, Phys.~Rev.~{\betaf 44} (1933) 953.
\betaibitem{diaf2005} A.~Diaf, A.~Chouchaoui and R.~J.~Lombard, Ann.~Phys.~ \textbf{317} (2005) 354.
\betaibitem{MR} S.-H.~Dong and J.~Garc\'ia-Ravelo, Phys.~Scr.~{\betaf 75} (2007) 307.
\betaibitem{chen2007} C.-Y.~Chen, F.-L.~Lu and D.-S.~Sun, Phys.~Scr.~ \textbf{76} (2007) 428.
\betaibitem{qiang2007} W.-C.~Qiang and S.-H.~Dong, Phys.~Lett.~A {\betaf 368} (2007) 13.
\betaibitem{chen2009} Z.-Y.~Chen, M.~Li and C.-S.~Jia, Mod.~Phys.~Lett.~A \textbf{24} (2009) 1863.
\betaibitem{MR3} W.~C.~Qiang and S.-H.~Dong, Phys.~Scr.~{\betaf 79} (2009) 045004.
\betaibitem{MR4.SMIkhdair} S.~M.~Ikhdair, Phys.~Scr.~{\betaf 83} (2011) 015010.
\betaibitem{diaf2011} A.~Diaf and C.~Chouchaoui, Phys.~Scr.~{\betaf 84} (2011) 015004.
\betaibitem{hady2011} A.~Abdel-Hady, \emph{Proc. of the 8th Conf. on Nucl. Particle Phys.}, NUPPAC-2011, Hurghada,
Egypt (2011) 131.
\betaibitem{nasser2013} I.~Nasser, M.~S.~Abdelmonem and A.~Abdel-Hady, Mol.~Phys. {\betaf 111} (2013) 1.
\betaibitem{lucha1999} W.~Lucha and F.~F.~Sch\"oberl, Int.~J.~Mod.~Phys.~C \textbf{10} (1999) 607.
\betaibitem{MR9} A.~K.~Roy, Mod.~Phys.~Lett.~A {\betaf 29} (2014) 1450042.
\betaibitem{gu2011} X.-Y.~Gu and S.-H.~Dong, J.~Math.~Chem. \textbf{49} (2011) 2053.
\betaibitem{Poschl.Teller} G.~P\"oschl and E.~Teller, Z.~Phys.~{\betaf 83} (1933) 143.
\betaibitem{PT} S.~H.~Dong, W.~C.~Qiang and J.~Garc\'ia-Ravelo, Int.~J.~Mod.~Phys.~A {\betaf 23} (2008) 1537.
\betaibitem{qiang2010} W.~C.~Qiang and S.-H.~Dong, Int.~J.~Quant.~Chem. {\betaf 110} (2010) 2342.
\betaibitem{PT2.epjp2020} H.~Yanar, A.~Tas, M.~Salti and O.~Aydogdu, Eur.~Phys.~J.~Plus. {\betaf 135} (2020) 292.
\betaibitem{horchani2020} R.~Horchani, H.~Jelassi, A.~N.~Ikot and U.~S.~Okorie, Int.~J.~Quant.~Chem. (2020) e26558.
\betaibitem{pekeris.ap2} W.~C.~Qiang, W.~L.~Chen, K.~Li and G.~F.~Wei, Phys.~Scr.~ \textbf{79} (2009) 025005.
\betaibitem{you2013} Y.~You, F.-L.~Lu, D.-S.~Sun, C.-Y.~Chen and S.-H.~Dong, Few-Body Syst.~ \textbf{54} (2013) 2125.
\betaibitem{Pekeris} C.~L.~Pekeris, Phys.~Rev.~ {\betaf 45} (1934) 98.
\betaibitem{pekeris.badawi} M.~Badawi, N.~Bessis and G.~Bessis, J. Phys. B {\betaf 5} (1972) L157.
\betaibitem{pekeris.ap3} W.~C.~Qiang, J.~Y.~Wu and S.~H.~Dong, Phys.~Scr.~ \textbf{79} (2009) 065011.
\betaibitem{pekeris.ap4} F.~J.~S.~Ferreira and F.~V.~Prudente, Phys.~Lett.~A {\betaf 377} (2013) 3027.
\betaibitem{greene} R.~L.~Greene and C.~Aldrich, Phys.~Rev.~A {\betaf 14} (1976) 2363.
\betaibitem{nath2021} D.~Nath and A.~K.~Roy, Int.~J.~Quant.~Chem. {\betaf 121} (2021) e26616. DOI: 10.1002/qua.26616
\betaibitem{NU} A.~F.~Nikiforov and V.~B.~Uvarov, {\it Special Functions of Mathematical Physics}. (Birkh\"auser, Basel, 1988).
\betaibitem{wei2008} G.~F.~Wei, C.~Y.~Long and S.~H.~Dong, Phys.~Lett.~A {\betaf 372} (2008) 2592.
\betaibitem{MR7.nc} H.~I.~Ahmadov, C.~Aydin, N.~S.~H.~Huseynova and O.~Uzun, Int.~J.~Mod.~Phys.~E {\betaf 22} (2013) 1350072.
\betaibitem{MR8} B.~J.~Falaye, K.~J.~Oyewumi, T.~T.~Ibrahim, M.~A.~Punyasena and C.~A.~Onate, Can.~J.~Phys. {\betaf 91} (2013) 98.
\betaibitem{qiang2009} W.~C.~Qiang, K.~Li and W.~L.~Chen, J.~Phys.~A {\betaf 42} (2009) 205306.
\betaibitem{MR10.qdot} M.~C.~Onyeaju, J.~O.~A.~Idiodi, A.~N.~Ikot, M.~Solaimani and H.~Hassanabadi, J.~Opt. {\betaf 46} (2016) 254.
\betaibitem{MR11.thermal} H.~Louis, B.~I.~Ita and N.~I.~Nzeata, Eur.~Phys.~J.~Plus {\betaf 134} (2019) 315.
\betaibitem{MR12.scattering} B.~Khirali, A.~K.~Behera, J.~Bhoi and U.~Laha, Ann.~Phys.~ (NY) {\betaf 412} (2020) 168044.
\betaibitem{shd.pla.2008} G.~F.~Wei and S.~H.~Dong, Phys.~Lett.~A {\betaf 373} (2008) 49.
\betaibitem{ikhdair2008} S.~M.~Ikhdair and R.~Sever, Ann.~Phys.~(Berlin) {\betaf 17} (2008) 897.
\betaibitem{Gradshteyn} I.S. Gradshteyn, I.M. Ryzhik, {\it Tables of Integrals, Series, and Products} 5th edn (New York: Academic) 1994.
\betaibitem{shd.ijmpa2008} S.~H.~Dong, W.~C.~Qiang and J.~Garc\'ia-Ravelo, Int.~J.~Mod.~Phys.~A {\betaf 23} (2008) 1537.
{\betaf \betaibitem{gps} A.K. Roy, Results Phys. {\betaf 3} (2013) 103.
\betaibitem{gps2} A.K. Roy, J. Math. Chem. {\betaf 52} (2014) 1405.}
\end{thebibliography}
\betaegin{figure}[htp]
\centering
\includegraphics[width=18cm,height=16cm]{FigCentMR.eps}
\caption{\leftarrowbel{Fig.1approximation.MR} Plot of the differences between exact centrifugal term $\frac{l(l+1)}{r^2}$ and various approximations
from Eqs.~(\ref{greene.app1}), (\ref{pekeris.app2}), (\ref{approx.app3}), (\ref{app.convex}) in MR potential, for $l=1,\hbar=\muu=1,\alpha=1.5,b=1/0.05, A=2b$,
in panels (A)-(D) respectively.}
\end{figure}
\betaegin{figure}[htp]
\centering
\includegraphics[width=18cm,height=10cm]{FigCentPT.eps}
\caption{\leftarrowbel{Fig.2approximation.PT} Plot of the differences between exact centrifugal term $\frac{l(l+1)}{r^2}$ and various approximations
from Eqs.~(\ref{ap1}), (\ref{ap2}), (\ref{ap3}) and (\ref{ap4}) in PT potential, for $l=1,\hbar=\muu=1,\xi_1=4,\xi_2=2,\alpha=0.05$, in
panels (A)-(D) respectively.}
\end{figure}
\betaegin{table}[htp]
\caption{\leftarrowbel{Table1} Energies, $E_{n_r,l}^{MR}$, of MR potential for five sets of $(\leftarrowm, \nuu)$, given in parentheses. Here
$\hbar=\muu=1,\alpha=1.5,b=1/0.025$. }
\centering
\betaegin{tabular}{|lr|lllllll|}\hline
$n_r$ & $l$ & $-E_{n_r,l}^{MR}(1,1)$\fracootnotemark[1] & $-E_{n_r,l}^{MR}(0,1)$\fracootnotemark[2] & $-E_{n_r,l}^{MR}(1,0)$ & $-E_{n_r,l}^{MR}(-1.5,1)$ & $-E_{n_r,l}^{MR}(-2.5,1)$& GPS \cite{MR9} & Numerical \cite{MR4.SMIkhdair}\fracootnotemark[3] \\ \hline
1 &1 & 0.036913014 & 0.036913019 & 0.069378105 & 0.036913027 & 0.036913032&0.036913922& 0.0369134 \\
1 &2 & 0.018208662 & 0.018208677 & 0.069378064 & 0.0182087 & 0.018208715&0.0182117637& 0.0182115 \\
1 &3 & 0.0086497057 & 0.0086497369 & 0.069378003 & 0.0086497836 & 0.0086498148&0.0086619417& 0.0086619 \\
1 &4 & 0.003521352 & 0.0035214045 & 0.069377922 & 0.0035214833 & 0.0035215358&0.0035623305& 0.0035623 \\
2 &1 & 0.017172833 & 0.017172838 & 0.029925043 & 0.017172846 & 0.017172851&0.0171740303& 0.0171740 \\
2 &2 & 0.0085339943 & 0.0085340099 & 0.029925003 & 0.0085340333 & 0.0085340489&0.0085414805& 0.0085415 \\
2 &3 & 0.0036481309 & 0.0036481624 & 0.029924942 & 0.0036482097 & 0.0036482412&0.0036774476& 0.0036774 \\
2 &4 & 0.00093625137 & 0.00093630425 & 0.02992486 & 0.00093638357 & 0.00093643645&0.0010296092& --- \\ \hline
\end{tabular}
\betaegin{tabbing}
\fracootnotemark[1]{These energies nearly coincide with those from \cite{MR8}}. \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \=
\fracootnotemark[2]{These energies compare with those from \cite{MR3}.} \\
\fracootnotemark[3]{These correspond to numerical results using the method of \cite{lucha1999}.} \\
\end{tabbing}
\end{table}
\betaegin{table}[htp]
\caption{\leftarrowbel{Table2} Energies, $E_{n_r,l}^{PT}$, of PT potential for five sets of $(\leftarrowm, \nuu)$, given in parentheses. Here,
$\hbar=\muu=1,\xi_1=4,\xi_2=2,\alpha=0.05$.}
\centering
\betaegin{tabular}{|lr|lllllll|}\hline
$n_r$ & $l$ & $E_{n_r,l}^{PT}(1,1)$\fracootnotemark[1] & $E_{n_r,l}^{PT}(0,1)$ & $E_{n_r,l}^{PT}(1,0)$ & $E_{n_r,l}^{PT}(0.5,-1)$ & $E_{n_r,l}^{PT}(0.5,-2)$ & GPS\fracootnotemark[2] & Lucha/others \\ \hline
1 &1 & $-$0.21560894 & $-$0.21540061 & $-$0.21524815 & $-$0.21499153 & $-$0.21473492&0.215258812&\\
1 &2 & $-$0.21478931 & $-$0.21416431 & $-$0.2137065 & $-$0.21293627 & $-$0.21216612& 0.2141062447&\\
1 &3 & $-$0.2135647 & $-$0.2123147 & $-$0.21139777 & $-$0.20985618 & $-$0.20831492& 0.212382835&\\
1 &4 & $-$0.21194083 & $-$0.2098575 & $-$0.20832642 & $-$0.2057546 & $-$0.20318371& 0.2100950625&\\
2 &1 & $-$0.18400157 & $-$0.18379323 & $-$0.18363528 & $-$0.18337317 & $-$0.18311107&0.1836932855&\\
2 &2 & $-$0.18324436 & $-$0.18261936 & $-$0.18214507 & $-$0.18135837 & $-$0.18057175&0.1826114098&\\
2 &3 & $-$0.18211323 & $-$0.18086323 & $-$0.17991337 & $-$0.17833885 & $-$0.17676468&0.180993962&\\
2 &4 & $-$0.18061374 & $-$0.17853041 & $-$0.17694447 & $-$0.17431782 & $-$0.17169211&0.178847328&\\\hline
\end{tabular}
\begin{tabbing}
\footnotemark[1]{These energies compare with those from \cite{shd.ijmpa2008}}. \ \ \ \ \ \ \ \=
\footnotemark[2]{These are calculated here using the GPS method, for the first time.} \\
\end{tabbing}
\end{table}
\begin{figure}[htp]
\centering
\includegraphics[width=10cm,height=10cm]{MR11.eps}~\includegraphics[width=10cm,height=10cm]{MR12.eps}\\
\includegraphics[width=10cm,height=10cm]{MR21.eps}~\includegraphics[width=10cm,height=10cm]{MR22.eps}
\caption{\label{Fig.3MRP.Energy} Effect of $\lambda$ and $\nu$ on the energy, $E_{n_r,l}^{MR}$, of MR potential. Panels
(A), (B), (C), (D) correspond to $(n_r, l)$ values (1,1), (1,2), (2,1) and (2,2) respectively. The parameters are:
$\hbar=\mu=1,\alpha=1.5,b=1/0.025$. See text for details.}
\end{figure}
\begin{figure}[htp]
\centering
\includegraphics[width=10cm,height=10cm]{PT11.eps}~\includegraphics[width=10cm,height=10cm]{PT14.eps}\\
\includegraphics[width=10cm,height=10cm]{PT41.eps}~\includegraphics[width=10cm,height=10cm]{PT44.eps}
\caption{\label{Fig.4PTP.Energy} Effect of $\lambda$ and $\nu$ on the energy, $E_{n_r,l}^{PT}$, of PT potential. Panels
(A), (B), (C), (D) correspond to $(n_r, l)$ values (1,1), (1,4), (4,1) and (4,4) respectively. The parameters are:
$\hbar=\mu=1,\xi_1=4,\xi_2=2,\alpha=0.05$. See text for details.}
\end{figure}
\end{document}
\begin{document}
\title[A logarithmic bound for the chromatic number of the associahedron]{A logarithmic bound for the chromatic number\\ of the associahedron}
\author{Louigi Addario-Berry}
\address[L. Addario-Berry]{Department of Mathematics and Statistics, McGill University, Montr\'eal, Qu\'ebec, Canada}
{\mathbf E}mail{[email protected]}
\thanks{Louigi Addario-Berry is supported by an NSERC Discovery Grant and an FRQNT Team Research Grant.}
\author{Bruce Reed}
\address[B. Reed]{School of Computer Science, McGill University, Montr\'eal, Canada. CNRS, Projet COATI, I3S (CNRS and UNS) UMR7271 and INRIA, Sophia Antipolis, France. Instituto Nacional de Matem\'atica Pura e Aplicada (IMPA), Brasil. Visiting Research Professor, ERATO Kawarabayashi Large Graph Project, National Institute of Informatics, Japan.}
{\mathbf E}mail{[email protected]}
\thanks{Bruce Reed is supported by NSERC and the ERATO Kawarabayashi Large Graph Project.}
\author{Alex Scott}
\address[A. Scott]{Mathematical Institute, University of Oxford, Oxford OX2 6GG, United Kingdom}
{\mathbf E}mail{[email protected]}
\thanks{Alex Scott is supported by a Leverhulme Trust Research Fellowship.}
\author{David R. Wood}
\address[D. R. Wood]{School of Mathematical Sciences, Monash University, Melbourne, Australia}
{\mathbf E}mail{[email protected]}
\date{November 21, 2018}
\subjclass{52B05, 05C15}
\begin{abstract}
We show that the chromatic number of the $n$-dimensional associahedron grows at most logarithmically with $n$, improving the previously known bounds and proving a conjecture of Fabila-Monroy~et~al.~[\emph{Discrete Math. Theor. Comput. Sci.}, 2009].
\end{abstract}
\maketitle
\section{Introduction}
The associahedron $\cA_n$ is an $(n-3)$-dimensional convex polytope that arises in numerous branches of mathematics, including algebraic combinatorics~\cite{Stasheff63,LR98,CFZ02,HLT11} and discrete geometry~\cite{BFS90,PS12, PS15}. Associahedra are also called Stasheff polytopes after the work of Stasheff~\cite{Stasheff63}, following earlier work by Tamari~\citep{Tamari51}. We are only interested in the 1-skeleton of the associahedron, so we consider it as a graph, defined as follows.
The elements of the associahedron $\cA_n$ are triangulations $T$ of the convex $n$-gon with vertices labeled by $\{0,\ldots,n-1\}$ in clockwise order. For any such triangulation $T$, we always denote triangles of $T$ by the sequence $ABC$ of their vertices, ordered so that $A < B < C$. We write
$E(T)$ for the set of edges contained in $T$.
Every triangulation $T \in \cA_n$ contains the edges $01,12,\ldots, (n-1)0$; we refer to these as {\em boundary edges}. For $T \in \cA_n$, each non-boundary edge $e \in E(T)$ is contained in a unique quadrilateral $Q=Q_T(e)=ABCD$ with $A < B < C < D$; we will always list the vertices of quadrilaterals in increasing order. {\em Flipping} the edge $e$ means replacing $e$ by the other diagonal of $Q$. (See Figures~\ref{fig:tri_quad} and~\ref{fig:tri_quad_flip}.)\ Two triangulations $T,T' \in \cA_n$ are adjacent in $\cA_n$ if they may be obtained from one another by a single flip.
\begin{figure}[ht]
\begin{subfigure}[b]{0.45\textwidth}
\begin{centering}
\includegraphics[width=0.65\textwidth,page=1]{edge_flip.pdf}
\caption{A portion of a triangulation $T$. }
\label{fig:tri_quad}
\end{centering}
\end{subfigure}
\quad
\begin{subfigure}[b]{0.45\textwidth}
\begin{centering}
\includegraphics[width=0.65\textwidth,page=2]{edge_flip.pdf}
\caption{A portion of the triangulation $T'$ formed from $T$ by flipping $AC$. }
\label{fig:tri_quad_flip}
\end{centering}
\end{subfigure}
\caption{Portions of two adjacent triangulations of an $n$-gon.}
\label{fig:basic_quads}
\end{figure}
Graph-theoretic properties of associahedra have been well-studied. For example, it is easily seen that
$\cA_n$ is $(n-3)$-regular. Hurtado and Noy \citep{HN99} proved that $\cA_n$ is Hamiltonian and has connectivity
$n-3$, as well as determining its automorphism group. The diameter of $\cA_n$ and several equivalent questions have been studied extensively \cite{STT88,CP16,Pournin14}. Sleator~et~al.~\cite{STT88} proved that the diameter equals $2n-10$ for sufficiently large $n$, and recently Pournin~\cite{Pournin14} showed that $2n-10$ is the answer for $n\geqslant 13$.
This paper studies the chromatic number of $\cA_n$, a quantity which was first considered by Fabila-Monroy~et~al.~\cite{FFHHUW09}. That work gave an explicit $\lceil n/2\rceil$-colouring of $\cA_n$, and observed that $\chi(\cA_n) \leqslant O(n/\log n)$, based on the
result of Johansson \cite{johansson94} which says that every triangle-free graph with maximum degree $\Delta$ is $O(\Delta/\log\Delta)$-colourable. No non-constant lower bound on $\chi(\cA_n)$ is known. Indeed, the best known lower bound is
$\chi(\cA_{10}) \geqslant 4$ [private communication, Ruy Fabila-Monroy]. Fabila-Monroy~et~al.~\cite{FFHHUW09} conjectured an
$O(\log n)$ upper bound. We prove this conjecture.
\begin{thm}\label{thm:main}
$\chi(\cA_n) =O(\log n)$.
\end{thm}
\section{The Proof}
We prove Theorem~\ref{thm:main} by tracking how several carefully chosen properties of triangulations change when an edge is flipped. To see how this yields a route to bounding the chromatic number of $\cA_n$, first recall that if $f$ is a graph homomorphism from $\cA_n$ to some graph $G$, which is to say that $f:\cA_n \to V(G)$ is adjacency-preserving, then $\chi(\cA_n) \leqslant \chi(G)$.
This fact may be generalized as follows. Suppose that $(G_i)_{i \in I}$ is a finite set of graphs and $(f_i:\cA_n \to V(G_i))_{i \in I}$ are functions such that if $T,T' \in \cA_n$ are adjacent triangulations then there exists $i \in I$ for which $f_i(T)$ and $f_i(T')$ are adjacent in $G_i$. For each $i \in I$, let $\kappa_i$ be a proper colouring of $G_i$ with $\chi(G_i)$ colours, and colour $T \in \cA_n$ with the vector $(\kappa_i(f_i(T)))_{i \in I}$. If $T$ and $T'$ are adjacent then $(\kappa_i(f_i(T)))_{i \in I}$ and $(\kappa_i(f_i(T')))_{i \in I}$ differ in at least one coordinate; thus this is a proper colouring of $\cA_n$, and $\chi(\cA_n) \leqslant \prod_{i \in I} \chi(G_i)$.
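The following short Python sketch (an illustration added here, not part of the original argument) spells out this product construction: each $f_i$ maps a triangulation to a vertex of $G_i$, each $\kappa_i$ is a proper colouring of $G_i$, and a triangulation receives the vector of colours.
\begin{verbatim}
def product_colouring(fs, kappas):
    # fs[i] maps a triangulation T to a vertex of G_i;
    # kappas[i] is a proper colouring of G_i (a map from vertices to colours).
    def colour(T):
        return tuple(kappa(f(T)) for f, kappa in zip(fs, kappas))
    return colour

def separates_neighbours(colour, adjacent_pairs):
    # If for every adjacent pair some coordinate differs, the vector
    # colouring is proper on those pairs.
    return all(colour(T) != colour(U) for T, U in adjacent_pairs)
\end{verbatim}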
The remainder of the paper is devoted to defining the functions we will use, and showing they have the requisite properties.
Two fundamental notions that we will use are the {\em type} of a quadrilateral and the {\em scale} of an edge.
For a quadrilateral $Q=Q_T(e)=ABCD$ contained within triangulation $T$,
we say $Q$ is {\em type-1} if $e=AC$, and otherwise say $Q$ is {\em type-2};
we say that an edge $e$ is {\em type-1} or {\em type-2} according to the type of the quadrilateral $Q_T(e)$.
(For example, in Figure~\ref{fig:basic_quads},
$Q_T(AC)=ABCD$ is type-1 and $Q_T(BC)=ABEC$ is type-2,
and in Figure~\ref{fig:basic_quads},
$Q_{T'}(BC)$ is type-1 and $Q_{T'}(BD)$ is type-2.)
We fix a parameter $r > 0$ to be chosen later (in fact we will end up taking $r=3$). For an edge $e=UV$, define the {\em scale} of $e$ to be $\sigma_e = \lceil \log_r |U-V|\rceil \in \{0,1,\ldots,\lceil \log_r(n-1)\rceil\}$. The scales of the edges incident to triangles within a fixed quadrilateral $Q$ will be a key input to the functions we define.
We first consider the effect of edge flips on triangles $ABC$ where two of the three incident edges have the same scale. If $AB$ (resp.~$BC$, $AC$) is the unique edge whose scale is different from the others, then we say $ABC$ is a type-$l$ (resp.~type-$m$, type-$r$) triangle.
If all three edges have the same scale $s$, then we say $ABC$ is a type-$z$ triangle.
Let $(l_T,m_T,r_T,z_T)$ be the vector counting the number of type-$l$, type-$m$, type-$r$ and type-$z$ triangles in $T$.
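As a concrete illustration (not used in the proofs), the scale of an edge and the vector $(l_T,m_T,r_T,z_T)$ can be computed directly from a triangulation given, say, as a collection of vertex triples $A<B<C$; the following Python sketch assumes exactly that hypothetical input format.
\begin{verbatim}
def scale(u, v, r=3):
    # sigma_e = ceil(log_r |u - v|), computed with integer arithmetic
    d, s = abs(u - v), 0
    while r ** s < d:
        s += 1
    return s

def type_vector(triangles, r=3):
    counts = {'l': 0, 'm': 0, 'r': 0, 'z': 0}
    for a, b, c in triangles:            # assumes a < b < c
        sab, sbc, sac = scale(a, b, r), scale(b, c, r), scale(a, c, r)
        if sab == sbc == sac:
            counts['z'] += 1             # all three scales equal
        elif sbc == sac:
            counts['l'] += 1             # AB has the odd scale out
        elif sab == sac:
            counts['m'] += 1             # BC has the odd scale out
        elif sab == sbc:
            counts['r'] += 1             # AC has the odd scale out
        # triangles with three distinct scales are counted by no class
    return counts
\end{verbatim}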
For the remainder of the paper, we fix a triangulation $T$, and consider the effect of flipping a type-1 edge $e=AC$ within its quadrilateral $Q_T(e)=ABCD$, to form $T'$.
\begin{prop}\label{prop:2_same}
Suppose that $r \geqslant 3$, and that $\sigma_{AC}$, $\sigma_{BD}$ and $\sigma_{BC}$ are all different. Then $(l_{T'},m_{T'},r_{T'},z_{T'})\ne (l_T,m_T,r_T,z_T)$.
\end{prop}
\begin{proof}
By assumption, $\sigma_{BD}\ne \sigma_{AC}$ and $\sigma_{BC} < \min(\sigma_{AC},\sigma_{BD})$.
We argue by contradiction. To this end, suppose that $(l_{T'},m_{T'},r_{T'},z_{T'})=(l_T,m_T,r_T,z_T)$.
Note that
\[
\log_r (D-A) \leqslant \log_r (3\max(B-A,C-B,D-C)) \leqslant 1+ \log_r \max(B-A,C-B,D-C).
\]
Taking ceilings, it follows that
\begin{equation}\label{eq:tight}
\sigma_{AD} \leqslant 1+ \max(\sigma_{AB},\sigma_{BC},\sigma_{CD}).
\end{equation}
The preceding equation requires one of three inequalities to hold; the next three paragraphs treat the possibilities one at a time.
Suppose that $\sigma_{AB} = \max(\sigma_{AB},\sigma_{BC},\sigma_{CD})$.
Then either $\sigma_{AC}=\sigma_{AB}$ or
(using \eqref{eq:tight} and the fact that $\sigma_{AB}\leqslant \sigma_{AC}\leqslant\sigma_{AD}$) we have
$\sigma_{AC}=\sigma_{AB}+1=\sigma_{AD}$.
If
$\sigma_{AC}=\sigma_{AB}$, as in Figure~\ref{fig:tri_type_lz}, then $ABC$ is a type-$m$ triangle so, since we assume the triangle type vector is unchanged by flipping edge $AC$, either $ABD$ or $BCD$ is also type-$m$.
$BCD$ is not type-$m$, as $\sigma_{BC}<\sigma_{BD}$ by assumption, so $ABD$ is type-$m$ and hence $\sigma_{AB}=\sigma_{AD}$. But then $ABC$ and $ACD$ are both type-$m$, which gives a contradiction as $BCD$ is not.
If $\sigma_{AC}=\sigma_{AB}+1$, as in Figure~\ref{fig:tri_type_lm}, then $ACD$ is type-$m$ or type-z, so either $ABD$ or $BCD$ is type-m or type-z. But $BCD$ is neither, as $\sigma_{BC} < \sigma_{BD}$, and $ABD$ is neither as $\sigma_{AB}\ne\sigma_{AD}$.
Next suppose that $\sigma_{BC}=\max(\sigma_{AB},\sigma_{BC},\sigma_{CD})$, as in Figure~\ref{fig:tri_type_mz}.
Since $\sigma_{BC}\ne \sigma_{AC}$ and $\sigma_{BC}\ne \sigma_{BD}$ by assumption,
and all scales are at most $\sigma_{AD}$, it must be that
$\sigma_{AC}=\sigma_{BD}=\sigma_{AD}$; but this is ruled out by assumption.
Finally, suppose that $\sigma_{CD} = \max(\sigma_{AB},\sigma_{BC},\sigma_{CD})$.
This case is the same as the first case, as we can apply the argument to a reversed copy of the associahedron (which exchanges type-$l$ and type-$m$ triangles, while leaving the other two types invariant).
\end{proof}
\begin{figure}[ht]
\begin{subfigure}[b]{0.3\textwidth}
\begin{centering}
\includegraphics[width=0.9\textwidth,page=3]{edge_flip.pdf}
\caption{$\sigma_{AB}=\sigma=\sigma_{AC}$.}
\label{fig:tri_type_lz}
\end{centering}
\end{subfigure}
\quad
\begin{subfigure}[b]{0.3\textwidth}
\begin{centering}
\includegraphics[width=0.9\textwidth,page=4]{edge_flip.pdf}
\caption{$\sigma_{AB}=\sigma,\sigma_{AC}=\sigma+1$.}
\label{fig:tri_type_lm}
\end{centering}
\end{subfigure}
\quad
\begin{subfigure}[b]{0.3\textwidth}
\begin{centering}
\includegraphics[width=0.9\textwidth,page=5]{edge_flip.pdf}
\caption{$\sigma_{BC}=\sigma$.}
\label{fig:tri_type_mz}
\end{centering}
\end{subfigure}
\caption{Writing $\sigma=\max(\sigma_{AB},\sigma_{BC},\sigma_{CD})$, the subfigures correspond to possible configurations arising in the proof of Proposition~\ref{prop:2_same}.}
\label{fig:constrained-types}
\end{figure}
\begin{prop}\label{prop:three_equal}
If $\sigma_{AC}=\sigma_{BD}=\sigma_{BC}$ and $(l_{T'},m_{T'},r_{T'},z_{T'})=(l_T,m_T,r_T,z_T)$,
then either $\sigma_{AD}=\sigma_{BC}$ or $\sigma_{AB}=\sigma_{BC}=\sigma_{CD}$.
\end{prop}
\begin{proof}
In this case $ABC$ is type-$l$ or type-z, and $BCD$ is type-$m$ or type-$z$.
If $ABC$ is type-$l$ then since the triangle type vector doesn't change it must be that $ABD$ is type-$l$, which implies that $\sigma_{AD}=\sigma_{BD}$, yielding the result in this case.
Similarly, if $BCD$ is type-$m$ then it must be that $ACD$ is type-$m$, which implies that $\sigma_{AD}=\sigma_{AC}$. Otherwise, both $ABC$ and $BCD$ are type-$z$, in which case we indeed have $\sigma_{AB}=\sigma_{BC}=\sigma_{CD}$.
\end{proof}
For a triangulation $T \in \cA_n$ and $k \in \{1,2\}$ and $i\in\{0,1,\dots,\lceil\log_r(n-1)\rceil\}$, let
\[
s^k_i(T) = \#\{e \in E(T): Q(e)\mbox{ is type-}k, \sigma_e=i\}\, .
\]
We assign an integer label $c(T)$ to $T$ given by
\[
c(T) = \left(\sum_{i=0}^{\lceil\log_r(n-1)\rceil} 2i s^1_i(T) + \sum_{i=0}^{\lceil\log_r(n-1)\rceil} 3is^2_i(T) \right) \mod (3\lceil \log_r n\rceil)\, .
\]
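For concreteness, here is a small sketch (again an illustration only, with a hypothetical input format) of how the label $c(T)$ could be evaluated once each non-boundary edge is given together with its scale and the type of its quadrilateral.
\begin{verbatim}
def ceil_log(n, r=3):
    # smallest integer s with r**s >= n, i.e. ceil(log_r n) for n >= 1
    s = 0
    while r ** s < n:
        s += 1
    return s

def label_c(edges, n, r=3):
    # edges: iterable of pairs (sigma_e, quad_type) with quad_type in {1, 2}
    total = sum(2 * s if t == 1 else 3 * s for s, t in edges)
    return total % (3 * ceil_log(n, r))
\end{verbatim}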
The utility of such a labelling rule is explained by the following fact.
We continue to work with triangulations $T$ and $T'$ related by an edge flip within quadrilateral $ABCD$ with $AC \in E(T)$ and $BD \in E(T')$, as above.
\begin{prop} \label{prop_two_equal}
If exactly two of $\sigma_{AC}$, $\sigma_{BD}$ and $\sigma_{BC}$ are equal then
$c(T') \ne c(T)$.
\end{prop}
\begin{proof}
First suppose that $BC$ is not a boundary edge, and
let $V$ be the unique vertex of $T$ with $B<V<C$ which is incident to both $B$ and $C$. Note that $ABCD=Q_T(AC)$ is type-1 in $T$ and $ABCD=Q_{T'}(BD)$ is type-2 in $T'$. Also, $Q_T(BC) = ABVC$ is type-2 in $T$ and $Q_{T'}(BC)=BVCD$ is type-1 in $T'$. It is not hard to check that no other quadrilaterals change type when moving from $T$ to $T'$. We thus have
\begin{align*}
c(T')-c(T)
& = 3\sigma_{BD}-2\sigma_{AC} +2\sigma_{BC}-3\sigma_{BC} & \mod (3\lceil \log_r n\rceil)\, \ \\
& = 3\sigma_{BD}-2\sigma_{AC} - \sigma_{BC} & \mod (3\lceil \log_r n\rceil)\, .
\end{align*}
It follows that if $\sigma_{BC} < \sigma_{BD}=\sigma_{AC}$ then
\[
c(T')-c(T) = \sigma_{BD} - \sigma_{BC} \mod (3\lceil \log_r n\rceil) \ne 0\, ;
\]
the difference is non-zero $\mathrm{mod} (3\lceil \log_r n\rceil)$ since all scales are at most $\lceil \log_r n\rceil$. Similarly, if
$\sigma_{BC} = \sigma_{BD}<\sigma_{AC}$ then
\[
c(T')-c(T) = 2(\sigma_{AC}-\sigma_{BD}) \mod (3\lceil \log_r n\rceil) \ne 0\, ,
\]
and if
$\sigma_{AC}=\sigma_{BC}< \sigma_{BD}$ then
\[
c(T')-c(T) = 3(\sigma_{BD}-\sigma_{AC}) \mod (3\lceil \log_r n\rceil) \ne 0\, .
\]
Since $\sigma_{BC} \leqslant \min(\sigma_{AC},\sigma_{BD})$, these are the only possibilities.
The case when $BC$ is a boundary edge is very similar, but easier. In this case we obtain that
\[
c(T')-c(T) = 3\sigma_{BD}-2\sigma_{AC} \mod (3\lceil \log_r n\rceil)\, .
\]
Since $BC$ is a boundary edge, $AC$ and $BD$ are not, so $\sigma_{BC}=0$ and $\sigma_{AC}\ne 0$, $\sigma_{BD} \ne 0$. It follows by assumption that $\sigma_{AC}=\sigma_{BD}$, so
\[
c(T')-c(T) = \sigma_{AC} \mod (3\lceil \log_r n\rceil) \ne 0\, .\qedhere
\]
\end{proof}
Propositions~\ref{prop:2_same},~\ref{prop:three_equal} and~\ref{prop_two_equal} imply that the label $c(T)$ and the type vector
$(l_T,m_T,r_T,z_T)$ together distinguish $T$ from $T'$ except in the following cases.
\begin{enumerate}
\item $AC$, $BD$, $BC$, and $AD$ have the same scale and $AB$, $CD$ have smaller scales.
\item $AC$, $BD$, $BC$, $AD$ and $AB$ have the same scale and $CD$ has a smaller scale.
\item $AC$, $BD$, $BC$, $AD$ and $CD$ have the same scale and $AB$ has a smaller scale.
\item $AC$, $BD$, $BC$, $AB$ and $CD$ have the same scale and $AD$ has a larger scale.
\item All six edges have the same scale.
\end{enumerate}
To handle cases (1)--(3) we track two additional parameters, and show that
the parity of one or both parameters is different for $T$ and $T'$. In case (4) we again prove there is a change of parity, but of a third, more complicated parameter. For case (5) we use induction.
\begin{figure}[ht]
\begin{subfigure}[b]{0.48\textwidth}
\begin{centering}
\includegraphics[width=0.95\textwidth,page=1]{dual_tree.pdf}
\caption{The dual tree of a triangulation of an 8-gon.}
\label{fig:dual_oct}
\end{centering}
\end{subfigure}
\quad
\begin{subfigure}[b]{0.48\textwidth}
\begin{centering}
\includegraphics[width=0.95\textwidth,page=2]{dual_tree.pdf}
\caption{The subgraph $\hat{S}$ of $\hat{T}$ corresponding to a subgraph $S$ of a triangulation $T$.}
\label{fig:dual_subforest}
\end{centering}
\end{subfigure}
\caption{The dual trees of an $8$-gon and of a sub-triangulation of a $12$-gon}
\label{fig:dual_trees}
\end{figure}
Figure~\ref{fig:dual_oct} should make the definitions of the current paragraph clear. Orient the edges of the triangulation $T$ so that the head of each edge has larger label. The {\em root edge} of $T$ is the edge $\rho=(0,n-1)$.
Now construct the following oriented tree $\hat{T}$. First, augment $T$ by adding a vertex $v$ to the unbounded face, and join it to all vertices of the polygon. Let $\hat{T}_0$ be the planar dual of the augmented graph; then $\hat{T}$ is formed by removing all edges of $\hat{T}_0$ lying entirely within the unbounded face of $T$. For each edge $e$ of $T$ there is a unique edge $\hat{e}$ of $\hat{T}$ crossing $e$. Orient $\hat{e}$ from the left to the right of $e$ (when following $e$ from tail to head). Root $\hat{T}$ at the edge $\hat{\rho}$, whose head is the unique node of $\hat{T}$ with out-degree zero. Note that $\hat{T}$ is a tree, which we call the \emph{dual tree} of $T$.
Given an edge $e=UV$ of $T$ with $e \ne \rho$, the triangle containing the head of $\hat{e}$ is incident to both $U$ and $V$; let $W$ be its third node. Necessarily either $W < \min(V,U)$ or $W > \max(V,U)$. In the first case we say $\hat{e}$ is a {\em left turn}, in the second we say it is a {\em right turn}.
Given a subgraph $S$ of triangulation $T$, we let $\hat{S}$ be the ``dual'' subgraph of $\hat{T}$, with the same vertex set as $\hat{T}$ and with edge set
\[
E(\hat{S}) = \{\hat{e}: e \in E(S)\}\, ;
\]
this is illustrated in Figure~\ref{fig:dual_subforest}.
A node of $\hat{S}$ is a {\em leaf} if it has degree one.
For each node $v$ of $\hat{S}$, let $g_S(v)$ and $d_S(v)$ be defined as follows.
Write $r$ for the root (the unique node of out-degree $0$) of the tree component of $\hat{S}$ containing $v$. Then $g_S(v)$ and $d_S(v)$ are the number of left- and right-turns on the path from $v$ to $r$, respectively; see Figure~\ref{fig:lr_labeling}.
\begin{figure}[ht]
\begin{subfigure}[b]{0.48\textwidth}
\begin{centering}
\includegraphics[width=\textwidth,page=3]{dual_tree.pdf}
\caption{The left-turn and right-turn labelling of a component $\hat{H}$ of $\hat{S}$ with root node $r$. Labels are given in the form $(g(v),d(v))$ for all nodes $v$ of $\hat{S}$.}
\label{fig:lr_labeling}
\end{centering}
\end{subfigure}
\quad
\begin{subfigure}[b]{0.48\textwidth}
\begin{centering}
\includegraphics[width=\textwidth,page=4]{dual_tree.pdf}
\caption{The reduced tree $\tilde{H}$ corresponding to the component $\hat{H}$, together with the triangulation of a polygon to which $\tilde{H}$ is dual. The root edge of $\tilde{H}$ is dashed.}
\label{fig:reduced_dualtree}
\end{centering}
\end{subfigure}
\caption{In both subfigures, left-turn edges are red and right-turn edges are blue.}
\end{figure}
Recall that $T'$ is obtained from $T$ by flipping edge $AC$ within quadrilateral $ABCD$. For each $1 \leqslant i \leqslant \lceil \log_r n \rceil$, let $S_i$ be the subgraph of $T$ with edge set
$E(S_i) = \{e \in E(T): \sigma_e=i\}$, and let $S_i'$ be the subgraph of $T'$ with edge set
$E(S_i') = \{e \in E(T'): \sigma_e=i\}$. Then let $\hat{S}_i$ and $\hat{S}'_i$ be as defined in the preceding paragraph (so $\hat{S}_i$ is a subgraph of $\hat{T}$ and $\hat{S}_i'$ is a subgraph of $\hat{T}'$). Let
\[
G(T) = \sum_{i=1}^{\lceil \log_r n \rceil} \sum_{v \in V(\hat{S}_i)}
g_{S_i}(v), \quad \mbox{ and let }\quad D(T) = \sum_{i=1}^{\lceil \log_r n \rceil} \sum_{v \in V(\hat{S}_i)}
d_{S_i}(v)\, .
\]
The following proposition implies that in cases (1), (2) and (3), flipping edge $AC$ yields a change in parity of at least one of $G$ and $D$.
\begin{prop}\label{prop_parity_cases123}
If the scales of the edges $AB$, $AC$, $AD$, $BC$, $BD$ and $CD$ are as in cases (1), (2) or (3) above, then either $G(T')=G(T)-1$ or $D(T')=D(T)+1$ or both.
\end{prop}
\begin{proof}
Let $\sigma=\sigma_{AC}$.
We first claim that for all $i \ne \sigma$, the contributions to $G(T)$ and to $D(T)$ from scale-$i$ nodes are unchanged by the edge flip operation, i.e.,
\begin{equation}\label{eq:parity123}
\sum_{i \ne \sigma}\!\sum_{v \in V(\hat{S}_i)}\!
g_{S_i}(v)=\sum_{i \ne \sigma}\!\sum_{v \in V(\hat{S}_i')}\!
g_{S_i'}(v)
\quad \mbox{ and }\quad
\sum_{i \ne \sigma}\!\sum_{v \in V(\hat{S}_i)} \!
d_{S_i}(v)=\sum_{i \ne \sigma}\! \sum_{v \in V(\hat{S}_i')} \!
d_{S_i'}(v)\, .
\end{equation}
We prove these equalities in case (1); the other two cases are similar but easier.
The triangle containing the head of the edge $\hat{e}_{AB}$ dual to $AB$ is $ABC$ in $T$ and is $ABD$ in $T'$. The case (1) assumptions on the scales of the edges then imply that $\hat{e}_{AB}$ has out-degree $0$ and in-degree $1$ in $\hat{S}_{\sigma_{AB}}$. In particular, it is the root of its component of $\hat{S}_{\sigma_{AB}}$. Moreover, $\hat{e}_{AB}$ is a right-turn edge since $C$ and $D$ are both larger than $A$ and $B$.
Similarly, the triangle containing the tail of the edge $\hat{e}_{CD}$ dual to $CD$ is $ACD$ in $T$ and is $BCD$ in $T'$. The assumptions on the scales of edges again imply that $\hat{e}_{CD}$ has in-degree $1$ and out-degree $0$ within $\hat{S}_{\sigma_{CD}}$, so is the root of its component of $\hat{S}_{\sigma_{CD}}$. Moreover, $\hat{e}_{CD}$ is a left-turn edge since $A$ and $B$ are both smaller than $C$ and $D$.
Since the structures of $T$ and of $T'$ are unaffected outside of the quadrilateral $ABCD$, the equalities in (\ref{eq:parity123}) follow in case (1).
We now restrict our attention to scale $\sigma$. We write $g(\cdot) = g_{S_\sigma}(\cdot)$ and $d(\cdot) = d_{S_\sigma}(\cdot)$,
and likewise write $g'(\cdot) = g_{S'_\sigma}(\cdot)$ and $d'(\cdot) = d_{S'_\sigma}(\cdot)$.
Note that all nodes not lying within the quadrilateral $ABCD$ either belong to both $S_\sigma$ and $S'_{\sigma}$ or belong to neither of $S_\sigma$ and $S'_{\sigma}$.
The remainder of the proof boils down to inspection of Figures~\ref{fig:lr_labels_1},~\ref{fig:lr_labels_2} and~\ref{fig:lr_labels_3}.
In case (1), observe (see Figure~\ref{fig:lr_labels_1}) that $g(u)=g'(u)=a+1$ and $d(u)=d'(u)=b+1$, which implies that $(g(q),d(q))=(g'(q),d'(q))$ for all nodes $q$ not lying within $ABCD$. Since $g(v)+g(x)=2a+1=g'(p)+g'(z)+1$ and $d(v)+d(x)=2b=d'(p)+d'(z)-1$, it follows that $G(T')=G(T)-1$ and $D(T')=D(T)+1$.
Figure~\ref{fig:lr_labels_2} depicts the situation in case (2). In this case $d(u)=d'(u)$ and $d(y)=d'(y)$, which implies that $d(q)=d'(q)$ for all $q$ not lying in $ABCD$.
Since $d'(z)+d'(p)=d(v)+d(x)+1$, it follows that $D(T')=D(T)+1$.
Finally, Figure~\ref{fig:lr_labels_3} relates to case (3). In this case $g(u)=g'(u)$, $g(y)=g'(y)$, and
$g(v)+g(x)=g'(z)+g'(p)+1$, so the same logic as above implies that $G(T')=G(T)-1$. This completes the proof.
\end{proof}
\begin{figure}[ht]
\begin{centering}
\includegraphics[width=0.85\textwidth,page=2]{5_cases}
\caption{The left-turn and right-turn labels near quadrilateral $ABCD$ in $T$ and $T'$: case (1).
Here $(g(x),d(x))=(a,b)$, $(g(v),d(v))=(a+1,b)$ and $(g(u),d(u))=(a+1,b+1)$.}
\label{fig:lr_labels_1}
\end{centering}
\end{figure}
\begin{figure}[ht]
\begin{centering}
\includegraphics[width=0.85\textwidth,page=3]{5_cases}
\caption{The left-turn and right-turn labels near $ABCD$ in $T$ and $T'$: case (2).}
\label{fig:lr_labels_2}
\end{centering}
\end{figure}
\begin{figure}[ht]
\begin{centering}
\includegraphics[width=0.85\textwidth,page=4]{5_cases}
\caption{The left-turn and right-turn labels near $ABCD$ in $T$ and $T'$: case (3).}
\label{fig:lr_labels_3}
\end{centering}
\end{figure}
We now turn our attention to cases (4) and (5).
Consider any subgraph $S$ of $T$, and let $\hat{H}$ be a connected component of $\hat{S}$. Note that $\hat{H}$ is a rooted sub-binary tree (i.e.~every node has degree at most three; see Figure~\ref{fig:dual_subforest}). Let $\tilde{H}$ be the tree obtained from $\hat{H}$ as follows (see Figures~\ref{fig:lr_labeling} and~\ref{fig:reduced_dualtree}). First, if the root $r$ of $\hat{H}$ has exactly two children then add a new node $\tilde{r}$ incident only to $r$ and reroot at $\tilde{r}$. Next, suppress all nodes of degree exactly two to obtain a rooted binary tree. We call $\tilde{H}$ the {\em reduced tree} of $\hat{H}$.
\begin{prop}\label{prop:leaf_count}
For all $1 \leqslant i \leqslant \lceil \log_r n \rceil$ and all components $\hat{H}$ of $\hat{S}_i$, the reduced tree $\tilde{H}$ has at most $2r-1$ leaves.
\end{prop}
\begin{proof}
Fix any node $u$ of $\tilde{H}$ with in-degree zero, and consider the edge $uv$ incident to $u$ in $\hat{H}$. Then $uv$ is dual to an edge $AB$ with $\sigma_{AB}=i$ so with $r^{i-1} < B-A \leqslant r^i$. Now fix another node $w$ of $\tilde{H}$ with in-degree zero, write $wx$ for the edge incident to $w$ in $\hat{H}$, and let $CD$ be its dual edge. Then necessarily either $A < B < C < D$ or $C < D < A < B$.
Next consider an edge $yr$ from a child of $r$ to $r$ in $\hat{H}$. Writing $EF$ for the edge dual to $yr$, then the observation of the preceding paragraph implies that $F-E > r^{i-1} \cdot \ell$, where $\ell$ is the number of nodes with in-degree zero in the subtree rooted at $y$. On the other hand, $\sigma_{EF}=i$ so $F-E \leqslant r^i$; so $\ell \leqslant r-1$. If $r$ has only one child (so is a leaf itself) this yields that $\tilde{H}$ has at most $r$ leaves. If $r$ has two children then each of their subtrees contains at most $r-1$ leaves; in this case $\tilde{r}$ is also a leaf, so the total number of leaves is at most $2(r-1)+1$.
\end{proof}
For any subgraph $S$ of the triangulation $T$, the embedding of $\hat{T}$ in the plane induces a total order of the connected components of $\hat{S}$, given by the order their roots are visited by a clockwise tour around the contour of $\hat{T}$ starting from the head of the root edge $\rho$. For each $1 \leqslant i \leqslant \lceil \log_r n \rceil$, list the components of $\hat{S}_i$ in the order just described as $H_{i,1},\ldots,H_{i,\ell}$, where $\ell=\ell(T,i)$ is the number of such components. Then, for $1 \leqslant j \leqslant \ell(T,i)$ let $\tilde{H}_{i,j}$ be the reduced tree of $H_{i,j}$.
Each tree from $(\tilde{H}_{i,j},i \leqslant \lceil \log_r n\rceil,j \leqslant \ell(T,i))$ is dual to a unique triangulation $\tilde{T}_{i,j}$ of a polygon, as in Figure~\ref{fig:reduced_dualtree}. Proposition~\ref{prop:leaf_count} implies that $\tilde{T}_{i,j}$ belongs to an associahedron $\cA_k$ for some $k \leqslant 2r-1$. Let $\phi$ be a proper colouring of the disjoint union of $(\cA_k,k \leqslant 2r-1)$, with colours $\{1,\ldots,\chi(\cA_{2r-1})\}$, and define
\[
\mathrm{I}(T) = \left(\sum_{i=0}^{\lceil \log_r n \rceil} \sum_{j=1}^{\ell(T,i)} \phi(\tilde{T}_{i,j}) \right)\mod \chi(\cA_{2r-1})\, .
\]
\begin{prop}\label{prop_induction}
In cases (4) and (5), we have $\mathrm{I}(T) \ne \mathrm{I}(T')$.
\end{prop}
\begin{proof}
We write $vx=\hat{e}$ for the dual edge of $AC$ in $T$, and $zp=\hat{e}_{BD}$ for the dual edge of $BD$ in $T'$. Figures~\ref{fig:case_4} and~\ref{fig:case_5} illustrate cases (4) and (5) respectively.
The clockwise contour exploration of a rooted plane tree is a walk around the outside of the tree which starts and finishes at the root, keeping the unbounded face to its left at all times. This walk traverses each edge exactly twice, and records the vertices it visits in sequence, with repetition.
In cases (4) and (5), for the clockwise contour explorations of $\hat{T}$ and of $\hat{T}'$, there are (possibly empty) strings $P_1,\ldots,P_5$ so that the sequences recorded by the contour explorations of $\hat{T}$ and of $\hat{T}'$ are of the form
\[
P_1sxvwP_2wvuP_3uvxyP_4yxsP_5
\]
and
\[
P_1spwP_2wpzuP_3uzyP_4yzpsP_5\, ,
\]
respectively; again, see Figures~\ref{fig:case_4} and~\ref{fig:case_5}.
The contour explorations of $\hat{T}$ and $\hat{T}'$ agree until they visit dual vertices lying within $ABCD$. It follows that if $H_{\sigma,j}$ is the component of $S_\sigma$ containing $\hat{e}_{AC}$, then the component of $S_{\sigma}'$ containing $\hat{e}_{BD}$ is $H'_{\sigma,j}$.
In case (4), since $x$ has two children in $H_{\sigma,j}$, by construction it is the unique child of the root of $\tilde{H}_{\sigma,j}$. It is thus natural to identify $s$ with the root of $\tilde{H}_{\sigma,j}$. We may likewise identify $s$ with the root of $\tilde{H}'_{\sigma,j}$, since $p$ has two children in $H'_{\sigma,j}$. After the addition of $s$ as a root, the nodes $v,x,p$ and $z$ all have degree $3$, so none of these nodes are suppressed when constructing $\tilde{H}_{\sigma,j}$ and $\tilde{H}_{\sigma,j}'$ from $H_{\sigma,j}$ and $H_{\sigma,j}'$.
In case (5), nodes $v,x,p$ and $z$ all have degree $3$, and the edges $xs$ and $ps$ belong to $H_{\sigma,j}$ and $H_{\sigma,j}'$, respectively.
It follows from the two preceding paragraphs that in cases~(4) and~(5), we may view $v$ and $x$ as nodes of both $H_{\sigma,j}$ and $\tilde{H}_{\sigma,j}$, and $p$ and $z$ as nodes of both $H_{\sigma,j}'$ and $\tilde{H}_{\sigma,j}'$. It is then clear that
flipping the edge $AC$ in the triangulation $T$ corresponds precisely to flipping the corresponding edge in $\tilde{T}_{\sigma,j}$ to form $\tilde{T}'_{\sigma,j}$.
Since $\tilde{T}_{\sigma,j}$ and $\tilde{T}_{\sigma,j}'$ are related by a single edge flip, and $\phi$ is a proper colouring, it follows that
$\phi(\tilde{T}_{\sigma,j}) \ne \phi(\tilde{T}_{\sigma,j}')$. Since all other components of $S_\sigma$, and more generally of each $(S_i,1 \leqslant i \leqslant \lceil \log_r n \rceil)$, are unchanged when moving from $T$ to $T'$, the result follows.
\end{proof}
\begin{figure}[ht]
\begin{centering}
\includegraphics[width=0.85\textwidth,page=5]{5_cases}
\caption{The structure near the quadrilateral $ABCD$ in $T$ and $T'$ in case (4). The dashed edges have scale $\sigma_{AD} > \sigma$, all other edges of the triangulations shown in the figure have scale $\sigma$.}
\label{fig:case_4}
\end{centering}
\end{figure}
\begin{figure}[ht]
\begin{centering}
\includegraphics[width=0.85\textwidth,page=6]{5_cases}
\caption{The structure near the quadrilateral $ABCD$ in $T$ and $T'$ in case (5). All edges of the triangulations shown in the figure have scale $\sigma$.}
\label{fig:case_5}
{\mathbf E}nd{centering}
{\mathbf E}nd{figure}
\begin{proof}[Proof of Theorem~\ref{thm:main}.]
Consider the graph $G$ with vertex set ${\mathbb N}^4$ where $(l,m,r,z)$ and $(l',m',r',z')$ are adjacent if
$|l'-l|+|m'-m|+|r'-r|+|z'-z| \leqslant 3$. This graph is $128$-regular, so is 129-colourable. (We are not optimizing constants.)
Let $\kappa:{\mathbb N}^4 \to \{1,\ldots,129\}$ be a proper colouring of~$G$.
In all situations covered by Proposition~\ref{prop:2_same}, the vectors $(l_{T'},m_{T'},r_{T'},z_{T'})$ and $(l_T,m_T,r_T,z_T)$ are adjacent in $G$, so we have $\kappa((l_{T'},m_{T'},r_{T'},z_{T'})) \ne \kappa((l_T,m_T,r_T,z_T))$.
In the situations covered by Proposition~\ref{prop_two_equal}, we have $c(T) \ne c(T')$.
Let $g(T) = G(T) \mod 2$ and $d(T)=D(T)\mod 2$.
By Proposition~\ref{prop_parity_cases123}, in cases (1)--(3), either $g(T) \ne g(T')$ or $d(T) \ne d(T')$ or both. Finally, in cases (4) and (5), by Proposition~\ref{prop_induction} we have $\mathrm{I}(T) \ne \mathrm{I}(T')$.
It follows that the data $\kappa((l_T,m_T,r_T,z_T))$, $c(T)$, $g(T)$, $d(T)$, and $I(T)$ are sufficient to distinguish $T$ from all its neighbours in $\cA_n$. Therefore,
\[
\chi(\mathcal{A}_n) \leqslant 129\cdot 3\lceil \log_r n \rceil \cdot 2 \cdot 2 \cdot \chi(\mathcal{A}_{2r-1}),
\]
for any $r \geqslant 3$. Taking $r=3$ yields $\chi(\mathcal{A}_{2r-1}) = \chi(\mathcal{A}_5)=3$, since $\mathcal{A}_5$ is a $5$-cycle.
It follows that
\[
\chi(\mathcal{A}_{n}) \leqslant 12\cdot 129 \cdot 3\lceil \log_3 n \rceil = O(\log n)\, .\qedhere
\]
\end{proof}
\addtocontents{toc}{\SkipTocEntry}
\begin{thebibliography}{14}
\providecommand{\natexlab}[1]{#1}
\providecommand{\url}[1]{\texttt{#1}}
\expandafter\ifx\csname urlstyle\endcsname\relax
\providecommand{\doi}[1]{doi: #1}\else
\providecommand{\doi}{doi: \begingroup \urlstyle{rm}\Url}\fi
\bibitem[Billera et~al.(1990)Billera, Filliman, and Sturmfels]{BFS90}
Louis~J. Billera, Paul Filliman, and Bernd Sturmfels.
\newblock Constructions and complexity of secondary polytopes.
\newblock \emph{Adv. Math.}, 83\penalty0 (2):\penalty0 155--179, 1990.
\newblock \doi{10.1016/0001-8708(90)90077-Z}.
\bibitem[Ceballos and Pilaud(2016)]{CP16}
Cesar Ceballos and Vincent Pilaud.
\newblock The diameter of type {$D$} associahedra and the non-leaving-face
property.
\newblock \emph{European J. Combin.}, 51:\penalty0 109--124, 2016.
\newblock \doi{10.1016/j.ejc.2015.04.006}.
\bibitem[Chapoton et~al.(2002)Chapoton, Fomin, and Zelevinsky]{CFZ02}
Fr\'{e}d\'{e}ric Chapoton, Sergey Fomin, and Andrei Zelevinsky.
\newblock Polytopal realizations of generalized associahedra.
\newblock \emph{Canad. Math. Bull.}, 45\penalty0 (4):\penalty0 537--566, 2002.
\newblock \doi{10.4153/CMB-2002-054-1}.
\bibitem[Fabila-Monroy et~al.(2009)Fabila-Monroy, Flores-Pe\~{n}aloza, Huemer,
Hurtado, Urrutia, and Wood]{FFHHUW09}
Ruy Fabila-Monroy, David Flores-Pe\~{n}aloza, Clemens Huemer, Ferran Hurtado,
Jorge Urrutia, and David~R. Wood.
\newblock On the chromatic number of some flip graphs.
\newblock \emph{Discrete Math. Theor. Comput. Sci.}, 11\penalty0 (2):\penalty0
47--56, 2009.
\newblock URL \url{https://dmtcs.episciences.org/460}.
\bibitem[Hohlweg et~al.(2011)Hohlweg, Lange, and Thomas]{HLT11}
Christophe Hohlweg, Carsten E. M.~C. Lange, and Hugh Thomas.
\newblock Permutahedra and generalized associahedra.
\newblock \emph{Adv. Math.}, 226\penalty0 (1):\penalty0 608--640, 2011.
\newblock \doi{10.1016/j.aim.2010.07.005}.
\bibitem[Hurtado and Noy(1999)]{HN99}
Ferran Hurtado and Marc Noy.
\newblock Graph of triangulations of a convex polygon and tree of
triangulations.
\newblock \emph{Comput. Geom. Theory Appl.}, 13\penalty0 (3):\penalty0
179--188, 1999.
\bibitem[Johansson(1994)]{johansson94}
Anders Johansson.
\newblock \emph{Some results on colourings of graphs}.
\newblock PhD thesis, University of Ume\aa, 1994.
\bibitem[Loday and Ronco(1998)]{LR98}
Jean-Louis Loday and Mar\'{i}a~O. Ronco.
\newblock Hopf algebra of the planar binary trees.
\newblock \emph{Adv. Math.}, 139\penalty0 (2):\penalty0 293--309, 1998.
\newblock \doi{10.1006/aima.1998.1759}.
\bibitem[Pilaud and Santos(2012)]{PS12}
Vincent Pilaud and Francisco Santos.
\newblock The brick polytope of a sorting network.
\newblock \emph{European J. Combin.}, 33\penalty0 (4):\penalty0 632--662, 2012.
\newblock \doi{10.1016/j.ejc.2011.12.003}.
\bibitem[Pilaud and Stump(2015)]{PS15}
Vincent Pilaud and Christian Stump.
\newblock Brick polytopes of spherical subword complexes and generalized
associahedra.
\newblock \emph{Adv. Math.}, 276:\penalty0 1--61, 2015.
\newblock \doi{10.1016/j.aim.2015.02.012}.
\bibitem[Pournin(2014)]{Pournin14}
Lionel Pournin.
\newblock The diameter of associahedra.
\newblock \emph{Adv. Math.}, 259:\penalty0 13--42, 2014.
\newblock \doi{10.1016/j.aim.2014.02.035}.
\bibitem[Sleator et~al.(1988)Sleator, Tarjan, and Thurston]{STT88}
Daniel~D. Sleator, Robert~E. Tarjan, and William~P. Thurston.
\newblock Rotation distance, triangulations, and hyperbolic geometry.
\newblock \emph{J. Amer. Math. Soc.}, 1\penalty0 (3):\penalty0 647--681, 1988.
\newblock \doi{10.2307/1990951}.
\bibitem[Stasheff(1963)]{Stasheff63}
James~Dillon Stasheff.
\newblock Homotopy associativity of {$H$}-spaces. {I}, {II}.
\newblock \emph{Trans. Amer. Math. Soc.}, 108:\penalty0 293--312, 1963.
\newblock \doi{10.1090/s0002-9947-1963-0158400-5}.
\bibitem[Tamari(1951)]{Tamari51}
Dov Tamari.
\newblock \emph{Mono\"{i}des pr\'{e}ordonn\'{e}s et cha\^{i}nes de {M}alcev}.
\newblock Th\`ese, Universit\'{e} de Paris, 1951.
\end{thebibliography}
\end{document}
\begin{document}
\maketitle
\begin{abstract}
In this paper we introduce a family of two-variable derivative polynomials for tangent and secant.
We study the generating functions for the coefficients of this family of polynomials. In particular, we establish a connection between these generating functions and Eulerian polynomials.
\\
{\sl Keywords:}\quad Derivative polynomials; Eulerian polynomials
\end{abstract}
\section{Introduction}
Throughout this paper, we define $y=\tan(x)$ and $z=\sec(x)$. Denote by $D$ the differential operator ${d}/{d x}$.
Thus $D(y)=z^2$ and $D(z)=yz$. An important tangent identity is given by $$1+y^2=z^2.$$
In 1995, Hoffman~\cite{Hoffman95} considered two sequences of {\it derivative polynomials} defined respectively by
\begin{equation*}\label{derivapoly-1}
D^n(y)=P_n(y)\quad \text{and}\quad D^n(z)=z Q_n(y)
\end{equation*}
for $n\geq 0$.
From the chain rule it follows that the polynomials $P_n(u)$ satisfy $P_0(u)=u$ and $P_{n+1}(u)=(1+u^2)P_n'(u)$, and similarly $Q_0(u)=1$ and $Q_{n+1}(u)=(1+u^2)Q_n'(u)+uQ_n(u)$. The first few of the polynomials $P_n(u)$ are
\begin{align*}
P_1(u)&=1+u^2,\\
P_2(u)&=2u+2u^3,\\
P_3(u)&=2+8u^2+6u^4.
\end{align*}
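The recurrence $P_{n+1}(u)=(1+u^2)P_n'(u)$ is easy to iterate symbolically; the short Python sketch below (added here only as an illustration) reproduces the polynomials listed above.
\begin{verbatim}
import sympy as sp

u = sp.symbols('u')

def P(n):
    p = u                                          # P_0(u) = u
    for _ in range(n):
        p = sp.expand((1 + u**2) * sp.diff(p, u))  # P_{k+1} = (1+u^2) P_k'
    return p

print(P(1), P(2), P(3))
# u**2 + 1   2*u**3 + 2*u   6*u**4 + 8*u**2 + 2
\end{verbatim}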
Various refinements of the polynomials $P_n(u)$ and $Q_n(u)$ have been pursued by several authors,
see~\cite{Carlitz72,Cvijovic09,Franssens07,Hoffman99,Josuat10,Ma12} for instance.
Let ${\mathcal S}_n$ denote the symmetric group of all permutations of $[n]$, where $[n]=\{1,2,\ldots,n\}$.
A permutation $\pi=\pi(1)\pi(2)\cdots\pi(n)\in{\mathcal S}_n$
is {\it alternating} if $\pi(1)>\pi(2)<\pi(3)>\pi(4)<\cdots \pi(n)$. In other words, $\pi(i)<\pi({i+1})$ if $i$ is even and $\pi(i)>\pi({i+1})$ if $i$ is odd.
Let $E_n$ denote the number of alternating permutations in ${\mathcal S}_n$. The number $E_n$ is called an {\it Euler number} because Euler considered the numbers $E_{2n+1}$. There has been a huge literature on Euler numbers (see~\cite{Stanley} for details).
In 1879, Andr\'e~\cite{Andre79} obtained that
$$y+z=\sum_{n=0}^\infty E_n\frac{x^n}{n!}.$$
Since the tangent is an odd function and the secant is an even function, we have
$$y=\sum_{n=0}^\infty E_{2n+1}\frac{x^{2n+1}}{(2n+1)!} \quad\text{and} \quad z=\sum_{n=0}^\infty E_{2n}\frac{x^{2n}}{(2n)!}.$$
For this reason the integers $E_{2n+1}$ are sometimes called the {\it tangent numbers} and the integers $E_{2n}$ are called the {\it secant numbers}.
Let $E(x)=y+z$. Clearly, $E(0)=1$.
It is easy to verify that
\begin{equation}\label{D-1}
2D(E(x))=1+E^2(x).
\end{equation}
Taking the derivative of~(\ref{D-1}) with respect to $x$, we obtain
\begin{equation}\label{D-2}
2^2D^2(E(x))=2E(x)+2E^3(x).
\end{equation}
Differentiating~(\ref{D-2}) with respect to $x$, we get
$2^3D^3(E(x))=2+8E^2(x)+6E^4(x)$.
We now present a connection between $E(x)$ and $P_n(u)$.
\begin{proposition}
For $n\geq 0$, we have $2^nD^n(E(x))=P_n(E(x))$.
\end{proposition}
\begin{proof}
The cases $n\leq 3$ have been verified above, so it suffices to consider $n\geq 3$.
We proceed by induction on $n$. Assume that the statement is true for $n=k$.
Then
\begin{align*}
2^{k+1}D^{k+1}(E(x))&=2D(P_k(E(x)))\\
&=2P_k'(E(x))D(E(x))\\
&=(1+E^2(x))P_k'(E(x))\\
&=P_{k+1}(E(x)).
\end{align*}
Thus the statement is true for $k+1$, as desired.
\end{proof}
In~\cite{Ma12} we write the derivative polynomials in terms of $y$ and $z$ as follows:
\begin{equation*}
D^n(y)=\sum_{k=0}^{\lrf{({n-1})/{2}}}W_{n,k}y^{n-2k-1}z^{2k+2}\quad \text{and}\quad D^n(z)=\sum_{k=0}^{\lrf{{n}/{2}}}W_{n,k}^{\textit{l}}y^{n-2k}z^{2k+1} \quad \text{for}\quad n\geq 1.
\end{equation*}
In particular, we observed that the coefficients $W_{n,k}$ and $W_{n,k}^{\textit{l}}$ have simple combinatorial interpretations. The coefficient $W_{n,k}$ is the number of permutations in ${\mathcal S}_n$ with $k$ interior peaks, where an interior peak of $\pi$ is an index $2\leq i\leq n-1$ such that $\pi(i-1)<\pi(i)>\pi(i+1)$.
The coefficient $W_{n,k}^{\textit{l}}$ is the number of permutations in ${\mathcal S}_n$ with $k$ left peaks, where a left peak of $\pi$ is either an interior peak or else the index $1$ in the case $\pi(1)>\pi(2)$ (see~\cite{Petersen09} for instance).
In this paper we are concerned with a variation of the above definitions.
The organization of this paper is as follows. In Section~\ref{Section-2}, we collect some notation, definitions and results that will be needed in the rest of the paper. In Section~\ref{Section-3}, we establish a connection between
Eulerian numbers and the expansion of $(Dy)^n(y)$.
In Section~\ref{Section-4}, we establish a connection between
Eulerian numbers of type $B$ and the expansion of $(Dy)^n(z)$.
In Section~\ref{Section-5}, we study some polynomials related to $(yD)^n(y)$ and $(yD)^n(z)$.
\section{Preliminaries}\label{Section-2}
For a permutation $\pi\in{\mathcal S}_n$, we define a {\it descent} to be a position $i$ such that $\pi(i)>\pi(i+1)$. Denote by ${\rm des\,}(\pi)$ the number of descents of $\pi$. Let
\begin{equation*}
A_n(x)=\sum_{\pi\in{\mathcal S}_n}x^{{\rm des\,}(\pi)+1}=\sum_{k=1}^nA(n,k)x^{k},
\end{equation*}
The polynomial $A_n(x)$ is called an {\it Eulerian polynomial}, while $A(n,k)$ is called an {\it Eulerian number}.
The exponential generating function for $A_n(x)$ is
\begin{equation}\label{Axz}
A(x,t)=1+\sum_{n\geq 1}A_n(x)\frac{t^n}{n!}=\frac{1-x}{1-xe^{t(1-x)}}.
\end{equation}
The numbers $A(n,k)$ satisfy the recurrence
\begin{equation}\label{recu-Euleriannum}
A(n+1,k)=kA(n,k)+(n-k+2)A(n,k-1)
\end{equation}
with the initial conditions $A(0,0)=1$ and $A(0,k)=0$ for $k\geq 1$ (see~\cite[A008292]{Sloane} for details).
The first few of the Eulerian polynomials $A_n(x)$ are
$$A_0(x)=1,A_1(x)=x,A_2(x)=x+x^2,A_3(x)=x+4x^2+x^3.$$
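The recurrence~(\ref{recu-Euleriannum}) is straightforward to implement; the following sketch (a check added for illustration, not part of the original text) reproduces the coefficients of $A_1,\ldots,A_4$.
\begin{verbatim}
from functools import lru_cache

@lru_cache(maxsize=None)
def A(n, k):
    # A(0,0)=1, A(0,k)=0 for k>=1, and
    # A(n,k) = k*A(n-1,k) + (n-k+1)*A(n-1,k-1)
    if n == 0:
        return 1 if k == 0 else 0
    if k <= 0:
        return 0
    return k * A(n - 1, k) + (n - k + 1) * A(n - 1, k - 1)

print([[A(n, k) for k in range(1, n + 1)] for n in range(1, 5)])
# [[1], [1, 1], [1, 4, 1], [1, 11, 11, 1]]
\end{verbatim}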
It is well known that
\begin{equation}\label{symmetric}
A_n(x)=x^{n+1}A_n\left(\frac{1}{x}\right).
\end{equation}
An explicit formula for $A(n,k)$ is given as follows:
\begin{equation*}
A(n,k)=\sum_{i=0}^k(-1)^i\binom{n+1}{i}(k-i)^n.
\end{equation*}
Let $\mathcal{B}_n$ denote the set of signed permutations of $\pm[n]$ such that $\pi(-i)=-\pi(i)$ for all $i$, where $\pm[n]=\{\pm1,\pm2,\ldots,\pm n\}$.
Let
$${B}_n(x)=\sum_{k=0}^nB(n,k)x^{k}=\sum_{\pi\in \mathcal{B}_n}x^{{\rm des\,}_B(\pi)},$$
where
$${\rm des\,}_B(\pi)=|\{i\in[n]:\pi(i-1)>\pi(i)\}|$$
with $\pi(0)=0$.
The polynomial $B_n(x)$ is called an {\it Eulerian polynomial of type $B$}, while $B(n,k)$ is called an {\it Eulerian number of type $B$}.
Below are the polynomials ${B}_n(x)$ for $n\leq 3$:
$$B_0(x)=1,B_1(x)=1+x,B_2(x)=1+6x+x^2,B_3(x)=1+23x+23x^2+x^3.$$
The numbers $B(n,k)$ satisfy
the recurrence relation
\begin{equation}\label{Bnk-Euleriannum}
B(n+1,k)=(2k+1)B(n,k)+(2n-2k+3)B(n,k-1),
\end{equation}
with the initial conditions $B(0,0)=1$ and $B(0,k)=0$ for $k\geq 1$.
An explicit formula for $B(n,k)$ is given as follows:
\begin{equation*}
B(n,k)=\sum_{i=0}^k(-1)^{i}\binom{n+1}{i}(2k-2i+1)^{n}
\end{equation*}
for $0\leq k\leq n$ (see~\cite{Eriksen00} for details).
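One can likewise check (a verification added here for illustration) that the recurrence~(\ref{Bnk-Euleriannum}) and the explicit formula above agree for small $n$.
\begin{verbatim}
from functools import lru_cache
from math import comb

@lru_cache(maxsize=None)
def B(n, k):
    # B(n,k) = (2k+1)B(n-1,k) + (2n-2k+1)B(n-1,k-1), B(0,0)=1
    if n == 0:
        return 1 if k == 0 else 0
    if k < 0:
        return 0
    return (2*k + 1) * B(n - 1, k) + (2*n - 2*k + 1) * B(n - 1, k - 1)

def B_explicit(n, k):
    return sum((-1)**i * comb(n + 1, i) * (2*k - 2*i + 1)**n
               for i in range(k + 1))

assert all(B(n, k) == B_explicit(n, k)
           for n in range(1, 8) for k in range(n + 1))
print([B(3, k) for k in range(4)])   # [1, 23, 23, 1]
\end{verbatim}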
For $n\geq 0$, we always assume that
$$(Dy)^{n+1}(y)=(Dy)(Dy)^n(y)=D(y(Dy)^n(y)),$$
$$(Dy)^{n+1}(z)=(Dy)(Dy)^n(z)=D(y(Dy)^n(z)),$$
$$(yD)^{n+1}(y)=(yD)(yD)^n(y)=yD((yD)^n(y)),$$
$$(yD)^{n+1}(z)=(yD)(yD)^n(z)=yD((yD)^n(z)).$$
Clearly, $(Dy)^n(y+z)=(Dy)^n(y)+(Dy)^n(z)$.
For $n\geq 1$,
we define
\begin{equation*}\label{def-derivative-2}
(Dy)^n(y+z)=\sum_{k=0}^{2n}J(2n,k)y^{2n-k}z^{k+1}.
\end{equation*}
In Section~\ref{Section-3} and Section~\ref{Section-4}, we respectively obtain the following results:
$$J(2n,2k-1)=2^nA(n,k) \quad \text{for}\quad 1\leq k\leq n,$$
and $$J(2n,2k)=B(n,k) \quad \text{for}\quad 0\leq k\leq n.$$
Let $J_n(x)=\sum_{k=0}^{2n}J(2n,k)x^k$ for $n\geq 1$.
Then $xJ_n(x)=2^nA_n(x^2)+xB_n(x^2)$.
Therefore, from~\cite[Theorem 3]{Ma12}, we have
\begin{equation}\label{JnxAnx}
xJ_n(x)=(1+x)^{n+1}A_n(x).
\end{equation}
Using~(\ref{JnxAnx}), we immediately obtain the following result.
\begin{proposition}
For $n\geq 1$, we have
$$(Dy)^n(y+z)=(y+z)^{n+1}\sum_{k=1}^nA(n,k)y^{n-k}z^k.$$
\end{proposition}
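The identity in this proposition can be checked symbolically by computing with polynomials in $y$ and $z$ under the rules $D(y)=z^2$ and $D(z)=yz$; the sketch below (an illustration only) computes $(Dy)^2(y+z)=z(y+z)^4$.
\begin{verbatim}
def D(p):
    # p is a dict {(i, j): c} representing a sum of c * y^i * z^j;
    # D(y^i z^j) = i*y^(i-1)*z^(j+2) + j*y^(i+1)*z^j
    out = {}
    for (i, j), c in p.items():
        if i:
            out[(i - 1, j + 2)] = out.get((i - 1, j + 2), 0) + i * c
        if j:
            out[(i + 1, j)] = out.get((i + 1, j), 0) + j * c
    return out

def Dy(p):
    # the operator Dy: multiply by y, then differentiate
    return D({(i + 1, j): c for (i, j), c in p.items()})

p = {(1, 0): 1, (0, 1): 1}          # y + z
for _ in range(2):
    p = Dy(p)
print(p)
# exponents (i, j) of y^i z^j with coefficients:
# {(1, 4): 4, (3, 2): 4, (0, 5): 1, (2, 3): 6, (4, 1): 1},
# i.e. y^4 z + 4 y^3 z^2 + 6 y^2 z^3 + 4 y z^4 + z^5 = z (y+z)^4
\end{verbatim}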
\section{On the expansion of $(Dy)^n(y)$}\label{Section-3}
For $n\geq 1$, we define
\begin{equation}\label{def-derivative-1}
(Dy)^n(y)=\sum_{k=1}^{n}E(n,k)y^{2n-2k+1}z^{2k}.
\end{equation}
\begin{theorem}\label{thm1}
For $1\leq k\leq n$, we have
$E(n,k)=2^nA(n,k)$, where $A(n,k)$ is an Eulerian number.
\end{theorem}
\begin{proof}
Note that $D(y^2)=2yz^2$. Then $E(1,1)=2A(1,1)$.
Since
$$(Dy)^{n+1}(y)=D(y(Dy)^n(y))=2\sum_{k=1}^nkE(n,k)y^{2n-2k+3}z^{2k}+
2\sum_{k=1}^n(n-k+1)E(n,k)y^{2n-2k+1}z^{2k+2},$$
there follows
\begin{equation}\label{T-recurrence}
E(n+1,k)=2(kE(n,k)+(n-k+2)E(n,k-1)).
\end{equation}
Comparison of~(\ref{T-recurrence}) with~(\ref{recu-Euleriannum}) gives the desired result.
\end{proof}
Let
\begin{equation}\label{Def-22}
F_n(y)=(Dy)^n(y)=\sum_{k=0}^nF(n,k)y^{2k+1}.
\end{equation}
By $F_{n+1}(y)=D(yF_n(y))$, we obtain
\begin{equation}\label{bny-recu}
F_{n+1}(y)=(1+y^2)F_n(y)+y(1+y^2)F'_n(y)
\end{equation}
with initial values $F_0(y)=y$. We define $F_n(y)=2^na_n(y)$ for $n\geq 0$.
In the following we present an explicit formula for $a_n(y)$.
\begin{theorem}\label{thm2}
For $n\geq 1$, we have
\begin{equation}\label{explicit-any}
a_n(y)=\sum_{k=1}^nA(n,k)y^{2n-2k+1}(1+y^2)^k.
\end{equation}
\end{theorem}
\begin{proof}
Combining~(\ref{def-derivative-1}) and~(\ref{Def-22}) with the identity $E(n,k)=2^nA(n,k)$ and the symmetry relation~(\ref{symmetric}), which gives $E(n,k)=E(n,n-k+1)$, we get
\begin{align*}
F_n(y)&=\sum_{k=1}^{n}E(n,k)y^{2k-1}(1+y^2)^{n-k+1}\\
&=2^ny^{-1}(1+y^2)^{n+1}\sum_{k=1}^nA(n,k)\left(\frac{y^2}{1+y^2}\right)^k.
\end{align*}
It follows from~(\ref{symmetric}) that
\begin{align*}
a_n(y)&=y^{2n+1}\left(\frac{1+y^2}{y^2}\right)^{n+1}\sum_{k=1}^nA(n,k)\left(\frac{y^2}{1+y^2}\right)^k\\
&=\sum_{k=1}^nA(n,k)y^{2n-2k+1}(1+y^2)^k.
\end{align*}
This completes the proof.
\end{proof}
Let $a_n(y)=\sum_{k=0}^na(n,k)y^{2k+1}$.
Equating the coefficients of $y^{2n-2k+1}$ on both sides of~(\ref{explicit-any}), we obtain
$$a(n,n-k)=\sum_{i=k}^n\binom{i}{k}A(n,i).$$
It follows from~(\ref{bny-recu}) that
\begin{equation}\label{ank}
a(n+1,k)=(k+1)a(n,k)+ka(n,k-1).
\end{equation}
We define $W_n(x)=\sum_{k=0}^na(n,k)x^{k+1}$. By~(\ref{ank}), we have
\begin{equation}\label{any-recurrence}
W_{n+1}(x)=(x+x^2)W'_n(x),
\end{equation}
with initial values $W_{0}(x)=x$.
The first few terms of $W_n(x)$ are given as follows:
\begin{align*}
W_1(x)&=x+x^2,\\
W_2(x)&=x+3x^2+2x^3,\\
W_3(x)&=x+7x^2+12x^3+6x^4,\\
W_4(x)&=x+15x^2+50x^3+60x^4+24x^5.
\end{align*}
The triangular array $\{a(n,k)\}_{n\geq 0, 0\leq k\leq n}$ is called a {\it Worpitzky triangle}, and it has been extensively studied by many authors (see~\cite[\textsf{A028246}]{Sloane}).
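As a quick consistency check (not in the original text), the recurrence~(\ref{ank}) and the binomial formula for $a(n,n-k)$ can be compared directly for small $n$.
\begin{verbatim}
from functools import lru_cache
from math import comb

@lru_cache(maxsize=None)
def A(n, k):
    # Eulerian numbers, as in the earlier sketch
    if n == 0:
        return 1 if k == 0 else 0
    return 0 if k <= 0 else k * A(n-1, k) + (n - k + 1) * A(n-1, k-1)

@lru_cache(maxsize=None)
def a(n, k):
    # Worpitzky triangle: a(n,k) = (k+1)*a(n-1,k) + k*a(n-1,k-1), a(0,0)=1
    if n == 0:
        return 1 if k == 0 else 0
    return 0 if k < 0 else (k + 1) * a(n-1, k) + k * a(n-1, k-1)

assert all(a(n, n - k) == sum(comb(i, k) * A(n, i) for i in range(k, n + 1))
           for n in range(1, 8) for k in range(n + 1))
\end{verbatim}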
In view of~(\ref{any-recurrence}),
it is natural to consider the expansion of the operator $((x+x^2)D)^n$.
We define
\begin{equation}\label{def-Tnkx}
((x+x^2)D)^n=\sum_{k=1}^nG_{n,k}(x)(x+x^2)^kD^k \quad\textrm{for $n\geq 1$}.
\end{equation}
Applying the operator $(x+x^2)D$ on the left of~(\ref{def-Tnkx}), we get
\begin{equation}\label{Tnkx-1}
G_{n+1,k}(x)=k(1+2x)G_{n,k}(x)+(x+x^2)D(G_{n,k}(x))+G_{n,k-1}(x).
\end{equation}
On the other hand, we have
$$D^k((x+x^2)D)=(x+x^2)D^{k+1}+k(1+2x)D^{k}+k(k-1)D^{k-1}.$$
Applying the operator $(x+x^2)D$ on the right of~(\ref{def-Tnkx}), we get
\begin{equation}\label{Tnkx-2}
G_{n+1,k}(x)=k(1+2x)G_{n,k}(x)+k(k+1)(x+x^2)G_{n,k+1}(x)+G_{n,k-1}(x).
\end{equation}
Comparison of~(\ref{Tnkx-1}) with~(\ref{Tnkx-2}) gives $D(G_{n,k}(x))=k(k+1)G_{n,k+1}(x)$.
Thus
\begin{equation*}\label{TnkxD}
G_{n,k}(x)=\frac{1}{k!(k-1)!}D^{k-1}(G_{n,1}(x)).
\end{equation*}
Thus $\deg G_{n,k}(x)=n-k$.
For $k=1$, set $G_{n}(x)=G_{n,1}(x)$.
Then~(\ref{Tnkx-1}) reduces to
\begin{equation*}\label{Tnx-recu}
G_{n+1}(x)=(1+2x)G_{n}(x)+(x+x^2)D(G_{n}(x))
\end{equation*}
with initial values $G_1(x)=1$.
We define $G_{n}(x)=\sum_{k=1}^nG(n,k)x^{k-1}$. It is easy to verify that
\begin{equation}\label{Tnk-recurrence}
G(n+1,k)=kG(n,k)+kG(n,k-1)
\end{equation}
with $G(1,1)=1$.
The {\it Stirling numbers of the second kind}, denoted by $S(n,k)$, may be defined by
the recurrence relation
\begin{equation}\label{recu-Stirling}
S(n+1,k)=kS(n,k)+S(n,k-1)
\end{equation}
with the initial conditions $S(0,0)=1$ and $S(n,0)=0$ for $n\geq 1$ (see~\cite[A008277 ]{Sloane} for details).
Comparison of~(\ref{Tnk-recurrence}) with~(\ref{recu-Stirling}) gives the following result.
\begin{proposition}
For $1\leq k\leq n$, we have
$G(n,k)=k!S(n,k)$.
\end{proposition}
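A small numerical check of this proposition (an illustration only), comparing the recurrence~(\ref{Tnk-recurrence}) with $k!\,S(n,k)$:
\begin{verbatim}
from functools import lru_cache
from math import factorial

@lru_cache(maxsize=None)
def G(n, k):
    # G(n,k) = k*(G(n-1,k) + G(n-1,k-1)), with G(1,1) = 1
    if n == 1:
        return 1 if k == 1 else 0
    return 0 if k < 1 else k * (G(n - 1, k) + G(n - 1, k - 1))

@lru_cache(maxsize=None)
def S(n, k):
    # Stirling numbers of the second kind: S(n,k) = k*S(n-1,k) + S(n-1,k-1)
    if n == 0:
        return 1 if k == 0 else 0
    return 0 if k < 1 else k * S(n - 1, k) + S(n - 1, k - 1)

assert all(G(n, k) == factorial(k) * S(n, k)
           for n in range(1, 9) for k in range(1, n + 1))
\end{verbatim}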
\section{On the expansion of $(Dy)^n(z)$}\label{Section-4}
For $n\geq 0$,
we define
\begin{equation*}\label{def-derivative-2}
(Dy)^n(z)=\sum_{k=0}^{n}H(n,k)y^{2n-2k}z^{2k+1}.
\end{equation*}
\begin{theorem}\label{thm3}
For $0\leq k\leq n$, we have $H(n,k)=B(n,k)$, where $B(n,k)$ is an Eulerian number of type $B$.
\end{theorem}
\begin{proof}
Clearly, $H(0,0)=1$. Note that $D(yz)=y^2z+z^3$. Then $H(1,0)=B(1,0)$ and $H(1,1)=B(1,1)$.
Note that
$$(Dy)(Dy)^n(z)=\sum_{k=0}^n(1+2k)H(n,k)y^{2n-2k+2}z^{2k+1}+
\sum_{k=0}^n(2n-2k+1)H(n,k)y^{2n-2k}z^{2k+3}.$$
Thus we obtain
$$H(n+1,k)=(1+2k)H(n,k)+(2n-2k+3)H(n,k-1).$$
Hence $H(n,k)$ satisfies the same recurrence and initial conditions as $B(n,k)$, so they agree.
\end{proof}
Let $(Dy)^n(z)=zf_n(y)$. Using $(Dy)^{n+1}(z)=D(yzf_n(y))$, we get
\begin{equation}\label{Rny-recu}
f_{n+1}(y)=(1+2y^2)f_n(y)+y(1+y^2)f'_n(y)
\end{equation}
with initial values $f_0(y)=1$.
The first few terms of $f_n(y)$ are given as follows:
\begin{align*}
f_1(y)&=1+2y^2,\\
f_2(y)&=1+8y^2+8y^4,\\
f_3(y)&=1+26y^2+72y^4+48y^6,\\
f_4(y)&=1+80y^2+464y^4+768y^6+384y^8.
\end{align*}
Set $f_n(y)=\sum_{k=0}^nf({n,k})y^{2k}$. By~(\ref{Rny-recu}), we obtain
\begin{equation}\label{fnk-recu}
f(n+1,k)=(1+2k)f(n,k)+2kf(n,k-1)\quad\textrm{for $0\leq k\leq n$,}
\end{equation}
with initial conditions $f(0,0)=1,f(0,k)=0$ for $k\geq 1$.
It should be noted~\cite[A145901]{Sloane} that $$(f(n,0),f(n,1),\ldots,f(n,n))$$
is the $f$-vector of the simplicial complex dual to the
permutohedron of type $B$.
\section{Polynomials related to $(yD)^n(y)$ and $(yD)^n(z)$}\label{Section-5}
For $n\geq 1$, we define
\begin{equation}\label{def-derivative}
(yD)^n(y)=\sum_{k=1}^{n}M({n,k})y^{2k-1}z^{2n-2k+2}\quad \text{and}\quad (yD)^n(z)=\sum_{k=1}^{n}N({n,k})y^{2k}z^{2n-2k+1}.
\end{equation}
\begin{theorem}
For $1\leq k\leq n$, we have
\begin{equation}\label{recurrence-1}
M({n+1,k})=(2k-1)M({n,k})+(2n-2k+4)M({n,k-1}),
\end{equation}
\begin{equation}\label{recurrence-11}
N({n+1,k})=2kN({n,k})+(2n-2k+3)N({n,k-1}).
\end{equation}
\end{theorem}
\begin{proof}
Note that
$$(yD)^{n+1}(y)=(yD)(yD)^n(y)=\sum_{k=1}^n(2k-1)M({n,k})y^{2k-1}z^{2n-2k+4}+
\sum_{k=1}^n(2n-2k+2)M({n,k})y^{2k+1}z^{2n-2k+2}.$$
Thus we obtain~(\ref{recurrence-1}).
Similarly, we get~(\ref{recurrence-11}).
\end{proof}
From~(\ref{recurrence-1}) and~(\ref{recurrence-11}), we immediately get a connection between $M({n,k})$ and $N({n,k})$.
\begin{corollary}
For $1\leq k\leq n$, we have
$M({n,k})=N({n,n-k+1})$.
\end{corollary}
Let
\begin{equation*}
M_n(x)=\sum_{k=1}^nM({n,k})x^k\quad \text{and}\quad N_n(x)=\sum_{k=1}^nN({n,k})x^k.
\end{equation*}
Then
\begin{equation}\label{MnxNnxSymm}
M_n(x)=x^{n+1}N_n\left(\frac{1}{x}\right).
\end{equation}
Let
\begin{equation}\label{Def-2}
R_n(y)=(yD)^n(y)=\sum_{k=0}^nR({n,k})y^{2k+1}\quad \text{and}\quad zT_n(y)=(yD)^n(z)=z\sum_{k=1}^nT({n,k})y^{2k}.
\end{equation}
Using~(\ref{Def-2}), it is easy to verify that
\begin{equation}\label{Rny-recurr}
R_{n+1}(y)=y(1+y^2)R'_n(y)
\end{equation}
and
\begin{equation}\label{Galton}
T_{n+1}(y)=y^2T_n(y)+y(1+y^2)T'_n(y).
\end{equation}
The first few terms of $R_n(y)$ and $T_n(y)$ are given as follows:
\begin{align*}
R_1(y)&=y+y^3,\\
R_2(y)&=y+4y^3+3y^5,\\
R_3(y)&=y+13y^3+27y^5+15y^7,\\
R_4(y)&=y+40y^3+174y^5+240y^7+105y^9,\\
R_5(y)&=y+121y^3+990y^5+2550y^7+2625y^9+945y^{11};\\
T_1(y)&=y^2,\\
T_2(y)&=2y^2+3y^4,\\
T_3(y)&=4y^2+18y^4+15y^6,\\
T_4(y)&=8y^2+84y^4+180y^6+105y^8,\\
T_5(y)&=16y^2+360y^4+1500y^6+2100y^8+945y^{10}.
\end{align*}
Equating the coefficient of $y^{2k+1}$ on both sides of~(\ref{Rny-recurr}), we get
\begin{equation*}\label{Rny-recu-2}
R({n+1,k})=(2k+1)R({n,k})+(2k-1)R({n,k-1}).
\end{equation*}
Equating the coefficient of $y^{2k}$ on both sides of~(\ref{Galton}), we get
\begin{equation*}\label{GaltonTri}
T({n+1,k})=2kT({n,k})+(2k-1)T({n,k-1}).
\end{equation*}
Clearly, $R({n,n})=T({n,n})=(2n-1)!!$, where $(2n-1)!!$ is the {\it double factorial number}.
It should be noted that the triangular arrays $\{R({n,k})\}_{n\geq 1,0\leq k\leq n}$ and $\{T({n,k})\}_{n\geq 1,1\leq k\leq n}$ are both {\it Galton triangles} (see~\cite[\textsf{A187075}]{Sloane} for instance), and they have been studied by Neuwirth~\cite{Neuwirth01}.
We now present the following result.
\begin{theorem}\label{RnyMny}
For $n\geq 1$, we have
\begin{equation*}
R_n(y)=y^{2n+1}N_n\left(\frac{1+y^2}{y^2}\right)\quad \text{and}\quad T_n(y)=
(1+y^2)^{n}N_n\left(\frac{y^2}{1+y^2}\right).
\end{equation*}
\end{theorem}
\begin{proof}
Note that $z^2=y^2+1$. Combining~(\ref{def-derivative}) and~(\ref{Def-2}), we obtain
$$R_n(y)=\sum_{k=1}^{n}M({n,k})y^{2k-1}(y^2+1)^{n-k+1}=
y^{-1}(1+y^2)^{n+1}M_n\left(\frac{y^2}{1+y^2}\right),$$
and
$$T_n(y)=\sum_{k=1}^{n}N({n,k})y^{2k}(y^2+1)^{n-k}=
(1+y^2)^{n}N_n\left(\frac{y^2}{1+y^2}\right).$$
Using~(\ref{MnxNnxSymm}), we get
$$(1+y^2)^{n+1}M_n\left(\frac{y^2}{1+y^2}\right)=y^{2n+2}N_n\left(\frac{1+y^2}{y^2}\right)$$
as desired.
\end{proof}
By Theorem~\ref{RnyMny}, we get $R_n(1)=N_n(2)$ and $T_n(1)=2^nN_n(\frac{1}{2})$.
It follows from~(\ref{recurrence-11}) that
\begin{equation}\label{recu-Nnx}
N_{n+1}(x)=(2n+1)xN_n(x)+2x(1-x)N'_n(x)
\end{equation}
with initial values $N_0(x)=1$. The first few terms of $N_n(x)$ can be computed directly as follows:
\begin{align*}
N_1(x)&=x,\\
N_2(x)&=2x+x^2,\\
N_3(x)&=4x+10x^2+x^3,\\
N_4(x)&=8x+60x^2+36x^3+x^4,\\
N_5(x)&=16x+296x^2+516x^3+116x^4+x^5.
\end{align*}
In particular, $N({n,1})=2^{n-1}$, $N({n,n})=1$ and $N_n(1)=(2n-1)!!$ for $n\geq 1$.
In the following discussion, we will study some properties of $N_n(x)$.
The numbers $N({n,k})$ arise often in combinatorics and other branches of mathematics (see~\cite{Lehmer85} for instance). A {\it perfect matching} of $[2n]$ is a partition of $[2n]$ into $n$ blocks of size $2$. Using~(\ref{recurrence-11}) and analyzing the placement of $2n-1$ and $2n$, it is easy to verify that the number $N({n,k})$ counts perfect matchings of $[2n]$ with the restriction that only $k$ matching pairs have odd smaller entries (see~\cite[\textsf{A185411}]{Sloane}).
It is well known~\cite[\textsf{A156919}]{Sloane} that
\begin{equation}\label{Explicit}
N_n(x)=\sum_{k=1}^n2^{n-2k}\binom{2k}{k}k!S(n,k)x^k(1-x)^{n-k} \quad \text{for}\quad n\geq 1,
\end{equation}
where $S(n,k)$ is {\it the Stirling number of the second kind}.
By~(\ref{Explicit}), we get
$$N({n,k})=\sum_{i=1}^k(-1)^{k-i}2^{n-2i}\binom{2i}{i}\binom{n-i}{k-i}i!S(n,i).$$
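The formula~(\ref{Explicit}) and the expression for $N({n,k})$ just displayed can be checked against the recurrence~(\ref{recurrence-11}); the sketch below (added as an illustration) does this with exact rational arithmetic.
\begin{verbatim}
from functools import lru_cache
from fractions import Fraction
from math import comb, factorial

@lru_cache(maxsize=None)
def S(n, k):
    if n == 0:
        return 1 if k == 0 else 0
    return 0 if k < 1 else k * S(n - 1, k) + S(n - 1, k - 1)

@lru_cache(maxsize=None)
def N(n, k):
    # N(n,k) = 2k*N(n-1,k) + (2n-2k+1)*N(n-1,k-1), N(0,0)=1
    if n == 0:
        return 1 if k == 0 else 0
    if k < 1:
        return 0
    return 2 * k * N(n - 1, k) + (2*n - 2*k + 1) * N(n - 1, k - 1)

def N_explicit(n, k):
    return sum(Fraction((-1) ** (k - i) * comb(2*i, i) * comb(n - i, k - i)
                        * factorial(i) * S(n, i)) * Fraction(2) ** (n - 2*i)
               for i in range(1, k + 1))

assert all(N(n, k) == N_explicit(n, k)
           for n in range(1, 9) for k in range(1, n + 1))
\end{verbatim}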
Let
$$N(x,t)=\sum_{n\geq 0}N_n(x)\frac{t^n}{n!}.$$
Using~(\ref{recu-Nnx}), the formal power series $N(x,t)$ satisfies the following
partial differential equation:
\begin{equation*}\label{diff-eq}
(1-2xt)\frac{\partial N(x,t)}{\partial t}-2x(1-x)\frac{\partial N(x,t)}{\partial x}=xN(x,t).
\end{equation*}
By {\it the method of characteristics}~\cite{Wilf}, it is easy to derive an explicit form:
\begin{equation*}
N(x,t)=e^{xt}\sqrt{\frac{1-x}{e^{2xt}-xe^{2t}}}.
\end{equation*}
Hence
\begin{equation}\label{N2xt}
N^2(x,t)=\frac{1-x}{1-xe^{2t(1-x)}}.
\end{equation}
Combining~(\ref{Axz}) and~(\ref{N2xt}), we get the following result.
\begin{theorem}
For $n\geq 0$, we have
\begin{equation*}
\sum_{k=0}^n\binom{n}{k}N_k(x)N_{n-k}(x)=2^nA_n(x).
\end{equation*}
\end{theorem}
In the remainder of this section, we give both central and local limit theorems for the coefficients of $N_n(x)$.
As an application of a result~\cite[Theorem 2]{Ma08} on polynomials with only real zeros,
the recurrence relation~(\ref{recu-Nnx}) enables us to show that the polynomials $\{N_n(x)\}_{n\geq 1}$
form a {\it Sturm sequence}.
\begin{proposition}\label{RealZeros}
For $n\geq 2$, the polynomial $N_n(x)$ has $n$ distinct real zeros, separated by the zeros of $N_{n-1}(x)$.
\end{proposition}
Let $\{a(n,k)\}_{0\leq k\leq n}$ be a sequence of nonnegative real numbers. It has no {\it internal zeros} if there exist no indices $i<j<k$ with $a(n,i)a(n,k)\neq0$ but $a(n,j)=0$.
Let $A_n=\sum_{k=0}^na(n,k)$. We say the sequence $\{a(n,k)\}$ satisfies a central limit theorem with mean $\mu_n$ and variance $\sigma_n^2$ provided
$$\lim_{n\rightarrow+\infty}\sup_{x\in\mathbb{R}}\left|\sum_{k=0}^{\mu_n+x\sigma_n}\frac{a(n,k)}{A_n} -\frac{1}{\sqrt{2\pi}}\int_{-\infty}^xe^{-\frac{t^2}{2}}dt\right|=0.$$
The sequence satisfies a local limit theorem on $B\subseteq\mathbb{R}$ if
$$\lim_{n\rightarrow+\infty}\sup_{x\in B}\left|\frac{\sigma_na(n,\mu_n+x\sigma_n)}{A_n} -\frac{1}{\sqrt{2\pi}}e^{-\frac{x^2}{2}}\right|=0.$$
Recall the following theorem of Bender.
\begin{theorem}\cite{Bender73}\label{bender}
Let $\{P_n\}_{n\geq1}$ be a sequence of polynomials with only real zeros. The sequence of the coefficients of $P_n$ satisfies a central limit theorem with $$\mu_n=\frac{P_n'(1)}{P_n(1)} \quad\textrm{and}\quad
\sigma_n^2=\frac{P_n'(1)}{P_n(1)}+\frac{P_n''(1)}{P_n(1)}-\left(\frac{P_n'(1)}{P_n(1)}\right)^2,$$
provided that $\lim\limits_{n\to\infty}\sigma_n^2=+\infty$.
If the sequence of coefficients of each $P_n(x)$ has no internal zeros, then the sequence of coefficients satisfies a local limit theorem.
\end{theorem}
Combining Proposition~\ref{RealZeros} and Theorem~\ref{bender}, we obtain the following result.
\begin{theorem}\label{mthm-2}
The sequence $\{N({n,k})\}_{1\leq k\leq n}$ satisfies a central and a local limit theorem with
$\mu_n={(2n+1)}/{4}$ and $\sigma_n^2=(2n+1)/24,$ where $n\geq 4$.
\end{theorem}
\begin{proof}
By differentiating~(\ref{recu-Nnx}) and evaluating at $x=1$, we obtain the recurrence
$x_{n+1}=(2n+1)!!+(2n-1)x_n$
for $x_n=N_n'(1)$, which gives $x_n\sim(2n+1)!!/4$, so that $\mu_n=N_n'(1)/N_n(1)\sim{(2n+1)}/{4}$.
Another differentiation leads to the recurrence
$$y_{n+1}=(4n-2)x_n+(2n-3)y_n$$
for $y_n=N_n''(1)$. Substituting the ansatz $y_n=(2n-1)!!(an^2+bn+c)$ and solving for $a,b,c$ gives
$$y_n\sim{(2n-1)!!}(12n^2-8n-7)/{48},$$
and hence $\sigma_n^2\sim(2n+1)/24$. Since replacing $\mu_n$ and $\sigma_n$ by asymptotically equivalent quantities does not affect the limit theorems, and $\lim\limits_{n\to\infty}\sigma_n^2=+\infty$, the result follows from Theorem~\ref{bender}.
\end{proof}
Let $P(x)=\sum_{i=0}^na_ix^i$ be a polynomial. Let $m$ be an index such that $a_m=\max_{0\leq i\leq n}a_i$.
Darroch~\cite{Darroch64} showed that if $P(x)\in{\rm RZ}(-\infty,0]$, then
$$\left\lfloor{\frac{P'(1)}{P(1)}}\right\rfloor\leq m\leq \left\lceil{\frac{P'(1)}{P(1)}}\right\rceil.$$
So the following result is immediate.
\begin{corollary}
Let $i=\left\lfloor{(2n+1)}/{4}\right\rfloor$ or $i=\left\lceil{(2n+1)}/{4}\right\rceil$. Then $N({n,i})=\max_{1\leq k\leq n}N({n,k})$.
\end{corollary}
\end{document}
\begin{document}
\title{Numerical approximation of the Euler-Poisson-Boltzmann model in the quasineutral limit}
\begin{abstract}
This paper analyzes various schemes for the Euler-Poisson-Boltzmann (EPB) model of plasma physics. This model consists of the pressureless gas dynamics equations coupled with the Poisson equation and where the Boltzmann relation relates the potential to the electron density. If the quasi-neutral assumption is made, the Poisson equation is replaced by the constraint of zero local charge and the model reduces to the Isothermal Compressible Euler (ICE) model. We compare a numerical strategy based on the EPB model to a strategy using a reformulation (called REPB formulation). The REPB scheme captures the quasi-neutral limit more accurately.
\end{abstract}
\noindent
{\bf Acknowledgements:} P. D., D. S. and M-H. V. have been supported by the 'Fondation Sciences et Technologies pour
l'Aéronautique et l'Espace', in the frame of the project 'Plasmax' (contract \# RTRA-STAE/2007/PF/002) and by the
'F\'{e}d\'{e}ration de recherche sur la fusion par confinement magn\'{e}tique',
in the frame of the contract 'APPLA' (Asymptotic-Preserving schemes for
Plasma Transport) funded by the CEA (contract \# V3629.001 avenant 2). Liu's research was partially supported by the National Science Foundation under Kinetic FRG grant No. DMS 07-57227.
\noindent
{\bf Key words: } Euler-Poisson-Boltzmann, quasineutrality, Asymptotic-Preserving scheme, stiffness, Debye length.
\noindent
{\bf AMS Subject classification: } 82D10, 76W05, 76X05, 76N10, 76N20, 76L05
\vskip 0.4cm
\setcounter{equation}{0}
\section{Introduction}
\label{sec_intro}
The goal of this paper is to analyze various schemes for the Euler-Poisson-Boltzmann model of plasma physics. The Euler-Poisson-Boltzmann (EPB) model describes the plasma ions through a system of pressureless gas dynamics equations subjected to an electrostatic force. The electrostatic potential is related to the ion and electron densities through the Poisson equation. The Boltzmann relation provides a non-linear relationship between the potential and the electron density which allows to close the system. More precisely, the Euler-Poisson-Boltzmann model is written
\begin{eqnarray}
& & \hspace{-1cm} \partial_t n + \nabla \cdot (nu) = 0, \label{EPB_n} \\
& & \hspace{-1cm} m ( \partial_t (nu) + \nabla \cdot (nu \otimes u)) = - e n \nabla \phi, \label{EPB_u} \\
& & \hspace{-1cm} - \Delta \phi = \frac{e}{{\epsilon}_0} (n - n^* \exp ( \frac{e \phi}{k_B T} )) . \label{EPB_phi}
\end{eqnarray}
Here, $n(x,t) \geq 0$, $u(x,t) \in {\mathbb R}^d $ and $\phi(x,t)\in {\mathbb R} $ stand for the ion density, ion velocity and electric potential respectively, which depend on the space-variable $x \in {\mathbb R}^d$ and on the time $t\geq0$. We suppose that the ions bear a single positive elementary charge $e$ and we denote by $m$, their mass. The electron temperature $T$ is supposed uniform and constant in time. $\epsilon_0$ and $k_B$ respectively refer to the vacuum permittivity and the Boltzmann constant. The operators $\nabla$, $\nabla \cdot $ and $\Delta$ are respectively the gradient, divergence and Laplace operators and $u \otimes u$ denotes the tensor product of the vector $u$ with itself. $n^*$ is usually fixed by either imposing zero total charge
$$ \int_{{\mathbb R}^d} ( n(x,t) - n^* \exp ( \frac{e \phi(x,t)}{k_B T} )) \, dx = 0 , $$
or by assuming that the net charge is zero at a given point $x^*$ (for instance at the boundary):
$$ n(x^*,t) - n^* \exp ( \frac{e \phi(x^*,t)}{k_B T} ) = 0 . $$
For simplicity, this work is restricted to dimension $d=1$, but the concepts extend to dimensions $d \geq 2$ without additional difficulties; numerical applications in higher dimensions will be reported in future work.
The ion pressure force is neglected. This is a commonly made assumption in elementary plasma physics textbooks \cite{Chen,Krall_Trivelpiece}. The inclusion of an ion pressure term would not modify the subsequent analysis and is omitted for simplicity. Additionally, the pressureless system has interesting multi-valued solutions which are lost in the case of the non-pressureless model \cite{Liu_Wang_1, Liu_Wang_2, Liu_Slemrod}.
If the quasi-neutral assumption is made, the Poisson equation (\ref{EPB_phi}) is replaced by the constraint of zero local charge:
$$ n = n^* \exp ( \frac{e \phi}{k_B T} ) . $$
In this context, we can write
\begin{eqnarray}
& & \hspace{-1cm} n \nabla \phi = n^* \exp ( \frac{e \phi}{k_B T} ) \nabla \phi = \frac{k_B T}{e} \nabla ( n^* \exp ( \frac{e \phi}{k_B T} ) ) = \frac{k_B T}{e} \nabla n, \label{n_nabla_phi}
\end{eqnarray}
and the quasi-neutral Euler-Poisson-Boltzmann model coincides with Isothermal Compressible Euler (ICE) model:
\begin{eqnarray*}
& & \hspace{-1cm} \partial_t n + \nabla \cdot (nu) = 0, \\
& & \hspace{-1cm} m ( \partial_t (nu) + \nabla \cdot (nu \otimes u)) + \nabla (n k_B T) = 0 . \end{eqnarray*}
The passage from EPB to ICE can be understood by a suitable scaling of the model, which highlights the role of the scaled Debye length:
\begin{eqnarray}
& & \hspace{-1cm} \lambda = \frac{\lambda_D}{L}, \quad \lambda_D = \left( \frac{\epsilon_0 k_B T}{e^2 n^*} \right)^{1/2}, \label{Debye}
\end{eqnarray}
where $L$ is the typical size of the system under consideration. $\lambda_D$ measures the spatial scale associated with the electrostatic interaction between the particles. The dimensionless parameter $\lambda$ is usually small, which formalizes the fact that the electrostatic interaction occurs at spatial scales which are much smaller than the usual scales of interest. However, there are situations, for instance in boundary layers, or at the plasma-vacuum interface, where the electrostatic interaction scale must be taken into account. This means that the choice of the relevant macroscopic length $L$ may depend on the location inside the system and that in general, the parameter $\lambda$ may vary by orders of magnitude from one part of the domain to another one. The scaling will be presented in more detail in section \ref{sec_scaling}.
This paper is concerned with discretization methods for the EPB model in situations where $\lambda$ can vary from order one to very small values. Therefore, the targeted schemes must correctly capture the transition from the EPB to the ICE models. With this objective in mind, we will compare two strategies: a first one which uses the EPB model in its original form, and a second one which reformulates the EPB model in such a way that it explicitly appears as a perturbation of the ICE model. This reformulation uses the Poisson equation in the form:
$$ n = n^* \exp ( \frac{e \phi}{k_B T} ) - \frac{{\epsilon}_0}{e} \Delta \phi. $$
Then, with the same algebra as for (\ref{n_nabla_phi}), we get:
\begin{eqnarray}
n \nabla \phi &=& n^* \exp ( \frac{e \phi}{k_B T} ) \nabla \phi - \frac{{\epsilon}_0}{e} \Delta \phi \nabla \phi \nonumber \\
&=& \frac{k_B T}{e} \nabla ( n^* \exp ( \frac{e \phi}{k_B T} ) ) - \frac{{\epsilon}_0}{e} (\nabla \cdot (\nabla \phi \otimes \nabla \phi) - \nabla ( \frac{ |\nabla \phi|^2}{2} ) ) \nonumber \\
& = & \frac{k_B T}{e} \nabla n + \frac{\epsilon_0 k_B T}{e^2} \nabla \Delta \phi - \frac{{\epsilon}_0}{e} (\nabla \cdot (\nabla \phi \otimes \nabla \phi) - \nabla ( \frac{ |\nabla \phi|^2}{2} ) ) . \label{n_nabla_phi_ref}
\end{eqnarray}
We note that this expression is reminiscent of the Maxwell stress tensor. Then, the EPB model is equivalently written as the following reformulated Euler-Poisson-Boltzmann (REPB) model:
\begin{eqnarray}
& & \hspace{-1cm} \partial_t n + \nabla \cdot (nu) = 0, \label{REPB_n} \\
& & \hspace{-1cm} m ( \partial_t (nu) + \nabla \cdot (nu \otimes u)) + \nabla (n k_B T) = \nonumber \\
& & \hspace{2cm} = - {\epsilon}_0 \nabla ( \frac{k_B T}{e} \Delta \phi + \frac{ |\nabla \phi|^2}{2} ) + {\epsilon}_0 \nabla \cdot (\nabla \phi \otimes \nabla \phi) , \label{REPB_u} \\
& & \hspace{-1cm} - \Delta \phi = \frac{e}{{\epsilon}_0} (n - n^* \exp ( \frac{e \phi}{k_B T} )) . \label{REPB_phi}
\end{eqnarray}
In this way, the ICE model appears on the left-hand side of (\ref{REPB_n}), (\ref{REPB_u}). The scaling analysis will show that the right-hand side of (\ref{REPB_u}) is of order $\lambda^2$ and is therefore negligible (if the gradients of the potential are smooth) in the limit $\lambda \to 0$.
The goal of this paper is to propose and analyze two schemes for the EPB model which provide the correct ICE limit when $\lambda \to 0$: the first one is based on the initial formulation EPB and the second one, on the reformulated form REPB. To present the schemes, we note that both EPB and REPB can be put under the form
\begin{eqnarray*}
& & \hspace{-1cm} \partial_t n + \nabla \cdot (nu) = 0, \\
& & \hspace{-1cm} m ( \partial_t (nu) + \nabla \cdot (nu \otimes u)) + \nabla p(n) = S(n,\nabla \phi) , \\
& & \hspace{-1cm} - \Delta \phi = \frac{e}{{\epsilon}_0} (n - n^* \exp ( \frac{e \phi}{k_B T} )) ,
\end{eqnarray*}
with
$$ p(n) = 0, \quad S(n,\nabla \phi) = - e n \nabla \phi, $$
in the case of EPB and
$$ p(n) = n k_B T, \quad S(n,\nabla \phi) = - {\epsilon}_0 \nabla ( \frac{k_B T}{e} \Delta \phi + \frac{ |\nabla \phi|^2}{2} ) + {\epsilon}_0 \nabla \cdot (\nabla \phi \otimes \nabla \phi), $$
in the case of the REPB.
Both schemes use the following time-semi-discretization which is implicit in the Poisson equation and in the source terms of the momentum equation:
\begin{eqnarray*}
& & \hspace{-1cm} \delta^{-1} (n^{k+1} - n^k) + \nabla \cdot ((nu)^k) = 0, \\
& & \hspace{-1cm} m ( \delta^{-1} ((nu)^{k+1} - (nu)^k) + \nabla \cdot ((nu)^k \otimes u^k)) + \nabla p(n^k) = S(n^{k+1},\nabla \phi^{k+1}) , \\
& & \hspace{-1cm} - \Delta \phi^{k+1} = \frac{e}{{\epsilon}_0} (n^{k+1} - n^* \exp ( \frac{e \phi^{k+1}}{k_B T} )) .
\end{eqnarray*}
Here, $\delta$ is the time step and the exponent $k \in {\mathbb N}$ refers to the approximation at time $t^k = k \delta$ (i.e. $n^k(x) \approx n(x,t^k)$, \ldots). For the EPB model, this discretization is classical \cite{Fabre_JCP_101_445}. For both systems, in spite of its implicit character, the recursion can be solved in an explicit way, by first updating the mass equation to find $n^{k+1}$, then using the Poisson equation to find the potential $\phi^{k+1}$ and finally using the momentum equation to find $u^{k+1}$. Of course, the space operators have to be discretized as well and a simple shock-capturing method is used, namely the Local Lax-Friedrichs or Rusanov scheme \cite{Leveque_2, Rusanov, Deg_Pey_Rus_Vil}. A time-splitting is used where the hydrodynamic equations are evolved without the source terms. Then, the Poisson equation is solved with the value of the density found at the end of the first step of the splitting. With the newly computed value of the potential, the evolution of the hydrodynamic quantities due to the source terms is computed.
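To fix ideas, the overall structure of one time step (explicit mass update, then nonlinear Poisson solve, then momentum update with the implicit source) can be sketched as follows. This is only a schematic illustration written directly in the scaled variables of section~\ref{sec_scaling}; the spatial operators, the Poisson solver and the source term are placeholders for the discretizations described in sections~\ref{sec_time} and~\ref{sec_spatial}, and the function names are not part of the schemes' specification.
\begin{verbatim}
# Schematic sketch of one semi-implicit step (1D, scaled variables).
# pressure(n) returns 0*n for the EPB form and n for the REPB form;
# source(n, phi, h) returns the corresponding right-hand side S;
# solve_poisson(n, phi_guess) solves lam^2 phi_xx = n - exp(-phi).
import numpy as np

def grad(f, h):
    # centered derivative, one-sided at the boundaries (placeholder)
    g = np.empty_like(f)
    g[1:-1] = (f[2:] - f[:-2]) / (2.0 * h)
    g[0], g[-1] = (f[1] - f[0]) / h, (f[-1] - f[-2]) / h
    return g

def step(n, nu, phi, h, dt, pressure, source, solve_poisson):
    """Mass equation, then Poisson equation, then momentum equation."""
    u = nu / n
    n_new = n - dt * grad(nu, h)                        # explicit mass update
    phi_new = solve_poisson(n_new, phi)                 # implicit potential update
    nu_new = (nu - dt * (grad(nu * u, h) + grad(pressure(n), h))
                 + dt * source(n_new, phi_new, h))      # momentum update
    return n_new, nu_new, phi_new
\end{verbatim}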
In section \ref{sec_time}, we will show that both schemes are Asymptotic-Preserving. The Asympto\-tic-Preserving property can be defined as follows. Consider a singular perturbation problem $P^\lambda$ whose solutions converge to those of a limit problem $P^0$ when $\lambda \to 0$ (here $P^\lambda$ is the EPB model and $P^0$ is the ICE model). A scheme $P^\lambda_{\delta,h}$ for problem $P^\lambda$ with time-step $\delta$ and space-step $h$ is called Asymptotic Preserving (or AP) if it is stable independently of the value of $\lambda$ when $\lambda \to 0$ and if the scheme $P^0_{\delta,h}$ obtained by letting $\lambda \to 0$ in $P^\lambda_{\delta,h}$ with fixed $(\delta,h)$ is consistent with problem $P^0$. This property is illustrated by the commutative diagram below:
$$ \begin{CD}
P^\lambda_{\delta,h} @>{(\delta,h) \to 0}>> P^\lambda \\
@VV{\lambda \to 0}V @VV{\lambda \to 0}V\\
P^0_{\delta,h} @>{(\delta,h) \to 0}>> P^0
\end{CD}
$$
The concept of an AP scheme has been introduced by S. Jin \cite{Jin} for diffusive limits of kinetic models and has been widely expanded since then.
In section \ref{sec_time}, we will perform a linearized stability analysis which shows that both schemes are stable independently of $\lambda$ in the limit $\delta \to 0$, provided that the usual CFL condition of the ICE model is satisfied. However, the scheme based on the REPB formulation has several advantages over the one based on the EPB form. A first advantage lies in the fact that the hydrodynamic part of the EPB model is a pressureless gas dynamics model, which is weakly unstable and can produce delta concentrations (see e.g. \cite{Bouchut, Bouchut_James, Brenier_Grenier, CL1, CL2}). By contrast, the hydrodynamic part of the REPB is the usual ICE model, which is strongly hyperbolic, and which is much more stable than the pressureless gas dynamics model. A second advantage is the fact that the limit $\lambda \to 0$ of the REPB-based scheme provides a conservative discretization of the ICE model, while that of the EPB-based scheme leads to a scheme in non-conservative form. We can expect that the accuracy of the EPB-based scheme degrades when $\lambda \ll 1$ for solutions involving discontinuities, which is not the case for the REPB-based scheme.
To illustrate the theoretical findings, one-dimensional numerical simulations are presented in section \ref{sec_num}, following a discussion of the spatial discretization in section \ref{sec_spatial}.
First, setting $\lambda = 1$, analytic solutions can be derived in the form of solitary waves thanks to the Sagdeev potential theory \cite{Chen}. Both schemes are compared to these analytical solutions, and show a similar behavior, with a slightly larger numerical diffusion in the case of the REPB scheme. Then a Riemann problem test case consisting of two outgoing shock waves is investigated. In the $ \lambda \ll 1 $ regime, the REPB scheme captures the correct hydrodynamic shocks while the EPB scheme develops spurious oscillations. This confirms the better behavior of the REPB scheme in the $ \lambda \ll 1 $ regime. Finally, a test-problem related to multivalued solutions and proposed in \cite{Liu_Wang_1,Liu_Wang_2} is investigated. In this case, both schemes show a similar behavior. To summarize, the REPB scheme captures well the $\lambda \ll 1$ limit but is slightly more diffusive in the $ \lambda = O(1) $ regime. The extra numerical diffusion is mild, however, so the REPB scheme is superior to, and should be preferred over, the EPB scheme in most situations.
We conclude this section with some bibliographical remarks. The Euler-Poisson-Boltzmann model has been recently analyzed in the context of sheath dynamics \cite{Liu_Slemrod} and multi-valued solutions have been computed using level set methods \cite{Liu_Wang_1, Liu_Wang_2}. Numerical schemes for the quasineutral limit of plasma problems have been the subject of a vast literature, mostly for the Vlasov-Poisson equation and for Particle-in-Cell (PIC) methods. It is virtually impossible to cite all relevant references and we only refer to the seminal ones \cite{Cohen_JCP_46_15, Langdon_JCP_51_107, Mason_JCP_41_233, Mason_JCP_51_484}. For fluid models of plasmas, the literature is comparatively less abundant. We can refer to the pioneering work \cite{Fabre_JCP_101_445}, and more recently to \cite{Choe_JCP_170_550, Collela_JCP_149_168, Schneider_IJNM_05_399, Shumlak_JCP_187_620}. Recently, AP-schemes for the two-fluid Euler-Poisson model \cite{Cri_Deg_Vig_07, Deg_Liu_Vig_08} or Vlasov-Poisson model
\cite{BCDS_09, DDNSV_10} in the quasineutral limit have been proposed. The drift-fluid limit of magnetized plasmas has also been considered \cite{Deg_Del_Neg_10, DDSV_09} as well as other applications such as small Mach-number flows \cite{Deg_Tan_10}. However, none of these works is concerned with Boltzmannian electrons.
\setcounter{equation}{0}
\section{The EPB model and the scaling}
\label{sec_scaling}
In this section we present a derivation of the EPB model from the two fluid Euler-Poisson
system and we introduce its scaling. From now on, we will restrict ourselves to one-dimensional models.
\subsection{Derivation of the EPB model}
\label{subsec_derivation}
We consider a plasma composed of two species of charged particles: positively charged ions and electrons. The ions are supposed singly charged. The modeling of such a plasma by means of fluid equations uses a system of compressible Euler equations for each species, coupled by the Poisson equation.
We denote by $ m_{i,e} $ the ion and electron masses,
$ n_{i,e} $ the ion and electron densities and
$ u_{i,e} $ the ion and electron mean velocities.
We denote by $ e $ the elementary charge (i.e. the ion charge is $+e>0$
and the electron charge is $-e<0$).
We assume that the ion temperature is negligible so that
the ions follow a pressureless gas dynamics model.
Electrons are assumed isothermal with a
non-zero constant and uniform temperature $ T_e $.
Then the electron pressure law satisfies
$ p_e = n_e k_B T_e $
where $ k_B $ denotes the Boltzmann constant.
The balance laws for both species are given by
\begin{eqnarray}
\label{epbif-dim-iden}
& & \hspace{-1cm} \partial_t n_i +
\partial_x ( n_i u_i ) = 0 , \\
\label{epbif-dim-imom}
& & \hspace{-1cm} m_i (\partial_t ( n_i u_i ) +
\partial_x ( n_i u_i^2 )) =
- e n_i \partial_x \phi , \\
\label{epbif-dim-eden}
& & \hspace{-1cm} \partial_t n_e +
\partial_x ( n_e u_e ) = 0 , \\
\label{epbif-dim-emom}
& & \hspace{-1cm} m_e (\partial_t ( n_e u_e ) +
\partial_x ( n_e u_e^2 )) + \partial_x ( n_e k_B T_e ) =
e n_e \partial_x \phi ,
\end{eqnarray}
where $ \phi $ denotes the electric potential. It satisfies
the Poisson equation:
\begin{equation}
\label{bif-dim-poisson}
- \epsilon_0 \partial_{x}^2 \phi =
e ( n_i - n_e ),
\end{equation}
where $ \epsilon_0 $ is the vacuum permittivity.
The electrons being much lighter than the ions, it is legitimate to take the limit $ m_e \to 0 $ in the electron momentum equation. In this limit, we formally obtain:
$$ \partial_x ( n_e k_B T_e ) =
e n_e \partial_x \phi.
$$
Integration with respect to $ x $ leads to the Boltzmann relation:
\begin{equation}
\label{boltz-dim}
n_e = n^* \exp \left( \frac{ e \phi }{ k_B T_e } \right),
\end{equation}
where $ n^* $ is fixed by some condition (e.g. vanishing total charge or vanishing local charge at one given point such as a boundary point, see section \ref{sec_intro}). The Boltzmann relation shows that the electron density automatically adjusts
to the potential. The EPB model therefore consists of the ion mass and momentum conservation equations (\ref{epbif-dim-iden}), (\ref{epbif-dim-imom}), the Poisson equation (\ref{bif-dim-poisson}) and the Boltzmann relation (\ref{boltz-dim}). With a change of notation $n_i \to n$, $u_i \to u$, $m_i \to m$, $T_e \to T$, we find the EPB model (\ref{EPB_n})-(\ref{EPB_phi}) which has been introduced in section \ref{sec_intro}.
We note that, in the present one-dimensional setting, the electron velocity $u_e$ can be computed from the electron density equation (\ref{epbif-dim-eden}), thanks to the value of $n_e$ and therefore, of $\phi$, obtained by the resolution of the EPB model. Therefore, the computation of $u_e$ is decoupled from the computation of the other unknowns $n_i$, $u_i$ and $\phi$ involved in the EPB model and will be discarded in the present work. In two or higher dimensions, the computation of $u_e$ requires the resolution of the electron momentum equation, which takes the form in the small electron mass limit:
\begin{equation}
\label{emom_small_mass}
\partial_t ( n_e u_e ) +
\nabla \cdot ( n_e u_e \otimes u_e ) + n_e \nabla \psi = 0,
\end{equation}
where $\psi = \lim_{m_e \to 0} ( m_e^{-1} (k_B T \ln n_e - e \phi)) $. The quantity $\psi$ plays the same role as the pressure in the incompressible Euler equation. It is computed thanks to the electron density equation (\ref{epbif-dim-eden}) which appears as a (non-zero) divergence constraint on $u_e$. In this sense, the limit $m_e \to 0$ is similar to the 'low Mach-number' limit of isentropic compressible gas dynamics. However, the question of the resolution of (\ref{emom_small_mass}) is left to future work.
\subsection{Scaling of the EPB model}
\label{subsec_scaling}
In this section, we return to the EPB model in the form (\ref{EPB_n})-(\ref{EPB_phi}) and, with the notations of section \ref{sec_intro}, we introduce a scaling of the physical quantities. Let $x_0$, $t_0$, $u_0$, $\phi_0$ and $n_0$ be space, time, velocity, potential and density scales. Scaled position, time, velocity, potential and density are defined by $ \bar{x} = x/x_0$, $ \bar{t} = t/t_0$, $ \bar{u} = u/u_0$ , $ \bar{\phi} = - \phi/\phi_0$ and $ \bar{n} = n/n_0$. We choose $x_0$ to be the typical size of the system (for instance an inter-electrode distance or the size of the vacuum chamber). The velocity scale is chosen equal to the ion sound speed
$ u_0 = ( k_B T / m )^{1/2} $. We note that the ion sound speed is constructed with the ion mass but with the electron temperature. This is clear from the ICE model (see section \ref{sec_intro}). We also choose $n_0=n^*$. Finally, $\phi_0 = k_B T /e$ is the so-called thermal potential. Note that we have introduced a sign change in the potential scaling because we find it more convenient to work in terms of the electron potential energy rather than in terms of the electric potential.
Inserting this scaling and omitting the bars gives rise to the EPB model in scaled form:
\begin{eqnarray}
\label{nop-den}
& & \hspace{-1cm} \partial_t n^\lambda + \partial_x ( n^\lambda u^\lambda ) = 0, \\
\label{nop-mom}
& & \hspace{-1cm} \partial_t ( n^\lambda u^\lambda ) +
\partial_x ( n^\lambda u^\lambda u^\lambda ) =
n^\lambda \partial_x \phi^\lambda, \\
\label{nop-pot}
& & \hspace{-1cm} \lambda^2 \partial_{x}^2 \phi^\lambda = n^\lambda - e^{-\phi^\lambda}.
\end{eqnarray}
where $\lambda$ is the scaled Debye length (\ref{Debye}).
It will be useful to consider the linearized EPB model about the state defined by $n^\lambda = 1$, $u^\lambda = 0$, $\phi^\lambda = 0$ (which is obviously a stationary solution). Expanding $n^\lambda = 1 + \varepsilon \tilde n^\lambda$, $u^\lambda = \varepsilon \tilde u^\lambda$ and $\phi^\lambda = \varepsilon \tilde \phi^\lambda$, with $\varepsilon \ll 1$ being the intensity of the perturbation to the stationary state, and retaining only the linear terms in $\varepsilon$, we find the linearized EPB model:
\begin{eqnarray}
& & \hspace{-1cm} \partial_t \tilde n^\lambda + \partial_x \tilde u^\lambda = 0, \label{LEPB_n} \\
& & \hspace{-1cm} \partial_t \tilde u^\lambda = \partial_x \tilde \phi^\lambda , \label{LEPB_u} \\
& & \hspace{-1cm} \lambda ^2 \partial^2_x \tilde \phi^\lambda = \tilde n^\lambda + \tilde \phi^\lambda. \label{LEPB_phi}
\end{eqnarray}
Introducing $\hat n^\lambda$, $\hat u^\lambda$, $\hat \phi^\lambda$, the partial Fourier transforms of $\tilde n^\lambda$, $\tilde u^\lambda$, $\tilde \phi^\lambda$ with respect to $x$, we are led to the system of ODE's:
\begin{eqnarray}
& & \hspace{-1cm} \partial_t \hat n^\lambda + i \xi \hat u^\lambda = 0, \label{FLEPB_n} \\
& & \hspace{-1cm} \partial_t \hat u^\lambda = i \xi \hat \phi^\lambda , \label{FLEPB_u} \\
& & \hspace{-1cm} - \lambda ^2 \xi^2 \hat \phi^\lambda = \hat n^\lambda + \hat \phi^\lambda , \label{FLEPB_phi}
\end{eqnarray}
where $\xi$ is the Fourier dual variable to $x$. We note that the general solution of this model takes the form
\begin{eqnarray}
& & \hspace{-1cm} \left( \begin{array}{c} \hat n^\lambda \\ \hat u^\lambda \end{array} \right) = \sum_{\pm} e^{s^\lambda_\pm t} \left( \begin{array}{c} \hat n^\lambda_\pm \\ \hat u^\lambda_\pm \end{array} \right),
\label{EPB_gen_sol}
\end{eqnarray}
with
\begin{eqnarray}
& & \hspace{-1cm} s^\lambda_\pm = \pm \frac{i \xi}{(1 + \lambda^2 \xi^2)^{1/2}},
\label{EPB_s}
\end{eqnarray}
and $\hat n^\lambda_\pm$, $\hat u^\lambda_\pm$ are given functions of $\xi$, fixed by the initial conditions of the problem. In particular, since $s^\lambda_\pm $ are purely imaginary, the $L^2$ norm of the solution is preserved in time.
Now, we investigate the quasi-neutral limit $\lambda \to 0$ in the next section.
\subsection{The quasineutral limit: the ICE model}
\label{subsec_quasineutral}
Formally passing to the limit $ \lambda \to 0 $ in the EPB model in scaled form and supposing that $n^\lambda \to n^0$, $u^\lambda \to u^0$, $\phi^\lambda \to \phi^0$, we are led to the following model:
\begin{eqnarray*}
& & \hspace{-1cm} \partial_t n^0 + \partial_x ( n^0 u^0 ) = 0, \\
& & \hspace{-1cm} \partial_t ( n^0 u^0 ) + \partial_x ( n^0 u^0 u^0 ) =
n^0 \partial_x \phi^0, \\
& & \hspace{-1cm} 0 = n^0 - e^{-\phi^0}.
\end{eqnarray*}
As a consequence of the last relation (which imposes to the ions to satisfy the Boltzmann relation of the electrons), we can write (see also (\ref{n_nabla_phi})):
\begin{equation}
n^0 \partial_x \phi^0 =
e^{-\phi^0} \partial_x \phi^0 =
- \partial_x \left( e^{-\phi^0} \right) =
- \partial_x n^0,
\label{n_nabla_phi_1D}
\end{equation}
and, inserting this relation into the momentum equation leads to:
$$ \partial_t ( n^0 u^0 ) +
\partial_x ( n^0 u^0 u^0 ) +
\partial_x n^0 = 0.
$$
Therefore, the quasineutral limit $\lambda \to 0$ consists of the Isothermal Compressible Euler system (ICE) complemented by the Boltzmann relation for the potential:
\begin{eqnarray*}
& & \hspace{-1cm} \partial_t n^0 + \partial_x ( n^0 u^0 ) = 0, \\
& & \hspace{-1cm} \partial_t ( n^0 u^0 ) +
\partial_x ( n^0 u^0 u^0 ) +
\partial_x n^0 = 0, \\
& & \hspace{-1cm} n^0 = e^{-\phi^0}.
\end{eqnarray*}
Similarly to the EPB model, the ICE model can be linearized about the state defined by $n^0 = 1$, $u^0 = 0$, $\phi^0 = 0$. We find (with the same notations as for the EPB model), in Fourier space:
\begin{eqnarray*}
& & \hspace{-1cm} \partial_t \hat n^0 + i \xi \hat u^0 = 0, \\
& & \hspace{-1cm} \partial_t \hat u^0 + i \xi \hat n^0 = 0 .
\end{eqnarray*}
The general solution of this model takes the same form (\ref{EPB_gen_sol}) as for the linearized EPB model with $s^\lambda_\pm$ replaced by $ s^0_\pm = \pm i \xi. $ We note that (see (\ref{EPB_s}))
$$ s^0_\pm = \lim_{\lambda \to 0} s^\lambda_\pm. $$
Therefore, the wave speeds of the linearized EPB model converge to those of the linearized ICE model (which are nothing but the acoustic wave speeds). There is no singularity of the limit $\lambda \to 0$ as regards the wave-speeds. In this respect the quasineutral limit $\lambda \to 0$ is not a singular limit. This fact contrasts with the situation of the quasineutral limit of the 2-fluid Euler system, where the electron plasma oscillation frequency converges to infinity. Therefore, we expect that the numerical treatment of the quasineutral limit in the Euler-Poisson-Boltzmann case will be easier.
\subsection{Reformulation of the EPB model}
\label{subsec_reformulation}
To better capture the transition from the EPB model to the ICE model, it is useful to reformulate the EPB model in such a way that it explicitly appears as a perturbation of the ICE model. Using (\ref{nop-pot}), in the spirit of (\ref{n_nabla_phi_1D}), we can write (see also (\ref{n_nabla_phi_ref})):
\begin{eqnarray*}
n^\lambda \partial_x \phi^\lambda &=&
\left( e^{-\phi^\lambda}+\lambda^2 \partial_{xx}^2 \phi^\lambda \right)
\partial_x \phi^\lambda \\
&=& \partial_x \left( - e^{-\phi^\lambda} + \frac{\lambda^2}{2} ( \partial_x \phi^\lambda )^2 \right) \\
&=& \partial_x
\left( \lambda^2 \partial_{xx}^2 \phi^\lambda -
n^\lambda + \frac{\lambda^2}{2} ( \partial_x \phi^\lambda )^2\right).
\end{eqnarray*}
We note that some simplification arises in the 1-dimensional case, compared to the multi-dimensional case of (\ref{n_nabla_phi_ref}). Inserting this expression in the momentum equation leads to the reformulated EPB systems (REPB):
\begin{eqnarray}
\label{ref-den}
& & \hspace{-1cm} \partial_t n^\lambda + \partial_x ( n^\lambda u^\lambda ) = 0, \\
\label{ref-mom}
& & \hspace{-1cm} \partial_t ( n^\lambda u^\lambda ) +
\partial_x ( n^\lambda u^\lambda u^\lambda ) +
\partial_x n^\lambda = \lambda^2 \partial_x
\left( \partial_{xx}^2 \phi^\lambda +
\frac{1}{2} ( \partial_x \phi^\lambda )^2 \right), \\
\label{ref-pot}
& & \hspace{-1cm} n^\lambda - e^{-\phi^\lambda} = \lambda^2 \partial_{x}^2 \phi^\lambda .
\end{eqnarray}
In this formulation, the ICE model explicitly appears at the left-hand side of (\ref{ref-den})-(\ref{ref-pot}). Additionally, the remaining terms, at the right-hand side of the equations are formally of order $\lambda^2$. Therefore, the EPB model explicitly appears as an order $O(\lambda^2)$ perturbation of the ICE model.
We stress the fact that the REPB model is {\bf equivalent} to the original EPB model. However, at the discrete level, schemes based on the REPB model may differ from those based on the EPB model. The goal of the present article is to compare the properties of schemes based on these two formulations, in relation to their Asymptotic-Preserving (AP) properties when $\lambda \to 0$.
\begin{remark}
A formal expansion (using a Chapman-Enskog methodology) up to the first order in $ \lambda^2 $ of the EPB model leads to the following model:
\begin{eqnarray*}
& & \hspace{-1cm} \partial_{t} n^{\lambda} + \partial_{x} (n^{\lambda} u^{\lambda} ) = 0, \\
& & \hspace{-1cm} \partial_{t} (n^{\lambda} u^{\lambda} ) + \partial_{x} ( n^{\lambda} u^{\lambda} u^{\lambda} )
+ \partial_{x} n^{\lambda} = - \lambda^2 n^{\lambda} \partial_{x}
\left( \frac{1}{n^{\lambda}} \partial_{x}^{2} ( \log n^{\lambda} ) \right) + O(\lambda^4).
\end{eqnarray*}
We see that the EPB is a perturbation of the ICE model by a dispersive term with a third order derivative in $n^{\lambda}$. For this reason, in the sequel, the $ \lambda = O(1) $ regime will be referred to as the dispersive regime, while the $ \lambda \ll 1 $ will be referred to as the hydrodynamic regime.
\end{remark}
\setcounter{equation}{0}
\section{Time semi-discretization, AP property and linearized stability}
\label{sec_time}
\subsection{Time-semi-discretization and AP property}
\label{subsec_time_disc}
We denote by $ \delta $ the time step. For any function $g(x,t)$, we denote by
$ g^m (x) $ an approximation of $g(x,t^m)$ with $ t^m = m \delta $. We present two time-semi-discretizations of the problem. The first one is based on the EPB formulation, and the second one, on the REPB formulation.
\subsubsection{Time-semi-discretization based on the EPB formulation}
\label{subsubsec_time_EPB}
Classically, when dealing with Euler-Poisson problems, the force term in the momentum equation is taken implicitly. In the case of the two-fluid model (when the electrons are modeled by the compressible Euler equations instead of being described by the Boltzmann relation), S. Fabre has shown that this implicitness is needed for the stability of the scheme (an explicit treatment of the force term leads to an unconditionally unstable scheme \cite{Fabre_JCP_101_445}). Additionally, this implicitness still gives rise to an explicit resolution, since the mass conservation can be used to update the density, then the Poisson equation is used to update the potential, and finally the resulting potential is inserted in the momentum equation to update the velocity.
We will reproduce this strategy here and consider the following time-semi-discretization based on the EPB formulation:
\begin{eqnarray*}
& & \hspace{-1cm} \delta^{-1} (n^{\lambda, m+1} - n^{\lambda, m}) +
\partial_x ( n^{\lambda, m} u^{\lambda, m} ) = 0, \\
& & \hspace{-1cm} \delta^{-1} ( n^{\lambda, m+1} u^{\lambda, m+1} - n^{\lambda, m} u^{\lambda, m})
+ \partial_x ( n^{\lambda, m} u^{\lambda, m} u^{\lambda, m} ) =
n^{\lambda, m+1} \partial_x \phi^{\lambda, m+1}, \\
& & \hspace{-1cm} \lambda^2 \partial_{x}^2 \phi^{\lambda, m+1} =
n^{\lambda, m+1} - e^{-\phi^{\lambda, m+1}}.
\end{eqnarray*}
This scheme is Asymptotic-Preserving. Indeed, letting $\lambda \to 0$ in this scheme with a fixed $\delta$ leads to
\begin{eqnarray}
\nonumber
& & \hspace{-1cm} \delta^{-1} (n^{0, m+1} - n^{0, m}) +
\partial_x ( n^{0, m} u^{0, m} ) = 0, \\
\nonumber
& & \hspace{-1cm} \delta^{-1} ( n^{0, m+1} u^{0, m+1} - n^{0, m} u^{0, m})
+ \partial_x ( n^{0, m} u^{0, m} u^{0, m} ) =
n^{0, m+1} \partial_x \phi^{0, m+1}, \\
\label{class-pot-0}
& & \hspace{-1cm} 0 =
n^{0, m+1} - e^{-\phi^{0, m+1}}.
\end{eqnarray}
By using (\ref{class-pot-0}) and the same algebra as for (\ref{n_nabla_phi_1D}), we find that this scheme is equivalent to
\begin{eqnarray*}
& & \hspace{-1cm} \delta^{-1} (n^{0, m+1} - n^{0, m}) +
\partial_x ( n^{0, m} u^{0, m} ) = 0, \\
& & \hspace{-1cm} \delta^{-1} ( n^{0, m+1} u^{0, m+1} - n^{0, m} u^{0, m})
+ \partial_x ( n^{0, m} u^{0, m} u^{0, m} ) + \partial_x ( n^{0, m+1} ) =
0, \\
& & \hspace{-1cm} 0 =
n^{0, m+1} - e^{-\phi^{0, m+1}},
\end{eqnarray*}
which provides a semi-implicit discretization of the ICE model, with an implicit treatment of the pressure term in the momentum conservation equation.
However, when the scheme is discretized in space, the algebra leading to (\ref{n_nabla_phi_1D}) is no longer exact. Let us denote by $D \phi^{0, m+1}$ the discretization of the space derivative operator $\partial_x \phi^{0, m+1}$. Then, the limit $\lambda \to 0$ of the fully discrete scheme gives rise to the approximation $n^{0, m+1} D (\ln n^{0, m+1})$ of the space derivative $\partial_x ( n^{0, m+1} )$ instead of the natural derivative $D n^{0, m+1}$. In particular, this expression is not in conservative form. Therefore, the use of this scheme may lead to a wrong shock speed if shock waves are present in the solution.
Another drawback of this scheme is the lack of pressure term in the momentum equation. As a consequence, the hydrodynamic part of the model is a pressureless gas dynamics model, which is a weakly ill-posed model (with, e.g. the possibility of forming delta concentrations \cite{Bouchut, Bouchut_James, Brenier_Grenier, CL1, CL2}). The weak instability may lead to spurious oscillations in the solution.
For these reasons, another scheme, based on the REPB formulation, is proposed in the next section.
\subsubsection{Time-semi-discretization based on the REPB formulation}
\label{subsubsec_time_REPB}
We reproduce the same strategy (i.e. an implicit evaluation of the force term in the momentum conservation equation) starting from the REPB formulation. This leads to the following scheme:
\begin{eqnarray}
\label{refs-den}
& & \hspace{-1cm} \delta^{-1} (n^{\lambda, m+1} - n^{\lambda, m})
+ \partial_x ( n^{\lambda, m} u^{\lambda, m} ) = 0, \\
\label{refs-mom}
& & \hspace{-1cm} \delta^{-1} (( n^{\lambda, m+1} u^{\lambda, m+1} )
- ( n^{\lambda, m} u^{\lambda, m} ))
+ \partial_x ( n^{\lambda, m} u^{\lambda, m} u^{\lambda, m}) + \partial_x n^{\lambda, m} = \nonumber \\
& & \hspace{6cm} = \lambda^2 \partial_x \left( \partial_{x}^2 \phi^{\lambda, m+1} +
\frac{1}{2} ( \partial_x \phi^{\lambda, m+1} )^2 \right), \\
\label{refs-pot}
& & \hspace{-1cm} \lambda^2 \partial_{x}^2 \phi^{\lambda, m+1} =
n^{\lambda, m+1} - e^{-\phi^{\lambda, m+1}}.
\end{eqnarray}
Formally passing to the limit $ \lambda \to 0 $ with fixed $\delta$
in this scheme leads to the following scheme:
\begin{eqnarray}
\label{refs-den-0}
& & \hspace{-1cm} \delta^{-1} (n^{0, m+1} - n^{0, m})
+ \partial_x ( n^{0, m} u^{0, m} ) = 0, \\
\label{refs-mom-0}
& & \hspace{-1cm} \delta^{-1} (( n^{0, m+1} u^{0, m+1} )
- ( n^{0, m} u^{0, m} ))
+ \partial_x ( n^{0, m} u^{0, m} u^{0, m}) + \partial_x n^{0, m} = 0, \\
\label{refs-pot-0}
& & \hspace{-1cm} 0 =
n^{0, m+1} - e^{-\phi^{0, m+1}}.
\end{eqnarray}
Eqs. (\ref{refs-den-0}), (\ref{refs-mom-0}) are the standard time-semi-discretization of the ICE model. We now note that the pressure term $\partial_x n^{0, m}$ is explicit (it was implicit in the scheme based on the EPB formulation). Additionally, if a space discretization is used, the discretization of this term will stay in conservative form, by contrast to the EPB-based scheme. Finally, the hydrodynamic part of the scheme (\ref{refs-den})-(\ref{refs-pot}) is based on the ICE model, not on the pressureless gas dynamics model. Therefore, its discretization will avoid the possible spurious oscillations that might appear in the EPB-based scheme in the presence of discontinuities or sharp gradients.
\subsection{Linearized stability analysis of the time-semi-discretization}
\label{sub_sec_stability}
The goal of this section is to analyze the linearized stability properties of both schemes. More precisely, we want to show that both schemes are stable under the CFL condition of the ICE model, irrespective of the value of $\lambda$ when $\lambda \to 0$. This property is known as 'Asymptotic-Stability' and is a component of the Asymptotic-Preserving property (see section \ref{sec_intro}). Indeed, letting $\lambda \to 0$ in the scheme with fixed $\delta$ is possible only if the stability condition of the scheme is independent of $\lambda$ in this limit. We will prove $L^2$-stability uniformly with respect to $\lambda$ for the linearization of the problem (\ref{FLEPB_n})-(\ref{FLEPB_phi}).
In general, time semi-discretizations of hyperbolic problems are unconditionally unstable. This is easily verified on the discretization (\ref{refs-den-0})-(\ref{refs-pot-0}) of the ICE model. This is because the skew adjoint operator $\partial_x$ has the same effect as a centered space-differencing. For fully discrete schemes, stability is obtained at the price of adding numerical viscosity. To mimic the effect of this viscosity, in the present section, we will consider the linearized Viscous Euler-Poisson-Boltzmann (VEPB) model, which consists of the linearized EPB model (\ref{LEPB_n})-(\ref{LEPB_phi}) with additional viscosity terms (in this section, we drop the tildes for notational convenience):
\begin{eqnarray*}
& & \hspace{-1cm} \partial_t n^\lambda + \partial_x u^\lambda - \beta \partial_x^2 n^\lambda = 0, \\
& & \hspace{-1cm} \partial_t u^\lambda - \beta \partial_x^2 u^\lambda = \partial_x \phi^\lambda , \\
& & \hspace{-1cm} \lambda ^2 \partial^2_x \phi^\lambda = n^\lambda + \phi^\lambda.
\end{eqnarray*}
where $\beta$ is a numerical viscosity coefficient. We keep in mind that, in the spatially discretized case, $\beta$ is proportional to the mesh size $h$:
\begin{equation}
\beta = c h , \label{beta}
\end{equation}
with the constant $c$ to be specified later on. Similarly, the linearized Reformulated Viscous Euler-Poisson-Boltzmann (RVEPB) model is written:
\begin{eqnarray*}
& & \hspace{-1cm} \partial_t n^\lambda + \partial_x u^\lambda - \beta \partial_x^2 n^\lambda = 0, \\
& & \hspace{-1cm} \partial_t u^\lambda + \partial_x n^\lambda - \beta \partial_x^2 u^\lambda = \lambda^2 \partial_x^3 \phi^\lambda , \\
& & \hspace{-1cm} \lambda ^2 \partial^2_x \phi^\lambda = n^\lambda + \phi^\lambda.
\end{eqnarray*}
The time discretizations of these two formulations (which are also linearizations of the EPB- and REPB-based schemes with added viscosity terms) are given by
\begin{eqnarray}
& & \hspace{-1cm} \delta^{-1} (n^{\lambda,m+1} - n^{\lambda,m}) + \partial_x u^{\lambda,m} - \beta \partial_x^2 n^{\lambda,m} = 0, \label{DLVEPB_n} \\
& & \hspace{-1cm} \delta^{-1} (u^{\lambda,m+1} - u^{\lambda,m}) - \beta \partial_x^2 u^{\lambda,m} = \partial_x \phi^{\lambda,m+1} ,\label{DLVEPB_u} \\
& & \hspace{-1cm} \lambda ^2 \partial^2_x \phi^{\lambda,m+1} = n^{\lambda,m+1} + \phi^{\lambda,m+1}, \label{DLVEPB_phi}
\end{eqnarray}
for the EPB-based scheme and by
\begin{eqnarray}
& & \hspace{-1cm} \delta^{-1} (n^{\lambda,m+1} - n^{\lambda,m}) + \partial_x u^{\lambda,m} - \beta \partial_x^2 n^{\lambda,m} = 0, \label{DLRVEPB_n} \\
& & \hspace{-1cm} \delta^{-1} (u^{\lambda,m+1} - u^{\lambda,m}) + \partial_x n^{\lambda,m} - \beta \partial_x^2 u^{\lambda,m} = \lambda^2 \partial_x^3 \phi^{\lambda,m+1} , \label{DLRVEPB_u} \\
& & \hspace{-1cm} \lambda ^2 \partial^2_x \phi^{\lambda,m+1} = n^{\lambda,m+1} + \phi^{\lambda,m+1}, \label{DLRVEPB_phi}
\end{eqnarray}
for the REPB-based one.
Passing to Fourier space with $\xi$ being the dual variable to $x$, and eliminating $\hat \phi^{\lambda,m+1}$, we find the following recursion relations:
\begin{eqnarray*}
& & \hspace{-1cm} \delta^{-1} (\hat n^{\lambda,m+1} - \hat n^{\lambda,m}) + i \xi \hat u^{\lambda,m} + \beta \xi^2 \hat n^{\lambda,m} = 0, \\
& & \hspace{-1cm} \delta^{-1} (\hat u^{\lambda,m+1} - \hat u^{\lambda,m}) + \frac{i \xi}{1+\lambda^2 \xi^2} \hat n^{\lambda,m+1} + \beta \xi^2 \hat u^{\lambda,m} = 0 ,
\end{eqnarray*}
for the EPB-based scheme and
\begin{eqnarray*}
& & \hspace{-1cm} \delta^{-1} (\hat n^{\lambda,m+1} - \hat n^{\lambda,m}) + i \xi \hat u^{\lambda,m} + \beta \xi^2 \hat n^{\lambda,m} = 0, \\
& & \hspace{-1cm} \delta^{-1} ( \hat u^{\lambda,m+1} - \hat u^{\lambda,m}) + i \xi \hat n^{\lambda,m} - \frac{i \lambda^2 \xi^3}{1+\lambda^2 \xi^2} \hat n^{\lambda,m+1} + \beta \xi^2 \hat u^{\lambda,m} = 0 ,
\end{eqnarray*}
for the REPB-based one.
The characteristic equations for these two recursion formulas are
\begin{eqnarray}
& & \hspace{-1cm}
q^2 - 2q (1 - \beta \xi^2 \delta - \frac{\xi^2 \delta^2}{2 (1+ \lambda^2 \xi^2)} ) + (1 - \beta \xi^2 \delta)^2 = 0
, \label{char_EPB}
\end{eqnarray}
and
\begin{eqnarray}
& & \hspace{-1cm}
q^2 - 2q (1 - \beta \xi^2 \delta + \frac{\lambda^2 \xi^4 \delta^2}{2 (1+ \lambda^2 \xi^2)} ) + (1 - \beta \xi^2 \delta)^2 + \xi^2 \delta^2 = 0
, \label{char_REPB}
\end{eqnarray}
respectively, where $q$ is the characteristic root. Each of these quadratic equations has two roots $q^\lambda_\pm(\xi)$ which provide the two independent solutions of the corresponding recursion formulas. Their most general solution is of the form
\begin{eqnarray*}
& & \hspace{-1cm} \left( \begin{array}{c} \hat n^{\lambda,m}(\xi) \\ \hat u^{\lambda,m}(\xi) \end{array} \right) = \sum_{\pm} (q^\lambda_\pm(\xi))^m \left( \begin{array}{c} \hat n^\lambda_\pm(\xi) \\ \hat u^\lambda_\pm(\xi) \end{array} \right), \quad \forall m \in {\mathbb N},
\end{eqnarray*}
where $n^\lambda_\pm(\xi)$ and $u^\lambda_\pm(\xi)$ depend on the initial condition only. A necessary and sufficient condition for $L^2$ stability is that $|q^\lambda_\pm(\xi)| <1$. However, requesting this condition for all $\xi \in {\mathbb R}$ is too restrictive. To account for the effect of a spatial discretization in this analysis, we must restrict the range of admissible Fourier wave-vectors $\xi$ to the interval $[-\frac{\pi}{h}, \frac{\pi}{h}]$. Indeed, a space discretization of step $h$ cannot represent wave-vectors of magnitude larger than $\frac{\pi}{h}$. This motivates the following definition of stability:
\begin{definition}
The scheme is stable if and only if
\begin{equation}
|q^\lambda_\pm(\xi)|\leq 1, \quad \forall \xi \quad \mbox{ such that } \quad |\xi| < \frac{\pi}{h}.
\label{stab_cnd}
\end{equation}
\label{def_stab}
\end{definition}
Now, our goal is to find sufficient conditions on $\delta$ such that either scheme is stable. More precisely, we prove:
\begin{proposition}
For both the EPB-based scheme (\ref{DLVEPB_n})-(\ref{DLVEPB_phi}) or the REPB-based scheme (\ref{DLRVEPB_n})-(\ref{DLRVEPB_phi}), there exists a constant $C>0$ independent of $\lambda$ when $\lambda \to 0$ such that if $\delta \leq C h$, the scheme is stable.
\label{prop_stab}
\end{proposition}
This condition states that the schemes are stable irrespective of how small $\lambda$ is. We say that the schemes are 'Asymptotically-Stable' in the limit $\lambda \to 0$. We note that this stability condition is similar to the CFL condition of the ICE model, which is the limit model when $\lambda \to 0$.
\noindent
{\bf Proof:} We first define conditions such that the constant term of the quadratic equations (\ref{char_EPB}) or (\ref{char_REPB}) is less than $1$. For the EPB-based scheme (see \ref{char_EPB}), this condition is
$\delta \leq {1}/{\beta \xi^2}$, for all $\xi$ such that $|\xi| \leq {\pi}/{h}$. For reasons which will become clear below, we rather impose:
\begin{eqnarray}
& & \hspace{-1cm}
\delta \leq \frac{1}{2 \beta \xi^2}, \quad \forall \xi \quad \mbox{ such that } \quad |\xi| \leq \frac{\pi}{h}
, \label{cond_EPB}
\end{eqnarray}
which, with (\ref{beta}), is equivalent to:
\begin{eqnarray}
& & \hspace{-1cm}
\delta \leq C_1 h, \quad \mbox{ with } \quad C_1 = \frac{1}{2 c \pi^2}
. \label{cond2_EPB}
\end{eqnarray}
For the REPB-based scheme (see \ref{char_REPB}), this condition is
$$\delta \leq \frac{2 \beta}{\beta^2 \xi^2 + 1}, \quad \forall \xi \quad \mbox{ such that } \quad |\xi| \leq \frac{\pi}{h}
, $$
or, with (\ref{beta}):
\begin{eqnarray}
& & \hspace{-1cm}
\delta \leq C_1 h, \quad \mbox{ with } \quad C_1 = \frac{2c}{1+c^2 \pi^2}
. \label{cond2_REPB}
\end{eqnarray}
Now, under these conditions, the two roots satisfy (\ref{stab_cnd}) whenever the discriminant of the quadratic equation is non-positive. Indeed, in this case, the two roots are complex conjugates and their product, i.e. the square of their modulus, is equal to the constant term of the quadratic equation, which is less than one.
It is a matter of computation to check that the discriminant has the same sign as the expression $F(\delta)$ given by:
\begin{eqnarray}
& & \hspace{-1cm}
F(\delta) = \delta^2 + 4 \beta (1 + \lambda^2 \xi^2) \delta - 4 \frac{1 + \lambda^2 \xi^2}{\xi^2}
, \label{Delta_EPB}
\end{eqnarray}
in the case of the EPB-based scheme and by
$$F(\delta) = \delta^2 - 4 \beta \frac{1 + \lambda^2 \xi^2}{\lambda^2 \xi^2} \delta - 4 \frac{1 + \lambda^2 \xi^2}{\lambda^4 \xi^6}
, $$
in the case of the REPB-based scheme.
In the case of the EPB-based scheme, we use (\ref{cond_EPB}) to estimate the second term of $F(\delta)$ in (\ref{Delta_EPB}):
\begin{eqnarray*}
& & \hspace{-1cm}
F(\delta) \leq \delta^2 - 2 \frac{1 + \lambda^2 \xi^2}{\xi^2} ,
\end{eqnarray*}
and a sufficient condition for $F(\delta)$ to be non-positive is that $\delta \leq \sqrt 2 {(1 + \lambda^2 \xi^2)^{1/2}}{|\xi|}^{-1}$. This relation is true for all $\xi$ such that $ |\xi| \leq {\pi}/{h}$ if
$\delta \leq \sqrt 2 \pi^{-1} (h^2 + \lambda^2 \pi^2)^{1/2}$ and a sufficient condition is that $\delta \leq C_2 h$, with $C_2=\sqrt 2 \pi^{-1}$. Now, taking $C = \min\{C_1,C_2\}$ with $C_1$ given by (\ref{cond2_EPB}) leads to the result. In fact, the optimal numerical viscosity is such that $C_1=C_2$, i.e. $c= (2 \sqrt 2 \pi)^{-1}$.
In the case of the REPB scheme, we estimate $F(\delta)$ by
\begin{eqnarray*}
& & \hspace{-1cm}
F(\delta) \leq \delta^2 - 4 \beta \frac{1 + \lambda^2 \xi^2}{\lambda^2 \xi^2} \delta
,
\end{eqnarray*}
and a sufficient condition for $F(\delta)$ to be non-positive is that $\delta \leq 4 \beta {(1 + \lambda^2 \xi^2)}({\lambda^2 \xi^2})^{-1}$. In view of (\ref{beta}), this relation is true for all $\xi$ such that $ |\xi| \leq {\pi}/{h}$ if
$\delta \leq 4 c (\lambda^2 \pi^2)^{-1} h (h^2 + \lambda^2 \pi^2)$ and a sufficient condition is that $\delta \leq C_2 h$, with $C_2=4c$. Now, taking $C = \min\{C_1,C_2\}= C_1$ with $C_1$ given by (\ref{cond2_REPB}) leads to the result. The optimal numerical viscosity can be chosen to maximize $C$, which leads to $c= \pi^{-1}$. This ends the proof of Proposition \ref{prop_stab}. \null
{$\blackbox$}
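The conclusion of Proposition~\ref{prop_stab} can also be illustrated numerically: the sketch below (an illustration, not part of the proof) evaluates the roots of (\ref{char_EPB}) and (\ref{char_REPB}) over the admissible wave-numbers for several values of $\lambda$, using the viscosity coefficients suggested above and a time step just inside the stated bound. The grid of values and the safety factor are illustrative choices.
\begin{verbatim}
# Sketch: check |q_pm| <= 1 for the EPB- and REPB-based characteristic
# equations under delta <= C1*h (illustrative parameter values).
import numpy as np

def roots_epb(xi, lam, beta, delta):
    a = 1.0 - beta * xi**2 * delta
    b = -2.0 * (a - xi**2 * delta**2 / (2.0 * (1.0 + lam**2 * xi**2)))
    return np.roots([1.0, b, a**2])

def roots_repb(xi, lam, beta, delta):
    a = 1.0 - beta * xi**2 * delta
    b = -2.0 * (a + lam**2 * xi**4 * delta**2 / (2.0 * (1.0 + lam**2 * xi**2)))
    return np.roots([1.0, b, a**2 + xi**2 * delta**2])

h = 1.0e-2
cases = [("EPB",  roots_epb,  1.0 / (2.0 * np.sqrt(2.0) * np.pi)),
         ("REPB", roots_repb, 1.0 / np.pi)]
for lam in [1.0, 1.0e-2, 1.0e-6]:
    for name, rootfun, c in cases:
        beta = c * h
        C1 = 1.0/(2.0*c*np.pi**2) if name == "EPB" else 2.0*c/(1.0 + c**2*np.pi**2)
        delta = 0.99 * C1 * h                 # stay strictly inside the bound
        xis = np.linspace(1.0e-3, np.pi / h, 400)
        qmax = max(np.abs(rootfun(xi, lam, beta, delta)).max() for xi in xis)
        print(name, "lambda =", lam, " max |q| =", qmax)   # expected <= 1
\end{verbatim}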
As a conclusion, we can see that both the EPB-based and REPB-based schemes have similar Asymptotic-Stability properties as $\lambda \to 0$. Therefore, they must be selected on the basis of other criteria. The fact that the REPB-based scheme has a well-posed hydrodynamic part and leads to a discretization of the ICE model in conservative form in the limit $\lambda \to 0$ are indications that this scheme should be preferred to the EPB-based scheme. In the next section, we will present numerical results that support this statement.
\setcounter{equation}{0}
\section{Spatial discretization}
\label{sec_spatial}
We introduce $ ( C_j )_{j=1}^{N} $ a uniform subdivision of
the computational domain $ \Omega \subset \mathbb{R} $ such that
$ \Omega = \cup_{j=1}^{N} C_j $.
The interface between $ C_{j} $ and $ C_{j+1} $ is the point $ x_{j+1/2} $.
We denote by $ U_{j}^{m} $ the approximate
vector of the density and momentum at time~$ t^{m} $ on the cell~$ C_j $,
$$ U_{j}^{m} = \begin{pmatrix}
n_{j}^{m} \\
(n u)_{j}^{m}
\end{pmatrix}.
$$
We use a time-splitting method to compute the
density and momentum at time $ t^{m+1} $.
There are three steps to pass from $ U^{m} $ to $ U^{m+1} $ which are described below.
\subsection{Hydrodynamic part}
\label{subsec_hydro}
The first step of the splitting is the finite-volume computation of the state
$ U^{\#} $ such that
$$ \frac{ U_{j}^{\#} - U_{j}^{m} }{ \delta } +
\frac{ F_{j+1/2}^{m} - F_{j-1/2}^{m} }{ h } = 0,
$$
where $ F_{j+1/2} $ is the numerical flux computed
at the interface $ x_{j+1/2} $.
We have used a Local Lax-Friedrichs \cite{Leveque_2} (or Rusanov \cite{Rusanov} or degree 0 polynomial \cite{Deg_Pey_Rus_Vil}) solver. This solver is an improved version of the Lax-Friedrichs solver
which has been successfully used in conjunction with AP-schemes for the two-fluid Euler-Poisson problem (see \cite{Cri_Deg_Vig_07}) or for small Mach-number flows \cite{Deg_Tan_10}. This solver depends on a local estimate of the maximal characteristic speed. This estimate proceeds as follows. We introduce
$$ (a^{+})_{j+1/2}^{m} = \max \left( u_{j+1/2}^m + 1 , u_{j+1}^m + 1 \right) \quad \mbox{ and } \quad (a^{-})_{j+1/2}^{m} = \min \left( u_j^m - 1 , u_{j+1/2}^m - 1 \right) , $$
where $ u_{j+1/2}^m = ( u_j^m + u_{j+1}^m )/2 $.
Then, the local maximal characteristic velocity is estimated by
$$ a_{j+1/2}^m = \max \left( | (a^{-})_{j+1/2}^{m} | , | (a^{+})_{j+1/2}^{m} | \right), $$
and the numerical flux at $ x_{j+1/2} $ is given by:
\begin{equation}
F_{j+1/2} = \frac{1}{2} \left( F(U_j^m) + F(U_{j+1}^m)
+ a_{j+1/2}^m \left( U_j^m - U_{j+1}^m \right) \right).
\label{num_flux}
\end{equation}
The time step $ \delta $ must satisfy the CFL condition
$ \frac{ \delta }{ h } \, \max_j a_{j+1/2}^m \leq 1 $
to ensure stability.
In practice, the time step is chosen at each iteration
to enforce this stability condition. As for boundary conditions, we impose fictitious states $U_l$ and $U_r$ across the left and right boundaries respectively and compute the corresponding fluxes across the boundaries using the same formula
(\ref{num_flux}).
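A minimal sketch of this flux computation, for the REPB hydrodynamic part written in scaled conservative variables $U=(n,nu)$ with flux $F(U)=(nu,\,nu^2/n+n)$, is given below; the array layout and the handling of the fictitious boundary states are illustrative choices, and for the EPB form the pressure contribution would simply be dropped from the flux.
\begin{verbatim}
# Sketch: local Lax-Friedrichs (Rusanov) fluxes for U = (n, nu) in
# scaled units (sound speed 1); U has shape (2, N).
import numpy as np

def physical_flux(U):
    n, nu = U[0], U[1]
    return np.array([nu, nu**2 / n + n])       # EPB variant: drop "+ n"

def rusanov_fluxes(U, Ul, Ur):
    """All interface fluxes F_{j+1/2}; Ul, Ur are the fictitious states."""
    Uext = np.column_stack([Ul, U, Ur])         # shape (2, N+2)
    u = Uext[1] / Uext[0]
    umid = 0.5 * (u[:-1] + u[1:])
    a_plus  = np.maximum(umid + 1.0, u[1:] + 1.0)    # (a+)_{j+1/2}
    a_minus = np.minimum(u[:-1] - 1.0, umid - 1.0)   # (a-)_{j+1/2}
    a = np.maximum(np.abs(a_plus), np.abs(a_minus))  # local maximal speed
    FL, FR = physical_flux(Uext[:, :-1]), physical_flux(Uext[:, 1:])
    return 0.5 * (FL + FR + a * (Uext[:, :-1] - Uext[:, 1:]))

def hydro_step(U, Ul, Ur, h, delta):
    F = rusanov_fluxes(U, Ul, Ur)               # shape (2, N+1)
    return U - (delta / h) * (F[:, 1:] - F[:, :-1])
\end{verbatim}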
\subsection{Potential update}
\label{subsec_pot}
The second step of the time-splitting is the computation of the potential.
We use $ n^{\#} $ to compute $ \phi^{m+1} $ from the following finite difference discretization of the Poisson equation:
$$ \lambda^2 h^{-2} (\phi_{j-1}^{m+1} - 2 \phi_{j}^{m+1}
+ \phi_{j+1}^{m+1})
+ e^{ - \phi_{j}^{m+1} } = n_{j}^{\#}.
$$
The boundary conditions are given by
$ \phi_l = - \log n_{l}^{\#} $ on
the left hand side of the domain
and $ \phi_r = - \log n_{r}^{\#} $
on the right hand side of the domain.
The non-linear system is solved with Newton's method.
This iterative algorithm needs a good initial guess of the solution to
be efficient.
The initial guess is the potential at previous time $ \phi^{m} $.
For the first step, we choose the quasi-neutral potential
$ ( \phi_{j}^{0} ) = ( - \log n_{j}^0 ) $ as an initial guess.
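One possible realization of this Newton iteration is sketched below, with a dense linear solve standing in for a tridiagonal one; the tolerance and the iteration cap are illustrative choices.
\begin{verbatim}
# Sketch: Newton iteration for the discretized Boltzmann-Poisson equation
#   lam^2 (phi_{j-1} - 2 phi_j + phi_{j+1})/h^2 + exp(-phi_j) = n_j,
# with boundary values phi_l = -log(n_l), phi_r = -log(n_r).
import numpy as np

def solve_poisson_boltzmann(n, n_l, n_r, lam, h, phi0, tol=1e-12, max_iter=50):
    phi = phi0.copy()
    phi_l, phi_r = -np.log(n_l), -np.log(n_r)
    N, lap = len(n), lam**2 / h**2
    for _ in range(max_iter):
        phi_ext = np.concatenate(([phi_l], phi, [phi_r]))
        # residual F(phi) and tridiagonal Jacobian dF/dphi
        F = lap * (phi_ext[:-2] - 2.0 * phi + phi_ext[2:]) + np.exp(-phi) - n
        J = (np.diag(-2.0 * lap - np.exp(-phi))
             + np.diag(np.full(N - 1, lap), 1) + np.diag(np.full(N - 1, lap), -1))
        dphi = np.linalg.solve(J, -F)
        phi += dphi
        if np.max(np.abs(dphi)) < tol:
            break
    return phi
\end{verbatim}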
\subsection{Source term}
\label{subsec_source}
In the REPB form, the source term $\mathcal{Q}$ at the right-hand side of (\ref{refs-mom}) can be written as
$$ \mathcal{Q} = \lambda^2 \left( \partial_x ( \partial_{xx}^2 \phi) +
\partial_x \phi \, \partial^2_x \phi \right) . $$
This expression is discretized thanks to a finite difference approximation.
For a cell $ C_{j} $ in the domain, the source term $ \mathcal{Q}_j $ is given by
centered finite difference approximation:
\begin{eqnarray*}
\mathcal{Q}_{j} = \frac{\lambda^2}{2h^3} \left( \left(
\phi_{j+2} - 2 \phi_{j+1} + 2 \phi_{j-1} - \phi_{j-2} \right)
+ \left( \phi_{j+1} - 2 \phi_{j} + \phi_{j-1} \right)
\left( \phi_{j+1} - \phi_{j-1} \right) \right).
\end{eqnarray*}
On the first cell $ C_{1} $ the source term is computed
using a decentered finite difference approximation:
\begin{eqnarray*}
\mathcal{Q}_{1} = \frac{\lambda^2}{h^3} \left( \left(
\phi_{3} - 3 \phi_{2} + 3 \phi_{1} - \phi_{l} \right)
+ \frac{1}{2} \left( \phi_{2} - 2 \phi_{1} + \phi_{l} \right) \left( \phi_{2} - \phi_{l} \right) \right),
\end{eqnarray*}
and similarly in the last cell.
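For the interior cells, the centered formula above translates directly into vectorized form; a brief sketch (the boundary cells, treated by the decentered formulas, are omitted here) might read:
\begin{verbatim}
# Sketch: centered discretization of the source term Q_j for the interior
# cells j = 2, ..., N-3 (0-based); boundary cells use the decentered
# formulas of the text.
import numpy as np

def source_term_interior(phi, lam, h):
    third_deriv = phi[4:] - 2.0 * phi[3:-1] + 2.0 * phi[1:-3] - phi[:-4]
    curv_times_slope = (phi[3:-1] - 2.0 * phi[2:-2] + phi[1:-3]) * (phi[3:-1] - phi[1:-3])
    return lam**2 / (2.0 * h**3) * (third_deriv + curv_times_slope)
\end{verbatim}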
\setcounter{equation}{0}
\section{Numerical results}
\label{sec_num}
We present three classes of numerical results:
the first test case is a solitary wave travelling in a plasma.
This test case shows the ability of the numerical schemes to handle the dispersive regime.
The second test case is related to the quasi-neutral limit $ \lambda \to 0 $ of the Euler-Poisson-Boltzmann model:
it is a Riemann problem to check the ability of the scheme to handle hydrodynamic phenomena like shocks.
The last test case has been previously investigated by Liu and Wang in \cite{Liu_Wang_1,Liu_Wang_2} and corresponds to the occurrence of multi-valued solutions in the semi-classical setting.
\subsection{Soliton test case}
\label{subsec_soliton}
\subsubsection{Description}
\label{subsubsec_soliton_description}
The dispersive nature of the Euler-Poisson system is shown in \cite{Liu_Slemrod}.
Therefore, the EPB system, like other nonlinear dispersive models such as the KdV equation, exhibits solitary
wave solutions. These special solutions are particularly convenient
to test the ability of the EPB and REPB schemes
to capture the dispersive regime. Solitary waves also provide interesting quantitative checks. Indeed, while travelling through the plasma, the soliton maintains its shape and velocity.
Therefore, one can check the accuracy of the numerical schemes by observing how well
they preserve the soliton shape and velocity over time.
We now summarize the derivation of this special solution.
We refer to \cite{Chen} for a detailed description and
physical considerations. For this derivation, we use non-dimensional units, and we now specify the corresponding scalings.
The space scale related to these solitons is the Debye length $ \lambda_{D} $.
For this reason, we take $ \lambda = 1 $ in all this section.
The size of the computational domain is equal to several
Debye lengths (about $ 50 \lambda_{D} $ are used in the subsequent simulations).
The density of the undisturbed plasma (away from the support of the solitary wave) is
chosen as the characteristic density and, in dimensionless units, is equal to $ 1 $, so that the electron density is equal to $e^{-\phi}$, where $\phi$ is the electrostatic potential energy.
In the frame moving with the wave, we denote by $ n_{s}, u_{s}, \phi_{s} $
the density, velocity and potential of the plasma.
These quantities are constant in time, and satisfy the following
relations:
\begin{eqnarray*}
& & \partial_x ( n_s u_s ) = 0, \\
& & \partial_x ( n_s u_s u_s ) = n_s \partial_x \phi_s, \\
& & \partial_{x}^{2} \phi_s = n_s - e^{-\phi_s}.
\end{eqnarray*}
The momentum being uniform in $ x $, we write $ q = n_s u_s = n_0 u_0$ where $n_0 = n_s(0)$ and $u_0 = u_s(0)$.
The momentum conservation law can be written as $ \partial_x \left( q^2 / n_s \right) = n_s \partial_x \phi_s $.
Consequently, we have $ \partial_x \left( {q^2}/{n_s^2} \right) = 2 \partial_x \phi_s $.
For all $ x \in [0,x_{\max}] $, we get:
\begin{equation}
\frac{1}{2} \left( \frac{q^2}{n_s^2} \right)(x) -
\frac{1}{2} \left( \frac{q^2}{n_s^2} \right)(0) =
\phi_s ( x ) - \phi_s ( 0 ).
\end{equation}
The potential being defined up to an additive constant, we choose this constant such that
$ \phi_s (0) = 0 $.
The ion density is then given by
\begin{equation}
\label{eq:density_sagdeev}
n_s (x) = \left( \frac{1}{n_0^2} + \frac{2 \phi_s}{ n_0^2 u_0^2 } \right)^{-1/2}.
\end{equation}
In the present analysis, we assume that $ n_{0} = 1 $, i.e. $n_0$ is equal to the density of the undisturbed plasma.
Inserting this relation in the Poisson
equation yields the following equation
for the potential:
\begin{equation} \label{sol_poisson_eq}
\partial_{x}^{2} \phi_s
= \left( 1 + \frac{2 \phi_s}{ u_0^2 } \right)^{-1/2} - e^{-\phi_s},
\end{equation}
with the condition
\begin{equation} \label{sol_poisson_eq_x=0}
\phi_s (0) = 0 .
\end{equation}
One needs another condition at $ x = 0 $ to set up a Cauchy problem for (\ref{sol_poisson_eq}).
Note that $ \phi_s \equiv 0 $ is
an obvious solution of equation (\ref{sol_poisson_eq}) and satisfies the homogeneous condition $\partial_x \phi_s ( 0 ) = 0 $.
It corresponds to the state of the undisturbed plasma.
To capture a non-trivial solution, we must
alter this condition by a small
disturbance, setting it to
\begin{equation} \label{sol_poisson_eq_x=1}
\partial_x \phi_s ( 0 ) = \eta ,
\end{equation}
with $\eta$ 'small'.
The behavior of these solutions
can be clarified thanks to the theory of the Sagdeev potential
(which is a primitive with respect to $\phi_s$ of the right-hand side of (\ref{sol_poisson_eq})).
Details on this study can be found in \cite{Chen}.
Here we just recall that
shock waves in a cold-ion plasma can exist
only for $ 1 < u_0 < 1.6 $ (Bohm criterion).
The sign of $ \eta $ determines
the type of solution which can be found:
a positive $ \eta $ leads to a potential barrier that forms a sheath,
whereas a negative value gives rise to a monotonic transition to a negative $ \phi $ which forms a solitary wave corresponding to a potential
and density disturbance propagating to the
right (for instance) with velocity $ u_0 $.
There is no analytic solution to equation (\ref{sol_poisson_eq}).
Resorting to numerical simulation is the only way to determine these solutions.
Details about this numerical method are given below.
Once the soliton potential is known,
the density is computed thanks to (\ref{eq:density_sagdeev}).
Since the present analysis has been performed in a frame co-moving with the soliton at velocity $u_0$, the velocity in the laboratory frame is $u_s + u_0$, with $u_s = q/n_s$.
In the subsequent numerical simulations $ n_{s}, u_{0}+u_{s}, \phi_{s} $ are taken as
an initial condition for the scheme.
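A minimal sketch of this construction of the reference solution is given below (Python; the values of $u_0$, $\eta$, the mesh step and the stopping threshold are illustrative choices, not the ones used in the paper).
\begin{verbatim}
import numpy as np

def soliton_reference(u0=1.3, eta=-1.0e-3, dx=1.0e-4, x_max=50.0):
    # Explicit second-order integration of (sol_poisson_eq):
    #   phi'' = (1 + 2 phi/u0^2)^(-1/2) - exp(-phi),  phi(0)=0, phi'(0)=eta.
    f = lambda p: (1.0 + 2.0*p/u0**2)**(-0.5) - np.exp(-p)
    n_max = int(x_max/dx)
    phi = np.zeros(n_max)
    phi[1] = eta*dx + 0.5*dx**2*f(0.0)
    went_down = False
    for k in range(1, n_max - 1):
        phi[k+1] = 2*phi[k] - phi[k-1] + dx**2*f(phi[k])
        went_down = went_down or phi[k+1] < -0.01      # inside the potential well
        if went_down and phi[k+1] > -abs(eta):         # back near 0: keep a single peak
            phi = phi[:k+2]
            break
    n_s = (1.0 + 2.0*phi/u0**2)**(-0.5)   # density, eq. (eq:density_sagdeev), n0 = 1
    u_lab = u0 + u0/n_s                   # laboratory-frame velocity as in the text
    return phi, n_s, u_lab
\end{verbatim}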
\subsubsection{Numerical results for the soliton test case}
\label{subsubsec_results_soliton}
In this test case, where the Debye length and the size of the computational domain are of the same order, both the EPB and REPB schemes are expected to be accurate.
However, this test case provides a way to carry out quantitative comparisons of the numerical solutions with an analytical reference solution. These comparisons make it possible to quantify the order of accuracy and the amount of numerical diffusion of the two schemes.
The analytical solution is easily obtained at time $ t $ by
a spatial translation of the solution at time $0$ of a distance $ u_0 t $.
This test case is implemented as follows.
The boundary conditions are periodic, which ensures that the
soliton can be tracked on long simulation times without
the need for a large computational domain.
The length $L$ of the computational domain is defined in relation to the choice of the initial condition as explained below.
We denote by $ t_{L} = L / u_{0} $ the travel time of the soliton
in the domain $ [0,L] $ .
First a numerical solution of (\ref{sol_poisson_eq}) with initial conditions (\ref{sol_poisson_eq_x=0}), (\ref{sol_poisson_eq_x=1}) is computed
with an explicit finite difference scheme on a mesh of step $ \Delta {x}^{\text{ref}} $ and provides the reference solution.
$ \Delta {x}^{\text{ref}} $ is chosen small enough to provide an almost 'exact' reference solution.
For suitable $ \eta $ and $ u_{0} $ given by the study of the Sagdeev potential \cite{Chen},
the potential oscillates in space for positive $ x $.
One wants a single potential well to initialize the scheme.
To this aim, the number of nodes $ N^{\text{ref}} $ is taken such that the initial condition shows a single peak.
Moreover, it is chosen such that $ \phi^{\text{ref}}_{N^{\text{ref}}} $ is close
enough to $ 0 $ for the periodic boundary conditions to be accurate.
This reference solution is interpolated to provide an initial condition
for the EPB and REPB schemes, and to compute numerical errors on the density,
momentum and potential.
The results of these comparisons are now discussed.
Figures \ref{soliton1} and \ref{soliton2} show the density and velocity in a soliton at times
$ 0 $, $ t_L/5 $, $ 2 t_{L}/5$ and $ t_{L} $, computed with the reformulated scheme.
The shape of the initial condition is conserved with time, but the peak
is damped and does not return to its original location after $ t_{L} $.
This effect is due to the numerical diffusion, which is inherent to the numerical method,
and can be observed with the classical scheme as well.
In order to accurately compare the reformulated and classical schemes, one needs to perform
a more precise study of this damping.
The forthcoming convergence study uses seven grids, with a range of space steps
from $ h $ to $ h/64 $, where $ h $ corresponds to $ 250 $ cells.
It confirms that both schemes are first order in space,
and even if the REPB scheme suffers from a larger numerical diffusion than
the classical one, both provide satisfactory results for this test case.
\begin{figure}
\caption{Density in the soliton at times $ 0 $, $ t_L/5 $, $ 2 t_{L}/5$ and $ t_{L} $, computed with the REPB scheme.}
\label{soliton1}
\end{figure}
\begin{figure}
\caption{Velocity in the soliton at times $ 0 $, $ t_L/5 $, $ 2 t_{L}/5$ and $ t_{L} $, computed with the REPB scheme.}
\label{soliton2}
\end{figure}
The numerical convergence of the schemes in space is investigated by comparison with
the reference solution;
at time $ t_{L}/5 $ the $ L^{\infty} $ relative error with respect to the reference solution is computed.
For instance the density error is
$$ \varepsilon_{EPB}(n) = \frac{ || n_{\text{num}} - n_{\text{ref}} ||_{\infty} }{ || n_{\text{ref}} ||_{\infty} },
$$
where $ n_{\text{ref}} $ is the density of the reference solution, i.e. the initial density
translated by $ L/5 $, and $ n_{\text{num}} $ is the density computed
with the classical scheme.
Such errors are defined for the reformulated scheme and for the momentum
and potential.
The schemes are tested on seven grids, made of $ 250 $, $ 500 $, $ 1000 $, $ 2000 $, $ 4000 $, $ 8000 $ and $ 16000 $ cells.
Table \ref{error_tab} and figure \ref{error_fig} confirm that
both numerical schemes are first order in space.
The REPB scheme suffers from a larger numerical dissipation than the EPB scheme.
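As a quick consistency check, the observed order between two successive grids can be computed directly from the density errors of Table \ref{error_tab}; the short Python sketch below (using the tabulated values) yields orders close to $1$, in agreement with the first-order behavior.
\begin{verbatim}
import numpy as np

# L-infinity density errors from Table (error_tab), grids h (250 cells) to h/64.
eps_epb  = [0.053, 0.028, 0.014, 7.6e-3, 3.86e-3, 1.76e-3, 8.89e-4]
eps_repb = [0.102, 0.060, 0.035, 18.5e-3, 9.57e-3, 4.67e-3, 2.37e-3]

# Observed order between two successive grids: log2(eps_coarse / eps_fine).
for name, eps in (("EPB", eps_epb), ("REPB", eps_repb)):
    orders = np.log2(np.array(eps[:-1]) / np.array(eps[1:]))
    print(name, np.round(orders, 2))
\end{verbatim}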
\begin{table}[hbtp]
\begin{tabular}{|c|c|c||c|c||c|c|}
\hline
N & $ \varepsilon_{EPB}(n) $ & $ \varepsilon_{REPB}(n) $
& $ \varepsilon_{EPB}(nu) $ & $ \varepsilon_{REPB}(nu) $
& $ \varepsilon_{EPB}(\phi) $ & $ \varepsilon_{REPB}(\phi) $ \\
\hline
\hline
$h$ & $ 0.053 $&$ 0.102 $&$ 0.111 $&$ 0.215 $&$ 0.066 $&$ 0.116 $ \\
$h/2$ & $ 0.028 $&$ 0.060 $&$ 0.060 $&$ 0.131 $&$ 0.028 $&$ 0.066 $ \\
$h/4$ & $ 0.014 $&$ 0.035 $&$ 0.032 $&$ 0.073 $&$ 0.014 $&$ 0.035 $ \\
$h/8$ & $ 7.6\times 10^{-3} $&$ 18.5\times 10^{-3} $&$ 1.64\times 10^{-2} $&$
3.87\times 10^{-2} $&$ 7.12\times 10^{-3} $&$ 18.4\times 10^{-3} $ \\
$h/16$ & $ 3.86\times 10^{-3} $&$ 9.57\times 10^{-3} $&$ 8.40\times 10^{-3} $&$
2.00\times 10^{-2} $&$ 3.50\times 10^{-3} $&$ 9.42\times 10^{-3} $ \\
$h/32$ & $ 1.76\times 10^{-3} $&$ 4.67\times 10^{-3} $&$ 3.83\times 10^{-3} $&$
1.00\times 10^{-2} $&$ 2.31\times 10^{-3} $&$ 4.71\times 10^{-3} $ \\
$h/64$ & $ 8.89\times 10^{-4} $&$ 2.37\times 10^{-3} $&$ 1.97\times 10^{-3} $&$
5.09\times 10^{-3} $&$ 1.07\times 10^{-3} $&$ 2.41\times 10^{-3} $ \\
\hline
\end{tabular}
\caption{\label{error_tab} Comparison of the error in $ L^{\infty} $ norm
for the density, momentum and potential with various space grid sizes
using the EPB and REPB schemes.
The error is computed against the analytical solution of
the soliton test case.}
\end{table}
\begin{figure}
\caption{Convergence of the $ L^{\infty} $ errors with the space step for the EPB and REPB schemes on the soliton test case.}
\label{error_fig}
\end{figure}
The numerical dissipation of the schemes can be measured in another way.
Indeed, the amplitude of the numerical soliton is damped with time.
The following study quantifies the damping rates of both the EPB and REPB schemes.
This study is performed on a long time simulation: its final time is $ 2 t_{L} $.
This study compares the damping rate of the density amplitude over one time increment $ \Delta t $
(not related to the numerical time step), at different times of the simulation.
The time increment $ \Delta t $ is $ t_{L}/5 $.
The density amplitude $ n_{\max} ((k+1)\Delta t)$ is compared to the density amplitude at the previous time increment $ n_{\max} (k \Delta t)$
for $ 0 \leq k \leq 9 $.
We measure the decrement of the amplitude $ \Delta_{k} $ between $ k\Delta t $ and $ (k+1)\Delta t $ as follows:
\begin{equation}
\Delta_{k} = \frac{1}{\Delta t}
\left| \ln \left( \frac{ n_{\max}((k+1)\Delta t)}{n_{\max}(k\Delta t)} \right) \right|
\end{equation}
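A direct transcription of this measurement is sketched below (Python; \texttt{n\_max\_series} is a hypothetical array containing the recorded amplitudes $n_{\max}(k\Delta t)$ for $k = 0,\dots,10$).
\begin{verbatim}
import numpy as np

def damping_decrements(n_max_series, dt):
    # Delta_k between k*dt and (k+1)*dt from the recorded density amplitudes.
    n = np.asarray(n_max_series, dtype=float)
    return np.abs(np.log(n[1:] / n[:-1])) / dt
\end{verbatim}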
The damping rate of the wave amplitude appears on figure \ref{damping_fig} for both the EPB and REPB schemes.
Two spatial grids are used for this comparison: a coarse grid with $ 1000 $ cells and a fine grid with $ 16000 $ cells.
The REPB scheme shows a larger dissipation than the classical one.
The evolution of the damping rates with simulation time is similar for the two schemes:
on a coarse grid the peak is damped faster at the beginning of the simulation.
\begin{figure}
\caption{Damping rate of the soliton amplitude for the EPB and REPB schemes, on a coarse grid ($ 1000 $ cells) and a fine grid ($ 16000 $ cells).}
\label{damping_fig}
\end{figure}
\subsection{Riemann problem}
\label{subsec_riemann}
The previous test case, in which the domain size and Debye length are of the same order of magnitude, was designed for the dispersive regime $\lambda = O(1)$. The present test case aims at investigating how the schemes perform in the hydrodynamic (or quasi-neutral) regime $\lambda \ll 1$. Therefore, in this test case, values of $\lambda$ ranging from small to very small are used.
When $\lambda$ is small, the REPB model explicitly appears as a perturbation of the ICE model, which is a strongly hyperbolic and conservative model. Therefore the REPB-based scheme should be accurate in the hydrodynamic regime, in particular for the computation of solutions involving discontinuities.
By contrast, the hydrodynamic part of the EPB model is a pressureless gas dynamics model, which is not strictly hyperbolic, and is thus weakly unstable.
For this reason, the EPB-based scheme is expected to be less accurate in the small Debye length limit. The present test problem aims at testing the validity of these predictions.
The test case is a shock tube problem involving two outgoing shock waves.
The initial density is a constant, while the initial velocity has a jump at $ x = 0 $ between the constants $ u_{L} = +1 $ and $ u_{R} = -1$.
The density in the intermediate state of the Riemann problem is larger and the velocity is zero. Two outgoing shock waves appear on each side of this intermediate state.
The computational domain is $ [-0.2;0.2] $. The dimensionless Debye
length $ \lambda $ varies from $ 10^{-2} $ to $ 10^{-4} $.
The value $ \lambda = 10^{-2} $ is large enough for singularities near the shock waves to appear, due to the dispersive nature of the Euler-Poisson-Boltzmann model. In the semi-classical setting, the framework of multi-valued solutions \cite{Liu_Wang_1,Liu_Wang_2} can be used to explain the qualitative features of the classical solutions. Indeed, classical solutions keep a signature of these underlying multi-valued solutions, in the form of singularities (when the number of branches changes) and oscillations (when several branches co-exist and the solution 'oscillates' between these branches). In the $\lambda \to 0$ limit, the entropic solutions of the ICE model capture the average value of these oscillations, but not the details of them. As we will see, the EPB-based scheme keeps better track of these oscillations but when the mesh size does not resolve the Debye length, the oscillations become mesh-dependent. On the other hand, the REPB-based scheme directly provides the entropic solution of the limiting ICE model and is better suited to capture the average value of these oscillations (i.e. the weak limit of the solutions of the EPB model when $\lambda \to 0$).
Fig. \ref{densities} confirms that the numerical solution provided by the EPB-based scheme in under-resolved situation is mesh-dependent. It displays the density computed by the EPB-based schemes for two different mesh sizes. The density peaks computed on the finer mesh are much finer and higher than those computed on the coarse mesh. By contrast, the velocity and potential remain finite regardless of the mesh size (not displayed).
Fig. \ref{vit_pot} shows the velocity and potential computed with the EPB-based scheme compared to the REPB-based scheme.
Like in the soliton test case, the REPB-based scheme shows a slightly larger numerical dissipation.
\begin{figure}
\caption{Riemann problem with $ \lambda = 10^{-2} $: density computed with the EPB-based scheme on two different mesh sizes.}
\label{densities}
\end{figure}
\begin{figure}
\caption{Riemann problem with $ \lambda = 10^{-2} $: velocity and potential computed with the EPB-based and REPB-based schemes.}
\label{vit_pot}
\end{figure}
The value $ \lambda = 10^{-4} $ is small enough to observe the hydrodynamic regime.
Both schemes show an accurate determination of the shock speed but the EPB scheme leads to spurious oscillations in the neighborhood of the shock.
These oscillations are shown in figure \ref{4_densities}.
They are mesh dependent and occur even for smaller values of the dimensionless Debye length $ \lambda $.
Whatever the mesh size, the solution computed with the reformulated REPB scheme does not present such oscillations.
\begin{figure}
\caption{Riemann problem with $ \lambda = 10^{-4} $: densities computed with the EPB and REPB schemes, showing the spurious oscillations produced by the EPB scheme near the shocks.}
\label{4_densities}
\end{figure}
This test case shows that the EPB and REPB-based schemes have a very different behavior when $\lambda \ll 1$. In such under-resolved situations, while the EPB scheme presents mesh-dependent oscillations of finite amplitude, the REPB-based scheme provides an accurate approximation of the entropic solution of the limiting ICE model. As a conclusion, the REPB-based scheme should be preferred for the $\lambda \ll 1$ regime.
\subsection{Dispersive solutions test-cases}
\label{subsec_HLtest}
\subsubsection{Description}
\label{subsubsec_dispersive_test_description}
In this section, we present test-cases which are inspired from \cite{Liu_Wang_2}. In \cite{Liu_Wang_2}, the goal was to explore the computation of multi-valued solutions in the semi-classical setting by means of the level-set method. Here we consider only classical solutions.
The first test case is referred to as a five-branch test case (because it corresponds to the occurrence of a five-branch multi-valued solution in the semi-classical setting).
The second test is a seven-branch test case (for the same reason).
The initial densities of both test-cases are bumps with a Gaussian shape.
This shape leads to a potential well which generates an induced electric field. This electric field in turn contracts the density bump leading to a positive feedback amplification.
This effect can be further amplified by setting up appropriate initial velocities.
In the dispersive regime the amplification of the density peak can lead to singularities, whereas in the hydrodynamic regime
the density spreads out in the entire computational domain and its profile remains smooth.
The emergence of singularities has been investigated by Liu and Wang \cite{Liu_Wang_1, Liu_Wang_2}.
The authors compute multi-valued solutions for similar test cases but with the standard Poisson equation (without the exponential term coming from the Boltzmann relation), which allows the computation of analytical solutions.
Here, no analytical solution is available but the two schemes (EPB and REPB) are tested one against each other and against a numerically computed reference solution on a very fine mesh. Due to the singularities appearing when $ \lambda = 1 $ the numerical error is computed with the potential, which remains finite in every situation. The tests are run with $ \lambda = 1 $ and $ \lambda = 10^{-2} $ in order to explore both the dispersive and hydrodynamic regimes. The results would be similar if $\lambda$ was further reduced.
\subsubsection{Five-branch solution}
\label{subsubsec_test1}
The initial condition for this first test-case is given by
\begin{eqnarray*}
& & n_{0} = \frac{1}{\pi} e^{ -(x-\pi)^2 }, \\
& & u_{0} = \sin^{3}x.
\end{eqnarray*}
The computational domain is $ [0,2 \pi] $.
The initial density and velocity appear in figures \ref{ct5b_0_dv} and \ref{ct5b_2_dv}, together
with the numerical solutions computed at time $ t = 1 $ by means of the EPB and REPB-based schemes.
Table \ref{error_ct5b_tab} shows the numerical error on the potential by comparison against a solution computed with the classical scheme
on a fine grid.
\begin{figure}
\caption{Five-branch test case: initial density and velocity, together with the numerical solutions at time $ t = 1 $ computed with the EPB and REPB-based schemes.}
\label{ct5b_0_dv}
\end{figure}
\begin{figure}
\caption{Five-branch test case: initial density and velocity, together with the numerical solutions at time $ t = 1 $ computed with the EPB and REPB-based schemes.}
\label{ct5b_2_dv}
\end{figure}
\begin{table}[hbtp]
\begin{tabular}{|c|c|c|c|c|}
\hline
$ N $ & $ \varepsilon_{EPB}(\phi), \lambda = 1 $ & $ \varepsilon_{REPB}(\phi), \lambda = 1 $ &
$ \varepsilon_{EPB}(\phi), \lambda = 10^{-2} $ & $ \varepsilon_{REPB}(\phi), \lambda = 10^{-2} $ \\
\hline
\hline
$8000$ & $ 5 \times 10^{-5} $&$ 1.4 \times 10^{-4} $&$ 4.0 \times 10^{-4} $&$ 7.6 \times 10^{-4} $ \\
$4000$ & $ 1.5 \times 10^{-4} $&$ 2.8 \times 10^{-4} $&$ 1.0 \times 10^{-3} $&$ 1.8 \times 10^{-3} $ \\
$2000$ & $ 3.4 \times 10^{-4} $&$ 5.4 \times 10^{-3} $&$ 2.3 \times 10^{-3} $&$ 3.7 \times 10^{-3}$ \\
\hline
\end{tabular}
\caption{\label{error_ct5b_tab} Five-branch test case:
comparison of the error in $ L^{\infty} $ norm
on the potential for various grid sizes and dimensionless Debye length $ \lambda $
using the EPB and REPB-based schemes.}
\end{table}
\subsubsection{Seven-branch solution}
\label{subsubsec_test2}
The initial condition for this second test-case is given by
\begin{eqnarray*}
& & n_{0} = \frac{1}{\pi} e^{ -(x-\pi)^2 }, \\
& & u_{0} = \sin(2x) \cos x.
\end{eqnarray*}
The initial density and velocity appear in figures \ref{ct7b_0_dv} and \ref{ct7b_2_dv}, together
with the numerical solutions computed at time $ t = 1 $ using the EPB and REPB-based schemes.
Table \ref{error_ct7b_tab} shows the numerical error on the potential by comparison against a solution computed with the classical scheme
on a fine grid.
\begin{figure}
\caption{Seven-branch test case: initial density and velocity, together with the numerical solutions at time $ t = 1 $ computed with the EPB and REPB-based schemes.}
\label{ct7b_0_dv}
\end{figure}
\begin{figure}
\caption{Seven-branch test case: initial density and velocity, together with the numerical solutions at time $ t = 1 $ computed with the EPB and REPB-based schemes.}
\label{ct7b_2_dv}
\end{figure}
\begin{table}[hbtp]
\begin{tabular}{|c|c|c|c|c|}
\hline
$ N $ & $ \varepsilon_{EPB}(\phi), \lambda = 1 $ & $ \varepsilon_{REPB}(\phi), \lambda = 1 $ &
$ \varepsilon_{EPB}(\phi), \lambda = 10^{-2} $ & $ \varepsilon_{REPB}(\phi), \lambda = 10^{-2} $ \\
\hline
\hline
$8000$ & $ 4.6 \times 10^{-5} $&$ 4.5 \times 10^{-4} $&$ 2.3 \times 10^{-4} $&$ 7.6 \times 10^{-4} $ \\
$4000$ & $ 1.4 \times 10^{-4} $&$ 7.4 \times 10^{-4} $&$ 6.7 \times 10^{-4} $&$ 1.6 \times 10^{-3} $ \\
$2000$ & $ 3.2 \times 10^{-4} $&$ 1.2 \times 10^{-3} $&$ 1.29 \times 10^{-3} $&$ 3.5 \times 10^{-3}$ \\
\hline
\end{tabular}
\caption{\label{error_ct7b_tab} Seven-branch test case: comparison of the error in $ L^{\infty} $ norm
on the potential for various grid sizes and dimensionless Debye length $ \lambda $
using the classical and reformulated Euler-Poisson-Boltzmann schemes.}
\end{table}
\subsubsection{Analysis of the results for the five and seven branch test cases}
\label{subsubsec_dispersive_analysis}
In both the dispersive ($\lambda = 1 $) and hydrodynamic ($\lambda = 10^{-2}$) regimes, the two schemes give similar results.
In figures \ref{ct5b_0_dv}, \ref{ct5b_2_dv}, \ref{ct7b_0_dv} and \ref{ct7b_2_dv}, the overlapping curves for the solution at time $ t = 1 $
computed with the EPB and REPB-based schemes confirm this similar behavior.
Some differences can nevertheless be exhibited through the numerical convergence study.
Tables \ref{error_ct5b_tab} and \ref{error_ct7b_tab} show the same differences between the two schemes as in the previously discussed soliton test case.
The error of the reformulated scheme is slightly larger than that of the classical scheme,
and this difference is more obvious when $ \lambda = 1 $.
\setcounter{equation}{0}
\section{Conclusion}
\label{sec_conclu}
In this paper, we have analyzed two schemes for the Euler-Poisson-Boltzmann (EPB) model of plasma physics, and compared them in different regimes characterized by different values of the dimensionless Debye length $\lambda$. The dispersive regime corresponds to $\lambda = O(1)$ while the hydrodynamic regime is characterized by $\lambda \ll 1$. When $\lambda \to 0$, the EPB model formally converges to the Isothermal Compressible Euler (ICE) model. The first scheme we have considered is based on the original EPB formulation of the model. The second one uses a reformulation (referred to as the REPB model) in which the model more explicitly appears as a singular perturbation of the ICE Model.
We have provided a stability analysis of the two schemes, showing that both schemes are stable in both the dispersive and hydrodynamic regimes, with stability constraints on the time and mesh steps which are independent of $\lambda$ when $\lambda \to 0$. Finally, we have tested them on three different one-dimensional test problems. The first test problem, the soliton test, provides an analytical solution in the dispersive regime. The second test problem, the Riemann problem with two expanding shock waves, is suitable to explore the hydrodynamic regime. Finally, the third test problem involves singularity formation in the dispersive regime.
We have concluded that both schemes have similar behavior in the dispersive regime (with a slightly increased, but perfectly acceptable, numerical diffusion in the case of the REPB-based schemes). By contrast, in the hydrodynamic regime, the EPB-based schemes develop oscillations and singularities which, in under-resolved situations (i.e. when the time and space steps are too large to resolve the spatio-temporal variations of the solution), prevent any grid convergence of the solution. The REPB-based scheme, on the other hand, captures well the entropic solution of the ICE model, which provides a good approximation of the weak limit of the dispersive (oscillatory) solutions of the EPB model in the small $\lambda$ regime.
Future work concerns the extension of this analysis to two or more space dimensions, the development of second-order schemes, and further analytical investigation of the accuracy and stability of the schemes in both regimes.
\end{document}
\begin{document}
\rightline{\small\tt Final version, October 12, 2007}
\title[Inequalities for products of polynomials I]
{Inequalities for products of polynomials I}
\author{I. E. Pritsker and S. Ruscheweyh}
\address{Department of Mathematics, 401 Mathematical Sciences, Oklahoma State
University, Stillwater, OK 74078-1058, U.S.A.}
\email{[email protected]}
\address{Institut f\"ur Mathematik, Universit\"at W\"urzburg, Am Hubland,
97074 W\"urzburg, Germany}
\email{[email protected]}
\thanks{Research of I.P. was partially supported by the National Security
Agency (grant H98230-06-1-0055), and by the Alexander von Humboldt
Foundation. S.R. acknowledges partial support from the
German-Israeli Foundation (grant G-809-234.6/2003), from FONDECYT
(grants 1040366 and 7040069) and from DGIP-UTFSM (grant 240104). }
\subjclass[2000]{Primary 30C10; Secondary 30C85, 31A15}
\keywords{Polynomials, products, factors, uniform norm, logarithmic
capacity, equilibrium measure, subharmonic function, Fekete points}
\begin{abstract}
We study inequalities connecting the product of uniform norms of
polynomials with the norm of their product. This circle of problems
include the Gelfond-Mahler inequality for the unit disk and the
Kneser-Borwein inequality for the segment $[-1,1]$. Furthermore, the
asymptotically sharp constants are known for such inequalities over
arbitrary compact sets in the complex plane. It is shown here that
this best constant is smallest (namely: 2) for a disk. We also
conjecture that it takes its largest value for a segment, among all
compact connected sets in the plane.
\end{abstract}
\maketitle
\section{The problem and its history} \label{sec1}
Let $E$ be a compact set in the complex plane ${\mathbb{C}}$. For a function
$f:E\rightarrow \mathbb{C}$ define the
uniform (sup) norm as follows:
$$\| f \|_E = \sup_{z \in E} |f(z)|.$$
Clearly $\norm{f_1f_2}_E \le \norm{f_1}_E \norm{f_2}_E,$ but this
inequality is not reversible, in general, not even with a constant
factor in front of the right hand side. Indeed, $\norm{f_1}_E
\norm{f_2}_E \le C \norm{f_1f_2}_E$ does not hold for functions with
disjoint supports in $E$, for example. However, the situation is
quite different for algebraic polynomials $\{ p_k (z) \}_{k =1}^m$
and their product $p(z) := \prod_{k =1}^m p_k (z).$ Polynomial
inequalities of the form
\begin{equation} \label{1.1}
\prod_{k =1}^m \| p_k \|_E \leq C \| p \|_E,
\end{equation}
exist and are readily available. One of the first results in this
direction is due to Kneser \cite{Kn}, for $E = [-1,1]$ and $m =2$
(see also Aumann \cite{Au}), who proved that
\begin{equation} \label{1.2}
\| p_1 \|_{[-1,1]} \|p_2 \|_{[-1,1]} \leq K_{\ell ,n} \| p_1 p_2
\|_{[-1,1]}, \quad \deg p_1 = \ell,\ \deg p_2 =n-\ell,
\end{equation}
where
\begin{equation} \label{1.3}
K_{\ell ,n} := 2^{n -1} \prod_{k =1}^{\ell} \left( 1 + \cos \frac{2k
-1}{2n} \pi \right) \prod_{k =1}^{n - \ell} \left( 1 + \cos \frac{2k
-1}{2n} \pi \right).
\end{equation}
Note that equality holds in (\ref{1.2}) for the Chebyshev
polynomial\\ $t(z) = \cos n \arccos z = p_1 (z) p_2 (z)$, with a
proper choice of the factors $p_1 (z)$ and $p_2 (z)$. P. B. Borwein
\cite{Bor} generalized this to the multifactor inequality
\begin{equation} \label{1.4}
\prod_{k =1}^m \| p_k \|_{[-1,1]} \leq 2^{n -1} \prod_{k =1}^{[
\frac{n}{2} ]} \left( 1 + \cos \frac{2k -1}{2n} \pi \right)^2 \| p
\|_{[-1,1]}.
\end{equation}
He also showed that
\begin{equation} \label{1.5}
2^{n -1} \prod_{k =1}^{[ \frac{n}{2} ]} \left( 1 + \cos \frac{2k
-1}{2n} \pi \right)^2 \sim (3.20991 \ldots )^n \mbox{ as } n
\rightarrow \infty.
\end{equation}
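The asymptotic value in (\ref{1.5}) is easy to check numerically; the following short Python sketch (not part of the original argument, and working with logarithms to avoid overflow for large $n$) computes the $n$-th root of the constant in (\ref{1.4}).
\begin{verbatim}
import numpy as np

def borwein_root(n):
    # n-th root of 2^(n-1) * prod_{k=1}^{[n/2]} (1 + cos((2k-1)pi/(2n)))^2.
    k = np.arange(1, n//2 + 1)
    log_K = (n - 1)*np.log(2.0) \
            + 2.0*np.sum(np.log1p(np.cos((2*k - 1)*np.pi/(2*n))))
    return np.exp(log_K / n)

for n in (10, 100, 1000, 10000):
    print(n, borwein_root(n))   # approaches 3.20991... as n grows
\end{verbatim}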
A different version of inequality (\ref{1.1}) for $E = D$, where $D
:= \{ w: |w| \le 1 \}$ is the closed unit disk, was considered by
Gelfond \cite[p. 135]{Ge} in connection with the theory of
transcendental numbers:
\begin{equation} \label{1.6}
\prod_{k =1}^m \| p_k \|_{D} \leq e^n \| p \|_{D} .
\end{equation}
The latter inequality was improved by Mahler \cite{Ma1}, who
replaced $e$ by $2$:
\begin{equation} \label{1.7}
\prod_{k =1}^m \| p_k \|_{D} \leq 2^n \| p \|_{D} .
\end{equation}
It is easy to see that the base $2$ cannot be decreased, if $m =n$
and $n \rightarrow \infty$. However, (\ref{1.7}) has recently been
further improved in two directions. D. W. Boyd \cite{Boy1, Boy2}
showed that, given the number of factors $m$ in (\ref{1.7}), one has
\begin{equation} \label{1.8}
\prod_{k =1}^m \| p_k \|_{D} \leq (C_m)^n \| p \|_{D},
\end{equation}
where
\begin{equation} \label{1.9}
C_m := \exp \left( \frac{m}{\pi} \int_0^{\pi/m} \log \left(2 \cos
\frac{t}{2}\right) dt \right)
\end{equation}
is asymptotically best possible for {\em each fixed} $m$, as $n
\rightarrow \infty$. Kro\'{o} and Pritsker \cite{KP} showed that,
for any $m \leq n,$
\begin{equation} \label{1.10}
\prod_{k =1}^m \| p_k \|_{D} \leq 2^{n -1} \| p \|_{D},
\end{equation}
where equality holds in (\ref{1.10}) for {\it each} $n \in {\mathbb{N}}$,
with $m =n$ and $p(z) = z^n -1$.
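For the reader who wishes to evaluate Boyd's constants $C_m$ of (\ref{1.8})-(\ref{1.9}), a short Python sketch using numerical quadrature (illustrative only, not part of the original argument) is given below; it returns $C_1 = 1$ and values increasing towards Mahler's constant $2$ as $m$ grows.
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

def boyd_constant(m):
    # C_m = exp( (m/pi) * int_0^{pi/m} log(2 cos(t/2)) dt ), cf. (1.9).
    val, _ = quad(lambda t: np.log(2.0*np.cos(t/2.0)), 0.0, np.pi/m)
    return np.exp(m*val/np.pi)

for m in (1, 2, 3, 10, 100):
    print(m, boyd_constant(m))
\end{verbatim}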
Inequalities (\ref{1.2})-(\ref{1.10}) clearly indicate that the
constant $C$ in (\ref{1.1}) grows exponentially fast with $n$, with
the base for the exponential depending on the set $E$. A natural
general problem arising here is to find the {\it smallest} constant
$M_E > 0,$ such that
\begin{equation} \label{1.11}
\prod_{k =1}^m \| p_k \|_E \leq M_E^n \| p \|_E
\end{equation}
for arbitrary algebraic polynomials $\{ p_k (z) \}_{k =1}^m$ with
complex coefficients, where $p(z) = \prod_{k =1}^m p_k (z)$ and $n =
\deg p$. The solution of this problem is based on the logarithmic
potential theory (cf. \cite{Ts} and \cite{Ra}). Let ${\rm cap}(E)$
be the {\it logarithmic capacity} of a compact set $E \subset {\mathbb{C}}$.
For $E$ with ${\rm cap}(E)>0$, denote the {\it equilibrium measure}
of $E$ by $\mu_E$. We remark that $\mu_E$ is a positive unit Borel
measure supported on $\partial E$ (see \cite[p. 55]{Ts}). Define
\begin{equation} \label{1.12}
d_E(z) := \max_{t \in E} |z -t|, \qquad z \in {\mathbb{C}},
\end{equation}
which is clearly a positive and continuous function in ${\mathbb{C}}$. It is
easy to see that the logarithm of this distance function is
subharmonic in $\mathbb{C}.$ Furthermore, it has the following integral
representation
\[
\log d_E(z) = \int \log |z -t| d \sigma_E(t), \quad z \in {\mathbb{C}} ,
\]
where $\sigma_E$ is a positive unit Borel measure in ${\mathbb{C}}$ with
unbounded support, see Lemma 5.1 of \cite{Pr1} and \cite{LP01}. For
further in-depth analysis of the representing measure $\sigma_E$, we
refer to the recent paper of Gardiner and Netuka \cite{GN}. This
integral representation is the key fact used by the first author to
prove the following result \cite{Pr1}.
\begin{theorem} \label{thm1.1}
Let $E \subset {\mathbb{C}}$ be a compact set, ${\rm cap} (E) >0$. Then the
best constant $M_E$ in {\rm (\ref{1.11})} is given by
\begin{equation} \label{1.13}
M_E = \frac{\exp\left(\displaystyle\int \log d_E(z) d \mu_E (z)\right)}{{\rm
cap} (E)} .
\end{equation}
\end{theorem}
Theorem \ref{thm1.1} is applicable to any compact set with a
connected component consisting of more than one point (cf. \cite[p.
56]{Ts}). In particular, if $E$ is a continuum, i.e., a connected
set, then we obtain a simple universal bound for $M_E$ \cite{Pr1}:
\begin{corollary} \label{cor1.2}
Let $E \subset \mathbb{C}$ be a bounded continuum (not a single point). Then
we have
\begin{equation} \label{1.14}
M_E \leq \frac{{\rm diam}(E)}{{\rm cap} (E)} \leq 4,
\end{equation}
where ${\rm diam}(E)$ is the Euclidean diameter of the set $E$.
\end{corollary}
On the other hand, for non-connected sets $E$ the constants $M_E$ can be
arbitrarily large. For
example, consider $E_k=[-\sqrt{k+4},-\sqrt{k}] \cup
[\sqrt{k},\sqrt{k+4}]$, so that cap$(E_k)=1$ \cite{Ra} and
\[
M_{E_k} = \exp\left(\int \log d_{E_k}(z)\,d\mu_{E_k}(z)\right) \ge
e^{\log (2\sqrt{k})} \to \infty\quad \mbox{as } k\to\infty.
\]
For the closed unit disk $D$, we have that ${\rm cap}(D) =1$
\cite[p. 84]{Ts} and that
\begin{equation} \label{1.15}
d\mu_{D} = \frac{d\theta}{2 \pi},
\end{equation}
where $d \theta$ is the arclength on $\partial D$. Thus Theorem
\ref{thm1.1} yields
\begin{equation} \label{1.16}
M_{D} = \exp\left(\frac{1}{2 \pi} \int_0^{2 \pi} \log d_{D}(e^{i
\theta})\ d \theta\right) = \exp\left(\frac{1}{2 \pi} \int_0^{2 \pi}
\log 2\ d \theta\right) =2,
\end{equation}
so that we immediately obtain Mahler's inequality (\ref{1.7}).
If $E = [-1,1]$ then ${\rm cap}([-1,1]) = 1/2$ and
\begin{equation} \label{1.17}
d\mu_{[-1,1]} = \frac{dx}{\pi \sqrt{1 -x^2}} , \quad x \in [-1,1],
\end{equation}
which is the Chebyshev (or arcsin) distribution (see \cite[p.
84]{Ts}). Using Theorem \ref{thm1.1}, we obtain
\begin{eqnarray} \label{1.18}
M_{[-1,1]} & = & 2\exp\left(\frac{1}{\pi} \int_{-1}^1 \frac{\log
d_{[-1,1]}(x)}{\sqrt{1 -x^2}} dx\right) = 2\exp\left(\frac{2}{\pi}
\int_0^1 \frac{\log (1 +x)}{\sqrt{1 -x^2}} dx \right) \nonumber \\
& = & 2\exp\left(\frac{2}{\pi} \int_0^{\pi/2} \log (1 + \sin t) dt
\right) \approx 3.2099123,
\end{eqnarray}
which gives the asymptotic version of Borwein's inequality
(\ref{1.4})-(\ref{1.5}).
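Both numerical values in (\ref{1.16}) and (\ref{1.18}) are easily confirmed by quadrature; a short Python sketch (illustrative only) is:
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

# M_[-1,1] = 2 exp( (2/pi) * int_0^{pi/2} log(1 + sin t) dt ), cf. (1.18).
I, _ = quad(lambda t: np.log1p(np.sin(t)), 0.0, np.pi/2.0)
print(2.0 * np.exp(2.0 * I / np.pi))   # about 3.2099123, versus M_D = 2 from (1.16)
\end{verbatim}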
Considering the above analysis of Theorem \ref{thm1.1}, it is
natural to conjecture that the sharp universal bounds for $M_E$ are
given by
\begin{equation} \label{1.19}
2=M_{D} \le M_E \le M_{[-1,1]} \approx 3.2099123,
\end{equation}
for any bounded non-degenerate continuum $E$, see \cite{Pr3}.
It follows directly from the definition that $M_E$ is invariant with
respect to the similarity transformations of the plane. Thus we can
normalize the problem by setting ${\rm cap} (E)=1$. Thus,
equivalently, we want to find the maximum and the minimum of the
functional
\begin{equation} \label{1.20}
\tau(E):=\int \log d_E(z) d \mu_E (z)
\end{equation}
over all compact connected sets $E$ in the plane satisfying the
above normalization. These questions are addressed in Section 2 of
the paper. Section 3 discusses a more refined version of our problem
on the best constant in \eqref{1.1}. All proofs are given in Section
4.
In the forthcoming paper \cite{PR}, we consider various improved
bounds of the constant $M_E$, e.g., bounds for rotationally
symmetric sets. From a different perspective, the results of Boyd
\eqref{1.8}-\eqref{1.9} suggest that for some sets the constant
$M_E$ can be replaced by a smaller one, if the number of factors is
fixed. We characterize such sets in \cite{PR}, and find the improved
constant.
The problems considered in this paper have many applications in
analysis, number theory and computational mathematics. We mention
specifically applications in transcendence theory (see Gelfond
\cite{Ge}), and in designing algorithms for factoring polynomials
(see Boyd \cite{Boy3} and Landau \cite{La}). A survey of the results
involving norms different from the sup norm (e.g., Bombieri norms)
can be found in \cite{Boy3}. For polynomials in several variables,
see the results of Mahler \cite{Ma2} for the polydisk, of Avanissian
and Mignotte \cite{AM} for the unit ball in ${\mathbb{C}}^k$. Also, see
Beauzamy and Enflo \cite{BeEn}, and Beauzamy, Bombieri, Enflo and
Montgomery \cite{BBEM} for multivariate polynomials in different
norms.
{\bf Acknowledgements.} The authors wish to express their gratitude
to Richard Laugesen for several helpful discussions about these
problems. Alexander Solynin communicated to the first author a
sketch of proof for the inequality $M_E \ge 2$ for connected sets.
We would like to thank him for the kind permission to use his
argument in the proof of Theorem \ref{thm2.5}. This paper was
written while the first author was visiting the University of
W\"urzburg as a Humboldt Foundation Fellow. He would like to thank
the Department of Mathematics and the Function Theory research group
for their hospitality.
\section{Sharp bounds for the constant $M_E$} \label{sec2}
We study bounds for the constant $M_E$ in this section, where
$E\subset\mathbb{C}$ is a compact set satisfying ${\rm cap} (E)>0.$ Our main
goal here is to prove \eqref{1.19}. It is convenient to first give
some general observations on the properties of $M_E$.
\begin{theorem} \label{thm2.1}
Let $I \subset E$ be compact sets in $\mathbb{C}$, ${\rm cap} (I) >0$.
Denote the unbounded components of $\overline\mathbb{C}\setminus E$ and
$\overline\mathbb{C}\setminus I$ by $\Omega_E$ and $\Omega_I$. If $d_E(z)=d_I(z)$
for all $z \in \partial \Omega_I$ then $M_E \le M_I,$ with equality
holding only when $\textup{cap}(\Omega_I\setminus \Omega_E)=0.$
\end{theorem}
This theorem gives several interesting consequences. In particular,
we show that if the set $E$ is contained in a disk whose diameter
coincides with the diameter of $E$ then its constant $M_E$ does not
exceed that of a segment. Thus segments indeed maximize $M_E$ among
such sets. Denote the closed disk of radius $r$ centered at $z$ by
$D(z,r).$
\begin{corollary} \label{cor2.2}
Let $z,w\in E$ satisfy $\textup{diam}\,E=|z-w|$ and $[z,w]\subset
E.$ If $E\subset
D\left(\frac{z+w}{2},\frac{\textup{diam}\,E}{2}\right)$ then $M_E
\le M_{[z,w]} = M_{[-2,2]}.$
\end{corollary}
The next result shows that the constant decreases when the set is
enlarged in a certain way.
\begin{corollary} \label{cor2.3}
Let $E^*:=\bigcap_{z\in \partial\Omega_E} D(z,d_E(z))$, where $E
\subset {\mathbb{C}}$ is compact, ${\rm cap} (E) >0$. If $H$ is a compact
set such that $E\subset H\subset E^*,$ then $M_H \le M_E.$ Equality
holds if and only if $\textup{cap}(\Omega_E\setminus \Omega_H)=0.$
\end{corollary}
Let conv$(H)$ be the convex hull of $H$. The operation of taking the
convex hull of a set satisfies the assumption of Corollary
\ref{cor2.3} (or Theorem \ref{thm2.1}), which gives
\begin{corollary} \label{cor2.4}
Let $V\subset\mathbb{C}$ be a compact set, ${\rm cap}(V)>0.$ If
$H:=\overline\mathbb{C}\setminus\Omega_V$ is not convex, then $M_{{\rm conv}(H)} <
M_H.$
\end{corollary}
The above results help us to show that the minimum of $M_E$ is
attained for the closed unit disk $D,$ among all sets of positive
capacity (connected or otherwise).
\begin{theorem} \label{thm2.5}
Let $E \subset {\mathbb{C}}$ be an arbitrary compact set, ${\rm cap} (E)
>0$. Then $M_E\ge 2,$ where equality holds if and only if
$\overline\mathbb{C}\setminus\Omega_E$ is a closed disk.
\end{theorem}
In other words, $M_E=2$ only for sets whose polynomial convex hull
is a disk. This may also be described by saying that $M_E = 2$ if
and only if $\partial U \subset E \subset U$, where $U$ is a closed
disk.
Proving that the maximum of $M_E$ for {\em arbitrary} continua is
attained for a segment is a more difficult problem. In fact, it is
related to some old open problems on the moments of the equilibrium
measure (or circular means of conformal maps), see P\'olya and
Schiffer \cite{PS}, and Pommerenke \cite{Po}. In particular, we use
the results of \cite{PS} and \cite{Po} to show that
\begin{theorem} \label{thm2.6}
Let $E \subset {\mathbb{C}}$ be a connected compact set, ${\rm cap} (E)
>0$.\\
\textup{(i)} If the center of mass $c:=\int z\,d\mu_E(z)$ for
$\mu_E$ belongs to $E$, then
\begin{align} \label{2.2}
M_E < 2+4.02/\pi \approx 3.279606.
\end{align}
\textup{(ii)} If $E$ is convex then
\begin{align} \label{2.3}
M_E < 2+4/\pi \approx 3.27324.
\end{align}
\end{theorem}
This should be compared with $M_{[-2,2]} = M_{[-1,1]} \approx
3.2099123.$
After this paper had been written, a new related manuscript
\cite{BLP} appeared. That manuscript contains a proof of our
conjecture $M_E \le M_{[-2,2]}$ for centrally symmetric continua, as
well as another quite general conjecture which, if true, would imply that $M_E \le
M_{[-2,2]}$ holds for all continua.
\section{Refined problem} \label{sec5}
The constant $M_E$ represents the base of rather crude exponential
asymptotic for the constant in inequality \eqref{1.1}. A more
refined question is to find the sharp constant attained with
equality. Such constants are known in the case of a segment, see
\eqref{1.4} and \cite{Bor}; and in the case of a disk, see
\eqref{1.10} and \cite{KP}. Let $E$ be any compact set in the plane,
and let $\prod_{k=1}^m p_k(z) = \prod_{j=1}^n (z-z_j),$ where
$p_k(z)$ are arbitrary monic polynomials with complex coefficients.
Define the constant
\begin{align} \label{5.1}
C_E(n) := \sup_{p_k} \frac{\displaystyle\prod_{k=1}^m \norm{p_k}_E}
{\norm{\displaystyle\prod_{k=1}^m p_k}_E} = \sup_{z_j\in\mathbb{C}}
\frac{\displaystyle\prod_{j=1}^n \norm{z-z_j}_E} {\norm{\displaystyle\prod_{j=1}^n
(z-z_j)}_E}.
\end{align}
If cap$(E)>0$ then it follows from Theorem \ref{thm1.1} that $1 \le
C_E(n) \le M_E^n.$ The refined version of our conjecture in
\eqref{1.19} is as follows:
\begin{align} \label{5.2}
2^{n-1}=C_D(n) \le C_E(n) \le C_{[-2,2]}(n) = 2^{n -1} \prod_{k
=1}^{[n/2]} \left( 1 + \cos \frac{2k -1}{2n} \pi \right)^2
\end{align}
for any connected compact set $E$ of positive capacity.
\section{Proofs} \label{sec6}
\begin{proof}[Proof of Theorem \ref{thm2.1}]
Since $I\subset E$, we have that $\textup{cap}(E)\ge\textup{cap}(I)
> 0$. Let $g_E(z,\infty)$ and $g_I(z,\infty)$ be the Green's
functions for $\Omega_E$ and $\Omega_I$, with poles in infinity. We
follow the standard convention by setting $g_E(z,\infty)=0,\
z\not\in\overline\Omega_E$ and $g_I(z,\infty)=0,\ z\not\in\overline\Omega_I.$ It
follows from the maximum principle that $g_E(z,\infty) \le
g_I(z,\infty)$ for all $z\in\mathbb{C}.$ Furthermore, this inequality is
strict in $\Omega_E$, unless $\textup{cap}(\Omega_I\setminus
\Omega_E)=0.$
Using the integral representation for $d_E(z)$ from Lemma 5.1 of
\cite{Pr1} (see also \cite{LP01} and \cite{GN}) and the Fubini
theorem, we obtain that
\begin{align*}
\log M_E &= \int \log d_E(z)\,d\mu_E(z) - \log \textup{cap}(E) \\ &=
\int \int \log |z-t|\,d\sigma_E(t) d\mu_E(z) - \log \textup{cap}(E)
\\ &= \int \left( \int \log |z-t|\,d\mu_E(z) - \log \textup{cap}(E)
\right) d\sigma_E(t) = \int g_E(t,\infty)\,d\sigma_E(t),
\end{align*}
where the last equality follows from the well known identity\\
$g_E(t,\infty)=\int \log |z-t|\,d\mu_E(z) - \log \textup{cap}(E)$
\cite{Ra}. It is clear that
\[
\int g_E(t,\infty)\,d\sigma_E(t) \le \int
g_I(t,\infty)\,d\sigma_E(t),
\]
with equality possible if and only if
$\textup{cap}(\Omega_I\setminus \Omega_E)=0.$ Indeed, if we have
equality in the above inequality, then $g_E(z,\infty) =
g_I(z,\infty)$ for all $z\in\textup{supp}\,\sigma_E.$ But
$\textup{supp}\,\sigma_E$ is unbounded, so that $g_E(z,\infty) =
g_I(z,\infty)$ in $\Omega_E$ by the maximum principle. Hence we
obtain that
\begin{align*}
\log M_E &\le \int g_I(t,\infty)\,d\sigma_E(t) = \int \left( \int
\log |z-t|\,d\mu_I(z) - \log \textup{cap}(I) \right) d\sigma_E(t)
\\ &= \int \log d_E(z)\,d\mu_I(z) - \log \textup{cap}(I) =
\int \log d_I(z)\,d\mu_I(z) - \log \textup{cap}(I) \\ &= \log M_I,
\end{align*}
with equality if and only if $\textup{cap}(\Omega_I \setminus
\Omega_E)=0.$ Note that we used supp$\,\mu_I \subset
\partial\Omega_I,$ so that $d_E(z)=d_I(z)$ for
$z\in \textup{supp}\,\mu_I.$
\end{proof}
\begin{proof}[Proof of Corollary \ref{cor2.2}]
Let $I=[z,w]$ be the segment connecting the points $z$ and $w$,
i.e., the common diameter of $E$ and the disk containing it. Observe
that we have $d_E(t)=d_I(t)$ for all $t\in\partial\Omega_I=I$ under
the stated geometric conditions. Since all assumptions of Theorem
\ref{thm2.1} are satisfied, we obtain that $M_E\le M_{[z,w]} =
M_{[-2,2]},$ where the last equality follows from the invariance
with respect to the similarity transformations of the plane.
\end{proof}
\begin{proof}[Proof of Corollary \ref{cor2.3}]
Observe that $E\subset D(z,d_E(z))$ for any $z\in\mathbb{C}.$ Hence
$E\subset E^*.$ Since $E\subset H \subset E^*,$ we immediately
obtain that $d_E(z) \le d_H(z) \le d_{E^*}(z),\ z\in\mathbb{C}.$ On the
other hand, the definition of $E^*$ gives that $d_E(z) = d_{E^*}(z)$
for all $z\in\partial\Omega_E.$ Therefore $d_E(z) = d_{H}(z)$ for
all $z\in\partial\Omega_E,$ and the result follows from Theorem
\ref{thm2.1}.
\end{proof}
\begin{proof}[Proof of Corollary \ref{cor2.4}]
We apply Theorem \ref{thm2.1} again, with $I=H$ and
$E=\textup{conv}(H).$ It was shown in \cite{LP01} that $d_H(z) =
d_{\textup{conv}(H)}(z)$ for all $z\in\mathbb{C}$, where $H$ is an arbitrary
compact set. Since $H$ is not convex in our case, we obtain that
$\textup{cap}(\Omega_I\setminus \Omega_E)>0$ and $M_E < M_I.$
\end{proof}
For the proof of Theorem~\ref{thm2.5} we need a special case of the
following lemma, which may be of some independent interest. Let
$\Delta:=\{w:|w|>1\}$, and $\mathbb{D}:=\{z: |z|<1\}$ the unit disk.
\begin{lemma} \label{lem6.1}
Let $\Gamma$ be a Jordan domain and let $\Psi(w):=
cw+\sum_{k=0}^{\infty}a_kw^{-k}$ be a conformal map of $\Delta$ onto
$\Omega_\Gamma$. Furthermore assume that
\begin{equation}
\label{eq:1}
\forall x,z\in\partial\Delta:\quad |\Psi(z)-\Psi(x)|\leq |\Psi(z)-\Psi(-z)|.
\end{equation}
Then $\Gamma$ is a disk.
\end{lemma}
\begin{proof}
First note that by Carath\'eodory's theorem \cite[p. 18]{Po92} $\Psi$
extends to a homeomorphism of $\overline{\Delta}$, so that \eqref{eq:1}
makes sense. Also there is no loss of generality in assuming $0\in\Gamma$,
so that $\Psi(z)\neq 0$ in $\overline{\Delta}$. Let
$$
g(z):=\frac{1}{\Psi(1/z)},\quad z\in\overline{\mathbb{D}}.
$$
Then $g(z)=z/c+\sum_{k=2}^{\infty}b_kz^k$ is a homeomorphism of $\overline{\mathbb{D}}$
onto the closure of the Jordan domain $\Gamma^*$, the interior
domain of the Jordan curve $1/\partial\Gamma$. Note that $g(0)=0,
g'(0)=1/c\neq0$.
Let $1/z\in\partial\mathbb{D}$, and in \eqref{eq:1} we replace $1/x\in\partial\mathbb{D}$ by $-1/(xz)$, which
is also in $\partial\mathbb{D}$. Condition \eqref{eq:1} then becomes
$$
1\geq\left|
\frac{\frac{1}{g(z)}-\frac{1}{g(-xz)}}{\frac{1}{g(z)}-\frac{1}{g(-z)}}
\right|=
\left|
\frac{xg(-z)}{g(-xz)}\frac{g(-xz)-g(z)}{g(-z)-g(z)}
\right|,\quad x,z\in\partial\mathbb{D}.
$$
Note that the function
$$
F(x,z):=\frac{xg(-z)}{g(-xz)}\frac{g(-xz)-g(z)}{g(-z)-g(z)}
$$
is analytic in $(x,z)\in\mathbb{D}^2$, and by the maximum principle, applied
to both variables separately, we find that
$$
|F(x,z)|\leq1, \quad x,z\in\overline{\mathbb{D}}.
$$
Now fix $z_0$ with $0<|z_0|<1$. Then $x\mapsto F(x,z_0)$ is analytic
in $\mathbb{D}$, satisfies $ |F(x,z_0)|\leq1$ for $x\in\mathbb{D}$, and, in
addition, $F(1,z_0)=1$. The Julia-Wolff Lemma \cite[p. 82]{Po92}
then says that $F'(1,z_0)>0$, or
$$
1+\frac{-z_0g'(-z_0)}{g(-z_0)}\frac{g(z_0)}{g(-z_0)-g(z_0)}>0.
$$
Obviously this must be true for any $z_0$, and so, by the identity
principle, we are left with the relation
$$
\frac{-zg'(-z)}{g(-z)}\frac{g(z)}{g(-z)-g(z)}\equiv\alpha,\quad
z\in\mathbb{D},
$$
where $\alpha>-1$ is some real constant. Letting $z\rightarrow 0$,
we find $\alpha=-\frac{1}{2}$. Hence we are left with the
difference-differential equation
\begin{equation}
\label{eq:2}
\frac{zg'(z)}{g(z)}\frac{g(-z)}{g(-z)-g(z)}=\frac{1}{2},\quad
z\in\mathbb{D}.
\end{equation}
In terms of $\Psi$ this reads
$$
2w\Psi'(w)=\Psi(w)-\Psi(-w),\quad w\in \Delta.
$$
From this we conclude that $w\Psi'(w)$ is an odd function, which, in
turn, implies that $\Phi(w):=\Psi(w)-a_0$ is odd as well. For $\Phi$
we then get the equation $w\Phi'(w)=\Phi(w)$, or $\Phi(w)=cw$. This
implies $\Psi(w)=cw+a_0$ and therefore that $\Gamma$ is a disk.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm2.5}]
Note that for any compact set $E$, we have $M_E=M_W$, where
$W:=\overline\mathbb{C}\setminus\Omega_E$. This follows because $\mu_E=\mu_W$
\cite{Ra} and $d_E(z) = d_W(z), \ z\in\mathbb{C}.$ Corollary \ref{cor2.4}
now implies that
\[
\inf\{ M_E: E \mbox{ is compact}\} = \inf\{ M_H: H \mbox{ is convex
and compact}\}.
\]
Hence we can assume that $E$ is convex from the start. We also set
cap$(E)=1,$ because $M_E$ is invariant under similarity transforms.
Thus $\partial E$ is a rectifiable Jordan curve (or a segment when
$E=\partial E$). The following argument that shows $M_E \ge 2$ for
all connected sets is due to A. Solynin. Let
$\Psi:\Delta\to\Omega_E$ be the standard conformal map:
\[
\Psi(w)=w+a_0+\sum_{k=1}^{\infty} \frac{a_k}{w^k},\qquad w\in\Delta.
\]
Recall that $\Psi$ can be extended as a homeomorphism of $\overline\Delta$
onto $\overline\Omega_E,$ with $\Psi(\mathbb{T})=\partial E,\ \mathbb{T}:=\partial\Delta.$
It is clear that
\[
d_E(\Psi(e^{it})) \ge |\Psi(e^{it})-\Psi(-e^{it})|,\qquad
t\in[0,2\pi).
\]
Since $\Psi(w)$ is univalent in $\Delta,$ the function
\[
H(w):=\frac{\Psi(w)-\Psi(-w)}{w}
\]
is analytic and non-vanishing in $\Delta$, including $w=\infty.$
Furthermore, $H(\infty):=\displaystyle\lim_{w\to\infty} H(w) = 2.$ It follows
that $h(w):=\log |H(w)|$ is harmonic in $\Delta.$ Recall that the
equilibrium measure $\mu_E$ is the harmonic measure of $\Omega_E$ at
$\infty,$ which is invariant under the conformal transformation
$\Psi,$ see \cite{Ra}. Hence
\begin{align*}
\log M_E &= \int \log d_E(z)\,d\mu_E(z) = \frac{1}{2\pi}
\int_0^{2\pi} \log d_E(\Psi(e^{it}))\,dt \\ &\ge \frac{1}{2\pi}
\int_0^{2\pi} \log \left| \frac{\Psi(e^{it})-\Psi(-e^{it})}{e^{it}}
\right|\,dt = \log 2,
\end{align*}
where we used the mean value property of the harmonic function $h(w)$ in the last step.
Thus we conclude that $M_E \ge 2=M_D$ holds for all compact sets
$E.$
Recall that $M_E=M_W$, where $W=\overline\mathbb{C}\setminus\Omega_E$. If $M_E=2$
then $M_W=2$, so that $W$ must be convex by Corollary \ref{cor2.4}.
Since $M_W>3.2$ for any segment, we have that $W$ is the closure of
a convex domain. We can assume that cap$(W)=1$ after a dilation.
Repeating the above argument for $W$ instead of $E$, we obtain that
\begin{align*}
\log 2 &= \log M_W = \frac{1}{2\pi} \int_0^{2\pi} \log
d_W(\Psi(e^{it}))\,dt \\ &\ge \frac{1}{2\pi} \int_0^{2\pi} \log
\left| \Psi(e^{it})-\Psi(-e^{it}) \right|\,dt = \log 2.
\end{align*}
It follows that
\[
\int_0^{2\pi} \left( \log d_W(\Psi(e^{it})) - \log \left|
\Psi(e^{it})-\Psi(-e^{it}) \right| \right)\,dt = 0,
\]
and that $d_W(\Psi(e^{it})) = \left| \Psi(e^{it})-\Psi(-e^{it})
\right|$ a.e. on $[0,2\pi).$ But these functions are clearly
continuous, so that
\[
d_W(\Psi(e^{it})) = \left| \Psi(e^{it})-\Psi(-e^{it}) \right| \quad
\forall t\in\mathbb{R}.
\]
An application of Lemma~\ref{lem6.1} with $\Gamma$ the interior
domain of $W$ shows that $W$ must be a disk. We would also like to
mention that A. Solynin obtained a different proof of the fact that
$M_E=2$ for a connected set $E$ implies $W$ is a disk.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm2.6}]
Recall that $M_E$ is invariant under similarity transformations.
Hence we can assume again that cap$(E)=1$ and $\displaystyle \int z \,
d\mu_E(z) = 0.$ The latter condition means that the center of mass
for the equilibrium measure is at the origin. If we introduce the
conformal map $\Psi:\Delta\to\Omega_E$, as in the previous proof,
then this condition translates into $a_0=0$, i.e.,
\[
\Psi(w)=w+\sum_{k=1}^{\infty} \frac{a_k}{w^k},\qquad w\in\Delta.
\]
Theorem 1.4 of \cite[p. 19]{Po75} gives that $E\subset D(0,2)$, so
that $d_E(z) \le 2 + |z|, \ z\in E,$ by the triangle inequality.
Note that this is sharp for $E=[-2,2]$. Applying Jensen's
inequality, we have
\[
\log M_E = \int \log d_E(z)\,d\mu_E(z) \le \int \log(2+|z|)\, d
\mu_E (z) < \log\left(2 + \int |z|\, d \mu_E (z)\right).
\]
Estimates \eqref{2.2} and \eqref{2.3} now follow from the results of
Pommerenke \cite{Po}, and of P\'olya and Schiffer \cite{PS}, who
estimated the integral
\[
\int |z|\, d \mu_E (z) = \frac{1}{2\pi} \int_0^{2\pi}
|\Psi(e^{it})|\,dt < 4.02/\pi \quad(\mbox{or }\le 4/\pi),
\]
under the corresponding assumptions.
\end{proof}
\end{document}
\begin{document}
\title[Mendes Conjecture for time-one maps]{Foliations and Conjugacy, II: \\The Mendes Conjecture \\for time-one maps of flows}
\author{Jorge Groisman}
\address{Instituto de Matem\'atica y Estad\'{\i}stica Prof. Ing. Rafael Laguardia,
Facultad de Ingenier\'{\i}a Julio Herrera y Reissig 565 11300, MONTEVIDEO, Uruguay}
\email{[email protected]}
\author{Zbigniew Nitecki}
\address{Department of Mathematics, Tufts University, Medford, MA 02155}
\email{[email protected]}
\thanks{The second author thanks IMERL for its hospitality and support during a visit in August 2018}
\date{}
\keywords{Anosov diffeomorphism, non-compact dynamics, plane foliations}
\subjclass{37D, 37E}
\begin{abstract}A diffeomorphism \selfmap{f}{\Realstwo} in the plane is \emph{Anosov} if it has a hyperbolic splitting
at every point of the plane.
The two known topological conjugacy classes of such diffeomorphisms are linear hyperbolic automorphisms
and translations (the existence of Anosov structures for plane translations was originally shown by W. White).
P. Mendes conjectured that these are the only topological conjugacy classes for Anosov diffeomorphisms in the plane.
We prove that this claim holds when the Anosov diffeomorphism is the time-one map of a flow, via a theorem about
foliations invariant under a time one map.
\end{abstract}
\newcommand{\clints}[3]{\ensuremath{\clint{#1}{#2}_{#3}}}
\newcommand{\opints}[3]{\ensuremath{\opint{#1}{#2}_{#3}}}
\newcommand{\ropints}[3]{\ensuremath{\ropint{#1}{#2}_{#3}}}
\newcommand{\lopints}[3]{\ensuremath{\lopint{#1}{#2}_{#3}}}
\newcommand{\uopint}{\opint{0}{1}}
\newcommand{\ensuremath{\mathcal{F}}}{\ensuremath{\mathcal{F}}}
\newcommand{\ensuremath{\mathcal{F}}s}[1]{\ensuremath{\ensuremath{\mathcal{F}}_{#1}}}
\newcommand{\Fclint}[2]{\ensuremath{\clints{#1}{#2}{\ensuremath{\mathcal{F}}}}}
\newcommand{\Fopint}[2]{\ensuremath{\opints{#1}{#2}{\ensuremath{\mathcal{F}}}}}
\newcommand{\Fropint}[2]{\ensuremath{\ropints{#1}{#2}{\ensuremath{\mathcal{F}}}}}
\newcommand{\Flopint}[2]{\ensuremath{\lopints{#1}{#2}{\ensuremath{\mathcal{F}}}}}
\newcommand{\cF-arc}{\ensuremath{\mathcal{F}}-arc}
\newcommand{\cF-box}{\ensuremath{\mathcal{F}}-box}
\newcommand{\ensuremath{{\mathcal{R}_{\cF}}}}{\ensuremath{{\mathcal{R}_{\ensuremath{\mathcal{F}}}}}}
\newcommand{\ensuremath{\mathcal{F}}st}{\ensuremath{\ensuremath{\mathcal{F}}^{s}}}
\newcommand{\Fsclint}[2]{\ensuremath{\clints{#1}{#2}{\ensuremath{\mathcal{F}}st}}}
\newcommand{\ensuremath{\mathcal{F}}ut}{\ensuremath{\ensuremath{\mathcal{F}}^{u}}}
\newcommand{\Fuclint}[2]{\ensuremath{\clints{#1}{#2}{\ensuremath{\mathcal{F}}ut}}}
\newcommand{\ensuremath{\mathcal{R}}}{\ensuremath{\mathcal{R}}}
\newcommand{\ensuremath{E^{s}_{1}}}{\ensuremath{E^{s}_{1}}}
\newcommand{\ensuremath{E^{s}_{2}}}{\ensuremath{E^{s}_{2}}}
\newcommand{\ensuremath{E^{u}_{1}}}{\ensuremath{E^{u}_{1}}}
\newcommand{\ensuremath{E^{u}_{2}}}{\ensuremath{E^{u}_{2}}}
\newcommand{\ensuremath{\mathcal{G}}}{\ensuremath{\mathcal{G}}}
\newcommand{\ensuremath{\mathcal{G}}s}[1]{\ensuremath{\ensuremath{\mathcal{G}}_{#1}}}
\newcommand{\Gclint}[2]{\ensuremath{\clints{#1}{#2}{\ensuremath{\mathcal{G}}}}}
\newcommand{\Gopint}[2]{\ensuremath{\opints{#1}{#2}{\ensuremath{\mathcal{G}}}}}
\newcommand{\Gropint}[2]{\ensuremath{\ropints{#1}{#2}{\ensuremath{\mathcal{G}}}}}
\newcommand{\Glopint}[2]{\ensuremath{\lopints{#1}{#2}{\ensuremath{\mathcal{G}}}}}
\newcommand{\vphi-length}{\vphi-length}
\newcommand{\cG-arc}{\ensuremath{\mathcal{G}}-arc}
\newcommand{\cG-box}{\ensuremath{\mathcal{G}}-box}
\newcommand{\cG-leaf}{\ensuremath{\mathcal{G}}-leaf}
\newcommand{\ensuremath{\mathcal{G}}so}{\ensuremath{\mathcal{G}}s{\xs{0}}}
\newcommand{\cG-neighborhood}{\ensuremath{\mathcal{G}}-neighborhood}
\newcommand{\ensuremath{\sqsubset}}{\ensuremath{\sqsubset}}
\renewcommand{\ensuremath{\mathcal{H}}}{\ensuremath{\mathcal{H}}}
\newcommand{\sideof}[2]{\ensuremath{\ensuremath{\mathcal{H}}_{#2}^{#1}}}
\newcommand{\sidepos}[1]{\sideof{+}{#1}}
\newcommand{\sideneg}[1]{\sideof{-}{#1}}
\newcommand{\vphi}{\vphi}
\newcommand{\vphito}[1]{\ensuremath{\vphi^{#1}}}
\newcommand{\vphitoof}[2]{\ensuremath{\vphito{#1}(#2)}}
\newcommand{\vphit}{\vphito{t}}
\newcommand{\vphiclint}[2]{\ensuremath{\clints{#1}{#2}{\vphi}}}
\newcommand{\vphiopint}[2]{\ensuremath{\opints{#1}{#2}{\vphi}}}
\newcommand{\vphiropint}[2]{\ensuremath{\ropints{#1}{#2}{\vphi}}}
\newcommand{\vphilopint}[2]{\ensuremath{\lopints{#1}{#2}{\vphi}}}
\newcommand{\ensuremath{J}}{\ensuremath{J}}
\newcommand{\prolp}[2]{\ensuremath{\ensuremath{J}_{#1}^{+}(#2)}}
\newcommand{\prolpfl}[1]{\prolp{\vphit}{#1}}
\newcommand{\prolpf}[1]{\prolp{f}{#1}}
\newcommand{\proln}[2]{\ensuremath{\ensuremath{J}_{#1}^{-}(#2)}}
\newcommand{\prolnfl}[1]{\proln{\vphit}{#1}}
\newcommand{\prolnf}[1]{\proln{f}{#1}}
\newcommand{\ensuremath{\mathcal{O}}}{\ensuremath{\mathcal{O}}}
\newcommand{\orb}[2]{\ensuremath{\ensuremath{\mathcal{O}}_{#1}(#2)}}
\newcommand{\orbfl}[1]{\orb{\vphi}{#1}}
\newcommand{\orbf}[1]{\orb{f}{#1}}
\newcommand{\ensuremath{\Gamma_{+}}}{\ensuremath{\Gamma_{+}}}
\newcommand{\ensuremath{\Gamma_{-}}}{\ensuremath{\Gamma_{-}}}
\newcommand{\ensuremath{\mathcal{U}}}{\ensuremath{\mathcal{U}}}
\newcommand{\ensuremath{\partial\U}}{\ensuremath{\partial\ensuremath{\mathcal{U}}}}
\newcommand{\ensuremath{D_{-}}}{\ensuremath{D_{-}}}
\newcommand{\ts{i}}{\ts{i}}
\newcommand{\ks{i}}{\ks{i}}
\newcommand{\xpps}[1]{\ensuremath{x\ppr_{#1}}}
\newcommand{\ypps}[1]{\ensuremath{y\ppr_{#1}}}
\newcommand{\ensuremath{\xpps{i}}}{\ensuremath{\xpps{i}}}
\newcommand{\ensuremath{\ypps{i}}}{\ensuremath{\ypps{i}}}
\newcommand{\ensuremath{T\pr}}{\ensuremath{T\pr}}
\renewcommand{\ensuremath{T\pr}s}[1]{\ensuremath{\ensuremath{T\pr}_{#1}}}
\newcommand{\ensuremath{T\pr}p}{\ensuremath{T\ppr}}
\newcommand{\ensuremath{\mathcal{L}}}{\ensuremath{\mathcal{L}}}
\newcommand{\ensuremath{\mathcal{L}}s}[1]{\ensuremath{\ensuremath{\mathcal{L}}_{#1}}}
\newcommand{\Lclint}[2]{\ensuremath{\clints{#1}{#2}{\ensuremath{\mathcal{L}}}}}
\newcommand{\Lopint}[2]{\ensuremath{\opints{#1}{#2}{\ensuremath{\mathcal{L}}}}}
\newcommand{\Lropint}[2]{\ensuremath{\ropints{#1}{#2}{\ensuremath{\mathcal{L}}}}}
\newcommand{\Llopint}[2]{\ensuremath{\lopints{#1}{#2}{\ensuremath{\mathcal{L}}}}}
\newcommand{\cL-arc}{\ensuremath{\mathcal{L}}-arc}
\newcommand{\cL-box}{\ensuremath{\mathcal{L}}-box}
\newcommand{\Phitoof}[2]{\ensuremath{\Phi^{#1}(#2)}}
\newcommand{\ensuremath{V}}{\ensuremath{V}}
\newcommand{\ensuremath{V}G}{\ensuremath{\ensuremath{V}_{\ensuremath{\mathcal{G}}}}}
\newcommand{\ensuremath{V}L}{\ensuremath{\ensuremath{V}_{\ensuremath{L_{A}}}}}
\renewcommand{\half}{\ensuremath{\frac{1}{2}}}
\newcommand{\ensuremath{L_{A}}}{\ensuremath{L_{A}}}
\newcommand{\ensuremath{\mathcal{W}}}{\ensuremath{\mathcal{W}}}
\newcommand{\ensuremath{\mathcal{W}}sof}[1]{\ensuremath{\of{\ensuremath{\mathcal{W}}^{s}}{#1}}}
\newcommand{\ensuremath{\mathcal{W}}uof}[1]{\ensuremath{\of{\ensuremath{\mathcal{W}}^{u}}{#1}}}
\newcommand{\ensuremath{\mathcal{J}}}{\ensuremath{\mathcal{J}}}
\newcommand{\ensuremath{\mathcal{J}}sof}[1]{\ensuremath{\ensuremath{\mathcal{J}}^{s}_{#1}}}
\newcommand{\ensuremath{\mathcal{J}}uof}[1]{\ensuremath{\ensuremath{\mathcal{J}}^{u}_{#1}}}
\newcommand{\ensuremath{J}Asof}[1]{\ensuremath{{\ensuremath{\mathcal{J}}_{\ensuremath{L_{A}}}^{s}}_{#1}}}
\newcommand{\ensuremath{J}Auof}[1]{\ensuremath{{\ensuremath{\mathcal{J}}_{A}^{u}}_{#1}}}
\newcommand{\ensuremath{V}so}{\ensuremath{V}s{0}}
\newcommand{\ensuremath{\tilde{V}}}{\ensuremath{\tilde{V}}}
\newcommand{\ensuremath{\tilde{V}}so}{\ensuremath{\ensuremath{\tilde{V}}_{0}}}
\renewcommand{\Qs}[1]{\ensuremath{Q_{#1}}}
\newcommand{\Qs{i}}{\Qs{i}}
\newcommand{\ensuremath{Q\pr_{i}}}{\ensuremath{Q\pr_{i}}}
\newcommand{\ensuremath{\tilde{Q}_{i}}}{\ensuremath{\tilde{Q}_{i}}}
\newcommand{\ensuremath{\tilde{g}}}{\ensuremath{\tilde{g}}}
\renewcommand{\gams}[1]{\ensuremath{\gamma_{#1}}}
\newcommand{\qs{i}}{\qs{i}}
\newcommand{\ensuremath{\mu}}{\ensuremath{\mu}}
\newtheorem*{Mendescon}{Mendes' Conjecture}
\newtheorem*{ThmA}{Theorem A}
\newtheorem*{ThmB}{Theorem B}
\newtheorem*{ThmC}{Theorem C}
\maketitle
\section{Introduction}\label{sec:intro}
A diffeomorphism \selfmap{f}{M} of a compact manifold $M$ is called \deffont{Anosov}
if it has a global hyperbolic splitting of the tangent bundle.
Such diffeomorphisms have been studied extensively in the past fifty years.
The existence of a splitting implies the existence of two foliations, into stable \resp{unstable} manifolds, preserved by the
diffeomorphism, such that the map shrinks distances along the stable leaves, while its inverse does so for the unstable ones.
Anosov diffeomorphisms of compact manifolds have strong recurrence properties.
The existence of an Anosov structure when $M$ is compact is independent of the Riemann metric used to define it,
and the foliations are invariants of topological conjugacy.
By contrast, an Anosov structure on a non-compact manifold is highly dependent on the Riemann metric,
and the recurrence properties observed in the compact case do not hold in general.
This is strikingly illustrated by Warren White's example \cite{White} of a complete Riemann metric on the plane \Realstwo{}
for which the horizontal translation is Anosov. Furthermore, as we showed in an earlier paper \cite{GroismanNi1}, the
stable and unstable foliations are not invariants of topological conjugacy among Anosov diffeomorphisms.
Prompted by White's example, Pedro Mendes \cite{Mendes} formulated the following
\begin{definition}\label{dfn:Anosov}
An \deffont{Anosov structure} on \Realstwo{} for a diffeomorphism \selfmap{f}{\Realstwo} consists of
a complete Riemannian metric \ensuremath{\mu}{}
on \Realstwo{} and
\begin{description}
\item[Stable and Unstable Foliations] two continuous foliations \ensuremath{\mathcal{F}}st{} and \ensuremath{\mathcal{F}}ut{} with \Cr{1} leaves
varying continuously in the \Cr{1} topology and
respected by $f$: the image of a leaf of \ensuremath{\mathcal{F}}st{} \resp{\ensuremath{\mathcal{F}}ut} is again a leaf of \ensuremath{\mathcal{F}}st{} \resp{\ensuremath{\mathcal{F}}ut};
\item[Hyperbolicity]
there exist constants $C>0$ and $\lam>1$ such that for any positive integer $n$
and any vector \vv{} tangent to a
leaf of \ensuremath{\mathcal{F}}ut{},
\begin{equation*}
\norm{Df^{n}(\vv)}\geq C\lam^{n}\norm{\vv}
\end{equation*}
while for any vector \vv{} tangent to a leaf of \ensuremath{\mathcal{F}}st{}
\begin{equation*}
\norm{Df^{n}(\vv)}\leq C\lam^{-n}\norm{\vv}
\end{equation*}
where \norm{\vv} denotes the length of a vector using the metric \ensuremath{\mu}.
\end{description}
\end{definition}
We shall use the adjectives \emph{Anosov}, \emph{stable} and \emph{unstable} in the natural way:
a diffeomorphism is \deffont{Anosov} if it has an Anosov structure; the leaf of \ensuremath{\mathcal{F}}st{} \resp{\ensuremath{\mathcal{F}}ut} through a point is
its \deffont{stable} \resp{\deffont{unstable}} \deffont{leaf}.
Mendes proved several general properties of Anosov diffeomorphisms of the plane, and asked if the two known examples
represent all possible topological conjugacy classes among them:
\begin{Mendescon}
If an orientation-preserving diffeomorphism \selfmap{f}{\Realstwo} has an Anosov structure, then $f $ is topologically conjugate to either
\begin{itemize}
\item the translation
\begin{equation*}
T(x,y)= (x+1,y)
\end{equation*}
or
\item the hyperbolic linear automorphism \selfmap{\ensuremath{L_{A}}}{\Realstwo} defined by
\begin{equation*}
\of{\ensuremath{L_{A}}}{\vx}=A\vx
\end{equation*}
where
\begin{equation*}
A=\left[\begin{array}{cc}2 & 0 \\0 & \frac{1}{2}\end{array}\right].
\end{equation*}
\end{itemize}
\end{Mendescon}
In a first step toward establishing this conjecture, Mendes proved
\begin{theorem}[Mendes, \cite{Mendes}]\label{thm:MendesThm}
If \selfmap{f}{\Realstwo} is a diffeomorphism of the plane with an Anosov structure, then
\begin{enumerate}
\item $f$ has at most one nonwandering point (which then must be a hyperbolic fixedpoint);
\item any point with nonempty $\alpha$-\resp{$\omega$-}limit set
has empty forward \resp{backward} prolongational limit set under $f$.
\end{enumerate}
\end{theorem}
In this paper, we establish the truth of Mendes' conjecture under an additional assumption:
\begin{ThmA}
If \vphit{} is a \Cr{1} flow on \Realstwo{} and $f=\vphito{1}$ is its time-one map, then the existence of an Anosov structure
for $f$ implies the conclusion of Mendes' Conjecture.
\end{ThmA}
Our proof divides into the two cases given by the first conclusion in \refer{thm}{MendesThm}:
\begin{description}
\item[Case 1] $f$ has empty nonwandering set (\ie{} $f$ is a ``Brouwer translation'');
\item[Case 2] $f$ has a unique nonwandering point.
\end{description}
In the first case, the assumption that $f=\vphito{1}$ is fixedpoint-free implies that the flow \vphit{} has no fixedpoints.
Thus the flowlines of \vphit{} form a foliation \ensuremath{\mathcal{G}}{} of \Realstwo.
A foliation \ensuremath{\mathcal{G}}{} of \Realstwo{} is \deffont{trivial} if there is a homeomorphism \selfmap{H}{\Realstwo} taking leaves
of \ensuremath{\mathcal{G}}{} to horizontal lines.
Triviality of the orbit foliation of a fixedpoint-free flow is equivalent to topological conjugacy of its time-one map
with a translation (\refer{prop}{trivial}).
In \refer{subsec}{action}, we establish the following theorem about
foliations preserved by the time-one map of a nontrivial flow:
\footnote{
This does not assume an Anosov structure for $f$.
}
\begin{ThmB}
Suppose \vphit{} is a fixedpoint-free \Cr{1} flow in the plane with flow line foliation \ensuremath{\mathcal{G}}.
Let \selfmap{f=\vphito{1}}{\Realstwo} be the time-one map
of \vphit, and suppose \ensuremath{\mathcal{F}}{} is a \Cr{1} foliation preserved by $f$.
If \ensuremath{\mathcal{G}}{} is nontrivial, then some leaf of \ensuremath{\mathcal{F}}{} is invariant under $f$.
\end{ThmB}
In \refer{subsec}{trivReeb}, we give a proof of Theorem C, a characterization of nontrivial foliations in terms of the existence of
nontrivial prolongation relations between leaves (``Reeb components''), which forms the basis of our proof of Theorem B.
Applying this to the stable foliation in case $f$ is Anosov, we see that when the time-one map of a flow on \Realstwo{} is an Anosov
Brouwer translation, the orbit foliation must be trivial, and hence the map must be topologically conjugate to a translation
(\refer{cor}{Mendes1}).
In the second case, the unique nonwandering point of $f$ must be a fixedpoint of the flow;
\footnote{
The only way a fixedpoint of a time-one map is not a fixedpoint of the flow is if it lies on a period-one closed orbit
of the flow. By the Poincar\'e-Bendixson Theorem, this would force a fixedpoint of the flow elsewhere in the plane.
}
the presence of an Anosov structure means
that it is a hyperbolic saddle point, and the stable \resp{unstable} leaf through this point
consists of the two incoming \resp{outgoing} separatrices together with the fixedpoint itself.
The second conclusion in \refer{thm}{MendesThm} implies that these separatrices escape to infinity, and hence
separate the plane into four quadrants.
Another application of Theorem B to the restriction of $f$ to any one of these quadrants and to its stable foliation
shows that the restriction of the foliation to each (open) quadrant is trivial.
In \refer{subsec}{fixedpoint} we use a standard ``fundamental domain'' argument to construct a topological conjugacy
between the restrictions of $f$ and \ensuremath{L_{A}}{} to invariant neighborhoods of the fixedpoint, and then use the triviality of the
flow line foliation \ensuremath{\mathcal{G}}{} in each quadrant to extend this conjugacy to the whole plane.
A subtle point here is that examples in \cite{GroismanNi1} show that in general we cannot hope to preserve the stable and unstable foliations under
this conjugacy.
Although the conjugacy we construct on the invariant neighborhood of the fixedpoint \emph{does} preserve the restriction of
these foliations to the neighborhood, the extension to the rest of the plane need not do so.
\section{Foliations invariant under a time-one map}\label{sec:FITOM}
In this section, we prove Theorem B.
\input{PlaneFoliations}
\input{FinvFoliations}
\section{The Mendes Conjecture: a partial resolution}\label{sec:Mendes}
In this section we prove Theorem A.
We separate the two cases: (1) $f$ is fixedpoint free, and (2) $f$ has a unique nonwandering point.
\subsection{Case 1: $f$ is a Brouwer translation}
When \selfmap{f}{\Realstwo} is fixedpoint free (a Brouwer translation) then the Mendes conjecture says that $f$ must be
topologically conjugate to the translation
\footnote{
Note that all translations are mutually topologically conjugate.
}
$(x,y)\mapsto(x+1,y)$.
Under the additional assumption that $f$ is the time-one map of some flow, using Theorem B we have
the following
\begin{prop}\label{prop:trivial}
If \vphit{} is a flow with trivial orbit foliation then there is a homeomorphism \selfmap{h}{\Realstwo} which is equivariant
with respect to \vphit{} and the translation flow $\Phi^{t}$ defined by $\Phi^{t}(x,y)=(x+t,y)$:
\begin{equation}\label{eqn:flowconj}
\hof{\vphitoof{t}{(x,y)}}=\Phi^{t}(\hof{x,y})\text{ for all $(x,y)\in\Realstwo$ and $t\in\Reals$}.
\end{equation}
\end{prop}
\begin{proofof}{\refer{prop}{trivial}}
For flows in the plane, triviality of the orbit foliation of \vphit{} is equivalent to the existence of a single, connected
cross-section, that is, a line meeting every orbit exactly once. Pick such a section $T$ for \vphit{}
and initially define $h$ on $T$ to be any homeomorphism between $T$ and the \axis{y} in \Realstwo.
Then we extend the definition of $h$ to the whole plane by noting that for each $(x,y)\in\Realstwo$
there is a (unique) point $(\xp,\yp)\in T$ and $t\in\Reals$ such that $\varphi^{t}(\xp,\yp)=(x,y)$; by definition, we want
\begin{equation*}
\hof{(x,y)}=\Phi^{t}(\hof{\xp,\yp})
\end{equation*}
which gives the required conjugacy.
\end{proofof}
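\begin{remark}
The equivariance \refer{eqn}{flowconj} follows directly from this definition: if $(x,y)=\vphitoof{t}{(\xp,\yp)}$ with $(\xp,\yp)\in T$, then for any $s\in\Reals$ we have $\vphitoof{s}{(x,y)}=\vphitoof{s+t}{(\xp,\yp)}$, and therefore
\begin{equation*}
\hof{\vphitoof{s}{(x,y)}}=\Phi^{s+t}(\hof{\xp,\yp})=\Phi^{s}\big(\Phi^{t}(\hof{\xp,\yp})\big)=\Phi^{s}(\hof{(x,y)}).
\end{equation*}
\end{remark}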
\begin{corollary}\label{cor:Mendes1}
If the time-one map $f$ of a fixedpoint-free flow \vphit{} in \Realstwo{} has an Anosov structure, then the
action of the flow is conjugate to that of the translation flow, and so $f$ is topologically conjugate
to the translation $T$.
\end{corollary}
\begin{proofof}{\refer{cor}{Mendes1}}
Suppose the flowline foliation of \vphit{} is non-trivial, and $f$ has an Anosov structure.
Let \ensuremath{\mathcal{F}}{} be the associated stable foliation of \Realstwo{}.
Clearly, \ensuremath{\mathcal{F}}{} is $f$-invariant, so by Theorem B some leaf of \ensuremath{\mathcal{F}}{} is $f$-invariant.
But then $f$ restricted to this leaf is a contraction with respect to the metric giving the Anosov structure,
and hence has a fixedpoint, contrary to the assumption that \vphit{} is fixedpoint-free.
Thus, \ensuremath{\mathcal{G}}{} must be trivial. But then by \refer{prop}{trivial} there is a homeomorphism \selfmap{h}{\Realstwo}
such that \refer{eqn}{flowconj} holds; in particular, setting $t=1$, we get the conjugacy condition
\begin{equation*}
\hof{\fof{(x,y)}}=\Phi^{1}(\hof{x,y})=T(\hof{x,y})\text{ for all $(x,y)\in\Realstwo$}.
\end{equation*}
\end{proofof}
\subsection{Case 2: $f$ has a fixedpoint}\label{subsec:fixedpoint}
Our standing assumption in this subsection is that $f$ is the time-one map of a \Cr{1} flow \vphit{} on \Realstwo,
has an Anosov structure,
and has a unique fixedpoint.
The second condition in \refer{thm}{MendesThm} implies
that the stable and unstable manifolds of this fixedpoint escape to infinity.
Then the ``cross'' $X$ consisting of the fixedpoint and its separatrices
separates the plane into four $f$-invariant open quadrants $\Qs{i}$, $i=1,\dots,4$.
If $f$ is the time-one map of a flow \vphit,
the restriction of the flow to each quadrant is fixedpoint-free, hence generates a flowline foliation \ensuremath{\mathcal{G}}.
By Theorem B, if \ensuremath{\mathcal{G}}{} is nontrivial, then any foliation in this quadrant which is preserved by $f$
must have an $f$-invariant leaf. Applying this to the foliation (of the open quadrant) by stable manifolds, we would have to have
a second fixedpoint of $f$, contrary to the first condition in \refer{thm}{MendesThm}. It follows that the foliation \ensuremath{\mathcal{G}}{} restricted to each quadrant must be trivial:
\begin{remark}\label{rmk:quad}
There is a homeomorphism of each open quadrant \Qs{i}{} to \Realstwo{}
(which here we represent as the open upper half-plane)
taking flow lines of \vphit{} to horizontal lines.
\end{remark}
We will construct a conjugating homeomorphism \selfmap{h}{\Realstwo} using a standard trick.
We call a closed topological disc $D\subset\Realstwo$ a \deffont{fundamental domain} for a homeomorphism
\selfmap{g}{\Realstwo} if there are two closed arcs \gams{-} and \gams{+} in its boundary such that
\begin{equation*}
\gams{+}=\gof{\gams{-}}=D\cap\gof{D}.
\end{equation*}
\begin{remark}\label{rmk:fundom}
If $D$ \resp{$\tilde{D}$} is a fundamental domain for $g$ \resp{\ensuremath{\tilde{g}}} and \map{h}{D}{\tilde{D}} is a homeomorphism
taking \gams{\pm} to $\tilde{\gamma}_{\pm}$, then $h$ extends to a homeomorphism conjugating
$g|\bigcup_{k\in\Integers}\toof{g}{k}{D}$ with $\ensuremath{\tilde{g}}|\bigcup_{k\in\Integers}\toof{\ensuremath{\tilde{g}}}{k}{\tilde{D}}$ via
\begin{equation*}
\hof{\toof{g}{k}{x}}=\toof{\ensuremath{\tilde{g}}}{k}{\hof{x}} \text{ for all } x\in D.
\end{equation*}
\end{remark}
We let \ensuremath{\mathcal{F}}st{} \resp{\ensuremath{\mathcal{F}}ut} be the foliation of \Realstwo{} by the stable \resp{unstable} manifolds of $f$. Note that the stable
\resp{unstable} separatrices of the fixedpoint are contained in a leaf of \ensuremath{\mathcal{F}}st{} \resp{\ensuremath{\mathcal{F}}ut}.
\begin{lemma}\label{lem:fbox}
There is a rectangular neighborhood \ensuremath{\mathcal{R}}{} of the fixedpoint of $f$
whose horizontal \resp{vertical} edges are \ensuremath{\mathcal{F}}ut-arcs \resp{\ensuremath{\mathcal{F}}st-arcs},
which is simultaneously an \ensuremath{\mathcal{F}}st-box and a \ensuremath{\mathcal{F}}ut-box.
\end{lemma}
\begin{subproof}{\refer{lem}{fbox}}
Since the \ensuremath{\mathcal{F}}st-leaf and \ensuremath{\mathcal{F}}ut-leaf through the fixedpoint are transversal (and consist, respectively, of the appropriate
separatrices together with the fixedpoint itself), there is a disc neighborhood of the fixedpoint on which the two
foliations form a product structure: any \ensuremath{\mathcal{F}}st-arc in the neighborhood intersects any \ensuremath{\mathcal{F}}ut-arc in at most one point
(and is transversal). Pick a pair of \ensuremath{\mathcal{F}}ut-arcs, one through a point on each stable separatrix.
\ensuremath{\mathcal{F}}st-arcs of points near the fixedpoint intersect both arcs; pick one such \ensuremath{\mathcal{F}}st-arc through a point on each unstable
separatrix. There are four points \qs{i} of intersection between the two \ensuremath{\mathcal{F}}st-arcs and the two \ensuremath{\mathcal{F}}ut-arcs; with
appropriate numbering the rectangle \ensuremath{\mathcal{R}}{} formed by
$\ensuremath{E^{u}_{1}}=\Fuclint{\qs{1}}{\qs{2}}$, $\ensuremath{E^{s}_{1}}=\Fsclint{\qs{2}}{\qs{3}}$,
$\ensuremath{E^{u}_{2}}=\Fuclint{\qs{3}}{\qs{4}}$,
and $\ensuremath{E^{s}_{2}}=\Fsclint{\qs{4}}{\qs{1}}$ is foliated by \ensuremath{\mathcal{F}}ut-arcs joining the two ``vertical'' edges \ensuremath{E^{s}_{1}}{} and
\ensuremath{E^{s}_{2}}, and also by the \ensuremath{\mathcal{F}}st-arcs joining the ``horizontal'' edges \ensuremath{E^{u}_{1}}{} and \ensuremath{E^{u}_{2}}.
\end{subproof}
For future reference, we note that each of these four edges crosses one of the separatrices of the fixedpoint at a unique point;
denote the ``cross'' formed by the fixedpoint together with its four separatrices by $X$ and set
\begin{align*}
\ps{1}&=X\cap\ensuremath{E^{u}_{1}}\\
\ps{2}&=X\cap\ensuremath{E^{s}_{1}}\\
\ps{3}&=X\cap\ensuremath{E^{u}_{2}}\\
\ps{4}&=X\cap\ensuremath{E^{s}_{2}}.
\end{align*}
\begin{figure}
\caption{The rectangle \ensuremath{\mathcal{R}}}
\label{fig:rect}
\end{figure}
We now form a larger neighborhood \ensuremath{V}so{} of the fixedpoint by first taking the union $\ensuremath{\mathcal{R}}\cup\fof{\ensuremath{\mathcal{R}}}$, then further enlarging
by joining each vertex \qs{i}{} of \ensuremath{\mathcal{R}}{} with its image \fof{\qs{i}} by the \ensuremath{\mathcal{G}}-arc $\ensuremath{\mathcal{G}}s{i}=\Gclint{\qs{i}}{\fof{\qs{i}}}$;
the resulting topological octagon
\begin{equation*}
\partial \ensuremath{V}so=\ensuremath{E^{u}_{1}}\cup\ensuremath{\mathcal{G}}s{1}\cup\fof{\ensuremath{E^{s}_{1}}}\cup\ensuremath{\mathcal{G}}s{2}\cup\ensuremath{E^{u}_{2}}\cup\ensuremath{\mathcal{G}}s{3}\cup\fof{\ensuremath{E^{s}_{2}}}\cup\ensuremath{\mathcal{G}}s{4}
\end{equation*}
bounds a closed topological disc \ensuremath{V}so{} which is also simultaneously an \ensuremath{\mathcal{F}}st-box and a \ensuremath{\mathcal{F}}ut-box
(provided our initial choices were sufficiently close to the fixedpoint).
\begin{figure}
\caption{\ensuremath{V}so}
\label{fig:Vso}
\end{figure}
The corresponding region \ensuremath{\tilde{V}}so{} for \ensuremath{L_{A}}{} is defined by the inequalities
\begin{align*}
\abs{xy}&\leq 1\\
x^{2}&\leq 1\\
y^{2}&\leq 1.
\end{align*}
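Note that \ensuremath{L_{A}}{} is the time-one map of the linear flow $(x,y)\mapsto(2^{t}x,2^{-t}y)$, and since $(2^{t}x)(2^{-t}y)=xy$ the flow lines of this flow lie on the level curves of $xy$; this flow plays the role for \ensuremath{L_{A}}{} that the flow line foliation \ensuremath{\mathcal{G}}{} of \vphit{} plays for $f$.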
We note that the two components $V^{u}_{i}$, $i=1,2$ of $\ensuremath{V}so\setminus\interior{\fof{\ensuremath{\mathcal{R}}}}$ are \ensuremath{\mathcal{F}}ut-boxes,
those of $\ensuremath{V}so\setminus\interior{\ensuremath{\mathcal{R}}}$ ($V^{s}_{i}$) are \ensuremath{\mathcal{F}}st-boxes, and all four
are fundamental domains for $f$.
\begin{lemma}\label{lem:honV}
Let $V=\bigcup_{k\in\Integers}\toof{f}{k}{\ensuremath{V}so}$ and $\ensuremath{\tilde{V}}=\bigcup_{k\in\Integers}\toof{\ensuremath{L_{A}}}{k}{\ensuremath{\tilde{V}}so}$.
Then there is a homeomorphism $h$ conjugating $f|V$ with $\ensuremath{L_{A}}|\ensuremath{\tilde{V}}$.
\end{lemma}
\begin{subproof}{\refer{lem}{honV}}
First, we define $h$ on $X$: for $i=1,...,4$,
the interval \clint{\ps{i}}{\fof{\ps{i}}} is the one-dimensional analogue of a fundamental
domain for $f$ (it is an interval abutting its $f$-image) and the analogue of \refer{rmk}{fundom}
allows us to define a conjugacy between each separatrix of $f$ and the corresponding separatrix of \ensuremath{L_{A}}.
Since the orbit of each \ps{i} converges monotonically to the fixedpoint in one of the time directions,
this definition, together with taking the fixedpoint to the origin, defines a homeomorphism $h$ taking $X$
to the union of the two axes in \Realstwo, conjugating $f$ with \ensuremath{L_{A}}{} there.
Next, we use the foliations \ensuremath{\mathcal{F}}st{} and \ensuremath{\mathcal{F}}ut{} to define a coordinate system on $V$: every point
$\vx\in\ensuremath{V}so$ is the (unique) point of intersection of the \ensuremath{\mathcal{F}}st-arc through a point \xof{\vx} on the horizontal
arc in $X\cap\ensuremath{V}so$ with the \ensuremath{\mathcal{F}}ut-arc through some point \yof{\vx} on the vertical arc in $X\cap\ensuremath{V}so$;
then the action of $f$ extends this property to all of $V$.
We define \hof{\vx} to be the point $(\hof{\xof{\vx}},\hof{\yof{\vx}})$.
Note that
the images of any transversal to one of the separatrices
have as their limit set both of the ``other'' separatrices, together with the fixedpoint;
this guarantees that the separate definitions of $h$ generated by the fundamental
domains $V^{u}_{i}$ and $V^{s}_{i}$ agree on overlaps and have the right limit behavior at $X$.
\end{subproof}
Finally, we extend the definition of $h$ on $V$ to each of the four components of the complement of $V$.
Each such component is a component of the complement of one of the leaves \ensuremath{\mathcal{G}}s{\qs{i}} in the quadrant \Qs{i}.
By \refer{rmk}{quad}, there is a homeomorphism \vphi{} of \Qs{i}{} with the upper half plane that takes \ensuremath{\mathcal{G}}-lines to
horizontal lines. The restriction of this homeomorphism to the union \ensuremath{Q\pr_{i}}{} of \ensuremath{\mathcal{G}}s{\qs{i}} with the component of the
complement of $V$ which it bounds maps onto a closed half-plane, with \ensuremath{\mathcal{G}}s{\qs{i}} going to the bounding horizontal line.
Let \Ts{0} be the \vphi-preimage of the vertical ray through some point \vphiof{p} on the boundary of the half plane: this is a global
cross section to the foliation \ensuremath{\mathcal{G}}{} in \ensuremath{Q\pr_{i}}, as is its $f$-image $\Ts{1}=\fof{\Ts{0}}$. Let $S$ be the strip in \ensuremath{Q\pr_{i}}{} bounded by
the two cross-sections \Ts{0} and \Ts{1} together with the \ensuremath{\mathcal{G}}-arc \Gclint{p}{\fof{p}}.
The \vphi-image of \Ts{1} is not \emph{a priori} a vertical line; however, there is a homotopy of the plane, moving images of
points along horizontal lines, which fixes $\vphiof{\Ts{0}\cup\Gclint{p}{\fof{p}}}$ and moves \vphiof{\Ts{1}} to a vertical line.
Composing $\vphi|S$ with this homotopy, we have a homeomorphism taking $S$ (which is a fundamental domain for $f$)
to a fundamental domain for the horizontal translation in the half plane. Applying \refer{rmk}{fundom}, we can extend this
homeomorphism to a conjugacy $h$ between $f|\ensuremath{Q\pr_{i}}$ and the horizontal translation in the half plane, which agrees with the
previous definition of $h$ on \ensuremath{\mathcal{G}}s{i}.
There is an easy corresponding conjugation of \ensuremath{L_{A}}{} restricted to one of the components $\ensuremath{\tilde{Q}_{i}}$ of the complement of \ensuremath{\tilde{V}}{}
and the horizontal translation on a half plane. Composing the inverse of this conjugation with the one above gives
a homeomorphism between $f|\ensuremath{Q\pr_{i}}$ and $\ensuremath{L_{A}}|\ensuremath{\tilde{Q}_{i}}$ which agrees with the conjugacy $h|V$, defined previously, on the
common boundary.
This proves
\begin{prop}\label{prop:Mendes2}
If $f$ is the time-one map of a \Cr{1} flow on \Realstwo{} with a single fixedpoint and has an Anosov structure, then it is topologically
conjugate to the linear hyperbolic automorphism \ensuremath{L_{A}}.
\end{prop}
In light of \refer{thm}{MendesThm}, \refer{cor}{Mendes1} and \refer{prop}{Mendes2} together prove Theorem A.
\end{document}
\begin{document}
\title[] {Sharp non-uniqueness for the 3D hyperdissipative Navier-Stokes equations:
above the Lions exponent}
\author{Yachun Li}
\address{School of Mathematical Sciences, CMA-Shanghai, MOE-LSC, and SHL-MAC, Shanghai Jiao Tong University, China.}
\email[Yachun Li]{[email protected]}
\thanks{}
\author{Peng Qu}
\address{School of Mathematical Sciences $\&$ Shanghai Key Laboratory for Contemporary Applied Mathematics, Fudan University, China.}
\email[Peng Qu]{[email protected]}
\thanks{}
\author{Zirong Zeng}
\address{School of Mathematical Sciences, Shanghai Jiao Tong University, China.}
\email[Zirong Zeng]{[email protected]}
\thanks{}
\author{Deng Zhang}
\address{School of Mathematical Sciences, CMA-Shanghai, Shanghai Jiao Tong University, China.}
\email[Deng Zhang]{[email protected]}
\thanks{}
\keywords{Convex integration,
hyperdissipative Navier-Stokes equations, Lady\v{z}enskaja-Prodi-Serrin condition,
non-uniqueness,
partial regularity }
\subjclass[2010]{35A02,\ 35Q30,\ 76D05.}
\begin{abstract}
We study the 3D hyperdissipative Navier-Stokes equations on the torus,
where the viscosity exponent $\alpha$ can be larger than the Lions exponent $5/4$.
It is well known that,
due to Lions \cite{lions69},
for any $L^2$ divergence-free initial data,
there exists a unique smooth Leray-Hopf solution when $\alpha \geq 5/4$.
We prove that even in this highly dissipative regime,
uniqueness fails in the supercritical spaces $L^\gamma_tW^{s,p}_x$,
in view of the generalized Lady\v{z}enskaja-Prodi-Serrin condition.
The non-uniqueness is proved in the strong sense
and, in particular, yields the sharpness at the two endpoints
$(3/p+1-2\alpha, \infty, p)$
and $(2\alpha/\gamma+1-2\alpha, \gamma, \infty)$.
Moreover, the constructed solutions are allowed to coincide with the unique
Leray-Hopf solutions near the initial time
and, more delicately,
admit the partial regularity outside a fractal set of singular times
with zero Hausdorff $\mathcal{H}^{\eta_*}$ measure,
where $\eta_*>0$ is any given small positive constant.
These results also provide the sharp non-uniqueness
in the supercritical Lebesgue and Besov spaces.
Furthermore, the strong vanishing viscosity result is obtained
for the hyperdissipative Navier-Stokes equations.
\end{abstract}
\maketitle
{
\tableofcontents
}
\section{Introduction and main results}
\subsection{Background} \label{Subsec-intro}
We consider the
three-dimensional hyperdissipative Navier-Stokes equations on the torus ${\mathbb{T}}^3:=[-\pi,\pi]^3$,
\begin{equation}\label{equa-NS}
\left\{\aligned
&\partial_t u +\nu(-\Delta)^{\alpha} u+(u\cdot \nabla )u + \nabla P=0, \\
& {\mathrm{div}} u = 0,
\endaligned
\right.
\end{equation}
where $u=(u_1,u_2,u_3)^\top(t,x)\in {\mathbb R}^3 $
and $P=P(t,x)\in {\mathbb R}$
represent the velocity field and pressure of the fluid, respectively,
$\nu> 0$ is the viscous coefficient,
$\alpha\in [1,2)$,
and $(-\Delta)^{\alpha}$ is the fractional Laplacian defined
via the Fourier transform on the flat torus
$$
\mathcal{F}((-\Delta)^{\alpha}u)(\xi)=|\xi|^{2\alpha}\mathcal{F}(u)(\xi),\ \ \xi\in \mathbb{Z}^3.
$$
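For instance, each Fourier mode is an eigenfunction of this operator: for $u(x)=e^{i\xi\cdot x}$ with $\xi\in\mathbb{Z}^3$ one has $(-\Delta)^{\alpha}u=|\xi|^{2\alpha}u$, and the zero mode (constants) is annihilated.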
In particular,
\eqref{equa-NS} are the classical Navier-Stokes equations (NSE for short) when $\alpha =1$,
and the Euler equations when the viscosity vanishes, i.e., $\nu=0$,
\begin{equation}\label{equa-Euler}
\left\{\aligned
&\partial_t u +(u\cdot \nabla )u + \nabla P=0, \\
& {\mathrm{div}} u = 0.
\endaligned
\right.
\end{equation}
In the groundbreaking paper \cite{leray1934},
Leray constructed the weak solutions to NSE in the space
$L^\infty_tL^2_x\cap L^2_t\dot{H}_x^1$,
which obey the energy inequality
\begin{align}\label{nsenergy}
\|u(t)\|_{L^2}^2
+ 2 \nu \int_{t_0}^t \|(-\Delta)^{\frac{\alpha}{2}} u(s)\|_{L^2}^2 {\rm d} s
\leq \|u(t_0)\|_{L^2}^2
\end{align}
with $\alpha =1$, for any $t>0$ and a.e. $t_0\geq 0$.
This class of weak solutions is now referred to as Leray-Hopf weak solutions,
due to the important contributions by Hopf \cite{hopf1951}
in the case of bounded domains.
Moreover,
Leray \cite{leray1934} proved that
for every such weak solution,
there exists a closed set $S\subseteq \mathbb{R}^+$
of measure zero, such that
the solution is smooth on
$\mathbb{R}^3\times (\mathbb{R}^+\setminus S)$,
and the $1/2$ Hausdorff measure $\mathcal{H}^{1/2}(S)=0$.
Since then,
there has been a vast amount of literature on the uniqueness,
regularity and
global existence of solutions to NSE in wider spaces.
Until now, the uniqueness of Leray-Hopf solutions still remains
a challenging problem.
The scaling consideration usually suggests a heuristic way
to find suitable functional spaces for the solvability of partial differential equations.
It provides a useful classification of subcritical,
critical and supercritical spaces.
A general philosophy is that equations are well-posed
in the subcritical spaces,
while solutions may exhibit ill-posedness phenomena in the supercritical spaces.
We refer to the papers \cite{K00,K17} of Klainerman
for comprehensive discussions.
For the interested readers,
we refer to \cite{CCT03,KPV01,BT08,XZ22}
and the references therein
for the norm-inflation and discontinuity of solution map
for nonlinear Schr\"odinger equations, KdV equations
and nonlinear wave equations in supercritical spaces.
For the current hyperdissipative NSE \eqref{equa-NS},
it is invariant under the scaling
\begin{align} \label{scaling-hyperNSE}
u(t,x) \mapsto \lambda^{2\alpha-1} u (\lambda^{2\alpha}t, \lambda x),\ \
P(t,x) \mapsto \lambda^{4\alpha-2} P (\lambda^{2\alpha}t, \lambda x).
\end{align}
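Indeed, writing $u_\lambda(t,x):=\lambda^{2\alpha-1}u(\lambda^{2\alpha}t,\lambda x)$ and $P_\lambda(t,x):=\lambda^{4\alpha-2}P(\lambda^{2\alpha}t,\lambda x)$, a direct computation shows that
\begin{align*}
\partial_t u_\lambda,\quad \nu(-\Delta)^{\alpha}u_\lambda,\quad (u_\lambda\cdot\nabla)u_\lambda,\quad \nabla P_\lambda
\end{align*}
all equal $\lambda^{4\alpha-1}$ times the corresponding terms of \eqref{equa-NS} evaluated at $(\lambda^{2\alpha}t,\lambda x)$,
so $(u_\lambda,P_\lambda)$ solves \eqref{equa-NS} whenever $(u,P)$ does.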
This suggests the {\it critical space} $\mathbb{X}$ for \eqref{equa-NS}
if the corresponding norm of solutions is
invariant under the scaling \eqref{scaling-hyperNSE}.
One typical critical space is $\mathbb{X}=C_tL^2_x$
if $\alpha = 5/4$.
In the (sub)critical regime where $\alpha \geq 5/4$,
a remarkable result proved by Lions \cite{lions69} is that,
for any divergence-free $L^2$ initial data,
the hyperdissipative NSE
\eqref{equa-NS} admits unique smooth Leray-Hopf solutions.
See also the global strong solvability by
Mattingly-Sinai \cite{MS99}.
The well-posedness of \eqref{equa-NS}
also holds for $\alpha$ slightly below $5/4$
due to Tao \cite{tao09}.
Moreover, Katz-Pavlovi\'c \cite{KP02} proved that
the Hausdorff dimension of the singular set at the time of first blow-up is at most $5-4\alpha$.
In contrast, for the supercritical regime where $\alpha <5/4$,
in the breakthrough work \cite{bv19b} Buckmaster-Vicol
first proved the non-uniqueness of finite energy weak solutions to NSE (i.e. $\alpha =1$),
based on the convex integration scheme.
The approach of convex integration was introduced to 3D Euler equations
in the pioneering papers by De Lellis and Sz\'ekelyhidi \cite{dls09, dls10}
and has been proven very successful
in the fluid community.
In particular, a recent milestone is the resolution of the Onsager conjecture,
developed in \cite{B15,bdis15,bdls16,dls14,dls13}
and finally settled by Isett \cite{I18} and Buckmaster-De Lellis-Sz\'ekelyhidi-Vicol \cite{bdsv19}.
The crucial ingredient introduced by Buckmaster-Vicol \cite{bv19b} is the $L^2_x$-based intermittent spatial building blocks,
which in particular permit one to control the dissipative term $(-\Delta)u$ in NSE.
By making full use of the spatial intermittency,
Luo-Titi \cite{lt20} proved the non-uniqueness of weak solutions in $C_tL^2_x$
to hyperdissipative NSE \eqref{equa-NS},
whenever the exponent $\alpha$ is less than the Lions exponent,
i.e., $\alpha <5/4$.
Furthermore, in the recent work \cite{bcv21},
Buckmaster-Colombo-Vicol constructed the non-unique weak solutions to \eqref{equa-NS}
when $\alpha \in [1,5/4)$, which are smooth
outside a singular set in time with Hausdorff dimension less than one.
The intermittent convex integration also has been applied to various other models.
We refer, e.g., to \cite{lq20} for 2D hypoviscous NSE,
\cite{luo19} for stationary NSE,
and \cite{CDR18,DR19} for the non-uniqueness of Leray solutions to hypodissipative NSE.
See the surveys \cite{dls09,bv21,bv19r,dls17} for other interesting applications.
We also refer to another method by Jia and \v{S}ver\'ak \cite{js14,js15}
for the non-uniqueness of Leray-Hopf solutions under a certain assumption
for the linearized Navier-Stokes operator,
and the very recent work \cite{ABC21}
for the non-uniqueness of Leray solutions
of the forced NSE.
Hence, in view of the works \cite{lions69,lt20,bcv21},
$\alpha=5/4$ is exactly the {\it critical threshold of viscosity} for the well-posedness
in $C([0,T];L^2)$
for hyperdissipative NSE \eqref{equa-NS}.
Another type of critical spaces extensively used is the mixed Sobolev space
$\mathbb{X}=L^\gamma_t\dot{W}^{s,p}_x$,
where the exponents $(s,\gamma, p)$ satisfy
\begin{align} \label{critical-LPS-hyperNSE}
\frac{2\alpha}{\gamma} + \frac{3}{p} = 2\alpha -1 +s.
\end{align}
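This relation can be read off, at least formally, from the scaling \eqref{scaling-hyperNSE}: since
$\|u_\lambda(t)\|_{\dot{W}^{s,p}_x}=\lambda^{2\alpha-1+s-\frac{3}{p}}\|u(\lambda^{2\alpha}t)\|_{\dot{W}^{s,p}_x}$,
one obtains
\begin{align*}
\|u_\lambda\|_{L^\gamma_t\dot{W}^{s,p}_x}
=\lambda^{2\alpha-1+s-\frac{3}{p}-\frac{2\alpha}{\gamma}}\|u\|_{L^\gamma_t\dot{W}^{s,p}_x},
\end{align*}
and the norm is scaling invariant precisely when the exponent vanishes, i.e., when \eqref{critical-LPS-hyperNSE} holds.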
In the case where $\gamma =\infty$
we may also consider $\mathbb{X}=C_t\dot{W}^{\frac 3p+1-2\alpha,p}_x$.
In particular,
the mixed Lebesgue space $L^\gamma_tL^p_x$
(or, more generally, Strichartz space frequently used
for dispersive equations, like the Schr\"odinger equations
and wave equations) is critical for the classical NSE,
when the exponents $(\gamma, p)$ satisfy the well-known
{\it Lady\v{z}enskaja-Prodi-Serrin condition}
\begin{align} \label{critical-LPS-NSE}
\frac{2}{\gamma} + \frac{3}{p} =1.
\end{align}
Due to the weak-strong uniqueness (\cite{prodi59,serrin62,L67,SvW84}),
the extra integrability in (sub)critical spaces $L^\gamma_tL^p_x$
with $2/ \gamma + 3 / p \leq 1$, $p\in[3,\infty)$,
suffices to guarantee the uniqueness
in the class of Leray-Hopf solutions to NSE.
The regularity in the delicate endpoint case $L^\infty_tL^3_x$ was solved by
Escauriaza-Seregin-\v{S}ver\'ak \cite{iss03}.
See also \cite{R02} for the weak-strong uniqueness
in $L^\gamma_tW^{s,p}_x$
when $(s,\gamma,p)$ satisfies \eqref{critical-LPS-hyperNSE}
with $\alpha=1$,
\cite{GIP03} for the case of critical Besov spaces,
and \cite{LR16} for quite general Prodi-Serrin uniqueness criterion.
Furthermore,
due to the works of \cite{FJR72,FLRT00,LM01},
any weak solution to NSE (in the distributional sense,
see Definition \ref{Def-Weak-Sol} below)
in the (sub)critical spaces $L^\gamma_tL^p_x$
is automatically the unique regular Leray-Hopf solution,
see \cite[Theorem 1.3]{cl20.2} for the precise statements on the torus.
There are also many uniqueness results for the hyperdissipative NSE,
under the generalized Lady\v{z}enskaja-Prodi-Serrin condition \eqref{critical-LPS-hyperNSE}
or in the critical spaces.
We refer to, for instance,
\cite{Z07} for the mixed space $L^\gamma_tL^p_x$
when $2\alpha/\gamma+3/p\leq 2\alpha-1$,
and \cite{W06} for the Besov space $\dot{B}^{1-2\alpha+\frac dp}_{p,q}$.
In contrast to the positive side,
many questions remain open in the supercritical regime.
In the recent remarkable paper \cite{cl20.2},
Cheskidov-Luo proved the sharp non-uniqueness of NSE
near the endpoint $(s,\gamma, p)=(0,2,\infty)$
of the Lady\v{z}enskaja-Prodi-Serrin condition \eqref{critical-LPS-NSE}.
The proof in particular exploits the temporal intermittency
in the convex integration scheme.
See also \cite{cl21,cl22} for the application of temporal intermittency
to transport equations,
and \cite{lzz21} for the case of MHD equations.
The non-uniqueness in \cite{cl20.2} is indeed proved in the {\it strong} sense that
every weak solution is non-unique,
and the Hausdorff dimension of the corresponding singularity set
in time can be less than any given small constant ${\varepsilon}>0$.
It is also conjectured by Cheskidov-Luo \cite{cl20.2}
that the non-uniqueness of weak solutions
shall be valid in the full range of the supercritical regime
$2/\gamma + 3/p >1$.
More recent progress has been made in \cite{cl21.2}
for the other endpoint
$(s,\gamma, p)=(0,\infty, 2)$ for the 2D NSE.
It is worth noting that,
the endpoint case $(s,\gamma,p)=(3/p+1-2\alpha, \infty, p)$
corresponds exactly to the critical space $C_tL^2_x$ for
equation \eqref{equa-NS} when $\alpha =5/4$ and $p=2$.
The significance of the endpoint $(s,\gamma,p)=(3/p+1-2\alpha,\infty,p)$
can be also seen from
its close relationship to more general critical Besov and Triebel-Lizorkin spaces.
Specifically,
for the classical NSE when $\alpha =1$,
one has the embedding of critical spaces:
\begin{align} \label{embed-critical-NSE}
L^3 \hookrightarrow \dot{B}^{\frac 3p-1}_{p,\infty}\ (3\leq p<\infty)
\hookrightarrow BMO^{-1} (=\dot{F}^{-1}_{\infty,2})
\hookrightarrow \dot{B}^{-1}_{\infty,\infty}.
\end{align}
The solvability of NSE in these critical spaces has
attracted significant interests in literature.
It is usually obtained by the mild formulation of equations,
dating back to Kato and Fujita \cite{FK64,K84}.
One well-known critical space is $BMO^{-1}$,
due to Koch and Tataru \cite{kt01}.
See the monographs \cite{C04,LR16,M99} for more details.
It was a long standing problem whether
NSE is well-posed in the largest critical space $\dot{B}^{1-2\alpha}_{\infty,\infty}$
(\cite{C04,M99}).
Quite surprisingly,
the negative answer was provided by
Bourgain-Pavlovi\'c \cite{BP08},
by showing a phenomenon of norm-inflation instability in $\dot{B}^{-1}_{\infty,\infty}$
for NSE.
Germain \cite{G08} also proved that the solution map associated to NSE is not $C^2$
in the space $\dot{B}^{-1}_{\infty,q}$, $q>2$.
Afterwards, the norm-inflation in $\dot{B}^{-1}_{\infty,q}$ for $q\geq 1$
was proved by Yoneda \cite{Y10} and Wang \cite{W15}.
The ill-posedness phenomena also appear for the hyperdissipative NSE.
There exist discontinuous Leray-Hopf solutions in the critical space
$B^{1-2\alpha}_{\infty, \infty}$
with $\alpha \in [1,5/4)$,
due to Cheskidov-Shvydkoy \cite{CS12}, with arbitrarily small initial data.
Cheskidov-Dai \cite{CD14} also proved the norm-inflation instability
in $\dot{B}^{-s}_{\infty,q}$
for all $s\geq \alpha\geq 5/4$, $q\in (2,\infty]$.
Motivated by the above progress,
we consider the following three non-uniqueness questions:
\begin{enumerate}
\item[$\bullet $] In the highly dissipative regime $\alpha \geq 5/4$,
where the global solvability of Leray-Hopf solutions was known due to Lions \cite{lions69},
would it be possible to find non-unique and non-Leray-Hopf weak solutions
even with the same initial data as the Leray-Hopf solutions?
\item[$\bullet$]
As conjectured in the NSE context \cite{cl20.2},
in view of the generalized Lady\v{z}enskaja-Prodi-Serrin condition \eqref{critical-LPS-hyperNSE},
do there exist non-unique weak solutions to the hyperdissipative NSE \eqref{equa-NS}
in the supercritical spaces $L^\gamma_tW_x^{s,p}$,
where ${2\alpha}/{\gamma} + {3}/{p} > 2\alpha -1 +s$?
\item[$\bullet$]
In view of the positive well-posedness results in critical spaces,
e.g., \cite{K84,kt01,W06},
are there non-unique weak solutions
to hyperdissipative NSE in the
supercritical Lebesgue, Besov, or Triebel-Lizorkin spaces?
\end{enumerate}
It is worth noting that,
the global solvability of Leray-Hopf solutions when $\alpha \geq 5/4$
makes it significantly harder to construct non-unique weak solutions
to \eqref{equa-NS}.
Actually, it is not possible to construct non-unique weak solutions
as in \cite{bv19b,lt20,bcv21}
in the space $C_tL^2_x$,
since any weak solution in $C_tL^2_x$
is the unique Leray-Hopf solution
due to \cite{lions69}.
In the present work,
we give the positive answers to the first and third questions,
and to the second question at two endpoints.
These results are contained in the main result, i.e., Theorem \ref{Thm-Non-hyper-NSE}
below, concerning the non-uniqueness result
for every weak solution in the space $L^\gamma_tW^{s,p}_x$,
where $(s,\gamma,p)$ lies in the supercritical regimes
$\mathcal{A}_1$ and $\mathcal{A}_2$,
respectively, for $\alpha\in [5/4,2)$ and $\alpha \in [1,2)$.
See \eqref{A-regularity1} and \eqref{A-regularity2} below for the precise
formulations of $\mathcal{A}_i$, $i=1,2$.
To the best of our knowledge,
it is the first non-uniqueness result for the hyperdissipative NSE,
when the viscosity exponent $\alpha$ is beyond the Lions exponent $5/4$.
In particular,
the non-uniqueness results
in $L^\gamma_tW^{s,p}_x$ hold in the strong sense as in \cite{cl20.2}
and are sharp at two endpoints
$(3/p+1-2\alpha, \infty, p)$
and $(2\alpha/\gamma+1-2\alpha, \gamma, \infty)$,
in view of the generalized Lady\v{z}enskaja-Prodi-Serrin condition \eqref{critical-LPS-hyperNSE}.
It also provides the non-unique weak solutions to \eqref{equa-NS}
in the spaces $C_t\mathbb{X}$,
where $\mathbb{X}$ can be the supercritical
Lebesgue, Besov and Triebel-Lizorkin spaces.
In particular, in view of the well-posedness results
in \cite{W06} and Theorem \ref{Thm-GWP-HNSE-Lp} below,
the non-uniqueness results are sharp in the
Lebesgue and Besov spaces.
Furthermore,
the delicate phenomenon exhibited here is that,
even in the hyperdissipative case $\alpha \geq 5/4$,
despite the existence of unique smooth Leray-Hopf solutions to \eqref{equa-NS},
there indeed exist weak solutions
in any small $L^\gamma_tW^{s,p}_x$-neighborhood of Leray-Hopf solutions,
which coincide with Leray-Hopf solutions near the initial time,
are smooth outside a null set in time,
and have the zero Hausdorff $\mathcal{H}^{\eta_*}$ measure of
the singular set,
where $\eta_*>0$ can be any given small constant.
This fine structure of the temporal singular set
is exploited by using the gluing technique,
which was first developed to solve the Onsager conjecture
\cite{B15,I18,bdsv19}
and has been recently implemented in the context of NSE \cite{bcv21,cl20.2}.
The last result of the present work is concerned with
the viscosity vanishing result.
Namely, given any weak solution to Euler equations \eqref{equa-Euler}
in the space $H^{\widetilde{\beta}}_{t,x}$,
where $\widetilde{\beta} >0$,
we show that
it is a strong vanishing viscosity limit in $H^{\widetilde{\beta}}_{t,x}$ of a sequence of weak solutions
to the hyperdissipative NSE \eqref{equa-NS} with $\alpha \in [1,2)$.
Hence, it extends the viscosity vanishing result in the NSE case
to the hyperdissipative NSE.
The construction of non-unique weak solutions,
inspired by the recent works \cite{bcv21,bv19b,cl20.2,cl21.2},
is based on the approach of intermittent convex integration,
which features both the spatial and temporal intermittency.
The fundamental spatial building blocks are the intermittent jets
for the endpoint case $(3/p+1-2\alpha, \infty, p)$,
and the concentrated Mikado flows for the other endpoint case
$(2\alpha/\gamma+1-2\alpha, \gamma, \infty)$.
In both cases,
the extra temporal intermittency shall be exploited
in an almost optimal way,
in order to control the high dissipativity,
time derivative errors and
oscillation errors,
and simultaneously, to respect the supercritical regularity.
As we shall see below,
in the very high dissipativity regime
where $\alpha$ is close to $2$,
the suitable temporal intermittency roughly amounts to 3D
and, respectively, 4D spatial intermittency
in the supercritical regimes $\mathcal{A}_1$ and $\mathcal{A}_2$. \\
{\bf Notations.} To simplify the notations, for $p\in [1,\infty]$ and $s\in {\mathbb R}$, we denote
\begin{align*}
L^p_t:=L^p(0,T),\quad L^p_x:=L^p({\mathbb{T}}^3),\quad H^s_x:=H^s({\mathbb{T}}^3), \quad W^{s,p}_x:=W^{s,p}({\mathbb{T}}^3),
\end{align*}
where $W^{s,p}_x$ is the usual Sobolev space
and $H^s_x=W^{s,2}_x$.
Moreover, $L^\gamma_tL^p_x$ denotes the usual Banach space
$L^\gamma(0,T;L^p({\mathbb{T}}^3))$,
$p, \gamma\in [1,\infty]$.
Let
\begin{align*}
\norm{u}_{W^{N,p}_{t,x}}:=\sum_{0\leq m+|\zeta|\leq N} \norm{\partial_t^m \nabla^{\zeta} u}_{L^p_{t,x}}, \ \
\norm{u}_{C_{t,x}^N}:=\sum_{0\leq m+|\zeta|\leq N}
\norm{\partial_t^m \nabla^{\zeta} u}_{C_{t,x}},
\end{align*}
where $\zeta=(\zeta_1,\zeta_2,\zeta_3)$ is the multi-index
and $\nabla^\zeta:= \partial_{x_1}^{\zeta_1} \partial_{x_2}^{\zeta_2} \partial_{x_3}^{\zeta_3}$.
In particular, we write $L^p_{t,x}:= L^p_tL^p_x$ for brevity.
Given any Banach space $X$,
$C([0,T];X)$ denotes the space of continuous functions from $[0,T]$ to $X$,
equipped with the norm $\|u\|_{C_tX}:=\sup_{t\in [0,T]}\|u(t)\|_X$.
We also use the Besov space ${B}_{p, q}^{s} ({\mathbb{T}}^3)$
endowed with the norm
\begin{align*}
\|f\|_{{B}_{p, q}^{s}({\mathbb{T}}^3)}= \big( \sum_{j \geq 0}\big|2^{js} \| \Delta_j f\|_{L^p({\mathbb{T}}^3)}\big|^q \big)^{\frac1q},
\end{align*}
where $(s,p,q)\in (-\infty, \infty) \times [1,\infty] \times [1,\infty]$,
$\{\Delta_j\}_{j\in \mathbb{Z}}$ is the Littlewood-Paley decomposition
of the unity.
Let ${F}_{p, q}^{s}({\mathbb{T}}^3)$ denote the Triebel-Lizorkin space,
endowed with the norm
\begin{align*}
\|f \|_{{F}_{p, q}^{s}({\mathbb{T}}^3)}
=\|\big(\sum_{j\geq 0}| 2^{js} \Delta_{j}f|^q \big)^{\frac{1}{q}}\|_{L^{p}({\mathbb{T}}^3)},
\end{align*}
where $(s,p,q)\in (-\infty, \infty) \times [1,\infty) \times [1,\infty]$.
The homogeneous Besov $\dot{B}_{p, q}^{s}({\mathbb{T}}^3)$
and Triebel-Lizorkin spaces $\dot{F}_{p, q}^{s}({\mathbb{T}}^3)$
are defined similarly where the summation is over $j\in \mathbb{Z}$.
We refer to \cite{ST87} for more details.
For any $A\subseteq [0,T]$, $\varepsilon_*>0$,
the neighborhood of $A$ in $[0,T]$ is defined by
\begin{align*}
N_{\varepsilon_*}(A):=\{t\in [0,T]:\ \exists\,s\in A,\ s.t.\ |t-s|\leq \varepsilon_*\}.
\end{align*}
We also use the notation $a\lesssim b$, which means that $a\leq C b$ for some constant $C>0$.
\subsection{Main results} \label{Subsec-Main}
Before formulating the main results,
let us first present the notion of weak solutions
in the distributional sense to equation \eqref{equa-NS}.
\begin{definition} \label{Def-Weak-Sol} (Weak solutions)
Given any weakly divergence-free datum $u_0 \in L^2(\mathbb{T}^3)$,
we say that $u\in L^2([0,T]\times \mathbb{T}^3)$
is a weak solution for the hyperdissipative Navier-Stokes equations \eqref{equa-NS}
if $u$ is divergence-free for a.e. $t\in [0,T]$,
and
\begin{align*}
\int\limits_{\mathbb{T}^3} u_0 {\varphi}(0,x) dx
= - \int_0^T \int\limits_{\mathbb{T}^3}
u(\partial_t {\varphi} - \nu (-\Delta)^{\alpha} {\varphi} + (u\cdot \nabla) {\varphi}) dx dt
\end{align*}
for any divergence-free test function
${\varphi}\in C_0^\infty([0,T)\times\mathbb{T}^3)$.
\end{definition}
We focus on the following two supercritical regimes,
whose borderlines contain two endpoints
of the generalized Lady\v{z}enskaja-Prodi-Serrin condition \eqref{critical-LPS-hyperNSE}.
More precisely,
in the case $\alpha\in [5/4,2)$
we consider the supercritical regime $\mathcal{A}_1$ given by
\begin{align} \label{A-regularity1}
\mathcal{A}_1:=\bigg\{ (s,\gamma,p)\in [0,3)\times [1, \infty] \times[1,\infty]: 0\leq s< \frac{4\alpha-5}{\gamma}+ \frac{3}{p}+1-2\alpha \bigg\},
\end{align}
and in the case $\alpha\in [1,2)$ we consider the
supercritical regime $\mathcal{A}_2$ given by
\begin{align} \label{A-regularity2}
\mathcal{A}_2:=\bigg\{ (s,\gamma,p)\in [0,3)\times [1, \infty]\times[1,\infty]: 0\leq s< \frac{2\alpha}{\gamma}+\frac{2\alpha-2}{p}+1-2\alpha \bigg\}.
\end{align}
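Note that the borderline of $\mathcal{A}_1$ with $\gamma=\infty$ is $s=\frac{3}{p}+1-2\alpha$,
while the borderline of $\mathcal{A}_2$ with $p=\infty$ is $s=\frac{2\alpha}{\gamma}+1-2\alpha$;
these are exactly the two endpoints
$(3/p+1-2\alpha,\infty,p)$ and $(2\alpha/\gamma+1-2\alpha,\gamma,\infty)$
of the generalized Lady\v{z}enskaja-Prodi-Serrin condition \eqref{critical-LPS-hyperNSE}.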
The supercritical regimes $\mathcal{A}_1$ and $\mathcal{A}_2$ in the case $s=0$
can be seen in Figure $1$ below.
\begin{figure}
\caption{The supercritical regimes $\mathcal{A}_1$ and $\mathcal{A}_2$ in the case $s=0$}
\end{figure}
The main result of this paper is formulated in Theorem \ref{Thm-Non-hyper-NSE} below,
which in particular gives the non-uniqueness in the
shaded part in Figure $1$,
including any small neighborhood near two endpoints.
\begin{theorem} \label{Thm-Non-hyper-NSE}
Let $\tilde{u}$ be any smooth, divergence-free and mean-free vector field on $[0,T]\times {\mathbb{T}}^3$.
Then, there exists $\beta'\in(0,1)$,
such that for any $\varepsilon_*, \eta_*>0$
and for any $(s,\gamma,p)\in \mathcal{A}_1$ or $(s,\gamma,p)\in \mathcal{A}_2$,
respectively, if $\alpha \in [5/4,2)$ or $\alpha\in[1,2)$,
there exist a velocity field $u$
and a set
\begin{align*}
\mathcal{G} = \bigcup\limits_{i=1}^\infty (a_i,b_i) \subseteq [0,T],
\end{align*}
such that the following hold:
\begin{enumerate}[(i)]
\item Weak solution: $u$ is a weak solution
to \eqref{equa-NS} with the initial datum $\tilde{u}(0)$
and has zero spatial mean.
\item Regularity: $u \in H^{\beta'}_{t,x} \cap L^\gamma_tW^{s,p}_x$,
and
\begin{align*}
u|_{\mathcal{G}\times \mathbb{T}^3} \in C^\infty (\mathcal{G}\times \mathbb{T}^3).
\end{align*}
Moreover,
if there exists $t_0\in (0,T)$ such that
$\tilde{u}$ is the solution to \eqref{equa-NS} on $[0,t_0]$,
then $u$ agrees with $\tilde{u}$ near $t=0$.
\item The Hausdorff dimension of the singular set
$\mathcal{B} = [0,T]\setminus\mathcal{G}$ satisfies
\begin{align*}
d_{\mathcal{H}}(\mathcal{B}) <\eta_*.
\end{align*}
In particular, the singular set $\mathcal{B}$
has zero Hausdorff $\mathcal{H}^{\eta_*}$ measure,
i.e., $\mathcal{H}^{\eta_*}(\mathcal{B})=0$.
\item Small deviations of temporal support:
$$\supp_t u \subseteq N_{\varepsilon_*}(\supp_t \tilde{u}).$$
\item Small deviations on average:
$$\|u-\tilde{u}\|_{L^1_tL^2_x}+\|u-\tilde{u}\|_{L^\gamma_tW^{s,p}_x}\leq \varepsilon_*.$$
\end{enumerate}
\end{theorem}
The first direct consequence of Theorem \ref{Thm-Non-hyper-NSE}
is the following strong non-uniqueness of weak solutions to \eqref{equa-NS}
in the hyperdissipative case where $\alpha \in [5/4,2)$.
\begin{corollary} \label{Cor-Strong-Nonuniq}
(Strong non-uniqueness)
Let $\alpha\in [5/4,2)$.
Then, for any weak solution $\tilde{u}$ to \eqref{equa-NS},
there exists a different weak solution $u \in L^\gamma_tW^{s,p}_x$ to \eqref{equa-NS}
with the same initial data,
where $(s,\gamma,p) \in \mathcal{A}_1\cup \mathcal{A}_2$.
Moreover, for every divergence-free $L^2_x$ initial data,
there exist infinitely many weak solutions in $L^\gamma_{t}W^{s,p}_x$
to \eqref{equa-NS}
which are smooth almost everywhere in time.
\end{corollary}
Another interesting consequence of Theorem \ref{Thm-Non-hyper-NSE}
is the non-uniqueness in the supercritical Lebesgue,
Besov and Triebel-Lizorkin spaces.
\begin{corollary} \label{Cor-Nonuniq-Supercri}
(Non-uniqueness in supercritical spaces)
Let $\alpha \in [5/4,2)$.
Then, there exist non-unique weak solutions to \eqref{equa-NS} in the supercritical spaces $C_t\mathbb{X}$,
where $\mathbb{X}$ can be one of the following three types of spaces:
\begin{enumerate}
\item[(i)] $L^p$, $1\leq p<\frac{3}{2\alpha-1}$;
\item[(ii)] ${B}^{s}_{p,q}$, $-\infty<s<\frac 3p +1-2\alpha$,
$1< p<\infty$, $1\leq q\leq \infty$;
\item[(iii)] ${F}^{s}_{p,q}$, $-\infty<s<\frac 3p +1-2\alpha$,
$1< p<\infty$, $1\leq q\leq \infty$.
\end{enumerate}
\end{corollary}
At last, we have the vanishing viscosity result
which extends the corresponding result for NSE in \cite{bv19b} to the hyperdissipative NSE \eqref{equa-NS}.
\begin{theorem} \label{Thm-hyperNSE-Euler-limit}
(Strong vanishing viscosity limit)
Let $\alpha \in (1,2)$
and $u\in H^{\widetilde{\beta}}_{t,x}([-2T,2T]\times {\mathbb{T}}^3)$ be any mean-free weak solution
to the Euler equation \eqref{equa-Euler},
where $\widetilde{\beta} >0$.
Then, there exist $\beta' \in (0, \widetilde{\beta})$ and a sequence of weak solutions
$u^{(\nu_{n})}\in H^{\beta'}_{t,x} $
to \eqref{equa-NS},
where $\nu_{n}$
is the viscosity coefficient,
such that
as $\nu_{n}\rightarrow 0$,
\begin{align}\label{convergence}
u^{(\nu_{n})}\rightarrow u \quad\text{strongly in}\ H^{\beta'}_{t,x}.
\end{align}
\end{theorem}
\subsection{Comments on main results.}
In the following let us present some comments on the main results.
{\bf (i) Strong non-uniqueness for the high dissipativity above the Lions exponent.}
It is well known that one has global solvability in the highly dissipative case
when $\alpha \geq 5/4$.
Actually, in view of the works \cite{lions69,lt20,bcv21},
$\alpha =5/4$ is the critical threshold for the well-posedness of
solutions in $C_tL^2_x$ to \eqref{equa-NS}.
That is, weak solutions in $C_tL^2_x$
are unique if $\alpha \geq 5/4$, while non-unique if $\alpha <5/4$.
Quite surprisingly,
Theorem \ref{Thm-Non-hyper-NSE} shows that,
even in the highly dissipative regime $\alpha \geq 5/4$,
the uniqueness fails in the spaces $L^\gamma_tW^{s,p}_x$
where $(s,\gamma,p)$ lies in the supercritical regimes
$\mathcal{A}_1 \cup \mathcal{A}_2$, defined in \eqref{A-regularity1} and \eqref{A-regularity2}, respectively.
The non-uniqueness even holds in the strong sense that
any solution in $L^\gamma_t W^{s,p}_x$
is non-unique.
In particular, in the case where $\alpha =5/4$,
Corollary \ref{Cor-Nonuniq-Supercri} $(i)$ yields
the non-uniqueness of weak solutions in $C_tL^p_x$ to \eqref{equa-NS}
for any $p<2$.
Thus, in view of the well-posedness results \cite{lions69},
the non-uniqueness of Corollary \ref{Cor-Nonuniq-Supercri} $(i)$ is sharp in $C_tL^2_x$.
{\bf (ii) Sharp non-uniqueness at two endpoints of generalized Lady\v{z}enskaja-Prodi-Serrin condition.}
In the remarkable paper \cite{cl20.2},
Cheskidov-Luo first proved the sharp non-uniqueness at the endpoint
$(s,\gamma,p)=(0,2,\infty)$.
That is,
for any $\gamma<2$, there exist non-unique solutions
in $L^\gamma_tL^\infty_x$ to NSE ($\alpha=1$)
in all dimensions $d\geq 2$.
It is also conjectured that the non-uniqueness shall be valid
in the whole supercritical regime determined by the
Lady\v{z}enskaja-Prodi-Serrin condition \eqref{critical-LPS-NSE}.
The non-uniqueness for the other endpoint case $(s,\gamma,p)=(0,\infty,2)$
has been recently achieved in \cite{cl21.2} for the 2D NSE.
In view of the generalized Lady\v{z}enskaja-Prodi-Serrin condition \eqref{critical-LPS-hyperNSE}
and the well-posedness in the (sub)critical cases \cite{Z07},
Theorem \ref{Thm-Non-hyper-NSE} provides the sharp non-uniqueness
for the hyperdissipative NSE \eqref{equa-NS}
at two endpoints, i.e.,
$(3/p+1-2\alpha,\infty,p)$ for $\alpha \in [5/4,2)$,
and $(2\alpha/\gamma+1-2\alpha,\gamma,\infty)$ for $\alpha \in (1,2)$.
This in particular extends the results in
\cite{cl21.2} and \cite{cl20.2},
respectively, to the 3D hyperdissipative NSE where
$\alpha\in [5/4,2)$ and $\alpha \in (1,2)$.
We would expect the non-uniqueness
to persist in the remaining supercritical regimes when $\alpha\in[5/4,2)$,
and in the supercritical regime near the
endpoint $(3/p+1-2\alpha, \infty, p)$ when $\alpha \in [1,5/4)$.
This seems out of reach of the present method,
due to the $L^2_{t,x}$-criticality of the space-time convex integration method.
As a matter of fact, as pointed out in \cite{cl20.2,cl21.2},
the temporal intermittency allows one to
raise the temporal integrability exponent to $\gamma >2$,
yet at the cost of reducing the spatial integrability exponent to $p<2$.
We note that, in the endpoint case $(0,\infty, {3}/{(2\alpha-1)})$
when $\alpha <5/4$,
both the temporal and spatial integrability exponents would be larger than two.
{\bf (iii) Sharp non-uniqueness in the supercritical Lebesgue and Besov spaces.}
There have been extensive results on the well-posedness
in the (sub)critical spaces,
see, e.g. \cite{C04,LR16,M99} for the NSE in the critical spaces:
\begin{align} \label{critical-space-NSE}
L^3
\hookrightarrow \dot{B}^{\frac 3p-1}_{p,\infty}\ (2\leq p<\infty)
\hookrightarrow BMO^{-1} (=\dot{F}^{-1}_{\infty,2}).
\end{align}
Corollary \ref{Cor-Nonuniq-Supercri} appears to be the first non-uniqueness
result for hyperdissipative NSE \eqref{equa-NS}
in the space $C_t\mathbb{X}$,
where $\mathbb{X}$ can be the supercritical
Lebesgue, Besov and Triebel-Lizorkin spaces.
Let us mention that,
the well-posedness of NSE in the critical space $L^3_x$
was proved in the famous paper by Kato \cite{K84}.
The mild formulation strategy proposed in \cite{K84}
has been now frequently used to obtain the well-posedness
of NSE in various spaces.
For the hyperdissipative NSE \eqref{equa-NS},
we include the well-posedness result in the critical space
$L^{3/(2{\alpha}lpha-1)}_x$ in the Appendix.
In particular,
this shows that the non-uniqueness in Corollary \ref{Cor-Nonuniq-Supercri} $(i)$
is sharp in the supercritical Lebesgue spaces.
Moreover, it has been proved in \cite{W06} that,
for $\alpha >1/2$, \eqref{equa-NS} is well-posed with small data
in $\dot{B}^{\frac 52-2\alpha}_{2,q}(\mathbb{R}^d)$
for $1<q\leq \infty$.
The proof also applies to the torus case.
Hence,
the non-uniqueness in Corollary \ref{Cor-Nonuniq-Supercri} $(ii)$
is sharp in the Besov spaces.
Furthermore,
for any $s<1-2\alpha$,
we may take $\eta>0$ small enough
such that $1-2\alpha -\eta>s$.
Then, by the embedding of Besov spaces
we have for any $1\leq q\leq \infty$,
\begin{align*}
{B}^{\frac 3p+1-2\alpha-\eta}_{p,q}
\hookrightarrow {B}^{1-2\alpha-\eta}_{{\infty},q}
\hookrightarrow {B}^{s}_{{\infty},q}.
\end{align*}
Hence, by virtue of Corollary \ref{Cor-Nonuniq-Supercri} $(ii)$,
we also have the non-uniqueness of weak solutions in
${B}^{s}_{{\infty},q}$,
for any $s<1-2\alpha$, $1\leq q\leq \infty$.
This may also be seen as a complement
to the ill-posedness results in \cite{CD14},
where the norm-inflation instability was proved for equation \eqref{equa-NS}
with $\alpha \geq 5/4$
in the Besov spaces ${B}^{s}_{\infty,q}$,
for any $s\leq -\alpha$, $2<q\leq \infty$.
{\bf (iv) Partial regularity of weak solutions.}
In the pioneering paper \cite{leray1934},
Leray proved that
Leray-Hopf solutions to the NSE
are smooth outside a closed singular set of times
of zero Hausdorff $\mathcal{H}^{1/2}$ measure.
This provides another possible way to tackle the global existence problem.
In particular,
following the works of Scheffer \cite{S76,S77},
Caffarelli-Kohn-Nirenberg \cite{CKN82}
proved a space-time regularity version
and showed the existence of global Leray-Hopf solutions
which have singular sets in $\mathbb{R}^3\times \mathbb{R}^+$
of zero Hausdorff $\mathcal{H}^1$ measure.
See also the simplified proofs in \cite{Lin98,Vasseur07}.
For the hyperdissipative NSE with $\alpha \in (1,5/4]$,
Katz-Pavlovi\'c \cite{KP02} proved that
the Hausdorff dimension of the singular set at the time of first blow-up is at most $5-4\alpha$.
Recently, Colombo-De Lellis-Massaccesi \cite{CDM20}
proved a stronger version of the Katz-Pavlovi\'c result,
and showed the existence of Leray-Hopf solutions
which have singular space-time sets
of zero Hausdorff $\mathcal{H}^{5-4\alpha}$ measure,
thus extending the Caffarelli-Kohn-Nirenberg theorem
to hyperdissipative NSE.
Theorem \ref{Thm-Non-hyper-NSE} shows that,
in the high dissipative regime $\alpha \in [5/4,2)$,
for any small $\eta_*>0$,
there exist weak solutions to \eqref{equa-NS}
in any small $L^\gamma_tW^{s,p}_x$-neighborhood of Leray-Hopf solutions,
$(s,\gamma,p)\in \mathcal{A}_1\cup\mathcal{A}_2$,
which coincide with the Leray-Hopf solutions near $t=0$,
and have singular sets of times with
zero Hausdorff $\mathcal{H}^{\eta_*}$ measure.
The proof of partial regularity in time takes advantage of the gluing technique,
which was developed in \cite{B15,bdls16,I18,bdsv19} to solve the famous Onsager conjecture for 3D Euler equations.
The gluing technique, applied to the singular set in time of weak solutions to the NSE, was first
implemented in \cite{bcv21},
where the Hausdorff dimension of the singular set of the constructed solutions is
strictly less than one.
The results of \cite{bcv21} also
imply the strong non-uniqueness of weak solutions to the NSE in dimensions $d=3,4$.
This technique was later used by Cheskidov-Luo \cite{cl20.2}
to obtain the strong non-uniqueness of weak solutions in the endpoint case $(s,\gamma,p)=(0,2,\infty)$,
with singular sets in time of small Hausdorff dimension.
It has also been used in
\cite{CDR18,DR19} to prove the non-uniqueness of Leray-Hopf solutions
for the hypodissipative NSE when $\alpha <1/3$.
{\bf (v) Non-uniqueness for MHD equations above the Lions exponent.}
The non-uniqueness problem for
magnetohydrodynamic equations (MHD for short)
has attracted increasing interest in recent years.
We refer to \cite{bbv20,fls21,fls21.2} for recent progress on the ideal MHD equations
and the relationship to the Taylor conjecture.
One delicate point here is that,
as pointed out in \cite{bbv20},
the geometry of MHD equations restricts the oscillation directions
and so limits the spatial intermittency.
Hence, it is hard to control the viscosity and resistivity
of MHD equations when the exponent is larger than one.
In the recent work \cite{lzz21},
the non-uniqueness has been proved for MHD equations,
where the viscosity and resistivity exponents are
allowed to be larger than one, yet below the Lions exponent $5/4$,
based on the construction of building blocks
which are adapted to the geometry of MHD
and feature both the temporal and spatial intermittency.
We would expect that the refined building blocks and parameters in this paper
make it possible to obtain the non-uniqueness for MHD above the Lions exponent $5/4$.
The strong non-uniqueness with fine smoothness outside a small
fractal set in time would also be expected.
\section{Outline of the proof}
Our proof is mainly inspired by the intermittent convex integration method
developed in \cite{bcv21,bv19b,bv19r,cl21.2,cl20.2}.
It is based on the iterative construction of approximate solutions to the
hyperdissipative Navier-Stokes-Reynolds system, namely,
for each integer $qg_{(\tau)}eq 0$,
\begin{equation}\label{equa-nsr}
\left\{\aligned
&\partial_t {u_q}+\nu(-\Delta)^{\alpha} {u_q}+ {\mathrm{div}}({u_q}\otimes{u_q})+\nabla P_q={\mathrm{div}} \mathring{R}_q, \\
&{\mathrm{div}} {u_q} = 0,
\endaligned
\right.
\end{equation}
where the Reynolds stress $\mathring{R}_q$ is a symmetric traceless $3\times 3$ matrix.
In order to exploit the fine temporal singular set of approximate solutions,
we adapt the notion of the well-prepared solutions from \cite{cl20.2} here.
\begin{definition} (Well-preparedness)
Let $\eta\in (0,\eta_*)$. We say that
the smooth solution $(u_q,\mathring{R}_q)$ to \eqref{equa-nsr} on $[0,T]$ is well-prepared
if there exist a set $I$ and a length scale $\theta>0$,
such that $I$ is a union of at most $\theta^{-\eta}$ many closed intervals of length scale $5\theta$ and
\begin{align*}
\mathring{R}_q(t,x)=0 \quad \text{if} \quad \operatorname{dist}(t,I^c)\leq \theta.
\end{align*}
\end{definition}
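Let us briefly indicate, for orientation only, why this notion encodes the smallness of the singular set in time: if $I$ is a union of at most $\theta^{-\eta}$ closed intervals of length $5\theta$, then for every $\eta'\in(\eta,\eta_*]$ the Hausdorff pre-measure at scale $5\theta$ satisfies
\begin{align*}
\mathcal{H}^{\eta'}_{5\theta}(I)\leq \theta^{-\eta}\,(5\theta)^{\eta'}\lesssim \theta^{\eta'-\eta}\to 0 \quad\text{as }\theta\to 0.
\end{align*}
Since the sets $I=I_q$ produced along the iteration are decreasing and the corresponding length scales $\theta_q$ tend to zero, the limiting singular set has zero $\mathcal{H}^{\eta'}$ measure for every $\eta'>\eta$, and in particular zero $\mathcal{H}^{\eta_*}$ measure.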
Two important quantities to measure the size of the relaxation solutions $(u_q, \mathring{R}_q)$,
$q\in \mathbb{N}$,
are the frequency parameter $\lambda_q$ and the amplitude parameter $\delta_{q+2}$:
\begin{equation}\label{la}
\lambda_q=a^{(b^q)}, \ \
\delta_{q+2}=\lambda_{q+2}^{-2\beta}.
\end{equation}
Here $a\in \mathbb{N}$ is a large integer to be determined later,
$\beta>0$ is the regularity parameter,
and $b\in 2\mathbb{N}$ is a large even integer such that
\begin{align}
b>\frac{1000}{\varepsilon\eta_*}, \ \
0<\beta<\frac{1}{100b^2}, \label{b-beta-ve}
\end{align}
where, for the given $(s,p,\gamma)\in \mathcal{A}_1$, $\varepsilon\in \mathbb{Q}_+$ is sufficiently small such that
\begin{equation}\label{e3.1}
\varepsilon\leq\frac{1}{20}\min\{2-\alpha,\,\frac{4\alpha-5}{\gamma}+\frac{3}{p}-(2\alpha-1)-s \}\quad \text{and}\quad b\varepsilon\in\mathbb{N},
\end{equation}
and, for the given $(s,p,\gamma)\in \mathcal{A}_2$, $\varepsilon>0$ is sufficiently small such that
\begin{equation}\label{ne3.1}
\varepsilon\leq\frac{1}{20}\min\{2-\alpha,\,\frac{2\alpha}{\gamma}+\frac{2\alpha-2}{p}-(2\alpha-1)-s \}\quad \text{and}\quad b(2-\alpha-8\varepsilon)\in \mathbb{N}.
\end{equation}
The idea is then to prove the vanishing of Reynolds stress in an appropriate space
as $q$ tends to infinity.
Thus, intuitively,
the limit of $u_q$ is expected to solve the original equation \eqref{equa-NS}.
This procedure is quantified in the following iterative estimates:
\begin{align}
& \|{u_q}\|_{L^{\infty}_tH^3_x} \lesssim \lambda_{q}^{5}, \label{uh3} \\
& \|\partial_t {u_q}\|_{L^{\infty}_tH^2_x} \lesssim \lambda_{q}^{8}, \label{upth2} \\
& \|\mathring{R}_q\|_{L^{\infty}_tH^3_x} \lesssim \lambda_{q}^{9},\label{rh3} \\
& \|\mathring{R}_q\|_{L^{\infty}_tH^4_x} \lesssim \lambda_{q}^{10},\label{rh4} \\
& \|\mathring{R}_q\|_{L^{1}_{t,x}} \leq \lambda_q^{-\varepsilon_R}\delta_{q+1}, \label{rl1}
\end{align}
where the implicit constants are independent of $q$
and ${\varepsilon}_R>0$ is a small parameter such that
\begin{align*}
{\varepsilon}_R< \frac{{\varepsilon}}{10}.
\end{align*}
\begin{remark}
We note that,
the approximate solutions $(u_q, \mathring{R}_q)$
are measured in the more regular spaces $L^\infty_tH^N_x$,
$N=2,3,4$, and have larger frequency upper bounds
than those in \cite{lzz21}.
This is in part due to the full oscillation and concentration
in space and time,
in order to achieve the sharp non-uniqueness in the endpoint cases.
Moreover,
it is also imposed here to be
compatible with the gluing stage in Section \ref{Sec-Concen-Rey},
in order to exploit the fine singular set of times.
\end{remark}
The crucial iteration result for the relaxation solutions $(u_{q}, \mathring{R}_{q})$
is formulated below.
\begin{theorem} [Main iteration]\label{Prop-Iterat}
Let $(s,p,\gamma)\in \mathcal{A}_1$ for $\alpha \in [5/4,2)$,
or $(s,p,\gamma) \in \mathcal{A}_2$ for $\alpha \in [1,2)$.
Then, there exist $\beta\in (0,1)$,
$M^*>0$ large enough and $a_0=a_0(\beta, M^*)$,
such that for any integer $a\geq a_0$,
the following holds:
Suppose that
$({u_q}, \mathring{R}_q )$ is a well-prepared solution to \eqref{equa-nsr}
for the set $I_q$ and the length scale $\theta_q$
and satisfies \eqref{uh3}-\eqref{rl1}.
Then, there exists another well-prepared solution $(u_{q+1}, \mathring{R}_{q+1} )$
to \eqref{equa-nsr} for some set $I_{q+1}\subseteq I_q$,
$0,T\notin I_{q+1}$, and the length scale $\theta_{q+1}<\theta_q/2$,
and $(u_{q+1}, \mathring{R}_{q+1} )$ satisfies \eqref{uh3}-\eqref{rl1} with $q+1$ replacing $q$.
In addition, we have
\begin{align}
&\|u_{q+1}-u_{q}\|_{L^{2}_{t,x}} \leq M^*\delta_{q+1}^{\frac{1}{2}}, \label{u-B-L2tx-conv}\\
&\|u_{q+1}-u_{q}\|_{L^1_tL^{2}_{x}} \leq \delta_{q+2}^{\frac{1}{2}}, \label{u-B-L1L2-conv}\\
&\norm{ u_{q+1} - u_q }_{L^\gamma_tW^{s,p}_x} \leq \delta_{q+2}^{\frac{1}{2}},\label{u-B-Lw-conv}
\end{align}
and
\begin{align}
&\operatorname{supp}_t (u_{q+1}, \mathring{R}_{q+1})
\subseteq N_{\delta_{q+2}^{\frac12}}( \operatorname{supp}_t (u_{q}, \mathring{R}_{q})).\label{suppru}
\end{align}
\end{theorem}
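Let us briefly indicate how Theorem \ref{Prop-Iterat} is used; the detailed argument is given in Section \ref{Sub-Proof-Main}. Iterating the theorem from a suitable well-prepared starting pair $(u_0,\mathring{R}_0)$, the differences of the approximate solutions are summable by \eqref{u-B-L2tx-conv} and \eqref{la}:
\begin{align*}
\sum_{q\geq 0}\|u_{q+1}-u_{q}\|_{L^{2}_{t,x}}\leq M^*\sum_{q\geq 0}\delta_{q+1}^{\frac12}=M^*\sum_{q\geq 0}\lambda_{q+1}^{-\beta}<\infty,
\end{align*}
so $u_q$ converges strongly in $L^2_{t,x}$ to some limit $u$, while \eqref{rl1} gives $\mathring{R}_q\to 0$ in $L^1_{t,x}$. Passing to the limit in \eqref{equa-nsr} then shows that $u$ is a weak solution to \eqref{equa-NS}, while \eqref{u-B-Lw-conv} and the well-preparedness of $(u_q,\mathring{R}_q)$ keep track of the $L^\gamma_tW^{s,p}_x$-distance to $u_0$ and of the singular set in time, respectively.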
The proof of the main iteration theorem will occupy most of the present paper.
It relies crucially on the
gluing procedure and on the space-time intermittent convex integration scheme.
\subsection{Gluing stage}
The first stage is to concentrate the Reynolds stress
into a smaller region,
which eventually enables us to concentrate
the singular times into a null set with small Hausdorff dimension.
More precisely, given
a well-prepared solution $({u_q},\mathring{R}_q)$ to \eqref{equa-nsr} at level $q$,
we divide $[0,T]$ into $m_{q+1}$ sub-intervals $[t_i,t_{i+1}]$ of length $T/m_{q+1}$,
where $t_i:= i T/m_{q+1} $ and $m_{q+1}$ may depend on $({u_q},\mathring{R}_q)$.
Then, we solve the following hyperdissipative Navier-Stokes equations
on each small interval $[t_i,t_{i+1}+\theta_{q+1}]$:
\begin{equation}\label{equa-nsvi}
\left\{\aligned
& \partial_tv_i +\nu(-\Delta)^{\alpha} v_i+(v_i\cdot \nabla )v_i + \nabla P_i=0, \\
& {\mathrm{div}} v_i = 0,\\
& v_i|_{t=t_i}={u_q}(t_i),
\endaligned
\right.
\end{equation}
where $\theta_{q+1}=(T/m_{q+1})^{1/\eta}$,
$\eta$ satisfies
$\eta \simeq \eta_*$
with $\eta_*$ as in Theorem~\ref{Thm-Non-hyper-NSE},
and
\begin{align}\label{ne2.14}
\theta_{q+1}^{-30} \simeq m_{q+1}^{\frac{30}{\eta}}\ll \lambda_{q+1}^{\varepsilon},
\end{align}
where ${\varepsilon}$ is as in \eqref{e3.1}-\eqref{ne3.1}.
In particular, by Lemmas \ref{mae-endpt1} and \ref{mae-endpt2},
\eqref{ne2.14} indicates that
the amplitudes of velocity perturbations
oscillate with much weaker frequency
than those of temporal and spatial building blocks.
Due to the classical local well-posedness theory,
there exists a unique local smooth solution $v_i$ to \eqref{equa-nsvi}
if $\theta_{q+1}$ is small enough (or, $m_{q+1}$ is sufficiently large).
Then, in order to construct a global approximate solution to \eqref{equa-nsr}
and concentrate the Reynolds stress into smaller subintervals,
we glue these local solutions $v_i$ together
with a partition of unity $\{\chi_i\}_i$
(see \eqref{def-chi1}-\eqref{def-chi3} below):
\[
\widetilde u_q:=\sum_{i=0}^{m_{q+1}-1} \chi_i v_i,
\]
which satisfies the equation
\begin{align*}
\partial_{t} \widetilde u_q+ \nu(-\Delta)^{\alpha} \widetilde u_q +\operatorname{div}(\widetilde u_q \otimes \widetilde u_q) +\nabla \widetilde p={\mathrm{div}} \mathring{\widetilde R}_q,
\end{align*}
for some pressure $\widetilde p$,
and the new Reynolds stress is of the form
\begin{align*}
\mathring{\widetilde R}_q =\partial_t\chi_i\mathcal{R}(v_{i}-v_{i-1}) -\chi_{i}(1-\chi_{i})((v_{i}-v_{i-1})\mathring\otimes (v_{i}-v_{i-1})),
\ \ t\in [t_i, t_{i+1}],
\end{align*}
where $\mathcal{R}$ is the inverse-divergence operator
given by \eqref{calR-def},
$0\leq i\leq m_{q+1}-1$
(we let $v_{-1}\equiv 0$).
Note that,
by the definition of $\{\chi_i\}_i$,
the new Reynolds stress $\mathring{\widetilde R}_q$ is supported
on a $\theta_{q+1}$-neighborhood of $t_i$ for each $0\leq i<m_{q+1}$.
This yields that the singular set of $\widetilde u_q$
can be covered by $m_{q+1}$ small intervals of length scale $\theta_{q+1}$,
and therefore has small Hausdorff dimension.
Moreover, as pointed out in \cite{bcv21},
since ${u_q}$ is already a smooth solution to \eqref{equa-NS} on most of $[0,T]$,
namely on the $\theta_q$-neighborhood of
the complement $I_q^c$ of some small set $I_q$,
if $t_{i-1}$ and $t_i$ both lie in this region,
one has $\widetilde u_q=v_{i-1}=v_i={u_q}$ on $\operatorname{supp} (\chi_i\chi_{i-1})$.
Thus, we can define an index set $\mathcal{C}$
(see \eqref{def-indexsetb} below) to
extract those regions where $\widetilde u_q$ is not necessarily
an exact solution to \eqref{equa-NS},
such that the bad set $I_{q+1}$ of $(\widetilde u_q, \mathring{\widetilde R}_q)$
is contained in $I_q$.
Last but not least,
due to the stability estimates of the local solutions to \eqref{equa-nsvi},
the new Reynolds stress shares almost the same decay rate
with the old one in the $L^1_{t,x}$ space
(see \eqref{nrl1} below).
In other words, the procedure of concentrating the Reynolds stress error
only costs a loss of $\lambda_q^{-3{\varepsilon}_R/4}$ decay rate,
which is acceptable in the next stage of convex integration (see also \cite[p.8]{cl20.2}).
\subsection{Space-time convex integration stage}
The next stage is to construct the key velocity perturbations,
in particular so as to fulfill the iterative objectives,
i.e., estimates \eqref{uh3}-\eqref{rl1}.
We will treat the two supercritical regimes $\mathcal{A}_1$
and $\mathcal{A}_2$ separately.
{\bf $\bullet$ Endpoint case $(s,\gamma, p)=(3/p+1-2\alpha,\infty, p)$.}
For the endpoint $(s,\gamma, p)=(3/p+1-2\alpha,\infty, p)$,
$\alpha \in[5/4,2)$,
we choose the intermittent jets $W_{(k)}$,
developed in \cite{bcv21},
as the main spatial building blocks (see \eqref{snwd} below).
The intermittent jets are indexed by four parameters $(r_{\perp},r_{\parallel},\lambda,\mu)$,
where $r_{\perp}$ and $r_{\parallel}$ parameterize the concentration of the flows,
$\lambda$ is the frequency parameter,
and $\mu$ is the temporal oscillation parameter.
One main feature of the intermittent jet is the almost 3D intermittency, i.e.,
\begin{align}
\|W_{(k)}\|_{L^{\infty}_tL_x^1}\lesssim \lambda^{-\frac32+},
\end{align}
which succeeds in controlling the dissipativity
$(-\Delta)^{\alpha}$ when $\alpha \leq 5/4$,
see \cite{bcv21}.
In order to control the high viscosity $(-\Delta)^{\alpha}$
when $\alpha$ is beyond the Lions exponent $5/4$,
we need to oscillate $W_{(k)}$ in time by using the temporal concentration functions
(see \eqref{gk} below),
which are indexed by two parameters $(\tau, \sigma)$
and provide the additional intermittency.
The crucial constraints needed to run the convex integration mechanism
are listed below:
\begin{subequations}\label{constset}
\begin{align}
\lambda^sr_{\perp}^{\frac{2}{p}-1}r_{\parallel}^{\frac1p-\frac12}\tau^{\frac12-\frac{1}{\gamma}} &\ll 1 \quad\ (w_{q+1}^{(p)}\in L^\gamma_tW^{s,p}_x) \label{setpw} \\
\mu r_{\perp}^{2}r_{\parallel}^{-\frac12}\tau^{-\frac12}&\ll 1 \quad\ (\text{Time derivative error for}\ w_{q+1}^{(p)}) \label{setpt} \\
\lambda^{2\alpha-1}r_{\perp}r_{\parallel}^{\frac12}\tau^{-\frac12}&\ll 1 \quad\ (\text{Hyperdissipativity error for}\ w_{q+1}^{(p)}) \label{setdeltap} \\
\lambda^{2\alpha-1}\mu^{-1}&\ll 1 \quad\ (\text{Hyperdissipativity error for}\ w_{q+1}^{(t)}) \label{setdeltat} \\
\lambda^{-1}r_{\perp}^{-1 }&\ll 1 \quad\ (\text{Oscillation error for}\ w_{q+1}^{(p)}) \label{setrosc1} \\
\mu^{-1} \sigma\tau&\ll 1 \quad\ (\text{Oscillation error for}\ w_{q+1}^{(t)}) \label{setrosc2}
\end{align}
\end{subequations}
It is important here that,
in order to ensure the validity of these constraints,
one has to exploit the temporal intermittency
in an almost optimal way.
It turns out that
the suitable amount of temporal intermittency roughly equals
$(4\alpha-5)$-dimensional
spatial intermittency; see the heuristic computation below.
In particular,
the temporal intermittency almost achieves 3D
spatial intermittency when $\alpha$ is close to $2$.
We show that there do exist six admissible parameters $(r_{\perp},r_{\parallel},\lambda,\mu,\tau,\sigma)$
and give the
precise choice in \eqref{larsrp} below.
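To indicate where the exponent $4\alpha-5$ comes from, here is a rough heuristic, recorded only as a guide: the oscillation constraint \eqref{setrosc1} forces $r_{\perp}\gg\lambda^{-1}$, and, taking $r_{\perp}$ and $r_{\parallel}$ close to this borderline value $\lambda^{-1}$, the hyperdissipativity constraint \eqref{setdeltap} requires
\begin{align*}
\tau^{\frac12}\gg \lambda^{2\alpha-1}\,r_{\perp}\,r_{\parallel}^{\frac12}
\gtrsim \lambda^{2\alpha-1}\cdot\lambda^{-1}\cdot\lambda^{-\frac12}=\lambda^{2\alpha-\frac52},
\quad\text{i.e.}\quad \tau\gtrsim \lambda^{4\alpha-5}.
\end{align*}
Since temporal concentration at rate $\tau$ plays, roughly speaking, the role of $d$-dimensional spatial concentration at frequency $\lambda$ when $\tau\simeq\lambda^{d}$, this is precisely the $(4\alpha-5)$-dimensional temporal intermittency mentioned above; the choice \eqref{larsrp} below saturates this bound up to small powers $\lambda^{O(\varepsilon)}$.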
{\bf $\bullet$ Endpoint case $(2\alpha/\gamma+1-2\alpha, \gamma, \infty)$.}
Regarding the other endpoint $(2\alpha/\gamma+1-2\alpha, \gamma, \infty)$,
$\alpha\in [1,2)$,
one may be inclined to use the building blocks from the previous endpoint case,
which, unfortunately, leaves no admissible parameters for
the constraints \eqref{setpw}-\eqref{setrosc2}.
This is mainly due to the presence of the $\mu t$ term in the intermittent jets,
which gives rise to the restrictions \eqref{setdeltat} and \eqref{setrosc2},
which contradict each other.
Inspired by the work \cite{cl20.2},
we use the concentrated Mikado flows instead as the spatial building blocks
(see \eqref{snwd-endpt2} below),
which are indexed by two parameters $(r_{\perp},\lambda)$.
We note that the $\mu t$ term and
the concentration parameter $r_{\parallel}$
are not involved in the concentrated Mikado flows.
This permits us to reduce the constraints to
\begin{subequations}\label{constset2}
\begin{align}
\lambda^sr_{\perp}^{\frac{2}{p}-1}\tau^{\frac12-\frac{1}{\gamma}} &\ll 1 \quad\ (w_{q+1}^{(p)}\in L^\gamma_tW^{s,p}_x) \label{setpw.2} \\
\sigma\lambda^{-1}r_{\perp}\tau^{\frac12}&\ll 1 \quad\ (\text{Time derivative error for}\ w_{q+1}^{(p)}) \label{setpt.2} \\
\lambda^{2\alpha-1}r_{\perp}\tau^{-\frac12}&\ll 1 \quad\ (\text{Hyperdissipativity error for}\ w_{q+1}^{(p)}) \label{setdeltap.2} \\
\lambda^{-1}r_{\perp}^{-1 }&\ll 1 \quad\ (\text{Oscillation error for}\ w_{q+1}^{(p)}) \label{setrosc1.2}
\end{align}
\end{subequations}
The absence of the large parameter $\mu$ permits more flexibility in the
choice of the parameters $(r_{\perp}, \lambda, \tau, \sigma)$.
It turns out that
there do exist four admissible parameters fulfilling the constraints \eqref{setpw.2}-\eqref{setrosc1.2}.
Even though the Mikado flows give at most 2D intermittency,
suitable temporal building blocks provide
much more intermittency,
almost reaching 4D spatial intermittency when $\alpha$ is close to $2$.
The precise choice of the four parameters is given in \eqref{larsrp-endpt2} below.
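The same kind of back-of-the-envelope computation as before, again recorded only as a guide, indicates the amount of temporal intermittency needed here: \eqref{setrosc1.2} forces $r_{\perp}\gg\lambda^{-1}$, and then \eqref{setdeltap.2} requires
\begin{align*}
\tau^{\frac12}\gg\lambda^{2\alpha-1}\,r_{\perp}\gtrsim\lambda^{2\alpha-2},
\quad\text{i.e.}\quad \tau\gtrsim\lambda^{4\alpha-4},
\end{align*}
which corresponds to roughly $(4\alpha-4)$-dimensional temporal intermittency and explains the almost 4D behaviour as $\alpha\to 2$ mentioned above.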
Let us mention that,
unlike the intermittent jets,
the concentrated Mikado flows provide only 2D intermittency,
which is insufficient to handle the endpoint case $(3/p+1-2\alpha,\infty,p)$.
Thus, the building blocks for the endpoint $(2\alpha/\gamma+1-2\alpha, \gamma, \infty)$
are not applicable to the previous endpoint case.
It would be very interesting to construct the building blocks
in a unified manner
for both endpoints and even for the rest of critical scaling values in \eqref{critical-LPS-hyperNSE},
which still remains open.
Nevertheless, once the velocity perturbations are constructed,
we prove Theorems \ref{Thm-Non-hyper-NSE}
and \ref{Thm-hyperNSE-Euler-limit} by a unified argument.
The rest of this paper is organized as follows.
In Section \ref{Sec-Concen-Rey} we use the gluing technique to construct
new approximate solutions.
In particular, the new Reynolds stress concentrates
on smaller temporal supports.
Then, Sections \ref{Sec-Flow-Endpt1} and \ref{Sec-Rey-Endpt1} are mainly devoted to the
endpoint case $(3/p+1-2\alpha,\infty,p)$.
More precisely, we first construct the velocity perturbations and
prepare the important algebraic identities and analytic estimates in
Section \ref{Sec-Flow-Endpt1}.
Then,
we treat the Reynolds stress in Section \ref{Sec-Rey-Endpt1}.
The other endpoint case $(2\alpha/\gamma+1-2\alpha, \gamma, \infty)$
is mainly treated in Section \ref{Sec-Endpt2}.
At last, the proofs of main results are contained in Section \ref{Sub-Proof-Main}.
Section \ref{Set-App}, i.e., the Appendix, contains some preliminary results used in the proof.
\section{Concentrating the Reynolds error} \label{Sec-Concen-Rey}
This section is devoted to the construction of a new smooth solution $(\widetilde u_q, \mathring{\widetilde R}_q)$ to \eqref{equa-nsr}.
In particular, the new Reynolds stress concentrates on much smaller intervals,
while still keeping the rapid decay in the space $L^1_{t,x}$.
For this purpose,
we divide the time interval $[0,T]$ into $m_{q+1}$ many subintervals $[t_i, t_{i+1}]$,
and denote by $\theta_{q+1}$ the length scale of bad sets supporting the new Reynolds stress.
The two parameters $m_{q+1}$ and $\theta_{q+1}$ are chosen in the following way
\begin{align}\label{def-mq-thetaq}
T/m_{q+1}=\lambda_q^{-12}, \quad
\theta_{q+1}:=(T/m_{q+1})^{1/\eta} \simeq \lambda_q^{-\frac{12}{\eta}},
\end{align}
where $\eta$ is a small constant such that
$$0<\frac{\eta_*}{2}<\eta<\eta_*<1.$$
Without loss of generality, we assume that $m_{q+1}$ is an integer,
so that the time interval is divided evenly.
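In particular, this choice is compatible with \eqref{ne2.14}: since $\eta>\eta_*/2$ and, by \eqref{b-beta-ve}, $b\varepsilon>1000/\eta_*$, we have
\begin{align*}
\theta_{q+1}^{-30}\simeq \lambda_q^{\frac{360}{\eta}}\leq \lambda_q^{\frac{720}{\eta_*}}\ll\lambda_q^{b\varepsilon}=\lambda_{q+1}^{\varepsilon}.
\end{align*}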
We also recall from \cite{dls13} the inverse-divergence operator $\mathcal{R}$,
defined by
\begin{align} \label{calR-def}
& (\mathcal{R} v)^{kl} := \partial_k \Delta^{-1} v^l + \partial_l \Delta^{-1} v^k - \frac{1}{2}(\delta_{kl} + \partial_k \partial_l \Delta^{-1}){\mathrm{div}} \Delta^{-1} v,
\end{align}
where $v$ is mean-free, i.e., $\int_{\mathbb{T}^3} v dx =0$.
Note that, the inverse-divergence operator $\mathcal{R}$
maps mean-free functions to symmetric and trace-free matrices.
Moreover, one has the algebraic identity
\begin{align*}
{\mathrm{div}} \mathcal{R}(v) = v.
\end{align*}
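For the reader's convenience, this identity can be checked directly from \eqref{calR-def}: summing over $l$ and using that $\Delta^{-1}$ and ${\mathrm{div}}$ commute on mean-free functions,
\begin{align*}
\partial_l(\mathcal{R} v)^{kl}
&=\partial_k\Delta^{-1}{\mathrm{div}}\, v+\Delta\Delta^{-1}v^k
-\tfrac12\,\partial_k{\mathrm{div}}\,\Delta^{-1}v-\tfrac12\,\partial_k\Delta\Delta^{-1}{\mathrm{div}}\,\Delta^{-1}v\\
&=\partial_k\Delta^{-1}{\mathrm{div}}\, v+v^k-\partial_k\Delta^{-1}{\mathrm{div}}\, v=v^k.
\end{align*}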
Proposition \ref{prop-nunr} below is the main result of this section,
which provides a new well-prepared solution
$({\bf{w}}t u_q, \mathring{{\bf{w}}t R}_q)$ to \eqref{equa-nsr}
with concentrated support set $I_{q+1}$ and
much smaller length scale $\theta_{q+1}$.
\begin{proposition} \label{prop-nunr}
Consider $(s,p,\gamma)\in \mathcal{A}_1$ for $\alpha \in [5/4,2)$,
or $(s,p,\gamma) \in \mathcal{A}_2$ for $\alpha \in [1,2)$.
Let $(u_q,\mathring{R}_q)$ be a well-prepared smooth solution to \eqref{equa-nsr}
for some set $I_q$ and a length scale $\theta_q$.
Then, there exists another well-prepared solution $(\widetilde u_q,\mathring{\widetilde R}_q)$ to \eqref{equa-nsr}
for some set $I_{q+1}\subseteq I_q$, $0,T\notin I_{q+1}$
and the length scale $\theta_{q+1}(<\theta_q/2)$,
satisfying:
\begin{align}
& \mathring{\widetilde R}_q(t,x)=0 \quad \text{if} \quad \operatorname{dist}(t,I_{q+1}^c)\leq {3}\theta_{q+1}/2,\label{suppnr}\\
& \|\widetilde u_q\|_{L^\infty_tH^3_x}\lesssim \lambda_{q}^{5},\label{nuh3}\\
& \|\widetilde u_q -{u_q} \|_{L^{\infty}_tL^2_x} \lesssim \lambda_q^{-3},\label{uuql2}\\
& \|\mathring{\widetilde R}_q\|_{L^{1}_{t,x}} \leq \lambda_q^{-\frac{\varepsilon_R}{4}}\delta_{q+1}, \label{nrl1}\\
& \|\partial_t^M \nabla^N \mathring{\widetilde R}_q\|_{L^\infty_tH^3_x} \lesssim \theta_{q+1}^{-M-1}m_{q+1}^{\frac{N}{2\alpha}}\lambda_q^{5}
\lesssim \theta_{q+1}^{-M-N-1} \lambda_q^{5}, \label{nrh3}
\end{align}
where the implicit constants are independent of $q$.
\end{proposition}
\subsection{Stability estimates}
We recall the regularity estimates of the strong solutions
from \cite{bcv21} for $\alpha\in [1,5/4)$;
the proof there also applies to the case where $\alpha\in [1,2)$.
\begin{proposition} (\cite{bcv21}) \label{Prop-LWP-Hyper-NLSE}
Let ${\alpha}\in [1,2)$, $v_0\in H_x^3({\mathbb{T}}^3)$ be a mean free function and consider the
Cauchy problem for \eqref{equa-NS} with initial data $v|_{t=t_0}=v_0$. If
\begin{align} \lambda_qbel{t*-t0-v0H3}
0<t_*-t_{0}\leq \frac{c}{\|v_0\|_{H^3_x}}
\end{align}
for some universal constant $c\in (0,1]$,
then there exists a unique strong solution $v$ to \eqref{equa-NS} on
$[t_0,t_{*}]$ satisfying
\begin{align}
\sigmaup _{t \in\left[t_0,t_{*}\right]}\|v(t)\|_{L^{2}_x}^{2}
+2 \int_{t_{0}}^{t_{*}}\|v(t)\|_{{\rm d}ot{H}_x^{{\alpha}lpha}}^{2} d t
& \leq\left\|v_{0}\right\|_{L^{2}_x}^{2}, \lambda_qbel{vl2}\\
\sigmaup _{t \in\left[t_0,t_{*}\right]}\|v(t)\|_{H^{3}_x} &\leq 2\left\|v_{0}\right\|_{H^{3}_x}. \lambda_qbel{vh3}
\end{align}
Moreover, if
\begin{align} \label{con-pdvh3}
0< t_{*}-t_0 \leq \frac{ c}{ \norm{v_0}_{H^3_x} (1+ \norm{v_0}_{L^2_x})^{\frac{1}{2\alpha-1}}},
\end{align}
then it holds that for any $N\geq 0$ and $M \in\{0,1\}$,
\begin{align}
\sup_{t\in (t_0,t_{*}]} |t-t_0|^{\frac{N}{2\alpha}+M}
\norm{\partial_t^M \nabla^N v(t)}_{H_x^3} \lesssim \norm{v_0}_{H^3_x}\,,
\label{pdvh3}
\end{align}
where the implicit constant depends on ${\alpha}lpha, N, M$.
\end{proposition}
On each subinterval $[t_i, t_{i+1}+\theta_{q+1}]$,
$0\leq i\leq m_{q+1}-1$,
we solve the following hyperdissipative Navier-Stokes equations
\begin{equation}\label{equa-nsuq}
\left\{\aligned
&\partial_tv_i +\nu(-\Delta)^{\alpha} v_i+(v_i\cdot \nabla )v_i + \nabla P_i=0, \\
& {\mathrm{div}} \ v_i = 0,\\
& v_i|_{t=t_i}=u_q(t_i).
\endaligned
\right.
\end{equation}
The existence of strong solutions to \eqref{equa-nsuq} can be guaranteed by the classical local well-posedness
theory for the hyperdissipative Navier-Stokes equations.
Actually, by \eqref{uh3} and \eqref{upth2},
the conditions \eqref{t*-t0-v0H3} and \eqref{con-pdvh3}
are verified with $t_*, t_0$ replaced by $t_{i+1}+\theta_{q+1}$ and $t_i$,
respectively. Thus, there exists a unique solution $v_i$ to \eqref{equa-nsuq}
on $[t_i, t_{i+1}+\theta_{q+1}]$.
Let $w_i:=u_q-v_i$.
Then $w_i: [t_i, t_{i+1}]\times {\mathbb{T}}^3\rightarrow {\mathbb R}^3$ satisfies the following equations
\begin{equation}\label{equa-vi}
\left\{\begin{array}{l}
\partial_{t} w_{i}+(-\Delta)^{\alpha} w_{i}+{\mathrm{div}}(v_i\otimes w_i+w_i\otimes u_q )+\nabla p_i ={\mathrm{div}} \mathring{R}_q, \\
\operatorname{div} w_{i}=0, \\
w_{i}|_{t=t_i}=0,
\end{array}\right.
\end{equation}
for some $p_i:[t_i,t_{i+1}]\times {\mathbb{T}}^3\rightarrow {\mathbb R}$.
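Indeed, \eqref{equa-vi} follows by subtracting \eqref{equa-nsuq} from \eqref{equa-nsr} (normalizing $\nu=1$ as in \eqref{equa-vi}, with $p_i$ collecting the pressure difference), using that $(v_i\cdot \nabla)v_i={\mathrm{div}}(v_i\otimes v_i)$ for divergence-free $v_i$ together with the elementary identity
\begin{align*}
u_q\otimes u_q-v_i\otimes v_i=v_i\otimes(u_q-v_i)+(u_q-v_i)\otimes u_q=v_i\otimes w_i+w_i\otimes u_q.
\end{align*}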
Lemma \ref{lem-est-vi} below contains the stability estimates
for the solutions $w_i$ to \eqref{equa-vi}.
\begin{lemma} (Stability estimates) \label{lem-est-vi}
Let ${\alpha}\in [1,2)$, $1<\rho\leq 2$ and $(u_q,\mathring{R}_q)$ be the well-prepared solution to \eqref{equa-nsr}.
Then, there exists a universal constant $C$,
depending only on ${\alpha}$ and $\rho$,
such that the following estimates hold
\begin{align}
&\|w_i\|_{L^{\infty}([t_i,t_{i+1}+\theta_{q+1}];L^\rho_x)}\leq C\int_{t_i}^{t_{i+1}+\theta_{q+1}}\| |\nabla| \mathring{R}_q(s)\|_{L^\rho_x} ds,\label{est-vilp}\\
&\|w_i\|_{L^{\infty}([t_i,t_{i+1}+\theta_{q+1}];H^3_x)}\leq C\int_{t_i}^{t_{i+1}+\theta_{q+1}}\| |\nabla| \mathring{R}_q(s)\|_{H^3_x} ds,\label{est-vih3}\\
&\|\mathcal{R}w_i\|_{L^{\infty}([t_i,t_{i+1}+\theta_{q+1}];L^\rho_x)}\leq C\int_{t_i}^{t_{i+1}+\theta_{q+1}}\| \mathring{R}_q(s)\|_{L^\rho_x} ds, \label{est-rvi}
\end{align}
where $\mathcal{R}$ is the inverse divergence operator given by \eqref{calR-def}.
\end{lemma}
\begin{proof}
Without loss of generality, we may consider the case $t_i=0$. Let $\mathbb{P}_{H}$ denote the Helmholtz-Leray projector, i.e.,
$\mathbb{P}_{H}=\mathrm{Id}-\nabla\Delta^{-1}{\mathrm{div}}$.
In order to prove \eqref{est-vilp},
we apply the semigroup method and reformulate equation \eqref{equa-vi}
as follows
\begin{align}\label{e2.12}
w_i(t)=\int_{0}^{t} e^{-(t-s)(-\Delta)^{\alpha}} \mathbb{P}_H {\mathrm{div}} (\mathring{R}_q-v_i\otimes w_i-w_i\otimes u_q )(s){\rm d} s.
\end{align}
Applying the classical semigroup estimates (cf. \cite[(3.14)]{bcv21}) to \eqref{e2.12}
we get
\begin{align}\label{e2.13}
\|w_i(t)\|_{L^\rho_x} & \leq \int_{0}^{t} \|e^{-(t-s)(-\Delta)^{\alpha}} \mathbb{P}_H {\mathrm{div}} (\mathring{R}_q-v_i\otimes w_i-w_i\otimes u_q )(s)\|_{L^\rho_x}{\rm d} s \notag\\
& \lesssim \int_{0}^{t} \||\nabla|\mathring{R}_q\|_{L^\rho_x}+(t-s)^{-\frac{1}{2\alpha}}\|(v_i\otimes w_i+w_i\otimes u_q )(s)\|_{L^\rho_x}{\rm d} s \notag \\
& \leq C_* \int_{0}^{t} \||\nabla|\mathring{R}_q\|_{L^\rho_x}+(t-s)^{-\frac{1}{2\alpha}}(\|v_i(s)\|_{L^{\infty}_x}+\|u_q(s)\|_{L^{\infty}_x})\|w_i(s)\|_{L^\rho_x}{\rm d} s,
\end{align}
for some universal constant $C_*$ depending only on $\rho$ and ${\alpha}$.
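Here and below, the semigroup bounds we use are, stated informally for the reader's convenience: for mean-free $f$, $1<\rho<\infty$ and $t>0$,
\begin{align*}
\|e^{-t(-\Delta)^{\alpha}}\mathbb{P}_H{\mathrm{div}}\, f\|_{L^\rho_x}\lesssim \||\nabla| f\|_{L^\rho_x}
\quad\text{and}\quad
\|e^{-t(-\Delta)^{\alpha}}\mathbb{P}_H{\mathrm{div}}\, f\|_{L^\rho_x}\lesssim t^{-\frac{1}{2\alpha}}\|f\|_{L^\rho_x},
\end{align*}
the first coming from the $L^\rho_x$-boundedness of the semigroup and of the Calder\'on-Zygmund operator $\mathbb{P}_H{\mathrm{div}}\,|\nabla|^{-1}$, and the second from the standard smoothing effect of $e^{-t(-\Delta)^{\alpha}}$; the two terms in \eqref{e2.13} correspond to using the first bound on $\mathring{R}_q$ and the second on the quadratic terms.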
We claim that for all $t\in [0,t_1+\theta_{q+1}]$,
\begin{align}\label{e2.14}
\|w_i(t)\|_{L^\rho_x}\leq 2 C_* \int_{0}^{t} \||\nabla|\mathring{R}_q(s)\|_{L^\rho_x}ds.
\end{align}
We prove \eqref{e2.14} via the bootstrap argument.
First note that,
\eqref{e2.14} is valid for $t=0$.
Moreover, if \eqref{e2.14} holds,
we prove that the same estimate also holds with
the constant $2C_*$
replaced by a smaller constant such as $3C_*/2$.
To this end, plugging \eqref{e2.14} into \eqref{e2.13} we get
\begin{align}
\norm{w_i(t)}_{L^\rho_x}
& \leq 2 C_* \int_{0}^{t} \|{\alpha}bs{{\nabla}bla}\mathring{R}_q(s)\|_{L^\rho_x}ds\left( \frac{1}{2} + C_*\left( \norm{v_i}_{L^\infty_{t,x}} + \norm{u_q}_{L^\infty_{t,x}} \right) \int_0^t (t-s)^{-\frac{1}{2{\alpha}lpha}}ds\right) \notag\\
&\leq 2 C_* \int_{0}^{t} \|{\alpha}bs{{\nabla}bla}\mathring{R}_q(s)\|_{L^\rho_x}ds\left( \frac{1}{2} + \frac{2{\alpha} C_*}{2{\alpha}-1}t^{1-\frac{1}{2{\alpha}}}\left( \norm{v_i}_{L^\infty_{t,x}} + \norm{u_q}_{L^\infty_{t,x}} \right) \right) .
\lambda_qbel{e2.15}
\end{align}
Since ${\alpha}\in [1,2)$, for any $t_1$ small enough such that
\begin{align} \lambda_qbel{e2.16}
\frac{2{\alpha}lpha C_*}{2{\alpha}lpha-1} (t_1+\theta_{q+1})^{1-\frac{1}{2{\alpha}}} \left( \norm{v_i}_{L^\infty_{t,x}} + \norm{u_q}_{L^\infty_{t,x}} \right) \leq \frac 14,
\end{align}
we have \eqref{e2.14} with the constant replaced by ${3}C_*/2$.
Concerning the left-hand side of \eqref{e2.16},
using the Sobolev embedding $H^3_x\hookrightarrow L^{\infty}_x$,
\eqref{uh3}, \eqref{def-mq-thetaq} and \eqref{vh3} we have
\begin{align}
\frac{2{\alpha}lpha C_*}{2{\alpha}lpha-1}
(t_1+\theta_{q+1})^{1-\frac{1}{2{\alpha}}}\left(\norm{v_i}_{L^\infty_{t,x}} + \norm{u_q}_{L^\infty_{t,x}} \right)
&\leq 4CC_* (T/m_{q+1})^{1-\frac{1}{2{\alpha}}}\left( \norm{v_i}_{L^\infty_{t}H^3_x} + \norm{u_q}_{L^\infty_{t}H^3_x} \right)\notag\\
&\leq 12CC_* (\lambda_q^{-12})^{\frac12+\frac{{\alpha}-1}{2{\alpha}}} \norm{u_q}_{L^\infty_{t}H^3_x} \notag\\
&\leq C' (\lambda_q^{-12})^{\frac12+\frac{{\alpha}-1}{2{\alpha}}}\lambda_q^5
\leq C' \lambda_q^{-1},
\end{align}
for some universal constant $C'$.
Thus, \eqref{e2.16} is verified
for $a$ sufficiently large such that $C'\lambda_q^{-1}\leq 1/4$.
This yields \eqref{e2.14} and so \eqref{est-vilp}.
Regarding \eqref{est-vih3}, since $H^3_x$ is an algebra,
using the classical semigroup estimates we get
\begin{align}\lambda_qbel{vih3}
\|w_i(t)\|_{H^3_x} & \leq \int_{0}^{t} \|e^{-(t-s)(-\Delta)^{\alpha}} \mathbb{P}_H {\mathrm{div}} (\mathring{R}_q-v_i\otimes w_i-w_i\otimes u_q )(s)\|_{H^3_x}{\rm d} s \notag\\
& \leq C_* \int_{0}^{t} \|{\alpha}bs{{\nabla}bla}\mathring{R}_q\|_{H^3_x}+(t-s)^{-\frac{1}{2{\alpha}}}(\|v_i(s)\|_{H^3_x}+\|u_q(s)\|_{H^3_x})\|w_i(s)\|_{H^3_x}{\rm d} s.
\end{align}
Similarly to \eqref{e2.14}, we claim that
for all $t\in [0,t_1+\theta_{q+1}]$,
\begin{align}\lambda_qbel{claim-vih3}
\|w_i(t)\|_{H^3_x}\leq 2 C_* \int_{0}^{t} \|{\alpha}bs{{\nabla}bla}\mathring{R}_q(s)\|_{H^3_x}ds.
\end{align}
Actually, plugging \eqref{claim-vih3} into \eqref{vih3} we get
\begin{align}
\norm{w_i(t)}_{H^3_x}& \leq 2C_* \int_{0}^{t} \|{\alpha}bs{{\nabla}bla}\mathring{R}_q(s)\|_{H^3_x}ds\left( \frac{1}{2}
+ \frac{2{\alpha} C_*}{2{\alpha}-1}t^{1-\frac{1}{2{\alpha}}}\left( \norm{v_i}_{L^\infty_{t}H^3_{x}} + \norm{u_q}_{L^\infty_{t}H^3_{x}} \right) \right).
\end{align}
In view of \eqref{uh3}, \eqref{def-mq-thetaq} and \eqref{vh3}, one has
\begin{align}\lambda_qbel{e2.23}
\frac{2{\alpha} C_*}{2{\alpha}-1}t^{1-\frac{1}{2{\alpha}}}\left( \norm{v_i}_{L^\infty_{t}H^3_{x}} + \norm{ u_q}_{L^\infty_{t}H^3_{x}} \right)\notag
\leq &\, 6C_*(t_1+\theta_{q+1})^{\frac12+\frac{2{\alpha}-1}{2{\alpha}}} \norm{u_q}_{L^\infty_{t}H^3_x} \notag\\
\leq &\, C' (\lambda_q^{-12})^{\frac12+\frac{2{\alpha}-1}{2{\alpha}}} \lambda_q^{5}
\leq C' \lambda_q^{-1},
\end{align}
where $C'$ is a universal constant.
Thus, we see that the constant in \eqref{claim-vih3} can be improved to $3C_*/2$,
by assuming $a$ sufficiently large such that $C'\lambda_q^{-1}<1/4$,
which, via the bootstrap argument yields \eqref{claim-vih3}
for all $t\in [0,t_1+\theta_{q+1}]$, as claimed.
Thus, estimate \eqref{est-vih3} follows.
It remains to prove \eqref{est-rvi}.
Let
\begin{align} \lambda_qbel{z-wi}
z: = \Delta^{-1}{\mathrm{curl}} w_i.
\end{align}
Note that ${\mathrm{curl}} z=-w_i$,
as $w_i$ is divergence free.
By the boundedness of Calder\'{o}n-Zygmund operators in $L^\rho$ spaces for $1<\rho<{\infty}$,
it holds that for any $t\in [t_i,t_{i+1}]$,
$$ \|\mathcal{R}w_i(t)\|_{L^\rho_x}\leq C_1 \|z(t)\|_{L^\rho_x}, $$
where $C_1$ is a universal constant. Moreover, $z$ satisfies the equation (see e.g. \cite{bcv21,I18} for more details),
\begin{align}
\partialartial_t z + v_i \cdot {\nabla}bla z +(- \Delta)^{\alpha}lpha z
&= \Delta^{-1} {\mathrm{curl}} {\mathrm{div}} \mathring{R}_q + \Delta^{-1} {\mathrm{curl}} {\mathrm{div}} \left( (z\times {\nabla}bla) v_i \right) \notag\\
&\quad + \Delta^{-1} {\nabla}bla {\mathrm{div}} \left( ( z \cdot {\nabla}bla) v_i\right) + \Delta^{-1} {\mathrm{curl}} {\mathrm{div}} \left( ((z\times {\nabla}bla) u_q)^T \right), \lambda_qbel{equa-z}
\end{align}
which can be reformulated as follows
\begin{align}
z(t)
&= \int_0^t e^{-(t-s)(-\Delta)^{\alpha}lpha} \left( \Delta^{-1} {\mathrm{curl}} {\mathrm{div}} \mathring{R}_q + \Delta^{-1} {\mathrm{curl}} {\mathrm{div}} \left( ((z\times {\nabla}bla) u_q)^T \right) - {\mathrm{div}} (v_i \otimes z)\right)(s) {\rm d} s \notag\\
&\qquad + \int_0^t e^{-(t-s)(-\Delta)^{\alpha}lpha} \left( \Delta^{-1} {\mathrm{curl}} {\mathrm{div}} \left( (z\times {\nabla}bla) v_i \right) + \Delta^{-1} {\nabla}bla {\mathrm{div}} \left( ( z \cdot {\nabla}bla) v_i\right) \right)(s) {\rm d} s.
\lambda_qbel{euqa-zinte}
\end{align}
Then, by virtue of the boundedness of Calder\'{o}n-Zygmund operators in $L^\rho$, $\rho\in(1,2)$,
and the classical semigroup estimates,
we derive
\begin{align}
\norm{z(t)}_{L^\rho_x} &\leq C_* \bigg(\int_0^t \norm{\mathring{R}_q(s)}_{L^\rho_x}{\rm d} s + \left( \norm{{\nabla}bla v_i }_{L^\infty_{t,x}}
+ \norm{{\nabla}bla u_q}_{L^\infty_{t,x}} \right) \int_0^t \norm{z(s)}_{L^\rho_x} {\rm d} s\notag\\
&\qquad\quad + \norm{v_i}_{L^\infty_{t,x}} \int_0^t (t-s)^{-\frac{1}{2{\alpha}lpha}}\norm{z(s)}_{L^\rho_x} {\rm d} s\bigg),
\lambda_qbel{e2.20}
\end{align}
where $C_*$ is a universal constant depending only on $\rho$ and ${\alpha}$.
As in the case of \eqref{e2.14},
we claim that for any $t\in [0,t_1+\theta_{q+1}]$,
\begin{align}
&\|z(t)\|_{L^\rho_x}\leq 2C_*\int_{0}^{t}\| \mathring{R}_q(s)\|_{L^\rho_x} {\rm d} s. \lambda_qbel{e2.21}
\end{align}
For this purpose, inserting \eqref{e2.21} into \eqref{e2.20} we get
\begin{align}
\norm{z(t)}_{L^\rho_x}& \leq 2 C_* \int_{0}^{t} \| \mathring{R}_q(s)\|_{L^\rho_x}{\rm d} s\left( \frac{1}{2}
+ C_*t\left( \norm{{\nabla}bla v_i}_{L^\infty_{t,x}} + \norm{{\nabla}bla u_q}_{L^\infty_{t,x}} \right)+ \frac{2{\alpha} C_*}{2{\alpha}-1}t^{1-\frac{1}{2{\alpha}}}\norm{v_i}_{L^\infty_{t,x}}\right) .
\lambda_qbel{e2.22}
\end{align}
Then, by \eqref{uh3}, \eqref{def-mq-thetaq}, \eqref{vh3} and the Sobolev embedding,
\begin{align}\lambda_qbel{e2.23}
&C_*t\left( \norm{{\nabla}bla v_i}_{L^\infty_{t,x}}
+ \norm{{\nabla}bla u_q}_{L^\infty_{t,x}} \right)
+ \frac{2{\alpha} C_*}{2{\alpha}-1}t^{1-\frac{1}{2{\alpha}}}\norm{v_i}_{L^\infty_{t,x}}\notag\\
\leq &\, CC_*\left((t_1+\theta_{q+1})\left( \norm{v_i}_{L^\infty_{t}H^3_x}
+ \norm{u_q}_{L^\infty_{t}H^3_x} \right)
+ 2(t_1+\theta_{q+1})^{1-\frac{1}{2{\alpha}}}\norm{v_i}_{L^\infty_{t}H^3_x}\right)\notag\\
\leq &\, CC_*(3(t_1+\theta_{q+1})+4(t_1+\theta_{q+1})^{1-\frac{1}{2{\alpha}}})\norm{u_q}_{L^\infty_{t}H^3_x}\notag\\
\leq &\, C'\left(\lambda_q^{-12}+(\lambda_q^{-12})^{\frac12+\frac{2{\alpha}-1}{2{\alpha}}}\right) \lambda_q^{5}
\leq 2C'\lambda_q^{-1},
\end{align}
which along with \eqref{e2.22} yields that
for $a$ sufficiently large such that $2C'\lambda_q^{-1}<1/4$,
the constant in \eqref{e2.21} can be replaced by $ 3C_*/2$.
This yields \eqref{e2.21} for all $t\in [0,t_1+\theta_{q+1}]$, as claimed.
Thus, in view of \eqref{z-wi},
we prove \eqref{est-rvi}.
Therefore, the proof is complete.
\end{proof}
\subsection{Temporal gluing of local solutions}
From the previous section,
we see that $v_i$ is exactly the solution to the hyperdissipative Navier-Stokes equations \eqref{equa-NS}
on each subinterval $[t_i,t_{i+1}+\theta_{q+1}]$, $0\leq i\leq m_{q+1}-1$.
Then, we glue the local solutions together
in an appropriate way such that the glued solution $\sum_i \chi_i v_i$ is an exact solution to \eqref{equa-NS}
on most of the time interval $[0,T]$,
while the Reynolds stress error has smaller disjoint supports in time.
More precisely, we let $\{\chi_i\}_{i=0}^{m_{q+1}-1}$ be a $C_0^{\infty}$ partition of unity on $[0,T]$ such that
$$
0\leq \chi_i(t) \leq 1, \quad\text{for} \quad t\in[0,T],
$$
for $0<i< m_{q+1}-1$,
\begin{align}\label{def-chi1}
\chi_{i}= \begin{cases}1 & \text { if } t_{i}+\theta_{q+1} \leq t \leq t_{i+1}, \\
0 & \text { if } t\leq t_{i}, \text { or } t \geq t_{i+1}+\theta_{q+1},\end{cases}
\end{align}
and for $i=0$,
\begin{align}\label{def-chi2}
\chi_{i}= \begin{cases}1 & \text { if } 0 \leq t \leq t_{i+1},
\\ 0 & \text { if } t \geq t_{i+1}+\theta_{q+1},\end{cases}
\end{align}
and for $i= m_{q+1}-1$,
\begin{align}\lambda_qbel{def-chi3}
\chi_{i}= \begin{cases}1 & \text { if } t_{i}+\theta_{q+1} \leq t \leq T, \\
0 & \text { if } t \leq t_{i}.\end{cases}
\end{align}
Furthermore, we assume that $\chi_i$, $0\leq i\leq m_{q+1}-1$,
satisfy the following bounds,
\begin{align}\label{est-chi}
\|\partial_t^M \chi_i\|_{L^{{\infty}}_t}\lesssim \theta_{q+1}^{-M},
\end{align}
where the implicit constant is independent of $\theta_{q+1}$ and $i, M\geq 0$.
Now, let
\begin{align}\label{def-wtu}
\widetilde u_q:=\sum_{i=0}^{m_{q+1}-1} \chi_i v_i.
\end{align}
Note that $\widetilde u_q: [0,T]\times {\mathbb{T}}^3\rightarrow {\mathbb R}^3$ is divergence and mean free.
Moreover, for $t\in [t_i, t_{i+1}]$, we have
\[
\widetilde u_q=(1-\chi_i)v_{i-1}+\chi_i v_i,
\]
and $\widetilde u_q$ satisfies
\begin{align}\label{equa-wtu}
\partial_{t} \widetilde u_q+ \nu (-\Delta)^{\alpha} \widetilde u_q +\operatorname{div}(\widetilde u_q \otimes \widetilde u_q) +\nabla \widetilde p={\mathrm{div}} \mathring{\widetilde R}_q,
\end{align}
where
the new Reynolds stress is of form
\begin{align}\label{def-nr}
\mathring{\widetilde R}_q =\partial_t\chi_i\mathcal{R}(v_{i}-v_{i-1}) -\chi_{i}(1-\chi_{i})((v_{i}-v_{i-1})\mathring\otimes (v_{i}-v_{i-1})),
\end{align}
and the pressure $\widetilde p:[0,T] \times \mathbb{T}^{3} \rightarrow \mathbb{R}$ is given by
\begin{align}\label{def-wp}
\widetilde p= \chi_{i} p_{i}+(1-\chi_i)p_{i-1}-\chi_i(1-\chi_i)\left(|v_{i}-v_{i-1}|^2-\int_{{\mathbb{T}}^3}|v_{i}-v_{i-1}|^2{\rm d} x\right).
\end{align}
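Let us sketch, for the reader's convenience, how \eqref{equa-wtu}-\eqref{def-wp} are obtained. On $[t_i,t_{i+1}]$ we have $\widetilde u_q=(1-\chi_i)v_{i-1}+\chi_i v_i$ with both $v_{i-1}$ and $v_i$ solving \eqref{equa-NS}, so
\begin{align*}
&\partial_t\widetilde u_q+\nu(-\Delta)^{\alpha}\widetilde u_q+{\mathrm{div}}(\widetilde u_q\otimes\widetilde u_q)+\nabla\big((1-\chi_i)P_{i-1}+\chi_iP_i\big)\\
&\qquad=\partial_t\chi_i\,(v_i-v_{i-1})-{\mathrm{div}}\big(\chi_i(1-\chi_i)(v_i-v_{i-1})\otimes(v_i-v_{i-1})\big),
\end{align*}
where $P_i$ denotes the pressure from \eqref{equa-nsvi} and we used the algebraic identity $(1-\chi_i)v_{i-1}\otimes v_{i-1}+\chi_i v_i\otimes v_i-\widetilde u_q\otimes\widetilde u_q=\chi_i(1-\chi_i)(v_i-v_{i-1})\otimes(v_i-v_{i-1})$. Writing $v_i-v_{i-1}={\mathrm{div}}\,\mathcal{R}(v_i-v_{i-1})$ and absorbing the trace part of the quadratic term into the pressure yields \eqref{def-nr} and \eqref{def-wp}.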
\subsection{Proof of Proposition~\ref{prop-nunr}}
Using the definition of $\chi_i$ and $v_i$, \eqref{uh3} and \eqref{vh3} we get
\begin{align}\label{ver-nuh3}
\|\widetilde u_q\|_{L^{\infty}_tH^3_x}& \leq \|\sum_i \chi_i v_i\|_{L^{\infty}_tH^3_x}\notag\\
&\leq \sup_i \left(\|(1-\chi_i)v_{i-1}\|_{L^{\infty}_t(\operatorname{supp}(\chi_i\chi_{i-1});H^3_x)}+\|\chi_i v_i\|_{L^{\infty}_t(\operatorname{supp}(\chi_i);H^3_x)}\right)\notag\\
&\leq 4\|u_q\|_{L^{\infty}_tH^3_x}\lesssim \lambda_q^{5},
\end{align}
which yields \eqref{nuh3}.
Regarding the estimate \eqref{uuql2},
using \eqref{rh3}, \eqref{def-mq-thetaq} and \eqref{est-vilp}
we get
\begin{align}\label{ne2.42}
\|\widetilde u_q-u_q\|_{L^{\infty}_tL^2_{x}}& \leq \|\sum_i \chi_i(v_i-u_q)\|_{L^{\infty}_tL^2_{x}} \notag\\
&\leq \sup_i\left(\|v_i-u_q\|_{L^{\infty}_t(\operatorname{supp}(\chi_i);L^2_x)}+\|v_{i-1}-u_q\|_{L^{\infty}_t(\operatorname{supp}(\chi_i\chi_{i-1});L^2_x)}\right)\notag\\
& \lesssim \sup_i|\operatorname{supp}(\chi_i)|\| |\nabla| \mathring{R}_q\|_{L^{\infty}_tL^2_x}\notag\\
&\lesssim m_{q+1}^{-1}\|\mathring{R}_q\|_{L^{\infty}_tH^3_x}\lesssim m_{q+1}^{-1}\lambda_q^9 \lesssim \lambda_q^{-3}.
\end{align}
Hence, estimate \eqref{uuql2} is verified.
Concerning the $L^1$-estimate of the new Reynolds stress, by \eqref{def-nr},
\begin{align}
\|\mathring{\widetilde R}_q\|_{L_{t,x}^{1}} & \leq\|\partial_t\chi_i\mathcal{R}(v_{i}-v_{i-1})\|_{L_{t,x}^1 }+\|\chi_{i}(1-\chi_{i})((v_{i}-v_{i-1})\mathring\otimes (v_{i}-v_{i-1}))\|_{L_{t,x}^{1}} \notag\\
&=:J_1+J_2.\label{e2.33}
\end{align}
Regarding the estimate of $J_1$, we choose
\begin{align*}
1<\rho<\frac{4{\varepsilon}_R+36+8\beta b}{{\varepsilon}_R+36+8\beta b},
\end{align*}
where $b,\beta$ are given by \eqref{b-beta-ve} and
${\varepsilon}_R$ is given by \eqref{rl1},
to get
\begin{align}
J_1 \leq& \sigmaum_i \|\partial_t \chi_i\|_{L^1_t}\|\mathcal{R}(v_i-v_{i-1})\|_{L^{\infty}_tL^\rho_x} \notag \\
\lesssim& \sigmaum_i \|\mathcal{R}(w_i-w_{i-1})\|_{L^{\infty}_t(\sigmaupp(\chi_i\chi_{i-1});L^\rho_x)},
\end{align}
which along with the Gagliardo-Nirenberg inequality,
\eqref{rh3}, \eqref{rl1} and \eqref{est-rvi} yields that
\begin{align} \label{est-j1}
J_1 \lesssim& \sum_i\int_{t_i}^{t_{i+1}} \|\mathring{R}_q(s)\|_{L^\rho_x}{\rm d} s \notag\\
\lesssim& \|\mathring{R}_q\|_{L^1_{t,x}}^{1-\frac{2(\rho-1)}{3\rho}}\|\mathring{R}_q\|_{L^{\infty}_tH^3_x}^{\frac{2(\rho-1)}{3\rho}} \notag \\
\lesssim& \lambda_q^{-\varepsilon_R+(\varepsilon_R+9)\frac{2(\rho-1)}{3\rho}}\delta_{q+1}^{1-\frac{2(\rho-1)}{3\rho}} \leq \lambda_q^{-\frac{3\varepsilon_R}{8}}\delta_{q+1},
\end{align}
where in the last step we chose $a$ sufficiently large
and used $\lambda_q^{-{\varepsilon}_R/8}$ to absorb the implicit constant.
Concerning the estimate of $J_2$, using \eqref{est-vilp} we obtain
\begin{align}
J_2 & \leq \sigmaum_i \|\chi_{i}-\chi_{i}^{2} \|_{L^1_t} \| ((v_{i}-v_{i-1})\mathring\otimes (v_{i}-v_{i-1}))\|_{L^{\infty}_tL^1_x} \notag \\
&\lesssim \sigmaum_i |\sigmaupp_{t} \chi_i(1-\chi_i)|\|\chi_{i}-\chi_{i}^{2} \|_{L^{\infty}_t} \| w_{i}-w_{i-1}\|_{L^{\infty}_t(\sigmaupp(\chi_i\chi_{i-1});L^2_x)}^2 \notag \\
& \lesssim \sigmaum_i \theta_{q+1} \left(\int_{t_i}^{t_{i+1}}\||{\nabla}bla|\mathring{R}_q(s)\|_{L^2_x} {\rm d} s\right)^2 \notag\\
&\lesssim \theta_{q+1} \||{\nabla}bla|\mathring{R}_q\|_{L^1_tL^2_x} ^2,
\end{align}
then, using \eqref{rh3}, \eqref{rl1} and the facts that $\lambda_q^{-1}\ll \delta_{q+1}^{1/9}$ and $0<\eta<1$, we arrive at
\begin{align} \label{est-j2}
J_2 \lesssim m_{q+1}^{-\frac{1}{\eta}} \|\mathring{R}_q\|_{L^1_{t,x}}^{\frac{8}{9}}\|\mathring{R}_q\|_{L^1_tH^3_x}^{\frac{10}{9}}
\lesssim \lambda_q^{-12/\eta}\lambda_q^{-\frac89\varepsilon_R}\delta_{q+1}^{\frac89} \lambda_q^{10}\leq \lambda_q^{-\frac12\varepsilon_R}\delta_{q+1},
\end{align}
where we also chose $a$ sufficiently large and used $\lambda_q^{-\varepsilon_R/4}$ to absorb the implicit constant.
Thus, we conclude from \eqref{est-j1}-\eqref{est-j2} that
\begin{align}\label{est-trq}
\|\mathring{\widetilde R}_q\|_{L^1_{t,x}}\leq \lambda_q^{-\frac{\varepsilon_R}{4}}\delta_{q+1},
\end{align}
and so \eqref{nrl1} is verified.
Regarding the estimate \eqref{nrh3},
by \eqref{def-nr}, for $t\in[t_i,t_{i+1}]$, $0\leq i\leq m_{q+1}-1$,
\begin{align}\lambda_qbel{e2.45}
\|\partialartial_t^M {\nabla}bla^N \mathring{{\bf{w}}t R}_q\|_{L^\infty_tH^3_x} & \leq\| \partialartial_t^M {\nabla}bla^N(\partialartial_t\chi_i\mathcal{R}(v_{i}-v_{i-1}))\|_{L^\infty_tH^3_x }\notag\\
&\quad +\|\partialartial_t^M {\nabla}bla^N(\chi_{i}(1-\chi_{i})((v_{i}-v_{i-1})\mathring\otimes (v_{i}-v_{i-1})))\|_{L^\infty_tH^3_x}.
\end{align}
For the first term on the right-hand side of \eqref{e2.45},
by \eqref{uh3}, \eqref{vh3} and the fact that $\theta_{q+1}^{-1} \geq m_{q+1}$,
\begin{align}\label{e2.46}
&\norm{\partial_t^M \nabla^N (\partial_t \chi_i \mathcal{R}(v_i-v_{i-1})) }_{L^\infty_tH^3_x} \notag\\
\lesssim &\sum_{M_1+M_2=M}\norm{\partial_t^{M_1+1} \chi_i}_{L^\infty_t} \left(\norm{\partial_t^{M_2} \nabla^N v_i}_{L^\infty_t(\operatorname{supp}(\chi_i);H^3_x)} + \norm{\partial_t^{M_2} \nabla^N v_{i-1}}_{L^\infty_t(\operatorname{supp}(\chi_{i-1});H^3_x)}\right) \notag\\
\lesssim& \sum_{M_1+M_2=M} \theta_{q+1}^{-M_1-1}m_{q+1}^{\frac{N}{2\alpha} + M_2} \lambda_q^{5} \lesssim \theta_{q+1}^{-M-1} m_{q+1}^{\frac{N}{2\alpha} } \lambda_q^{5} \,,
\end{align}
where the implicit constants are independent of $i$ and $q$.
In order to control the second term of \eqref{e2.45}, using \eqref{uh3}, \eqref{vh3}, \eqref{pdvh3}
and the fact that $\theta_{q+1}^{-1} \geq m_{q+1} \geq \lambda_q^{5}$ we obtain
\begin{align}\label{e2.47}
&\norm{\partial_t^M \nabla^N ( \chi_i (1-\chi_i) (v_i-v_{i-1}) \mathring\otimes(v_i-v_{i-1}))}_{L^\infty_tH^3_x} \notag\\
\lesssim & \sum_{M_1+M_2=M} \norm{\partial_t^{M_1}( \chi_i (1-\chi_i))}_{L^{\infty}_t}\norm{\partial_t^{M_2} \nabla^N((v_i-v_{i-1}) \mathring\otimes(v_i-v_{i-1}))}_{L^\infty_t(\operatorname{supp}(\chi_i\chi_{i-1});H^3_x)} \notag\\
\lesssim & \sum_{M_1+M_2=M} \theta_{q+1}^{-M_1} \norm{\partial_t^{M_2} \nabla^N ( (v_i-v_{i-1}) \otimes(v_i-v_{i-1}))}_{L^\infty_t(\operatorname{supp}(\chi_i\chi_{i-1});H^3_x)} \notag\\
\lesssim & \sum_{M_1+M_2=M} \theta_{q+1}^{-M_1}m_{q+1}^{\frac{N}{2\alpha}+M_2 } \lambda_q^{10}
\lesssim \theta_{q+1}^{-M-1} m_{q+1}^{\frac{N}{2\alpha} }\lambda_q^{5} ,
\end{align}
where the implicit constants are independent of $i$ and $q$.
Combining \eqref{e2.46} and \eqref{e2.47}, we obtain \eqref{nrh3}.
It remains to prove \eqref{suppnr} and the well-preparedness of
$(\widetilde u_q,\mathring{\widetilde R}_q)$.
By the choice of $m_{q+1}$ and $\theta_{q+1}$
in \eqref{def-mq-thetaq},
\begin{align}\label{thetaq1}
\theta_{q+1}= \lambda_{q}^{-\frac{12}{\eta}}\ll \frac12\lambda_{q-1}^{-\frac{12}{\eta}}=\frac12\theta_q.
\end{align}
Since $(u_q,\mathring{R}_q)$ is a well-prepared solution to \eqref{equa-nsr}
for the set $I_{q}$ and the length scale $\theta_q$,
we note that $u_q$ is an exact solution to \eqref{equa-NS} on the $\theta_{q}$-neighborhood of $I^c_q$.
In particular, if $t_{i-1}$ and $t_{i}$ both lie in this region,
we have $\widetilde u_q =v_{i-1}=v_{i}=u_q$ on the overlapping region $\operatorname{supp} (\chi_{i-1}\chi_{i})$,
and thus $\widetilde u_q$ is an exact solution there.
Based on the above argument, we define the index set $\mathcal{C}$ by
\begin{align}\lambda_qbel{def-indexsetb}
\mathcal{C}:=\left\{ i\in \mathbb{Z}: 1\leq i\leq m_{q+1}-1\ \text{and}\ \mathring{R}_q\not\equiv 0\ \text{on}\ [t_{i-1},t_{i}+\theta_{q+1}]\cap [0,T] \right\},
\end{align}
and choose $I_{q+1}$ in the way
\begin{align} \lambda_qbel{Iq1-C-def}
I_{q+1} := \bigcup_{i\in \mathcal{C}} \left[t_i-2\theta_{q+1},t_i+3\theta_{q+1} \right].
\end{align}
We claim that for any $qg_{(\tau)}eq 0$,
\begin{align}\lambda_qbel{iq1}
I_{q+1}\sigmaubseteq I_q.
\end{align}
To this end, it is equivalent to show that for any $t_*\in I_{q}^c$,
it holds that $t_*\in I_{q+1}^c$.
We argue by contradiction.
Suppose that $t_*\in I_{q+1}$, then
\begin{align}\lambda_qbel{e3.56}
\mathring{R}_q \not\equiv 0\quad \text{on}\quad [t_*-2(T/m_{q+1}),t_*+2(T/m_{q+1})+\theta_{q+1}].
\end{align}
Since $t_*\in I_{q}^c$ and $2T/m_{q+1}+\theta_{q+1} \ll \theta_q/2$,
we infer that
\begin{align*}
\operatorname{dist} (t,I_{q}^c)\leq \frac{\theta_q}{2}, \ \
\forall t\in [t_*-2(T/m_{q+1}),t_*+2(T/m_{q+1})+\theta_{q+1}],
\end{align*}
which along with the well-preparedness of $(u_q,\mathring{R}_q)$ yields that
\begin{align*}
\mathring{R}_q(t)=0\quad \text{for all}\quad t\in [t_*-2(T/m_{q+1}),t_*+2(T/m_{q+1})+\theta_{q+1}].
\end{align*}
This leads to the contradiction with \eqref{e3.56}.
Thus, \eqref{iq1} is proved.
We next prove \eqref{suppnr}, i.e.,
\begin{align*}
\mathring{\widetilde R}_q(t,x)=0 \quad \text{if} \quad \operatorname{dist}(t,I_{q+1}^c)\leq {3}\theta_{q+1}/2.
\end{align*}
For this purpose,
we take any $t\in [0,T]$ such that $\operatorname{dist}(t, I_{q+1}^c)\leq {3}\theta_{q+1}/2$.
If $t\in I_{q+1}$,
by \eqref{Iq1-C-def},
$t\in [t_i-2\theta_{q+1},t_i+3\theta_{q+1}]$ for some $i\in \mathcal{C}$.
Then, we have
\begin{align}\lambda_qbel{e3.57}
t\in [t_i-2\theta_{q+1},t_i-\frac{\theta_{q+1}}{2}]\quad \text{or}\quad t\in [t_i+\frac{3\theta_{q+1}}{2},t_i+3\theta_{q+1}].
\end{align}
For $t\in [t_i-2\theta_{q+1},t_i-\theta_{q+1}/2]$,
since $\chi_{i-1}(t) =1$ and $\partial_t \chi_{i-1}(t)=0$,
we infer from \eqref{def-nr} with $i-1$ replacing $i$ that
$\mathring{\widetilde R}_q(t) = 0$.
Similar arguments also apply to the case where $t\in [t_i+3\theta_{q+1}/2,t_i+3\theta_{q+1}]$.
If $t\in I_{q+1}^c$, then there exists $0\leq j\leq m_{q+1}-1$, such that $t\in [t_j,t_{j+1}]$.
If $t\in [t_j+\theta_{q+1},t_{j+1}]$,
since $\chi_{j}(t) =0$, $\partialartial_t\chi_j(t) = 0$,
we still have from \eqref{def-nr} with $j$ replacing $i$ that
$\mathring{{\bf{w}}t R}_q(t) =0$.
For $t\in [t_j,t_{j}+\theta_{q+1}]$,
we have that $j\notin \mathcal{C}$,
otherwise $t\in [t_j-2\theta_{q+1},t_j+3\theta_{q+1}]\sigmaubseteq I_{q+1}$.
Hence, it follows from \eqref{def-indexsetb} that
\begin{align*}
\mathring{R}_q\equiv0 \quad \text{on}\quad [t_{j-1},t_j+\theta_{q+1}].
\end{align*}
This means that $u_q$ solves \eqref{equa-NS} on $[t_{j-1},t_j+\theta_{q+1}]$.
Thus, by the uniqueness in Proposition \ref{Prop-LWP-Hyper-NLSE},
\begin{align*}
v_{j-1}=v_j=u_q\ \ on\ [t_j, t_j+\theta_{q+1}].
\end{align*}
Plugging this into \eqref{def-nr} with $j$ replacing $i$
we obtain $\mathring{{\bf{w}}t R}_q(t) =0 $
and thus finish the proof of \eqref{suppnr}.
Therefore, the proof of Proposition~\ref{prop-nunr} is complete.
\hfill $\square$
\section{Velocity perturbations in the supercritical regime $\mathcal{A}_1$} \label{Sec-Flow-Endpt1}
In this section we construct the crucial velocity perturbations
for the supercritical regime $\mathcal{A}_1$, which in particular covers the endpoint case
$(s,\gamma, p)=(3/p+1-2\alpha, \infty, p)$,
$\alpha \in [5/4, 2)$.
The fundamental building blocks are indexed by six parameters
$r_{\perp}$, $r_{\parallel}$, $\lambda$, $\mu$, $\tau$ and $\sigma$,
chosen in the following way:
\begin{equation}\label{larsrp}
r_{\perp} := \lambda_{q+1}^{-1+2\varepsilon},\ r_{\parallel} := \lambda_{q+1}^{-1+4\varepsilon},\
\lambda := \lambda_{q+1},\ \mu:=\lambda_{q+1}^{2\alpha-1+2\varepsilon}, \
\tau:=\lambda_{q+1}^{4\alpha-5+11\varepsilon}, \ \sigma:=\lambda_{q+1}^{2\varepsilon},
\end{equation}
where $\varepsilon$ is a sufficiently small constant satisfying \eqref{e3.1}.
We note that the parameters are chosen in this way
in order to control both the strong dissipativity
and the oscillation errors, as encoded in the constraints \eqref{setpw}-\eqref{setrosc2}.
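For the reader's convenience, let us record how two representative constraints are verified by the choice \eqref{larsrp}; the remaining ones can be checked by the same elementary bookkeeping of exponents. For \eqref{setdeltat} and \eqref{setdeltap} we have
\begin{align*}
\lambda^{2\alpha-1}\mu^{-1}&=\lambda_{q+1}^{(2\alpha-1)-(2\alpha-1+2\varepsilon)}=\lambda_{q+1}^{-2\varepsilon}\ll 1,\\
\lambda^{2\alpha-1}r_{\perp}r_{\parallel}^{\frac12}\tau^{-\frac12}
&=\lambda_{q+1}^{(2\alpha-1)+(-1+2\varepsilon)+\frac12(-1+4\varepsilon)-\frac12(4\alpha-5+11\varepsilon)}
=\lambda_{q+1}^{-\frac{3\varepsilon}{2}}\ll 1,
\end{align*}
while, for instance, $\mu^{-1}\sigma\tau=\lambda_{q+1}^{2\alpha-4+11\varepsilon}\ll 1$ holds since $\varepsilon\leq\frac{2-\alpha}{20}$ by \eqref{e3.1}, which gives \eqref{setrosc2}.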
\subsection{Spatial building blocks}
We use the intermittent jets, first introduced in \cite{bcv21},
as the basic spatial building blocks.
More precisely,
we let $\Phi : \mathbb{R}^2 \to \mathbb{R}$ be a smooth cut-off function supported on a ball of radius $1$
and normalize $\Phi$ such that $\phi := - \Delta\Phi$ satisfies
\begin{equation}\label{e4.91}
\frac{1}{4 \pi^2}\int_{\mathbb{R}^2} \phi^2(x){\rm d} x = 1.
\end{equation}
Moreover, let $\psi: \mathbb{R} \rightarrow \mathbb{R}$ be a smooth and mean-zero function, satisfying
\begin{equation}\label{e4.92}
\frac{1}{2 \pi} \int_{\mathbb{R}} \psi^{2}\left(x\right) {\rm d} x=1, \quad \operatorname{supp}\psi\subseteq [-1,1].
\end{equation}
The corresponding rescaled cut-off functions are defined by
\begin{equation*}
\phi_{r_{\perp}}(x) := {r_{\perp}^{-1}}\phi\left(\frac{x}{r_{\perp}}\right), \quad
\Phi_{r_{\perp}}(x):= {r_{\perp}^{-1}} \Phi\left(\frac{x}{r_{\perp}}\right),\quad
\psi_{r_{\parallel}}\left(x\right) := {r_{\|}^{- \frac 12}} \psi\left(\frac{x}{r_{\|}}\right).
\end{equation*}
With this scaling, $\phi_{r_{\perp}}$ is supported in the ball of radius $r_{\perp}$ in ${\mathbb R}^2$ and $\psi_{r_{\parallel}}$ is supported in the ball of radius $r_{\parallel}$ in ${\mathbb{R}}$. By an abuse of notation,
we periodize $\phi_{r_{\perp}}$, $\Phi_{r_{\perp}}$ and $\psi_{r_{\parallel}}$ so that
$\phi_{r_{\perp}}$, $\Phi_{r_{\perp}}$ are treated as periodic functions defined on $\mathbb{T}^2$ and $\psi_{r_{\parallel}}$ is treated as a periodic function defined on $\mathbb{T}$.
Let $\Lambda \sigmaubset \mathbb{S}^2 \cap \mathbb{Q}^3$ be the wavevector set as in the Geometrical Lemma ~\ref{geometric lem 2},
and let $(k,k_1,k_2)$ be the associated orthonormal basis for every $k\in \Lambda$.
The \textit{intermittent jets} are defined by
\begin{equation*}
W_{(k)} := \partialsi_{r_{\parallel}}(\lambda_qmbda r_{\perp} N_{\Lambda}(k_1\cdot x+\mu t))\partialhi_{r_{\perp}}( \lambda_qmbda r_{\perp} N_{\Lambda}k\cdot (x-{\alpha}lpha_k),\lambda_qmbda r_{\perp} N_{\Lambda}k_2\cdot (x-{\alpha}lpha_k))k_1,\ \ k \in \Lambda.
\end{equation*}
Here, $N_{\Lambda}$ is given by \eqref{NLambda}.
The parameters $r_{\parallel}$ and $r_{\perp}$ measure the concentration effect of the intermittent jets,
and $\mu$ is the temporal oscillation parameter.
The shifts ${\alpha}_k\in {\mathbb R}^3$ are chosen suitably such that $W_{(k)}$ and $W_{(k')}$ have disjoint supports if $k\neq k'$.
The existence of such ${\alpha}_k$ can be guaranteed by taking $r_{\perp}$ sufficiently small
(see, e.g., \cite{bv19r}).
For brevity, we set
\begin{equation}\lambda_qbel{snp}
\begin{array}{ll}
&\partialsi_{(k_1)}(x) :=\partialsi_{r_{\parallel}}(\lambda_qmbda r_{\perp} N_{\Lambda}(k_1\cdot x+\mu t)), \\
&\partialhi_{(k)}(x) := \partialhi_{r_{\perp}}( \lambda_qmbda r_{\perp} N_{\Lambda}k\cdot (x-{\alpha}_k),\lambda_qmbda r_{\perp} N_{\Lambda}k_2\cdot (x-{\alpha}_k)), \\
&\mathbb{P}hi_{(k)}(x) := \mathbb{P}hi_{r_{\perp}}( \lambda_qmbda r_{\perp} N_{\Lambda}k\cdot (x-{\alpha}_k),\lambda_qmbda r_{\perp} N_{\Lambda}k_2\cdot (x-{\alpha}_k)),
\end{array}
\end{equation}
and thus
\begin{equation}\lambda_qbel{snwd}
W_{(k)} = \partialsi_{(k_1)}\partialhi_{(k)} k_1,\quad k\in \Lambda.
\end{equation}
Because $W_{(k)}$ is not divergence-free, we also need the corrector
\begin{equation}
\begin{aligned}
\lambda_qbel{corrector vector}
{\bf{w}}t W_{(k)}^c := \frac{1}{\lambda_qmbda^2N_{ \Lambda }^2} {\nabla}bla\partialsi_{(k_1)}\times{\mathrm{curl}}(\mathbb{P}hi_{(k)} k_1)
\end{aligned}
\end{equation}
and let
\begin{align} \lambda_qbel{Vk-def}
W^c_{(k)} := \frac{1}{\lambda_qmbda^2N_{\Lambda}^2 } \partialsi_{(k_1)}\mathbb{P}hi_{(k)} k_1.
\end{align}
Then, by straightforward computations,
\begin{equation}\lambda_qbel{wcwc}
W_{(k)} + {\bf{w}}t W_{(k)}^c
={\mathrm{curl}} {\mathrm{curl}} \left(\frac{1}{\lambda_qmbda^2N_{\Lambda}^2 } \partialsi_{(k_1)}\mathbb{P}hi_{(k)} k_1\right)
={\mathrm{curl}} {\mathrm{curl}} W^c_{(k)},
\end{equation}
which yields that
\begin{align} \lambda_qbel{div-Wck-Wk-0}
{\mathrm{div}} (W_{(k)}+ {\bf{w}}t W^c_{(k)}) =0.
\end{align}
Lemma \ref{buildingblockestlemma} below contains the key estimates of the intermittent jets.
\begin{lemma} [Estimates of intermittent jets, \cite{bv19r}] \lambda_qbel{buildingblockestlemma}
For $p \in [1,\infty]$, $N,\,M \in \mathbb{N}$, we have
\begin{align}
&\left\|{\nabla}bla^{N} \partialartial_{t}^{M} \partialsi_{(k_1)}\right\|_{C_t L^{p}_{x}}
\lesssim r_{\|}^{\frac 1p- \frac 12}\left(\frac{r_{\partialerp} \lambda_qmbda}{r_{\|}}\right)^{N}
\left(\frac{r_{\partialerp} \lambda_qmbda \mu}{r_{\|}}\right)^{M}, \lambda_qbel{intermittent estimates} \\
&\left\|{\nabla}bla^{N} \partialhi_{(k)}\right\|_{L^{p}_{x}}+\left\|{\nabla}bla^{N} \mathbb{P}hi_{(k)}\right\|_{L^{p}_{x}}
\lesssim r_{\partialerp}^{\frac 2p- 1} \lambda_qmbda^{N}, \lambda_qbel{intermittent estimates2}
\end{align}
where the implicit constants are independent of $r_{\perp},\,r_{\parallel},\,\lambda_qmbda$ and $\mu$. Moreover, it holds that
\begin{align}
&{\rm d}isplaystyle\left\|{\nabla}bla^{N} \partialartial_{t}^{M} W_{(k)}\right\|_{C_t L^{p}_{x}}
+\frac{r_{\|}}{r_{\partialerp}}\left\|{\nabla}bla^{N} \partialartial_{t}^{M} {\bf{w}}t W_{(k)}^{c}\right\|_{C_t L^{p}_{x}}
+\lambda_qmbda^{2}\left\|{\nabla}bla^{N} \partialartial_{t}^{M} W_{(k)}^c\right\|_{C_t L^{p}_{x}}{\rm d}isplaystyle \nonumber \\
&\qquad \lesssim r_{\partialerp}^{\frac 2p- 1} r_{\|}^{\frac 1p- \frac 12} \lambda_qmbda^{N}
\left(\frac{r_{\partialerp} \lambda_qmbda \mu}{r_{\|}}\right)^{M}, \ \ k\in \Lambda, \lambda_qbel{ew}
\end{align}
where the implicit constants are independent of $r_{\perp},\,r_{\parallel},\,\lambda_qmbda$ and $\mu$.
\end{lemma}
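For instance, taking $N=M=0$ and $p=2$ in \eqref{ew} gives $\|W_{(k)}\|_{C_tL^2_x}\lesssim 1$, whereas $p=1$ gives, under the choice \eqref{larsrp},
\begin{align*}
\|W_{(k)}\|_{C_tL^1_x}\lesssim r_{\perp}r_{\parallel}^{\frac12}=\lambda_{q+1}^{-\frac32+4\varepsilon},
\end{align*}
which quantifies the spatial concentration of the intermittent jets.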
\sigmaubsection{Temporal building blocks.}
As already seen in the previous sections,
the space $C_tL^2_x$ is not a suitable candidate
in which to construct non-unique solutions of the hyperdissipative NSE \eqref{equa-NS}
when ${\alpha}lpha g_{(\tau)}eq 5/4$,
due to the well-posedness results of \cite{lions69}.
The key point in the current case
is to exploit temporal intermittency
through suitable temporal building blocks.
These temporal building blocks are indexed by two additional parameters $\tau$ and $\sigmaigma$,
which parameterize the concentration and the oscillation in time, respectively.
In view of the choice \eqref{larsrp},
the strength of the temporal oscillation is proportional to the viscosity of the fluid,
and it provides almost 3D spatial intermittency
when the dissipativity exponent ${\alpha}lpha$ is close to $2$.
More precisely, as in \cite{cl20.2},
we choose $g\in C_c^\infty([0,T])$ as a cut-off function such that
\begin{align*}
{\fint}_{0}^T g^2(t) {\rm d} t=1,
\end{align*}
and then rescale $g$ by
\begin{align}\lambda_qbel{gk1}
g_\tau(t)=\tau^{\frac 12} g(\tau t),
\end{align}
where the parameter $\tau$ is given by \eqref{larsrp}. By an abuse of notation, we periodize $g_\tau$ so that it is treated as a periodic function defined on $[0,T]$.
Then, let
\begin{align} \lambda_qbel{hk}
h_\tau(t):= \int_{0}^t \left(g_\tau^2(s) - 1\right)\ ds,\ \ t\in [0,T],
\end{align}
and set
\begin{align}\lambda_qbel{gk}
g_{(\tau)}:=g_\tau(\sigmaigma t),\ \
h_{(\tau)}(t):= h_\tau(\sigmaigma t),
\end{align}
where $\sigmaigma$ is as in \eqref{larsrp}.
We note that, $h_{(\tau)}$ satisfies
\begin{align} \lambda_qbel{pt-h-gt}
\partial_t(\sigmaigma^{-1} h_{(\tau)}) = g_{(\tau)}^2-1=g_{(\tau)}^2-{\fint}_{0}^T g_{(\tau)}^2(t) {\rm d} t.
\end{align}
This identity will be used in the construction of the temporal corrector ${\bf{w}}o$,
in order to balance the high temporal frequency error in \eqref{mag oscillation cancellation calculation}.
We recall from \cite{cl21,lzz21}
the crucial estimates of $g_{(\tau)}$ and $h_{(\tau)}$,
which are the contents of Lemma \ref{Lem-gk-esti} below.
\begin{lemma} [Estimates of temporal intermittency] \lambda_qbel{Lem-gk-esti}
For $g_{(\tau)}ammamma \in [1,\infty]$, $M \in \mathbb{N}$,
we have
\begin{align}
\left\|\partialartial_{t}^{M}g_{(\tau)} \right\|_{L^{g_{(\tau)}ammamma}_t} \lesssim \sigmaigma^{M}\tau^{M+\frac12-\frac{1}{g_{(\tau)}ammamma}},\lambda_qbel{gk estimate}
\end{align}
where the implicit constants are independent of $\sigmaigma$ and $\tau$.
Moreover, we have
\begin{align}\lambda_qbel{hk-esti}
\|h_{(\tau)}\|_{C_t}\leq 1.
\end{align}
\end{lemma}
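For orientation, the case $M=0$ of \eqref{gk estimate} can be read off from the construction: $g_\tau$ in \eqref{gk1} is a profile of height $\sim\tau^{\frac12}$ supported on a time interval of length $\sim\tau^{-1}$, so that
\begin{align*}
\|g_{(\tau)}\|_{L^{\gamma}_t}\sim\|g_{\tau}\|_{L^{\gamma}_t}=\tau^{\frac12-\frac1\gamma}\|g\|_{L^{\gamma}_t},
\end{align*}
the rescaling by $\sigma$ in \eqref{gk} merely rearranging the periods; each time derivative then produces an extra factor $\sigma\tau$ by the chain rule, which accounts for the factor $\sigma^{M}\tau^{M}$.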
\begin{remark}
Let us mention that the temporal building blocks for MHD in \cite{lzz21} provide almost 1D intermittency, namely,
$\tau\sigmaim\lambda_qmbda_{q+1}^{1-}$.
Here, for the hyperdissipative NSE with ${\alpha}$ close to $2$,
we need to oscillate the temporal building blocks at a much higher frequency, which provides almost 3D spatial intermittency, i.e.,
$\tau\sigmaim\lambda_qmbda_{q+1}^{3-}$.
\end{remark}
\sigmaubsection{Velocity perturbations} \lambda_qbel{Subsec-Velo-perturb}
Below we construct the velocity perturbation,
which consists of the principal part, the incompressibility corrector and two temporal correctors.
For this purpose,
let us first construct the amplitudes of the perturbations,
which provide the crucial cancellation
between the low frequency part of the nonlinearity and the old Reynolds stress.\\
\partialaragraph{\bf Amplitudes}
Let $\chi: [0, \infty) \to \mathbb{R}$ be a smooth cut-off function satisfying
\begin{equation}\lambda_qbel{e4.0}
\chi (z) =
\left\{{\alpha}ligned
& 1,\quad 0 \leq z\leq 1, \\
& z,\quad z g_{(\tau)}eq 2,
\endaligned
\right.
\end{equation}
and
\begin{equation}\lambda_qbel{e4.1}
\frac 12 z \leq \chi(z) \leq 2z \quad \text{for}\quad z \in (1,2).
\end{equation}
Set
\begin{equation}\lambda_qbel{rhob}
\varepsilonrrho(t,x) := 2 \varepsilonrepsilon_u^{-1} \lambda_q^{- \frac{{\varepsilon}_R}{4}} {\rm d}elta_{q+ 1}
\chi\left( \frac{|\mathring{{\bf{w}}t R}_q(t, x) |}{\lambda_q^{-\frac{{\varepsilon}_R}{4}}{\rm d}elta_{q+1} } \right),
\end{equation}
where $\varepsilonrepsilon_u>0$ is the small constant as in the Geometric Lemma \ref{geometric lem 2}.
By \eqref{e4.0}, \eqref{e4.1} and \eqref{rhob},
\begin{equation}\lambda_qbel{rhor}
\left| \frac{\mathring{{\bf{w}}t R}_q}{\varepsilonrrho} \right|
= \left| \frac{\mathring{{\bf{w}}t R}_q}{2 \varepsilonrepsilon_u^{-1}\lambda_q^{-\frac{{\varepsilon}_R}{4}} {\rm d}elta_{q+ 1}\chi
( \lambda_q^{\frac{{\varepsilon}_R}{4}} {\rm d}elta_{q+1} ^{-1} |\mathring{{\bf{w}}t R}_q | )} \right| \leq \varepsilonrepsilon_u,
\end{equation}
and for any $p\in[1,\infty]$,
\begin{align}
\lambda_qbel{rhoblowbound}
&\varepsilonrrhog_{(\tau)}eq \varepsilonrepsilon_u^{-1} \lambda_q^{-\frac{{\varepsilon}_R}{4}}{\rm d}elta_{q+ 1},\\
\lambda_qbel{rhoblp}
&\norm{ \varepsilonrrho }_{L^p_{t,x}} \lesssim \varepsilonrepsilon^{-1}_u ( \lambda_q^{-\frac{{\varepsilon}_R}{4}} {\rm d}elta_{q+1} + \norm{\mathring{{\bf{w}}t R}_q}_{L^p_{t,x}} ).
\end{align}
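Let us briefly justify \eqref{rhor}: writing $z:=\lambda_q^{\frac{\varepsilon_R}{4}}\delta_{q+1}^{-1}|\mathring{\widetilde R}_q|$, the definition \eqref{rhob} gives
\begin{align*}
\left| \frac{\mathring{\widetilde R}_q}{\varrho} \right|=\frac{\varepsilon_u}{2}\,\frac{z}{\chi(z)},
\end{align*}
and by \eqref{e4.0}-\eqref{e4.1} one has $\chi(z)=1\geq z$ for $z\leq 1$ and $\chi(z)\geq z/2$ for $z\geq 1$, so that in either case the right-hand side is at most $\varepsilon_u$.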
Moreover, by \eqref{nrh3}, \eqref{rhoblowbound} and the standard H\"{o}lder estimate (see \cite[(130)]{bdis15}), for $1\leq N\leq 9$,
\begin{align}
& \norm{ \varepsilonrrho }_{C_{t,x}} \lesssim \theta_{q+1}^{-2} , \quad \norm{ \varepsilonrrho }_{C_{t,x}^N} \lesssim \theta_{q+1}^{-3N}, \lambda_qbel{rhoB-Ctx.1}\\
&\norm{ \varepsilonrrho^{1/2}}_{C_{t,x}} \lesssim \theta_{q+1}^{-1}, \quad \norm{ \varepsilonrrho^{1/2} }_{C_{t,x}^N} \lesssim \theta_{q+1}^{-3N}, \lambda_qbel{rhoB-Ctx.2} \\
&\norm{ \varepsilonrrho^{-1}}_{C_{t,x}} \lesssim \theta_{q+1}^{-1}, \quad\norm{ \varepsilonrrho^{-1} }_{C_{t,x}^N } \lesssim \theta_{q+1}^{-3N}, \lambda_qbel{rhoB-Ctx.3}
\end{align}
where the implicit constants are independent of $\lambda_q$, ${\rm d}elta_{q+1}$ and $\theta_{q+1}$.
In order to guarantee the temporal support of the perturbations
to be compatible with that of the concentrated Reynolds stress $\mathring{{\bf{w}}t R}_q$
in \S \ref{Sec-Concen-Rey},
we use the smooth temporal cut-off function $f: [0,T]\rightarrow [0,1]$, satisfying
\begin{itemize}
\item $0\leq f\leq 1$ and $f \equiv 1$ on $\sigmaupp_t \mathring{{\bf{w}}t R}_q$;
\item $\sigmaupp_t f\sigmaubseteq N_{\theta_{q+1}/2}(\sigmaupp_t \mathring{{\bf{w}}t R}_q )$;
\item $\|f \|_{C_t^N}\lesssim \theta_{q+1}^{-N}$,\ \ $1\leq N\leq 9$.
\end{itemize}
Now, we define the amplitudes of the perturbations by
\begin{equation}\lambda_qbel{akb}
a_{(k)}(t,x):= \varepsilonrrho^{\frac{1}{2} } (t,x) f (t)g_{(\tau)}ammamma_{(k)}
\left(\Id-\frac{\mathring{{\bf{w}}t R}_q(t,x)}{\varepsilonrrho(t,x)}\right), \quad k \in \Lambda,
\end{equation}
where $g_{(\tau)}ammamma_{(k)}$ and $\Lambda$ are as in the Geometric Lemma~\ref{geometric lem 2}.
Applying the Geometric Lemma~\ref{geometric lem 2}
and using the expression \eqref{akb}
we have the following algebraic identity,
which enables us to reduce the effect of the concentrated Reynolds stress
(see also \eqref{mag oscillation cancellation calculation} below)
\begin{align}\lambda_qbel{magcancel}
\sigmaum\limits_{ k \in \Lambda} a_{(k)}^2 g_{(\tau)}^2
W_{(k)} \otimes W_{(k)}
= & \varepsilonrrho f^2 {\rm Id} -\mathring{{\bf{w}}t R}_q
+ \sigmaum\limits_{ k \in \Lambda} a_{(k)}^2g_{(\tau)}^2\mathbb{P}_{\neq 0}( W_{(k)} \otimes W_{(k)} ) \notag\\
& + \sigmaum_{k \in \Lambda} a_{(k)}^2 (g_{(\tau)}^2-1) {\fint}_{{\mathbb{T}}^3}W_{(k)}\otimes W_{(k)}{\rm d} x ,
\end{align}
where $\mathbb{P}_{\neq 0}$ denotes the spatial projection onto nonzero Fourier modes.
We also have the following analytic estimates of the amplitudes;
the proof is similar to that in \cite{lzz21}.
\begin{lemma} [Estimates of amplitudes, \cite{lzz21}] \lambda_qbel{mae-endpt1}
For $1\leq N\leq 9$, $k\in \Lambda$, we have
\begin{align}
\lambda_qbel{e3.15}
&\norm{a_{(k)}}_{L^2_{t,x}} \lesssim {\rm d}elta_{q+1}^{\frac{1}{2}} ,\\
\lambda_qbel{mag amp estimates}
& \norm{ a_{(k)} }_{C_{t,x}} \lesssim \theta_{q+1}^{-1},\ \ \norm{ a_{(k)} }_{C_{t,x}^N} \lesssim \theta_{q+1}^{-7N},
\end{align}
where the implicit constants are independent of $q$.
\end{lemma}
\partialaragraph{\bf Velocity perturbations}
We are now in a position to construct the velocity perturbations.
In the sequel, we will define their principal part, the incompressibility corrector
and two types of temporal correctors.
First, the principal part $w_{q+1}^{(p)}$ of the velocity perturbations is defined by
\begin{align} \lambda_qbel{pv}
w_{q+1}^{(p)} &:= \sigmaum_{k \in \Lambda } a_{(k)}g_{(\tau)} W_{(k)},
\end{align}
where $a_{(k)}$ is the amplitude given by \eqref{akb}
and $ W_{(k)}$, $g_{(\tau)}$ are the spatial and temporal building blocks,
respectively,
constructed in \eqref{snwd} and \eqref{gk}.
The important fact here is that, by \eqref{magcancel},
the effect of the concentrated Reynolds stress can be reduced by
the zero frequency part of $w_{q+ 1}^{(p)} \otimes w_{q+ 1}^{(p)}$:
\begin{align} \lambda_qbel{mag oscillation cancellation calculation}
w_{q+ 1}^{(p)} \otimes w_{q+ 1}^{(p)} + \mathring{{\bf{w}}t R}_{q}
=& \varepsilonrrho f^2 {\rm Id}+ \sigmaum_{k \in \Lambda } a_{(k)}^2 g_{(\tau)}^2 \mathbb{P}_{\neq 0}(W_{(k)}\otimes W_{(k)}) \notag\\
&+ \sigmaum_{k \in \Lambda } a_{(k)}^2 (g_{(\tau)}^2-1) {\fint}_{{\mathbb{T}}^3}W_{(k)}\otimes W_{(k)}{\rm d} x.
\end{align}
Because the principal part of the perturbation is not divergence-free,
we need the incompressibility corrector defined by
\begin{align} \lambda_qbel{wqc-dqc}
w_{q+1}^{(c)}
&:= \sigmaum_{k\in \Lambda } g_{(\tau)}\left({\mathrm{curl}} ({\nabla}bla a_{(k)} \times W^c_{(k)})
+ {\nabla}bla a_{(k)} \times {\mathrm{curl}} W^c_{(k)} +a_{(k)} {\bf{w}}t W_{(k)}^c \right) ,
\end{align}
where $W^c_{(k)}$ and ${\bf{w}}t W_k^c $ are given by \eqref{Vk-def} and \eqref{corrector vector}, respectively.
Note that, one has
\begin{align} \lambda_qbel{div free velocity}
& w_{q+1}^{(p)} + w_{q+1}^{(c)}
={\mathrm{curl}} {\mathrm{curl}} \left( \sigmaum_{k \in \Lambda} a_{(k)} g_{(\tau)} W^c_{(k)} \right),
\end{align}
and thus,
\begin{align} \lambda_qbel{div-wpc-dpc-0}
{\mathrm{div}} (w_{q+1}^{(p)} + w_{q +1}^{(c)})= 0.
\end{align}
Furthermore,
in order to balance
the high spatial and temporal frequency errors in \eqref{mag oscillation cancellation calculation},
we introduce another two types of temporal correctors as follows:
{\bf $\bullet$ Temporal corrector to balance spatial oscillations.}
We define the temporal corrector $w_{q+1}^{(t)}$ by
\begin{align} \lambda_qbel{veltemcor}
&w_{q+1}^{(t)} := -{\mu}^{-1} \sigmaum_{k\in \Lambda } \mathbb{P}_{H}\mathbb{P}_{\neq 0}(a_{(k)}^2g_{(\tau)}^2\partialsi_{(k_1)}^2 \partialhi_{(k)}^2 k_1).
\end{align}
It is introduced mainly to handle the high spatial frequency oscillations
in \eqref{mag oscillation cancellation calculation},
namely, by Leibniz's rule,
\begin{align} \lambda_qbel{utem}
\partialartial_{t} w_{q+1}^{(t)}+ \sigmaum_{k \in \Lambda} \mathbb{P}_{\neq 0}
\left(a_{(k)}^{2}g_{(\tau)}^2 {\mathrm{div}}(W_{(k)} \otimes W_{(k)})\right)
=&({\nabla}bla\Delta^{-1}{\mathrm{div}}) {\mu}^{-1} \sigmaum_{k \in \Lambda } \mathbb{P}_{\neq 0} \partialartial_{t}
\left(a_{(k)}^{2}g_{(\tau)}^2 \partialsi_{(k_1)}^{2} \partialhi_{(k)}^{2} k_1\right) \nonumber \\
& - {\mu}^{-1} \sigmaum_{k \in \Lambda } \mathbb{P}_{\neq 0}
\left(\partialartial_{t}( a_{(k)}^{2}g_{(\tau)}^2) \partialsi_{(k_1)}^{2} \partialhi_{(k)}^{2} k_1\right).
\end{align}
We note that
$\partialartial_t w^{(t)}_{q+1}$ cancels the high spatial frequency part of
${\mathrm{div}} (W_{(k)} \otimes W_{(k)})$ on the left-hand side above.
The remaining terms involve only the low frequency factor
$\partialartial_{t}(a_{(k)}^{2}g_{(\tau)}^2)$ and the pressure term,
the latter being removed later by applying
the Helmholtz-Leray projector $\mathbb{P}_{H}$.
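The cancellation used here rests on the elementary identity
\begin{align*}
{\mathrm{div}}\big(W_{(k)}\otimes W_{(k)}\big)
=\big(k_1\cdot\nabla(\psi_{(k_1)}^2\phi_{(k)}^2)\big)k_1
=\mu^{-1}\partial_t\big(\psi_{(k_1)}^2\phi_{(k)}^2\big)k_1,
\end{align*}
valid because $\phi_{(k)}$ is time-independent and constant along the $k_1$-direction, while $\psi_{(k_1)}$ depends on $(t,x)$ only through the combination $k_1\cdot x+\mu t$; this is exactly the term matched by $\partial_t w^{(t)}_{q+1}$ in \eqref{utem}.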
{\bf $\bullet$ Temporal corrector to balance temporal oscillations.}
Another type of the temporal corrector is defined by
\begin{align}
& w_{q+1}^{(o)}:= -\sigmaigma^{-1}\sigmaum_{k\in\Lambda }\mathbb{P}_{H}\mathbb{P}_{\neq 0}\left(h_{(\tau)}{\fint}_{{\mathbb{T}}^3} W_{(k)}\otimes W_{(k)}{\rm d} x{\nabla}bla (a_{(k)}^2) \right) ,\lambda_qbel{wo}
\end{align}
which is used to balance the high temporal frequency oscillations
in \eqref{mag oscillation cancellation calculation},
due to the presence of the temporal oscillation function $g_{(\tau)}$.
More precisely, by \eqref{hk}, \eqref{wo} and the Leibniz rule,
\begin{align} \lambda_qbel{utemcom}
&\partialartial_{t} w_{q+1}^{(o)}+
\sigmaum_{k\in \Lambda}
\mathbb{P}_{\neq 0}\left( (g_{(\tau)}^2-1 ){\fint}_{{\mathbb{T}}^3}W_{(k)}\otimes W_{(k)}{\rm d} x {\nabla}bla(a_{(k)}^{2}) \right) \nonumber \\
=&\left({\nabla}bla\Delta^{-1}{\mathrm{div}}\right) \sigmaigma^{-1} \sigmaum_{k \in \Lambda} \mathbb{P}_{\neq 0} \partialartial_{t}\left(h_{(\tau)}{\fint}_{{\mathbb{T}}^3}W_{(k)}\otimes W_{(k)}{\rm d} x{\nabla}bla (a_{(k)}^2)\right) \nonumber \\
&-\sigmaigma^{-1}\sigmaum_{k\in \Lambda}\mathbb{P}_{\neq 0}\left(h_{(\tau)}{\fint}_{{\mathbb{T}}^3}W_{(k)}\otimes W_{(k)}{\rm d} x\partial_t{\nabla}bla (a_{(k)}^2)\right).
\end{align}
We note that,
modulo the harmless pressure term,
the right-hand side contains only the low frequency part
$\partial_t{\nabla}bla (a_{(k)}^2)$,
which is acceptable in the convex integration scheme.
Hence, the high temporal frequencies in the term $(g_{(\tau)}^2-1)$
are cancelled with the help of the temporal corrector $\partialartial_{t} w_{q+1}^{(o)}$.
Now, we define the velocity perturbation $w_{q+1}$ at level $q+1$ by
\begin{align}
w_{q+1} &:= w_{q+1}^{(p)} + w_{q+1}^{(c)}+ w_{q+1}^{(t)}+{\bf{w}}o.
\lambda_qbel{velocity perturbation}
\end{align}
By the constructions above,
$w_{q+1}$ is mean-free and divergence-free.
The velocity field at level $q+1$ is then defined by
\begin{align}
& u_{q+1}:= {\bf{w}}t u_{q} + w_{q+1},
\lambda_qbel{q+1 velocity}
\end{align}
where ${\bf{w}}t u_q$ is defined in \eqref{def-wtu} by the gluing stage.
We summarize the crucial estimates of velocity perturbations in Lemma \ref{totalest} below.
\begin{lemma} [Estimates of velocity perturbations] \lambda_qbel{totalest}
For any $\rho \in(1,\infty), g_{(\tau)}ammamma \in [1,\infty]$ and integers $0\leq N\leq 7$, we have the following estimates:
\begin{align}
&\norm{{\nabla}^N w_{q+1}^{(p)} }_{L^ g_{(\tau)}ammamma_tL^\rho_x } \lesssim \theta_{q+1}^{-1} \lambda^Nr_{\perp}^{\frac{2}{\rho}-1}r_{\parallel}^{\frac{1}{\rho}-\frac12}\tau^{\frac12-\frac{1}{ g_{(\tau)}ammamma}},\lambda_qbel{uprinlp-endpt1}\\
&\norm{{\nabla}^N w_{q+1}^{(c)} }_{L^g_{(\tau)}ammamma_tL^\rho_x } \lesssim \theta_{q+1}^{-1}\lambda^Nr_{\perp}^{\frac{2}{\rho}}r_{\parallel}^{\frac{1}{\rho}-\frac{3}{2}}\tau^{\frac12-\frac{1}{g_{(\tau)}ammamma}}, \lambda_qbel{ucorlp-endpt1}\\
&\norm{ {\nabla}^Nw_{q+1}^{(t)} }_{L^g_{(\tau)}ammamma_tL^\rho_x }\lesssim \theta_{q+1}^{-2}\lambda^N\mu^{-1}r_{\perp}^{\frac{2}{\rho}-2}r_{\parallel}^{\frac{1}{\rho}-1}\tau^{1-\frac{1}{g_{(\tau)}ammamma}} ,\lambda_qbel{dco rlp-endpt1}\\
&\norm{{\nabla}^N {\bf{w}}o }_{L^g_{(\tau)}ammamma_tL^\rho_x }\lesssim \theta_{q+1}^{-7N-9}\sigmaigma^{-1},\lambda_qbel{dcorlp-endpt1}
\end{align}
where the implicit constants depend only on $N$, $g_{(\tau)}ammamma$ and $\rho$. In particular, for integers $1\leq N\leq 7$, we have
\begin{align}
& \norm{ w_{q+1}^{(p)} }_{L^{\infty}_tH^N_x } + \norm{ w_{q+1}^{(c)} }_{L^{\infty}_tH^N_x}+\norm{ w_{q+1}^{(t)} }_{L^{\infty}_tH^N_x}+\norm{ {\bf{w}}o }_{L^{\infty}_tH^N_x}
\lesssim \lambda_qmbda^{N+2},\lambda_qbel{principal h3 est-endpt1}\\
& \norm{\partial_t w_{q+1}^{(p)} }_{L^{\infty}_tH^N_x } + \norm{\partial_t w_{q+1}^{(c)} }_{L^{\infty}_tH^N_x}+\norm{\partial_t w_{q+1}^{(t)} }_{L^{\infty}_tH^N_x}+\norm{\partial_t {\bf{w}}o }_{L^{\infty}_tH^N_x}
\lesssim \lambda_qmbda^{N+6},\lambda_qbel{pth2 est-endpt1}
\end{align}
where the implicit constants are independent of $\lambda$.
\end{lemma}
\begin{proof}
First,
using \eqref{ew}, \eqref{gk estimate}, \eqref{pv} and Lemma~\ref{mae-endpt1}
we have that for any $\rho \in (1,\infty)$,
\begin{align*}
\norm{{\nabla}bla^N w_{q+1}^{(p)} }_{L^g_{(\tau)}ammamma_tL^\rho_x }
\lesssim& \sigmaum_{k \in \Lambda}
\sigmaum\limits_{N_1+N_2 = N}
\|a_{(k)}\|_{C^{N_1}_{t,x}}\|g_{(\tau)}\|_{L_t^g_{(\tau)}ammamma}
\norm{ {\nabla}bla^{N_2} W_{(k)} }_{C_tL^\rho_x } \notag \\
\lesssim& \theta_{q+1}^{-1}\lambda^Nr_{\perp}^{\frac{2}{\rho}-1}r_{\parallel}^{\frac{1}{\rho}-\frac12}\tau^{\frac12-\frac{1}{g_{(\tau)}ammamma}},
\end{align*}
which verifies \eqref{uprinlp-endpt1}.
Moreover, by \eqref{b-beta-ve}, \eqref{def-mq-thetaq}, \eqref{ew},
\eqref{wqc-dqc} and Lemma \ref{mae-endpt1},
\begin{align*}
\quad \norm{{\nabla}^N w_{q+1}^{(c)} }_{L^g_{(\tau)}ammamma_tL^\rho_x}
\lesssim&
\sigmaum\limits_{k\in \Lambda }\|g_{(\tau)}\|_{L^g_{(\tau)}ammamma_t} \sigmaum_{N_1+N_2=N}
\left( \norm{ a_{(k)} }_{C_{t,x}^{N_1+2}} \norm{{\nabla}^{N_2} W^c_{(k)}}_{C_tW^{1,\rho}_x }
+ \norm{ a_{(k)} }_{C_{t,x}^{N_1}} \norm{ {\nabla}^{N_2}{\bf{w}}t W^c_{(k)}}_{C_tL^\rho_x } \right) \nonumber \\
\lesssim & \sigmaum_{N_1+N_2=N} \tau^{\frac12-\frac{1}{g_{(\tau)}ammamma}} ( \theta_{q+1}^{-7N_1-14}\lambda_qmbda^{N_2-1}r_{\partialerp}^{\frac{2}{\rho} - 1} r_{\partialarallel}^{\frac{1}{\rho} - \frac{1}{2}}
+ \theta_{q+1}^{-7N_1-1}\lambda_qmbda^{N_2}r_{\perp}^{\frac{2}{\rho} } r_{\parallel}^{\frac{1}{\rho} - \frac{3}{2}})\notag \\
\lesssim& \tau^{\frac12-\frac{1}{g_{(\tau)}ammamma}}(\theta_{q+1}^{-14}\lambda_qmbda^{N-1}r_{\partialerp}^{\frac{2}{\rho} - 1} r_{\partialarallel}^{\frac{1}{\rho}
- \frac{1}{2}}+\theta_{q+1}^{-1}\lambda_qmbda^{N}r_{\perp}^{\frac{2}{\rho}} r_{\parallel}^{\frac{1}{\rho} - \frac{3}{2}}) \notag \\
\lesssim& \theta_{q+1}^{-1}\lambda_qmbda^{N}r_{\perp}^{\frac{2}{\rho}} r_{\parallel}^{\frac{1}{\rho} - \frac{3}{2}}\tau^{\frac12-\frac{1}{g_{(\tau)}ammamma}}.
\end{align*}
Thus, we obtain \eqref{ucorlp-endpt1}.
Concerning the temporal correctors,
in view of \eqref{mag amp estimates},
\eqref{veltemcor}, Lemmas \ref{buildingblockestlemma}, \ref{Lem-gk-esti}, \ref{mae-endpt1}
and the boundedness of operators $\mathbb{P}_{\not =0}$ and $\mathbb{P}_H$ in $L^\rho$,
$\rho \in (1,\infty)$, we infer that
\begin{align*}
\norm{{\nabla}^N w_{q+1}^{(t)} }_{L^g_{(\tau)}ammamma_tL^\rho_x}
\lesssim & \,\mu^{-1} \sigmaum_{k \in \Lambda}
\norm{ g_{(\tau)} }_{L^{2g_{(\tau)}ammamma}_t }^2 \sigmaum_{N_1+N_2+N_3=N}
\|{\nabla}bla^{N_1}(a_{(k)}^2)\|_{C_{t,x} }\norm{ {\nabla}^{N_2} (\partialsi_{(k_1)}^2) }_{C_tL^{\rho}_x }\norm{ {\nabla}^{N_3}(\partialhi_{(k)}^2) }_{L^{\rho}_x } \notag \\
\lesssim & \,\mu^{-1} \tau^{1-\frac{1}{g_{(\tau)}ammamma}}
\sigmaum_{N_1+N_2+N_3=N} \theta_{q+1}^{-7N_1-2} \lambda_qmbda^{N_2} r_{\partialarallel}^{\frac{1}{\rho} -1}\lambda_qmbda^{N_3} r_{\partialerp}^{\frac{2}{\rho}-2} \notag \\
\lesssim & \, \theta_{q+1}^{-2} \lambda_qmbda^{N}\mu^{-1}r_{\partialerp}^{\frac{2}{\rho}-2} r_{\partialarallel}^{\frac{1}{\rho} -1}\tau^{1-\frac{1}{g_{(\tau)}ammamma}},
\end{align*}
which yields \eqref{dco rlp-endpt1}.
In view of \eqref{wo}, \eqref{hk-esti} and Lemma \ref{mae-endpt1},
we also have
\begin{align*}
\norm{ {\nabla}^N {\bf{w}}o }_{L^g_{(\tau)}ammamma_tL^\rho_x }
\lesssim \sigmaigma^{-1}\sigmaum_{k \in \Lambda}\|h_{(\tau)}\|_{C_{t}} \|{\nabla}bla^{N+1} (a^2_{(k)})\|_{C_{t,x}}
\lesssim \theta_{q+1}^{-7N-9} \sigmaigma^{-1}.
\end{align*}
Regarding the $L^{\infty}_tH^N_x$-estimate of velocity perturbations,
using estimates \eqref{uprinlp-endpt1}-\eqref{dcorlp-endpt1} we get
\begin{align*}
& \norm{ w_{q+1}^{(p)} }_{L^{\infty}_tH^N_x } + \norm{ w_{q+1}^{(c)} }_{L^{\infty}_tH^N_x}+\norm{ w_{q+1}^{(t)} }_{L^{\infty}_tH^N_x}+\norm{ {\bf{w}}o }_{L^{\infty}_tH^N_x}\notag \\
\lesssim &\, \theta_{q+1}^{-1}\lambda_qmbda^N \tau^{\frac12} +\theta_{q+1}^{-1}\lambda_qmbda^N r_{\partialerp} r_{\partialarallel}^{-1}\tau^{\frac12}
+ \theta_{q+1}^{-2} \lambda_qmbda^N \mu^{-1} r_{\partialerp}^{-1} r_{\partialarallel}^{-\frac12} \tau + \theta_{q+1}^{-7N-9}\sigmaigma^{-1} \notag\\
\lesssim &\,\theta_{q+1}^{-1}\lambda_qmbda^{N+2{\alpha}-\frac52+6{\varepsilon}} +\theta_{q+1}^{-1}\lambda_qmbda^{N+2{\alpha}-\frac52+4{\varepsilon}}+ \theta_{q+1}^{-2} \lambda_qmbda^{N+2{\alpha}-\frac52+5{\varepsilon}}+ \theta_{q+1}^{-7N-9} \lambda_qmbda^{-2{\varepsilon}}\notag \\ \lesssim&\, \lambda_qmbda^{N+2},
\end{align*}
where the last step is due to \eqref{b-beta-ve} and \eqref{e3.1}.
This verifies \eqref{principal h3 est-endpt1}.
It remains to prove \eqref{pth2 est-endpt1}. Using \eqref{b-beta-ve}, \eqref{larsrp} and Lemmas \ref{buildingblockestlemma}, \ref{Lem-gk-esti} and \ref{mae-endpt1}
we get
\begin{align} \lambda_qbel{wprincipal h2 est}
\norm{\partial_t w_{q+1}^{(p)} }_{L^{\infty}_tH^N_x }
\lesssim& \sigmaum_{k \in \Lambda }
\|a_{(k)}\|_{C_{t,x}^{N+1} }
\sigmaum_{ M_1+M_2 =1} \norm{ \partial_t^{M_1}g_{(\tau)}}_{L^{\infty}_t}\norm{ \partial_t^{M_2}W_{(k)} }_{L^{\infty}_tH^N_x} \notag \\
\lesssim& \sigmaum_{ M_1+M_2 =1} \theta_{q+1}^{-7N-7}
\sigmaigma^{M_1} \tau^{M_1+\frac 12} \lambda^{N} \left(\frac{r_{\perp} \lambda \mu}{r_{\parallel}}\right)^{M_{2}} \notag \\
\lesssim &\, \theta_{q+1}^{-7N-7}\lambda_qmbda^{N+1}\mu \tau^{\frac12}
\end{align}
and
\begin{align} \lambda_qbel{uc h2 est}
&\quad \norm{\partial_t w_{q+1}^{(c)} }_{L^{\infty}_tH^N_x } \notag \\
& \lesssim \sigmaum_{k \in \Lambda }
\|a_{(k)}\|_{C_{t,x}^{N+3}}
\sigmaum_{ M_1+M_2 =1} \norm{ g_{(\tau)}}_{C_{t}^{M_1}}
\left(\norm{\partial_t^{M_2} W^c_{(k)} }_{L^{\infty}_tH^N_x} + \norm{ \partial_t^{M_2} {\nabla}bla W^c_{(k)} }_{L^{\infty}_tH^N_x}
+ \norm{\partial_t^{M_2} {\bf{w}}t W^c_{(k)}}_{L^{\infty}_tH^N_x } \right) \nonumber \\
& \lesssim \sigmaum_{ M_1+M_2=1}
\theta_{q+1}^{-7N-21} \sigmaigma^{M_1} \tau^{M_1+\frac 12} \left(\frac{r_{\perp} \lambda \mu}{r_{\parallel}}\right)^{M_2}
\left(\lambda^{N-2}+\lambda^{N-1} + \frac{r_{\perp}}{r_{\parallel}}\lambda^{N}\right) \notag \\
& \lesssim \theta_{q+1}^{-7N-21}\lambda_qmbda^{N+1}\mu r_{\partialerp} r_{\partialarallel}^{-1}\tau^{\frac12} .
\end{align}
Moreover, using the boundedness of $\mathbb{P}_H \mathbb{P}_{\not =0}$ in $H^N_x$
and applying Lemmas \ref{buildingblockestlemma}, \ref{Lem-gk-esti} and \ref{mae-endpt1}
we get
\begin{align} \lambda_qbel{ut h2 est}
\norm{\partial_t w_{q+1}^{(t)} }_{L^{\infty}_tH^N_x }
& \lesssim \mu^{-1} \sigmaum_{k \in \Lambda }
\|\partial_t(a_{(k)}^2 g_{(\tau)}^2 \partialsi^2_{(k_1)} \partialhi^2_{(k)})\|_{L^{\infty}_tH^N_x } \notag \\
& \lesssim \mu^{-1} \sigmaum_{k \in \Lambda }\|a_{(k)}^2\|_{C_{t,x}^{N+1} }
\sigmaum_{ M_1+M_2=1} \|\partial_t^{M_1}g_{(\tau)}^2\|_{L^{\infty}_t}\sigmaum_{0\leq N_1+N_2\leq N}\|\partial_t^{M_2}{\nabla}bla^{N_1}\partialsi^2_{(k_1)}\|_{L^{\infty}_tL^2_x }
\| {\nabla}bla^{N_2}\partialhi^2_{(k)}\|_{L^{\infty}_tL^2_x } \notag \\
& \lesssim \sigmaum_{ M_1+M_2=1} \theta_{q+1}^{-7N-9}\mu^{-1} \sigmaigma^{M_1} \tau^{M_1+1} \lambda^{N}r_{\partialerp}^{-1} r_{\partialarallel}^{-\frac12} \left(\frac{r_{\perp} \lambda \mu}{r_{\parallel}}\right)^{M_{2}} \notag\\
&\lesssim \theta_{q+1}^{-7N-9}\lambda_qmbda^{N+1}r_{\partialerp}^{-1} r_{\partialarallel}^{-\frac12} \tau .
\end{align}
Arguing as above and using \eqref{b-beta-ve}, \eqref{larsrp},
\eqref{pt-h-gt} and Lemma~\ref{Lem-gk-esti} we have
\begin{align} \lambda_qbel{wo h2 est}
\norm{\partial_t w_{q+1}^{(o)} }_{L^{\infty}_tH^N_x }
& \lesssim \sigmaigma^{-1} \sigmaum_{k \in \Lambda } \|\partial_t (h_{(\tau)} {\nabla} (a_{(k)}^2) )\|_{L^{\infty}_tH^N_x} \notag \\
& \lesssim \sigmaigma^{-1} \sigmaum_{k \in \Lambda }\sigmaum_{M_1+M_2=1} \|\partial_t^{M_1}h_{(\tau)}\|_{C_{t} } \|\partial_t^{M_2} {\nabla}bla (a^2_{(k)})\|_{C_{t,x}^{N}} \notag \\
& \lesssim \sigmaigma^{-1} \sigmaum_{M_1+M_2=1}\sigmaigma^{M_1}\tau^{M_1}\theta_{q+1}^{-9-7(M_2+N)}\lesssim \theta_{q+1}^{-7N-9}\tau .
\end{align}
Therefore, taking into account that $\theta_{q+1}^{-7N-14}\ll \lambda^{{\varepsilon}}$
and $0<{\varepsilon}\leq 1/20$, we conclude that
\begin{align*}
& \norm{\partial_t w_{q+1}^{(p)} }_{L^{\infty}_tH^N_x } + \norm{\partial_t w_{q+1}^{(c)} }_{L^{\infty}_tH^N_x}+\norm{\partial_t w_{q+1}^{(t)} }_{L^{\infty}_tH^N_x}+\norm{ \partial_t {\bf{w}}o }_{L^{\infty}_tH^N_x}\notag \\
\lesssim &\, \theta_{q+1}^{-7N-7}\lambda_qmbda^{N+1}\mu \tau^{\frac12} +\theta_{q+1}^{-7N-21}\lambda_qmbda^{N+1}\mu r_{\partialerp} r_{\partialarallel}^{-1}\tau^{\frac12}
+ \theta_{q+1}^{-7N-9} \lambda_qmbda^{N+1} r_{\partialerp}^{-1} r_{\partialarallel}^{-\frac12} \tau + \theta_{q+1}^{-7N-9}\tau \notag\\
\lesssim &\,\theta_{q+1}^{-7N-7}\lambda_qmbda^{N+4{\alpha}-\frac52+8{\varepsilon}} +\theta_{q+1}^{-7N-21}\lambda_qmbda^{N+4{\alpha}-\frac52+6{\varepsilon}}+ \theta_{q+1}^{-7N-9} \lambda_qmbda^{N+4{\alpha}-\frac52+7{\varepsilon}}+ \theta_{q+1}^{-7N-9} \lambda_qmbda^{4{\alpha}-5+11{\varepsilon}} \notag \\
\lesssim&\, \lambda_qmbda^{N+6},
\end{align*}
which yields \eqref{pth2 est-endpt1}.
Therefore, the proof of Lemma~\ref{totalest} is complete.
\end{proof}
\sigmaubsection{Verification of inductive estimates for velocity perturbations} \lambda_qbel{Subsec-induc-vel-mag}
As a direct consequence of these estimates,
we are now ready to verify the inductive estimates \eqref{uh3}, \eqref{upth2}, \eqref{u-B-L2tx-conv}-\eqref{u-B-Lw-conv}
for the velocity perturbations.
First, in order to
derive the decay of the $L^2_{t,x}$-norms of the velocity perturbations,
we note that, since $a_{(k)}$ has compact support in $[0,T]\times {\mathbb{T}}^3$, it can be regarded as a periodic function on ${\mathbb{T}}^4$.
We apply the $L^p$ decorrelation Lemma~\ref{Decorrelation1} with $f= a_{(k)}$, $g = g_{(\tau)}\partialsi_{(k_1)}\partialhi_{(k)}$, $\sigmaigma = \lambda_qmbda^{2{\varepsilon}}$
and then use \eqref{la}, \eqref{b-beta-ve} and Lemmas \ref{buildingblockestlemma}, \ref{Lem-gk-esti} and \ref{mae-endpt1} to get
\begin{align}
\lambda_qbel{Lp decorr vel}
\norm{w^{(p)}_{q+1}}_{L^2_{t,x}}
&\lesssim \sigmaum\limits_{k\in \Lambda}
{\bf B}ig(\|a_{(k)}\|_{L^2_{t,x}}\norm{ g_{(\tau)} }_{L^2_{t}} \norm{ \partialsi_{(k_1)}\partialhi_{(k)}}_{C_tL^2_{x}} \notag\\
&\qquad\qquad +\sigmaigma^{-\frac12}\|a_{(k)}\|_{C^1_{t,x}}\norm{ g_{(\tau)} }_{L^2_{t}} \norm{ \partialsi_{(k_1)}\partialhi_{(k)}}_{C_tL^2_{x}}{\bf B}ig) \notag\\
&\lesssim {\rm d}elta_{q+1}^{\frac{1}{2}}+\theta_{q+1}^{-7}\lambda_qmbda^{-{\varepsilon}}_{q+1} \lesssim {\rm d}elta_{q+1}^{\frac{1}{2}}.
\end{align}
Thus, in view of \eqref{b-beta-ve}, using \eqref{Lp decorr vel} and Lemma~\ref{totalest},
we bound the velocity perturbation by
\begin{align} \lambda_qbel{e3.41.1}
\norm{w_{q+1}}_{L^2_{t,x}} &\lesssim\norm{w_{q+1}^{(p)} }_{L^2_{t,x}} + \norm{ w_{q+1}^{(c)} }_{L^2_{t,x}} +\norm{ w_{q+1}^{(t)} }_{L^2_{t,x}}+\norm{ {\bf{w}}o }_{L^2_{t,x}}\notag \\
&\lesssim {\rm d}elta_{q+1}^{\frac{1}{2}} +\theta_{q+1}^{-1}r_{\partialerp} r_{\partialarallel}^{-1}
+ \theta_{q+1}^{-2} \mu^{-1} r_{\partialerp}^{-1} r_{\partialarallel}^{-\frac12} \tau^\frac 12 + \theta_{q+1}^{-9}\sigmaigma^{-1}\lesssim {\rm d}elta_{q+1}^{\frac{1}{2}},
\end{align}
and
\begin{align} \lambda_qbel{wql1.1}
\norm{w_{q+1}}_{L^1_tL^2_x} &\lesssim\norm{w_{q+1}^{(p)} }_{L^1_tL^2_x} + \norm{ w_{q+1}^{(c)} }_{L^1_tL^2_x} +\norm{ w_{q+1}^{(t)} }_{L^1_tL^2_x}+\norm{ {\bf{w}}o }_{L^1_tL^2_x}\notag \\
&\lesssim \theta_{q+1}^{-1}\tau^{-\frac12}+\theta_{q+1}^{-1} r_{\partialerp} r_{\partialarallel}^{-1} \tau^{-\frac12}+ \theta_{q+1}^{-2} \mu^{-1} r_{\partialerp}^{-\frac{1}{2}} r_{\partialarallel}^{-\frac12} + \theta_{q+1}^{-9}\sigmaigma^{-1}\lesssim \lambda_qmbda_{q+1}^{-{\varepsilon}}.
\end{align}
Next, we verify the iterative estimates for $u_{q+1}$.
Since $\lambda_q^{-3} \ll {\rm d}elta_{q+2}^{1/2}$,
using \eqref{uh3}, \eqref{nuh3},
\eqref{pdvh3},
\eqref{principal h3 est-endpt1} and \eqref{pth2 est-endpt1} we derive that
\begin{align}
\norm{u_{q+1}}_{L^{\infty}_tH^3_x}
\lesssim& \norm{{\bf{w}}t u_q}_{L^{\infty}_tH^3_x}+\norm{w_{q+1}}_{L^{\infty}_tH^3_x} \notag \\
\lesssim& \lambda_qmbda_{q}^5+ \lambda_qmbda_{q+1}^{5}\lesssim \lambda_qmbda_{q+1}^5, \lambda_qbel{verifyuc1}
\end{align}
and
\begin{align}
\norm{\partial_t u_{q+1}}_{L^{\infty}_tH^2_x}
\lesssim& \norm{\partial_t {\bf{w}}t u_q}_{L^{\infty}_tH^2_x}+\norm{\partial_t w_{q+1}}_{L^{\infty}_tH^2_x} \notag \\
\lesssim& \sigmaup_i \|\partial_t\left(\chi_iv_i \right) \|_{L^{\infty}_tH^2_x}+ \lambda_qmbda_{q+1}^{8} \notag\\
\lesssim& \sigmaup_i( \|\partial_t\chi_i\|_{C_t}\| v_i \|_{L^{\infty}(\sigmaupp \chi_i; H^2_x)}
+\|\chi_i\|_{C_t}\| \partial_t v_i \|_{L^{\infty}(\sigmaupp \chi_i;H^2_x)}) + \lambda_qmbda_{q+1}^{8} \notag\\
\lesssim&\, \theta_{q+1}^{-1}\lambda_q^{5}+m_{q+1}\lambda_q^5+ \lambda_qmbda_{q+1}^8\lesssim \lambda_qmbda_{q+1}^8. \lambda_qbel{verifyupth2}
\end{align}
Moreover, using \eqref{la}, \eqref{b-beta-ve}, \eqref{uuql2}
and \eqref{Lp decorr vel}-\eqref{wql1.1} we have
\begin{align}
& \norm{u_{q} - u_{q+1}}_{L^2_{t,x}} \leq \norm{ u_{q} -{\bf{w}}t u_{q} }_{L^2_{t,x}} + \norm{{\bf{w}}t u_{q} - u_{q+1}}_{L^2_{t,x}} \nonumber \\
&\qquad \qquad \qquad \ \lesssim \norm{ u_q - {\bf{w}}t u_{q} }_{L^{\infty}_{t}L^2_x}+ \norm{w_{q+1}}_{L^2_{t,x}} \nonumber \\
&\qquad \qquad \qquad \ \lesssim \lambda_q^{-3}+{\rm d}elta_{q+1}^{\frac{1}{2}} \leq M^*{\rm d}elta_{q+1}^{\frac{1}{2}}, \lambda_qbel{e3.43}
\end{align}
for $M^*$ sufficiently large and
\begin{align} \lambda_qbel{uql1l2.1}
\norm{u_{q} - u_{q+1}}_{L^1_tL^2_x}
\lesssim& \norm{ u_{q} -{\bf{w}}t u_{q} }_{L^{\infty}_{t}L^2_x}+ \norm{w_{q+1}}_{L^1_tL^2_x} \nonumber \\
\lesssim& \lambda_q^{-3}+\lambda_qmbda_{q+1}^{-{\varepsilon}} \leq {\rm d}elta_{q+2}^{\frac{1}{2}},
\end{align}
where we also chose $a$ sufficiently large
so that the last inequalities in \eqref{e3.43} and \eqref{uql1l2.1} are valid.
Regarding the iteration estimate \eqref{u-B-Lw-conv}, we first claim that the Sobolev embedding
\begin{align}\lambda_qbel{sobolevem}
H^3_x{B_q}ookrightarrow W^{s,p}_x
\end{align}
holds for any $(s,p,g_{(\tau)}ammamma)\in \mathcal{A}_1$.
To this end,
it suffices to consider the case of $g_{(\tau)}ammamma=1$.
For the given $(s,p,g_{(\tau)}ammamma)\in \mathcal{A}_1$,
it holds that for $1\leq p\leq 2$,
\begin{align*}
0\leq s< 4{\alpha}-5+\frac{3}{p}+(1-2{\alpha})\leq 2{\alpha}-1<3.
\end{align*}
Because $L^2_x{B_q}ookrightarrow L^p_x$ for $1\leq p\leq 2$,
we obtain $H^3_x {B_q}ookrightarrow W^{s,p}_x$.
Moreover, for $p>2$,
\begin{align*}
0\leq s< 4{\alpha}-5+\frac{3}{p} +(1-2{\alpha})< 2{\alpha}-\frac52<\frac32.
\end{align*}
Taking into account $H^3_x{B_q}ookrightarrow W_x^{s,{\infty}} {B_q}ookrightarrow W^{s,p}_x$ for $s<3/2$,
we thus obtain \eqref{sobolevem}.
Hence, by virtue of \eqref{rh4}, \eqref{est-vih3} and \eqref{sobolevem}, we have
\begin{align}\lambda_qbel{wtu-u}
\norm{{\bf{w}}t u_q-u_q}_{L^g_{(\tau)}ammamma_tW^{s,p}_x} & \lesssim \norm{\sigmaum_i\chi_i(v_i-u_q)}_{L^{\infty}_tH^3_x} \notag\\
& \lesssim \sigmaup_i {\bf B}ig(\norm{\chi_i(v_i-u_q)}_{L^{\infty}(\sigmaupp(\chi_i);H^3_x)}
+\norm{(1-\chi_i)(v_{i-1}-u_q)}_{L^{\infty}(\sigmaupp(\chi_i\chi_{i-1});H^3_x)}{\bf B}ig)\notag\\
&\lesssim \sigmaup_i |t_{i+1}+\theta_{q+1}-t_i| \||{\nabla}bla|\mathring{R}_q\|_{L^{\infty}_tH^3_x}\lesssim m_{q+1}^{-1}\lambda_q^{10}\lesssim \lambda_q^{-2}.
\end{align}
Therefore, for ${\alpha}\in [5/4,2)$,
by Lemma \ref{totalest},
\begin{align}\lambda_qbel{lw-est}
\norm{ u_{q+1} - u_q }_{L^g_{(\tau)}ammamma_tW^{s,p}_x}
&\lesssim \norm{{\bf{w}}t u_q-u_q}_{L^g_{(\tau)}ammamma_tW^{s,p}_x}
+\norm{w_{q+1}}_{L^g_{(\tau)}ammamma_tW^{s,p}_x} \notag\\
&\lesssim \lambda_q^{-2} + \theta_{q+1}^{-1}\lambda_{q+1}^{s}r_{\perp}^{\frac{2}{p}-1}r_{\parallel}^{\frac{1}{p}-\frac12}\tau^{\frac12-\frac{1}{g_{(\tau)}ammamma}}
+\theta_{q+1}^{-30}\sigmaigma^{-1} \notag\\
&\lesssim \lambda_qmbda_{q}^{-2}
+ \lambda_qmbda_{q+1}^{s+2{\alpha}-1-\frac{3}{p}-\frac{4{\alpha}-5}{g_{(\tau)}ammamma} +{\varepsilon}(2+\frac8p-\frac{11}{g_{(\tau)}ammamma}) }
+ \lambda_{q+1}^{-{\varepsilon}} ,
\end{align}
where in the last inequality we also used \eqref{b-beta-ve}, \eqref{larsrp} and Lemma \ref{totalest}.
Taking into account \eqref{e3.1}
we obtain
\begin{align}\lambda_qbel{endpt1-condition}
s+2{\alpha}-1-\frac{3}{p}-\frac{4{\alpha}-5}{g_{(\tau)}ammamma} +{\varepsilon}(2+\frac8p-\frac{11}{g_{(\tau)}ammamma})
\leq s+2{\alpha}-1-\frac{3}{p}-\frac{4{\alpha}-5}{g_{(\tau)}ammamma} +10{\varepsilon}
<-10{\varepsilon},
\end{align}
which yields that
\begin{align}
\norm{ u_{q+1} - u_q }_{L^g_{(\tau)}ammamma_tW^{s,p}_x} \leq {\rm d}elta_{q+2}^{\frac12}. \lambda_qbel{ne6.6}
\end{align}
Therefore, the iteration estimates \eqref{uh3}, \eqref{upth2}, \eqref{u-B-L2tx-conv}-\eqref{u-B-Lw-conv} are verified.
\sigmaection{Reynolds stress for the supercritical regime $\mathcal{A}_1$} \lambda_qbel{Sec-Rey-Endpt1}
The aim of this section is to verify the inductive estimates \eqref{rh3} and \eqref{rl1}
for the new Reynolds stress $\mathring{R}_{q+1}$
in the supercritical regime $\mathcal{A}_1$ when ${\alpha}lpha \in [5/4,2)$,
whose borderline in particular includes the endpoint case $(s,g_{(\tau)}ammamma,p)=(3/p+1-2{\alpha}lpha,\infty, p)$.
\sigmaubsection{Decomposition of Reynolds stress}
We derive from \eqref{q+1 velocity} and equation \eqref{equa-nsr}
of $({\bf{w}}t u_q, \mathring{{\bf{w}}t R}_q)$ that
the new Reynolds stress $\mathring{R}_{q+1}$ satisfies
\begin{align} \lambda_qbel{ru}
{\rm d}isplaystyle{\mathrm{div}}\mathring{R}_{q+1} - {\nabla}bla P_{q+1}
&{\rm d}isplaystyle = {u_q}nderbrace{\partialartial_t (w_{q+1}^{(p)}+w_{q+1}^{(c)}) +\nu(-\Delta)^{{\alpha}lpha} w_{q+1} +{\mathrm{div}}\big({\bf{w}}t u_q \otimes w_{q+1} + w_{q+ 1} \otimes {\bf{w}}t u_q \big) }_{ {\mathrm{div}}\mathring{R}_{lin} +{\nabla}bla P_{lin} } \notag\\
&{\rm d}isplaystyle\quad+ {u_q}nderbrace{{\mathrm{div}} (w_{q+1}^{(p)} \otimes w_{q+1}^{(p)}+ \mathring{{\bf{w}}t R}_q)+\partialartial_t w_{q+1}^{(t)}+\partialartial_t {\bf{w}}o}_{{\mathrm{div}}\mathring{R}_{osc} +{\nabla}bla P_{osc}} \notag\\
&{\rm d}isplaystyle\quad+ {u_q}nderbrace{{\mathrm{div}}{\bf B}ig((w_{q+1}^{(c)}+ w_{q+1}^{(t)}+{\bf{w}}o)\otimes w_{q+1}+ w_{q+1}^{(p)} \otimes (w_{q+1}^{(c)}+ w_{q+1}^{(t)}+{\bf{w}}o) {\bf B}ig)}_{{\mathrm{div}}\mathring{R}_{cor} +{\nabla}bla P_{cor}}.
\end{align}
Using the inverse divergence operator $\mathcal{R}$
we can choose the Reynolds stress at level $q+1$:
\begin{align}\lambda_qbel{rucom}
\mathring{R}_{q+1} := \mathring{R}_{lin} + \mathring{R}_{osc}+ \mathring{R}_{cor},
\end{align}
where the linear error
\begin{align}
\mathring{R}_{lin} & := \mathcal{R}\left(\partialartial_t (w_{q+1}^{(p)} +w_{q+1}^{(c)} )\right)
+ \nu \mathcal{R} (-\Delta)^{{\alpha}} w_{q+1} + \mathcal{R}\mathbb{P}_H {\mathrm{div}} \left({\bf{w}}t u_q\mathring{\otimes} w_{q+1} + w_{q+ 1}
\mathring{\otimes} {\bf{w}}t u_q\right), \lambda_qbel{rup}
\end{align}
the oscillation error
\begin{align}\lambda_qbel{rou}
\mathring{R}_{osc} :=& \sigmaum_{k \in \Lambda } \mathcal{R} \mathbb{P}_H\mathbb{P}_{\neq 0}\left(g_{(\tau)}^2 \mathbb{P}_{\neq 0}(W_{(k)}\otimes W_{(k)}){\nabla}bla (a_{(k)}^2)\right) \notag\\
& -\mu^{-1}\sigmaum_{k \in \Lambda }\mathcal{R} \mathbb{P}_H \mathbb{P}_{\neq 0}\left(\partial_t (a_{(k)}^2g_{(\tau)}^2)\partialsi_{(k_1)}^2\partialhi_{(k)}^2k_1\right)\notag\\
&-\sigmaigma^{-1}\sigmaum_{k\in \Lambda}\mathcal{R} \mathbb{P}_H \mathbb{P}_{\neq 0}\left(h_{(\tau)}{\fint}_{{\mathbb{T}}^3}W_{(k)}\otimes W_{(k)}{\rm d} x\, \partial_t{\nabla}bla(a_{(k)}^{2})\right),
\end{align}
and the corrector error
\begin{align}
\mathring{R}_{cor} &
:= \mathcal{R} \mathbb{P}_H {\mathrm{div}} \bigg( w^{(p)}_{q+1} \mathring{\otimes} (w_{q+1}^{(c)}+w_{q+1}^{(t)}+{\bf{w}}o)
+ (w_{q+1}^{(c)}+w_{q+1}^{(t)}+{\bf{w}}o) \mathring{\otimes} w_{q+1} \bigg). \lambda_qbel{rup2}
\end{align}
Moreover, one also has (see, e.g., \cite{bcv21,lzz21})
\begin{align} \lambda_qbel{calRuPHdiv-Ru}
\mathring{R}_{q+1} = \mathcal{R} \mathbb{P}_H {\mathrm{div}} \mathring{R}_{q+1}.
\end{align}
\sigmaubsection{Verification of $L^{\infty}_tH^N_x$-estimates of Reynolds stress}
Regarding the $L^{\infty}_tH^N_x$-estimates \eqref{rh3} and \eqref{rh4}
of the Reynolds stress,
using the identity \eqref{calRuPHdiv-Ru} and
equation \eqref{equa-nsr} for $(u_{q+1}, \mathring R_{q+1})$
we get that for $N=3,4$,
\begin{align}
\norm{\mathring R_{q+1}}_{L^{\infty}_tH^N_x}&\leq \norm{\mathcal R \mathbb{P}_H ({\mathrm{div}} \mathring{R}_{q+1})}_{L^{\infty}_tH^N_x}\notag \\
&\lesssim \norm{\partialartial_t u_{q+1}+{\mathrm{div}}(u_{q+1}\otimes u_{q+1}) +\nu(-\Delta )^{{\alpha}lpha}u_{q+1}}_{L^{\infty}_tH^{N-1}_x}\notag \\
&\lesssim \norm{\partialartial_t u_{q+1}}_{L^{\infty}_tH^{N-1}_x}+\norm{u_{q+1}\otimes u_{q+1}}_{L^{\infty}_tH^N_x} + \norm{u_{q+1}}_{L^{\infty}_tH^{N+3}_x}\notag \\
&\lesssim \norm{\partialartial_t u_{q+1}}_{L^{\infty}_tH^{N-1}_x} +\norm{u_{q+1}}_{L^{\infty}_tH^N_x} \norm{u_{q+1}}_{L^\infty_{t,x}} + \norm{u_{q+1}}_{L^{\infty}_tH^{N+3}_x}.\lambda_qbel{ine-rq1h3}
\end{align}
We claim that for every $0\leq {\bf{w}}t N\leq 4$ and for all $qg_{(\tau)}eq 0$,
\begin{align}\lambda_qbel{uh6}
\norm{u_{q+1}}_{L^{\infty}_tH^{{\bf{w}}t N+3}_x}\lesssim \lambda_qmbda_{q+1}^{{\bf{w}}t N+5}, \quad
\norm{\partial_t u_{q+1}}_{L^{\infty}_tH^{{\bf{w}}t N-1}_x}\lesssim \lambda_qmbda_{q+1}^{{\bf{w}}t N+5},
\end{align}
where the implicit constant is independent of $q$.
To this end,
in view of \eqref{def-mq-thetaq}, \eqref{nuh3},
\eqref{pdvh3}, \eqref{q+1 velocity} and Lemma~\ref{totalest},
we derive
\begin{align}\lambda_qbel{uhm3}
\|u_{q+1}\|_{L^{\infty}_tH^{{\bf{w}}t N+3}_x}& \leq \|{\bf{w}}t u_q\|_{L^{\infty}_tH^{{\bf{w}}t N+3}_x}+\|w_{q+1}\|_{L^{\infty}_tH^{{\bf{w}}t N+3}_x} \notag\\
&\lesssim \sigmaup_i\|v_i\|_{L^{\infty}(\sigmaupp (\chi_i); H^{{\bf{w}}t N+3}_x)} +\lambda_qq^{{\bf{w}}t N+5} \notag\\
&\lesssim m_{q+1}^{\frac{{\bf{w}}t N}{2{\alpha}}}\lambda_q^5+ \lambda_qq^{{\bf{w}}t N+5}\notag\\
& \lesssim \lambda_qmbda_{q+1}^{{\bf{w}}t N+5},
\end{align}
and
\begin{align}\lambda_qbel{ptuhm}
\|\partial_t u_{q+1}\|_{L^{\infty}_tH^{{\bf{w}}t N-1}_x}& \leq \|\partial_t {\bf{w}}t u_q\|_{L^{\infty}_tH^{{\bf{w}}t N-1}_x}+\|\partial_t w_{q+1}\|_{L^{\infty}_tH^{{\bf{w}}t N-1}_x} \notag\\
&\lesssim \sigmaup_i\|\partial_t (\chi_iv_i)\|_{L^{\infty}(\sigmaupp( \chi_i); H^{{\bf{w}}t N-1}_x)} +\lambda_qq^{{\bf{w}}t N+5}\notag\\
&\lesssim \sigmaup_i( \|\partial_t\chi_i\|_{C_t}\| v_i \|_{L^{\infty}(\sigmaupp (\chi_i); H^{{\bf{w}}t N-1}_x)}
+\|\chi_i\|_{C_t}\| \partial_t v_i \|_{L^{\infty}(\sigmaupp( \chi_i); H^{{\bf{w}}t N-1}_x)})+ \lambda_qq^{{\bf{w}}t N+5} \notag\\
& \lesssim \theta_{q+1}^{-1}\lambda_q^5+m_{q+1}\lambda_q^5+\lambda_qmbda_{q+1}^{{\bf{w}}t N+5}\lesssim \lambda_qmbda_{q+1}^{{\bf{w}}t N+5}.
\end{align}
Thus, we prove \eqref{uh6}, as claimed.
Concerning the $L^{\infty}_{t,x}$ estimates of $u_{q+1}$,
using \eqref{nuh3},
the Sobolev embedding $H^2_x{B_q}ookrightarrow L^{\infty}_x$
and Lemma~\ref{totalest} we have
\begin{align}\lambda_qbel{ul9}
\norm{u_{q+1}}_{L^\infty_{t,x}} & \leq \norm{{\bf{w}}t u_{q}}_{L^\infty_{t,x}}+ \norm{w_{q+1}}_{L^\infty_{t}H^2_x} \lesssim \norm{{\bf{w}}t u_{q}}_{L^\infty_{t}H^3_x}+ \lambda_qq^{4}
\lesssim\lambda_q^{5}+ \lambda_qmbda_{q+1}^{4} \lesssim \lambda_qmbda_{q+1}^{4},
\end{align}
where the implicit constant is independent of $q$.
Thus, inserting \eqref{verifyuc1}, \eqref{verifyupth2}, \eqref{uh6} and \eqref{ul9} into \eqref{ine-rq1h3}
we arrive at
\begin{align*}
\norm{\mathring R_{q+1}}_{L^{\infty}_tH^N_x}
&\lesssim \lambda_qmbda_{q+1}^{N+5}+ \lambda_qmbda_{q+1}^{N+6} + \lambda_qmbda_{q+1}^{N+5}
\lesssim \lambda_qmbda_{q+1}^{N+6}
\end{align*}
with universal implicit constants.
Therefore, the $L^{\infty}_tH^N_x$-estimates \eqref{rh3} and \eqref{rh4} of $\mathring R_{q+1}$ are verified.
\sigmaubsection{Verification of $L^1_{t,x}$-decay of Reynolds stress}
Below we mainly verify the delicate $L^1_{t,x}$-decay \eqref{rl1}
of Reynolds stress $\mathring{R}_{q+1}$ at level $q+1$.
Since the building blocks constructed in
\S \ref{Sec-Flow-Endpt1} are highly oscillatory and concentrated in space and time,
we need to be careful when estimating
the time and space derivatives in the linear error $\mathring{R}_{lin}$
and the high frequency terms in the oscillation error $\mathring{R}_{osc}$.
Since the Calder\'{o}n-Zygmund operators are bounded in the space $L^\rho_x$, $1<\rho<\infty$,
we choose
\begin{align}\lambda_qbel{defp}
\rho: =\frac{3-8\varepsilonrepsilon}{3-9\varepsilonrepsilon}\in (1,2),
\end{align}
where ${\varepsilon}$ is given by \eqref{e3.1}.
Note that,
\begin{equation}\lambda_qbel{setp}
(3-8\varepsilonrepsilon)(1-\frac{1}{\rho})={\varepsilon},
\end{equation}
and
\begin{align} \lambda_qbel{rs-rp-p-ve}
r_{\perp}^{\frac 2\rho-2}r_{\parallel}^{\frac 1\rho-1} = \lambda_qmbda^{{\varepsilon}},\ \
r_{\perp}^{\frac2\rho- 1}r_{\parallel}^{\frac 1\rho-\frac 12} = \lambda_qmbda^{-\frac32+5\varepsilonrepsilon},\ \
r_{\perp}^{\frac 2\rho} r_{\parallel}^{\frac 1\rho - \frac 32} = \lambda_qmbda^{-\frac 32+ 3{\varepsilon}}.
\end{align}
We shall treat the linear error, the oscillation error
and the corrector error separately.
\partialaragraph{\bf (i) Linear error.}
First, in view of Lemmas \ref{buildingblockestlemma},
\ref{Lem-gk-esti} and \ref{mae-endpt1}, \eqref{e3.1}, \eqref{larsrp} and \eqref{rs-rp-p-ve}, we have
\begin{align}
& \| \mathcal{R}\partialartial_t( w_{q+1}^{(p)}+ w_{q+1}^{(c)})\|_{L_t^1L_x^\rho} \nonumber \\
\lesssim& \sigmaum_{k \in \Lambda}\| \mathcal{R} {\mathrm{curl}}{\mathrm{curl}}\partialartial_t(g_{(\tau)} a_{(k)} W^c_{(k)}) \|_{L_t^1L_x^\rho} \nonumber \\
\lesssim& \sigmaum_{k \in \Lambda}{\bf B}ig(\| g_{(\tau)}\|_{L^1_t}(\| a_{(k)} \|_{C_{t,x}^2}\| W^c_{(k)} \|_{C_t W_x^{1,\rho}}
+\| a_{(k)} \|_{C_{t,x}^1}\| \partial_t W^c_{(k)} \|_{C_t W^{1,\rho}_x}) \nonumber \\
&\qquad\qquad+\| \partial_tg_{(\tau)}\|_{L_t^1}\| a_{(k)} \|_{C_{t,x}^1}\| W^c_{(k)} \|_{C_t W_x^{1,\rho}}{\bf B}ig)\nonumber \\
\lesssim& \tau^{-\frac12}(\theta_{q+1}^{-14}r_{\perp}^{\frac{2}{\rho}-1}r_{\parallel}^{\frac{1}{\rho}-\frac{1}{2}}\lambda_qmbda^{-1}
+\theta_{q+1}^{-7}r_{\perp}^{\frac{2}{\rho} }r_{\parallel}^{\frac{1}{\rho}-\frac{3}{2}}\mu)
+ \sigmaigma\tau^{\frac12}\theta_{q+1}^{-7}r_{\perp}^{\frac{2}{\rho}-1}r_{\parallel}^{\frac{1}{\rho}-\frac{1}{2}}\lambda_qmbda^{-1} \notag\\
\lesssim& \theta_{q+1}^{-14}(\lambda_qmbda^{-2{\alpha}-\frac {\varepsilon}2 }+ \lambda_qmbda^{-\frac {\varepsilon}2}+\lambda_qmbda^{2{\alpha}-5+13{\varepsilon}} )
\lesssim \theta_{q+1}^{-14}\lambda_qmbda^{-\frac {\varepsilon}2}.\lambda_qbel{time derivative}
\end{align}
Regarding the viscosity term $(-\Delta)^{\alpha}lpha w_{q+1}$,
we use \eqref{velocity perturbation} to estimate
\begin{align}
\norm{\nu \mathcal{R}(-\Delta)^{{\alpha}lpha} w_{q+1} }_{L_t^1L^\rho_x} \lesssim & \norm{\nu \mathcal{R}(-\Delta)^{{\alpha}lpha} w_{q+1}^{(p)} }_{L_t^1L^\rho_x}+\norm{ \nu \mathcal{R}(-\Delta)^{{\alpha}lpha} w_{q+1}^{(c)} }_{L_t^1L^\rho_x}\notag \\
& +\norm{ \nu\mathcal{R}(-\Delta)^{{\alpha}lpha} w_{q+1}^{(t)} }_{L_t^1L^\rho_x}+\norm{ \nu\mathcal{R}(-\Delta)^{{\alpha}lpha} {\bf{w}}o }_{L_t^1L^\rho_x}.\lambda_qbel{e5.17}
\end{align}
In order to estimate the
right-hand side above,
we use the interpolation inequality (cf. \cite{BM18}), \eqref{uprinlp-endpt1} and the fact that $2-{\alpha}lpha g_{(\tau)}eq 20\varepsilon$
to derive
\begin{align}
\norm{ \nu\mathcal{R}(-\Delta)^{{\alpha}lpha} w_{q+1}^{(p)} }_{L_t^1L^\rho_x}
& \lesssim \norm{ |{\nabla}|^{2{\alpha}-1} w_{q+1}^{(p)} }_{L_t^1L^\rho_x}\notag\\
& \lesssim \norm{w_{q+1}^{(p)}}_{L_t^1L^\rho_x} ^{\frac{4-2{\alpha}}{3}} \norm{w_{q+1}^{(p)}}_{L_t^1W^{3,\rho}_x} ^{\frac{2{\alpha}-1}{3}}\notag\\
& \lesssim \theta_{q+1}^{-1}\lambda^{2{\alpha}lpha-1}r_{\perp}^{\frac{2}{\rho}-1}r_{\parallel}^{\frac{1}{\rho}-\frac{1}{2}}\tau^{-\frac12}\lesssim \theta_{q+1}^{-1}\lambda_qmbda^{-\frac {\varepsilon}2}.\lambda_qbel{e5.18}
\end{align}
Similarly, by Lemma \ref{totalest},
\begin{align}
&\norm{\nu \mathcal{R}(-\Delta)^{{\alpha}lpha} w_{q+1}^{(c)} }_{L_t^1L^\rho_x}
\lesssim \theta_{q+1}^{-1}\lambda^{2{\alpha}lpha -1}r_{\perp}^{\frac{2}{\rho} }r_{\parallel}^{\frac{1}{\rho}-\frac{3}{2}}\tau^{-\frac12}\lesssim \theta_{q+1}^{-1}\lambda_qmbda^{-2{\varepsilon}},\lambda_qbel{e5.19}\\
&\norm{\nu \mathcal{R}(-\Delta)^{{\alpha}lpha} w_{q+1}^{(t)} }_{L_t^1L^\rho_x}
\lesssim \theta_{q+1}^{-2}\lambda^{2{\alpha}lpha-1}\mu^{-1}r_{\perp}^{\frac{2}{\rho}-2}r_{\parallel}^{\frac{1}{\rho}-1}\lesssim \theta_{q+1}^{-2}\lambda_qmbda^{-{\varepsilon}},\lambda_qbel{e5.20}\\
&\norm{\nu \mathcal{R}(-\Delta)^{{\alpha}lpha} {\bf{w}}o }_{L_t^1L^\rho_x}
\lesssim \theta_{q+1}^{-30}\sigmaigma^{-1}\lesssim \theta_{q+1}^{-30}\lambda_qmbda^{-2{\varepsilon}} \lesssim \lambda_qmbda_{q+1}^{-{\varepsilon}}.\lambda_qbel{e5.21}
\end{align}
Hence, combining \eqref{e5.17}-\eqref{e5.21} with the fact that $\theta_{q+1}^{-30}\ll \lambda_qmbda^{{\varepsilon}}$, we obtain
\begin{align} \lambda_qbel{mag viscosity}
\norm{\nu \mathcal{R}(-\Delta)^{{\alpha}lpha} w_{q+1} }_{L_t^1L^\rho_x} \lesssim \theta_{q+1}^{-1}\lambda_qmbda^{-\frac {\varepsilon}2} .
\end{align}
It remains to treat the nonlinearity in \eqref{rup}.
By the Sobolev embedding $H^3_x{B_q}ookrightarrow L^{\infty}_x$,
\eqref{nuh3} and Lemma \ref{totalest},
\begin{align} \lambda_qbel{linear estimate1}
&\norm{ \mathcal{R}\mathbb{P}_H{\mathrm{div}}\left(w_{q + 1} \otimes {\bf{w}}t u_q + {\bf{w}}t u_q \otimes w_{q+1}\right) }_{L_t^1L^\rho_x} \nonumber \\
\lesssim\,&\norm{w_{q + 1} \otimes {\bf{w}}t u_q + {\bf{w}}t u_q \otimes w_{q+1} }_{L_t^1L^\rho_x} \nonumber \\
\lesssim\,& \norm{ {\bf{w}}t u_q}_{L^{\infty}_{t}H^3_x} \norm{w_{q+1}}_{L_t^1L^\rho_x} \nonumber \\
\lesssim\, &\lambda_qmbda^5_q (\theta_{q+1}^{-1} r_{\perp}^{\frac{2}{\rho}-1}r_{\parallel}^{\frac{1}{\rho}-\frac12} \tau^{-\frac 12}+\theta_{q+1}^{-2}\mu^{-1}r_{\perp}^{\frac{2}{\rho}-2}r_{\parallel}^{\frac{1}{\rho}-1} +\theta_{q+1}^{-9}\sigmaigma^{-1} )
\lesssim \theta_{q+1}^{-10}\lambda_qmbda^{-2{\varepsilon}}.
\end{align}
Therefore, we conclude from \eqref{time derivative}, \eqref{mag viscosity} and \eqref{linear estimate1} that
\begin{align} \lambda_qbel{linear estimate}
\norm{\mathring{R}_{lin} }_{L_t^1L^\rho_x}
& \lesssim \theta_{q+1}^{-14}\lambda_qmbda^{-\frac {\varepsilon}2} +\theta_{q+1}^{-1}\lambda_qmbda^{-\frac {\varepsilon}2}+\theta_{q+1}^{-10}\lambda_qmbda^{-2{\varepsilon}}
\lesssim \theta_{q+1}^{-14}\lambda_qmbda^{-\frac {\varepsilon}2}.
\end{align}
\partialaragraph{\bf (ii) Oscillation error.}
Now let us treat the delicate oscillation error.
For this purpose, we decompose the oscillation error into three parts:
\begin{align*}
\mathring{R}_{osc} = \mathring{R}_{osc.1} + \mathring{R}_{osc.2}+ \mathring{R}_{osc.3},
\end{align*}
where the low-high spatial oscillation error
\begin{align*}
\mathring{R}_{osc.1}
&:= \sigmaum_{k \in \Lambda }\mathcal{R} \mathbb{P}_{H}\mathbb{P}_{\neq 0}\left(g_{(\tau)}^2 \mathbb{P}_{\neq 0}(W_{(k)}\otimes W_{(k)} ){\nabla}bla (a_{(k)}^2) \right),
\end{align*}
the high temporal oscillation error
\begin{align*}
\mathring{R}_{osc.2}
&:= - \mu^{-1} \sigmaum_{k \in \Lambda}\mathcal{R}\mathbb{P}_{H}\mathbb{P}_{\neq 0}\left(\partial_t (a_{(k)}^2g_{(\tau)}^2) \partialsi_{(k_1)}^2\partialhi_{(k)}^2k_1\right),
\end{align*}
and the low frequency error
\begin{align*}
\mathring{R}_{osc.3} &
:= -\sigmaigma^{-1}\sigmaum_{k\in \Lambda}\mathcal{R}\mathbb{P}_{H}\mathbb{P}_{\neq 0}
\left(h_{(\tau)}{\fint}_{{\mathbb{T}}^3}W_{(k)}\otimes W_{(k)} {\rm d} x\, \partial_t{\nabla}bla(a_{(k)}^{2})\right).
\end{align*}
For the low-high spatial oscillation error $\mathring{R}_{osc.1}$, we note that the spatial building blocks are highly oscillatory:
\begin{align*}
\mathbb{P}_{\neq 0}(W_{(k)}\otimes W_{(k)} )=\mathbb{P}_{g_{(\tau)}eq (\lambda_qmbda r_{\perp}/2)}(W_{(k)}\otimes W_{(k)} ).
\end{align*}
Thus,
we use Lemmas \ref{buildingblockestlemma}, \ref{mae-endpt1}
and apply Lemma \ref{commutator estimate1}
with $a = {\nabla}bla (a_{(k)}^2)$ and $f = \partialsi_{(k_1)}^2\partialhi_{(k)}^2$
to get
\begin{align} \lambda_qbel{I1-esti-endpt1}
\norm{\mathring{R}_{osc.1} }_{L^1_tL^\rho_x}
&\lesssim \sigmaum_{ k \in \Lambda }
\|g_{(\tau)}\|_{L^2_t}^2\norm{|{\nabla}bla|^{-1} \mathbb{P}_{\not =0}
\left(\mathbb{P}_{g_{(\tau)}eq (\lambda_qmbda r_{\perp}/2)}(W_{(k)}\otimes W_{(k)} ){\nabla}bla (a_{(k)}^2)\right)}_{C_tL^\rho_x} \notag \nonumber \\
& \lesssim \sigmaum_{ k \in \Lambda } \||{\nabla}|^3 (a^2_{(k)})\|_{C_{t,x}}
\lambda_qmbda^{-1} r_{\perp}^{-1} \norm{ \partialsi^2_{(k_1)}}_{C_tL^{\rho}_x} \norm{\partialhi^2_{(k)} }_{C_tL^{\rho}_x} \nonumber \\
& \lesssim \theta_{q+1}^{-23} \lambda_qmbda^{-1} r_{\perp}^{\frac{2}{\rho}-3}r_{\parallel}^{\frac{1}{\rho}-1}.
\end{align}
Moreover, we apply Lemma~\ref{totalest}
and use the large temporal oscillation parameter $\mu$ to balance the high temporal oscillation error
$\mathring{R}_{osc.2}$:
\begin{align} \lambda_qbel{I2-esti-endpt1}
\norm{\mathring{R}_{osc.2} }_{L^1_tL_x^\rho}
&\lesssim {\mu}^{-1} \sigmaum_{k\in\Lambda }\norm{\mathcal{R} \mathbb{P}_{H} \mathbb{P}_{\neq 0}\left(\partial_t (a_{(k)}^2g_{(\tau)}^2)\partialsi_{(k_1)}^2\partialhi_{(k)}^2k_1\right)}_{L^1_tL_x^\rho} \nonumber \\
&\lesssim {\mu}^{-1} \sigmaum_{k\in\Lambda}
\left( \norm{\partial_t (a_{(k)}^2) }_{C_{t,x}}\norm{g_{(\tau)}^2 }_{L^1_t}+ \norm{a_{(k)} }_{C_{t,x}}^2\norm{\partial_t(g_{(\tau)}^2)}_{ L_t^1 } \right)
\norm{\partialsi_{(k_1)}}_{C_tL^{2\rho}_x}^2\norm{\partialhi_{(k)}}_{L^{2\rho}_x}^2 \nonumber \\
&\lesssim (\theta_{q+1}^{-9}+\theta_{q+1}^{-2}\tau\sigmaigma)\mu^{-1}r_{\perp}^{\frac{2}{\rho}-2}r_{\parallel}^{\frac{1}{\rho}-1} \notag\\
& \lesssim \theta_{q+1}^{-2}\tau\sigmaigma\mu^{-1}r_{\perp}^{\frac{2}{\rho}-2}r_{\parallel}^{\frac{1}{\rho}-1}.
\end{align}
The low frequency error $\mathring{R}_{osc.3} $
can be estimated easily by using \eqref{hk-esti} and \eqref{mag amp estimates},
\begin{align} \lambda_qbel{I3-esti-endpt1}
\norm{\mathring{R}_{osc.3} }_{L^1_tL^\rho_x}
&\lesssim \sigmaigma^{-1} \sigmaum_{k\in\Lambda}
\left\|h_{(\tau)}\, \partial_t{\nabla}bla(a_{(k)}^{2})\right\|_{L^1_tL^\rho_x} \nonumber \\
&\lesssim \sigmaigma^{-1} \sigmaum_{k\in\Lambda} \|h_{(\tau)}\|_{C_t}\left( \norm{a_{(k)} }_{C_{t,x}} \norm{a_{(k)} }_{C_{t,x}^2} +\norm{a_{(k)} }_{C_{t,x}^1}^2\right)\nonumber \\
&\lesssim \theta_{q+1}^{-15} \sigmaigma^{-1}.
\end{align}
Therefore, combining \eqref{I1-esti-endpt1}-\eqref{I3-esti-endpt1}
and using \eqref{larsrp}, \eqref{rs-rp-p-ve}
and the bound $0<{\varepsilon}<{(2-{\alpha})}/{20}$,
we conclude
\begin{align}
\lambda_qbel{oscillation estimate}
\norm{\mathring{R}_{osc}}_{L_t^1L^\rho_x}
&\lesssim \theta_{q+1}^{-23} \lambda_qmbda^{-1} r_{\perp}^{\frac{2}{\rho}-3}r_{\parallel}^{\frac{1}{\rho}-1}
+\theta_{q+1}^{-9}\tau\sigmaigma\mu^{-1}r_{\perp}^{\frac{2}{\rho}-2}r_{\parallel}^{\frac{1}{\rho}-1}+\theta_{q+1}^{-15} \sigmaigma^{-1} \notag\\
&\lesssim \theta_{q+1}^{-23} \lambda^{-{\varepsilon}}+\theta_{q+1}^{-2}\lambda^{2{\alpha}-4+12{\varepsilon}}+\theta_{q+1}^{-15} \lambda^{-2\varepsilon}\notag \\
& \lesssim \theta_{q+1}^{-23} \lambda_qmbda^{-{\varepsilon}}.
\end{align}
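Here, for instance, the $\lambda$-power in the middle term on the first line is evaluated by means of \eqref{larsrp} and \eqref{rs-rp-p-ve}:
\begin{align*}
\tau\sigma\mu^{-1}r_{\perp}^{\frac2\rho-2}r_{\parallel}^{\frac1\rho-1}
=\lambda^{(4{\alpha}-5+11\varepsilon)+2\varepsilon-(2{\alpha}-1+2\varepsilon)+\varepsilon}
=\lambda^{2{\alpha}-4+12\varepsilon},
\end{align*}
which is indeed $\lesssim\lambda^{-\varepsilon}$ because $0<\varepsilon<(2-{\alpha})/20$.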
\partialaragraph{\bf (iii) Corrector error.}
As in \cite{lzz21},
we take $p_1,p_2\in(1,{\infty})$ such that
$$ \frac{1}{p_1}=1-{\bf{w}}t \eta,\quad \frac{1}{p_1}=\frac{1}{p_2}+\frac{1}{2},$$
with ${\bf{w}}t \eta\leq {\varepsilon}/(4(3-8{\varepsilon}))$.
Using H\"older's inequality, Lemma \ref{totalest},
\eqref{Lp decorr vel} and \eqref{e3.41}
we derive
\begin{align}
\norm{\mathring{R}_{cor} }_{L^1_{t}L^{p_1}_x}
\lesssim& \norm{ w_{q+1}^{(p)} \otimes (w_{q+1}^{(c)}+ w_{q+1}^{(t)}+{\bf{w}}o) +(w_{q+1}^{(c)}+w_{q+1}^{(t)}+{\bf{w}}o) \otimes w_{q+1} }_{L^1_{t}L^{p_1}_x} \notag \\
\lesssim& \norm{w_{q+1}^{(c)}+w_{q+1}^{(t)}+{\bf{w}}o }_{L^2_{t}L^{p_2}_x} (\norm{w^{(p)}_{q+1} }_{L^2_{t,x}} + \norm{w_{q+1} }_{L^2_{t,x}})\notag \\
\lesssim& {\rm d}elta_{q+1}^{\frac 12} ( \theta_{q+1}^{-1}r_{\perp}^{\frac{2}{p_2} }r_{\parallel}^{\frac{1}{p_2}-\frac32}+ \theta_{q+1}^{-2}\mu^{-1} r_{\perp}^{\frac{2}{p_2}-2}r_{\parallel}^{\frac{1}{p_2}-1} \tau^{\frac 12} +\theta_{q+1}^{-9}\sigmaigma^{-1} ) \notag \\
\lesssim& \theta_{q+1}^{-9}{\rm d}elta_{q+1}^\frac 12 (\lambda_qmbda^{-2{\varepsilon}+3{\bf{w}}t \eta-8{\bf{w}}t \eta{\varepsilon}}+\lambda_qmbda^{-\frac {\varepsilon}2+3{\bf{w}}t \eta-8{\bf{w}}t \eta{\varepsilon}}+ \lambda_qmbda^{-2{\varepsilon}} ) \lesssim \theta_{q+1}^{-9} \lambda_qmbda^{-\frac {\varepsilon}4}, \lambda_qbel{corrector estimate}
\end{align}
where the last step is due to the inequality
$-{\varepsilon}/2 + 3{\bf{w}}t \eta - 8{\bf{w}}t \eta {\varepsilon} \leq -{{\varepsilon}}/{4}$, guaranteed by the choice of ${\bf{w}}t \eta$.
Therefore, combining the estimates \eqref{linear estimate},
\eqref{oscillation estimate} and
\eqref{corrector estimate} with the fact that $\theta_{q+1}^{-9}\ll \lambda_qmbda^{\frac {\varepsilon}8}$, we conclude
\begin{align} \lambda_qbel{rq1b}
\|\mathring{R}_{q+1} \|_{L^1_{t,x}}
&\leq \| \mathring{R}_{lin} \|_{L^1_tL^\rho_{x}} + \| \mathring{R}_{osc}\|_{L^1_tL^\rho_{x}}
+ \|\mathring{R}_{cor} \|_{L^1_tL^{p_1}_{x}} \nonumber \\
&\lesssim \theta_{q+1}^{-14}\lambda_qmbda^{-\frac {\varepsilon}2}+\theta_{q+1}^{-23} \lambda_qmbda^{-{\varepsilon}} + \theta_{q+1}^{-9} \lambda_qmbda^{-\frac {\varepsilon}4}\nonumber \\
& \leq \lambda_qmbda_{q+1}^{-{\varepsilon}_R}{\rm d}elta_{q+2}.
\end{align}
Thus, the $L^1$-estimate \eqref{rl1} of Reynolds stress is verified.
\section{The supercritical regime $\mathcal{A}_2$} \label{Sec-Endpt2}
In this section,
we mainly treat the supercritical regime $\mathcal{A}_2$ when $\alpha \in [1,2)$,
whose borderline in particular includes the other endpoint $(s,\gamma,p)=(2\alpha/\gamma+1-2\alpha, \gamma, \infty)$.
\subsection{Space-time building blocks} \label{Sec-Interm-Flow}
Unlike in the previous endpoint case $(s,\gamma,p)=(3/p+1-2\alpha,\infty,p)$,
$\alpha\in [5/4,2)$,
the building blocks in this case are indexed by four parameters $r_{\perp}$, $\lambda$, $\tau$ and $\sigma$:
\begin{equation}\label{larsrp-endpt2}
r_{\perp} := \lambda_{q+1}^{-\alpha+1-8\varepsilon},\
\lambda := \lambda_{q+1},\ \tau:=\lambda_{q+1}^{2\alpha}, \ \sigma:=\lambda_{q+1}^{2\varepsilon},
\end{equation}
where $\varepsilon$ is given by \eqref{ne3.1}.
Instead of the intermittent jets,
inspired by \cite{cl20.2},
we choose the concentrated Mikado flows defined by
\begin{equation*}
W_{(k)} := \phi_{r_{\perp}}( \lambda r_{\perp} N_{\Lambda}k\cdot (x-\alpha_k),\lambda r_{\perp} N_{\Lambda}k_2\cdot (x-\alpha_k))k_1,\ \ k \in \Lambda,
\end{equation*}
where we keep the same notations $\phi_{r_{\perp}}$, $\Phi_{r_{\perp}}$, $N_{\Lambda}$
and $(k,k_1,k_2)$ as in \S \ref{Sec-Flow-Endpt1}.
We still use the same temporal building blocks $g_{(\tau)}$, $h_{(\tau)}$
as in \eqref{gk},
but with the different choice of the parameters $\tau, \sigma$
given by \eqref{larsrp-endpt2}.
It should be mentioned that,
unlike for the intermittent jets defined in \S~\ref{Sec-Flow-Endpt1}, the term $\mu t$
and the concentration parameter $r_{\parallel}$ are not involved in the Mikado flows.
Hence, the Mikado flows provide at most 2D intermittency.
However, the temporal building blocks provide more intermittency,
which achieves even 4D spatial intermittency
when $\alpha$ is close to 2.
Then, setting
\begin{equation}\label{snp-endpt2}
\begin{array}{ll}
&\phi_{(k)}(x) := \phi_{r_{\perp}}( \lambda r_{\perp} N_{\Lambda}k\cdot (x-\alpha_k),\lambda r_{\perp} N_{\Lambda}k_2\cdot (x-\alpha_k)), \\
&\Phi_{(k)}(x) := \Phi_{r_{\perp}}( \lambda r_{\perp} N_{\Lambda}k\cdot (x-\alpha_k),\lambda r_{\perp} N_{\Lambda}k_2\cdot (x-\alpha_k)),
\end{array}
\end{equation}
we may rewrite
\begin{equation}\label{snwd-endpt2}
W_{(k)} = \phi_{(k)} k_1,\quad k\in \Lambda.
\end{equation}
The corresponding potential is then defined by
\begin{equation}
\begin{aligned}
\label{corrector vector-endpt2}
W_{(k)}^c := \frac{1}{\lambda^2N_{ \Lambda }^2}\Phi_{(k)} k_1 .
\end{aligned}
\end{equation}
We summarize the estimates of the spatial building blocks
in Lemma \ref{buildingblockestlemma-endpt2},
which follows from Lemma \ref{buildingblockestlemma}.
\begin{lemma} [Estimates of Mikado flows] \label{buildingblockestlemma-endpt2}
For any $p \in [1,\infty]$ and $N \in \mathbb{N}$, we have
\begin{align}
&\left\|\nabla^{N} \phi_{(k)}\right\|_{L^{p}_{x}}+\left\|\nabla^{N} \Phi_{(k)}\right\|_{L^{p}_{x}}
\lesssim r_{\perp}^{\frac 2p- 1} \lambda^{N}, \label{intermittent estimates2-endpt2}
\end{align}
where the implicit constants are independent of $r_{\perp},\,r_{\parallel},\,\lambda$ and $\mu$. Moreover, it holds that
\begin{align}
&\displaystyle \|\nabla^{N} W_{(k)}\|_{C_t L^{p}_{x}}
+\lambda^{2} \|\nabla^{N} W_{(k)}^c\|_{C_t L^{p}_{x}}\lesssim r_{\perp}^{\frac 2p- 1} \lambda^{N}, \ \ k\in \Lambda. \label{ew-endpt2}
\end{align}
\end{lemma}
\subsection{Velocity perturbations} \label{Sec-Pert}
We define the amplitudes of the velocity perturbations by
\begin{equation}\label{akb-endpt2}
a_{(k)}(t,x):= \varrho^{\frac{1}{2} } (t,x) f (t)\gamma_{(k)}
\Big({\rm Id}-\frac{\mathring{\widetilde R}_q(t,x)}{\varrho(t,x)}\Big), \quad k \in \Lambda,
\end{equation}
where $\varrho, f, \gamma_{(k)}$ are defined as in \S \ref{Subsec-Velo-perturb}.
Note that
the amplitudes $a_{(k)}$, $k\in \Lambda$,
obey the same estimates as in Lemma \ref{mae-endpt1}.
Namely, we have
\begin{lemma} \label{mae-endpt2}
For $1\leq N\leq 9$, $k\in \Lambda$, we have
\begin{align}
\label{e3.15.2}
&\norm{a_{(k)}}_{L^2_{t,x}} \lesssim \delta_{q+1}^{\frac{1}{2}} ,\\
\label{mag amp estimates-endpt2}
& \norm{ a_{(k)} }_{C_{t,x}} \lesssim \theta_{q+1}^{-1},\ \ \norm{ a_{(k)} }_{C_{t,x}^N} \lesssim \theta_{q+1}^{-7N},
\end{align}
where the implicit constants are independent of $q$.
\end{lemma}
Next, we define the principal part $w_{q+1}^{(p)}$ of the velocity perturbations by
\begin{align}
w_{q+1}^{(p)} &:= \sum_{k \in \Lambda } a_{(k)}g_{(\tau)} W_{(k)},
\label{pv-endpt2}
\end{align}
which satisfies the same algebraic identity as in \eqref{mag oscillation cancellation calculation}.
The corresponding incompressibility corrector is then defined by
\begin{align}\label{wqc-dqc-endpt2}
w_{q+1}^{(c)}
&:= \sum_{k\in \Lambda }g_{(\tau)} (\nabla a_{(k)} \times {\mathrm{curl}} W_{(k)}^c+ {\mathrm{curl}} (\nabla a_{(k)} \times W_{(k)}^c)).
\end{align}
Note that the incompressibility corrector \eqref{wqc-dqc-endpt2}
is different from the previous one in \eqref{wqc-dqc}.
By straightforward computations,
\begin{align}
& w_{q+1}^{(p)} + w_{q+1}^{(c)}=\sum_{k \in \Lambda} {\mathrm{curl}} {\mathrm{curl}} ( a_{(k)} g_{(\tau)} W_{(k)}^c) , \label{div free velocity}
\end{align}
which immediately yields
\begin{align*}
{\mathrm{div}} (w_{q+1}^{(p)} + w_{q +1}^{(c)})= 0.
\end{align*}
Regarding the temporal corrector,
because the new spatial building block \eqref{snwd-endpt2} satisfies ${\mathrm{div}} (W_{(k)}\otimes W_{(k)})=0$,
it is not necessary to introduce the temporal corrector $w_{q+1}^{(t)}$ as in \eqref{veltemcor}
to balance the spatial oscillation.
We will only need the temporal corrector $w_{q+1}^{(o)}$
to balance the high temporal frequency oscillation
in \eqref{mag oscillation cancellation calculation}:
\begin{align}
& w_{q+1}^{(o)}:= -\sigma^{-1}\sum_{k\in\Lambda }\mathbb{P}_{H}\mathbb{P}_{\neq 0}\left(h_{(\tau)}{\fint}_{{\mathbb{T}}^3} W_{(k)}\otimes W_{(k)}{\rm d} x\,\nabla (a_{(k)}^2) \right) . \label{wo-endpt2}
\end{align}
Then, by virtue of \eqref{hk} and \eqref{wo-endpt2}
and Leibniz's rule,
we see that the algebraic identity \eqref{utemcom} is still valid.
Now, we are ready to define the velocity perturbation $w_{q+1}$ at level $q+1$ by
\begin{align}
w_{q+1} &:= w_{q+1}^{(p)} + w_{q+1}^{(c)}+w_{q+1}^{(o)}
\label{velocity perturbation-endpt2}
\end{align}
and the velocity field at level $q+1$ by
\begin{align}
& u_{q+1}:= \widetilde u_q + w_{q+1},
\label{q+1 velocity-endpt2}
\end{align}
where $\widetilde u_q$ is
the velocity field already prepared in
the gluing stage in \S \ref{Sec-Concen-Rey}.
By the above constructions, $w_{q+1}$ is mean-free and divergence-free.
Analogous to Lemma \ref{totalest},
we have the following estimates of the velocity perturbations.
\begin{lemma} [Estimates of perturbations] \label{totalest-endpt2}
For any $\rho \in(1,\infty)$, $\gamma \in [1,\infty]$ and
every integer $0\leq N\leq 7$,
we have the following estimates:
\begin{align}
&\norm{\nabla^N w_{q+1}^{(p)} }_{L^\gamma_tL^\rho_x } \lesssim \theta_{q+1}^{-1} \lambda^Nr_{\perp}^{\frac{2}{\rho}-1}\tau^{\frac12-\frac{1}{\gamma}},\label{uprinlp-endpt2}\\
&\norm{\nabla^N w_{q+1}^{(c)} }_{L^\gamma_tL^\rho_x } \lesssim \theta_{q+1}^{-7}\lambda^{N-1}r_{\perp}^{\frac{2}{\rho}-1}\tau^{\frac12-\frac{1}{\gamma}}, \label{ucorlp-endpt2} \\
&\norm{\nabla^N w_{q+1}^{(o)} }_{L^\gamma_tL^\rho_x }\lesssim \theta_{q+1}^{-7N-9}\sigma^{-1} ,\label{dcorlp-endpt2}
\end{align}
where the implicit constants depend only on $N$, $\gamma$ and $\rho$. In particular, for integers $1\leq N\leq 7$, we have
\begin{align}
& \norm{ w_{q+1}^{(p)} }_{L^{\infty}_tH^N_x } + \norm{ w_{q+1}^{(c)} }_{L^{\infty}_tH^N_x}+\norm{ w_{q+1}^{(o)} }_{L^{\infty}_tH^N_x}\lesssim \lambda^{N+2},\label{principal h3 est-endpt2}\\
& \norm{\partial_t w_{q+1}^{(p)} }_{L^{\infty}_tH^N_x } + \norm{\partial_t w_{q+1}^{(c)} }_{L^{\infty}_tH^N_x}+\norm{\partial_t w_{q+1}^{(o)} }_{L^{\infty}_tH^N_x}\lesssim \lambda^{N+5},\label{pth2 est-endpt2}
\end{align}
where the implicit constants are independent of $\lambda$.
\end{lemma}
\begin{proof}
First, using \eqref{gk estimate}, \eqref{ew-endpt2},
\eqref{pv-endpt2} and Lemma~\ref{mae-endpt2}
we get that for any $\rho \in (1,\infty)$,
\begin{align}\lambda_qbel{uplp}
\norm{{\nabla}bla^N w_{q+1}^{(p)} }_{L^g_{(\tau)}ammamma_tL^\rho_x }
\lesssim& \sigmaum_{k \in \Lambda}
\sigmaum\limits_{N_1+N_2 = N}
\|a_{(k)}\|_{C^{N_1}_{t,x}}\|g_{(\tau)}\|_{L_t^g_{(\tau)}ammamma}
\norm{ {\nabla}bla^{N_2} W_{(k)} }_{C_tL^\rho_x } \notag \\
\lesssim& \theta_{q+1}^{-1}\lambda^Nr_{\perp}^{\frac{2}{\rho}-1}\tau^{\frac12-\frac{1}{g_{(\tau)}ammamma}},
\end{align}
and thus \eqref{uprinlp-endpt2} follows.
Moreover, by \eqref{b-beta-ve},
\eqref{gk estimate}, \eqref{ew-endpt2}, \eqref{wqc-dqc-endpt2} and Lemma \ref{mae-endpt2},
\begin{align*}
\norm{{\nabla}^N w_{q+1}^{(c)} }_{L^g_{(\tau)}ammamma_tL^\rho_x} \lesssim&
\sigmaum\limits_{k\in \Lambda }\|g_{(\tau)}\|_{L^g_{(\tau)}ammamma_t} \sigmaum_{N_1+N_2=N}
\left( \norm{ a_{(k)} }_{C_{t,x}^{N_1+1}} \norm{{\nabla}^{N_2} W^c_{(k)}}_{C_tW^{1,\rho}_x }+ \norm{ a_{(k)} }_{C_{t,x}^{N_1+2}} \norm{{\nabla}^{N_2} W^c_{(k)}}_{C_tL^{\rho}_x } \right) \nonumber \\
\lesssim & \theta_{q+1}^{-7}\lambda_qmbda^{N-1}r_{\perp}^{\frac{2}{\rho}-1} \tau^{\frac12-\frac{1}{g_{(\tau)}ammamma}},
\end{align*}
which implies \eqref{ucorlp-endpt2}.
We then estimate the temporal corrector ${\bf{w}}o$
by using \eqref{wo-endpt2}, \eqref{hk-esti} and Lemmas \ref{mae-endpt2}:
\begin{align*}
\norm{ {\nabla}^N {\bf{w}}o }_{L^g_{(\tau)}ammamma_tL^\rho_x }
\lesssim \sigmaigma^{-1}\sigmaum_{k \in \Lambda}\|h_{(\tau)}\|_{C_{t}} \|{\nabla}bla^{N+1} (a^2_{(k)})\|_{C_{t,x}}
\lesssim \theta_{q+1}^{-7N-9} \sigmaigma^{-1}.
\end{align*}
Regarding the $L^{\infty}_tH^N_x$-estimates of velocity perturbations,
using \eqref{ne3.1},
\eqref{larsrp-endpt2},
\eqref{uprinlp-endpt2}-\eqref{dcorlp-endpt2} we get
\begin{align*}
& \norm{ w_{q+1}^{(p)} }_{L^{\infty}_tH^N_x } + \norm{ w_{q+1}^{(c)} }_{L^{\infty}_tH^N_x}+\norm{ {\bf{w}}o }_{L^{\infty}_tH^N_x}\notag \\
\lesssim &\, \theta_{q+1}^{-1}\lambda_qmbda^N \tau^{\frac12} +\theta_{q+1}^{-7}\lambda_qmbda^{N-1} \tau^{\frac12} + \theta_{q+1}^{-7N-9}\sigmaigma^{-1} \notag\\
\lesssim &\,\theta_{q+1}^{-1}\lambda_qmbda^{{\alpha}+N} +\theta_{q+1}^{-7}\lambda_qmbda^{{\alpha}+N-1}+ \theta_{q+1}^{-7N-9} \lambda_qmbda^{-2{\varepsilon}} \lesssim \lambda_qmbda^{N+2},
\end{align*}
which verifies \eqref{principal h3 est-endpt2}.
It remains to prove \eqref{pth2 est-endpt2}. By virtue of \eqref{b-beta-ve}, \eqref{larsrp-endpt2} and Lemmas \ref{Lem-gk-esti}, \ref{buildingblockestlemma-endpt2} and \ref{mae-endpt2}, we get
\begin{align} \lambda_qbel{wprincipal h2 est.2}
\norm{\partial_t w_{q+1}^{(p)} }_{L^{\infty}_tH^N_x }
\lesssim& \sigmaum_{k \in \Lambda }
\|a_{(k)}\|_{C_{t,x}^{N+1} }
\norm{ \partial_t g_{(\tau)}}_{L^{\infty}_t}\norm{ W_{(k)} }_{L^{\infty}_tH^N_x}
\lesssim \theta_{q+1}^{-7N-7} \lambda^{N} \sigmaigma \tau^{\frac 32}
\end{align}
and
\begin{align} \lambda_qbel{uc h2 est.2}
\norm{\partial_t w_{q+1}^{(c)} }_{L^{\infty}_tH^N_x }
& \lesssim \sigmaum_{k \in \Lambda }
\|a_{(k)}\|_{C_{t,x}^{N+3}}
\norm{\partial_t g_{(\tau)}}_{C_{t}}
(\norm{ W^c_{(k)} }_{L^{\infty}_tH^N_x} + \norm{ {\nabla}bla W^c_{(k)} }_{L^{\infty}_tH^N_x} ) \nonumber \\
& \lesssim \theta_{q+1}^{-7N-21} \sigmaigma \tau^{\frac 32} (\lambda^{N-2}+\lambda^{N-1}) \notag \\
& \lesssim \theta_{q+1}^{-7N-21} \lambda^{N-1}\sigmaigma \tau^{\frac 32} .
\end{align}
Since $\mathbb{P}_H \mathbb{P}_{\not =0}$ is bounded in $H^N_x$,
similarly to \eqref{wo h2 est},
we have
\begin{align} \lambda_qbel{wo h2 est.2}
\norm{\partial_t w_{q+1}^{(o)} }_{L^{\infty}_tH^N_x }
\lesssim \sigmaigma^{-1} \sigmaum_{k \in \Lambda } \|\partial_t (h_{(\tau)} {\nabla} (a_{(k)}^2) )\|_{L^{\infty}_tH^N_x}
\lesssim \theta_{q+1}^{-7N-9}\tau .
\end{align}
Therefore, taking into account that $\theta_{q+1}^{-7N-21}\leq \lambda^{N{\varepsilon}/2}$
and $0<{\varepsilon}\leq (2-{\alpha})/20$ we conclude that
\begin{align*}
& \norm{\partial_t w_{q+1}^{(p)} }_{L^{\infty}_tH^N_x } + \norm{\partial_t w_{q+1}^{(c)} }_{L^{\infty}_tH^N_x}+\norm{ \partial_t {\bf{w}}o }_{L^{\infty}_tH^N_x}\notag \\
\lesssim &\, \theta_{q+1}^{-7N-7}\lambda_qmbda^N\sigmaigma \tau^{\frac32}
+\theta_{q+1}^{-7N-21}\lambda_qmbda^{N-1} \sigmaigma\tau^{\frac32}
+ \theta_{q+1}^{-7N-9}\tau \notag\\
\lesssim &\,\theta_{q+1}^{-7N-7}\lambda_qmbda^{3{\alpha}+N+2{\varepsilon}} +\theta_{q+1}^{-7N-21}\lambda_qmbda^{3{\alpha}+N-1+2{\varepsilon}}+ \theta_{q+1}^{-7N-9} \lambda_qmbda^{2{\alpha}} \lesssim \lambda_qmbda^{N+6}.
\end{align*}
Therefore, the proof of Lemma \ref{totalest-endpt2} is complete.
\end{proof}
\paragraph{\bf Verification of the inductive estimates for velocity.}
We apply the $L^p$ decorrelation Lemma~\ref{Decorrelation1} with $f= a_{(k)}$, $g = g_{(\tau)}\phi_{(k)}$ and $\sigma = \lambda^{2\varepsilon}$,
and then use \eqref{la}, \eqref{b-beta-ve}
and Lemmas \ref{Lem-gk-esti}, \ref{buildingblockestlemma-endpt2}
and \ref{mae-endpt2}
to derive
\begin{align}
\lambda_qbel{Lp decorr vel-endpt2}
\norm{w^{(p)}_{q+1}}_{L^2_{t,x}}
&\lesssim \sigmaum\limits_{k\in \Lambda}
{\bf B}ig(\|a_{(k)}\|_{L^2_{t,x}}\norm{ g_{(\tau)} }_{L^2_{t}} \norm{ \partialhi_{(k)}}_{C_tL^2_{x}} +\sigmaigma^{-\frac12}\|a_{(k)}\|_{C^1_{t,x}}\norm{ g_{(\tau)} }_{L^2_{t}} \norm{ \partialhi_{(k)}}_{C_tL^2_{x}}{\bf B}ig) \notag\\
&\lesssim {\rm d}elta_{q+1}^{\frac{1}{2}}+\theta_{q+1}^{-7}\lambda_qmbda^{-{\varepsilon}}_{q+1} \lesssim {\rm d}elta_{q+1}^{\frac{1}{2}}.
\end{align}
Then, using \eqref{b-beta-ve}, \eqref{Lp decorr vel-endpt2}
and Lemma \ref{totalest-endpt2} we obtain
\begin{align} \lambda_qbel{e3.41}
\norm{w_{q+1}}_{L^2_{t,x}} &\lesssim\norm{w_{q+1}^{(p)} }_{L^2_{t,x}} + \norm{ w_{q+1}^{(c)} }_{L^2_{t,x}} +\norm{ {\bf{w}}o }_{L^2_{t,x}}\notag \\
&\lesssim {\rm d}elta_{q+1}^{\frac{1}{2}} +\theta_{q+1}^{-7}\lambda_qmbda^{-1}+ \theta_{q+1}^{-9}\sigmaigma^{-1}\lesssim {\rm d}elta_{q+1}^{\frac{1}{2}},
\end{align}
and
\begin{align} \lambda_qbel{wql1}
\norm{w_{q+1}}_{L^1_tL^2_x} &\lesssim\norm{w_{q+1}^{(p)} }_{L^1_tL^2_x} + \norm{ w_{q+1}^{(c)} }_{L^1_tL^2_x}+\norm{ {\bf{w}}o }_{L^1_tL^2_x}\notag \\
&\lesssim \theta_{q+1}^{-1}\tau^{-\frac12}+\theta_{q+1}^{-7} \lambda^{-1} \tau^{-\frac12} + \theta_{q+1}^{-9}\sigmaigma^{-1}\lesssim \lambda_qmbda_{q+1}^{-{\varepsilon}}.
\end{align}
We are now ready to verify the iterative estimates for $u_{q+1}$.
In view of \eqref{uh3}, \eqref{uuql2}, \eqref{pdvh3}, \eqref{q+1 velocity-endpt2} and \eqref{principal h3 est-endpt2},
we have that for $a$ large enough,
similarly to \eqref{uh6},
\begin{align}
\norm{u_{q+1}}_{L^{\infty}_tH^3_x}
& \lesssim\norm{{\bf{w}}t u_q}_{L^{\infty}_tH^3_x}+\norm{w_{q+1}}_{L^{\infty}_tH^3_x} \notag \\
&\lesssim \lambda_qmbda_{q}^5+ \lambda_qmbda_{q+1}^{5}\lesssim \lambda_qmbda_{q+1}^5, \lambda_qbel{verifyuc1-endpt2} \\
\norm{\partial_t u_{q+1}}_{L^{\infty}_tH^2_x}
& \lesssim\norm{\partial_t {\bf{w}}t u_q}_{L^{\infty}_tH^2_x}+\norm{\partial_t w_{q+1}}_{L^{\infty}_tH^2_x} \notag \\
&\lesssim \theta_{q+1}^{-1}\lambda_q^{5}+m_{q+1}\lambda_q^5+ \lambda_qmbda_{q+1}^{8}\lesssim \lambda_qmbda_{q+1}^8. \lambda_qbel{verifyupth2-endpt2}
\end{align}
Moreover, we derive from \eqref{uuql2}, \eqref{e3.41} and \eqref{wql1} that
\begin{align}
\norm{u_{q} - u_{q+1}}_{L^2_{t,x}} & \leq \norm{ u_{q} -{\bf{w}}t u_q }_{L^2_{t,x}} + \norm{{\bf{w}}t u_q - u_{q+1}}_{L^2_{t,x}} \nonumber \\
&\lesssim \norm{ u_q - {\bf{w}}t u_q }_{L^{\infty}_tL^2_x}+ \norm{w_{q+1}}_{L^2_{t,x}} \nonumber \\
&\lesssim \lambda_qmbda_q^{-3}+{\rm d}elta_{q+1}^{\frac{1}{2}} \leq M^*{\rm d}elta_{q+1}^{\frac{1}{2}}, \lambda_qbel{e3.43}
\end{align}
for $M^*$ sufficiently large and
\begin{align} \lambda_qbel{uql1l2}
\norm{u_{q} - u_{q+1}}_{L^1_tL^2_x}
&\lesssim \norm{ u_q -{\bf{w}}t u_q }_{L^{\infty}_tL^2_x}+ \norm{w_{q+1}}_{L^1_tL^2_x} \nonumber \\
&\lesssim \lambda_qmbda_q^{-3}+\lambda_qmbda_{q+1}^{-{\varepsilon}} \leq {\rm d}elta_{q+2}^{\frac{1}{2}}.
\end{align}
Concerning the iterative estimate \eqref{u-B-Lw-conv},
let us first show the embedding
\begin{align}\label{sobolevem2}
H^3_x\hookrightarrow W^{s,p}_x.
\end{align}
In order to prove \eqref{sobolevem2},
when $(s,p,\gamma)\in \mathcal{A}_2$,
we see that for $1\leq p\leq 2$,
\begin{align}\label{e7.9}
0\leq s< 2\alpha+\frac{2\alpha-2}{p}+(1-2\alpha)\leq 2\alpha-1<3,
\end{align}
which, via the embedding $H^3_x\hookrightarrow H_x^s \hookrightarrow W^{s,p}_x$,
yields \eqref{sobolevem2}.
Moreover, for $p>2$,
since $\alpha<2$ and $s\geq 0$,
\begin{align}\label{e7.10}
0<1+\frac{2\alpha-2}{p}-s<1+\frac{2}{p}-\frac{2}{3}s,
\end{align}
which implies that $3/2>s-3/p$,
thereby yielding \eqref{sobolevem2}
by the Sobolev embedding.
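To spell out the Sobolev numerology used in the case $p>2$ (a standard embedding on the torus; we only record the exponent count):
\begin{align*}
H^3_x=W^{3,2}_x\hookrightarrow W^{s,p}_x \quad\text{on } {\mathbb{T}}^3
\qquad\text{provided}\qquad
3-\frac{3}{2}\geq s-\frac{3}{p},
\end{align*}
and the bound $s-3/p<3/2$ derived above is exactly this condition.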
Thus, similar to \eqref{wtu-u}, by virtue of \eqref{nrh3}, \eqref{est-vih3} and \eqref{sobolevem2}, we have
\begin{align*}
\norm{{\bf{w}}t u_q-u_q}_{L^g_{(\tau)}ammamma_tW^{s,p}_x} & \lesssim \norm{\sigmaum_i\chi_i(v_i-u_q)}_{L^{\infty}_tH^3_x}\lesssim \lambda_q^{-2}.
\end{align*}
Therefore, for any ${\alpha}\in [1,2)$,
using \eqref{b-beta-ve}, \eqref{larsrp-endpt2}, \eqref{sobolevem2}
and Lemma \ref{totalest-endpt2},
we derive
\begin{align}\label{lw-est.2}
\norm{ u_{q+1} - u_q }_{L^\gamma_tW^{s,p}_x}
&\lesssim \norm{\widetilde u_q-u_q}_{L^\gamma_tW^{s,p}_x}
+\norm{w_{q+1}}_{L^\gamma_tW^{s,p}_x} \notag\\
&\lesssim \lambda_q^{-2}+ \theta_{q+1}^{-1}\lambda^{s}r_{\perp}^{\frac{2}{p}-1}\tau^{\frac12-\frac{1}{\gamma}}
+\theta_{q+1}^{-30}\sigma^{-1} \notag\\
&\lesssim \lambda_q^{-2}+ \lambda_{q+1}^{s+2\alpha-1-\frac{2\alpha}{\gamma}-\frac{2\alpha-2}{p}+\varepsilon(9-\frac{16}{p}) }
+ \lambda_{q+1}^{-\varepsilon} .
\end{align}
Taking into account that, by \eqref{ne3.1},
\begin{align}\label{endpt2-condition}
s+2\alpha-1-\frac{2\alpha}{\gamma}-\frac{2\alpha-2}{p}+\varepsilon(9-\frac{16}{p})
\leq s+2\alpha-1-\frac{2\alpha}{\gamma}-\frac{2\alpha-2}{p}+9\varepsilon <-10\varepsilon,
\end{align}
we thus obtain
\begin{align}
\norm{ u_{q+1} - u_q }_{L^g_{(\tau)}ammamma_tW^{s,p}_x} \leq {\rm d}elta_{q+2}^{\frac12}. \lambda_qbel{nne6.6}
\end{align}
Therefore, the iterative estimates \eqref{uh3}, \eqref{upth2}, \eqref{u-B-L2tx-conv} and \eqref{u-B-Lw-conv} are verified.
\subsection{Reynolds stress} \label{Subsec-Reynolds-Endpt2}
Below we treat the Reynolds stress for the endpoint case
$(2\alpha/\gamma+1-2\alpha, \gamma, \infty)$.
We derive from equation \eqref{equa-nsr} at level $q+1$ that
the new Reynolds stress satisfies the equation
\begin{align}
\displaystyle{\mathrm{div}}\mathring{R}_{q+1} - \nabla P_{q+1}
&\displaystyle = \underbrace{\partial_t (w_{q+1}^{(p)}+w_{q+1}^{(c)}) +\nu(-\Delta)^{\alpha} w_{q+1} +{\mathrm{div}}\big(\widetilde u_q \otimes w_{q+1} + w_{q+ 1} \otimes \widetilde u_q \big) }_{ {\mathrm{div}}\mathring{R}_{lin} +\nabla P_{lin} } \notag\\
&\displaystyle\quad+ \underbrace{{\mathrm{div}} (w_{q+1}^{(p)} \otimes w_{q+1}^{(p)}+ \mathring{\widetilde R}_q)+\partial_t w_{q+1}^{(o)}}_{{\mathrm{div}}\mathring{R}_{osc} +\nabla P_{osc}} \notag\\
&\displaystyle\quad+ \underbrace{{\mathrm{div}}\Big((w_{q+1}^{(c)} +w_{q+1}^{(o)})\otimes w_{q+1}+ w_{q+1}^{(p)} \otimes (w_{q+1}^{(c)} +w_{q+1}^{(o)}) \Big)}_{{\mathrm{div}}\mathring{R}_{cor} +\nabla P_{cor}}. \label{ru-endpt2}
\end{align}
Then, using the inverse divergence operator $\mathcal{R}$
we can choose the Reynolds stress at level $q+1$ by
\begin{align}\label{rucom-endpt2}
\mathring{R}_{q+1} := \mathring{R}_{lin} + \mathring{R}_{osc}+ \mathring{R}_{cor},
\end{align}
where the linear error
\begin{align}
\mathring{R}_{lin} & := \mathcal{R}\left(\partial_t (w_{q+1}^{(p)} +w_{q+1}^{(c)} )\right)
+ \nu \mathcal{R} (-\Delta)^{\alpha} w_{q+1} + \mathcal{R}\mathbb{P}_H {\mathrm{div}} \left(\widetilde u_q \mathring{\otimes} w_{q+1} + w_{q+ 1}
\mathring{\otimes} \widetilde u_q\right), \label{rup-endpt2}
\end{align}
the oscillation error
\begin{align}\label{rou}
\mathring{R}_{osc} :=& \sum_{k \in \Lambda } \mathcal{R} \mathbb{P}_H\mathbb{P}_{\neq 0}\left(g_{(\tau)}^2 \mathbb{P}_{\neq 0}(W_{(k)}\otimes W_{(k)})\nabla (a_{(k)}^2)\right) \notag\\
&-\sigma^{-1}\sum_{k\in \Lambda}\mathcal{R} \mathbb{P}_H \mathbb{P}_{\neq 0}\left(h_{(\tau)}{\fint}_{{\mathbb{T}}^3}W_{(k)}\otimes W_{(k)}{\rm d} x\,\partial_t\nabla(a_{(k)}^{2})\right),
\end{align}
and the corrector error
\begin{align}
\mathring{R}_{cor} &
:= \mathcal{R} \mathbb{P}_H {\mathrm{div}} \bigg( w^{(p)}_{q+1} \mathring{\otimes} (w_{q+1}^{(c)} +w_{q+1}^{(o)})
+ (w_{q+1}^{(c)} +w_{q+1}^{(o)}) \mathring{\otimes} w_{q+1} \bigg). \label{rup2}
\end{align}
In the following we verify the inductive estimates for the new Reynolds stress $\mathring R_{q+1}$. \\
\paragraph{\bf Verification of $L^{\infty}_tH^N_x$-estimate of Reynolds stress.}
Since, by \eqref{principal h3 est-endpt2} and \eqref{pth2 est-endpt2},
the velocity perturbations obey the same upper bounds as in
\eqref{principal h3 est-endpt1} and \eqref{pth2 est-endpt1},
we can argue in a similar manner as in \eqref{ine-rq1h3}-\eqref{ul9}
and obtain \eqref{rh3} and \eqref{rh4} in the supercritical regime
$\mathcal{A}_2$. The details are omitted here. \\
\paragraph{\bf Verification of $L^1_{t,x}$-decay of Reynolds stress.}
We aim to verify the $L^1_{t,x}$-decay \eqref{rl1} of the Reynolds stress $\mathring{R}_{q+1}$ at level $q+1$.
In this case, we choose
\begin{align}\label{defp}
\rho:=\frac{2\alpha-2+16\varepsilon}{2\alpha-2+14\varepsilon}\in (1,2),
\end{align}
where $\varepsilon$ is given by \eqref{ne3.1}.
Then, we have
\begin{equation}\label{setp}
(1-\alpha-8\varepsilon)(\frac{2}{\rho}-1)=1-\alpha-6\varepsilon,
\end{equation}
and
\begin{align} \label{rs-rp-p-ve-endpt2}
r_{\perp}^{\frac 2\rho-1} = \lambda^{1-\alpha-6\varepsilon}.
\end{align}
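For the reader's convenience, let us verify \eqref{setp} and \eqref{rs-rp-p-ve-endpt2} explicitly; with the choice \eqref{defp} and the parameters \eqref{larsrp-endpt2} this is a two-line computation:
\begin{align*}
\frac{2}{\rho}-1=\frac{2(2\alpha-2+14\varepsilon)-(2\alpha-2+16\varepsilon)}{2\alpha-2+16\varepsilon}
=\frac{2\alpha-2+12\varepsilon}{2\alpha-2+16\varepsilon},
\qquad
1-\alpha-8\varepsilon=-\frac12(2\alpha-2+16\varepsilon),
\end{align*}
so that
\begin{align*}
(1-\alpha-8\varepsilon)\Big(\frac{2}{\rho}-1\Big)=-\frac12(2\alpha-2+12\varepsilon)=1-\alpha-6\varepsilon,
\qquad
r_{\perp}^{\frac 2\rho-1}=\lambda^{(1-\alpha-8\varepsilon)(\frac{2}{\rho}-1)}=\lambda^{1-\alpha-6\varepsilon}.
\end{align*}
In particular, $\rho\in(1,2)$: the numerator in \eqref{defp} exceeds the denominator, while $2\alpha-2+16\varepsilon<2(2\alpha-2+14\varepsilon)$ for $\alpha\geq 1$ and $\varepsilon>0$.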
\partialaragraph{\bf (i) Linear error.}
Note that, by Lemmas \ref{Lem-gk-esti},
\ref{buildingblockestlemma-endpt2}
and \ref{mae-endpt2}, \eqref{ne3.1}, \eqref{larsrp-endpt2}, \eqref{div free velocity}
and \eqref{rs-rp-p-ve-endpt2},
\begin{align}
& \| \mathcal{R}\partialartial_t( w_{q+1}^{(p)}+ w_{q+1}^{(c)})\|_{L_t^1L_x^\rho} \nonumber \\
\lesssim& \sigmaum_{k \in \Lambda}\| \mathcal{R} {\mathrm{curl}}{\mathrm{curl}}\partialartial_t(g_{(\tau)} a_{(k)} W^c_{(k)}) \|_{L_t^1L_x^\rho} \nonumber \\
\lesssim& \sigmaum_{k \in \Lambda}{\bf B}ig(\| g_{(\tau)}\|_{L^1_t}\| a_{(k)} \|_{C_{t,x}^2}\| W^c_{(k)} \|_{C_t W_x^{1,\rho}}+\| \partial_tg_{(\tau)}\|_{L_t^1}\| a_{(k)} \|_{C_{t,x}^1}\| W^c_{(k)} \|_{C_t W_x^{1,\rho}}{\bf B}ig)\nonumber \\
\lesssim& \theta_{q+1}^{-14}\tau^{-\frac12}r_{\perp}^{\frac{2}{\rho}-1}\lambda_qmbda^{-1} + \theta_{q+1}^{-7}\sigmaigma\tau^{\frac12}r_{\perp}^{\frac{2}{\rho}-1}\lambda_qmbda^{-1}
\lesssim \theta_{q+1}^{-7}\lambda_qmbda^{-4{\varepsilon}}.\lambda_qbel{time derivative-endpt2}
\end{align}
For the viscosity term, by \eqref{velocity perturbation-endpt2},
\begin{align}
\norm{ \nu\mathcal{R}(-\Delta)^{\alpha} w_{q+1} }_{L_t^1L^\rho_x} \lesssim & \norm{ \nu\mathcal{R}(-\Delta)^{\alpha} w_{q+1}^{(p)} }_{L_t^1L^\rho_x}+\norm{ \nu \mathcal{R}(-\Delta)^{\alpha} w_{q+1}^{(c)} }_{L_t^1L^\rho_x}+\norm{ \nu\mathcal{R}(-\Delta)^{\alpha} w_{q+1}^{(o)} }_{L_t^1L^\rho_x}.\label{e5.17.2}
\end{align}
Note that,
by the interpolation estimate, \eqref{larsrp-endpt2},
\eqref{uprinlp-endpt2}, \eqref{rs-rp-p-ve-endpt2}
and the fact that $2-{\alpha}lpha g_{(\tau)}eq 5\varepsilon$,
\begin{align}
\norm{ \nu\mathcal{R}(-\Delta)^{{\alpha}lpha} w_{q+1}^{(p)} }_{L_t^1L^\rho_x}
& \lesssim \norm{ |{\nabla}|^{2{\alpha}-1} w_{q+1}^{(p)} }_{L_t^1L^\rho_x}\notag\\
& \lesssim \norm{w_{q+1}^{(p)}}_{L_t^1L^\rho_x} ^{\frac{4-2{\alpha}}{3}} \norm{w_{q+1}^{(p)}}_{L_t^1W^{3,\rho}_x} ^{\frac{2{\alpha}-1}{3}}\notag\\
& \lesssim \theta_{q+1}^{-1}\lambda^{2{\alpha}lpha-1}r_{\perp}^{\frac{2}{\rho}-1}\tau^{-\frac12}\lesssim \theta_{q+1}^{-1}\lambda_qmbda^{-6{\varepsilon}}.\lambda_qbel{e5.18.2}
\end{align}
Similarly, by Lemma \ref{totalest-endpt2},
\begin{align}
&\norm{ \nu\mathcal{R}(-\Delta)^{\alpha} w_{q+1}^{(c)} }_{L_t^1L^\rho_x}
\lesssim \theta_{q+1}^{-7}\lambda^{2\alpha-2}r_{\perp}^{\frac{2}{\rho}-1}\tau^{-\frac12}\lesssim \theta_{q+1}^{-7}\lambda^{-1-6\varepsilon},\label{e5.19.2}\\
&\norm{\nu \mathcal{R}(-\Delta)^{\alpha} w_{q+1}^{(o)} }_{L_t^1L^\rho_x} \lesssim \theta_{q+1}^{-30}\sigma^{-1}\lesssim \theta_{q+1}^{-30}\lambda^{-2\varepsilon}.\label{e5.21.2}
\end{align}
Hence, combining \eqref{e5.17.2}-\eqref{e5.21.2} altogether we obtain
\begin{align} \lambda_qbel{mag viscosity-endpt2}
\norm{ \nu\mathcal{R}(-\Delta)^{{\alpha}lpha} w_{q+1} }_{L_t^1L^\rho_x} \lesssim \theta_{q+1}^{-30}\lambda_qmbda^{-2{\varepsilon}} .
\end{align}
Moreover,
using \eqref{nuh3}, Lemma \ref{totalest-endpt2}
and \eqref{rs-rp-p-ve-endpt2} we have
\begin{align} \lambda_qbel{linear estimate1-endpt2}
&\norm{ \mathcal{R}\mathbb{P}_H{\mathrm{div}}\left(w_{q + 1} \otimes {\bf{w}}t u_q + {\bf{w}}t u_q \otimes w_{q+1}\right) }_{L_t^1L^\rho_x} \nonumber \\
\lesssim\,&\norm{w_{q + 1} \otimes {\bf{w}}t u_q + {\bf{w}}t u_q \otimes w_{q+1} }_{L_t^1L^\rho_x} \nonumber \\
\lesssim\,& \norm{{\bf{w}}t u_q}_{L^{\infty}_tH^3_x} \norm{w_{q+1}}_{L_t^1L^\rho_x} \nonumber \\
\lesssim\, &\lambda_qmbda^5_q (\theta_{q+1}^{-1} r_{\perp}^{\frac{2}{\rho}-1} \tau^{-\frac 12} +\theta_{q+1}^{-9}\sigmaigma^{-1} )
\lesssim \theta_{q+1}^{-10}\lambda_qmbda^{-2{\varepsilon}}.
\end{align}
Therefore,
we conclude from \eqref{time derivative-endpt2}, \eqref{mag viscosity-endpt2} and \eqref{linear estimate1-endpt2} that
\begin{align} \lambda_qbel{linear estimate-endpt2}
\norm{\mathring{R}_{lin} }_{L_t^1L^\rho_x}
& \lesssim \theta_{q+1}^{-7}\lambda_qmbda^{-4{\varepsilon}} +\theta_{q+1}^{-30}\lambda_qmbda^{-2{\varepsilon}}+\theta_{q+1}^{-10}\lambda_qmbda^{-2{\varepsilon}}
\lesssim \theta_{q+1}^{-30}\lambda_qmbda^{-2{\varepsilon}}.
\end{align}
\partialaragraph{\bf (ii) Oscillation error.}
Unlike in the previous endpoint case,
we only need to decompose the oscillation error into two parts here:
\begin{align*}
\mathring{R}_{osc} = \mathring{R}_{osc.1} + \mathring{R}_{osc.2},
\end{align*}
where the low-high spatial oscillation error
\begin{align*}
\mathring{R}_{osc.1}
&:= \sigmaum_{k \in \Lambda }\mathcal{R} \mathbb{P}_{H}\mathbb{P}_{\neq 0}\left(g_{(\tau)}^2 \mathbb{P}_{\neq 0}(W_{(k)}\otimes W_{(k)} ){\nabla}bla (a_{(k)}^2) \right),
\end{align*}
and the low frequency error
\begin{align*}
\mathring{R}_{osc.2} &
:= -\sigmaigma^{-1}\sigmaum_{k\in \Lambda}\mathcal{R}\mathbb{P}_{H}\mathbb{P}_{\neq 0}
\left(h_{(\tau)}{\fint}_{{\mathbb{T}}^3}W_{(k)}\otimes W_{(k)} {\rm d} x\, \partial_t{\nabla}bla(a_{(k)}^{2})\right).
\end{align*}
Then, applying Lemmas \ref{buildingblockestlemma-endpt2}, \ref{mae-endpt2} and \ref{commutator estimate1}
with $a = {\nabla}bla (a_{(k)}^2)$ and $f = \partialhi_{(k)}^2$
we get
\begin{align} \lambda_qbel{I1-esti}
\norm{\mathring{R}_{osc.1} }_{L^1_tL^\rho_x}
&\lesssim \sigmaum_{ k \in \Lambda }
\|g_{(\tau)}\|_{L^2_t}^2\norm{|{\nabla}bla|^{-1} \mathbb{P}_{\not =0}
\left(\mathbb{P}_{g_{(\tau)}eq (\lambda_qmbda r_{\perp}/2)}(W_{(k)}\otimes W_{(k)} ){\nabla}bla (a_{(k)}^2)\right)}_{C_tL^\rho_x} \notag \nonumber \\
& \lesssim \sigmaum_{ k \in \Lambda }
\||{\nabla}|^3 (a^2_{(k)})\|_{C_{t,x}} \lambda_qmbda^{-1} r_{\perp}^{-1}\norm{\partialhi^2_{(k)} }_{C_tL^{\rho}_x} \nonumber \\
& \lesssim \theta_{q+1}^{-23} \lambda_qmbda^{-1} r_{\perp}^{\frac{2}{\rho}-3}.
\end{align}
Moreover, as in \eqref{I3-esti-endpt1},
the low frequency part $\mathring{R}_{osc.2} $
can be estimated by using \eqref{hk-esti} and \eqref{mag amp estimates-endpt2}:
\begin{align} \lambda_qbel{I3-esti}
\norm{\mathring{R}_{osc.2} }_{L^1_tL^\rho_x}
\lesssim \sigmaigma^{-1} \sigmaum_{k\in\Lambda} \|h_{(\tau)}\|_{C_t}\left( \norm{a_{(k)} }_{C_{t,x}} \norm{a_{(k)} }_{C_{t,x}^2} +\norm{a_{(k)} }_{C_{t,x}^1}^2\right)
\lesssim \theta_{q+1}^{-15} \sigmaigma^{-1}.
\end{align}
Therefore, combining \eqref{I1-esti} and \eqref{I3-esti} and using \eqref{larsrp-endpt2} and \eqref{rs-rp-p-ve-endpt2}
we conclude
\begin{align}
\lambda_qbel{oscillation estimate-endpt2}
\norm{\mathring{R}_{osc}}_{L_t^1L^\rho_x}
&\lesssim \theta_{q+1}^{-23} \lambda_qmbda^{-1} r_{\perp}^{\frac{2}{\rho}-3}+\theta_{q+1}^{-15} \sigmaigma^{-1} \notag \\
&\lesssim \theta_{q+1}^{-23} \lambda^{{\alpha}-2+10{\varepsilon}} +\theta_{q+1}^{-15} \lambda^{-2\varepsilon} \notag\\
&\lesssim \theta_{q+1}^{-15} \lambda^{-2\varepsilon},
\end{align}
where the last step is due to \eqref{ne3.1}.
\partialaragraph{\bf (iii) Corrector error.}
We use H\"older's inequality, Lemma \ref{totalest-endpt2}
and \eqref{rs-rp-p-ve-endpt2} to get
\begin{align}
\norm{\mathring{R}_{cor} }_{L^1_{t}L^{\rho}_x}
\lesssim& \norm{ w_{q+1}^{(p)} \otimes (w_{q+1}^{(c)} +{\bf{w}}o) -(w_{q+1}^{(c)} +{\bf{w}}o) \otimes w_{q+1} }_{L^1_{t}L^{\rho}_x} \notag \\
\lesssim& \norm{w_{q+1}^{(c)} +{\bf{w}}o }_{L^2_{t}L^{{\infty}}_x} (\norm{w^{(p)}_{q+1} }_{L^2_{t}L^{\rho}_x} + \norm{w_{q+1} }_{L^2_{t}L^{\rho}_x})\notag \\
\lesssim& \left( \theta_{q+1}^{-7}\lambda^{-1}r_{\perp}^{-1 }+\theta_{q+1}^{-9}\sigmaigma^{-1}\right) \left(\theta_{q+1}^{-1} r_{\perp}^{\frac{2}{\rho}-1} +\theta_{q+1}^{-7}\lambda^{-1} r_{\perp}^{\frac{2}{\rho}-1} + \theta_{q+1}^{-9} \sigmaigma^{-1}\right) \notag \\
\lesssim& \left( \theta_{q+1}^{-7}\lambda^{{\alpha}-2+8{\varepsilon}}+\theta_{q+1}^{-9}\lambda_qmbda^{-2{\varepsilon}}\right) \left(\theta_{q+1}^{-1} \lambda^{-{\alpha}+1-6{\varepsilon}} + \theta_{q+1}^{-9}\lambda_qmbda^{-2{\varepsilon}}\right) \notag \\
\lesssim& \, \theta_{q+1}^{-18} \lambda_qmbda^{-4{\varepsilon}}. \lambda_qbel{corrector estimate-endpt2}
\end{align}
Therefore,
we conclude from estimates \eqref{linear estimate-endpt2},
\eqref{oscillation estimate-endpt2},
\eqref{corrector estimate-endpt2} that
\begin{align} \label{rq1b-endpt2}
\|\mathring{R}_{q+1} \|_{L^1_{t,x}}
&\leq \| \mathring{R}_{lin} \|_{L^1_tL^\rho_{x}} + \| \mathring{R}_{osc}\|_{L^1_tL^\rho_{x}}
+ \|\mathring{R}_{cor} \|_{L^1_tL^\rho_{x}} \nonumber \\
&\lesssim \theta_{q+1}^{-30}\lambda_qmbda^{-2\varepsilonrepsilon}
+\theta_{q+1}^{-15}\lambda_qmbda^{-2{\varepsilon}} + \theta_{q+1}^{-18}\lambda_qmbda^{-4{\varepsilon}} \nonumber \\
& \leq \lambda_qmbda^{-{\varepsilon}_R} {\rm d}elta_{q+2}.
\end{align}
This justifies the inductive estimate \eqref{rl1}
for the $L^1_{t,x}$-norm of the new Reynolds stress $\mathring{R}_{q+1}$.
\section{Proof of main results} \label{Sub-Proof-Main}
This section contains the proofs of the main results,
i.e. Theorems \ref{Prop-Iterat} and \ref{Thm-Non-hyper-NSE},
Corollaries \ref{Cor-Strong-Nonuniq} and \ref{Cor-Nonuniq-Supercri},
and the strong vanishing viscosity result in Theorem \ref{Thm-hyperNSE-Euler-limit}.
\paragraph{\bf Proof of Theorem \ref{Prop-Iterat}}
Because the iterative estimates \eqref{uh3}-\eqref{rl1} and \eqref{u-B-L2tx-conv}-\eqref{u-B-Lw-conv}
have been verified in the previous sections,
we only need to prove the well-preparedness of $(u_{q+1}, \mathring{R}_{q+1})$ and the temporal inductive inclusion \eqref{suppru}.
Regarding the well-preparedness of $(u_{q+1}, \mathring{R}_{q+1})$,
we first note from the support of $a_{(k)}$ that
\begin{align*}
w_{q+1}(t)=0 \quad \text{if} \quad \operatorname{dist}(t,I_{q+1}^c)\leq \theta_{q+1}.
\end{align*}
Therefore, $u_{q+1}(t)=\widetilde u_q(t)$ if $\operatorname{dist}(t,I_{q+1}^c)\leq \theta_{q+1}$.
Then, by the well-preparedness of $(\widetilde u_q, \mathring{\widetilde R}_q)$,
we infer that
\begin{align*}
\mathring{R}_{q+1}(t)=\mathring{\widetilde R}_q(t)=0 \quad \text{if} \quad \operatorname{dist}(t,I_{q+1}^c)\leq \theta_{q+1},
\end{align*}
which verifies the well-preparedness of $(u_{q+1}, \mathring{R}_{q+1})$.
Concerning the temporal inductive inclusion \eqref{suppru},
first note that
\begin{align}
& \supp_t w_{q+1} \subseteq \bigcup_{k\in \Lambda }\supp_t a_{(k)} \subseteq N_{2\theta_{q+1}}(\supp_t \mathring{\widetilde R}_{q}). \label{e4.43}
\end{align}
Next we prove that
\begin{align}
& I_{q+1}\subseteq N_{4T/m_{q+1}}(\supp_t \mathring{R}_{q}). \label{ne4.43}
\end{align}
To this end, for any $t\in I_{q+1}$,
we have $t\in [t_i-2\theta_{q+1},t_i+3\theta_{q+1}]$ for some $i\in \mathcal{C}$.
Hence, there exists $t_*\in [t_{i-1}, t_i+\theta_{q+1}]$
such that $\mathring{R}_q(t_*)\neq 0$. Since
\begin{align*}
|t-t_*|\leq \frac{T}{m_{q+1}}+3\theta_{q+1} < \frac{4T}{m_{q+1}},
\end{align*}
we infer that $t\in N_{4T/m_{q+1}}(\sigmaupp_t \mathring{R}_{q})$,
which proves \eqref{ne4.43}, as claimed.
Combining \eqref{e4.43} and \eqref{ne4.43} together we obtain
\begin{align}
& \supp_t w_{q+1} \subseteq N_{2\theta_{q+1}}(\supp_t \mathring{\widetilde R}_{q})
\subseteq N_{2\theta_{q+1}}(I_{q+1})\subseteq N_{6T/m_{q+1}}(\supp_t \mathring{R}_{q}). \label{e4.45}
\end{align}
Regarding the temporal support of $\widetilde u_{q}$, we claim that
\begin{align}\label{suppwtuq}
\supp_t \widetilde u_{q}\subseteq N_{2T/m_{q+1}}(\supp_t u_{q}).
\end{align}
To this end, for any $t\in [0,T]$ such that $\widetilde u_q(t)\neq 0$,
there exists $0\leq i\leq m_{q+1}-1$ such that $t\in [t_i,t_{i+1}]$.
If $t\in [t_i+\theta_{q+1},t_{i+1}]$, then we have $u_q(t_i)\neq 0$,
since otherwise $\widetilde u_q(t)=v_i(t)=0$.
Since $|t-t_i|\leq T/m_{q+1}$,
we see that \eqref{suppwtuq} is valid.
Moreover, if $t\in [t_i, t_i+\theta_{q+1}]$,
we have $u_q(t_i)\neq 0$ or $u_q(t_{i-1})\neq 0$.
Actually, if $u_q(t_i) = u_q(t_{i-1}) =0$,
the uniqueness in Proposition \ref{Prop-LWP-Hyper-NLSE}
yields that
$v_i(t) = v_{i-1}(t)=0$,
and so
\begin{align*}
\widetilde u_q(t) =\chi_i(t) v_i(t)+(1-\chi_i(t))v_{i-1}(t)=0,
\end{align*}
which contradicts the fact that $\widetilde u_q(t)\not = 0$.
Hence, taking into account
$|t-t_i|\leq T/m_{q+1}$ and $|t-t_{i-1}|\leq 2(T/m_{q+1})$
we prove \eqref{suppwtuq}, as claimed.
Thus, we deduce from \eqref{e4.45} and \eqref{suppwtuq} that
\begin{align}
\supp_t u_{q+1}
\subseteq \supp_t \widetilde u_{q} \cup \supp_t w_{q+1}
\subseteq N_{6T/m_{q+1}}( \supp_t (u_{q}, \mathring{R}_{q}))
\subseteq N_{\delta_{q+2}^{\frac 12}} (\supp_t(u_{q}, \mathring{R}_{q})), \label{suppbq}
\end{align}
where the last step is due to
$6T/m_{q+1} \ll \delta_{q+2}^{1/2}$.
Moreover,
in view of \eqref{e4.43}, \eqref{e4.45} and \eqref{suppwtuq},
we get
\begin{align}
& \supp_t \mathring{R}_{q+1}\subseteq \bigcup\limits_{k\in \Lambda}\supp_t a_{(k)}
\cup \supp_t \widetilde u_q \subseteq N_{\delta_{q+2}^{\frac12}}( \supp_t (u_{q}, \mathring{R}_{q})). \label{supp-Ru-RB-q+1}
\end{align}
Therefore, putting \eqref{suppbq} and \eqref{supp-Ru-RB-q+1}
together we prove Theorem \ref{Prop-Iterat}.
\hfill $\square$ \\
\paragraph{\bf Proof of Theorem \ref{Thm-Non-hyper-NSE}} \label{Proof-Nonuniq}
Below we prove the statements of Theorem~\ref{Thm-Non-hyper-NSE}.
$(i)$. Take $u_0=\tilde{u}$ and set
\begin{align}
&\mathring{R}_0 :=\mathcal{R}\left(\partial_t u_0+\nu(-\Delta)^{{\alpha}lpha} u_0\right) + u_0\mathring\otimes u_0, \lambda_qbel{r0u} \\
& P_0 := -\frac{1}{3} |u_0|^2.
\end{align}
Thus, $(u_0, \mathring{R}_0)$ is a well-prepared solution to \eqref{equa-nsr}
with the set $I_0 = [0,T]$ and the length scale $\theta_0=T$.
Let $\delta_{1}:= \|\mathring{R}_0 \|_{L^1_{t,x}}$
and choose $a$ sufficiently large such that \eqref{uh3}-\eqref{rl1} are satisfied at level $q=0$.
Then, in view of Theorem~\ref{Prop-Iterat}, there exists a sequence of solutions $\{u_{q},\mathring{R}_{q}\}_{q}$ to \eqref{equa-nsr} satisfying the inductive estimates \eqref{uh3}-\eqref{suppru} for all $q\geq 0$.
Note that, by \eqref{uh3} and \eqref{upth2},
\begin{align}\label{ine-uuqh1}
\norm{ u_{q+1} - u_q }_{H^1_{t,x}} & \leq \norm{\partial_t( u_{q+1} - u_q) }_{L^{\infty}_tH^2_{x}}+ \norm{ u_{q+1} - u_q }_{L^{\infty}_tH^3_{x}} \notag\\
& \leq \norm{\partial_t u_{q+1}}_{L^{\infty}_tH^2_{x}}+\norm{\partial_t u_{q}}_{L^{\infty}_tH^2_{x}}+\norm{u_{q+1}}_{L^{\infty}_tH^3_{x}}+ \norm{ u_{q}}_{L^{\infty}_tH^3_{x}}\notag\\
&\lesssim \lambda_{q+1}^{8}+\lambda_q^8+\lambda_{q+1}^5+\lambda_q^5\lesssim \lambda_{q+1}^{8}.
\end{align}
Then, using the interpolation, \eqref{la}, \eqref{uh3},
\eqref{u-B-L2tx-conv} and \eqref{ine-uuqh1}
we infer that for any $\beta'\in (0,\frac{\beta}{8+\beta})$,
\begin{align}
\sum_{q \geq 0} \norm{ u_{q+1} - u_q }_{H^{\beta'}_{t,x}}
\leq & \, \sum_{q \geq 0} \norm{ u_{q+1} - u_q }_{L^2_{t,x}}^{1- \beta'}\norm{ u_{q+1} - u_q }_{H^1_{t,x}}^{\beta'}\notag\\
\lesssim &\, \sum_{q \geq 0} (M^*)^{1-\beta'} \delta_{q+1}^{\frac{1-\beta'}{2}}\lambda_{q+1}^{8 \beta' } \notag\\
\lesssim &\, (M^*)^{1-\beta'} \delta_{1}^{\frac{1-\beta'}{2}}\lambda_{1}^{8 \beta' } +
\sum_{ q \geq 1} (M^*)^{1-\beta'} \lambda_{q+1}^{-\beta(1 - \beta') + 8\beta' } <{\infty}, \label{interpo}
\end{align}
where the last step is due to the inequality $-\beta(1 - \beta') + 8\beta' <0$.
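For the reader's convenience, we note that the exponent condition used in the last step is equivalent to the assumed range of $\beta'$:
\begin{align*}
-\beta(1-\beta')+8\beta'<0
\;\Longleftrightarrow\;
\beta'(8+\beta)<\beta
\;\Longleftrightarrow\;
\beta'<\frac{\beta}{8+\beta},
\end{align*}
which is exactly the restriction $\beta'\in(0,\frac{\beta}{8+\beta})$ imposed above.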
Therefore, $\{u_q\}_{q\geq 0}$ is a Cauchy sequence in $H^{\beta'}_{t,x}$,
and thus there exists $u\in H^{\beta'}_{t,x}$ such that $\lim_{q\rightarrow\infty}u_q=u$ in $H^{\beta'}_{t,x}$.
Taking into account the fact that
$\lim_{q \to \infty} \mathring{R}_{q} = 0 $ in $L^1_{t,x}$
and $u_q(0)=\widetilde u(0)$ for all $q\geq 0$,
we conclude that $u$ is a weak solution to \eqref{equa-NS}
with the initial datum $\widetilde u(0)$.
$(ii)$. Regarding the regularity of the weak solution $u$,
by virtue of \eqref{ne6.6} and \eqref{nne6.6}, we have
\begin{align}
\sum_{q \geq 0}\norm{ u_{q+1} - u_q }_{L^\gamma_tW^{s,p}_x} < {\infty}, \label{result-lw}
\end{align}
which yields that $\{u_q\}_{q\geq 0}$
is also a Cauchy sequence in $L^\gamma_tW^{s,p}_x$.
Thus, using the uniqueness of weak limits
we obtain
$$u \in H^{\beta^\prime}_{t,x} \cap L^\gamma_tW^{s,p}_x,$$
thereby proving the regularity statement $(ii)$.
$(iii).$ Regarding the Hausdorff measure of the singular set,
we set
\[
\mathcal{G} = \bigcup_{q \geq 0} I_q^c \setminus \{0,T\}, \ \
\mathcal{B}:=[0,T] \setminus \mathcal{G}.
\]
By construction, $u_q$ is a smooth solution to the hyperdissipative Navier-Stokes equation \eqref{equa-NS}
on $\mathcal{G}$,
and $u \equiv u_q $ on $I_{q}^c$ for each $q$.
Thus, $\mathcal{B}$ contains the singular set of time.
Since by \eqref{Iq1-C-def},
each $I_q$ is covered by at most $m_{q}=\theta_q^{-{\eta}}$ many balls of radius $5\theta_q$,
the Hausdorff dimension of the potential singular set $\mathcal{B}$ can be estimated by
\begin{align*}
d_{\mathcal{H}} ( \mathcal{B}) = d_{\mathcal{H}} (\bigcap_{q\geq 0}I_q )=d_{\mathcal{H}} (\limsup_q I_q )\leq \eta<\eta_* .
\end{align*}
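Let us sketch the standard covering computation behind this dimension bound; besides \eqref{Iq1-C-def} it only uses that $\theta_q\to 0$ as $q\to\infty$. Since, up to the two endpoints $\{0,T\}$, we have $\mathcal{B}\subseteq I_q$ for every $q$, and $I_q$ is covered by $m_q=\theta_q^{-\eta}$ balls of radius $5\theta_q$,
\begin{align*}
\mathcal{H}^{d}\big(\mathcal{B}\big)
\leq \liminf_{q\to\infty}\, m_q\,(10\theta_q)^{d}
= \liminf_{q\to\infty}\, 10^{d}\,\theta_q^{\,d-\eta}=0
\qquad\text{for every } d>\eta,
\end{align*}
which gives $d_{\mathcal{H}}(\mathcal{B})\leq\eta$.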
$(iv).$ Concerning the small deviations of temporal supports,
we note that
\begin{align}
& \supp_t \mathring{R}_0
\subseteq K_0 := \supp_t u_0
= \supp_t \widetilde u.
\end{align}
Then, set
\begin{align}
K_q:= \supp_t u_q\cup \supp_t \mathring{R}_q, \ \ q\geq 1.
\end{align}
Using \eqref{suppru} we have
\begin{align}
K_{q+1} \subseteq N_{\delta_{q+2}^\frac 12} K_{q}
\subseteq \cdots
\subseteq N_{\sum\limits_{j=2}^{q+2}\delta_{j}^\frac 12} K_{0}.
\end{align}
Thus, taking into account
$\sum_{q\geq 0}\delta_{q+2}^{1/2}\leq \varepsilon_*$
for $a$ large enough
we get
\begin{align}
&\supp_t u \subseteq \bigcup_{q\geq 0} K_q
\subseteq N_{\varepsilon_*} ( \supp_t \tilde{u}).
\end{align}
This verifies the temporal support statement of Theorem~\ref{Thm-Non-hyper-NSE}.
$(v).$ Finally, for the small deviations on average,
we infer from \eqref{u-B-Lw-conv} that
\begin{align}
\norm{ u - \tilde{u} }_{L^1_tL^2_x}+\norm{ u - \tilde{u} }_{L^\gamma_tW^{s,p}_x}
\leq &\,\sum_{q \geq 0}(\norm{ u_{q+1} - u_q }_{L^1_tL^2_x}+ \norm{ u_{q+1} - u_q }_{L^\gamma_tW^{s,p}_x})\notag \\
\leq &\,2\sum_{q\geq 0} \delta_{q+2} ^{\frac12} \leq 2\sum_{q\geq 2} a^{-\beta b^q} \leq 2\sum_{q\geq 2} a^{-\beta bq}
=\frac{2a^{-2\beta b}}{1-a^{-\beta b}}\leq \varepsilon_*, \label{e6.5}
\end{align}
where the last inequality holds
for $a$ large enough (depending on $\varepsilon_*$).
Therefore, the proof of Theorem~\ref{Thm-Non-hyper-NSE} is complete.
\hfill $\square$ \\
\paragraph{\bf Proof of Corollary \ref{Cor-Strong-Nonuniq}.}
Let $\widetilde u$ be a weak solution to \eqref{equa-NS} with
divergence-free initial datum $\widetilde u(0)=\widetilde u_0\in L^2$.
If $\widetilde u$ is not a Leray-Hopf solution,
then due to Lions \cite{lions69},
there exists a unique smooth Leray-Hopf solution $u$ to \eqref{equa-NS} on $[0,T]$.
Hence, $u$ is different from $\widetilde u$.
If ${\bf{w}}t u$ is a Leray-Hopf solution to \eqref{equa-NS},
we may choose any smooth, divergence-free and mean-free vector field $v$ on $[0,T]$
and set
\begin{align*}
u_m^*:={\bf{w}}t u+\frac{m}{c_0}v,
\end{align*}
where $m \in \mathbb{N}_+$ and $c_0:= \|v\|_{L^1([T/2,T];L^2_x)} (>0)$.
Then, we glue $\widetilde u$ and $u^*_m$ together by
\begin{align}\label{def-wtu1}
\widetilde u_m:= \chi \widetilde u+(1-\chi)u^*_m,
\end{align}
where $\chi\in C^{\infty}([0,T])$ satisfies
\begin{align}\label{def-chi2}
\chi(t)= \begin{cases}1 & \text { if } 0 \leq t \leq \frac{T}{4},
\\ 0 & \text { if } \frac{T}{2} \leq t\leq T.\end{cases}
\end{align}
Note that,
${\bf{w}}t u_m$ is a smooth, divergence-free and mean-free vector field on $[0,T]$,
such that
\begin{align*}
{\bf{w}}t u_m(0)={\bf{w}}t u_0, \ \
{\bf{w}}t u_m|_{[\frac T 2, T]} = u_m^*|_{[\frac T 2, T]}.
\end{align*}
Then, for any ${\varepsilon}_* \in (0,1/4)$,
Theorem~\ref{Thm-Non-hyper-NSE}
gives weak solutions $u_m\in H^{\beta'}_{t,x} \cap L^g_{(\tau)}ammamma_tW^{s,p}_x$
to \eqref{equa-NS} on $[0,T]$,
$mg_{(\tau)}eq 1$,
which are smooth outside a null set in time and
satisfy
\begin{align}\lambda_qbel{condition-u1}
u_m(0)={\bf{w}}t u_m(0)={\bf{w}}t u_0 \quad \text{and}\quad \|u_m-{\bf{w}}t u_m\|_{L^1([0,T];L^2_x)}\leq \varepsilon_*.
\end{align}
Hence, in view of the estimate
\begin{align*}
\|u_m-\widetilde u\|_{L^1(T/2,T;L^2_x)}&\geq\|\widetilde u_m - \widetilde u\|_{L^1(T/2,T;L^2_x)}- \|\widetilde u_m -u_m\|_{L^1(T/2,T;L^2_x)}\\
&= \| u^*_m - \widetilde u\|_{L^1(T/2,T;L^2_x)}-\|\widetilde u_m -u_m\|_{L^1(T/2,T;L^2_x)}\\
&\geq m-\varepsilon_*>\frac12,
\end{align*}
we infer that $u_m\not = \widetilde u$ on $[0,T]$ for every $m\geq 1$.
Thus, we conclude that for any weak solution $\widetilde u$ to \eqref{equa-NS},
there exists another, different weak solution to \eqref{equa-NS}
in the space $L^\gamma_tW^{s,p}_x$
with the same initial datum $\widetilde u(0)$,
where $(s,\gamma, p) \in \mathcal{A}_1 \cup \mathcal{A}_2$.
Moreover, for any $m,m'g_{(\tau)}eq 1$, $m\neq m'$,
we have
\begin{align*}
\|u_m-u_{m'}\|_{L^1(T/2,T;L^2_x)}&\geq\|\widetilde u_m - \widetilde u_{m'}\|_{L^1(T/2,T;L^2_x)}- \|\widetilde u_m -u_m\|_{L^1(T/2,T;L^2_x)}
-\|\widetilde u_{m'} -u_{m'}\|_{L^1(T/2,T;L^2_x)}\\
&= \| u^*_m - u^*_{m'}\|_{L^1(T/2,T;L^2_x)}- \|\widetilde u_m -u_m\|_{L^1(T/2,T;L^2_x)}
-\|\widetilde u_{m'} -u_{m'}\|_{L^1(T/2,T;L^2_x)}\\
&\geq |m-m'|-2\varepsilon_*> \frac12,
\end{align*}
which yields that $u_m\not = u_{m'}$ on $[0,T]$.
Therefore, there exist infinitely many different weak solutions to \eqref{equa-NS}
with the same initial datum ${\bf{w}}t u(0)$.
We finish the proof of Corollary~\ref{Cor-Strong-Nonuniq}.
\hfill $\square$ \\
\paragraph{\bf Proof of Corollary \ref{Cor-Nonuniq-Supercri}.}
Taking $s=0$ and $\gamma =\infty$ in the supercritical regime $\mathcal{A}_1$
and then applying Theorem \ref{Thm-Non-hyper-NSE} we obtain the non-uniqueness
in the supercritical spaces $L^p_x$
for any $1\leq p< 3/(2\alpha-1)$.
Note that
the initial data of the non-unique solutions in Theorem \ref{Thm-Non-hyper-NSE}
are in $L^2_x$, and hence also in $L^p_x$,
due to the embeddings on the torus
$L^2_x\hookrightarrow L^{3/(2\alpha-1)}_x \hookrightarrow L^p_x$
for $\alpha \in [5/4,2)$.
Regarding $(ii)$, for any $s<s_p:=3/p+1-2\alpha$,
we may find a small constant $\eta>0$ such that
$s<s_p-\eta$.
Then, using the embedding theorems (cf. \cite[p.164]{ST87}) we infer
\begin{align*}
F^{s_p-\eta}_{p,q}
\hookrightarrow B^{s_p-\eta}_{p,p\vee q}
\hookrightarrow B^{s}_{p,q}.
\end{align*}
Then, letting $\widetilde p := \frac{3}{2\alpha-1+\eta}< \frac{3}{2\alpha-1}$
(we may take $\eta$ even smaller such that $\widetilde p>1$)
and using the embedding of Triebel-Lizorkin spaces
(cf. \cite[p.170]{ST87}) we get
\begin{align*}
L^{\widetilde p} = F^{0}_{\widetilde p,2}
\hookrightarrow F^{s_p-\eta}_{p,q}.
\end{align*}
Thus, we obtain
\begin{align*}
L^{\widetilde p} \hookrightarrow B^{s}_{p,q},
\end{align*}
which along with the statement $(i)$ yields
the existence of non-unique weak solutions in ${B}^{s}_{p,q}$.
Similarly, the last statement $(iii)$ follows from $(i)$
and the following embedding (cf. \cite[p.165]{ST87}):
\begin{align*}
L^{\widetilde p}
\hookrightarrow F^{s_p-\eta}_{p,q}
\hookrightarrow F^{s}_{p,q}.
\end{align*}
The proof is therefore complete.
\hfill $\square$ \\
\paragraph{\bf Proof of Theorem~\ref{Thm-hyperNSE-Euler-limit}}
We choose two families of standard compactly supported Friedrichs mollifiers $\left\{\phi_{\varepsilon}\right\}_{\varepsilon>0}$
and $\left\{\varphi_{\varepsilon}\right\}_{\varepsilon>0}$ on ${\mathbb{T}}^{3}$ and ${\mathbb R}$, respectively.
Set
\begin{align} \label{un-u-Bn-B}
u_{n} :=\left(u *_{x} \phi_{\lambda_{n}^{-1}}\right) *_{t} \varphi_{\lambda_{n}^{-1}},
\end{align}
for some $n>0$, restricted to $[0,T]$.
Since $u$ is a weak solution to the Euler equation \eqref{equa-Euler},
we infer that $u_n$ satisfies
\begin{equation}\lambda_qbel{mhd2}
\left\{{\alpha}ligned
&\partial_t u_n+\lambda_qmbda_{n}^{-2{\alpha}lpha}(-\Delta)^{{\alpha}lpha} u_n+ {\mathrm{div}}(u_n\otimes u_n)+{\nabla}bla P_n={\mathrm{div}} \mathring{R}_n , \\
&{\mathrm{div}} u_n=0 , \\
\endaligned
\right.
\end{equation}
where the Reynolds stress
\begin{align}
\mathring{R}_n
:=& u_{n} \mathring\otimes u_{n}
-((u \mathring\otimes u) *_{x} \partialhi_{\lambda_qmbda_{n}^{-1}}) *_{t} \varepsilonrphi_{\lambda_qmbda_{n}^{-1}} +\lambda_qmbda_{n}^{-2{\alpha}lpha}\mathcal{R} (-\Delta)^{{\alpha}lpha} u_{n}, \lambda_qbel{rnu}
\end{align}
and the pressure
\begin{align*}
P_n:= P*_x \partialhi_{\lambda_n^{-1}} *_t {\varphi}_{\lambda_n^{-1}}
-|u_n|^2
+ |u|^2 *_x \partialhi_{\lambda_n^{-1}} *_t {\varphi}_{\lambda_n^{-1}}.
\end{align*}
Let $ \nu:=\nu_{n}:=\lambda_n^{-2\alpha}$
and $\widetilde{M}:= \|u \|_{H^{\widetilde{\beta}}_{t,x}}$.
We claim that for $a$ sufficiently large,
$(u_n,\mathring{R}_n)$ satisfies the iterative estimates \eqref{uh3}-\eqref{rl1} at level $q=n$ $(\geq 1)$.
To this end, let us first consider the most delicate estimate \eqref{rl1}.
Using the Minkowski inequality and the Slobodetskii-type norm of Sobolev spaces
we have
(see, e.g., \cite[(6.35)]{lzz21})
\begin{align} \lambda_qbel{u-un-lbbn}
\|u-u_{n}\|_{L^2_{t,x}}
\lesssim&\, \lambda_qmbda_{n}^{- {\bf{w}}idetilde{\beta}} \|u\|_{H^{{\bf{w}}idetilde{\beta}}_{t,x}}\lesssim \lambda_qmbda_{n}^{- {\bf{w}}idetilde{\beta}}{\bf{w}}idetilde{M}.
\end{align}
Moreover, we note that
\begin{align} \lambda_qbel{uu-uun-wtM}
& \|u_{n} \otimes u_{n}-((u \otimes u) *_{x} \partialhi_{\lambda_qmbda_{n}^{-1}}) *_{t} \varepsilonrphi_{\lambda_qmbda_{n}^{-1}} \|_{L^{1}_{t,x}} \notag \\
\lesssim& \|u-u_n\|_{L^2_{t,x}}^2
+ \|(|s|+|y|)^{4+2{\bf{w}}t \beta} \partialhi_{\lambda_qmbda_{n}^{-1}} \varepsilonrphi_{\lambda_qmbda_{n}^{-1}} \|_{L^{\infty}_{s,y}}
\left\|\frac{u(t,x)-u(t-s,x-y)}{(|s|+|y|)^{2+{\bf{w}}t \beta}} \right\|_{L^2_{t,x}L^2_{s,y}}^2 \notag \\
\lesssim& \lambda_n^{-2{\bf{w}}t \beta} \|u\|_{H^{{\bf{w}}t \beta}_{t,x}}^2
\lesssim \lambda_n^{-2{\bf{w}}t \beta} {\bf{w}}t M^2,
\end{align}
where the last step is due to \eqref{u-un-lbbn}.
Estimating as in \eqref{e5.18} we also get
\begin{align} \lambda_qbel{DeltaRu-L1-wtM}
\|\lambda_n^{-2{\alpha}lpha}\mathcal{R} (-\Delta)^{{\alpha}lpha} u_n\|_{L^1_{t,x}}
\lesssim \lambda_n^{-2{\alpha}lpha} \left( \|u_n\|_{L^1_{t}L^2_x}^{\frac{4-2{\alpha}lpha}{3}} \|u_n\|_{L^1_{t}H^3_x}^{\frac{2{\alpha}lpha-1}{3}} \right)
\lesssim \lambda_n^{-1} \|u\|_{L^2_{t,x}}
\lesssim \lambda_n^{-1}{\bf{w}}t M.
\end{align}
Thus, combining \eqref{rnu}, \eqref{uu-uun-wtM} and \eqref{DeltaRu-L1-wtM}
we conclude that
\begin{align} \label{Ru-wtM-L1}
\|\mathring{R}_n\|_{L^{1}_{t,x}}
& \lesssim \|u_{n} \otimes u_{n}- ((u \otimes u) *_{x} \phi_{\lambda_{n}^{-1}}) *_{t} \varphi_{\lambda_{n}^{-1}} \|_{L^{1}_{t,x}}
+ \|\lambda_{n}^{-2\alpha}\mathcal{R} (-\Delta)^{\alpha} u_{n}\|_{L^1_{t,x}} \notag \\
& \lesssim \lambda_{n}^{-1}\widetilde{M}
+ \lambda_{n}^{-2\widetilde{\beta}} \widetilde{M}^{2},
\end{align}
which verifies \eqref{rl1} at level $n$ by choosing $\beta$ and $\varepsilon_R$ sufficiently small,
such that $\widetilde \beta>\varepsilon_R/2 + \beta b$,
where $\varepsilon_R$ and $\beta$ are as in the proof of Theorem~\ref{Prop-Iterat}.
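To see why this condition suffices, note that under the parameter conventions used throughout the iteration (namely $\lambda_q=a^{b^q}$ and $\delta_q=\lambda_q^{-2\beta}$, which is also the convention behind \eqref{e6.5}; we record this only as a consistency check), the right-hand side of \eqref{rl1} at level $n$ equals
\begin{align*}
\lambda_n^{-\varepsilon_R}\delta_{n+1}=\lambda_n^{-\varepsilon_R}\lambda_{n+1}^{-2\beta}=\lambda_n^{-\varepsilon_R-2\beta b},
\end{align*}
while \eqref{Ru-wtM-L1} is of order $\lambda_{n}^{-1}\widetilde{M}+\lambda_{n}^{-2\widetilde{\beta}}\widetilde{M}^{2}$. Hence \eqref{rl1} holds at level $n$ as soon as $1>\varepsilon_R+2\beta b$ and $2\widetilde\beta>\varepsilon_R+2\beta b$, the latter being the stated condition $\widetilde\beta>\varepsilon_R/2+\beta b$; the implicit constant and the factors $\widetilde M$, $\widetilde M^2$ are absorbed by taking $a$ sufficiently large.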
Regarding the inductive estimate \eqref{uh3},
by Sobolev's embedding $H^1_{t} {B_q}ookrightarrow L^{\infty}_{t}$
and Young's inequality,
\begin{align} \lambda_qbel{un-Bn-C1}
\left\|u_{n}\right\|_{L^{\infty}_tH^3_x} &\leq \sigmaum_{0\leq |N|\leq 3}\|{\nabla}bla^{N} u_n\|_{L^{\infty}_{t}L^2_x}\notag\\
&\lesssim \sigmaum_{0\leq |N|\leq 3}\|\|u*_t\varepsilonrphi_{\lambda_qmbda_n^{-1}}\|_{L^{\infty}_t}\|_{L^2_x}\|{\nabla}bla^{N} \partialhi_{\lambda_qmbda_n^{-1}}\|_{L^1_x} \notag\\
&\lesssim \sigmaum_{0\leq |N|\leq 3}\sigmaum_{0\leq M\leq 1}\|u\|_{L^2_{t,x}}\|\partial_t^{M}\varepsilonrphi_{\lambda_qmbda_n^{-1}}\|_{L^1_t}
\|{\nabla}bla^{N} \partialhi_{\lambda_qmbda_n^{-1}}\|_{L^1_x} \notag \\
&\lesssim \lambda_n^4 {\bf{w}}t M,
\end{align}
which verifies \eqref{uh3} at level $n$.
Moreover, by the Sobolev embedding $H^3_{t,x} {B_q}ookrightarrow L^{\infty}_{t,x}$,
\begin{align} \lambda_qbel{un-pth3}
\left\|\partial_t u_{n}\right\|_{L^{\infty}_tH^2_x}
\lesssim \sigmaum_{0\leq |N|\leq 2}\|\partial_t {\nabla}bla^{N}u_n\|_{L^{\infty}_{t,x}}
\lesssim \|u_n\|_{H^6_{t,x}}
\lesssim \lambda_n^6 {\bf{w}}t M,
\end{align}
which verifies \eqref{upth2} at level $n$.
Finally, for the estimate \eqref{rh3},
by the Sobolev embedding $W^{4,1}_{t,x}{B_q}ookrightarrow L^{\infty}_{t,x}$, we obtain
\begin{align}
\|\mathring{R}_n\|_{L^{\infty}_tH^3_x}\leq & \sigmaum_{0\leq|N|\leq 3}\|{\nabla}bla^{N}\mathring{R}_n\|_{L^{\infty}_{t,x}} \lesssim \|\mathring{R}_n\|_{W^{7,1}_{t,x}} \notag\\
\leq& \|u_{n} \otimes u_{n} - (u \otimes u) *_{x} \partialhi_{\lambda_qmbda_{n}^{-1}} *_{t} \varepsilonrphi_{\lambda_qmbda_{n}^{-1}} \|_{W^{7,1}_{t,x}}
+\lambda_qmbda_{n}^{-2{\alpha}lpha} \|\mathcal{R} (-\Delta)^{{\alpha}lpha} u_{n}\|_{W^{7,1}_{t,x}} \notag \\
\lesssim&\sigmaum\limits_{0\leq M_{1}+M_{2}+N_1+N_2\leq 7}
\|\partialartial_t^{M_{1}}{\nabla}^{N_1}u_n\|_{L^2_{t,x}} \|\partialartial_t^{M_{2}}{\nabla}^{N_2}u_n\|_{L^2_{t,x}} \notag \\
& \quad + \sigmaum\limits_{0\leq M+ N\leq 7} \|u\|_{L^2_{t,x}}^2
\|{\nabla}^{N} \partialhi_{\lambda_qmbda_{n}^{-1}} \|_{L^1_x} \|\partialartial_t^{M} \varepsilonrphi_{\lambda_qmbda_{n}^{-1}} \|_{L^1_t} + \lambda_n^{-2{\alpha}lpha} \||{\nabla}|^{2{\alpha}lpha-1} u_n\|_{H^7_{t,x}} \notag \\
\lesssim& \lambda_n^7 \|u\|_{L^2_{t,x}}^2+\lambda_n^{-2{\alpha}lpha} \|u_n\|_{L^2_{t,x}}^{\frac{2-{\alpha}lpha}{5}}\|u_n\|_{H^{10}_{t,x}}^{\frac{3+{\alpha}lpha}{5}} \notag \\
\lesssim& \lambda_n^7 ({\bf{w}}t M+ {\bf{w}}t M^2 ).
\end{align}
Therefore, taking $a$ sufficiently large, we verify the inductive estimate \eqref{rh3}.
Thus, we can apply Theorem~\ref{Prop-Iterat} to
the approximate equation \eqref{mhd2} and
then let $q\rightarrow\infty$ to obtain a weak solution $u^{(\nu_{n})} \in H^{\beta'}_{t,x}$ to \eqref{equa-NS}
for some $\beta'\in (0,\beta/(8+\beta))$.
Furthermore,
estimating as in \eqref{interpo},
using \eqref{u-un-lbbn} and taking $\beta'$ sufficiently small such that
$0<\beta'<\min\{{\bf{w}}t \beta, \beta/(8+\beta)\}$
we deduce that for any $ng_{(\tau)}eq 1$,
\begin{align*}
\|u^{(\nu_{n})}-u\|_{H^{\beta^{\partialrime}}_{t,x}}
&\leq\|u^{(\nu_{n})}-u_{n}\|_{H^{\beta^{\partialrime}}_{t,x}}
+\|u-u_{n}\|_{H^{\beta^{\partialrime}}_{t,x}} \notag\\
&\leq C \left(\sigmaum_{q=n}^{\infty} \lambda_qmbda_{q+1}^{-\beta(1-\beta^{\partialrime})} \lambda_qmbda_{q+1}^{8 \beta^{\partialrime}}+ \|u-u_n\|_{L^2_{t,x}}^{1-\frac{\beta'}{{\bf{w}}t \beta}} \|u-u_n\|_{H^{{\bf{w}}t \beta}_{t,x}}^{\frac{\beta'}{{\bf{w}}t \beta}}\right)\notag\\
&\leq C \left(\sigmaum_{q=n}^{\infty} \lambda_qmbda_{q+1}^{-\beta(1-\beta^{\partialrime})} \lambda_qmbda_{q+1}^{8 \beta^{\partialrime}}+ \lambda_n^{-({\bf{w}}t \beta- \beta')} {\bf{w}}t M\right)
\leq \frac{1}{n},
\end{align*}
where the last step is valid for $a$ sufficiently large.
This verifies the strong convergence \eqref{convergence} in $H^{\beta^{\partialrime}}_{t,x}$.
Therefore, the proof of Theorem \ref{Thm-hyperNSE-Euler-limit} is complete.
\hfill $\square$
\section{Appendix} \label{Set-App}
In this section,
we collect some preliminary results used in the previous sections
and the well-posedness result in the critical space $L^{3/(2{\alpha}lpha-1)}_x$.
\begin{lemma} ({\bf Geometric Lemma}, \cite[Lemma 4.1]{bcv21})
\lambda_qbel{geometric lem 2}
There exists a set $\Lambda \sigmaubset \mathbb{S}^2 \cap \mathbb{Q}^3$ that consists of vectors $k$
with associated orthonormal bases $(k, k_1, k_2)$, $\varepsilonrepsilon_u> 0$,
and smooth positive functions $g_{(\tau)}ammamma_{(k)}: B_{\varepsilonrepsilon_u}(\Id) \to \mathbb{R}$,
where $B_{\varepsilonrepsilon_u}(\Id)$ is the ball of radius $\varepsilonrepsilon_u$ centered at the identity
in the space of $3 \times 3$ symmetric matrices,
such that for $S \in B_{\varepsilonrepsilon_u}(\Id)$ we have the following identity:
\begin{equation}
\lambda_qbel{sym}
S = \sigmaum_{k \in \Lambda} g_{(\tau)}ammamma_{(k)}^2(S) k_1 \otimes k_1.
\end{equation}
\end{lemma}
As pointed out in \cite{bcv21},
there exists $N_{\Lambda} \in \mathbb{N}$ such that
\begin{equation} \lambda_qbel{NLambda}
\{ N_{\Lambda} k,N_{\Lambda}k_1 , N_{\Lambda}k_2 \} \sigmaubseteq N_{\Lambda} \mathbb{S}^2 \cap \mathbb{Z}^3.
\end{equation}
We denote by $M_*$ the geometric constant such that
\begin{align}
\sigmaum_{k \in \Lambda} \norm{g_{(\tau)}ammamma_{(k)}}_{C^4(B_{\varepsilonrepsilon_u}(\Id))} \leq M_*.
\lambda_qbel{M bound}
\end{align}
This parameter is universal and will be used later in the estimates of the size of perturbations.
Then, we recall the $L^p$ decorrelation lemma introduced by \cite[Lemma 2.4]{cl21} (see also \cite[Lemma 3.7]{bv19b}), which is the key lemma to obtain the $L_{t,x}^2$ estimates of the perturbations.
\begin{lemma}[\cite{cl21}, Lemma 2.4] \lambda_qbel{Decorrelation1}
Let $\sigmaigma\in \mathbb{N}$ and $f,g:\mathbb{T}^d\rightarrow {\mathbb R}$ be smooth functions. Then for every $p\in[1,\infty]$,
\begin{equation}\lambda_qbel{lpdecor}
\big|\|fg(\sigmaigma\cdot)\|_{L^p({\mathbb{T}}^d)}-\|f\|_{L^p({\mathbb{T}}^d)}\|g\|_{L^p({\mathbb{T}}^d)} \big|\lesssim \sigmaigma^{-\frac{1}{p}}\|f\|_{C^1({\mathbb{T}}^d)}\|g\|_{L^p({\mathbb{T}}^d)}.
\end{equation}
\end{lemma}
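As a simple illustration of \eqref{lpdecor} (not needed in what follows), take $d=1$, $p=2$ and $g(x)=\sqrt{2}\sin(2\pi x)$, so that $\|g\|_{L^2({\mathbb{T}})}=1$. Then
\begin{align*}
\|f\,g(\sigma\cdot)\|_{L^2({\mathbb{T}})}^2
=\int_{{\mathbb{T}}} f(x)^2\big(1-\cos(4\pi\sigma x)\big)\,{\rm d} x
=\|f\|_{L^2({\mathbb{T}})}^2+O\big(\sigma^{-1}\|f\|_{C^1({\mathbb{T}})}^2\big),
\end{align*}
the error coming from one integration by parts in the oscillatory integral; the high frequency of $g(\sigma\cdot)$ thus decorrelates the two factors, in line with \eqref{lpdecor} for $p=2$.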
The following stationary phase lemma is a main tool to handle the errors of Reynolds stress.
\begin{lemma}[\cite{lt20}, Lemma 6; see also \cite{bv19b}, Lemma B.1] \label{commutator estimate1}
Let $a \in C^{2}\left(\mathbb{T}^{3}\right)$. For all $1<p<\infty$ we have
$$
\left\||\nabla|^{-1} \mathbb{P}_{\neq 0}\left(a \mathbb{P}_{\geq k} f\right)\right\|_{L^{p}\left(\mathbb{T}^{3}\right)}
\lesssim k^{-1}\left\|\nabla^{2} a\right\|_{L^{\infty}\left(\mathbb{T}^{3}\right)}\|f\|_{L^{p}\left(\mathbb{T}^{3}\right)}
$$
for any smooth function $f \in L^{p}\left(\mathbb{T}^{3}\right)$.
\end{lemma}
We close this section
with the well-posedness result for equation \eqref{equa-NS}
in the critical space $C_tL^{\frac{3}{2{\alpha}-1}}_x$.
The proof closely follows the strategy
of Kato \cite{K84} and Cannone \cite{can97}.
\begin{theorem} (Well-posedness in the critical space $L^{\frac{3}{2{\alpha}-1}}_x$) \label{Thm-GWP-HNSE-Lp}
Let ${\alpha}\in (1,2)$.
Then, there exists $\delta>0$
such that for any $u_0\in L^{\frac{3}{2{\alpha}-1}}_x$ with
$\|u_0\|_{ L^{\frac{3}{2{\alpha}-1}}_x} \leq \delta$,
there exists a unique global solution $u$
to \eqref{equa-NS} satisfying
$u(0)=u_0$,
\begin{align*}
&u \in C ([0, T]; L^{\frac{3}{2{\alpha}-1}} ({\mathbb{T}}^{3} ) ),\ \
t^{\frac{2{\alpha}-1-3/p}{2{\alpha}}} u \in C\left([0, T] ; L^{p}\left({\mathbb{T}}^{3}\right)\right),
\end{align*}
where $\frac{3}{2{\alpha}-1} < p \leq \frac{6}{2{\alpha}-1}$,
and in addition
\begin{align*}
\lim _{t \rightarrow 0} t^{\frac{2{\alpha}-1-3/p}{2{\alpha}}}\|u(t)\|_{L^p_x}=0.
\end{align*}
\end{theorem}
\begin{proof}
Let us formulate equation \eqref{equa-NS} in the mild form
\begin{align}\lambda_qbel{equa-mild-HNS}
u(t)=e^{-t(-\Delta)^{\alpha}}u_0-\int_{0}^{t} e^{-(t-s)(-\Delta)^{\alpha}} \mathbb{P}_H {\mathrm{div}}(u \otimes u)(s){\rm d} s.
\end{align}
We choose the Banach space $X$
consisting of functions $v$ satisfying
\begin{align}
&v\in C ([0, T] ; L^{\frac{3}{2{\alpha}-1}} ({\mathbb{T}}^{3} ) ),\label{est-ul3.8}\\
&t^{\frac{2{\alpha}-1-3/p}{2{\alpha}}} v \in C\left([0,T] ; L^{p}\left({\mathbb{T}}^{3}\right)\right),\label{est-ulq.8}\\
&\lim _{t \rightarrow 0} t^{\frac{2{\alpha}-1-3/p}{2{\alpha}}}\|v(t)\|_{L^p_x}=0, \label{est-ulimit.8}
\end{align}
and equipped with the norm
\begin{align*}
\|u\|_X:= \|u\|_{C_tL^{\frac{3}{2{\alpha}-1}}_x}
+\sup_{t>0}t^{\frac{2{\alpha}-1-3/p}{2{\alpha}}}\|u(t)\|_{L^{p}_x}.
\end{align*}
In order to prove Theorem \ref{Thm-GWP-HNSE-Lp},
by virtue of Lemma 1.5 of \cite{can97},
it suffices to prove that
\begin{enumerate}
\item [$(i)$] If $u_0\in L^{\frac{3}{2{\alpha}-1}}_x$, then $e^{-t(-\Delta)^{{\alpha}}} u_0 \in X$.
\item [$(ii)$] The bilinear operator defined by
\begin{align*}
B(u,v)(t) :=-\int_{0}^{t} e^{-(t-s)(-\Delta)^{\alpha}}\mathbb{P}_H {\mathrm{div}}( u \otimes v)(s) {\rm d} s
\end{align*}
is bicontinuous in $X\times X \to X$.
\end{enumerate}
The important ingredients of the proof are the following
standard estimates for the
semigroup $\{e^{-t(-\Delta)^{{\alpha}}}\}$,
that is,
for any $\rho \in [1, \infty)$ and $1\leq \rho_2\leq \rho_1 <{\infty}$,
\begin{align}
& \|e^{-t(-\Delta)^{{\alpha}}} v\|_{L^\rho_x} \leq C \|v\|_{L^\rho_x}, \label{semigroup-bdd} \\
& \|e^{-t(-\Delta)^{{\alpha}}} v\|_{L^{\rho_1}_x} \leq C
t^{-\frac{3}{2{\alpha}}(\frac{1}{\rho_2}-\frac{1}{\rho_1})} \|v\|_{L^{\rho_2}_x}, \label{semigroup-rho1-rho2} \\
& \|{\nabla} e^{-t(-\Delta)^{{\alpha}}} v\|_{L^{\rho_1}_x} \leq C
t^{-\frac{1}{2{\alpha}}-\frac{3}{2{\alpha}}(\frac{1}{\rho_2}-\frac{1}{\rho_1})} \|v\|_{L^{\rho_2}_x}. \label{semigroup-nabla}
\end{align}
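These bounds are classical. As a brief heuristic (stated on the whole space; on the torus one argues analogously with the periodized kernel, for $t$ in a bounded time interval, which is all that is needed here), the semigroup acts by convolution with a self-similar kernel,
\begin{align*}
e^{-t(-\Delta)^{{\alpha}}} v = K_t * v, \qquad
K_t(x)=t^{-\frac{3}{2{\alpha}}}\,K\big(t^{-\frac{1}{2{\alpha}}}x\big), \qquad K\in L^1\cap L^{\infty},
\end{align*}
so Young's inequality with $1+\frac{1}{\rho_1}=\frac{1}{r}+\frac{1}{\rho_2}$ and $\|K_t\|_{L^r}\lesssim t^{-\frac{3}{2{\alpha}}(\frac{1}{\rho_2}-\frac{1}{\rho_1})}$ yields \eqref{semigroup-bdd} and \eqref{semigroup-rho1-rho2}, while \eqref{semigroup-nabla} follows in the same way from ${\nabla} K_t$, which carries an extra factor $t^{-\frac{1}{2{\alpha}}}$.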
The property $(i)$ follows immediately
from estimates \eqref{semigroup-bdd} and \eqref{semigroup-rho1-rho2}
with $\rho_1 = p$ and $\rho=\rho_2=3/(2{\alpha}-1)$.
Regarding the second property $(ii)$,
we use estimate \eqref{semigroup-nabla} with
$\rho_1=3/(2{\alpha}-1)$ and $\rho_2=p/2$
to get
\begin{align}\label{verify-ul3.8}
\| B(v_1,v_2)(t)\|_{L^{\frac{3}{2{\alpha}-1}}_x}
& \leq C\int_{0}^{t} (t-s)^{-\frac{1-{\alpha}+3/p}{{\alpha}}} \|v_1(s)\|_{L^p_x}\|v_2(s)\|_{L^p_x}{\rm d} s \notag\\
&\leq C \int_{0}^{t} (t-s)^{-\frac{1-{\alpha}+3/p}{{\alpha}}} s^{-\frac{2{\alpha}-1-3/p}{{\alpha}}}{\rm d} s
\sup_{0\leq s\leq t} (s^{\frac{2{\alpha}-1-3/p}{2{\alpha}}}\|v_1(s)\|_{L^p_x} )
\sup_{0\leq s\leq t} (s^{\frac{2{\alpha}-1-3/p}{2{\alpha}}}\|v_2(s)\|_{L^p_x}) \notag\\
&\leq C \|v_1\|_{X} \|v_2\|_{X}.
\end{align}
Similarly, applying \eqref{semigroup-nabla} with $\rho_1=p$
and $\rho_2=p/2$ we get
\begin{align}\label{verify-ulq.8}
\| B(v_1,v_2)(t)\|_{L^{p}_x}
& \leq C\int_{0}^{t} (t-s)^{-\frac{1+3/p}{2{\alpha}}} \|v_1(s)\|_{L^p_x}\|v_2(s)\|_{L^p_x}{\rm d} s \notag\\
&\leq C \int_{0}^{t} (t-s)^{-\frac{1+3/p}{2{\alpha}}} s^{-\frac{2{\alpha}-1-3/p}{{\alpha}}}{\rm d} s
\sup_{0\leq s\leq t} (s^{\frac{2{\alpha}-1-3/p}{2{\alpha}}}\|v_1(s)\|_{L^p_x} )
\sup_{0\leq s\leq t} (s^{\frac{2{\alpha}-1-3/p}{2{\alpha}}}\|v_2(s)\|_{L^p_x}) \notag\\
&\leq Ct^{-\frac{2{\alpha}-1-3/p}{2{\alpha}}} \|v_1\|_{X} \|v_2\|_{X}.
\end{align}
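For the reader's convenience, we record the elementary exponent bookkeeping behind \eqref{verify-ul3.8} and \eqref{verify-ulq.8}. Both time integrals are Beta-type integrals: for $a,b<1$,
\begin{align*}
\int_0^t (t-s)^{-a}\, s^{-b}\,{\rm d} s
= t^{1-a-b}\int_0^1 (1-\theta)^{-a}\,\theta^{-b}\,{\rm d} \theta .
\end{align*}
In \eqref{verify-ul3.8} one has $a=\frac{1-{\alpha}+3/p}{{\alpha}}$ and $b=\frac{2{\alpha}-1-3/p}{{\alpha}}$, so $a+b=1$ and the integral is a finite constant; the conditions $a<1$ and $b<1$ amount to $p>\frac{3}{2{\alpha}-1}$ and $p<\frac{3}{{\alpha}-1}$, which hold in the admissible range $\frac{3}{2{\alpha}-1}<p\leq \frac{6}{2{\alpha}-1}$ since ${\alpha}\in(1,2)$. In \eqref{verify-ulq.8} one has instead $a=\frac{1+3/p}{2{\alpha}}$ with the same $b$ (and again $a,b<1$ in this range), and then $1-a-b=-\frac{2{\alpha}-1-3/p}{2{\alpha}}$, which is precisely the time weight appearing in the last line of \eqref{verify-ulq.8}.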
Hence, estimates \eqref{verify-ul3.8} and \eqref{verify-ulq.8}
together yield that
\begin{align}
\| B(v_1,v_2)\|_{X} \leq C \|v_1\|_{X} \|v_2\|_{X}.
\end{align}
Moreover,
arguing as in \eqref{verify-ulq.8},
we also infer that,
for any $v_1,v_2\in X$,
\begin{align*}
t^{\frac{2{\alpha}-1-3/p}{2{\alpha}}}
\| B(v_1, v_2)(t)\|_{L^{p}_x}
\leq C
\sup\limits_{0\leq s\leq t}(s^{\frac{2{\alpha}-1-3/p}{2{\alpha}}}\| v_1(s)\|_{L^{p}_x})
\sup\limits_{0\leq s\leq t}(s^{\frac{2{\alpha}-1-3/p}{2{\alpha}}}\| v_2(s)\|_{L^{p}_x})
\to 0, \quad \text{as } t\to 0.
\end{align*}
Thus, the second property $(ii)$ is verified.
The proof is complete.
\end{proof}
\noindent{\bf Acknowledgment.}
Yachun Li thanks the support of NSFC (No. 11831011, 12161141004).
Peng Qu thanks the support of NSFC (No. 12122104, 11831011)
and Shanghai Science and Technology Programs 21ZR1406000, 21JC1400600, 19JC1420101.
Deng Zhang thanks the support of NSFC (No. 11871337, 12161141004)
and Shanghai Rising-Star Program 21QA1404500.
Yachun Li and Deng Zhang are also grateful for the support of the
Institute of Modern Analysis--A Shanghai Frontier Research Center.
\end{document}
\begin{document}
\title{ The Cobordism Hypothesis in Dimension $1$}
\begin{abstract}
In~\cite{lur1} Lurie published an expository article outlining a proof for a higher version of the cobordism hypothesis conjectured by Baez and Dolan in~\cite{bd}. In this note we give a proof for the 1-dimensional case of this conjecture. The proof follows most of the outline given in~\cite{lur1}, but differs in a few crucial details. In particular, the proof makes use of the theory of quasi-unital $\infty$-categories as developed by the author in~\cite{har}.
\end{abstract}
\tableofcontents
\section{ Introduction }
Let $\mathcal{B}^{\ori}_1$ denote the $1$-dimensional oriented cobordism $\infty$-category, i.e. the symmetric monoidal $\infty$-category whose objects are oriented $0$-dimensional closed manifolds and whose morphisms are oriented $1$-dimensional cobordisms between them.
Let $\mathcal{D}$ be a symmetric monoidal $\infty$-category with duals. The $1$-dimensional cobordism hypothesis concerns the $\infty$-category
$$ \mathcal{F}un^{\otimes}(\mathcal{B}^{\ori}_1,\mathcal{D}) $$
of symmetric monoidal functors ${\varphi}: \mathcal{B}^{\ori}_1 \longrightarrow \mathcal{D}$. If $X_+ \in \mathcal{B}^{\ori}_1$ is the object corresponding to a point with positive orientation then the evaluation map $Z \mapsto Z(X_+)$ induces a functor
$$ \mathcal{F}un^{\otimes}(\mathcal{B}^{\ori}_1,\mathcal{D}) \longrightarrow \mathcal{D} $$
It is not hard to show that since $\mathcal{B}^{\ori}_1$ has duals the $\infty$-category $\mathcal{F}un^{\otimes}(\mathcal{B}^{\ori}_1,\mathcal{D})$ is in fact an $\infty$-groupoid, i.e. every natural transformation between two functors $F,G: \mathcal{B}^{\ori}_1 \longrightarrow \mathcal{D}$ is a natural equivalence. This means that the evaluation map $Z \mapsto Z(X_+)$ actually factors through a map
$$ \mathcal{F}un^{\otimes}(\mathcal{B}^{\ori}_1,\mathcal{D}) \longrightarrow \wtl{\mathcal{D}} $$
where $\wtl{\mathcal{D}}$ is the maximal $\infty$-groupoid of $\mathcal{D}$. The cobordism hypothesis then states
\begin{thm}\label{cobordism-hypothesis}
The evaluation map
$$ \mathcal{F}un^{\otimes}(\mathcal{B}^{\ori}_1,\mathcal{D}) \longrightarrow \wtl{\mathcal{D}} $$
is an equivalence of $\infty$-categories.
\end{thm}
\begin{rem}
From the consideration above we see that we could have written the cobordism hypothesis as an equivalence
$$ \wtl{\mathcal{F}un}^{\otimes}(\mathcal{B}^{\ori}_1,\mathcal{D}) \stackrel{\simeq}{\longrightarrow} \wtl{\mathcal{D}} $$
where $\wtl{\mathcal{F}un}^{\otimes}(\mathcal{B}^{\ori}_1,\mathcal{D})$ is the maximal $\infty$-groupoid of $\mathcal{F}un^{\otimes}(\mathcal{B}^{\ori}_1,\mathcal{D})$ (which in this case happens to coincide with $\mathcal{F}un^{\otimes}(\mathcal{B}^{\ori}_1,\mathcal{D})$). This $\infty$-groupoid is the fundamental groupoid of the space of maps from $\mathcal{B}^{\ori}_1$ to $\mathcal{D}$ in the $\infty$-category $\mathcal{C}at^{\otimes}$ of symmetric monoidal $\infty$-categories.
\end{rem}
In his paper~\cite{lur1} Lurie gives an elaborate sketch of proof for a higher dimensional generalization of the $1$-dimensional cobordism hypothesis. For this one needs to generalize the notion of $\infty$-categories to $(\infty,n)$-categories. The strategy of proof described in~\cite{lur1} is inductive in nature. In particular in order to understand the $n=1$ case, one should start by considering the $n=0$ case.
Let $\mathcal{B}^{\un}_0$ be the $0$-dimensional unoriented cobordism category, i.e. the objects of $\mathcal{B}^{\un}_0$ are $0$-dimensional closed manifolds (or equivalently, finite sets) and the morphisms are diffeomorphisms (or equivalently, isomorphisms of finite sets). Note that $\mathcal{B}^{\un}_0$ is a (discrete) $\infty$-groupoid.
Let $X \in \mathcal{B}^{\un}_0$ be the object corresponding to one point. Then the $0$-dimensional cobordism hypothesis states that $\mathcal{B}^{\un}_0$ is in fact the free $\infty$-groupoid (or $(\infty,0)$-category) on one object, i.e. if $\mathcal{G}$ is any other $\infty$-groupoid then the evaluation map $Z \mapsto Z(X)$ induces an equivalence of $\infty$-groupoids
$$ \mathcal{F}un^{\otimes}(\mathcal{B}^{\un}_0,\mathcal{G}) \stackrel{\simeq}{\longrightarrow} \mathcal{G} $$
\begin{rem}
At this point one can wonder what is the justification for considering non-oriented manifolds in the $n=0$ case and oriented ones in the $n=1$ case. As is explained in~\cite{lur1} the desired notion when working in the $n$-dimensional cobordism $(\infty,n)$-category is that of \textbf{$n$-framed} manifolds. One then observes that $0$-framed $0$-manifolds are unoriented manifolds, while taking $1$-framed $1$-manifolds (and $1$-framed $0$-manifolds) is equivalent to taking the respective manifolds with orientation.
\end{rem}
Now the $0$-dimensional cobordism hypothesis is not hard to verify. In fact, it holds in a slightly more general context - we do not have to assume that $\mathcal{G}$ is an $\infty$-groupoid. Indeed, if $\mathcal{G}$ is \textbf{any symmetric monoidal $\infty$-category} then the evaluation map induces an equivalence of $\infty$-categories
$$ \mathcal{F}un^{\otimes}(\mathcal{B}^{\un}_0,\mathcal{G}) \stackrel{\simeq}{\longrightarrow} \mathcal{G} $$
and hence also an equivalence of $\infty$-groupoids
$$ \wtl{\mathcal{F}un}^{\otimes}(\mathcal{B}^{\un}_0,\mathcal{G}) \stackrel{\simeq}{\longrightarrow} \wtl{\mathcal{G}} $$
Now consider the under-category $\mathcal{C}at^{\otimes}_{\mathcal{B}^{\un}_0/}$ of symmetric monoidal $\infty$-categories $\mathcal{D}$ equipped with a functor $\mathcal{B}^{\un}_0 \longrightarrow \mathcal{D}$. Since $\mathcal{B}^{\un}_0$ is free on one generator this category can be identified with the $\infty$-category of \textbf{pointed} symmetric monoidal $\infty$-categories, i.e. symmetric monoidal $\infty$-categories with a chosen object. We will often not distinguish between these two notions.
Now the point of positive orientation $X_+ \in \mathcal{B}^{\ori}_1$ determines a functor $\mathcal{B}^{\un}_0 \longrightarrow \mathcal{B}^{\ori}_1$, i.e. an object in $\mathcal{C}at^{\otimes}_{\mathcal{B}^{\un}_0/}$, which we shall denote by $\mathcal{B}^+_1$. The $1$-dimensional cobordism hypothesis is then equivalent to the following statement:
\begin{thm}[Cobordism Hypothesis $0$-to-$1$]\label{0-to-1}
Let $\mathcal{D} \in \mathcal{C}at^{\otimes}_{\mathcal{B}^{\un}_0 /}$ be a pointed symmetric monoidal $\infty$-category with duals. Then the $\infty$-groupoid
$$ \wtl{\mathcal{F}un}^{\otimes}_{\mathcal{B}^{\un}_0 /}(\mathcal{B}^+_1,\mathcal{D}) $$
is \textbf{contractible}.
\end{thm}
Theorem~\ref{0-to-1} can be considered as the inductive step from the $0$-dimensional cobordism hypothesis to the $1$-dimensional one. Now the strategy outlined in~\cite{lur1} proceeds to bridge the gap between $\mathcal{B}^{\un}_0$ and $\mathcal{B}^{\ori}_1$ by considering an intermediate $\infty$-category
$$ \mathcal{B}^{\un}_0 \hookrightarrow \mathcal{B}^{\ev}_1 \hookrightarrow \mathcal{B}^{\ori}_1 $$
This intermediate $\infty$-category is defined in~\cite{lur1} in terms of framed functions and index restriction. However in the $1$-dimensional case one can describe it without going into the theory of framed functions. In particular we will use the following definition:
\begin{define}
Let $\iota: \mathcal{B}^{\ev}_1 \hookrightarrow \mathcal{B}^{\ori}_1$ be the subcategory containing all objects and only the cobordisms $M$ in which every connected component $M_0 \subseteq M$ is either an identity segment or an evaluation segment.
\end{define}
Let us now describe how to bridge the gap between $\mathcal{B}^{\un}_0$ and $\mathcal{B}^{\ev}_1$. Let $\mathcal{D}$ be an $\infty$-category with duals and let
$$ {\varphi}:\mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D} $$
be a symmetric monoidal functor. We will say that ${\varphi}$ is \textbf{non-degenerate} if for each $X \in \mathcal{B}^{\ev}_1$ the map
$$ {\varphi}(\ev_X): {\varphi}(X) \otimes {\varphi}(\check{X}) \simeq {\varphi}(X \otimes \check{X}) \longrightarrow {\varphi}(1) \simeq 1 $$
is \textbf{non-degenerate}, i.e. identifies ${\varphi}(\check{X})$ with a dual of ${\varphi}(X)$. We will denote by
$$ \mathcal{C}at^{\nd}_{\mathcal{B}^{\ev}_1 /} \subseteq \mathcal{C}at^{\otimes}_{\mathcal{B}^{\ev}_1 /} $$
the full subcategory spanned by objects ${\varphi}: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D}$ such that $\mathcal{D}$ has duals and ${\varphi}$ is non-degenerate.
Let $X_+ \in \mathcal{B}^{\ev}_1$ be the point with positive orientation. Then $X_+$ determines a functor
$$ \mathcal{B}^{\un}_0 \longrightarrow \mathcal{B}^{\ev}_1 $$
The restriction map ${\varphi} \mapsto {\varphi}|_{\mathcal{B}^{\un}_0}$ then induces a functor
$$ \mathcal{C}at^{\nd}_{\mathcal{B}^{\ev}_1 /} \longrightarrow \mathcal{C}at^{\otimes}_{\mathcal{B}^{\un}_0 /} $$
Now the gap between $\mathcal{B}^{\ev}_1$ and $\mathcal{B}^{\un}_0$ can be climbed using the following lemma (see~\cite{lur1}):
\begin{lem}\label{0-to-1-ev}
The functor
$$ \mathcal{C}at^{\nd}_{\mathcal{B}^{\ev}_1 /} \longrightarrow \mathcal{C}at^{\otimes}_{\mathcal{B}^{\un}_0 /} $$
is fully faithful.
\end{lem}
\begin{proof}
First note that if $F:\mathcal{D} \longrightarrow \mathcal{D}'$ is a symmetric monoidal functor where $\mathcal{D},\mathcal{D}'$ have duals and ${\varphi}: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D}$ is non-degenerate then $F \circ {\varphi}$ will be non-degenerate as well. Hence it will be enough to show that if $\mathcal{D}$ has duals then the restriction map induces an equivalence between the $\infty$-groupoid of non-degenerate symmetric monoidal functors
$$ \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D} $$
and the $\infty$-groupoid of symmetric monoidal functors
$$ \mathcal{B}^{\un}_0 \longrightarrow \mathcal{D} $$
Now specifying a non-degenerate functor
$$ \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D} $$
is equivalent to specifying a pair of objects $D_+,D_- \in \mathcal{D}$ (the images of $X_+,X_-$ respectively) and a non-degenerate morphism
$$ e: D_+ \otimes D_- \longrightarrow 1 $$
which is the image of $\ev_{X_+}$. Since $\mathcal{D}$ has duals the $\infty$-groupoid of triples $(D_+,D_-,e)$ in which $e$ is non-degenerate is equivalent to the $\infty$-groupoid of triples $(D_+,\check{D}_-,f)$ where $f: D_+ \longrightarrow \check{D}_-$ is an equivalence. Hence the forgetful map $(D_+,D_-,e) \mapsto D_+$ is an equivalence.
\end{proof}
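To keep a concrete example in mind (purely for illustration, and not used below): if $\mathcal{D}$ is the symmetric monoidal category of finite dimensional vector spaces over a field $k$, then a non-degenerate functor ${\varphi}: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D}$ amounts to the choice of a finite dimensional vector space $V={\varphi}(X_+)$, with ${\varphi}(X_-)$ identified with the dual space $V^{\vee}$ and with ${\varphi}(\ev_{X_+})$ corresponding to the canonical pairing
$$ V \otimes V^{\vee} \longrightarrow k, \qquad v \otimes \xi \mapsto \xi(v), $$
in accordance with the equivalence $(D_+,D_-,e) \mapsto D_+$ established in the proof above.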
Now consider the natural inclusion $\iota: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{B}^{\ori}_1$ as an object in $\mathcal{C}at^{\nd}_{\mathcal{B}^{\ev}_1 /}$. Then by Lemma~\ref{0-to-1-ev} we see that the $1$-dimensional cobordism hypothesis will be established once we make the following last step:
\begin{thm}[Cobordism Hypothesis - Last Step]\label{cobordism-last-step}
Let $\mathcal{D}$ be a symmetric monoidal $\infty$-category with duals and let ${\varphi}: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D}$ be a \textbf{non-degenerate} functor. Then the $\infty$-groupoid
$$ \wtl{\mathcal{F}un}^{\otimes}_{\mathcal{B}^{\ev}_1 /}(\mathcal{B}^{\ori}_1,\mathcal{D}) $$
is contractible.
\end{thm}
Note that since $\mathcal{B}^{\ev}_1 \longrightarrow \mathcal{B}^{\ori}_1$ is essentially surjective all the functors in
$$ \wtl{\mathcal{F}un}^{\otimes}_{\mathcal{B}^{\ev}_1 /}(\mathcal{B}^{\ori}_1,\mathcal{D}) $$
will have the same essential image as ${\varphi}$. Hence it will be enough to prove the claim in the case where ${\varphi}: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D}$ is \textbf{essentially surjective}. We will denote by
$$ \mathcal{C}at^{\sur}_{\mathcal{B}^{\ev}_1 /} \subseteq \mathcal{C}at^{\nd}_{\mathcal{B}^{\ev}_1 /} $$
the full subcategory spanned by essentially surjective functors ${\varphi}: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D}$. Hence we can phrase Theorem~\ref{cobordism-last-step} as follows:
\begin{thm}[Cobordism Hypothesis - Last Step 2]\label{cobordism-last-step-2}
Let $\mathcal{D}$ be a symmetric monoidal $\infty$-category with duals and let ${\varphi}: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D}$ be an \textbf{essentially surjective non-degenerate} functor. Then the space of maps
$$ \mathcal{M}ap_{\mathcal{C}at^{\sur}_{\mathcal{B}^{\ev}_1 /}}(\iota,{\varphi}) $$
is contractible.
\end{thm}
The purpose of this paper is to provide a formal proof for this last step. The paper is organized as follows. In \S~\ref{s-qu-cobordism} we prove a variant of Theorem~\ref{cobordism-last-step-2} which we call the quasi-unital cobordism hypothesis (Theorem~\ref{qu-cobordism}). Then in \S~\ref{s-from-qu-to-regular} we explain how to deduce Theorem~\ref{cobordism-last-step-2} from Theorem~\ref{qu-cobordism}. Section \S~\ref{s-from-qu-to-regular} relies on the notion of \textbf{quasi-unital $\infty$-categories} which is developed rigorously in~\cite{har} (however \S~\ref{s-qu-cobordism} is completely independent of~\cite{har}).
\section{ The Quasi-Unital Cobordism Hypothesis }\label{s-qu-cobordism}
Let ${\varphi}: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D}$ be a non-degenerate functor and let $\mathcal{G}rp_\infty$ denote the $\infty$-category of $\infty$-groupoids. We can define a lax symmetric monoidal functor $M_{\varphi}: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{G}rp_{\infty}$ by setting
$$ M_{\varphi}(X) = \mathcal{M}ap_{\mathcal{D}}(1,{\varphi}(X)) $$
We will refer to $M_{\varphi}$ as the \textbf{fiber functor} of ${\varphi}$. Now if $\mathcal{D}$ has duals and ${\varphi}$ is non-degenerate, then one can expect this to be reflected in $M_{\varphi}$ somehow. More precisely, we have the following notion:
\begin{define}
Let $M: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{G}rp_{\infty}$ be a lax symmetric monoidal functor. An object $Z \in M(X \otimes \check{X})$ is called \textbf{non-degenerate} if for each object $Y \in \mathcal{B}^{\ev}_1$ the natural map
$$ M(Y \otimes \check{X}) \stackrel{Id \times Z}{\longrightarrow} M(Y \otimes \check{X}) \times M(X \otimes \check{X}) \longrightarrow M(Y \otimes \check{X} \otimes X \otimes \check{X}) \stackrel{M(Id \otimes \ev \otimes Id)}{\longrightarrow} M(Y \otimes \check{X}) $$
is an equivalence of $\infty$-groupoids.
\end{define}
\begin{rem}\label{uniqueness}
If a non-degenerate element $Z \in M(X \otimes \check{X})$ exists then it is unique up to a (non-canonical) equivalence.
\end{rem}
\begin{example}\label{unit}
Let $M: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{G}rp_{\infty}$ be a lax symmetric monoidal functor. The lax symmetric structure of $M$ includes a structure map $1_{\mathcal{G}rp_{\infty}} \longrightarrow M(1)$ which can be described by choosing an object $Z_1 \in M(1)$. The axioms of lax monoidality then ensure that $Z_1$ is non-degenerate.
\end{example}
\begin{define}
A lax symmetric monoidal functor $M: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{G}rp_{\infty}$ will be called \textbf{non-degenerate} if for each object $X \in \mathcal{B}^{\ev}_1$ there exists a non-degenerate object $Z \in M(X \otimes \check{X})$.
\end{define}
\begin{define}
Let $M_1,M_2: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{G}rp_{\infty}$ be two non-degenerate lax symmetric monoidal functors. A lax symmetric natural transformation $T: M_1 \longrightarrow M_2$ will be called \textbf{non-degenerate} if for each object $X \in \mathcal{B}^{\ev}_1$ and each non-degenerate object $Z \in M_1(X \otimes \check{X})$ the object $T(Z) \in M_2(X \otimes \check{X})$ is non-degenerate.
\end{define}
\begin{rem}
From remark~\ref{uniqueness} we see that if $T(Z) \in M_2(X \otimes \check{X})$ is non-degenerate for \textbf{at least one} non-degenerate $Z \in M_1(X \otimes \check{X})$ then it will be true for all non-degenerate $Z \in M_1(X \otimes \check{X})$.
\end{rem}
Now we claim that if $\mathcal{D}$ has duals and ${\varphi}: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D}$ is non-degenerate then the fiber functor $M_{\varphi}$ will be non-degenerate: for each object $X \in \mathcal{B}^{\ev}_1$ there exists a coevaluation morphism
$$ \coev_{{\varphi}(X)}: 1 \longrightarrow {\varphi}(X) \otimes {\varphi}(\check{X}) \simeq {\varphi}(X \otimes \check{X}) $$
which determines an element $Z_X \in M_{\varphi}(X \otimes \check{X})$. It is not hard to see that this element is non-degenerate.
Let $\mathcal{F}un^{\lax}(\mathcal{B}^{\ev}_1,\mathcal{G}rp_{\infty})$ denote the $\infty$-category of lax symmetric monoidal functors $\mathcal{B}^{\ev}_1 \longrightarrow \mathcal{G}rp_{\infty}$ and denote by
$$ \mathcal{F}un_{\nd}^{\lax}(\mathcal{B}^{\ev}_1,\mathcal{G}rp_{\infty}) \subseteq \mathcal{F}un^{\lax}(\mathcal{B}^{\ev}_1,\mathcal{G}rp_{\infty}) $$
the subcategory spanned by non-degenerate functors and non-degenerate natural transformations. Now the construction ${\varphi} \mapsto M_{\varphi}$ determines a functor
$$ \mathcal{C}at^{\nd}_{\mathcal{B}^{\ev}_1 /} \longrightarrow \mathcal{F}un_{\nd}^{\lax}(\mathcal{B}^{\ev}_1,\mathcal{G}rp_{\infty}) $$
In particular if ${\varphi}: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{C}$ and $\psi: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D}$ are non-degenerate then any functor $T:\mathcal{C} \longrightarrow \mathcal{D}$ under $\mathcal{B}^{\ev}_1$ will induce a non-degenerate natural transformation
$$ T_*: M_{{\varphi}} \longrightarrow M_{\psi} $$
The rest of this section is devoted to proving the following result, which we call the "quasi-unital cobordism hypothesis":
\begin{thm}[Cobordism Hypothesis - Quasi-Unital] \label{qu-cobordism}
Let $\mathcal{D}$ be a symmetric monoidal $\infty$-category with duals, let ${\varphi}: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D}$ be a non-degenerate functor and let $\iota: \mathcal{B}^{\ev}_1 \hookrightarrow \mathcal{B}^{\ori}_1$ be the natural inclusion. Let $M_\iota,M_{\varphi} \in \mathcal{F}un^{\lax}_{\nd}$ be the corresponding fiber functors. Then the space of maps
$$ \mathcal{M}ap_{\mathcal{F}un^{\lax}_{\nd}}(M_\iota, M_{\varphi}) $$
is contractible.
\end{thm}
\begin{proof}
We start by transforming the lax symmetric monoidal functors $M_\iota,M_{\varphi}$ to \textbf{left fibrations} over $\mathcal{B}^{\ev}_1$ using the symmetric monoidal analogue of Grothendieck's construction, as described in~\cite{lur1}, page $67-68$.
Let $M: \mathcal{B} \longrightarrow \mathcal{G}rp_\infty$ be a lax symmetric monoidal functor. We can construct a symmetric monoidal $\infty$-category $\mathcal{G}roth(\mathcal{B},M)$ as follows:
\begin{enumerate}
\item
The objects of $\mathcal{G}roth(\mathcal{B},M)$ are pairs $(X, \eta)$ where $X \in \mathcal{B}$ is an object and $\eta$ is an object of $M(X)$.
\item
The space of maps from $(X,\eta)$ to $(X',\eta')$ in $\mathcal{G}roth(\mathcal{B},M)$ is defined to be the classifying space of the $\infty$-groupoid of pairs $(f,{\alpha})$ where $f: X \longrightarrow X'$ is a morphism in $\mathcal{B}$ and ${\alpha}: f_*\eta \longrightarrow \eta'$ is a morphism in $M(X')$. Composition is defined in a straightforward way.
\item
The symmetric monoidal structure on $\mathcal{G}roth(\mathcal{B},M)$ is obtained by defining
$$ (X,\eta) \otimes (X',\eta') = (X \otimes X',\beta_{X,X'}(\eta \otimes \eta')) $$
where $\beta_{X,Y}: M(X) \times M(Y) \longrightarrow M(X \otimes Y)$ is given by the lax symmetric structure of $M$.
\end{enumerate}
The forgetful functor $(X,\eta) \mapsto X$ induces a \textbf{left fibration}
$$ \mathcal{G}roth(\mathcal{B},M) \longrightarrow \mathcal{B} $$
\begin{thm}\label{unstraightening}
The association $M \mapsto \mathcal{G}roth(\mathcal{B},M)$ induces an equivalence between the $\infty$-category of lax-symmetric monoidal functors $\mathcal{B} \longrightarrow \mathcal{G}rp_\infty$ and the full subcategory of the over $\infty$-category
$ \mathcal{C}at^{\otimes}_{/\mathcal{B}} $
spanned by left fibrations.
\end{thm}
\begin{proof}
This follows from the more general statement given in~\cite{lur1} Proposition $3.3.26$. Note that any map of left fibrations over $\mathcal{B}$ is in particular a map of coCartesian fibrations because if $p: \mathcal{C} \longrightarrow \mathcal{B}$ is a left fibration then any edge in $\mathcal{C}$ is $p$-coCartesian.
\end{proof}
\begin{rem}
Note that if $\mathcal{C} \longrightarrow \mathcal{B}$ is a left fibration of symmetric monoidal $\infty$-categories and $\mathcal{A} \longrightarrow \mathcal{B}$ is a symmetric monoidal functor then the $\infty$-category
$$ \mathcal{F}un^{\otimes}_{/ \mathcal{B}}(\mathcal{A},\mathcal{C}) $$
is actually an \textbf{$\infty$-groupoid}, and by Theorem~\ref{unstraightening} is equivalent to the $\infty$-groupoid of lax-monoidal natural transformations between the corresponding lax monoidal functors from $\mathcal{B}$ to $\mathcal{G}rp_\infty$.
\end{rem}
Now set
$$ \mathcal{F}_\iota \stackrel{\df}{=} \mathcal{G}roth(\mathcal{B}^{\ev}_1,M_{\iota}) $$
$$ \mathcal{F}_{\varphi} \stackrel{\df}{=} \mathcal{G}roth(\mathcal{B}^{\ev}_1,M_{{\varphi}}) $$
Let
$$ \mathcal{F}un^{\nd}_{/\mathcal{B}^{\ev}_1}(\mathcal{F}_{\iota},\mathcal{F}_{{\varphi}}) \subseteq \mathcal{F}un^{\otimes}_{/\mathcal{B}^{\ev}_1}(\mathcal{F}_{\iota},\mathcal{F}_{{\varphi}}) $$
denote the full sub $\infty$-groupoid of functors which correspond to \textbf{non-degenerate} natural transformations
$$ M_\iota \longrightarrow M_{\varphi} $$
under the Grothendieck construction. Note that $\mathcal{F}un^{\nd}_{/\mathcal{B}^{\ev}_1}(\mathcal{F}_{\iota},\mathcal{F}_{{\varphi}})$ is a union of connected components of the $\infty$-groupoid $\mathcal{F}un^{\otimes}_{/\mathcal{B}^{\ev}_1}(\mathcal{F}_{\iota},\mathcal{F}_{{\varphi}})$.
We now need to show that the $\infty$-groupoid
$$ \mathcal{F}un^{\nd}_{/\mathcal{B}^{\ev}_1}(\mathcal{F}_{\iota},\mathcal{F}_{{\varphi}}) $$
is contractible.
Unwinding the definitions we see that the objects of $\mathcal{F}_{\iota}$ are pairs $(X,M)$ where $X \in \mathcal{B}^{\ev}_1$ is a $0$-manifold and $M \in \mathcal{M}ap_{\mathcal{B}^{\ori}_1}(\emptyset,X)$ is a cobordism from $\emptyset$ to $X$. A morphism in $\mathcal{F}_{\iota}$ from $(X,M)$ to $(X',M')$ consists of a morphism in $\mathcal{B}^{\ev}_1$
$$ N:X \longrightarrow X' $$
and a diffeomorphism
$$ T:M \coprod_{X} N \cong M' $$
respecting $X'$. Note that for each $(X,M) \in \mathcal{F}_{\iota}$ we have an identification $X \simeq \partial M$. Furthermore, the space of morphisms from $(\partial M,M)$ to $(\partial M',M')$ is \textbf{homotopy equivalent to the space of orientation-preserving $\pi_0$-surjective embeddings of $M$ in $M'$} (which are not required to respect the boundaries in any way).
Now in order to analyze the symmetric monoidal $\infty$-category $\mathcal{F}_\iota$ we are going to use the theory of \textbf{$\infty$-operads}, as developed in~\cite{lur2}. Recall that the category $\mathcal{C}at^{\otimes}$ of symmetric monoidal $\infty$-categories admits a forgetful functor
$$ \mathcal{C}at^{\otimes} \longrightarrow \Op^{\infty} $$
to the $\infty$-category of \textbf{$\infty$-operads}. This functor has a left adjoint
$$ \mathcal{E}nv: \Op^{\infty} \longrightarrow \mathcal{C}at^{\otimes} $$
called the \textbf{monoidal envelope} functor (see~\cite{lur2} \S $2.2.4$). In particular, if $\mathcal{C}^{\otimes}$ is an $\infty$-operad and $\mathcal{D}$ is a symmetric monoidal $\infty$-category with corresponding $\infty$-operad $\mathcal{D}^{\otimes} \longrightarrow \N(\Gamma_*)$ then there is an \textbf{equivalence of $\infty$-categories}
$$ \mathcal{F}un^{\otimes}(\mathcal{E}nv(\mathcal{C}^{\otimes}),\mathcal{D}) \simeq \mathcal{A}lg_{\mathcal{C}}(\mathcal{D}^{\otimes}) $$
where $\mathcal{A}lg_{\mathcal{C}}\left(\mathcal{D}^{\otimes}\right) \subseteq \mathcal{F}un_{/\N(\Gamma_*)}(\mathcal{C}^{\otimes},\mathcal{D}^{\otimes})$ denotes the full subcategory spanned by $\infty$-operad maps (see Proposition $2.2.4.9$ of~\cite{lur2}).
Now observing the definition of the monoidal envelope (see Remark $2.2.4.3$ in~\cite{lur2}) we see that $\mathcal{F}_{\iota}$ is equivalent to the monoidal envelope of a certain simple $\infty$-operad
$$ \mathcal{F}_\iota \simeq \mathcal{E}nv\left(\mathcal{OF}^{\otimes}\right) $$
which can be described as follows: the underlying $\infty$-category $\mathcal{OF}$ of $\mathcal{OF}^{\otimes}$ is the $\infty$-category of \textbf{connected} $1$-manifolds (i.e. either the segment or the circle) and the morphisms are \textbf{orientation-preserving embeddings} between them. The (active) $n$-to-$1$ operations of $\mathcal{OF}$ (for $n\geq 1$) from $(M_1,...,M_n)$ to $M$ are the orientation-preserving embeddings
$$ M_1 \coprod ... \coprod M_n \longrightarrow M $$
and there are no $0$-to-$1$ operations.
Now observe that the induced map $\mathcal{OF}^{\otimes} \longrightarrow (\mathcal{B}^{\ev}_1)^{\otimes}$ is a fibration of $\infty$-operads. We claim that $\mathcal{F}_{\iota}$ is not only the enveloping symmetric monoidal $\infty$-category of $\mathcal{OF}^{\otimes}$, but that $\mathcal{F}_{\iota} \longrightarrow \mathcal{B}^{\ev}_1$ is the enveloping \textbf{left fibration} of $\mathcal{OF} \longrightarrow \mathcal{B}^{\ev}_1$. More precisely we claim that for any left fibration $\mathcal{D} \longrightarrow \mathcal{B}^{\ev}_1$ of symmetric monoidal $\infty$-categories the natural map
$$ \mathcal{F}un^{\otimes}_{/\mathcal{B}^{\ev}_1}\left(F_{\iota},\mathcal{D}\right) \longrightarrow \mathcal{A}lg_{\mathcal{OF} / \mathcal{B}^{\ev}_1}(\mathcal{D}^{\otimes}) $$
is an equivalence of $\infty$-groupoids (where both terms denote mapping objects in the respective \textbf{over-categories}). This is in fact not a special property of $\mathcal{F}_{\iota}$:
\begin{lem}\label{left-envelope}
Let $\mathcal{O}$ be a symmetric monoidal $\infty$-category with corresponding $\infty$-operad $\mathcal{O}^{\otimes} \longrightarrow \N(\Gamma_*)$ and let $p:\mathcal{C}^{\otimes} \longrightarrow \mathcal{O}^{\otimes}$ be a fibration of $\infty$-operads such that the induced map
$$ \ovl{p}:\mathcal{E}nv\left(\mathcal{C}^{\otimes}\right) \longrightarrow \mathcal{O} $$
is a left fibration. Let $\mathcal{D} \longrightarrow \mathcal{O}$ be some other left fibration of symmetric monoidal categories. Then the natural map
$$ \mathcal{F}un^{\otimes}_{/\mathcal{O}}\left(\mathcal{E}nv\left(\mathcal{C}^{\otimes}\right),\mathcal{D}\right) \longrightarrow \mathcal{A}lg_{\mathcal{C} / \mathcal{O}}(\mathcal{D}^{\otimes}) $$
is an equivalence of $\infty$-categories. Furthermore, both sides are in fact $\infty$-groupoids.
\end{lem}
\begin{proof}
Consider the diagram
$$ \xymatrix{
\mathcal{F}un^{\otimes}(\mathcal{E}nv\left(\mathcal{C}^{\otimes}\right),\mathcal{D}) \ar^{\simeq}[r]\ar[d] & \mathcal{A}lg_{\mathcal{C}}\left(\mathcal{D}^{\otimes}\right) \ar[d] \\
\mathcal{F}un^{\otimes}(\mathcal{E}nv\left(\mathcal{C}^{\otimes}\right),\mathcal{O}) \ar^{\simeq}[r] & \mathcal{A}lg_{\mathcal{C}}\left(\mathcal{O}^{\otimes}\right) \\
}$$
Now the vertical maps are left fibrations and by adjunction the horizontal maps are equivalences. By~\cite{lur3} Proposition $3.3.1.5$ we get that the induced map on the fibers of $p$ and $\ovl{p}$ respectively
$$ \mathcal{F}un^{\otimes}_{/\mathcal{O}}\left(\mathcal{E}nv\left(\mathcal{C}^{\otimes}\right),\mathcal{D}\right) \longrightarrow \mathcal{A}lg_{\mathcal{C} / \mathcal{O}}(\mathcal{D}^{\otimes}) $$
is a weak equivalence of $\infty$-groupoids.
\end{proof}
\begin{rem}
In~\cite{lur2} a relative variant $\mathcal{E}nv_{\mathcal{B}^{\ev}_1}$ of $\mathcal{E}nv$ is introduced which sends a fibration of $\infty$-operads $\mathcal{C}^{\otimes} \longrightarrow (\mathcal{B}^{\ev}_1)^{\otimes}$ to its enveloping coCartesian fibration $\mathcal{E}nv_{\mathcal{B}^{\ev}_1}\left(\mathcal{C}^{\otimes}\right) \longrightarrow \mathcal{B}^{\ev}_1$. Note that in our case the map
$$ \mathcal{F}_{\iota} \longrightarrow \mathcal{B}^{\ev}_1 $$
is \textbf{not} the enveloping coCartesian fibration of $\mathcal{OF}^{\otimes} \longrightarrow (\mathcal{B}^{\ev}_1)^{\otimes}$. However from Lemma~\ref{left-envelope} it follows that the map
$$ \xymatrix{
\mathcal{F}_{\iota} \ar[rr]\ar[dr] && \mathcal{E}nv_{\mathcal{B}^{\ev}_1}\left(\mathcal{OF}^{\otimes}\right) \ar[dl] \\
& \mathcal{B}^{\ev}_1 & \\
}$$
is a \textbf{covariant equivalence} over $\mathcal{B}^{\ev}_1$, i.e. induces a weak equivalence of simplicial sets on the fibers (where the fibers on the left are $\infty$-groupoids and the fibers on the right are $\infty$-categories). This claim can also be verified directly by unwinding the definition of $\mathcal{E}nv_{\mathcal{B}^{\ev}_1}\left(\mathcal{OF}^{\otimes}\right)$.
\end{rem}
Summing up the discussion so far we observe that we have a weak equivalence of $\infty$-groupoids
$$ \mathcal{F}un^{\otimes}_{/\mathcal{B}^{\ev}_1}\left(\mathcal{F}_{\iota},\mathcal{F}_{{\varphi}}\right) \stackrel{\simeq}{\longrightarrow} \mathcal{A}lg_{\mathcal{OF} / \mathcal{B}^{\ev}_1}\left(\mathcal{F}_{{\varphi}}^{\otimes}\right) $$
Let
$$ \mathcal{A}lg^{\nd}_{\mathcal{OF} / \mathcal{B}^{\ev}_1}\left(\mathcal{F}_{{\varphi}}^{\otimes}\right) \subseteq \mathcal{A}lg_{\mathcal{OF} / \mathcal{B}^{\ev}_1}\left(\mathcal{F}_{{\varphi}}^{\otimes}\right) $$
denote the full sub $\infty$-groupoid corresponding to
$$ \mathcal{F}un^{\nd}_{/\mathcal{B}^{\ev}_1}(\mathcal{F}_{\iota},\mathcal{F}_{{\varphi}}) \subseteq \mathcal{F}un^{\otimes}_{/\mathcal{B}^{\ev}_1}(\mathcal{F}_{\iota},\mathcal{F}_{{\varphi}}) $$
under the adjunction. We are now reduced to proving that the $\infty$-groupoid
$$ \mathcal{A}lg^{\nd}_{\mathcal{OF} / \mathcal{B}^{\ev}_1}\left(\mathcal{F}_{{\varphi}}^{\otimes}\right) $$
is contractible.
Let $\mathcal{OI}^{\otimes} \subseteq \mathcal{OF}^{\otimes}$ be the full sub $\infty$-operad of $\mathcal{OF}^{\otimes}$ spanned by connected $1$-manifolds which are diffeomorphic to the segment (and all $n$-to-$1$ operations between them). In particular we see that $\mathcal{OI}^{\otimes}$ is equivalent to the \textbf{non-unital associative $\infty$-operad}.
We begin with the following theorem which reduces the handling of $\mathcal{OF}^{\otimes}$ to $\mathcal{OI}^{\otimes}$.
\begin{thm}\label{removing-circles}
Let $q:\mathcal{C}^{\otimes} \longrightarrow \mathcal{O}^{\otimes}$ be a left fibration of $\infty$-operads. Then the restriction map
$$ \mathcal{A}lg_{\mathcal{OF} / \mathcal{O}}(\mathcal{C}^{\otimes}) \longrightarrow \mathcal{A}lg_{\mathcal{OI} / \mathcal{O}}(\mathcal{C}^{\otimes}) $$
is a weak equivalence.
\end{thm}
\begin{proof}
We will base our claim on the following general lemma:
\begin{lem}\label{free-algebra}
Let $\mathcal{A}^{\otimes} \longrightarrow \mathcal{B}^{\otimes}$ be a map of $\infty$-operads and let $q:\mathcal{C}^{\otimes} \longrightarrow \mathcal{O}^{\otimes}$ be a \textbf{left fibration} of $\infty$-operads. Suppose that for every object $B \in \mathcal{B}$, the category
$$ \mathcal{F}_B = \mathcal{A}^{\otimes}_{\act} \times_{\mathcal{B}^{\otimes}_{\act}} \left(\mathcal{B}^{\otimes}_{\act}\right)_{/B} $$
is weakly contractible (see~\cite{lur2} for the terminology). Then the natural restriction map
$$ \mathcal{A}lg_{\mathcal{A} / \mathcal{O}}(\mathcal{C}^{\otimes}) \longrightarrow \mathcal{A}lg_{\mathcal{B} / \mathcal{O}}(\mathcal{C}^{\otimes}) $$
is a weak equivalence.
\end{lem}
\begin{proof}
In~\cite{lur2} \S $3.1.3$ it is explained how under certain conditions the forgetful functor (i.e. restriction map)
$$ \mathcal{A}lg_{\mathcal{A} / \mathcal{O}}(\mathcal{C}^{\otimes}) \longrightarrow \mathcal{A}lg_{\mathcal{B} / \mathcal{O}}(\mathcal{C}^{\otimes}) $$
admits a left adjoint, called the \textbf{free algebra functor}. Since $\mathcal{C}^{\otimes} \longrightarrow \mathcal{O}^{\otimes}$ is a left fibration both these $\infty$-categories are $\infty$-groupoids, and so any adjunction between them will be an equivalence. Hence it will suffice to show that the conditions for the existence of a left adjoint are satisfied in this case.
Since $q: \mathcal{C}^{\otimes} \longrightarrow \mathcal{O}^{\otimes}$ is a left fibration $q$ is \textbf{compatible with colimits indexed by weakly contractible diagrams} in the sense of~\cite{lur2} Definition $3.1.1.18$ (because weakly contractible colimits exist in every $\infty$-groupoid and are preserved by any functor between $\infty$-groupoids). Combining Corollary $3.1.3.4$ and Proposition $3.1.1.20$ of~\cite{lur2} we see that the desired free algebra functor exists.
\end{proof}
In view of Lemma~\ref{free-algebra} it will be enough to check that for every object $M \in \mathcal{OF}$ (i.e. every connected $1$-manifold) the $\infty$-category
$$ \mathcal{F}_M \stackrel{\df}{=} \mathcal{OI}^{\otimes}_{\act} \times_{\mathcal{OF}^{\otimes}_{\act}} \left(\mathcal{OF}^{\otimes}_{\act}\right)_{/M} $$
is weakly contractible.
Unwinding the definitions we see that the objects of $\mathcal{F}_M$ are tuples of $1$-manifolds $(M_1,...,M_n)$ ($n \geq 1$), such that each $M_i$ is diffeomorphic to a segment, together with an orientation preserving embedding
$$ f: M_1 \coprod ... \coprod M_n \hookrightarrow M $$
A morphism in $\mathcal{F}_M$ from
$$ f: M_1 \coprod ... \coprod M_n \hookrightarrow M $$
to
$$ g: M_1' \coprod ... \coprod M_m' \hookrightarrow M $$
is a $\pi_0$-surjective orientation-preserving embedding
$$ T:M_1 \coprod ... \coprod M_n \longrightarrow M_1' \coprod ... \coprod M_m' $$
together with an \textbf{isotopy} $g \circ T \sim f$.
Now when $M$ is the segment then $\mathcal{F}_M$ contains a terminal object and so is weakly contractible. Hence we only need to take care of the case of the circle $M=S^1$.
It is not hard to verify that the category $\mathcal{F}_{S^1}$ is in fact discrete - the space of self isotopies of any embedding $f:M_1 \coprod ... \coprod M_n \hookrightarrow M $ is equivalent to the loop space of $S^1$ and hence discrete. In fact one can even describe $\mathcal{F}_{S^1}$ in completely combinatorial terms. In order to do that we will need some terminology.
\begin{define}
Let ${\Lambda}_{\infty}$ be the category whose objects correspond to the natural numbers $1,2,3,...$ and the morphisms from $n$ to $m$ are (weak) order preserving maps $f: \mathbb{Z} \longrightarrow \mathbb{Z}$ such that $f(x+n) = f(x)+m$.
\end{define}
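For instance, the endomorphisms of the object $n$ in ${\Lambda}_{\infty}$ are the (weak) order preserving maps $f: \mathbb{Z} \longrightarrow \mathbb{Z}$ with $f(x+n)=f(x)+n$; the invertible ones among them are exactly the translations $f(x)=x+c$ with $c \in \mathbb{Z}$, so every object of ${\Lambda}_{\infty}$ has automorphism group $\mathbb{Z}$. A typical non-invertible morphism from $1$ to $2$ is
$$ f: \mathbb{Z} \longrightarrow \mathbb{Z}, \qquad f(x)=2x, $$
which is order preserving and satisfies $f(x+1)=f(x)+2$.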
The category ${\Lambda}_{\infty}$ is a model for the universal fibration over the cyclic category, i.e., there is a left fibration ${\Lambda}_\infty \longrightarrow {\Lambda}$ (where ${\Lambda}$ is Connes' cyclic category) such that the fibers are connected groupoids with a single object having automorphism group $\mathbb{Z}$ (or in other words circles). In particular the category ${\Lambda}_{\infty}$ is known to be weakly contractible. See~\cite{kal} for a detailed introduction and proof (Lemma $4.8$).
Let ${\Lambda}^{\sur}_{\infty}$ be the subcategory of ${\Lambda}_\infty$ which contains all the objects and only \textbf{surjective} maps between them. It is not hard to verify explicitly that the map ${\Lambda}^{\sur}_\infty \longrightarrow {\Lambda}_\infty$ is cofinal and so ${\Lambda}^{\sur}_{\infty}$ is contractible as well. Now we claim that $\mathcal{F}_{S^1}$ is in fact equivalent to ${\Lambda}^{\sur}_{\infty}$.
Let ${\Lambda}^{\sur}_{\bg}$ be the category whose objects are linearly ordered sets $S$ with an order preserving automorphism ${\sigma}: S \longrightarrow S$ and whose morphisms are surjective order preserving maps which commute with the respective automorphisms. Then ${\Lambda}^{\sur}_{\infty}$ can be considered as a full subcategory of ${\Lambda}^{\sur}_{\bg}$ such that $n$ corresponds to the object $(\mathbb{Z},{\sigma}_n)$ where ${\sigma}_n: \mathbb{Z} \longrightarrow \mathbb{Z}$ is the automorphism $x \mapsto x+n$.
Now let $p:\mathbb{R} \longrightarrow S^1$ be the universal covering. We construct a functor $\mathcal{F}_{S^1} \longrightarrow {\Lambda}^{\sur}_{\bg}$ as follows: given an object
$$ f: M_1 \coprod ... \coprod M_n \hookrightarrow S^1 $$
of $\mathcal{F}_{S^1}$ consider the fiber product
$$ P = \left[M_1 \coprod ... \coprod M_n\right] \times_{S^1} \mathbb{R} $$
Note that $P$ is homeomorphic to an infinite union of segments and the projection
$$ P \longrightarrow \mathbb{R} $$
is injective (because $f$ is injective) giving us a well defined linear order on $P$. The automorphism ${\sigma}: \mathbb{R} \longrightarrow \mathbb{R}$ of $\mathbb{R}$ over $S^1$ given by $x \mapsto x + 1$ gives an order preserving automorphism $\wtl{{\sigma}}: P \longrightarrow P$.
Now suppose that $((M_1,...,M_n),f)$ and $((M_1',...,M_m'),g)$ are two objects and we have a morphism between them, i.e. an embedding
$$ T:M_1 \coprod ... \coprod M_n \longrightarrow M_1' \coprod ... \coprod M_m' $$
and an isotopy $\psi: g \circ T \sim f$. Then we see that the pair $(T,\psi)$ determines a well defined order preserving map
$$ \left[M_1 \coprod ... \coprod M_n\right] \times_{S^1} \mathbb{R} \longrightarrow \left[M_1' \coprod ... \coprod M_m'\right] \times_{S^1} \mathbb{R} $$
which commutes with the respective automorphisms. Clearly we obtain in this way a functor $u:\mathcal{F}_{S^1} \longrightarrow {\Lambda}^{\sur}_{\bg}$ whose essential image is the same as the essential image of ${\Lambda}^{\sur}_\infty$. It is also not hard to see that $u$ is fully faithful. Hence $\mathcal{F}_{S^1}$ is equivalent to ${\Lambda}^{\sur}_\infty$ which is weakly contractible. This finishes the proof of the theorem.
\end{proof}
Let
$$ \mathcal{A}lg^{\nd}_{\mathcal{OI} / \mathcal{B}^{\ev}_1}\left(\mathcal{F}_{{\varphi}}^{\otimes}\right) \subseteq \mathcal{A}lg_{\mathcal{OI} / \mathcal{B}^{\ev}_1}\left(\mathcal{F}_{{\varphi}}^{\otimes}\right) $$
denote the full sub $\infty$-groupoid corresponding to the full sub $\infty$-groupoid
$$ \mathcal{A}lg^{\nd}_{\mathcal{OF} / \mathcal{B}^{\ev}_1}\left(\mathcal{F}_{{\varphi}}^{\otimes}\right) \subseteq \mathcal{A}lg_{\mathcal{OF} / \mathcal{B}^{\ev}_1}\left(\mathcal{F}_{{\varphi}}^{\otimes}\right) $$
under the equivalence of Theorem~\ref{removing-circles}.
Now the last step of the cobordism hypothesis will be complete once we show the following:
\begin{lem}\label{final-lemma}
The $\infty$-groupoid
$$ \mathcal{A}lg^{\nd}_{\mathcal{OI} / \mathcal{B}^{\ev}_1}\left(\mathcal{F}_{{\varphi}}^{\otimes}\right) $$
is contractible.
\end{lem}
\begin{proof}
Let
$$ q: p^*\mathcal{F}_{{\varphi}} \longrightarrow \mathcal{OI}^{\otimes} $$
be the pullback of the left fibration $\mathcal{F}_{\varphi} \longrightarrow \mathcal{B}^{\ev}_1$ via the map $p: \mathcal{OI}^{\otimes} \longrightarrow \mathcal{B}^{\ev}_1$, so that $q$ is a left fibration as well. In particular, since $\mathcal{OI}^{\otimes}$ is the non-unital associative $\infty$-operad, we see that $q$ classifies an $\infty$-groupoid $q^{-1}(\mathcal{OI})$ with a non-unital monoidal structure. Unwinding the definitions one sees that this $\infty$-groupoid is the fundamental groupoid of the space
$$ \mathcal{M}ap_{\mathcal{D}}(1,{\varphi}(X_+) \otimes {\varphi}(X_-)) $$
where $X_+,X_- \in \mathcal{B}^{\ev}_1$ are the points with positive and negative orientations respectively. The monoidal structure sends a pair of maps
$$ f,f': 1 \longrightarrow {\varphi}(X_+) \otimes {\varphi}(X_-) $$
to the composition
$$ 1 \stackrel{f \otimes f'}{\longrightarrow} \left[{\varphi}(X_+) \otimes {\varphi}(X_-)\right] \otimes \left[{\varphi}(X_+) \otimes {\varphi}(X_-)\right] \stackrel{\simeq}{\longrightarrow} $$
$$ {\varphi}(X_+) \otimes \left[{\varphi}(X_-) \otimes {\varphi}(X_+)\right] \otimes {\varphi}(X_-) \stackrel{Id \otimes {\varphi}(\ev) \otimes Id}{\longrightarrow} {\varphi}(X_+) \otimes {\varphi}(X_-) $$
Since $\mathcal{D}$ has duals we see that this monoidal $\infty$-groupoid is equivalent to the fundamental $\infty$-groupoid of the space
$$ \mathcal{M}ap_{\mathcal{D}}({\varphi}(X_+),{\varphi}(X_+)) $$
with the monoidal product coming from \textbf{composition}.
Now
$$ \mathcal{A}lg_{\mathcal{OI} / \mathcal{B}^{\ev}_1}(\mathcal{F}_{{\varphi}}) \simeq \mathcal{A}lg_{\mathcal{OI} / \mathcal{OI}}(p^*\mathcal{F}_{{\varphi}}) $$
classifies $\mathcal{OI}^{\otimes}$-algebra objects in $p^*\mathcal{F}_{{\varphi}}$, i.e. non-unital algebra objects in
$$ \mathcal{M}ap_{\mathcal{D}}({\varphi}(X_+),{\varphi}(X_+)) $$
with respect to composition. The full sub $\infty$-groupoid
$$ \mathcal{A}lg^{\nd}_{\mathcal{OI} / \mathcal{B}^{\ev}_1}(\mathcal{F}_{{\varphi}}) \subseteq \mathcal{A}lg_{\mathcal{OI} / \mathcal{B}^{\ev}_1}(\mathcal{F}_{{\varphi}}) $$
will then classify non-unital algebra objects $A$ which correspond to \textbf{self equivalences}
$$ {\varphi}(X_+) \longrightarrow {\varphi}(X_+) $$
It is left to prove the following lemma:
\begin{lem}
Let $\mathcal{C}$ be an $\infty$-category. Let $X \in \mathcal{C}$ be an object and let $\mathcal{E}_X$ denote the $\infty$-groupoid of self equivalences $u: X \longrightarrow X$ with the monoidal product induced from composition. Then the $\infty$-groupoid of non-unital algebra objects in $\mathcal{E}_X$ is contractible.
\end{lem}
\begin{proof}
Let $\mathcal{A}ss_{\nun}$ denote the non-unital associative $\infty$-operad. The identity map $\mathcal{A}ss_{\nun} \longrightarrow \mathcal{A}ss_{\nun}$, which is in particular a left fibration of $\infty$-operads, classifies the terminal non-unital monoidal $\infty$-groupoid $\mathcal{A}$, which consists of a single automorphismless idempotent object $a \in \mathcal{A}$. The non-unital algebra objects in $\mathcal{E}_X$ are then classified by non-unital lax monoidal functors
$$ \mathcal{A} \longrightarrow \mathcal{E}_X $$
Since $\mathcal{E}_X$ is an $\infty$-groupoid this is the same as non-unital monoidal functors (without the lax)
$$ \mathcal{A} \longrightarrow \mathcal{E}_X $$
Now the forgetful functor from unital to non-unital monoidal $\infty$-groupoids has a left adjoint. Applying this left adjoint to $\mathcal{A}$ we obtain the $\infty$-groupoid $\mathcal{UA}$ with two automorphismless objects
$$ \mathcal{UA} = \{1,a\} $$
such that $1$ is the unit of the monoidal structure and $a$ is an idempotent object.
Hence we need to show that the $\infty$-groupoid of monoidal functors
$$ \mathcal{UA} \longrightarrow \mathcal{E}_X $$
is contractible. Now given a monoidal $\infty$-groupoid $\mathcal{G}$ we can form the $\infty$-category $\mathcal{B}(\mathcal{G})$ having a single object with endomorphism space $\mathcal{G}$ (the monoidal structure on $\mathcal{G}$ will then give the composition structure). This construction determines a fully faithful functor from the $\infty$-category of monoidal $\infty$-groupoids to the $\infty$-category of pointed $\infty$-categories (see~\cite{lur1} Remark $4.4.6$ for a much more general statement). In particular it will be enough to show that the $\infty$-groupoid of \textbf{pointed functors}
$$ \mathcal{B}(\mathcal{UA}) \longrightarrow \mathcal{B}(\mathcal{E}_X) $$
is contractible. Since $\mathcal{B}(\mathcal{E}_X)$ is an $\infty$-groupoid it will be enough to show that $\mathcal{B}(\mathcal{UA})$ is weakly contractible.
Now the nerve $\N\mathcal{B}(\mathcal{UA})$ of $\mathcal{B}(\mathcal{UA})$ is the simplicial set in which for each $n$ there exists a single \textbf{non-degenerate} $n$-simplex ${\sigma}_n \in \N\mathcal{B}(\mathcal{UA})_n$ such that $d_i({\sigma}_n) = {\sigma}_{n-1}$ for all $i=0,...,n$. By the van Kampen theorem it follows that $\N\mathcal{B}(\mathcal{UA})$ is simply connected and by direct computation all the homology groups vanish.
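Explicitly, spelling out this computation: the normalized chain complex of $\N\mathcal{B}(\mathcal{UA})$ has a single generator ${\sigma}_n$ in each degree $n \geq 0$, and since $d_i({\sigma}_n)={\sigma}_{n-1}$ for all $i$ the differential is
$$ \partial_n({\sigma}_n)=\sum_{i=0}^{n}(-1)^i {\sigma}_{n-1}=
\begin{cases} {\sigma}_{n-1} & n \text{ even}, \\ 0 & n \text{ odd}, \end{cases} $$
so the complex takes the form $\cdots \longrightarrow \mathbb{Z} \stackrel{\cong}{\longrightarrow} \mathbb{Z} \stackrel{0}{\longrightarrow} \mathbb{Z} \stackrel{\cong}{\longrightarrow} \mathbb{Z} \stackrel{0}{\longrightarrow} \mathbb{Z}$, whose homology is $\mathbb{Z}$ in degree $0$ and vanishes in all positive degrees.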
\end{proof}
This finishes the proof of Lemma~\ref{final-lemma}.
\end{proof}
This finishes the proof of Theorem~\ref{qu-cobordism}.
\end{proof}
\section{ From Quasi-Unital to Unital Cobordism Hypothesis }\label{s-from-qu-to-regular}
In this section we will show how the quasi-unital cobordism hypothesis (Theorem~\ref{qu-cobordism}) implies the last step in the proof of the $1$-dimensional cobordism hypothesis (Theorem~\ref{cobordism-last-step-2}).
Let $M: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{G}rp_{\infty}$ be a non-degenerate lax symmetric monoidal functor. We can construct a pointed \textbf{non-unital} symmetric monoidal $\infty$-category $\mathcal{C}_M$ as follows:
\begin{enumerate}
\item
The objects of $\mathcal{C}_M$ are the objects of $\mathcal{B}^{\ev}_1$. The marked point is the object $X_+$.
\item
Given a pair of objects $X, Y \in \mathcal{C}_M$ we define
$$ \mathcal{M}ap_{\mathcal{C}_M}(X, Y) = M(\check{X} \otimes Y) $$
Given a triple of objects $X, Y, Z \in \mathcal{C}_M$ the composition law $$ \mathcal{M}ap_{\mathcal{C}_M}(X, Y) \times \mathcal{M}ap_{\mathcal{C}_M}(Y,Z) \longrightarrow \mathcal{M}ap_{\mathcal{C}_M}(X,Z) $$
is given by the composition
$$ M(\check{X} \otimes Y) \times M(\check{Y} \otimes Z) \longrightarrow M(\check{X} \otimes Y \otimes \check{Y} \otimes Z) \longrightarrow M(\check{X} \otimes Z) $$
where the first map is given by the lax symmetric monoidal structure on the functor $M$ and the second is induced by the evaluation map
$$ \ev_Y : \check{Y} \otimes Y \longrightarrow 1 $$
in $\mathcal{B}^{\ev}_1 $.
\item
The symmetric monoidal structure is defined in a straightforward way using the lax monoidal structure of $M$.
\end{enumerate}
It is not hard to see that if $M$ is non-degenerate then $\mathcal{C}_M$ is \textbf{quasi-unital}, i.e. each object contains a morphism which \textbf{behaves} like an identity map (see~\cite{har}). This construction determines a functor
$$ G: \mathcal{F}un_{\nd}^{\lax}(\mathcal{B}^{\ev}_1,\mathcal{G}rp_{\infty}) \longrightarrow \mathcal{C}at^{\qu,\otimes}_{\mathcal{B}^{\un}_0 /} $$
where $\mathcal{C}at^{\qu,\otimes}$ is the $\infty$-category of symmetric monoidal quasi-unital categories (i.e. commutative algebra objects in the $\infty$-category $\mathcal{C}at^{\qu}$ of quasi-unital $\infty$-categories). In~\cite{har} it is proved that the forgetful functor
$$ S:\mathcal{C}at \longrightarrow\mathcal{C}at^{\qu} $$
from $\infty$-categories to quasi-unital $\infty$-categories is an \textbf{equivalence} and so the forgetful functor
$$ S^{\otimes}:\mathcal{C}at^{\otimes} \longrightarrow \mathcal{C}at^{\qu,\otimes} $$
is an equivalence as well.
Now recall that
$$ \mathcal{C}at^{\sur}_{\mathcal{B}^{\ev}_1 /} \subseteq \mathcal{C}at^{\nd}_{\mathcal{B}^{\ev}_1 /} $$
is the full subcategory spanned by essentially surjective functors ${\varphi}: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{C}$. The fiber functor construction ${\varphi} \mapsto M_{\varphi}$ induces a functor
$$ F: \mathcal{C}at^{\sur}_{\mathcal{B}^{\ev}_1 /} \longrightarrow \mathcal{F}un_{\nd}^{\lax}(\mathcal{B}^{\ev}_1,\mathcal{G}rp_{\infty}) $$
The composition $G \circ F$ gives a functor
$$ \mathcal{C}at^{\sur}_{\mathcal{B}^{\ev}_1 / } \longrightarrow \mathcal{C}at^{\qu,\otimes}_{\mathcal{B}^{\un}_0 /} $$
We claim that $G \circ F$ is in fact \textbf{equivalent} to the composition
$$ \mathcal{C}at^{\sur}_{\mathcal{B}^{\ev}_1 / } \stackrel{T}{\longrightarrow} \mathcal{C}at^{\otimes}_{\mathcal{B}^{\un}_0 / } \stackrel{S}{\longrightarrow} \mathcal{C}at^{\qu,\otimes}_{\mathcal{B}^{\un}_0 / } $$
where $T$ is given by the restriction along $X_+:\mathcal{B}^{\un}_0 \hookrightarrow \mathcal{B}^{\ev}_1$ and $S$ is the forgetful functor.
Explicitly, we will construct a natural transformation
$$ N:G \circ F \stackrel{\simeq}{\longrightarrow} S \circ T $$
In order to construct $N$ we need to construct for each non-degenerate functor ${\varphi}: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D}$ a natural pointed functor
$$ N_{\varphi}: \mathcal{C}_{M_{\varphi}} \longrightarrow \mathcal{D} $$
The functor $N_{\varphi}$ will map the objects of $\mathcal{C}_{M_{\varphi}}$ (which are the objects of $\mathcal{B}^{\ev}_1$) to $\mathcal{D}$ via ${\varphi}$. Then for each $X,Y \in \mathcal{B}^{\ev}_1$ we can map the morphisms
$$ \mathcal{M}ap_{\mathcal{C}_{M_{{\varphi}}}}(X,Y) = \mathcal{M}ap_{\mathcal{D}}(1,{\varphi}(\check{X} \otimes Y)) \longrightarrow \mathcal{M}ap_{\mathcal{D}}({\varphi}(X),{\varphi}(Y)) $$
via the duality structure - to a morphism $f: 1 \longrightarrow {\varphi}(\check{X} \otimes Y) \simeq {\varphi}(\check{X}) \otimes {\varphi}(Y)$ one associates the morphism $\what{f}: {\varphi}(X) \longrightarrow {\varphi}(Y)$ given as the composition
$$ {\varphi}(X) \stackrel{Id \otimes f}{\longrightarrow} {\varphi}(X) \otimes {\varphi}(\check{X}) \otimes {\varphi}(Y) \stackrel{{\varphi}(\ev_X) \otimes Id}{\longrightarrow} {\varphi}(Y) $$
Since $\mathcal{D}$ has duals we get that $N_{\varphi}$ is fully faithful and since we have restricted to essentially surjective ${\varphi}$ we get that $N_{\varphi}$ is essentially surjective. Hence $N_{\varphi}$ is an equivalence of quasi-unital symmetric monoidal $\infty$-categories and $N$ is a natural equivalence of functors.
In particular we have a homotopy commutative diagram:
$$ \xymatrix{
& \mathcal{C}at^{\sur}_{\mathcal{B}^{\ev}_1 / } \ar_{F}[dl] \ar^{T}[dr] & \\
\mathcal{F}un_{\nd}^{\lax}(\mathcal{B}^{\ev}_1,\mathcal{G}rp_{\infty}) \ar_{G}[dr] & & \mathcal{C}at^{\otimes}_{\mathcal{B}^{\un}_0 /} \ar^{S}[dl] \\
& \mathcal{C}at^{\qu,\otimes}_{\mathcal{B}^{\un}_0 /} & \\
}$$
Now from Lemma~\ref{0-to-1-ev} we see that $T$ is fully faithful. Since $S$ is an equivalence of $\infty$-categories we get
\begin{cor}\label{retract}
The functor $G \circ F$ is fully faithful.
\end{cor}
We are now ready to complete the proof of Theorem~\ref{cobordism-last-step-2}. Let $\mathcal{D}$ be a symmetric monoidal $\infty$-category with duals and let ${\varphi}: \mathcal{B}^{\ev}_1 \longrightarrow \mathcal{D}$ be an essentially surjective non-degenerate functor. We wish to show that the space of maps
$$ \mathcal{M}ap_{\mathcal{C}at^{\sur}_{\mathcal{B}^{\ev}_1 /}}(\iota,{\varphi}) $$
is contractible. Consider the sequence
$$ \mathcal{M}ap_{\mathcal{C}at^{\sur}_{\mathcal{B}^{\ev}_1 /}}(\iota,{\varphi}) \longrightarrow \mathcal{M}ap_{\mathcal{F}un_{\nd}^{\lax}(\mathcal{B}^{\ev}_1,\mathcal{G}rp_{\infty})}(M_\iota,M_{\varphi}) \longrightarrow
\mathcal{M}ap_{\mathcal{C}at^{\qu,\otimes}_{\mathcal{B}^{\un}_0 /}}(\mathcal{B}^{\ori}_1,\mathcal{D}) $$
By Theorem~\ref{qu-cobordism} the middle space is contractible and by Corollary~\ref{retract} the composition
$$ \mathcal{M}ap_{\mathcal{C}at^{\sur}_{\mathcal{B}^{\ev}_1 /}}(\iota,{\varphi}) \longrightarrow \mathcal{M}ap_{\mathcal{C}at^{\qu,\otimes}_{\mathcal{B}^{\un}_0 /}}(\mathcal{B}^{\ori}_1,\mathcal{D}) $$
is a weak equivalence. Hence we get that
$$ \mathcal{M}ap_{\mathcal{C}at^{\sur}_{\mathcal{B}^{\ev}_1 /}}(\iota,{\varphi}) $$
is contractible. This completes the proof of Theorem~\ref{cobordism-last-step-2}.
\end{document}
\begin{document}
\title{\LARGE \bf
Robust output regulation of $2 \times 2$ hyperbolic systems part I: Control law and Input-to-State Stability\\
}
\thispagestyle{empty}
\pagestyle{empty}
\begin{abstract}
We consider the problem of output feedback regulation for a linear first-order hyperbolic system with collocated input and output in the presence of a general class of disturbances and noise. The proposed control law is designed through a backstepping approach incorporating an integral action. To ensure robustness to delays, the controller only cancels part of the boundary reflection by means of a tunable parameter. This also enables a trade-off between disturbance and noise sensitivity. We show that the boundary condition of the obtained target system can be transformed into a Neutral Differential Equation (NDE) and that this latter system is Input-to-State Stable (ISS). This proves the boundedness of the controlled output for the target system. This extends previous work that considered an integral action for this kind of system~\cite{LDM16}, and constitutes an important step towards practical implementation of such controllers. Applications and practical considerations, in particular regarding the system's sensitivity functions, are derived in a companion paper.
\end{abstract}
\section{Introduction}
\label{sec:introduction}
In this paper, we solve the problem of output feedback regulation for a system composed of two linear hyperbolic PDEs with collocated boundary input and output in the presence of disturbances and measurement noise. The proposed controller combines a backstepping approach with an integral action. The resulting closed-loop system is proved to be Input-to-State Stable (ISS). This paper extends the results stated in~\cite{LDM16}, where uncorrupted anti-collocated measurements were considered in the presence of static disturbances.
A large number of physical networks may be represented by hyperbolic systems. Among them we can cite hydraulic networks~\cite{Bastin2011,DSBCAN08}, road traffic networks~\cite{FHS14}, oil well drilling~\cite{A13,DMBPA14} and gas pipeline networks~\cite{GDL11}. Due to the importance of such applications, a large number of results concerning their control have emerged over the last decade. Among the different challenges, the disturbance rejection problem has been recently considered in~\cite{A13,AA15,D16,D17,DSBCAN08,LBL15,TK14}. In~\cite{A13,AA15}, the rejection of a perturbation affecting the uncontrolled boundary side of a $2 \times 2$ linear hyperbolic system is solved using a backstepping approach. In~\cite{LBL15}, a proportional-integral controller is introduced to ensure the stabilization of a reference trajectory. An integral action is considered in~\cite{DSBCAN08} to ensure output disturbance rejection, and its effectiveness is validated on experimental data. In~\cite{TK14}, a sliding mode control approach is used to reject a boundary time-varying input disturbance.
The main contribution of this paper is to solve the problem of output disturbance rejection for a $2 \times 2$ first-order hyperbolic system with collocated boundary input and output.
Besides, the class of disturbances considered in this paper, namely bounded signals, is more general than the one proposed in~\cite{D16,D17} in which the disturbance signal is generated by an exosystem of finite dimension, or than the smooth disturbances considered in~\cite{LBL15,LDM16}.
Our approach is the following. Similarly to~\cite{LDM16}, the original system is mapped to a simple target system to which an integral term is added. The disturbances are incorporated into the target system. To prove that the resulting target system is ISS with respect to perturbations and noise, we show that the output satisfies a Neutral Differential Equation (NDE). Using existing results on such systems, the ISS property is finally obtained.
The paper is organized as follows. The original disturbed system and the notations are introduced in Section~\ref{sec:pb_desciption}. In Section~\ref{sec:output-regulation}, we present the stabilization result: using a backstepping transformation, the original system is mapped to a target system in which the in-domain couplings are removed. The control law is then designed. The resulting closed-loop system can be rewritten as a neutral delay equation, which is proved to be ISS with respect to the noise and the disturbances. With a view towards practical application, an observer-based controller is introduced in Section~\ref{sec:boundary-observer}. In Section~\ref{sec:feedb-outp-regul} we prove that the resulting output feedback control law still stabilizes the output. Moreover, it is shown that static disturbances are completely rejected. This result has already been proved in~\cite{LDM16} in the case of an uncorrupted measurement and for anti-collocated input and output.
\section{Problem Description}
\label{sec:pb_desciption}
We consider the following system
\begin{align}
\label{eq:perturbed_system_1} u_t(t,x) + \lambda(x)
u_x(t,x) & = \gamma_1(x) v(t,x) + d_1(t)m_1(x) \\
\label{eq:perturbed_system_2}
v_t(t,x) -\mu(x) v_x(t,x) & = \gamma_2(x) u(t,x) + d_2(t)m_2(x) \, ,
\end{align}
under the boundary conditions
\begin{align}
\label{eq:perturbed_system_3} u(t,0) & = q v(t,0)
+ d_3(t) \\
\label{eq:perturbed_system_4}
v(t,1) & = \rho u(t,1) + U(t) + d_4(t) \, ,
\end{align}
where~$t \in \left[0,+\infty\right)$ is the time variable,~$x \in
\left[0,1\right]$ is the space variable,~$q\neq0$ is a constant parameter, and~$U$ is the control
input. The initial conditions~$u^0(x)=u(0,x)$ and~$v^0(x)=v(0,x)$ are assumed to be bounded and therefore in~$L^\infty((0,1);\mathbb{R})$. We make the following assumption on the velocities~$\lambda$ and~$\mu$ and on the in-domain-coupling terms~$\gamma_1$ and~$\gamma_2$.
\begin{assum}
\label{assum:function_regularity}
The functions~$\lambda$,~$\mu:[0,1]\rightarrow \mathbb{R}$ are Lipschitz-continuous and satisfy~$\lambda(x)$,~$\mu(x)>0$, for all~${x\in[0,1]}$. The functions~$\gamma_1$,~$\gamma_2$ belong to~$C^1([0,1];\mathbb{R})$. The product of the distal reflection $q$ with the proximal reflection $\rho$ is assumed to be strictly lower than one to ensure delay-robustness \cite{A17}.
\end{assum}
The functions~$d_1$ and $d_2$ correspond to disturbances acting on the
right-hand side of~\eqref{eq:perturbed_system_1} and
\eqref{eq:perturbed_system_2}. The locations of these distributed
disturbances are given by the unknown functions~$m_1$ and~$m_2$. The
functions~$d_3$ and~$d_4$ correspond to disturbances acting on the
right-hand side of~(\ref{eq:perturbed_system_3}) and (\ref{eq:perturbed_system_4}), respectively.
Moreover, we assume that the measured output is also subject to an unknown noise~$n(t)$
\begin{equation}
y_m(t) = u(t,1) + n(t) \, .
\end{equation}
The aim of this paper is to regulate the output
\begin{equation}
y(t) = u(t,1) \, .
\end{equation}
Let us state the following assumption on the disturbances.
\begin{assum}
\label{assum:disturbance_regularity}
The disturbances~$d_i$,~$i=1,\dots,4$, are in~$W^{2,\infty}\left((0,\infty);\mathbb{R}\right)$, the noise~$n$ is assumed to be in~$L^\infty((0,\infty);\mathbb{R})$, and the disturbance input locations~$m_1$ and~$m_2$ are in~$C\left([0,1];\mathbb{R}^+\right)$.
\end{assum}
Under these two assumptions, using the characteristics method and classical fixed-point arguments, we have the following result (see e.g.~\cite{B00}).
\begin{theorem}
\label{theo:wellposedness}
Under Assumptions~\ref{assum:function_regularity} and~\ref{assum:disturbance_regularity} system~\eqref{eq:perturbed_system_1}--\eqref{eq:perturbed_system_4} admits a unique solution in~$C\left(\left[0,\infty\right);L^\infty\left((0,1);\mathbb{R}^2\right) \cap L^1\left((0,1);\mathbb{R}^2\right)\right)$.
\end{theorem}
We denote by~$E'$ the set of bounded functions~$y : [0,1] \rightarrow \mathbb{R}^2$, so that~$E' \subset L^\infty((0,1);\mathbb{R}^2)$, and we let $E:=E'\times \mathbb{R}$. The notation~$\left\lVert y \right\rVert_{E'}$ refers to~$\left\lVert y \right\rVert_{L^\infty((0,1);\mathbb{R}^2)}$ and for $z=\left(z_1,z_2,z_3\right)\in E' \times \mathbb{R}$, $\left\lVert z \right\rVert_E = \left\lVert \left(z_1,z_2\right) \right\rVert_{E'} + \left|z_3\right|$.
\section{Output Regulation}
\label{sec:output-regulation}
To achieve output regulation we choose to design a controller combining a backstepping controller $U_{BS}$ and an integrator term $k_I \eta$, namely
\begin{align}
\label{eq:Controller}
U(t) & = U_{BS}(t) + k_I \eta(t) \\
\label{eq:eta_dot}
\dot{\eta} (t) & = y_m(t) \, .
\end{align}
In what follows, we design~$U_{BS}$ and~$k_I$ to perform output
regulation. We make the assumption of full-state measurement. In the next section, using a backstepping transformation, we map the original system~\mbox{\eqref{eq:perturbed_system_1}--\eqref{eq:perturbed_system_4}} to a simple target system from which the in-domain couplings have been removed.
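For illustration purposes only, the overall structure of the control law~\eqref{eq:Controller}--\eqref{eq:eta_dot} can be sketched in discrete time as follows. This minimal sketch assumes an explicit Euler discretization of the integrator; the function \texttt{u\_bs} is a placeholder for the backstepping part $U_{BS}$ designed below, and the numerical values in the usage comment are purely illustrative.
\begin{verbatim}
# Minimal sketch (assumed explicit Euler discretization of the integral
# action); u_bs is a placeholder for the backstepping part U_BS.
def make_controller(k_I, dt, u_bs):
    state = {"eta": 0.0}               # integrator state eta(t)
    def control(t, y_m):
        state["eta"] += dt * y_m       # Euler step for eta'(t) = y_m(t)
        return u_bs(t) + k_I * state["eta"]
    return control

# usage: control = make_controller(k_I=-0.5, dt=1e-3, u_bs=lambda t: 0.0)
\end{verbatim}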
\subsection{Backstepping Transformation and Target System}
Let us consider the backstepping transformation $\Gamma_1[(u,v)(t)](\cdot) =
\alpha(t,\cdot)$ and ${\Gamma_2[(u,v)(t)](\cdot) = \beta(t,\cdot)}$ defined by
\begin{align}
\alpha(t,x) & = u(t,x) - \int_0^x K^{uu}(x,\xi)u(t,\xi) d\xi \nonumber \\
& \hphantom{=} - \int_0^xK^{uv}(x,\xi)v(t,\xi)d\xi \label{transfo_11}\\
\beta(t,x) & = v(t,x) - \int_0^x K^{vu}(x,\xi)u(t,\xi)d\xi \nonumber \\
& \hphantom{=} -\int_0^xK^{vv}(x,\xi)v(t,\xi)d\xi \, , \label{transfo_12}
\end{align}
where the kernels~$K^{uu}, K^{uv}, K^{vu}$, and~$K^{vv}$ are defined in~\cite{CVKB13} in~$L^\infty(\mathcal{T})$, where~$\mathcal{T}=\{(x,\xi)\in [0,1]^2 |\quad \xi \leq x\}$.
We recall the following lemma
\begin{lemma}[~\cite{CVKB13}]
The transformation~\eqref{transfo_11}--\eqref{transfo_12} is invertible and the inverse transformation can be expressed as follows
\begin{align}
u(t,x)& = \alpha(t,x) + \int^x_0 L^{\alpha\alpha}(x,\xi)\alpha(t,\xi)d\xi \nonumber \\
& \hphantom{=} + \int^x_0L^{\alpha\beta}(x,\xi)\beta(t,\xi)d\xi\label{back_inv1} \\
v(t,x)& =\beta(t,x) + \int^x_0 L^{\beta\alpha}(x,\xi)\alpha(t,\xi)d\xi \nonumber \\
& \hphantom{=} + \int^x_0 L^{\beta\beta}(x,\xi)\beta(t,\xi)d\xi \, , \label{back_inv2}
\end{align}
where~$L^{\alpha\alpha}$,~$L^{\alpha\beta}$,~$L^{\beta\alpha}$, and~$L^{\beta\beta}$ belong to~$L^{\infty}(\mathcal{T})$.
\end{lemma}
The transformation \eqref{transfo_11}-\eqref{transfo_12} maps the original system~\mbox{\eqref{eq:perturbed_system_1}--\eqref{eq:perturbed_system_4}} to the following target system
\begin{align}
& \alpha_t + \lambda(x)\alpha_x = \mathcal{D}_1(t)M_1(x) \label{target_1} \\
& \beta_t - \mu(x)\beta_x = \mathcal{D}_2(t)M_2(x) \, ,
\end{align}
with the boundary conditions
\begin{align}
\alpha(t,0) & = q \beta(t,0) + d_3(t) \\
\beta(t,1) & = \rho\int^1_0\left(L^{\alpha\alpha}(1,\xi)\alpha(t,\xi)
+ L^{\alpha\beta}(1,\xi)\beta(t,\xi)\right)d\xi \nonumber \\
& \hphantom{=} - \int_0^1 \left(L^{\beta\alpha}(1,\xi)\alpha(t,\xi)+L^{\beta\beta}(1,\xi)\beta(t,\xi)\right)d\xi \nonumber \\
& \hphantom{=} +\rho \alpha(t,1) + U_{BS}(t)+ k_I \eta(t) + d_4(t) \, , \label{target_BC}
\end{align}
where
\begin{align}
\dot{\eta}(t)& = \alpha(t,1)+n(t) \nonumber \\
& \hphantom{=} + \int^1_0(L^{\alpha\alpha}(1,\xi)\alpha(t,\xi)+L^{\alpha\beta}(1,\xi)\beta(t,\xi))d\xi \, , \label{eq_eta}
\end{align}
with
\begin{align}
\mathcal{D}_1(t)M_1(x) & = d_1(t)m_1(x)-K^{uu}(x,0)\lambda(0)d_3(t) \nonumber \\
& \hphantom{=} -\int_0^x K^{uu}(x,\xi)d_1(t)m_1(\xi)d\xi \nonumber \\
& \hphantom{=} - \int_0^x K^{uv}(x,\xi)d_2(t)m_2(\xi) d\xi
\end{align}
\begin{align}
\mathcal{D}_2(t)M_2(x)& = d_2(t)m_2(x)-K^{vu}(x,0)\lambda(0)d_3(t) \nonumber \\
& \hphantom{=} -\int_0^x K^{vu}(x,\xi)d_1(t)m_1(\xi)d\xi \nonumber \\
& \hphantom{=} -\int_0^x K^{vv}(x,\xi)d_2(t)m_2(\xi)d\xi \, .
\end{align}
Note that if~$\dot{\eta}$ converges to zero and if~$n(t)=0$, then~$u(t,1)$ converges to 0, due to \eqref{back_inv1}. Unconventionally, we define the control law $U_{BS}$ in terms of the variables of the target system~$\alpha$ and $\beta$ as
\begin{align}
U_{BS}(t)& = -\tilde{\rho} \alpha(t,1) \nonumber \\
& \hphantom{=}-\rho \int^1_0\left(L^{\alpha\alpha}(1,\xi)\alpha(t,\xi)+L^{\alpha\beta}(1,\xi)\beta(t,\xi)\right)d\xi \nonumber \\
& \hphantom{=} + \int_0^1 \left(L^{\beta\alpha}(1,\xi)\alpha(t,\xi)+L^{\beta\beta}(1,\xi)\beta(t,\xi)\right)d\xi \nonumber \\
&\hphantom{=} -k_I\int_0^1 \left(l_1(\xi) \alpha(t,\xi) + l_2(\xi) \beta(t,\xi)\right)d\xi, \label{eq:U_2}
\end{align}
where the tuning parameter $\tilde{\rho}$ satisfies
\begin{align}
|\rho q|+|\tilde{\rho} q|<1,
\end{align}
such a choice being possible since $\rho q<1$. The functions~$l_1$ and~$l_2$ on the interval~$[0,1]$ are defined as the solution of the system
\begin{align}
& (l_1(x)\lambda(x))'=L^{\alpha\alpha}(1,x) \label{eq_l1}\\
& (l_2(x)\mu(x))'=-L^{\alpha\beta}(1,x) \, \label{eq_l2},
\end{align}
with the boundary conditions
\begin{align}
l_2(1)=0 \, , \qquad l_1(0)=\frac{\mu(0)}{q\lambda(0)}l_2(0). \label{bound_l}
\end{align}
This control law is composed of two parts that have two distinct effects. The first one (made of the first three lines) corresponds to the control law derived in~\cite{A17}. It would stabilize the original system in the absence of disturbances and of the integral term $k_I\eta(t)$. Note that the purpose of the term $-\tilde{\rho}\alpha(t,1)$ is to avoid a complete cancellation of the proximal reflection and thus to guarantee some delay-robustness~\cite{A17}. The second part of the control law (made of the last line of~\eqref{eq:U_2}) is related to the integral action. In order to ensure the existence of a solution to~\eqref{eq_l1}--\eqref{bound_l}, we make the following assumption
\begin{assum}
\label{assum:condition_neq_L}
\begin{align}
1+\int_0^1 L^{\alpha \alpha}(1,\xi)d\xi +\frac{1}{q}\int_0^1 L^{\alpha \beta}(1,\xi)d\xi \ne 0\label{funda_eq} \, .
\end{align}
\end{assum}
Unfortunately, this assumption has no physical interpretation.
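To make the construction of~$l_1$ and~$l_2$ concrete, the following minimal numerical sketch integrates~\eqref{eq_l1}--\eqref{eq_l2} by quadrature with the boundary conditions~\eqref{bound_l} and evaluates the quantity appearing in Assumption~\ref{assum:condition_neq_L}. The inputs \texttt{Laa} and \texttt{Lab} (vectorized samples of $L^{\alpha\alpha}(1,\cdot)$ and $L^{\alpha\beta}(1,\cdot)$) and the velocity profiles are placeholders that must be supplied by the kernel computation.
\begin{verbatim}
# Hedged sketch: quadrature for l_1, l_2 and numerical check of the
# non-degeneracy condition above.  Laa, Lab, lam, mu are assumed to be
# vectorized callables on [0,1] (placeholders).
import numpy as np

def solve_l1_l2(Laa, Lab, lam, mu, q, n=2000):
    x = np.linspace(0.0, 1.0, n)
    laa, lab = Laa(x), Lab(x)
    # (l2*mu)' = -Lab with l2(1) = 0   =>   l2(x)*mu(x) = int_x^1 Lab
    cum_lab = np.concatenate(([0.0],
        np.cumsum(0.5 * (lab[1:] + lab[:-1]) * np.diff(x))))
    l2 = (cum_lab[-1] - cum_lab) / mu(x)
    # (l1*lam)' = Laa with l1(0) = mu(0) * l2(0) / (q * lam(0))
    cum_laa = np.concatenate(([0.0],
        np.cumsum(0.5 * (laa[1:] + laa[:-1]) * np.diff(x))))
    l1 = (mu(0.0) / q * l2[0] + cum_laa) / lam(x)
    # non-degeneracy: 1 + int_0^1 Laa + (1/q) int_0^1 Lab must be nonzero
    condition = 1.0 + cum_laa[-1] + cum_lab[-1] / q
    return x, l1, l2, condition
\end{verbatim}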
Using equations~\eqref{back_inv1}--\eqref{back_inv2}, one can write the control law~\eqref{eq:U_2} in terms of the original variables $u$ and $v$.
In the next sections, we prove that this control law ensures output regulation. We first investigate a pseudo-steady state of the closed loop system.
\subsection{Pseudo-steady state}
In this section, we consider a pseudo-steady state of the target system~\eqref{target_1}--\eqref{eq_eta} in the presence of the control law~\eqref{eq:U_2}, which corresponds to~$u^{ss}(t,1)=\alpha^{ss}(t,1)+\int_0^1 L^{\alpha \alpha}(1,\xi) \alpha^{ss}(t,\xi)d\xi +\int_0^1 L^{\alpha \beta}(1,\xi) \beta^{ss}(t,\xi) d\xi=0$. We then derive the error system, i.e., the difference between the actual state and this pseudo-steady state. This pseudo-steady state is defined by
\begin{align}
\frac{d}{dx}\begin{pmatrix} \alpha^{ss}(t,x) \\ \beta^{ss}(t,x) \end{pmatrix}= \begin{pmatrix} \frac{\mathcal{D}_1(t)M_1(x)}{\lambda(x)} \\ -\frac{\mathcal{D}_2(t)M_2(x)}{\mu(x)} \end{pmatrix} \label{steady_eq}
\end{align}
along with the initial conditions
\begin{align}
\beta^{ss}(t,0) & =\frac{1}{q}(\alpha^{ss}(t,0)-d_3(t)) \label{steady_boun1}\\
\alpha^{ss}(t,1) & =-\int_0^1 L^{\alpha \alpha}(1,\xi) \alpha^{ss}(t,\xi)d\xi \nonumber \\
& \hphantom{=} -\int_0^1 L^{\alpha \beta}(1,\xi) \beta^{ss}(t,\xi) d\xi \, .\label{steady_boun2}
\end{align}
We have the following lemma regarding the existence of a solution to
the ODE~\eqref{steady_eq},~\eqref{steady_boun1}, and~\eqref{steady_boun2}.
\begin{lemma}
\label{lem:existence_ss}
If equation~\eqref{funda_eq} holds, the ordinary differential
equation~\eqref{steady_eq} with boundary
conditions~\eqref{steady_boun1} and~\eqref{steady_boun2} has a unique
solution. Moreover, for every $x \in [0,1]$ one has
$\alpha^{ss}(\cdot,x)$ and $\beta^{ss}(\cdot,x)$ in~$W^{2,\infty}\left((0,\infty);\mathbb{R}\right)$.
\end{lemma}
\begin{proof}
Let us define the matrix~$A_1$ by
\begin{align}
A_1=\begin{pmatrix}
1+\int_0^1 L^{\alpha \alpha}(1,\xi)d\xi & \int_0^1 L^{\alpha \beta}(1,\xi)d\xi \\ -\frac{1}{q} & 1
\end{pmatrix}.
\end{align}
Due to~\eqref{funda_eq}, this matrix is invertible. We then define~${a = \begin{pmatrix}
a_1 & a_2
\end{pmatrix}^\top}$ by $a = A_1^{-1} b$
with~$b = \begin{pmatrix}b_1 & b_2\end{pmatrix}^\top$ where
\begin{align}
b_1 & = \int_0^1 L^{\alpha \alpha}(1,\xi) \int_\xi^1 \frac{\mathcal{D}_1(t)M_1(\nu)}{\lambda(\nu)}d\nu d\xi \nonumber \\
& \hphantom{=} + \int_0^1L^{\beta \alpha}(1,\xi) \int_0^\xi \frac{\mathcal{D}_2(t)M_2(\nu)}{\mu(\nu)}d\nu d\xi \\
b_2 & = -\frac{d_3(t)}{q}-
\int_0^1\frac{\mathcal{D}_1(t)M_1(\xi)}{q\lambda(\xi)}d\xi \, .
\end{align}
One can then check that the function
\begin{align}
\begin{pmatrix}\alpha^{ss}(t,x) \\ \beta^{ss}(t,x) \end{pmatrix}=\begin{pmatrix}
a_1-\int_x^1 \frac{\mathcal{D}_1(t)M_1(\xi)}{\lambda(\xi)}d\xi \\ a_2 -\int_0^x \frac{\mathcal{D}_2(t)M_2(\xi)}{\mu(\xi)}d\xi
\end{pmatrix},
\end{align}
is a solution of~\eqref{steady_eq} with the boundary
conditions~\eqref{steady_boun1} and \eqref{steady_boun2}. This concludes the proof of Lemma~\ref{lem:existence_ss}.
\end{proof}
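As an illustration of the construction used in the proof, the following sketch evaluates the pseudo-steady state profiles on a grid. All inputs (grid samples of $L^{\alpha\alpha}(1,\cdot)$, $L^{\alpha\beta}(1,\cdot)$, $L^{\beta\alpha}(1,\cdot)$, the frozen disturbance terms $\mathcal{D}_1(t)M_1$, $\mathcal{D}_2(t)M_2$, the velocities and $d_3(t)$) are placeholders and are not part of the analysis.
\begin{verbatim}
# Hedged sketch: pseudo-steady state profiles evaluated on a grid x.
# Laa, Lab, Lba: samples of L^{aa}(1,.), L^{ab}(1,.), L^{ba}(1,.);
# D1M1, D2M2: disturbance terms frozen at a given time; lam, mu: velocities.
import numpy as np

def pseudo_steady_state(x, Laa, Lab, Lba, D1M1, D2M2, lam, mu, q, d3):
    dxs = np.diff(x)
    def quad(y):                                   # trapezoidal rule on x
        return float(np.sum(0.5 * (y[1:] + y[:-1]) * dxs))
    c1 = np.concatenate(([0.0], np.cumsum((D1M1 / lam)[:-1] * dxs)))  # int_0^x D1M1/lam
    c2 = np.concatenate(([0.0], np.cumsum((D2M2 / mu)[:-1] * dxs)))   # int_0^x D2M2/mu
    tail1 = c1[-1] - c1                                               # int_x^1 D1M1/lam
    b1 = quad(Laa * tail1) + quad(Lba * c2)
    b2 = -d3 / q - c1[-1] / q
    A1 = np.array([[1.0 + quad(Laa), quad(Lab)],
                   [-1.0 / q, 1.0]])
    a1, a2 = np.linalg.solve(A1, [b1, b2])
    return a1 - tail1, a2 - c2        # alpha^ss and beta^ss on the grid
\end{verbatim}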
We further define
\begin{align}
\label{eq:eta_ss}
\eta^{ss}(t)& = \frac{\beta^{ss}(t,1)-(\rho-\tilde{\rho})\alpha^{ss}(t,1)-d_4(t)}{k_I} \nonumber \\
& \hphantom{=} + \int_0^1 \left(l_1(\xi) \alpha^{ss}(t,\xi) + l_2(\xi) \beta^{ss}(t,\xi)\right)d\xi \, .
\end{align}
By defining the error variables $\bar{\alpha} =\alpha -\alpha^{ss}$, $\bar{\beta}=\beta -\beta^{ss}$, and $\bar{\eta}=\eta -\eta^{ss}$, one gets the following system
\begin{align}
\bar{\alpha}_t + \lambda(x) \bar{\alpha}_x=-\alpha_t^{ss} \label{eq_alpha_bar} \\
\bar{\beta}_t -\mu(x) \bar{\beta}_x=-\beta_t^{ss} \, , \label{eq_beta_bar}
\end{align}
with the boundary conditions
\begin{align}
\bar{\alpha}(t,0)&=q\bar{\beta}(t,0) \label{bound_alpha_bar}\\
\bar{\beta}(t,1) &=\left(\rho-\tilde{\rho}\right)\bar{\alpha}(t,1)+k_I\bar{\eta}(t)\nonumber \\
& \hphantom{=} -k_I\int_0^1 \left(l_1(\xi) \bar{\alpha}(t,\xi)+l_2(\xi) \bar{\beta}(t,\xi)\right)d\xi \, .
\end{align}
Noticing that~$\alpha^{ss}(t,1)=-\int_0^1 L^{\alpha\alpha}(1,\xi)\alpha^{ss}(t,\xi)d\xi-\int_0^1 L^{\alpha\beta}(1,\xi)\beta^{ss}(t,\xi)d\xi$, we also have that
\begin{align}
\dot{\bar{\eta}}(t) & =\int^1_0 \left(L^{\alpha\alpha}(1,\xi)\bar{\alpha}(t,\xi)+L^{\alpha\beta}(1,\xi)\bar{\beta}(t,\xi)\right)d\xi \nonumber \\
& \hphantom{=}+ \bar{\alpha}(t,1)+n(t) -\dot{\eta}^{ss}(t) \, \label{eta_bar_eq}.
\end{align}
\subsection{Stability Analysis}
In this section, we analyze the stability properties of system~\eqref{eq_alpha_bar}--\eqref{eta_bar_eq}. More precisely, we derive conditions on $k_I$ that ensure the Input-to-State Stability of system~\eqref{eq_alpha_bar}--\eqref{eta_bar_eq}. The proof will be done in three steps. First, using a simple transformation, we rewrite the system~\eqref{eq_alpha_bar}--\eqref{eta_bar_eq} as a neutral-delay equation (NDE). We then recall some conditions that guarantee the stability of this NDE in the absence of disturbances. Finally, we prove that these conditions imply the Input-to-State Stability.
Let us consider the invertible transformation
\begin{align}
\gamma(t)=\bar{\eta}(t)-\int_0^1 \left( l_1(\xi) \bar{\alpha}(t,\xi)+l_2(\xi) \bar{\beta}(t,\xi)\right) d\xi.
\end{align}
With this transformation, the boundary condition and the integrator dynamics of system~\eqref{eq_alpha_bar}--\eqref{eta_bar_eq} can be rewritten as
\begin{align}
&\bar{\beta}(t,1)=(\rho-\tilde{\rho})\bar{\alpha}(t,1)+k_I\gamma(t) \label{bound_beta_bar}\\
&\dot{\gamma}(t)=\left(1+l_1(1)\lambda(1)\right) \bar{\alpha}(t,1) +n(t) -\dot{\eta}^{ss}(t). \label{bound_gamma}
\end{align}
Using~\eqref{eq_l1} and~\eqref{bound_l}, we have
\begin{align}
1+l_1(1)\lambda(1)
&=1+l_2(0)\frac{\mu(0)}{q}+\int_0^1 L^{\alpha \alpha}(1,\xi) d\xi \nonumber \\
&=1+\frac{1}{q}\int_0^1 L^{\alpha \beta}(1,\xi) d\xi \nonumber \\
& \hphantom{=} +\int_0^1 L^{\alpha \alpha}(1,\xi) d\xi.
\end{align}
Thus, due to Assumption~\ref{assum:condition_neq_L},~$1+l_1(1) \lambda(1) \ne 0$.
In the sequel we denote by~$\phi_1(x)$ and~$\phi_2(x)$ the following functions
\begin{equation}
\phi_1(x) = \int_0^x \frac{1}{\lambda(\xi)}d\xi \, , \quad \phi_2(x) = \int_0^x \frac{1}{\mu(\xi)}d\xi \, ,
\end{equation}
and by~$\tau_1$,~$\tau_2$, and~$\tau$ the following transport times
\begin{align}
\tau_1 = \phi_1(1),~\tau_2 = \phi_2(1),~\tau = \tau_1 + \tau_2 \, . \label{eq_tau}
\end{align}
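For constant velocities these transport times reduce to $1/\lambda$, $1/\mu$ and their sum; in the general case they can be evaluated by quadrature, as in the following minimal sketch (the velocity profiles in the usage comment are purely illustrative placeholders).
\begin{verbatim}
# Hedged sketch: transport times by quadrature for assumed velocity profiles.
import numpy as np

def transport_times(lam, mu, n=10000):
    x = np.linspace(0.0, 1.0, n)
    def quad(y):                        # trapezoidal rule on the grid x
        return float(np.sum(0.5 * (y[1:] + y[:-1]) * np.diff(x)))
    tau1 = quad(1.0 / lam(x))           # phi_1(1)
    tau2 = quad(1.0 / mu(x))            # phi_2(1)
    return tau1, tau2, tau1 + tau2

# e.g. transport_times(lambda x: 1.0 + 0.5 * x, lambda x: 2.0 + 0.0 * x)
\end{verbatim}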
Using the characteristics method, it is straightforward to show that for all~$t \geq \tau$,
\begin{align}
\overline{\alpha}(t,1) & = \overline{\alpha}\left(t -\tau_1,0\right) \nonumber \\
& \hphantom{=} - \int_0^1 \frac{1}{\lambda(\xi)} \alpha_t^{ss} \left(\xi,t - \int_\xi^1 \frac{1}{\lambda(\zeta)}d\zeta\right) d\xi \label{eq:alpha_bar_1} \\
\overline{\beta}(t,0) & = \overline{\beta}\left(t- \tau_2,1\right) - \int_0^1 \frac{\beta_t^{ss}\left(\xi,t - \phi_2(\xi)\right)}{\mu(\xi)} d\xi \, . \label{eq:beta_bar_0}
\end{align}
Combining these expressions with the boundary conditions~\eqref{bound_alpha_bar} and~\eqref{bound_beta_bar}, we get for all~$t\geq \tau$,
\begin{align}
\overline{\alpha}(t,1) & = q \overline{\beta}\left(t-\tau,1\right) \nonumber \\
& \hphantom{=} - q \int_0^1 \frac{1}{\mu(\xi)} \beta_t^{ss}\left(\xi, t - \tau_1 -\phi_2(\xi)\right)d\xi \nonumber \\
& \hphantom{=} - \int_0^1
\frac{1}{\lambda(\xi)} \alpha_t^{ss}
\left(\xi,t - \int_\xi^1
\frac{1}{\lambda(\zeta)}d\zeta\right) d\xi \label{eq:alpha_1_equality}
\end{align}
Using again boundary condition~\eqref{bound_beta_bar},
relationship~\eqref{eq:alpha_1_equality} becomes
\begin{align}
\overline{\alpha}(t,1) & = (\rho -\tilde{\rho})q\overline{\alpha}(t-\tau,1)+k_Iq\gamma(t-\tau) \nonumber \\
& \hphantom{=} - q \int_0^1 \frac{1}{\mu(\xi)} \beta_t^{ss}\left(\xi, t - \tau_1 - \phi_2(\xi)\right)d\xi \nonumber \\
& \hphantom{=} - \int_0^1
\frac{1}{\lambda(\xi)} \alpha_t^{ss}
\left(\xi,t - \int_\xi^1
\frac{1}{\lambda(\zeta)}d\zeta\right) d\xi
\, . \label{eq:alpha_bar_1_2}
\end{align}
By differentiating~\eqref{eq:alpha_bar_1_2} with respect to time, one has
\begin{align}
\label{eq:NDE_perturbed}
\dot{\overline{\alpha}}(t,1) & = \left(\rho - \tilde{\rho}\right)
q\dot{\overline{\alpha}}(t-\tau,1)
+k_Iq\left(1+l_1(1)\lambda(1)\right) \nonumber \\
& \hphantom{=} \times \overline{\alpha}(t-\tau,1) +
K(t) \, ,
\end{align}
where
\begin{align}
K(t) & = k_Iq(n(t-\tau) - \dot{\eta}^{ss}(t-\tau)) \nonumber \\
& \hphantom{=} - q
\int_0^1 \frac{1}{\mu(\xi)} \beta_{tt}^{ss}\left(\xi, t -
\tau_1 - \phi_2(\xi)\right)d\xi \nonumber \\
& \hphantom{=} - \int_0^1
\frac{1}{\lambda(\xi)} \alpha_{tt}^{ss}
\left(\xi,t - \int_\xi^1
\frac{1}{\lambda(\zeta)}d\zeta\right) d\xi
\, .
\end{align}
Let us denote $k_1 = \left(\rho-\tilde{\rho}\right)q$ and $k_2 = k_Iq\left(1+l_1(1)\lambda(1)\right)$. The characteristic equation of~(\ref{eq:NDE_perturbed}) is given by
\begin{equation}
\label{eq:char-eq}
s - \left(k_1s +k_2\right)e^{-s \tau} = 0 \, .
\end{equation}
We recall the following theorem that gives conditions to ensure the stability of~\eqref{eq:NDE_perturbed} in the absence of disturbances.
\begin{theorem}\cite{CT15}
\label{theo:coron_tamasoiu}
Let us assume that $k_2 \neq 0$. The characteristic equation~\eqref{eq:char-eq} has all its zeros in the open left half-plane if and only if the feedback parameters $k_1$ and $k_2$ satisfy $\left|k_1\right|<1$, $k_2 < 0$ and the time delay $\tau$ is such that $\tau \in \left(0,\tau_0\right)$ where $\tau_0$ is defined by
\begin{align}
\tau_0 & =
-\frac{\sqrt{1-k_1^2}}{\left|k_2\right|}\arctan\left(\frac{\sqrt{1-k_1^2}}{\left|k_1\right|}\right) \nonumber \\
& \hphantom{=} + \frac{\pi\sqrt{1-k_1^2}}{\left|k_2\right|} \, ,\quad \text{ if } k_1 \in (-1,0) \\
\tau_0 & =\frac{\pi}{2\left|k_2\right|}\, , \quad \text{ if } k_1 = 0 \, , \\
\tau_0 & = \frac{\sqrt{1-k_1^2}}{\left|k_2\right|} \arctan\left(\frac{\sqrt{1-k_1^2}}{k_1}\right)\, , \, \text{if } k_1 \in (0,1) \, .
\end{align}
\end{theorem}
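In practice, one way to check the delay condition of this theorem for a candidate gain $k_I$ is to evaluate the critical delay numerically from the imaginary-axis crossing frequency of the characteristic equation~\eqref{eq:char-eq}. The following is a minimal sketch, assuming $|k_1|<1$ and $k_2<0$; the values suggested in the usage comment are purely illustrative.
\begin{verbatim}
# Hedged sketch: delay margin of s - (k1*s + k2)*exp(-s*tau) = 0 via the
# imaginary-axis crossing frequency (assumes |k1| < 1 and k2 < 0).
import numpy as np

def delay_margin(k1, k2):
    assert abs(k1) < 1.0 and k2 < 0.0
    w = abs(k2) / np.sqrt(1.0 - k1 ** 2)            # crossing frequency
    # phase condition: pi/2 = arg(k1*i*w + k2) - w*tau  (mod 2*pi)
    return ((np.angle(1j * k1 * w + k2) - np.pi / 2.0) % (2.0 * np.pi)) / w

# usage sketch: with k1 = (rho - rho_tilde)*q and
# k2 = k_I*q*(1 + l1(1)*lambda(1)) < 0, one requires
# tau_1 + tau_2 < delay_margin(k1, k2).
\end{verbatim}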
We recall the definition of Input-to-State Stability (ISS).
\begin{definition}
The system described by the equations~\eqref{eq:NDE_perturbed} is said to be
Input-to-State Stable (ISS) if there exist a~$\mathcal{K}L$ function~$f$
and a~$\mathcal{K}$ function~$g$ such that, for any bounded initial
state~$\left(\overline{\alpha}^0,\overline{\beta}^0\right)^\top$ and any measurable locally essentially bounded
input~$K$, the solution exists for all~$t\geq 0$, and furthermore it
satisfies
\begin{align}
\left|
\overline{\alpha}(t,1)
\right| & \leq
f\left(\left\lVert\overline{\alpha}_0\right\rVert_\infty+\left\lVert
\overline{\beta}_0\right\rVert_\infty,t\right) \nonumber \\
& \hphantom{\leq} + g\left(\left\lVert
K\right\rVert_{L^\infty((0,t);\mathbb{R})}\right) \, .
\end{align}
\end{definition}
Using this result and the fact that $\left|\left(\rho-\tilde{\rho}\right)q\right|<1$ we may state the following Proposition assessing the ISS of system~\eqref{eq:NDE_perturbed}.
\begin{proposition}
\label{prop:ISS_NDE}
Let us choose $k_I$ such that conditions of Theorem~\ref{theo:coron_tamasoiu} for $k_1 = \left(\rho-\tilde{\rho}\right)q$ and $k_2= k_Iq\left(1+l_1(1)\lambda(1)\right)$ hold, then system~\eqref{eq:NDE_perturbed} is ISS with
respect to the input~$K$.
\end{proposition}
\begin{proof}
Let us denote $z(t) = \overline{\alpha}(t,1)$. The variation-of-constants formula for the NDE~\eqref{eq:NDE_perturbed} reads (see~\cite{HL13} page~31)
\begin{align}
z\left(\left(\overline{\alpha}^0,\overline{\beta}^0\right),K\right)(t) & = z\left(\left(\overline{\alpha^0},\overline{\beta}^0\right),0\right)(t) \nonumber \\
& \hphantom{=} + \int_0^t X(t-s)K(s)ds \, , \label{eq:representation_formula}
\end{align}
where $z\left(\left(\overline{\alpha}^0,\overline{\beta}^0\right),0\right)(t)$ denotes the solution of the homogeneous NDE~\eqref{eq:NDE_perturbed} (i.e., when $K \equiv 0$) in terms of the fundamental solution $X$ (see~\cite{HL13} for a definition of the fundamental solution). Theorem~7.6 page 32 in~\cite{HL13} guarantees that if $s_0$ is the supremum of the real part of the roots of the characteristic equation~\eqref{eq:char-eq} then for any $s > s_0$ there exists $k = k\left(s\right)$ such that the fundamental solution $X$ satisfies the inequality
\begin{equation}
\label{eq:inequality_X}
\left\lVert X(t) \right\Vert \leq ke^{s t} \, , \quad t \geq 0 \, .
\end{equation}
The conditions of Theorem~\ref{theo:coron_tamasoiu} ensure that $s_0 <
0$ and consequently that there exist $s<0$ and $k$ such
that inequality~\eqref{eq:inequality_X} holds. Then, using this bound together with the representation formula~\eqref{eq:representation_formula} we immediately conclude the proof of Proposition~\ref{prop:ISS_NDE}.
\end{proof}
\subsection{Output Regulation}
\label{sec:output-regulation-1}
The following theorem assesses the output regulation of
system~(\ref{eq:perturbed_system_1})--(\ref{eq:perturbed_system_4}),~(\ref{eq:Controller}),~\eqref{eq:eta_dot},
and~\eqref{eq:U_2}.
\begin{theorem}
\label{theo:rejection_1}
Consider system~(\ref{eq:perturbed_system_1}),~(\ref{eq:perturbed_system_2}) with
boundary
conditions~(\ref{eq:perturbed_system_3}),~(\ref{eq:perturbed_system_4})
where $U$ is given by~\eqref{eq:Controller} with $U_{BS}$ given by~\eqref{eq:U_2}, $\eta$
satisfying~\eqref{eq:eta_dot}, and with bounded initial conditions
$\left(u^0,v^0,\eta^0\right) \in E$. Then, assuming that conditions of Proposition~\ref{prop:ISS_NDE} hold, there exists a positive constant $M$ such
that the controlled output $y(t)$ satisfies
\begin{equation}
\label{eq:boundedness_y}
\left| y(t)\right| \leq M \, .
\end{equation}
Furthermore, if $\partial_t d_1 = \partial_t d_2 = \dot{d}_3 =
\dot{d}_4 = n = 0$, then the controlled output satisfies
\begin{align}
\label{eq:rejection_1}
\lim_{t\rightarrow \infty} \left| y(t)\right| = 0\, .
\end{align}
\end{theorem}
\begin{proof}
Let us recall that one has
\begin{align}
\lim_{t\rightarrow \infty} |u(t,1)|& = \lim_{t\rightarrow \infty} \left|\alpha(t,1)+\int_0^1L^{\alpha \alpha}(1,\xi) \alpha(t,\xi)d\xi \right. \nonumber \\
& \hphantom{=} \left.+ \int_0^1 L^{\alpha \beta}(1,\xi) \beta(t,\xi) d\xi\right| \nonumber \\
& =\lim_{t\rightarrow \infty}
\left|\bar{\alpha}(t,1)+\int_0^1L^{\alpha
\alpha}(1,\xi) \bar{\alpha}(t,\xi) d\xi \right.\nonumber \\
& \hphantom{=} \left. + \int_0^1L^{\alpha\beta}(1,\xi) \bar{\beta}(t,\xi)
d\xi\right| \, . \label{eq:lim_u1}
\end{align}
Now let us observe that for all $t \geq \tau$ and all $\theta \in [0,x]$,
\begin{align}
\label{eq:alpha_bar_relation_proof_theo_1} \overline{\alpha}(t,x) & = \overline{\alpha}\left(t-\int_\theta^x
\frac{1}{\lambda(\zeta)}d\zeta,\theta\right) \nonumber \\
& \hphantom{=} - \int_\theta^x
\frac{1}{\lambda(\zeta)}\alpha_t^{ss} \left(t-\int_\zeta^x
\frac{1}{\lambda(s)}ds,\zeta\right)d\zeta \, ,
\\
\overline{\beta}(t,x)
& =
\frac{1}{q}\overline{\alpha}\left(t+\phi_2(x),0\right) \nonumber \\
& \hphantom{=} + \int_0^x
\frac{1}{\mu(\xi)}\beta^{ss}_t\left(t+\int_\xi^x
\frac{1}{\mu(\zeta)}d\zeta,\xi\right)d\xi
\, . \label{eq:beta_bar_relation_proof_theo_1}
\end{align}
Besides, Lemma~\ref{lem:existence_ss} ensures that
$\alpha_t^{ss}$ and $\beta_t^{ss}$ are bounded. Therefore, relationships~\eqref{eq:alpha_bar_relation_proof_theo_1}
and~\eqref{eq:beta_bar_relation_proof_theo_1} combined with the ISS
of $\overline{\alpha}(t,1)$ as proved in
Proposition~\ref{prop:ISS_NDE} ensure that
$\overline{\alpha}(t,x)$ and $\overline{\beta}(t,x)$ are bounded for
all $x \in [0,1]$. Then, with~\eqref{eq:lim_u1} one
gets~\eqref{eq:boundedness_y}. Now, if $\partial_t d_1 = \partial_t d_2 = \dot{d}_3 =
\dot{d}_4 = n = 0$, then $\alpha_t^{ss} = \beta_t^{ss} =
\dot{\eta}^{ss} = 0$ where $\alpha^{ss}$ and $\beta^{ss}$ are
solutions to the ODE given in~\eqref{steady_eq}, and
$\eta^{ss}$ is given in~\eqref{eq:eta_ss}. In virtue of the ISS of system~\eqref{eq:NDE_perturbed} stated in Proposition~\ref{prop:ISS_NDE} and using the relationships~\eqref{eq:alpha_bar_relation_proof_theo_1}
and~\eqref{eq:beta_bar_relation_proof_theo_1} one has
\begin{align}
\lim_{t\rightarrow \infty} |u(t,1)|
& = \left|\alpha^{ss}(1)+\int_0^1L^{\alpha \alpha}(1,\xi) \alpha^{ss}(\xi)d\xi \right. \nonumber \\
& \hphantom{=} \left.+ \int_0^1 L^{\alpha \beta}(1,\xi) \beta^{ss}(\xi) d\xi\right| = 0 \, .
\end{align}
This concludes the proof of Theorem~\ref{theo:rejection_1}.
\end{proof}
\section{Boundary Observer}
\label{sec:boundary-observer}
In this section we design an observer that relies on the noisy measurements at the right boundary:~${y_m(t)=u(t,1)+n(t)}$. This observer will be designed as a function of a parameter~$\epsilon$ that can be interpreted as a measure of trust in our measurements relative to the model (or unmeasured disturbances).
\subsection{Observer Design}
Similarly to \cite{VKC11}, the observer equations are set as follows
\begin{align}
\hat{u}_t+\lambda(x)\hat{u}_x=&\gamma_1(x)\hat{v}
-P^+(x)\left(\hat{u}(t,1)-y_m(t)\right) \label{hat_u} \\
\hat{v}_t-\mu(x) \hat{v}_x=&\gamma_2(x)\hat{u}
-P^-(x)\left(\hat{u}(t,1)-y_m(t)\right)
\, , \label{hat_v}
\end{align}
with the modified boundary conditions
\begin{align}
&\hat{u}(t,0)=q\hat{v}(t,0) \label{hat_boundary1}\\
&\hat{v}(t,1)=\rho (1-\epsilon) \hat{u}(t,1) +\rho \epsilon y_m(t) +
U(t) \, . \label{hat_boundary}
\end{align}
The gains~$P^+(\cdot)$ and~$P^-(\cdot)$ are defined as
\begin{align}
P^+(x) & =-\lambda(x) P^{uu}(x,1)+\mu(x) \rho (1-\epsilon) P^{uv}(x,1) \label{eq_P_eps_1} \\
P^-(x) & =-\lambda(x) P^{vu}(x,1)+\mu(x) \rho (1-\epsilon) P^{vv}(x,1)
\, , \label{eq_P_eps_2}
\end{align}
where the kernels~$P^{uu}, P^{uv}, P^{vu}$, and~$P^{vv}$ are defined in~\cite{VKC11}.
\begin{remark}
The coefficient~$\epsilon \in [0,1]$ in~\eqref{hat_boundary} can be
interpreted as a measure of trust in our measurements relative to
the model (or unmeasured disturbances), where~$\epsilon=1$ results
in relying more on the measurements and~$\epsilon=0$ relying more on
the model. This trade-off will be made explicit in terms of the magnitude of
~$d_i$,~$i=1,\dots,4$ relative to~$n$ in the following.
\end{remark}
\begin{remark}
The coefficient~$\epsilon$ cannot be chosen arbitrarily in~$[0,1]$. As it will appear in the next subsection, it has to be close enough to 1 to ensure the convergence of the observer.
\end{remark}
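Before deriving the error system, we give a purely illustrative discretization sketch of the observer~\eqref{hat_u}--\eqref{hat_boundary}: one explicit upwind step with output injection. The grid data (sampled velocities, couplings and gains) and the time step, which has to satisfy a CFL condition, are placeholders; this sketch is not used in the analysis.
\begin{verbatim}
# Hedged sketch: one explicit upwind step of the observer with output
# injection.  uh, vh: current observer states on a uniform grid;
# lam, mu, g1, g2, Pp, Pm: sampled lambda, mu, gamma_1, gamma_2, P^+, P^-;
# dt must satisfy a CFL condition (not checked here).
import numpy as np

def observer_step(uh, vh, y_m, U, dt, dx, lam, mu, g1, g2, Pp, Pm, q, rho, eps):
    inj = uh[-1] - y_m                          # u_hat(t,1) - y_m(t)
    un, vn = uh.copy(), vh.copy()
    # u_hat transports to the right (speed lambda > 0): backward differences
    un[1:] = uh[1:] - dt * lam[1:] * (uh[1:] - uh[:-1]) / dx \
             + dt * (g1[1:] * vh[1:] - Pp[1:] * inj)
    # v_hat transports to the left (speed mu > 0): forward differences
    vn[:-1] = vh[:-1] + dt * mu[:-1] * (vh[1:] - vh[:-1]) / dx \
              + dt * (g2[:-1] * uh[:-1] - Pm[:-1] * inj)
    # observer boundary conditions
    un[0] = q * vn[0]
    vn[-1] = rho * (1.0 - eps) * un[-1] + rho * eps * y_m + U
    return un, vn
\end{verbatim}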
Combining the observer \eqref{hat_u}--\eqref{hat_boundary} with the system \eqref{eq:perturbed_system_1}--\eqref{eq:perturbed_system_4} yields the error system (denoting~$\tilde{u}(t,x)=u(t,x)-\hat{u}(t,x)$ and~$\tilde{v}(t,x)=v(t,x)-\hat{v}(t,x)$):
\begin{align}
\tilde{u}_t+\lambda(x) \tilde{u}_x& =\gamma_1(x)\tilde{v} -P^+(x)\tilde{u}(t,1)\nonumber \\
&\hphantom{=}
-n(t)P^+(x) +d_1(t)m_1(x) \label{tilde_u}\\
\tilde{v}_t-\mu(x) \tilde{v}_x& =\gamma_2(x)\tilde{u} -P^-(x)\tilde{u}(t,1)\nonumber \\
& \hphantom{=}
-n(t)P^-(x) +
d_2(t)m_2(x) \, , \label{tilde_v}
\end{align}
with the boundary conditions
\begin{align}
\tilde{u}(t,0)&=q\tilde{v}(t,0)+d_3(t),\label{tilde_boundary0} \\
\tilde{v}(t,1)&=\rho(1-\epsilon)\tilde{u}(t,1)+d_4(t)-\rho \epsilon
n(t) \, .\label{tilde_boundary}
\end{align}
\subsection{Ideal Error System}
In this section, we consider the unperturbed system with uncorrupted measurements, in order to give insight into the impact of~$\epsilon$ in the ideal case. Using the backstepping approach and a Volterra transformation identical to the one presented in~\cite{VKC11}, we can map system \eqref{tilde_u}--\eqref{tilde_boundary} to a simpler target system.
Consider the kernels~$P^{uu}, P^{uv}, P^{vu}$, and~$P^{vv}$ defined in~\cite{VKC11} and the following Volterra transformation
\begin{align}
\tilde{u}(t,x)=\tilde{\alpha}_{id}(t,x)-\int_x^1(P^{uu}(x,\xi)\tilde{\alpha}_{id}(t,\xi) \nonumber \\
+P^{uv}(x,\xi)\tilde{\beta}_{id}(t,\xi))d\xi \label{eq_M0} \\
\tilde{v}(t,x)=\tilde{\beta}_{id}(t,x)-\int_x^1(P^{vu}(x,\xi)\tilde{\alpha}_{id}(t,\xi) \nonumber \\
+P^{vv}(x,\xi)\tilde{\beta}_{id}(t,\xi))d\xi \, . \label{eq_N0}
\end{align}
Differentiating \eqref{eq_M0} and \eqref{eq_N0} with respect to space and time, one can prove that system \eqref{tilde_u}--\eqref{tilde_boundary} is equivalent to the following system
\begin{align}
&(\tilde{\alpha}_{id})_t+\lambda(x) (\tilde{\alpha}_{id})_x=0 \label{alpha_eq}\\
&(\tilde{\beta}_{id})_t-\mu(x) (\tilde{\beta}_{id})_x=0 \, ,
\end{align}
with the following boundary conditions
\begin{align}
\tilde{\alpha}_{id}(t,0)&=q\tilde{\beta}_{id}(t,0)\\
\tilde{\beta}_{id}(t,1)&=\rho(1-\epsilon)\tilde{\alpha}_{id}(t,1) \, . \label{alpha_bound}
\end{align}
We then have the following lemma (see e.g \cite{A17} for details).
\begin{lemma}
System \eqref{alpha_eq}--\eqref{alpha_bound} is exponentially stable if and only if
\begin{align}
1-\frac{1}{|\rho q|} < \epsilon\leq 1 \, . \label{epsilon}
\end{align}
\end{lemma}
\begin{remark}
In the case~$\epsilon=1$ we have the same target system as the one presented in~\cite{VKC11}. It converges in finite time~$\tau$ to zero.
\end{remark}
Note that due to Assumption~\ref{assum:function_regularity} the proposed interval is non-empty.
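As a small illustration of the resulting trade-off, the admissible range of the trust parameter for given (assumed) reflection coefficients can be computed as follows; note that the lower bound of the lemma is strict.
\begin{verbatim}
# Hedged illustration: admissible range (open at the lower end) of eps
# for assumed reflection coefficients rho and q.
def eps_range(rho, q):
    lower = 1.0 - 1.0 / abs(rho * q)
    return max(lower, 0.0), 1.0

# e.g. eps_range(rho=0.8, q=0.9) -> (0.0, 1.0): any eps in [0, 1] is admissible
\end{verbatim}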
\subsection{Error System including Noise and Disturbance}
We consider in this section the real error-system~\mbox{\eqref{tilde_u}--\eqref{tilde_boundary}}, including the noise and
disturbances~$n$,~$d_i$,~$i=1,\dots,4$. Applying the Volterra transformations~\eqref{eq_M0} and~\eqref{eq_N0}, system~\eqref{tilde_u}--\eqref{tilde_boundary} is mapped to the following target system
\begin{align}
\tilde{\alpha}_t+\lambda(x)
\tilde{\alpha}_x & =n(t)f_1(x)+d_1(t)f_2(x) \nonumber \\
& \hphantom{=} + d_2(t)f_3(x)+d_4(t)f_4(x) \label{alpha_eq_disturbed}\\
\tilde{\beta}_t-\mu(x)
\tilde{\beta}_x & = n(t)g_1(x)+d_1(t)g_2(x) \nonumber \\
& \hphantom{=} +d_2(t)g_3(x)+d_4(t)g_4(x) \, , \label{beta_eq_disturbed}
\end{align}
with the boundary conditions
\begin{align}
\tilde{\alpha}(t,0)&=q\tilde{\beta}(t,0)+d_3(t) \label{alpha_bound_disturbed} \\
\tilde{\beta}(t,1)&=\rho(1-\epsilon)\tilde{\alpha}(t,1)+d_4(t)-\rho\epsilon
n(t) \, , \label{beta_bound_disturbed}
\end{align}
where~$f_i$ and~$g_i$,~$i=1,\dots,4$, are the solutions of the following integral equations
\begin{align}
f_1(x) & = \int_x^1\left( P^{uu}(x,\xi)f_1(\xi)+P^{uv}(x,\xi)g_1(\xi)\right)d\xi\nonumber \\
& \hphantom{=} -P^+(x) -\mu(1)\rho\epsilon
P^{uv}(x,1) \label{eq:f_1} \\
f_2(x) & = m_1(x) + \int_x^1 P^{uu}(x,\xi)f_2(\xi)d\xi \nonumber \\
& \hphantom{=} + \int_x^1P^{uv}(x,\xi)g_2(\xi)d\xi \\
f_3(x) & = \int_x^1
\left(P^{uu}(x,\xi)f_3(\xi)+P^{uv}(x,\xi)g_3(\xi)\right)d\xi\\
f_4(x) & = \mu(1)P^{uv}(x,1) + \int_x^1 P^{uu}(x,\xi)f_4(\xi)d\xi
\nonumber \\
& \hphantom{=} + \int_x^1 P^{uv}(x,\xi)g_4(\xi)d\xi \\
g_1(x) & =\int_x^1\left(P^{vu}(x,\xi)f_1(\xi) + P^{vv}(x,\xi)g_1(\xi)\right)d\xi \nonumber \\
& \hphantom{=} -P^-(x) -\mu(1)\rho\epsilon P^{vv}(x,1) \\
g_2(x) & = \int_x^1
\left(P^{uv}(x,\xi)f_2(\xi)+P^{vv}(x,\xi)g_2(\xi)\right)d\xi
\\
g_3(x) & = m_2(x) + \int_x^1 P^{vu}(x,\xi)f_3(\xi)d\xi \nonumber \\
& \hphantom{=} +\int_x^1 P^{vv}(x,\xi)g_3(\xi)d\xi \\
g_4(x) & = \mu(1)P^{vv}(x,1) + \int_x^1 P^{vu}(x,\xi)f_4(\xi)d\xi
\nonumber \\
& \hphantom{=} + \int_x^1 P^{vv}(x,\xi)g_4(\xi)d\xi \label{eq:g_4} \, .
\end{align}
The functions $f_i$ and $g_i$ are well defined as solutions of these integral equations~\cite{Y60}.
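Each pair $(f_i,g_i)$ solves a system of Volterra-type equations of the common form $f(x)=F(x)+\int_x^1 \left(K_{11}(x,s)f(s)+K_{12}(x,s)g(s)\right)ds$, $g(x)=G(x)+\int_x^1\left(K_{21}(x,s)f(s)+K_{22}(x,s)g(s)\right)ds$, which can be approximated by Picard iteration on a grid. A minimal sketch follows; all kernels and forcing terms are placeholders sampled on the grid.
\begin{verbatim}
# Hedged sketch: Picard iteration for the pair of Volterra-type equations
# above.  F, G: forcing vectors on the grid x; K11..K22: kernel matrices
# with K[j, k] = K(x_j, x_k).  All inputs are placeholders.
import numpy as np

def picard_pair(F, G, K11, K12, K21, K22, x, n_iter=50):
    dx = x[1] - x[0]
    upper = np.triu(np.ones((len(x), len(x))))   # keeps only s >= x in the integrals
    f, g = F.copy(), G.copy()
    for _ in range(n_iter):
        f = F + dx * ((K11 * upper) @ f + (K12 * upper) @ g)
        g = G + dx * ((K21 * upper) @ f + (K22 * upper) @ g)
    return f, g
\end{verbatim}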
The following proposition states that the system is ISS with
respect to~$n$ and~$d_i$,~$i=1,\dots,4$, and thus remains
stable in the presence of bounded noise and disturbances.
\begin{proposition}
\label{prop:iss_observer}Let us
assume that~$\rho$,~$q$, and~$\epsilon$ satisfy~\eqref{epsilon}. Then, system~\eqref{alpha_eq_disturbed},~\eqref{beta_eq_disturbed} with boundary
conditions~\eqref{alpha_bound_disturbed} and~\eqref{beta_bound_disturbed} is ISS
with respect to~$n$ and~$d_i$,~$i=1,\dots,4$. More precisely there
exist a~$\mathcal{K}L$ function~$h_1$ and a~$\mathcal{K}$ function~$h_2$ such that for
any initial condition~$\left(\tilde{\alpha}^0,\tilde{\beta}^0\right)^\top \in E'$ the following
holds, for all $t\geq 0$,
\begin{align}
\left\lVert \left(\tilde{\alpha},\tilde{\beta}\right)^\top
\right\rVert_{E'} & \leq h_2\left(\left\lVert \left(n,d_1,\dots,d_4\right)^\top
\right\rVert_{L^\infty\left((0,t);\mathbb{R}^5\right)}\right) \nonumber \\
& \hphantom{=} + h_1\left(\left(\tilde{\alpha}^0,\tilde{\beta}^0\right)^\top,t\right) \, . \label{eq:ISS_tilde_alpha_tilde_beta}
\end{align}
\end{proposition}
\begin{proof}
The proof relies on the characteristics
method and an iteration process. For the sake of simplicity we introduce the notations~$\underline{\lambda}$, $\underline{\mu}$, $K_1$,~$K_2$, and~$\tilde{d}$
\begin{align}
\underline{\lambda} & = \min_{x\in [0,1]} \lambda(x),\quad \underline{\mu} = \min_{x \in [0,1]} \mu(x) \\
K_1(t,x) & = n(t)f_1(x)+d_1(t)f_2(x) \nonumber \\
& \hphantom{=} + d_2(t)f_3(x)+d_4(t)f_4(x) \label{eq:K_1}
\end{align}
\begin{align}
K_2(t,x) & = n(t)g_1(x)+d_1(t)g_2(x) \nonumber \\
& \hphantom{=} + d_2(t)g_3(x)+d_4(t)g_4(x) \label{eq:K_2} \\
\tilde{d}(t) & = d_4(t)-\rho\epsilon n(t) \, . \label{eq:tilde_d}
\end{align}
In what follows, for the sake of brevity we write
~$\left|K_{1_{[0,t)}}\right|_\infty$ for
~$\left|K_1\right|_{L^\infty([0,t)\times(0,1))}$. By the characteristics method we have
\begin{align}
\tilde{\alpha}&(\tau,x)
= d_3(\tau-\phi_1(x)) + \tilde{d}\left(\tau - \phi_1(x) - \tau_2\right) \nonumber \\
& \hphantom{=} + q\rho(1-\epsilon) \left(\vphantom{\int_0^{\tau - \phi_1(x) - \phi_2(1)}}\tilde{\alpha}^0(x)+\int_0^{\tau_1 - \phi_1(x)}K_1\left(\xi,w(x,\xi)\right)d\xi \right) \nonumber \\
& \hphantom{=} + \int_0^x \frac{K_1\left(\tau-\int_\xi^x \frac{1}{\lambda(\zeta)}d\zeta,\xi\right)}{\lambda(\xi)}d\xi \nonumber \\
& \hphantom{=} + \int_0^1 q\frac{K_2\left(\tau - \phi_1(x)-\phi_2(\xi),\xi\right)}{\mu(\xi)}d\xi \, ,
\end{align}
where $w(x,\xi) = \phi_1^{-1}\left(\phi_1(x)+\xi\right)$. Therefore, one has
\begin{align}
\left|\tilde{\alpha}\left(\tau,x\right)\right| & \leq \left|q\rho(1-\epsilon)\right| \left|\tilde{\alpha}^0\right|_\infty + \left| d_{3_{[0,\tau)}} \right|_\infty + \left|\tilde{d}_{[0,\tau)}\right|_\infty \nonumber \\
& \hphantom{\leq} +\left(\frac{1}{\underline{\lambda}}+ \left|q\rho(1-\epsilon)\right|\tau\right)\left|K_{1_{[0,\tau)}}\right|_\infty \nonumber \\
& \hphantom{\leq} + \frac{\left|K_{2_{[0,\tau)}}\right|_\infty}{\underline{\mu}} \, .
\end{align}
Recursively, we get
\begin{align}
\left|\tilde{\alpha}\left(n\tau,x\right)\right|_\infty & \leq \left|q\rho(1-\epsilon)\right|^n \left|\tilde{\alpha}^0\right|_\infty \nonumber \\
& \hphantom{\leq} +\frac{1}{\underline{\lambda}} \sum_{i=1}^n\left|q\rho(1-\epsilon)\right|^{i-1}\left|K_{1_{[0,n\tau)}}\right|_\infty \nonumber \\
& \hphantom{\leq} + \tau \sum_{i=1}^n\left|q\rho(1-\epsilon)\right|^i\left|K_{1_{[0,n\tau)}}\right|_\infty \nonumber \\
& \hphantom{\leq} + \frac{1}{\underline{\mu}} \sum_{i=1}^n\left|q\rho(1-\epsilon)\right|^{i-1}\left|K_{2_{[0,n\tau)}}\right|_\infty \nonumber \\
& \hphantom{\leq} + \sum_{i=1}^n \left|q\rho(1-\epsilon)\right|^{i-1}\left| d_{3_{[0,n\tau)}} \right|_\infty \nonumber \\
& \hphantom{\leq} + \sum_{i=1}^n \left|q\rho(1-\epsilon)\right|^{i-1} \left|\tilde{d}_{[0,n\tau)}\right|_\infty \, .
\end{align}
Using the condition~\eqref{epsilon}, one has~$\left|q\rho(1-\epsilon)\right|<1$, and it follows that
\begin{align}
\left|\tilde{\alpha}\left(n\tau,x\right)\right| & \leq \left|q\rho(1-\epsilon)\right|^n \left\lVert\left(\tilde{\alpha}^0,\tilde{\beta}^0\right)^\top\right\rVert_E \nonumber \\
& \hphantom{\leq} + \left(\tau+ \frac{1}{\underline{\lambda}}\right)\frac{\left|K_{1_{[0,n\tau)}}\right|_\infty}{1 - \left|q\rho(1-\epsilon)\right|} \nonumber \\
& \hphantom{\leq} + \frac{\left|K_{2_{[0,n\tau)}}\right|_\infty}{\underline{\mu}-\underline{\mu}\left|q\rho(1-\epsilon)\right|} + \frac{\left| d_{3_{[0,n\tau)}} \right|_\infty}{1 - \left|q\rho(1-\epsilon)\right|} \nonumber \\
& \hphantom{\leq} + \frac{\left|\tilde{d}_{[0,n\tau)}\right|_\infty}{1 - \left|q\rho(1-\epsilon)\right|} \, .
\end{align}
The computation carried out for~$\tilde{\alpha}$ can be done in a similar way for~$\tilde{\beta}$. We get that for all~$t$ and all~$x$ such
that~${n\tau \leq t - \phi_1(x) < (n+1)\tau}$
\begin{align}
\left|\tilde{\alpha}(t,x)\right| & \leq (1+\left|q\right|)\left|q\rho(1-\epsilon)\right|^n \left\lVert\left(\tilde{\alpha}^0,\tilde{\beta}^0\right)^\top\right\rVert_E \nonumber \\
& \hphantom{\leq} +(1+\left|q\right|)\left(\tau+\frac{2}{\underline{\lambda}}\right)\frac{\left|K_{1_{[0,t)}}\right|_\infty}{1 - \left|q\rho(1-\epsilon)\right|} \nonumber \\
& \hphantom{\leq} +(1+\left|q\right|) \left(\tau+\frac{2}{\underline{\mu}}\right)\frac{\left|K_{2_{[0,t)}}\right|_\infty}{1-\left|q\rho(1-\epsilon)\right|} \nonumber \\
& \hphantom{\leq} + (1+\left|q\right|)\frac{2\left| d_{3_{[0,t)}} \right|_\infty}{1 - \left|q\rho(1-\epsilon)\right|} + \left|d_{3_{[0,t)}}\right| \nonumber \\
& \hphantom{=} + (1+\left|q\right|)\frac{2\left|\tilde{d}_{[0,t)}\right|_\infty}{1 - \left|q\rho(1-\epsilon)\right|} + \left|q\right|\tau \left|K_{2_{[0,t)}}\right|_\infty \nonumber \\
& \hphantom{=} + \left(\tau+\frac{1}{\underline{\lambda}}\right) \left|K_{1_{[0,t)}}\right|_\infty \, .
\end{align}
Finally, with the computations for~$\tilde{\beta}$ we prove
that~\eqref{eq:ISS_tilde_alpha_tilde_beta} holds with
\begin{align}
h_1(X,t) & = Ce^{-\nu t}X \\
h_2(X) & =
\left(2\frac{(2+\left|q\right|+\left|\rho(1-\epsilon)\right|)}{1
- \left|q\rho(1-\epsilon)\right|}\left(\tau+
\frac{1}{\underline{\lambda}}+\frac{1}{\underline{\mu}}+2\right)\right. \nonumber
\\
& \hphantom{=} +2+ \left|q\right|\tau +
\left|\rho(1-\epsilon)\right|\tau \nonumber \\
& \hphantom{=} \left. +
\left(2\tau+\frac{1}{\underline{\lambda}}+\frac{1}{\underline{\mu}}\right)\right)X
\, ,
\end{align}
with $C = (2 + \left|q\right|+\left|\rho(1-\epsilon)\right|)$ and $\nu = \frac{1}{\tau}\ln \left(\frac{1}{\left|q\rho(1-\epsilon)\right|}\right)$. This concludes the proof of Proposition~\ref{prop:iss_observer}.
\end{proof}
\begin{theorem}
\label{theo:iss_observer}Let us
assume that~$\rho$,~$q$, and~$\epsilon$ satisfy the condition
in~\eqref{epsilon}. Then, system~\eqref{tilde_u},~\eqref{tilde_v} with boundary
conditions~\eqref{tilde_boundary0} and~\eqref{tilde_boundary} is ISS
with respect to~$n$ and~$d_i$,~$i=1,\dots,4$. More precisely there
exist a~$\mathcal{K}L$ function~$h_1$ and a~$\mathcal{K}$ function~$h_2$ such that for
any initial condition~$\left(\tilde{u}^0,\tilde{v}^0\right)^\top \in E'$ the following
holds
\begin{align}
\left\lVert \left(\tilde{u},\tilde{v}\right)^\top
\right\rVert_{E'} & \leq h_2\left(\left\lVert \left(n,d_1,\dots,d_4\right)^\top
\right\rVert_{L^\infty\left((0,t);\mathbb{R}^5\right)}\right) \nonumber \\
& \hphantom{=} + h_1\left(\left(\tilde{u}^0,\tilde{v}^0\right)^\top,t\right) \, . \label{eq:ISS_tilde_alpha_tilde_beta2}
\end{align}
\begin{proof}
Using the fact that the backstepping transformation~\eqref{eq_M0},~\eqref{eq_N0} is invertible and Proposition~\ref{prop:iss_observer}, Theorem~\ref{theo:iss_observer} is proved.
\end{proof}
\end{theorem}
\section{Feedback Output Regulation}
\label{sec:feedb-outp-regul}
Consider system~\eqref{eq:perturbed_system_1},~\eqref{eq:perturbed_system_2} with boundary conditions~\eqref{eq:perturbed_system_3} and~\eqref{eq:perturbed_system_4} where $U$ is given by~\eqref{eq:Controller} with $U_{BS}$ given by
\begin{align}
U_{BS}(t) & = -\tilde{\rho}(1-\epsilon)\hat{u}(t,1) - \left(\rho-\tilde{\rho}\right)\int_0^1K^{uu}(1,\xi)\hat{u}(t,\xi)d\xi \nonumber \\
& \hphantom{=} - \left(\rho-\tilde{\rho}\right)\int_0^1K^{uv}(1,\xi)\hat{v}(t,\xi)d\xi-\tilde{\rho}\epsilon y_m(t) \nonumber \\
& \hphantom{=} + \int_0^1 \left(K^{vu}(1,\xi)\hat{u}(t,\xi)+ K^{vv}(1,\xi)\hat{v}(t,\xi)\right)d\xi \nonumber \\
& \hphantom{=} -k_I\int_0^1
l_1(\xi)\Gamma_1[(\hat{u},\hat{v})(t)](\xi)d\xi \nonumber \\
& \hphantom{=} - k_I \int_0^1 l_2(\xi)\Gamma_2[(\hat{u},\hat{v})(t)](\xi)d\xi \, , \label{eq:U_output_feedback}
\end{align}
where $\hat{u}$ and $\hat{v}$ are the solution to~\eqref{hat_u}--\eqref{hat_boundary}. The aim of this section is to prove that the
output $y(t)$ of the system is still regulated in the sense of Theorem~\ref{theo:rejection_1} with the control law in~\eqref{eq:U_output_feedback}. We have the second main result of this paper.
\begin{theorem}
\label{theo:rejection_2}
Consider system~\eqref{eq:perturbed_system_1},~\eqref{eq:perturbed_system_2} with boundary conditions~\eqref{eq:perturbed_system_3} and~\eqref{eq:perturbed_system_4} where $U$ is given by~\eqref{eq:Controller} with $U_{BS}$ given by~\eqref{eq:U_output_feedback}, $\eta$
satisfying~\eqref{eq:eta_dot}, and with bounded initial conditions
$\left(u^0,v^0,\eta^0\right) \in E$. Then, assuming that conditions of Proposition~\ref{prop:ISS_NDE} hold, there exists a positive constant $M$ such that the controlled output $y(t)$ satisfies
\begin{equation}
\label{eq:disturbance_bound}
\left| y(t)\right| \leq M \, .
\end{equation}
Furthermore, if $\partial_t d_1 = \partial_t d_2 = \dot{d}_3 =
\dot{d}_4 = n = 0$, then the output satisfies
\begin{equation}
\label{eq:convergence_theo}
\lim_{t\rightarrow \infty} \left| y(t)\right| = 0\, .
\end{equation}
\end{theorem}
\begin{proof}
We have $\hat{u} = \hat{u} - u + u = -\tilde{u} + u$ and ${\hat{v}= \hat{v} - v + v = -\tilde{v} + v}$. Therefore, one has
\begin{align}
U_{BS}(t) & = -\tilde{\rho}u(t,1) - \left(\rho-\tilde{\rho}\right)\int_0^1K^{uu}(1,\xi)u(t,\xi)d\xi \nonumber \\
& \hphantom{=} - \left(\rho-\tilde{\rho}\right)\int_0^1K^{uv}(1,\xi)v(t,\xi)d\xi -\tilde{\rho}\epsilon n(t)\nonumber \\
& \hphantom{=} + \int_0^1 \left(K^{vu}(1,\xi)u(t,\xi)+ K^{vv}(1,\xi)v(t,\xi)\right)d\xi \nonumber \\
& \hphantom{=} -k_I\int_0^1 l_1(\xi)\Gamma_1[(u,v)(t)](\xi)d\xi \nonumber \\
& \hphantom{=} - k_I \int_0^1 l_2(\xi)\Gamma_2[(u,v)(t)](\xi)d\xi + \mathcal{D}(t) \, . \label{eq:U_output_feedback_2}
\end{align}
where $\mathcal{D}(t)$ is given by $U_{BS}$ in~\eqref{eq:U_output_feedback} in which $\hat{u}$ and $\hat{v}$ have been replaced by $-\tilde{u}$ and $-\tilde{v}$, respectively. Since $\tilde{u}$ and $\tilde{v}$ are bounded thanks to Theorem~\ref{theo:iss_observer}, we can consider $\mathcal{D}-\tilde{\rho}\epsilon n$ as a new perturbation in the input and we can apply Theorem~\ref{theo:rejection_1} to conclude that~\eqref{eq:disturbance_bound} holds. Now, if the perturbation vanishes, then $\mathcal{D}(t)-\tilde{\rho}\epsilon n(t)$ vanishes in virtue of the ISS of the observer system, and then again by applying Theorem~\ref{theo:rejection_1} we obtain~\eqref{eq:convergence_theo}. This concludes the proof of Theorem~\ref{theo:rejection_2}.
\end{proof}
\section{Concluding Remarks}
In this paper we have solved the output feedback regulation problem for a system composed of two linear hyperbolic PDEs with collocated boundary input and output in the presence of disturbances and measurement noise. This has been done by combining, in the control law, a backstepping approach with an integral term. By transforming the boundary condition of the resulting target system into a Neutral Differential Equation, we have proved that this system is Input-to-State Stable with respect to disturbances and noise. The proposed controller has finally been combined with a backstepping-based observer to ensure output-feedback regulation of the output. Both the proposed controller and the observer feature some degrees of freedom (necessary to ensure robustness to delays) that enable a trade-off between disturbance and noise sensitivity. The effects of these parameters, in particular regarding the system's sensitivity functions, are analyzed in a companion paper.
\end{document}
|
\begin{document}
\title{Quadratic covariation estimates in nonsmooth stochastic calculus}
\begin{abstract}
Given a Brownian Motion $W$, in this paper we study the asymptotic behavior, as $\varepsilon \to 0$, of the quadratic covariation between $f ( \varepsilon W)$ and $W$ in the case in which $f$ is not smooth. Among the main features discovered is that the speed of the decay in the case $f \in C^\alpha$ is at least polynomial in $\varepsilon$ and not exponential as expected. We use a recent representation of $[ f ( \varepsilon W), W]$ as a backward-forward It\^o integral to prove an $\varepsilon$-dependent approximation scheme which is of independent interest. We obtain the result by providing estimates for this approximation. The results are then adapted and applied to generalize the results of~\cite{MioNonl} and~\cite{nhn} related to the Small Noise Exit from a Domain problem for the Saddle Case.
\keywords{Non-smooth It\^o's formula \and Quadratic Variation \and Large Deviation}
\end{abstract}
\section{Introduction}
One of the central results of stochastic calculus is It\^o's change of variables formula for twice
differentiable transformations of semimartingales.
It was realized recently that one also needs to study nonlinear maps
that are not smooth enough to allow an application of the classical It\^o formula. Various approaches to less regular changes of variables have
been introduced, see~\cite{Boleau},~\cite{BoleauYor},~\cite{EisenbaumIto},~\cite{FollmerQuad},~\cite{ItoC1}, and
references therein.
These studies show that the key feature of the It\^o formula, the quadratic covariation term, is well-defined under
much weaker assumptions than those leading to the traditional formula. However, no nontrivial quantitative estimates of the arising quadratic covariation
processes have appeared in the literature, to the best of our knowledge.
One area where such estimates are naturally needed is small random perturbations of dynamical systems. Often,
in the course of a study of a stochastic system one has to make a simplifying change of coordinates, transforming the system locally to a simpler one. If the transformation map
is $C^2$, then one can apply the classical It\^o calculus and easily control the It\^o correction term. However, there are situations where
a natural change of variables is less regular than $C^2$, and in these cases there is no readily available tool that could be used to control
the generalized It\^o correction.
The goal of this paper is to close this gap and provide quantitative estimates on the generalized It\^o
correction term under nonclassical assumptions on the transformation.
Let us now be more precise. Let $W$ be a standard $1$-dimensional Wiener process on a complete probability space $(\Omega,\Sigma,\mathbf{P})$ and $\varepsilon>0$ be a constant.
If $g: \mathbb{R} \to \mathbb{R}$ is $C^2$, then the classical It\^o formula is (see~\cite[Section II.7]{ProtterLibro})
\[
g(\varepsilon W(t))-g(0)=\varepsilon \int_0^t g'(\varepsilon W(s))dW(s)+\frac{\varepsilon^2}{2}\int_0^t g''(\varepsilon W(s))ds.
\]
Introducing $f=g'\in C^1$, we can also rewrite the second term in the r.h.s.\ as quadratic covariation between $f(\varepsilon W)$ and $\varepsilon W$:
for $Q_\varepsilon(t)= [f(\varepsilon W), \varepsilon W](t)$, we have
\begin{equation*} \label{eqn: quad-C1}
Q_\varepsilon(t)=\varepsilon^2 \int_0^t f' (\varepsilon W(s) )ds,\quad t\ge 0.
\end{equation*}
In particular, for any $T>0$, $\varepsilon^{-1} \sup_{ t \leq T} Q_\varepsilon(t) \to 0$ in probability as $\varepsilon \to 0$. In this paper we show that this convergence still holds in the case in which $f$ is not differentiable.
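As a purely numerical illustration (not used in the proofs), the discrete-sum approximation of the quadratic covariation can be sampled by Monte Carlo to observe the scaling in $\varepsilon$; the choice of $f$ and all numerical parameters below are placeholders.
\begin{verbatim}
# Hedged Monte Carlo sketch: discrete approximation of
# Q_eps(t) = [f(eps*W), eps*W](t) and of sup_{t <= T} |Q_eps(t)|.
import numpy as np

def sup_quad_covariation(f, eps, T=1.0, n_steps=10000, n_paths=200, seed=0):
    rng = np.random.default_rng(seed)
    dt = T / n_steps
    dW = rng.normal(0.0, np.sqrt(dt), size=(n_paths, n_steps))
    W = np.concatenate([np.zeros((n_paths, 1)), np.cumsum(dW, axis=1)], axis=1)
    dF = np.diff(f(eps * W), axis=1)          # increments of f(eps*W)
    Q = np.cumsum(dF * eps * dW, axis=1)      # partial sums approximating Q_eps
    return np.abs(Q).max(axis=1)              # one value of sup_t |Q_eps| per path

# e.g. Hoelder-type example:
# sup_quad_covariation(lambda x: np.abs(x) ** 0.5, eps=0.01).mean()
\end{verbatim}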
The motivation for this problem comes from small random perturbations of dynamical systems. Suppose that $b$ is a vector field with a critical point at $x^*$ and let $S$ denote the flow generated by $b$: \[
\frac{d}{dt}S^t x = b(S^t x), \quad S^0 x = x.
\]
It is well known (see Section 2.8 of \cite{Perko}) that there is a continuous change of variables $g$ so that locally around $g(x^*)$ the flow $g(S^tx)$ behaves like the linearized version of $S$. In the small random perturbation case, this, combined with the traditional It\^o formula, implies (see, e.g.,~\cite{nhn},~\cite{MioNonl}) that if $g$ is at least $C^2$, then the system
\[
dX_\varepsilon (t) = b(X_\varepsilon (t) ) dt + \varepsilon dW(t), \quad X_\varepsilon (0) = x_0,
\]
could be analyzed by working with the linear system
\[
d \tilde X_\varepsilon (t) = \left( A \tilde X_\varepsilon (t) + \frac{\varepsilon^2}{2} \Phi_\varepsilon (X^\varepsilon (t) ) \right) dt + \varepsilon \sigma (X_\varepsilon (t) ) dW(t), \quad \tilde X_\varepsilon (0) = g(x_0),
\]
where $x_0$ is close enough to $x^*$, $A$ is the Jacobian of $b$ at $x^*$, $\sigma$ is at least a continuous matrix valued function, and $\varepsilon^2 \Phi_\varepsilon$ is the term corresponding to the quadratic covariation between $g^\prime (X_\varepsilon)$ and $X_\varepsilon$. There are well established cases for which $g$ is known to be $C^1$, see e.g. the Hartman theorem in Section 2.8 of~\cite{Perko}. In these cases, an already known $C^1$ formulation of It\^o's formula implies a similar analogy between the non-linear and linear systems. Hence, estimates showing that in these cases the quadratic covariation term decays faster than the It\^o term allow one to reduce the local analysis to a simpler exit problem for Ornstein-Uhlenbeck processes.
The analysis of the quadratic covariation $[g'(X),X]$ in connection
with extensions of It\^o's formula for functions $g\notin C^2$ is fundamental for nonsmooth
It\^o calculus, see~\cite{EisenbaumLocal},~\cite{EisenbaumChapter},~\cite{FollmerQuad},~\cite{ItoC1},~\cite{ItoCov}.
In~\cite{FollmerQuad},~\cite{ItoC1},~\cite{ItoCov} methods from backward stochastic calculus were used (see also the summary~\cite{RussoCov}),
while in ~\cite{EisenbaumLocal},~\cite{EisenbaumChapter} a local time approach was used.
The basic result that has been explained
in the cited literature from several points of view is that for $T>0$,
\begin{equation} \label{eqn: dif_intro}
Q_\varepsilon (t) = - \varepsilon \int_0^t f(\varepsilon W(s) )dW(s) - \varepsilon \int_{T-t}^T f( \varepsilon W(T-s) ) dW(T-s),
\end{equation}
where both integrals can be understood as It\^o integrals w.r.t.\ appropriate filtrations. It is well known~\cite[page 389]{ProtterLibro}
that the integral with respect to $W(T-\cdot)$ in~\eqref{eqn: dif_intro} is the time reversal
of a semimartingale w.r.t.\ the natural filtration of $W(T-\cdot)$. Here the time reversal (with respect to $T>0$)
of a process $X$ is understood as $X(T-t)-X(T)$.
In this paper we exploit this structure by constructing an approximation scheme for $Q_\varepsilon$
and using martingale techniques to show its consistency. As far as we know, this is the first attempt to use such a scheme in small noise analysis.
See~\cite{ValloisQuad} for a related but different scheme for local time approximation in the case $\varepsilon=1$.
The text is organized as follows. In Section~\ref{sec: Prelim} we state our main results that include the martingale representation for the quadratic covariation, and, in Section~\ref{sec: Saddle}, the results related to the application of non-smooth calculus to a particular small noise problem. In Section~\ref{sec: smal_quad} we use the martingale representation to propose an approximation scheme that we then use to
prove the key bound that the main results depend upon. The proofs of the main theorems
are given in Section~\ref{sec: proof_thm}. In Section~\ref{sec: add_proof} proofs of auxiliary lemmas are given.
\section{Main results} \label{sec: Prelim}
We are going to study $Q_\varepsilon(t)= [f(\varepsilon W), \varepsilon W](t)$ assuming that $f:\mathbb{R} \to \mathbb{R}$ is a bounded and uniformly H\"older or Lipschitz function, although
these assumptions on $f$ can be relaxed.
It is convenient to formulate these assumptions in terms of modulus of continuity defined by:
\[
{ \rm osc}_f(\delta)=\sup_{ |t-s|< \delta} | f(s) - f(t) |,\quad \delta>0.
\]
Throughout the text, we work with an arbitrary fixed number $T>0$. We will not be explicit when including the
dependency on $T>0$ in the notation. We are ready to state the main results of the text.
\begin{theorem} \label{thm: Main}
Suppose ${ \rm osc}_f(\delta) \le C_f \delta^\alpha$ for some $\alpha \in (0,1)$, $C_f >0$, and all sufficiently small $\delta$. Then,
for every $\delta>0$, $\gamma \in (0,\alpha)$, and $\mu\in(\gamma,\alpha)$, there are constants $\varepsilon_{\delta,\mu}>0$ and $C_{\delta,\mu}>0$ such that
\[
\mathbf{P} \left\{ \varepsilon^{-(1+\gamma)} \sup_{t \leq T} |Q_\varepsilon (t)| > \delta \right \} \leq C_{\delta,\mu} \varepsilon^{ 2 (\alpha - \mu)/(1-\alpha) },\quad \varepsilon \in (0, \varepsilon_{\delta,\mu} ).\]
In particular, for any $\gamma\in (0,\alpha)$,
\[
\varepsilon^{-(1+\gamma)} \sup_{t \leq T} |Q_\varepsilon (t)| \stackrel{\mathbf{P}}{\to} 0,\quad\varepsilon\to 0.
\]
\end{theorem}
This result is stronger than our initial claim that $\varepsilon^{-1}Q_\varepsilon\to 0$. Moreover, if $\alpha$ is close to $1$, the exponent $1+\gamma$ can be chosen to be close to $2$.
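To illustrate the interplay between the parameters (the following numerical values are chosen purely for illustration), take $\alpha=3/4$, $\gamma=1/2$ and $\mu=5/8$. Theorem~\ref{thm: Main} then gives, for every $\delta>0$,
\[
\mathbf{P} \left\{ \varepsilon^{-3/2} \sup_{t \leq T} |Q_\varepsilon (t)| > \delta \right \} \leq C_{\delta,\mu}\, \varepsilon^{2(3/4-5/8)/(1-3/4)} = C_{\delta,\mu}\, \varepsilon
\]
for $\varepsilon$ small enough; note that the exponent $2(\alpha-\mu)/(1-\alpha)$ improves as $\mu$ is taken closer to $\gamma$.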
The method we employ to prove this theorem produces the following estimate in the Lipschitz case where $\alpha=1$:
\begin{theorem} \label{thm: Main_Lip}
Suppose ${ \rm osc}_f(\delta) \le C_f \delta$, for some constant $C_f >0$ and sufficiently small $\delta>0$. Then, for every $\delta>0$,
$\gamma \in (0,1)$, and $\mu\in(\gamma,1)$, there are constants $\varepsilon_{\delta,\mu}>0$ and $C_{\delta,\mu}>0$ such that
\[
\mathbf{P} \left\{ \varepsilon^{-(1+\gamma)} \sup_{t \leq T} |Q_\varepsilon (t)| > \delta \right \} \leq C_{\delta,\mu}
e^{ - \varepsilon^{ - (1 - \mu) } },\quad \varepsilon\in (0,\varepsilon_{\delta,\mu}).
\]
\end{theorem}
This theorem establishes that the rate of decay in probability is exponential in the Lipschitz case, which is consistent with the differentiable case, in which almost sure convergence holds. For the H\"older case, the method only yields a polynomial upper bound, which in principle does not rule out an exponential convergence rate.
The proof of Theorems~\ref{thm: Main} and~\ref{thm: Main_Lip} will be given in Section~\ref{sec: proof_thm}. An important part of the analysis is
Theorem~\ref{thm: MainII} given in Section~\ref{sec: smal_quad} and in principle one can
apply that result and its possible extensions to less regular functions $f$.
The proof of Theorem~\ref{thm: MainII} is in turn
based on a forward-backward martingale representation of the quadratic covariation that we explain in Section~\ref{sec: FB}. For now, we proceed to explain an application of the above results to a small noise problem studied in~\cite{MioNonl},~\cite{Bakhtin-SPA},~\cite{nhn},~\cite{Day}, and~\cite{Kifer}.
\subsection{Applications to the Small Noise Problem.}
\label{sec: Saddle}
In this section we consider the small noise escape from a saddle problem which, to the best of our knowledge, was first studied in~\cite{Kifer}. The objective of the section is to establish the role that Theorems~\ref{thm: Main} and~\ref{thm: Main_Lip} have in the study of this problem. Let us start with the statement of the problem.
Consider a vector field $b:\mathbb{R}^d \to \mathbb{R}^d$, and a domain (open, bounded and convex set) $U \subset \mathbb{R}^d$ such that $0 \in U$ is the only critical point of $b$ in the closure of $U$. That is, $0 \in U$ is the only $x\in \bar{U}$ such that $b(x)=0$. Further, suppose that the vector field $b$ is such that its Jacobian at $0$, $A=Db(0)$, has at least one eigenvalue with positive real part, and one eigenvalue with negative real part. Under these conditions, consider the flow $S$ generated by $b$:
\[
\frac{d}{dt}S^tx = b( S^tx), \quad S^0x = x,
\]
and its small noise perturbation,
\[
dX_\varepsilon (t) = b( X_\varepsilon (t) )dt + \varepsilon dW(t).
\]
The escape from a saddle problem is the study of the asymptotic behavior of the exit time
\[
\tau_\varepsilon(x) = \inf \left\{t>0: X_\varepsilon(t) \in \partial U \right\}, \quad x \in U,
\]
and the exit location $X_\varepsilon ( \tau_\varepsilon (x) )$. The case of interest for this problem is when the initial condition for the diffusion $X_\varepsilon(0)$ lies in the invariant stable manifold $$\mathcal{M}^s = \left \{ x: S^t x \to 0, \text{ as } t \to \infty \right \}.$$
The problem was first solved using a PDE approach in~\cite{Kifer}. In that paper, it is shown that the exit time is asymptotically logarithmic in $\varepsilon$ and that the exit location is concentrated on the intersection of $\partial U$ and the invariant unstable manifold $$\partial U \cap \mathcal{M}^u = \partial U \cap \left \{ x: S^t x \to 0, \text{ as } t \to -\infty \right \}.$$
Later,~\cite{Day} refined the result of the exit distribution in two dimensions, and further refinements were made in higher dimensions in~\cite{Bakhtin-SPA}.
In~\cite{nhn} a further generalization of the exit location result was obtained, using the idea mentioned in the introduction of this paper. This result was later iterated to obtain the first result for a heteroclinic network, which is a more general setting than the simple saddle case. The argument in~\cite{nhn} is as follows. It is well known (see Section 2.8 of \cite{Perko}) that there is a continuous change of variables $h$ so that locally around $h(0)$ the flow $h(S^tx)$ behaves like the linearized version of $S$. For $X_\varepsilon$, the traditional It\^o formula implies (see, e.g.\ \cite{nhn},~\cite{MioNonl}) that if $h$ is at least $C^2$, then $\tilde X_\varepsilon = h(X_\varepsilon)$ satisfies
\[
d \tilde X_\varepsilon (t) = \left( A \tilde X_\varepsilon (t) + \frac{\varepsilon^2}{2} \Phi_\varepsilon ( \tilde X_\varepsilon (t) ) \right) dt + \varepsilon \sigma (X_\varepsilon (t) ) dW(t), \quad \tilde X_\varepsilon (0) = h(X_\varepsilon(0) ),
\]
where $x_0 = X_\varepsilon(0)$ is close to $0$, $\sigma$ is at least a continuous matrix valued function, and $\varepsilon^2 \Phi_\varepsilon ( \tilde X_\varepsilon (t) ) $ is the quadratic covariation term between the derivative of $h$ evaluated at $X_\varepsilon$ and $X_\varepsilon$ itself. Under the assumption that $h \in C^2$, the above converges to $0$ faster than the noise and hence it has no effect on the computation of the exit location. The limitation of this method is that the assumption $h \in C^2$ is quite restrictive. In~\cite{MioNonl} this restriction was studied by classifying systems that do not admit such a transformation $h$ in the $C^2$ class. The results of~\cite{nhn} were extended in~\cite{MioNonl} in the two dimensional setting: the change of coordinates $h$ transforms $X_\varepsilon$ to a specific polynomial drift SDE in two dimensions which is then solved. In~\cite{MioNonl} it is also shown that this approach cannot immediately be generalized to the high dimensional case. As a consequence, in this paper, we attack the high dimensional case by following the approach proposed in~\cite{nhn} but by allowing the transformation $h$ to be non-smooth.
We focus on a particular case to keep the exposition manageable. The novelty lies in the assumption on the smoothness of the change of coordinates, which is the main focus of the paper. The proof is a rearrangement of the main facts covered in the body of the paper, and it is presented in Section~\ref{sec: ProofThmSmallNoise}. The theorem is stated in the spirit of Theorem 1 of~\cite{MioNonl}.
\begin{theorem} \label{thm: SmallNoise}
Suppose that $A$ has real simple eigenvalues $\lambda_1,...,\lambda_d$ such that $ \lambda_1 > ...> \lambda_{\nu-1} > 0 > \lambda_\nu >... > \lambda_d$ for some integer $\nu \leq d$. Also, assume that $h:U \to \mathbb{R}^d$ is a differentiable function with differentiable inverse, such that all its partial derivatives satisfy the conditions of Theorem~\ref{thm: Main} with $\alpha > 1/2$, and that $h(S^tx) = e^{At}h(x)$ in $U= (-\Delta, \Delta)^d$.
Denote $\partial U \cap \mathcal{M}^u = \{q_-, q_+\}$, and assume that $X_\varepsilon(0) = x_0 \in \mathcal{M}^s \cap U$. Then, there is a family of random vectors $( \phi_\varepsilon )_{\varepsilon >0}$, a family of random variables $( \psi_\varepsilon )_{ \varepsilon > 0 }$, and a number
\[
\beta = \left\{
\begin{array}{ll}
1, & \mbox{if } \nu = 2 \text{ and } -\lambda_\nu \geq \lambda_1, \\
-\frac{\lambda_\nu}{\lambda_1}, & \mbox{if } \nu = 2 \text{ and } -\lambda_\nu < \lambda_1, \\
1 - \frac{\lambda_1}{\lambda_2}, & \mbox{if } \nu > 2 \text{ and } -\lambda_\nu \geq \lambda_1 - \lambda_2, \\
-\frac{\lambda_\nu}{\lambda_1}, & \mbox{if } \nu > 2 \text{ and } -\lambda_\nu < \lambda_1 - \lambda_2, \\
\end{array}
\right.
\]
such that $X_\varepsilon( \tau_\varepsilon ) = h( \Delta q_{ { \rm sgn } \psi_\varepsilon} ) + \varepsilon^\beta \phi_\varepsilon$, and the random vector
\[
\left( \psi_\varepsilon, \phi_\varepsilon, \tau_\varepsilon + \frac{1}{\lambda_1} \log \varepsilon \right)
\]
converges in distribution as $\varepsilon \to 0$.
\end{theorem}
\begin{remark} The result is not a direct consequence of the results in this paper since, as will become clear at the beginning of Section~\ref{sec: ProofThmSmallNoise}, it requires a high dimensional version of the quadratic covariation estimates for processes with drift. We will see, however, that the proof of this result follows almost line by line the proofs of the main results of this paper.
\end{remark}
\subsection{Forward-Backward Martingale Representation.} \label{sec: FB}
The proof of Theorem~\ref{thm: MainII} is based on a forward-backward martingale representation of the quadratic covariation. The focus of this section is to explain this representation as the basis for the full proof of Theorem~\ref{thm: MainII} given in the next section. In order to do so, we need some conventions on our notation, which we state as a definition:
\begin{definition}
The time reversal of a process $X=( X(t) )_{ t\geq 0 }$ with respect to $T>0$ is defined by
\[
\bar{X}(t)=X(T-t)-X(T), \quad t\in[0,T].
\]
Likewise, the backward of $X$ with respect to $T>0$ is defined by
\[\hat X(t) = X(T-t), \quad t \in [0,T].\]
\end{definition}
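As a simple illustration of these notions (a standard fact, recorded here for the reader's convenience), if $W$ is a standard Brownian Motion on $[0,T]$, then $\hat W(t)=\bar W(t)+W(T)$ and the time reversal $\bar W$ is itself a standard Brownian Motion with respect to its own natural filtration: it is a centered Gaussian process with continuous paths, $\bar W(0)=0$, and, for $0\le s\le t\le T$,
\[
\mathbf{E}\big[\bar W(s)\bar W(t)\big]=\mathbf{E}\big[(W(T-s)-W(T))(W(T-t)-W(T))\big]=(T-t)-(T-s)-(T-t)+T=s.
\]
The subtle object is therefore the backward process $\hat W$, whose semimartingale structure with respect to an enlarged filtration is described in Theorem~\ref{thm: Main_Mart} below.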
The starting point is the representation for $L_\varepsilon=\varepsilon^{-1} Q_\varepsilon$ implied by~\eqref{eqn: quad-C1}. For any $T>0$,
\begin{equation} \label{eqn: quad-int}
L_\varepsilon (t) = - \int_0^t f( \varepsilon W(s) ) dW(s) -\int_{T-t}^T f( \varepsilon \hat{W} (s) ) d\hat{W}(s), \quad t\in[0,T].
\end{equation}
We will find a convenient way to rewrite this expression using an enlargement of filtration approach. Denoting the natural filtration of a process $X=(X_t)_{t\ge 0}$ by
$\mathcal{F}^X=(\mathcal{F}_t^X)_{t\ge 0}$, we note that the integral with respect to $W$ in~\eqref{eqn: quad-int} is an
$\mathcal{F}^W$ martingale,
while the integral with respect to $\hat W$ is the time reversal of the $\mathcal{F}^{ \hat W }$ semimartingale
\[
N_\varepsilon(t)= \int_0^t f( \varepsilon \hat{W}(s) ) d \hat {W}(s).
\]
Therefore, one of the terms in~\eqref{eqn: quad-int} is a martingale, while the other one has a
nontrivial drift component. The following result reveals the structure of this time reversal.
\begin{theorem} \label{thm: Main_Mart}
Let $\mathcal{G}= ( \mathcal{G}_t )_{ t \in [0,T] }$ be the minimal filtration such that $W(T)$ is $\mathcal{G}_0$
measurable
and $\mathcal{F}^{ \hat W}_t \subset \mathcal{G}_t$. Then, $\hat W$ is a $\mathcal{G}$ semimartingale with Doob--Meyer
decomposition given by
\begin{equation} \label{eqn: Doob_Meyer}
\hat{W}(t)=W(T)-\int_0^t \frac{ \hat W(s) } {T-s} ds + \beta(t),
\end{equation}
for some Brownian Motion $\beta$ with respect to $\mathcal{G}$.
Moreover, if $\mathcal{H}=( \mathcal{H}_t )_{ t \in [0,T] }$ is the minimal complete
filtration such that $W(T)$ is $\mathcal{H}_0$ measurable and $\mathcal{F}^\beta_t \subset \mathcal{H}_t$, then $\beta$
is an $\mathcal{H}$ Brownian Motion, $\hat W$ is an $\mathcal{H}$ semimartingale with the Doob--Meyer
decomposition~\eqref{eqn: Doob_Meyer} and $\hat W$ can be written as
\begin{equation} \label{eqn: What_sol}
\hat W (t) = W(T) ( 1 - t/T ) + ( T- t) \int_0^t \frac{ d \beta (s) }{ T-s }, \quad t \in [0,T].
\end{equation}
\end{theorem}
\begin{proof}
The result follows from~\cite[Theorem VI.3]{ProtterLibro}.
\end{proof}
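As an elementary consistency check (included only for the reader's convenience), formula~\eqref{eqn: What_sol} is compatible with the decomposition~\eqref{eqn: Doob_Meyer}. Indeed, dividing~\eqref{eqn: What_sol} by $T-t$ gives $\frac{\hat W(t)}{T-t}=\frac{W(T)}{T}+\int_0^t\frac{d\beta(s)}{T-s}$, and differentiating~\eqref{eqn: What_sol} yields
\[
d\hat W(t)=-\frac{W(T)}{T}\,dt-\left(\int_0^t\frac{d\beta(s)}{T-s}\right)dt+d\beta(t)=-\frac{\hat W(t)}{T-t}\,dt+d\beta(t),
\]
so that integrating from $0$ to $t$ and using $\hat W(0)=W(T)$ recovers~\eqref{eqn: Doob_Meyer}.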
\begin{remark} \rm In particular, since $\hat W$ is $\mathcal{H}$ adapted, for every function $F:\mathbb{R} \to \mathbb{R}$ such that
\[
\mathbf{E} \int_0^T F(\hat W (s) )^2 ds< \infty,
\]
the process $t \mapsto \int_{ T-t}^T F(\hat W(s) ) d\beta (s)$ is the time reversal of a martingale.
\end{remark}
Using Theorem~\ref{thm: Main_Mart}, we can obtain a representation for $L_\varepsilon=\varepsilon^{-1} Q_\varepsilon$. This is given in the following:
\begin{corollary} \label{cor: L}
Let $\mathcal{H}$ be as in Theorem~\ref{thm: Main_Mart}. Then, the process $L_\varepsilon= \varepsilon^{-1} Q_\varepsilon$ can be written as
\begin{equation} \label{eqn: L_rep}
L_\varepsilon(t)= -\int_0^t f(\varepsilon W(s) )dW(s) - \int_{T-t}^T f( \varepsilon \hat W(s) ) d\beta(s) + \int_0^t f( \varepsilon W(s) ) \frac{W(s)}{s}ds,
\end{equation}
which is the sum of an $\mathcal{F}^W$ martingale, the time reversal of an $\mathcal{H}$ martingale and a bounded variation
term.
\end{corollary}
\begin{proof}
This is an immediate consequence of Theorem~\ref{thm: Main_Mart} and~\eqref{eqn: quad-int} since $W(t)/t$ is integrable on the
interval $[0,T]$ and $f$ is bounded.
\end{proof}
Theorem~\ref{thm: Main_Mart} is the main element we need to propose our approximation scheme, which is the main focus of Section~\ref{sec: smal_quad}.
\section{Small Noise Analysis of Quadratic Covariation.} \label{sec: smal_quad}
In this section we study the quadratic covariation process $L_\varepsilon = [f( \varepsilon W),W]$. Recall the representation~\eqref{eqn: L_rep} given in Corollary~\ref{cor: L}. This will be one of the main ingredients in our proof.
Throughout this section, let $( n_{\varepsilon } )_{\varepsilon >0}$ be integers such that $n_\varepsilon \nearrow \infty$ as $\varepsilon \to 0$. Let us define $( \delta_{\varepsilon } )_{\varepsilon >0}$ by $\delta_\varepsilon = T / n_\varepsilon$, and observe that $\delta_\varepsilon \searrow 0$ as $\varepsilon \to 0$. The main result of this section is the following:
\begin{theorem} \label{thm: MainII}
Let $q_\varepsilon=2\sqrt{ \delta_\varepsilon | \log \delta_\varepsilon |}$, and let $(\gamma_\varepsilon)_{ \varepsilon >0 }$ satisfy $\gamma_\varepsilon
\to 0$ and
\begin{equation*}
| \log \delta_\varepsilon|\frac{ {\rm osc}_f ( \varepsilon q_\varepsilon ) }{ q_\varepsilon \gamma_\varepsilon } \to 0, \quad \varepsilon \to 0.
\end{equation*}
Then, there are positive constants $K_1, K_2, K_3$ and $\varepsilon_0$ such that
\begin{align*}
\mathbf{P} \left\{ \varepsilon^{-1} \sup_{t \leq T} |Q_\varepsilon (t)| > \gamma_\varepsilon \right \} &\leq K_1 \gamma_\varepsilon^{-1} e^ {- K_2 \gamma_\varepsilon^2 /{\rm osc}_f ( \varepsilon q_\varepsilon)^2 } + K_3 \delta_\varepsilon , \quad \varepsilon \in (0,\varepsilon_0 ).
\end{align*}
\end{theorem}
The idea of the proof is to start with the representation~\eqref{eqn: dif_intro} and use Theorem~\ref{thm: Main_Mart}
to prove an approximation to each integral by a sum of increments. The result will follow once we combine the
approximating sum for each integral into one. We will devote the rest of this section to developing this idea.
\subsection{Approximating Processes} \label{sec: Approx}
Let $P_{\varepsilon }$ be the partition of the interval $[0,T]$ given
by points $0=s_{0}<...<s_{n_{\varepsilon }}=T$, where $s_i = i \delta_{\varepsilon }$, for $i=0,...,n_{\varepsilon }$. Also, define the backward
partition $\hat{P}_{\varepsilon }$ to be the partition of $[0,T]$
given by points $0= t_{0}<...<t_{n_{\varepsilon }}=T$, where $t_{i}=T-s_{n_{\varepsilon }-i}$.
For an arbitrary process $Y$ and times $ s,t \in [0,T]$ let $\Delta _{t,s}Y=Y(t)-Y(s)$. Then, for $t \in [0,T]$ we
introduce the following notation:
\begin{align}
S_{\varepsilon }(t) &=\int_{0}^{t}f( \varepsilon W (s) )dW (s),
\label{eqn: S_def} \\
\hat{S}_{\varepsilon }(t) &=\int_{T-t}^{T}f( \varepsilon \hat W (s) )d \hat{W}(s), \label{eqn: S_hat_def} \\
J_{\varepsilon }(t) &=\sum_{i=1}^{i(t)} f( \varepsilon W(s_{i-1}))\Delta_{s_{i},s_{i-1}} W, \label{eqn: J_def} \\
\hat{J}_{\varepsilon }(t) &=\sum_{i=1}^{ i(t) } f( \varepsilon W(s_{i}))\Delta_{s_{i},s_{i-1}} W, \label{eqn: J_hat_def}
\end{align}
where $i(t)$ is given by
\[
i(t)=\min \left \{ j \in [0,n_\varepsilon] \cap \mathbb{Z} : s_j \geq t \right \}.
\]
The idea is to approximate each element $S_\varepsilon$ and $\hat S_\varepsilon$ with $J_\varepsilon$ and $\hat J_\varepsilon$ respectively, so we can approximate $L_\varepsilon$ by $L_{\varepsilon,P_\varepsilon}=\hat{J}_\varepsilon -J_\varepsilon$. Note that since
\begin{equation*}
f( \varepsilon W(s_{i}))\Delta _{s_{i},s_{i-1}} W =- f( \varepsilon \hat{W}(t_{n_\varepsilon-i}) ) \Delta _{t_{n_{\varepsilon }-i+1},t_{n_{\varepsilon }-i}} \hat{W},
\end{equation*}
after reordering the sum in~\eqref{eqn: J_hat_def}, we can rewrite $\hat{J}_\varepsilon $ as
\begin{equation}
\hat{J}_{\varepsilon }(t)=-\sum_{i=n_\varepsilon - i(t) }^{n_\varepsilon-1} f( \varepsilon \hat{W}(t_i) ) \Delta _{t_{i+1},t_i } \hat W, \label{eqn: J_hat_reversed}
\end{equation}
which is an integral sum of the It\^o integral $\hat{S}_\varepsilon$. We will use Theorem~\ref{thm: Main_Mart} to justify the application of martingale techniques to prove that $J_\varepsilon$ approximates $S_\varepsilon$ and that $\hat{J}_\varepsilon$ approximates $\hat{S}_\varepsilon$.
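The identity used in this reordering can be checked directly from the definition of the two partitions: since $t_j=T-s_{n_\varepsilon-j}$, we have $t_{n_\varepsilon-i}=T-s_i$ and hence $\hat W(t_{n_\varepsilon-i})=W(T-t_{n_\varepsilon-i})=W(s_i)$, while
\[
\Delta_{t_{n_\varepsilon-i+1},t_{n_\varepsilon-i}}\hat W=W(T-t_{n_\varepsilon-i+1})-W(T-t_{n_\varepsilon-i})=W(s_{i-1})-W(s_i)=-\Delta_{s_i,s_{i-1}}W,
\]
which gives the term-by-term identification between the sums in~\eqref{eqn: J_hat_def} and~\eqref{eqn: J_hat_reversed}.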
Once we have an approximation of $L_\varepsilon$ by $L_{\varepsilon, P_\varepsilon }$, we notice that
\begin{align}
L_{\varepsilon, P_\varepsilon} (t) &= \sum_{i=1}^{ i(t) }\Delta
_{s_{i},s_{i-1}} \left( f( \varepsilon W) \right) \Delta
_{s_{i},s_{i-1}}W . \label{eqn: Q_eps_Peps_def}
\end{align}
The differences in $f$ in the above expression will be used to prove that $L_{\varepsilon, P_\varepsilon } (t)$ converges to $0$ uniformly in probability and get the result.
We start with some preliminary results. The proofs will be postponed until Section~\ref{sec: add_proof} in order not to interrupt the flow of the paper. We state the next general lemma.
\begin{lemma} \label{Lemma: Martingale_general}
Let $\left( M_\varepsilon \right)_{\varepsilon > 0}$ be a family
of martingales such that for every $\varepsilon>0$, $M_\varepsilon (0)=0$, the quadratic variation
$\langle M_\varepsilon \rangle$ is absolutely continuous with respect to Lebesgue measure, and $ \left\langle
M_{\varepsilon}\right\rangle (T) \leq r_{\varepsilon }$. Then, for any $\delta >0$,
\begin{equation*}
\mathbf{P}\left\{ \sup_{t\leq T}|M_{\varepsilon }(t)|>\delta \right\}
< \sqrt{ \frac{8 r_\varepsilon}{ \pi \delta^{2} }}e^ { -\delta ^{2}/ ( 2 r_\varepsilon )} .
\end{equation*}
\end{lemma}
We give a slight generalization of L\'evy's modulus of continuity lemma:
\begin{lemma} \label{lemma: Levy}
For a Brownian motion $B$, define the modulus of continuity with respect to partition $P_\varepsilon$ by
\begin{equation} \label{eqn: Levy}
\delta_{B,\varepsilon} = \max_{i=1,...,n_\varepsilon} \sup_{s \in [s_{i-1},s_i]} | \Delta_{s,s_{i-1}} B |.
\end{equation}
Then, there is a constant $C>0$ independent of $\varepsilon>0$ such that for any $\delta>0$
\[
\mathbf{P} \left \{ \delta_{B,\varepsilon} > \delta \right \} \leq \frac{C} { \delta\sqrt{\delta_\varepsilon}} e^{ - \delta^2 / ( 2 \delta_\varepsilon ) }.
\]
In particular, there is a $K_2 >0$ such that
\[
\mathbf{P} \left \{ \delta_{B,\varepsilon} > q_\varepsilon \right \} \leq K_2 \delta_\varepsilon, \quad \varepsilon >0.
\]
\end{lemma}
With these two results at hand we are ready to estimate $L_{\varepsilon, P_\varepsilon}$.
\begin{lemma}\label{lemma: cond_osc}
There is a positive constant $K$ such that for any $\delta>0$ and $\varepsilon >0$,
\[
\mathbf{P} \left \{ \sup_{ t \in [0,T] } | L_{\varepsilon,P_\varepsilon}(t) | > \delta \right \}
\leq \mathbf{P} \left \{ | \log \delta_\varepsilon|{\rm osc}_f ( \varepsilon q_\varepsilon ) > \frac{ q_\varepsilon \delta}{4T} \right \}+K \delta_\varepsilon.
\]
\end{lemma}
Of course, the probability in the r.h.s.\ is either $0$ or $1$, and the estimate is meaningful only if the inequality
in the curly brackets is violated.
\begin{proof}
Let us start with the simple inequality
\begin{equation} \label{eqn: quad_sum_simple}
\sup_{ t \in [0,T] } |L_{\varepsilon,P_\varepsilon} (t)| \leq \sum_{i=1}^{n_\varepsilon } \left| \Delta_{s_i,s_{i-1} } f( \varepsilon W) \right | \left | \Delta_{s_i,s_{i-1} } W \right|,
\end{equation}
derived from~\eqref{eqn: Q_eps_Peps_def}. We estimate each term of the sum in the r.h.s.\ of~\eqref{eqn:
quad_sum_simple}.
From definition~\eqref{eqn: Levy} it follows that
\begin{align*}
\max_{ i=1,..,n_\varepsilon} |\Delta _{s_i,s_{i-1}} f(\varepsilon W)| &\leq { \rm osc}_f ( \varepsilon \delta _{W,\varepsilon} ).
\end{align*}
Using this inequality and the definition of $n_\varepsilon$ in~\eqref{eqn: quad_sum_simple}, we see that
\begin{align*} \notag
\sup_{ t \in [0,T] } |L_{\varepsilon,P_\varepsilon} (t)| &\leq n_\varepsilon { \rm osc}_f ( \varepsilon \delta _{W,\varepsilon} ) \delta_{W, \varepsilon} \\
&\leq T \delta_{W, \varepsilon} { \rm osc}_f ( \varepsilon \delta _{W,\varepsilon} ) / \delta_ \varepsilon .
\end{align*}
Hence for every $\delta > 0$ the inequalities
\begin{align} \notag
\mathbf{P} \left \{ \sup_{ t \in [0,T] } | L_{\varepsilon,P_\varepsilon} | > \delta \right \}
& \leq \mathbf{P} \left \{ {\rm osc}_f ( \varepsilon \delta_{W,\varepsilon} ) \delta_{W ,\varepsilon} >\delta_\varepsilon \delta/T ,\ \delta_{W,\varepsilon} \leq q_\varepsilon \right \} + \mathbf{P} \left \{ \delta_{W,\varepsilon} > q_\varepsilon \right \} \\
& \leq \mathbf{P} \left \{ {\rm osc}_f ( \varepsilon q_\varepsilon ) q_\varepsilon > \delta_\varepsilon \delta/ T \right \}+ \mathbf{P} \left \{ \delta_{W,\varepsilon} > q_\varepsilon \right \}
\label{eqn: Q_bound}
\end{align}
hold. The second term in the r.h.s. of~\eqref{eqn: Q_bound} can be bounded using Lemma~\ref{lemma: Levy}, so we focus on
the first term. For this notice that
\[
{\rm osc}_f ( \varepsilon q_\varepsilon ) q_\varepsilon / \delta_\varepsilon = 4 | \log \delta_\varepsilon | {\rm osc}_f ( \varepsilon q_\varepsilon )/ q_\varepsilon,
\]
which implies that
\begin{align} \notag
\mathbf{P} \left \{ {\rm osc}_f ( \varepsilon q_\varepsilon ) q_\varepsilon > \delta_\varepsilon \delta/ T \right \} \leq \mathbf{P} \left \{ | \log \delta_\varepsilon|{\rm osc}_f ( \varepsilon q_\varepsilon ) > q_\varepsilon \delta/(4T) \right \} .
\end{align}
The result follows after combining this fact with~\eqref{eqn: Q_bound} and Lemma~\ref{lemma: Levy}.
\end{proof}
\subsection{Approximation of $L_\varepsilon$ by $L_{\varepsilon,P_\varepsilon}$} \label{sec: approx_proof}
We have shown that $L_{\varepsilon,P_\varepsilon}$ converges to $0$. In order to prove the convergence of $L_\varepsilon$ we need to prove that $L_{\varepsilon,P_\varepsilon}$ approximates $L_\varepsilon$.
In order to do so define
\begin{equation*}
M_{\varepsilon }(t):=S_{\varepsilon }(t)-J_{\varepsilon }(t)+f(\varepsilon W(s_{i(t)-1}))\Delta_{s_{i(t)},t} W,
\end{equation*}
and
\[
\hat{M}_{\varepsilon }(t):=\hat{S}_{\varepsilon }(t)+ \hat{J}_{\varepsilon }(t)+f(\varepsilon \hat{W}(t_{n_\varepsilon - i(t)}))\Delta_{T-t,\,t_{n_\varepsilon -i(t)}} \hat W.
\]
Using~\eqref{eqn: S_def},~\eqref{eqn: J_def}, and $i(t)$, we see that the process $M_\varepsilon$ can be written as
\[
M_{\varepsilon } (t) =\sum_{i=1}^{ n_\varepsilon }\int_{s_{i-1} \wedge t}^{s_{i} \wedge t}\Delta _{s,s_{i-1}} f( \varepsilon W ) dW(s).
\]
Likewise, using~\eqref{eqn: S_hat_def},~\eqref{eqn: J_hat_def},~\eqref{eqn: J_hat_reversed} and the definition of the points $t_i$, we see that
\begin{align} \notag
\hat{M}_\varepsilon (t) &= \sum_{ i=0 }^{ n_\varepsilon-1 } \int_{ t_i \vee (T-t) }^{ t_{i+1} \vee (T-t) }\Delta _{s,t_i} f( \varepsilon \hat{W} ) d\hat W(s) \\
&= \sum_{ i=0}^{ n_\varepsilon-1} \int_{ t_i \vee (T-t) }^{ t_{i+1} \vee (T-t) }\Delta _{s,t_i} f( \varepsilon \hat{W} ) d\beta(s) - A_\varepsilon (t), \label{eqn: M_hat_sum}
\end{align}
where we defined
\begin{equation}
A_\varepsilon (t)=\sum_{ i=0 }^{n_\varepsilon-1} \int_{ t_i \vee (T-t) }^{ t_{i+1} \vee (T-t)}\Delta _{s,t_i} f( \varepsilon \hat{W} ) \frac{ \hat W (s) } { T-s } ds. \label{eqn: A_def}
\end{equation}
Notice that $M_\varepsilon$ is an $\mathcal{F}^W$ martingale and $\hat M_\varepsilon$ is the time reversal of an $\mathcal{F}^\beta$ semimartingale. This is the main fact in the proof of the following Lemma:
\begin{lemma} \label{lemma: Foward_Approx} There are positive constants $K_1,K_2, K_3$ and $\varepsilon_0$ such that for any $\delta >0$,
\begin{align*}
\mathbf{P} \left\{ \sup_{t\leq T}| \tilde{M}_{\varepsilon }(t)|> \delta \right\} &\leq ( K_1 / \delta ) e^{- K_3 \delta^2 / {\rm osc}_f \left( \varepsilon q_\varepsilon \right)^{2}} + K_2 \delta_\varepsilon , \quad \varepsilon \in (0,\varepsilon_0).
\end{align*}
Here $\tilde M_\varepsilon$ can be either $M_\varepsilon$ or $\hat{M}_\varepsilon$.
\end{lemma}
The following lemma will be used in the proof of Lemma~\ref{lemma: Foward_Approx}. The proof is postponed until
Section~\ref{sec: add_proof}.
\begin{lemma} \label{lemma: A}
There are positive constants $K_1, K_2, K_4$, and $\varepsilon_0$ such that for all $\delta>0$,
\[
\mathbf{P} \left \{ \sup_{ t\in (0,T) } | A_\varepsilon (t) | > \delta \right \} \leq ( K_1 /\delta) e^{ - K_4 \delta^2/ {\rm osc}_f ( \varepsilon q_\varepsilon )^2 } + K_2 \delta_\varepsilon, \quad \varepsilon \in (0, \varepsilon_0 ).
\]
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{lemma: Foward_Approx} ]
Let us start with the proof for $M_\varepsilon$. As we said before, the process $M_\varepsilon$ is a martingale with quadratic variation $\Gamma _{\varepsilon }=\left\langle M_{\varepsilon
}\right\rangle $ given by
\begin{equation}
\Gamma _{\varepsilon }(t)= \sum_{i=1}^{n_\varepsilon} \int_{s_{i-1} \wedge t}^{s_{i}\wedge t}|\Delta _{s,s_{i-1}} f(\varepsilon W)|^{2}ds. \label{eqn: Gamma_def}
\end{equation}
In order to apply Lemma~\ref{Lemma: Martingale_general}, we need to find a bound on the (random) function $\Gamma_\varepsilon$. In this case~\eqref{eqn: Levy} implies that
\[
\sup_{ s \in [s_{i-1},s_i]} |\Delta _{s,s_{i-1}}f( \varepsilon W)| \leq {\rm osc}_f ( \varepsilon \delta_{W,\varepsilon} ),
\]
for all $\varepsilon>0$. Using this bound in~\eqref{eqn: Gamma_def} we see that
\begin{equation} \label{eqn: difference_quadratic_2}
\Gamma_\varepsilon (T) \leq T {\rm osc}_f ( \varepsilon \delta_{W,\varepsilon} )^2 .
\end{equation}
Lemma~\ref{Lemma: Martingale_general} implies that
\begin{align} \notag
\mathbf{P} \left\{ \sup_{t\leq T } |M_{\varepsilon }(t)|> \delta \right\} &\leq \mathbf{P} \left\{ \sup_{t\leq T}|M_{\varepsilon }(t)|> \delta, \Gamma_\varepsilon (T) \leq T {\rm osc}_f \left( \varepsilon q_\varepsilon \right)^2 \right\} \\ \notag
& \quad + \mathbf{P} \left \{ \Gamma_\varepsilon (T) > T {\rm osc}_f \left( \varepsilon q_\varepsilon \right)^2 \right \} \\
& \leq \sqrt{8 T \frac{ {\rm osc}_f ( \varepsilon q_\varepsilon)^2 } { \pi \delta^2 } } e^{ - \delta^2 / (2T {\rm osc}_f \left( \varepsilon q_\varepsilon\right)^2 ) } + \mathbf{P} \left \{ \Gamma_\varepsilon (T) > T {\rm osc}_f \left( \varepsilon q_\varepsilon \right)^2 \right \}, \label{eqn: ineq_exp_forward}
\end{align}
for all $\varepsilon >0$ small enough. It remains to estimate the second probability in~\eqref{eqn: ineq_exp_forward}.
Using~\eqref{eqn: difference_quadratic_2} it easily follows that for each $\varepsilon >0$,
\begin{align*}
\mathbf{P} \left \{ \Gamma_\varepsilon (T) > T {\rm osc}_f \left( \varepsilon q_\varepsilon \right)^2 \right \}
& \leq \mathbf{P} \left \{ {\rm osc}_f \left( \varepsilon \delta_{W,\varepsilon} \right) > {\rm osc}_f \left( \varepsilon q_\varepsilon \right)
\right \} \\
& \leq \mathbf{P} \left \{ \delta_{W,\varepsilon} > q_\varepsilon \right \}.
\end{align*}
Lemma~\ref{lemma: Levy} and~\eqref{eqn: ineq_exp_forward} imply the desired estimate for $M_\varepsilon$.
To obtain the estimate on $\hat M_\varepsilon$, we notice that~\eqref{eqn: M_hat_sum} and~\eqref{eqn: A_def} imply
\begin{align*}
\hat M_\varepsilon (T-t) +A_\varepsilon (T-t) &= \sum_{i=0}^{n_\varepsilon-1} \int_{ t_i \vee t }^{ t_{i+1} \vee t}\Delta_{s,t_i} f( \varepsilon \hat W ) d \beta (s)\\
&= \sum_{i=0}^{n_\varepsilon-1} \int_{ t_i }^{ t_{i+1} }\Delta_{s,t_i} f( \varepsilon \hat W ) d \beta (s)- \sum_{i=0}^{n_\varepsilon-1} \int_{ t_i \wedge t }^{ t_{i+1} \wedge t}\Delta_{s,t_i} f( \varepsilon \hat W ) d \beta (s).
\end{align*}
Then, it follows that
\begin{align*}
\sup_{ t \leq T } \left| \hat M_\varepsilon (t) + A_\varepsilon(t) \right| &= \sup_{ t \leq T } \left| \hat M_\varepsilon (T-t) + A_\varepsilon(T-t) \right| \\
& \leq \left| \sum_{i=0}^{n_\varepsilon-1} \int_{ t_i }^{ t_{i+1} }\Delta_{s,t_i} f( \varepsilon \hat W ) d \beta (s) \right| +\sup_{ t \leq T } \left| \sum_{i=0}^{n_\varepsilon-1} \int_{ t_i \wedge t }^{ t_{i+1} \wedge t}\Delta_{s,t_i} f( \varepsilon \hat W ) d \beta (s) \right| \\
&\leq 2 \sup_{ t \leq T } \left| \sum_{i=0}^{n_\varepsilon-1} \int_{ t_i \wedge t }^{ t_{i+1} \wedge t}\Delta_{s,t_i} f( \varepsilon \hat W ) d \beta (s) \right| .
\end{align*}
Using this bound to proceed in the same way as we did for $M_\varepsilon$, we obtain that for any $\delta >0$,
\[
\mathbf{P} \left \{ \sup_{ t \leq T } \left| \hat M_\varepsilon (t) + A_\varepsilon(t) \right| > \delta \right \} \leq ( K_1/ \delta ) e^{- K_3 \delta^2 /{\rm osc}_f \left( \varepsilon q_\varepsilon \right)^2 } + K_2 \delta_\varepsilon,
\]
for all $\varepsilon >0 $ small enough. Since
\[
\mathbf{P} \left \{ \sup_{ t \leq T } \left| \hat M_\varepsilon (t)\right| > \delta \right \} \leq
\mathbf{P} \left \{ \sup_{ t \leq T } \left| \hat M_\varepsilon (t) + A_\varepsilon(t) \right| > \delta/2 \right \}
+ \mathbf{P} \left \{ \sup_{ t \leq T } \left| A_\varepsilon(t) \right| > \delta/2 \right \},
\]
the result follows from Lemma~\ref{lemma: A}.
\end{proof}
A consequence of Lemma~\ref{lemma: Foward_Approx} is the approximation of the quadratic covariation $L_{\varepsilon }=[f(
\varepsilon W), W]$ by $L_{\varepsilon,P_\varepsilon}$, given in the following Lemma:
\begin{lemma} \label{thm: Main_Quad_Brownian} If $( \gamma_\varepsilon)_{ \varepsilon > 0 } $ is such that $\gamma_\varepsilon \to 0$ and ${
\rm osc }_f ( \varepsilon q_\varepsilon ) q_\varepsilon \gamma_\varepsilon^{-1} \to 0$ as $\varepsilon \to 0$, then there are positive constants $K_1,
K_2, K_5,$ and $\varepsilon_0$ such that
\begin{align*}
\mathbf{P}\left\{ \sup_{t\leq T} |L_{\varepsilon }(t)-L_{\varepsilon,P_{\varepsilon }} (t)|> \gamma_\varepsilon \right\} &
\leq K_1 \gamma_\varepsilon^{-1} e^{- K_5 \gamma_\varepsilon^2 / {\rm osc}_f \left( \varepsilon q_\varepsilon \right)^2 } + K_2 \delta_\varepsilon, \quad \varepsilon \in (0,\varepsilon _0).
\end{align*}
\end{lemma}
\begin{proof}
Let $(\gamma_\varepsilon)_{ \varepsilon >0} $ be as in the statement of the Lemma. By the definition of $M_\varepsilon$ and $\hat{M}_\varepsilon$, it follows that
\begin{equation} \label{eqn: bnd_approx}
|L_{\varepsilon }(t)-L_{\varepsilon ,P_{\varepsilon }}(t)|\leq |M_{\varepsilon }(t)|+|\hat{M
}_{\varepsilon }(t)|+|\Delta _{s_{i(t)},s_{i(t)-1}}f( \varepsilon W) \Delta _{s_{i(t)},s_{i(t)-1}}W |.
\end{equation}
The result follows as a consequence of Lemmas~\ref{lemma: Levy} and~\ref{lemma: Foward_Approx}. Indeed, since
\[
|\Delta _{s_{i(t)},s_{i(t)-1}}f( \varepsilon W) \Delta _{s_{i(t)},s_{i(t)-1}}W | \leq {\rm osc }_f ( \varepsilon \delta_{W,\varepsilon} ) \delta_{W,\varepsilon},
\]
Lemma~\ref{lemma: Levy} implies
\begin{align*}
\mathbf{P} \left \{ \left|\Delta _{s_{i(t)},s_{i(t)-1}}f( \varepsilon W) \Delta _{s_{i(t)},s_{i(t)-1}}W \right| > \gamma_\varepsilon \right\} &\leq \mathbf{P} \left \{ {\rm osc }_f ( \varepsilon \delta_{W,\varepsilon} ) \delta_{W,\varepsilon} > \gamma_\varepsilon, \delta_{W,\varepsilon} \leq q_\varepsilon \right \} \\
& \quad + \mathbf{P} \left \{ \delta_{W,\varepsilon} > q_\varepsilon \right \} \\
& \leq \mathbf{P} \left \{ {\rm osc }_f ( \varepsilon q_\varepsilon ) q_\varepsilon > \gamma_\varepsilon \right \} + K_2 \delta_\varepsilon.
\end{align*}
Hence, there is a $\varepsilon_0>0$ such that
\[
\mathbf{P} \left \{ \left|\Delta _{s_{i(t)},s_{i(t)-1}}f( \varepsilon W) \Delta _{s_{i(t)},s_{i(t)-1}}W \right| > \gamma_\varepsilon \right\} \leq K_2 \delta_\varepsilon, \quad \varepsilon \in (0, \varepsilon_0).
\]
Using this bound and Lemma~\ref{lemma: Foward_Approx} in~\eqref{eqn: bnd_approx}, we obtain
\begin{align*}
\mathbf{P}\left\{ \sup_{t\leq T} |L_{\varepsilon }(t)-L_{\varepsilon,P_{\varepsilon }} (t)|> \gamma_\varepsilon \right\} &
\leq K_1\gamma_\varepsilon^{-1} e^{- K_5 \gamma_\varepsilon^2 / {\rm osc}_f \left( \varepsilon q_\varepsilon \right)^2 } + K_2 \delta_\varepsilon,
\quad \varepsilon \in (0, \varepsilon_0 ).
\end{align*}
The proof is finished.
\end{proof}
\section{Proof of Theorems~\ref{thm: Main},~\ref{thm: Main_Lip} and~\ref{thm: MainII}}\label{sec: proof_thm}
\begin{proof}[Proof of Theorem~\ref{thm: MainII}]
The result is a consequence of Lemmas~\ref{lemma: cond_osc} and~\ref{thm: Main_Quad_Brownian}. Indeed, if $( \gamma_\varepsilon)_{ \varepsilon>0}$ is as in the statement of the Theorem, it is immediate to see that
\begin{align} \notag
\mathbf{P} \left\{ \varepsilon^{-1} \sup_{t \leq T} |Q_\varepsilon (t)| > \gamma_\varepsilon \right \} & \leq \mathbf{P}\left\{ \sup_{t\leq T}
|L_{\varepsilon }(t)-L_{\varepsilon,P_{\varepsilon }} (t)| > \gamma_\varepsilon/2 \right\} \\
& \quad + \mathbf{P} \left \{ \sup_{ t \in [0,T] } | L_{\varepsilon,P_\varepsilon}(t) | > \gamma_\varepsilon/2 \right \} \label{eqn: two_prob}.
\end{align}
The result will follow by applying Lemmas~\ref{lemma: cond_osc} and~\ref{thm: Main_Quad_Brownian} to the two terms in r.h.s. of~\eqref{eqn: two_prob}.
First, note that
\[
\eta_\varepsilon = | \log \delta_\varepsilon | \frac{ {\rm osc}_f ( \varepsilon q_\varepsilon) }{ q_\varepsilon \gamma_\varepsilon } \to 0, \quad \varepsilon \to 0,
\]
implies that ${\rm osc}_f ( \varepsilon q_\varepsilon) q_\varepsilon \gamma_\varepsilon^{-1} = 4 \eta_\varepsilon \delta_\varepsilon \to 0$, as $\varepsilon \to 0$. Hence, from Lemma~\ref{thm: Main_Quad_Brownian} we get that for some positive constants $K_1^\prime, K_2, K_5^\prime$ and $\varepsilon_0^\prime$
\begin{align} \label{eqn: two_prob_approx}
\mathbf{P}\left\{ \sup_{t\leq T} |L_{\varepsilon }(t)-L_{\varepsilon,P_{\varepsilon }} (t)|> \gamma_\varepsilon/2 \right\} &
\leq K_1^{\prime} \gamma_\varepsilon^{-1} e^{- K_5^\prime \gamma_\varepsilon^2 / {\rm osc}_f \left( \varepsilon q_\varepsilon \right)^2 } + K_2 \delta_\varepsilon,
\end{align}
for all $ \varepsilon \in (0, \varepsilon_0^\prime )$. Likewise, since $\eta_\varepsilon \to 0$ as $\varepsilon \to 0$, Lemma~\ref{lemma: cond_osc}
implies that for some positive constants $\varepsilon_1$ and $K$,
\begin{equation} \label{eqn: two_prob_conv}
\mathbf{P} \left \{ \sup_{ t \in [0,T] } | L_{\varepsilon,P_\varepsilon}(t) | > \gamma_\varepsilon/2 \right \} \leq K \delta_\varepsilon, \quad \varepsilon \in (0, \varepsilon_1).
\end{equation}
The result follows by using~\eqref{eqn: two_prob_approx} and~\eqref{eqn: two_prob_conv} in~\eqref{eqn: two_prob}.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{thm: Main}]
The proof is a consequence of Theorem~\ref{thm: MainII}. Indeed, let us find a family $(\delta_\varepsilon)_{ \varepsilon >0 }$ such that $\delta_\varepsilon \to 0$ and
\[
\lim_{\varepsilon \to 0 } |\log \delta_\varepsilon | \frac{ {\rm osc}_f ( \varepsilon q_\varepsilon )}{q_\varepsilon } =0.
\]
Let $A(\delta_\varepsilon, \varepsilon)=|\log \delta_\varepsilon | {\rm osc}_f ( \varepsilon q_\varepsilon )q_\varepsilon^{-1}$. A straightforward calculation
gives
\begin{align*}
A(\delta_\varepsilon, \varepsilon)&\leq C_f \varepsilon^\alpha \delta_\varepsilon^{ ( \alpha -1 )/2 } | \log \delta_\varepsilon |^{ ( \alpha + 1 )/2 }.
\end{align*}
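For the reader's convenience, we spell out this straightforward calculation: using ${\rm osc}_f(\varepsilon q_\varepsilon)\le C_f(\varepsilon q_\varepsilon)^\alpha$ (valid for $\varepsilon$ small enough) and $q_\varepsilon=2\sqrt{\delta_\varepsilon|\log\delta_\varepsilon|}$,
\begin{align*}
A(\delta_\varepsilon, \varepsilon) = | \log \delta_\varepsilon |\, \frac{ {\rm osc}_f ( \varepsilon q_\varepsilon ) }{ q_\varepsilon }
&\leq C_f\, \varepsilon^\alpha\, q_\varepsilon^{\alpha-1}\, | \log \delta_\varepsilon | \\
&= C_f\, 2^{\alpha-1}\, \varepsilon^\alpha\, \delta_\varepsilon^{(\alpha-1)/2}\, | \log \delta_\varepsilon |^{(\alpha+1)/2}
\leq C_f\, \varepsilon^\alpha\, \delta_\varepsilon^{(\alpha-1)/2}\, | \log \delta_\varepsilon |^{(\alpha+1)/2},
\end{align*}
since $2^{\alpha-1}\le 1$ for $\alpha\in(0,1)$. Moreover, for the choice $\delta_\varepsilon=\varepsilon^{2(\alpha-\mu)/(1-\alpha)}$ made below, one has $\delta_\varepsilon^{(\alpha-1)/2}=\varepsilon^{-(\alpha-\mu)}$ and $|\log\delta_\varepsilon|=\frac{2(\alpha-\mu)}{1-\alpha}\,|\log\varepsilon|$, which yields the form of $\hat{A}(\varepsilon)$ below.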
Let $\mu \in (\gamma, \alpha)$ and take $\delta_\varepsilon=\varepsilon^{ 2( \alpha - \mu )/ ( 1 - \alpha ) }$. Then, $A(\varepsilon^{ 2( \alpha - \mu )/ ( 1 - \alpha ) },\varepsilon)\leq \hat{A}(\varepsilon)$, where $\hat{A}(\varepsilon)$ is given by
\[
\hat{A}(\varepsilon)=C_{\alpha, f } \varepsilon^{\mu} | \log \varepsilon |^{ ( \alpha + 1 )/2 },
\]
for some constant $C_{\alpha, f}>0$ independent of $\varepsilon>0$. So, we can use this $\delta_\varepsilon$ in Theorem~\ref{thm: MainII} to get that
\begin{align} \notag
\mathbf{P} \left\{ \varepsilon^{-1} \sup_{t \leq T} |Q_\varepsilon (t)| > \delta \right \} &\leq K_1 \delta^{-1} \exp \left \{- C_0 \frac{ \left(\delta \varepsilon^{-\alpha ( 1- \mu) / ( 1 - \alpha ) } \right)^2}
{ |\log \varepsilon|^\alpha} \right \} \\
& \quad + K_2 \varepsilon^{ 2 (\alpha - \mu)/(1 - \alpha ) }, \label{eqn: ineq_Lip}
\end{align}
for all $\varepsilon>0$ small enough and constants $K_1,K_2,C_0>0$ independent of $\varepsilon>0$ and $\delta>0$.
Theorem~\ref{thm: MainII} actually implies that inequality~\eqref{eqn: ineq_Lip} remains true as long as
$ \hat{A}(\varepsilon)/\delta \to 0$, as $\varepsilon \to 0$. So, since $\gamma \in (0,\mu)$, we can substitute $ \varepsilon^\gamma \delta$ for $\delta$ in~\eqref{eqn: ineq_Lip} to get that
\begin{align} \notag
\mathbf{P} \left\{ \varepsilon^{-( 1+\gamma) } \sup_{t \leq T} |Q_\varepsilon (t)| > \delta \right \} &\leq K_1 \delta^{-1} \varepsilon^{-\gamma} \exp \left \{- C_0 \frac{ \delta^2 \varepsilon^{2 \left(- (\alpha -\gamma) + \alpha ( \mu - \gamma) \right) / ( 1 - \alpha ) } }{ |\log \varepsilon|^\alpha} \right \} \\
& \quad + K_2 \varepsilon^{ 2(\alpha - \mu)/(1 - \alpha ) }. \label{eqn: algebraHard}
\end{align}
Since $\alpha \in (0,1) $ and $\mu < \alpha$, we have
\begin{align*}
\alpha ( \mu - \gamma) &< \alpha( \alpha -\gamma ) < \alpha - \gamma.
\end{align*}
Using this fact in~\eqref{eqn: algebraHard} we get that
\begin{align*}
\mathbf{P} \left\{ \varepsilon^{-( 1+\gamma) } \sup_{t \leq T} |Q_\varepsilon (t)| > \delta \right \} & \leq K_3 \varepsilon^{ 2(\alpha - \mu)/(1 - \alpha ) } ,
\end{align*}
for some $K_3>0$, any $\delta>0$, and all $\varepsilon >0$ small enough. The result is proved.
\end{proof}
\begin{proof}[ Proof of Theorem~\ref{thm: Main_Lip}]
The proof follows the same steps as the proof of Theorem~\ref{thm: Main}. The first step is to follow Theorem~\ref{thm: MainII} by finding a family $(\delta_\varepsilon)_{ \varepsilon >0 }$ such that $\delta_\varepsilon \to 0$ and
\[
\lim_{\varepsilon \to 0 } |\log \delta_\varepsilon | \varepsilon =0.
\]
Given $\gamma \in (0,1)$, we propose $\delta_\varepsilon = e^{ - \varepsilon^{ - (1 - \mu) } }$, for $\mu \in ( \gamma, 1)$. In this
case, $|\log \delta_\varepsilon | \varepsilon=\varepsilon^{\mu}$, so Theorem~\ref{thm: MainII} implies that
\begin{align*}
\mathbf{P} \left\{ \varepsilon^{-1} \sup_{t \leq T} |Q_\varepsilon (t)| > \delta \right \} &\leq K_1 \delta^{-1} \exp \left \{- K_4 \delta^2 \varepsilon^{ -( 1 + \mu) } e^{\varepsilon^{- ( 1 - \mu )} } \right \} \\
& \quad + K_2 e^{ - \varepsilon^{ - (1 - \mu) } }.
\end{align*}
As in the proof of Theorem~\ref{thm: Main}, we can substitute $\delta \varepsilon^\gamma$ instead of $\delta$ in the last
inequality. We can finish the proof by extracting the leading term in the resulting estimate.
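To make this last step explicit: after the substitution, the first term becomes
\[
K_1 \delta^{-1} \varepsilon^{-\gamma} \exp \left \{- K_4\, \delta^2\, \varepsilon^{2\gamma-( 1 + \mu) }\, e^{\varepsilon^{- ( 1 - \mu )} } \right \},
\]
and since $2\gamma-(1+\mu)<0$ while $e^{\varepsilon^{-(1-\mu)}}$ grows faster than any power of $\varepsilon^{-1}$, this term decays faster than $e^{-\varepsilon^{-(1-\mu)}}$ as $\varepsilon\to 0$. The second term $K_2\, e^{ - \varepsilon^{ - (1 - \mu) } }$ is therefore the leading one, and the stated bound follows after adjusting the constants.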
\end{proof}
\section{Proof of Theorem~\ref{thm: SmallNoise}} \label{sec: ProofThmSmallNoise}
Using the results from~\cite{ItoC1} (see also~\cite{Nualart} and the references therein) we observe that $Y_\varepsilon = h( X_\varepsilon )$ satisfies
\begin{align} \label{eqn: ItoC1}
dY_\varepsilon = \nabla h( X_\varepsilon (t ) ) \cdot b ( X_\varepsilon (t) ) dt + \varepsilon \nabla h( X_\varepsilon (t) ) dW(t) + \frac{1}{2} \mathcal{Q}_\varepsilon(t),
\end{align}
with initial condition $Y_\varepsilon(0) = h( X_\varepsilon(0 ) )$, and where $\mathcal{Q} _\varepsilon$ is an $\mathbb{R}^d$-valued process with j$^\text{th}$ coordinate $\mathcal{Q}^j_\varepsilon$ given by
\begin{align}\notag
\mathcal{Q} _\varepsilon^j (t) &= \sum_{ k=1}^d \left[ \partial_k h^j( X_\varepsilon ), X_\varepsilon^k \right] \\ \label{eqn: Qjdef}
&= \varepsilon \sum_{ k=1}^d \left[ \partial_k h^j( X_\varepsilon ), W^k \right], \quad j=1,...,d.
\end{align}
Differentiating with respect to $t$ the identity $h(S^tx) = e^{At}h(x)$, we get that $\nabla h( x ) b ( x ) = A h (x)$, which combined with~\eqref{eqn: ItoC1} implies
\begin{equation} \label{eqn: Yeqn}
dY_\varepsilon(t) = AY_\varepsilon(t) dt + \varepsilon \left( \sigma( Y_\varepsilon (t) ) dW(t) + \varepsilon^{-1} \mathcal{Q}_\varepsilon (t) \right ).
\end{equation}
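Let us note in passing how the identity $\nabla h( x ) b ( x ) = A h (x)$ used above is obtained: by the chain rule and the definition of the flow,
\[
\frac{d}{dt} h(S^t x) = \nabla h(S^t x)\, b(S^t x), \qquad \frac{d}{dt} e^{At} h(x) = A e^{At} h(x) = A h(S^t x),
\]
and evaluating both expressions at $t=0$ gives the claimed identity.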
From this expression, to conclude the proof it is enough to show that the term $\varepsilon^{-1}\mathcal{Q}_\varepsilon$ in the last display converges uniformly (in an appropriate time range) towards zero in probability. The proof of this fact extends the results of this paper, but it follows the same steps (with minor modifications) as the proofs in~\cite{nhn}.
We are now going to establish what kind of convergence we need from the term $\mathcal{Q}_\varepsilon$ in~\eqref{eqn: Yeqn} to finish the proof, and then state the result in a separate lemma.
Using the results of~\cite{Bakhtin-SPA}, which assert that $\frac{\tau_\varepsilon}{-\log \varepsilon}$ converges to a constant in probability, we see that for every $\upsilon>0$ there is a large enough constant $K_\upsilon>0$ such that, for all $\varepsilon$ small enough,
\[
\mathbf{P} \left\{ \tau_\varepsilon > -K_\upsilon \log\varepsilon \right\} \leq \upsilon.
\]
Since $\upsilon$ is arbitrary,~\eqref{eqn: Qjdef} and~\eqref{eqn: Yeqn} imply that to finish the proof it is enough to show that
\[
\sup_{ t \in [0,-K_\upsilon \log \varepsilon] } \left( \max_{j,k=1,...,d} \left[ \partial_j h^k ( X_\varepsilon ), W^j \right] \right ) \to 0
\]
in probability as $\varepsilon \to 0$. Lemma~\ref{lemma: quadh} implies this result and hence finishes the proof of this theorem.
\begin{lemma} \label{lemma: quadh}
Suppose $q:U \to \mathbb{R}$ is a function that satisfies the conditions of Theorem~\ref{thm: Main} with $\alpha > 1/2$. Then, for every $\Gamma>0$ and $\delta > 0$ it follows that
\[
\lim_{ \varepsilon \to 0} \mathbf{P} \left\{ \sup_{ t \in [0, - \Gamma \log \varepsilon ] } [q(X_\varepsilon), W^j](t) > \delta, \tau_\varepsilon < -\Gamma \log \varepsilon \right\}= 0,
\]
for every $j=1,...,d$.
\end{lemma}
\begin{proof}
The proof follows the exact same logic as the proof of Theorem~\ref{thm: Main}, with slight modifications that we will point out. We keep the same notation as in Section~\ref{sec: Approx} when appropriate. For instance, $P_{\varepsilon }$ is a partition of the interval $[0,-\Gamma \log \varepsilon]$ given
by points $0=s_{0}<...<s_{n_{\varepsilon }}=T_\varepsilon = -\Gamma \log \varepsilon$, where $s_i = i \delta_{\varepsilon }$, for $i=0,...,n_{\varepsilon }$. Also, define the backward
partition $\hat{P}_{\varepsilon }$ to be the partition of $[0,T_\varepsilon]$
given by points $0= t_{0}<...<t_{n_{\varepsilon }}=T_\varepsilon$, where $t_{i}=T_\varepsilon-s_{n_{\varepsilon }-i}$.
Let us fix $j$ for the rest of the proof. The idea is that the convergence towards $0$ of the process $\mathfrak{q}_\varepsilon(t) = [q(X_\varepsilon), W^j](t)$, conditioned on the sigma algebra $\mathcal{A}^j_\varepsilon$ generated by the history of $W$ up to time $T_\varepsilon$ except for the j$^\text{th}$ component of $W$, is proved almost identically to the main result in Theorem~\ref{thm: Main}. We will show that this is the case, and then the proof will be finished due to the tower property of conditional expectations.
As mentioned before, \cite{FollmerQuad},~\cite{ItoC1},~\cite{ItoCov} and~\cite{RussoCov} imply that, upon fixing $\varepsilon >0 $ and conditioning on $\mathcal{A}^j_\varepsilon$,
\[
\mathfrak{q}_\varepsilon (t) = - S_{\varepsilon }(t) - \hat{S}_{\varepsilon }(t) ,
\]
where (in analogy with the notation used in Section~\ref{sec: Approx}) we defined
\begin{align*}
S_{\varepsilon }(t) =\int_{0}^{t}q( X_\varepsilon (s) )dW^j(s), \text{ and }
\hat{S}_{\varepsilon }(t) =\int_{-\Gamma \log \varepsilon-t}^{-\Gamma \log \varepsilon}q( \hat X_\varepsilon (s) )d \hat{W}^j(s).
\end{align*}
Here the time reversal is taken with respect to time $T_\varepsilon = -\Gamma \log \varepsilon$.
As we did before, the proof now consists in approximating the above difference by the corresponding sums and then showing that the approximating sequence converges to $0$. To approximate the process $\mathfrak{q}_\varepsilon$, all steps are analogous to the ones followed in Section~\ref{sec: Approx}. In particular, $\mathfrak{q}_\varepsilon$ will be approximated by
\begin{align}
L_{\varepsilon, P_\varepsilon} (t) &= \sum_{i=1}^{ i(t) }\Delta
_{s_{i},s_{i-1}} \left( q(X_\varepsilon) \right) \Delta
_{s_{i},s_{i-1}}W^j , \label{eqn: Q_eps_Peps_def_q}
\end{align}
where $i(t)$ is given by
\[
i(t)=\min \left \{ m \in [0,n_\varepsilon] \cap \mathbb{Z} : s_m \geq t \right \}.
\]
To show that $L_{\varepsilon,P_\varepsilon}$ converges to $0$, we follow line by line the proof of Lemma~\ref{lemma: cond_osc}, with the only differences that $n_\varepsilon$ is now of order $-\delta_\varepsilon^{-1} \log\varepsilon$, and that the modulus of continuity of $X_\varepsilon$ is now of the order $\max( \varepsilon \delta_{W,\varepsilon}, \delta_\varepsilon )$. Proceeding as described, we obtain that there is a positive constant $K$ such that for any $\delta>0$ and $\varepsilon >0$,
\begin{equation}
\mathbf{P} \left \{ \sup_{ t \in [0,- \Gamma \log \varepsilon ] } | L_{\varepsilon,P_\varepsilon}(t) | > \delta \right \}
\leq \mathbf{P} \left \{ | \log \delta_\varepsilon|{\rm osc}_q ( \max( \varepsilon q_\varepsilon, q_\varepsilon^2 ) ) > \frac{ q_\varepsilon \delta}{- 4\Gamma \log \varepsilon} \right \}+K \delta_\varepsilon.
\end{equation}
By choosing $\delta_\varepsilon = \varepsilon^2$, it follows that $q_\varepsilon$ is of the order $-\varepsilon \log \varepsilon$, and $\max( \varepsilon q_\varepsilon, q_\varepsilon^2 )$ is of the order $-\varepsilon^2 ( \log\varepsilon )^2$. Hence, in this case, from the last display, to ensure that $L_{\varepsilon,P_\varepsilon}$ converges to $0$, we need that $\varepsilon^{2\alpha - 1} \to 0$, as $\varepsilon \to 0$. That is, we need $\alpha > 1/2$, as stated in the statement of the theorem.
We are just left to show that the difference $L_\varepsilon - L_{\varepsilon, P_\varepsilon}$ converges to $0$ under the additional condition that $\delta_\varepsilon$ is of order $\varepsilon^2$. In this case, the method used in Section~\ref{sec: approx_proof} to prove Lemma~\ref{thm: Main_Quad_Brownian} carries over line by line, with the appropriate modifications related to the modulus of continuity of $X_\varepsilon$ and the logarithmically growing time horizon $T_\varepsilon=-\Gamma\log\varepsilon$. We leave the details to the reader.
\end{proof}
\section{Additional Proofs} \label{sec: add_proof}
\begin{proof}[Proof of Lemma~\ref{Lemma: Martingale_general}]
For each $\varepsilon >0$, we use the representation of martingales as time changed Brownian Motion~\cite[Theorem
3.4.2]{Karatzas--Shreve}
to see that $M_{\varepsilon }=B
(\left\langle M_{\varepsilon }\right\rangle )$ in distribution in the space of
continuous functions, for some Brownian Motion $B$. Therefore,
\begin{equation*}
\mathbf{P}\left\{ \sup_{t\leq T }|M_{\varepsilon }(t)|>\delta
\right\} \leq \mathbf{P}\left\{ \sup_{t\leq r_{\varepsilon }}|B (t)|>\delta
\right\}.
\end{equation*}
Now the symmetry of $B$, the reflection principle~\cite[Section 2.6]{Karatzas--Shreve}, and Brownian
scaling (self-similarity) imply that
\begin{align*}
\mathbf{P}\left\{ \sup_{t\leq r_{\varepsilon }}|B (t)|>\delta \right\}
&= \mathbf{P} \left \{ \sup_{t\leq r_{\varepsilon }}\max \{ B (t) , - B (t) \} > \delta \right \} \\
&\leq 2\mathbf{P}\left\{ \sup_{t\leq r_{\varepsilon }}B (t)>\delta \right\} \\
&\leq 4\mathbf{P}\left\{ B (r_{\varepsilon })>\delta \right\} \\
&=4\mathbf{P}\left\{ \sqrt{r_{\varepsilon }}B (1)>\delta \right\} .
\end{align*}
The result follows from the standard Gaussian tail estimate recalled below.
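Specifically (recorded here for completeness), for a standard Gaussian random variable $B(1)$ and any $x>0$ one has $\mathbf{P}\{ B(1)>x \} \le \frac{1}{x\sqrt{2\pi}}\,e^{-x^2/2}$, so that, taking $x=\delta/\sqrt{r_\varepsilon}$,
\[
4\,\mathbf{P}\left\{ \sqrt{r_{\varepsilon }}\,B (1)>\delta \right\}\le \frac{4\sqrt{r_\varepsilon}}{\delta\sqrt{2\pi}}\,e^{-\delta^2/(2 r_\varepsilon)}=\sqrt{\frac{8\, r_\varepsilon}{\pi\,\delta^{2}}}\;e^{-\delta^{2}/(2 r_\varepsilon)},
\]
which is exactly the bound claimed in the lemma.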
\end{proof}
\begin{proof}[Proof of Lemma~\ref{lemma: Levy}]
Fix $\delta>0$ and note that
\begin{equation} \label{eqn: Levy_sum}
\mathbf{P} \left \{ \delta_{B,\varepsilon} > \delta \right \} \leq \sum_{i=1}^{n_\varepsilon} \mathbf{P} \left \{ \sup_{ s \in (s_{i-1},s_i ) } | \Delta_{ s,s_{i-1} } B| > \delta \right \}.
\end{equation}
We bound each of the probabilities in this sum. Since the process $\Delta_{ s,s_{i-1} } B$ is equal in distribution, on
the space of continuous functions, to a Brownian Motion itself up to a time shift, we can use the reflection
principle~\cite[Theorem 2.9.25]{Karatzas--Shreve} and standard Gaussian bounds to get
\begin{align*}
\mathbf{P} \left \{ \sup_{ s \in (s_{i-1},s_i ) } | \Delta_{ s,s_{i-1} } B| > \delta \right \}
&\leq 4 \mathbf{P} \left \{ B ( \delta_\varepsilon ) > \delta \right \} \\
&\leq \delta^{-1} \sqrt{ \frac{8 \delta_\varepsilon} { \pi} } e^{ - \delta^2 / (2 \delta_\varepsilon) }.
\end{align*}
Substituting this expression in~\eqref{eqn: Levy_sum} and using the fact that $n_\varepsilon \leq 2 T/ \delta_\varepsilon$, we see that there is a constant $C>0$ independent of $\varepsilon>0$ such that for any $\delta>0$
\[
\mathbf{P} \left \{ \delta_{B,\varepsilon} > \delta \right \} \leq \frac{C} { \delta\sqrt{\delta_\varepsilon}} e^{ - \delta^2 / ( 2 \delta_\varepsilon ) }
\]
as expected.
To prove the second part, use $\delta= q_\varepsilon=2\sqrt{ - \delta_\varepsilon \log \delta_\varepsilon }$ in the last expression to get that
\begin{align*}
\mathbf{P} \left \{ \delta_{B,\varepsilon} > q_\varepsilon \right \} &\leq \frac{C} { 2 \delta_\varepsilon \sqrt{- \log \delta_\varepsilon}}e^{ 2 \log \delta_\varepsilon } \\
& = \frac{C \delta_\varepsilon } { 2 \sqrt{- \log \delta_\varepsilon}} \\
& \leq K_2 \delta_\varepsilon.
\end{align*}
Hence the result follows.
\end{proof}
\begin{proof}[Proof of Lemma~\ref{lemma: A}]
We start with a basic inequality
\begin{align*}
\sup_{ t \in [0,T] } |A_\varepsilon (t)| & \leq \sum_{i=0}^{n_\varepsilon - 1} \int_{s_i}^{s_{i+1}} | \Delta_{ s, s_i } f( \varepsilon \hat W )| \frac{ | \hat W (s) | }{T-s}ds \\
& \leq 2 \sqrt{T} { \rm osc}_f ( \varepsilon \delta_{W,\varepsilon} ) \sup_{ s \leq T } \frac{ | \hat W(s) | }{ \sqrt{ T-s} } \\
& \leq 2 \sqrt{T} { \rm osc}_f ( \varepsilon \delta_{W,\varepsilon} ) \sup_{ s \leq T } \frac{ | W(s) | }{ \sqrt{s} }.
\end{align*}
It implies that
\begin{align*}
\mathbf{P} \left \{ \sup_{ t \in [0,T] } |A_\varepsilon (t)| > \delta \right \} & \leq \mathbf{P} \left \{ \sup_{ s \leq T } \frac{ |W(s)| }{\sqrt{s}} > \frac{\delta} { 2{\rm osc}_f ( \varepsilon q_\varepsilon ) \sqrt{T} }\right \} + \mathbf{P} \left \{ \delta_{W,\varepsilon} > q_\varepsilon \right \} \\
& \leq \mathbf{P} \left \{ \sup_{ s \leq T } \frac{ |W(s)| }{\sqrt{s}} > \frac{\delta} { 2 {\rm osc}_f ( \varepsilon q_\varepsilon ) \sqrt{T} }\right \} + K_1 \delta_\varepsilon,
\end{align*}
for some constant $K_1 > 0$ independent of $\delta>0 $ and $\varepsilon >0$. To finish the proof, we need to study the tail probability of the random variable $A=\sup_{ s \leq T }|W(s)|/ \sqrt{s}$.
In order to study the tail decay of the random variable $A$, note that, due to the symmetry of Brownian Motion,
\[
\mathbf{P} \left\{ A > \delta \right \} \leq 2 \mathbf{P} \left\{ \sup_{ t \leq T} \frac{ W(t) }{ \sqrt{t} } > \delta \right \}.
\]
So it is sufficient to focus on the tail probabilities of the random variable $N=\sup_{t \leq T} ( W(t)/
\sqrt{t} ) $, which is the supremum of a Gaussian process.
Equip the interval $[0,T]$ with the metric $\rho$ given by
\begin{align*}
\rho (s,t)^2 &= \mathbf{E} \left( \frac{W(s)}{\sqrt{s}} - \frac{W(t)}{\sqrt{t}} \right)^2 \\
& = 2 \left( 1- \sqrt{ \frac{s \wedge t} { s \vee t } } \right ), \quad s,t \in [0,T].
\end{align*}
We denote by $B_\theta (t) \subset [0,T]$ the $\rho$-ball of radius $\theta >0$ centered at $t \in [0,T]$.
Let
$H_\theta$ be the minimum number of balls of radius $\theta$ needed in order to cover $[0,T]$. According
to~\cite[Section 14, Theorem 1]{Lifshits}, if
\begin{equation} \label{eqn: Dudley}
\int_0^{\sigma/2} \sqrt{ | \log H_\theta | } d\theta < \infty,
\end{equation}
with $\sigma = \sup_{ t \in [0,T] } {\rm var} ( W(t)/\sqrt{t} )=1$, then $\mathbf{E} N<\infty$.
Then, it is standard to see~\cite[Corollary 2, Section 14]{Lifshits} that there is a $\zeta_0> \mathbf{E} N$, such that for any $\zeta > \zeta_0$
\begin{equation} \label{eqn: concentration}
\mathbf{P} \left \{ |N-\mathbf{E} N| > \zeta \right \} \leq C e^{ - \zeta^2 /2 } / \zeta,
\end{equation}
for some universal constant $C>0$.
In our situation, if the integral in~\eqref{eqn: Dudley} is finite, this will be enough to finish the proof. Indeed, assuming~\eqref{eqn: concentration}, there is an $\varepsilon_0 >0 $ such that
\begin{align*}
\mathbf{P} &\left \{ \sup_{ s \leq T } \frac{ |W(s)| }{\sqrt{s}} > \frac{\delta} { 2 {\rm osc}_f ( \varepsilon q_\varepsilon ) \sqrt{T} } \right \}
\leq 2 \mathbf{P} \left \{ N > \frac{\delta} {2 {\rm osc}_f ( \varepsilon q_\varepsilon ) \sqrt{T} }\right \} \\
& \hspace{1.5in} \leq 2 \mathbf{P} \left \{ N - \mathbf{E} N > \frac{\delta} { 2 {\rm osc}_f ( \varepsilon q_\varepsilon ) \sqrt{T}} - \mathbf{E} N \right \} \\
& \hspace{1.5in} \leq C_1\left ( \frac{ \delta }{ 2 \sqrt{T} {\rm osc}_f ( \varepsilon q_\varepsilon ) } - \mathbf{E} N \right )^{-1} \exp \left \{ - C_2 \left ( \frac{ \delta }{ 2 \sqrt{T} {\rm osc}_f ( \varepsilon q_\varepsilon ) } - \mathbf{E} N \right ) ^2 \right \} \\
& \hspace{1.5in} \leq ( C_1 / \delta ) \exp \left \{ - C_3 \frac{ \delta ^2}{ {\rm osc}_f ( \varepsilon q_\varepsilon )^2} \right \},
\end{align*}
for some constants $C_1, C_2, C_3>0$ independent of $\varepsilon$ and $\delta$, and all $\varepsilon \in (0, \varepsilon_0) $. Hence we just need to show that the integral~\eqref{eqn: Dudley} is finite.
We are going to give an estimate of $H_\theta$, $\theta \in (0,1/2)$. Suppose $0 \leq s < t \leq T$, then $s
\in B_\theta (t)$ if and only if
\[
\sqrt{s} \geq \sqrt{t} ( 1 - \theta^2 / 2 ).
\]
Therefore, if $s$ and $t$ belong to the same ball of radius $\theta \in (0,1/2)$, then
\[
| t- s| \leq T \theta^2 .
\]
Hence, $H_\theta \leq 2 / \theta^2$, and $ \sqrt{ | \log H_\theta | }$ is integrable on the interval $[0,1/2]$, which
implies our claim.
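Explicitly (an elementary check, recorded for the reader's convenience): for $\theta\in(0,1/2]$ the bound $H_\theta\le 2/\theta^2$ gives
\[
\sqrt{ | \log H_\theta | } \le \sqrt{ \log (2/\theta^2) } \le \sqrt{\log 2} + \sqrt{ 2\log (1/\theta) },
\]
and $\int_0^{1/2}\sqrt{\log(1/\theta)}\,d\theta<\infty$, since $\sqrt{\log(1/\theta)}\le \theta^{-1/2}$ for all sufficiently small $\theta$ and $\theta^{-1/2}$ is integrable at $0$; hence the entropy integral in~\eqref{eqn: Dudley} is indeed finite.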
\end{proof}
\end{document}
\begin{document}
\title[Nonlinear Helmholtz equation ]{Complex solutions and stationary scattering for the nonlinear Helmholtz equation
}
\author{Huyuan Chen}
\address{Department of Mathematics, Jiangxi Normal University, Nanchang,\\
Jiangxi 330022, PR China}
\email{[email protected]}
\author{Gilles Ev\'equoz }
\address{School of Engineering, University of Applied Sciences of Western Switzerland, Route du Rawil 47,\\
1950 Sion, Switzerland}
\email{[email protected]}
\author{Tobias Weth}
\address{Goethe-Universit\"{a}t Frankfurt, Institut f\"{u}r Mathematik, Robert-Mayer-Str. 10\\
D-60629 Frankfurt, Germany }
\email{[email protected]}
\begin{abstract}
We study a stationary scattering problem related to the nonlinear Helmholtz equation
$
- \Delta u -k^2 u = f(x,u) \ \ \text{in $\mathbb{R}^N$,}
$
where $N \ge 3$ and $k>0$. For a given incident free wave $\varphi \in L^\infty(\mathbb{R}^N)$, we prove the existence of complex-valued solutions of the form $u=\varphi+u_{\text{sc}}$, where $u_{\text{sc}}$ satisfies the Sommerfeld outgoing radiation condition. Since neither a variational framework nor maximum principles are available for this problem, we use topological fixed point theory and global bifurcation theory to solve an associated integral equation involving the Helmholtz resolvent operator. The key step of this approach is the proof of suitable a priori bounds.
\end{abstract}
\maketitle
\section{Introduction}
A basic model for wave propagation in an ambient medium with nonlinear response is provided by the nonlinear wave equation
\begin{equation}\label{nlw0}
\frac{\partial^2 \psi}{\partial t^2}(t,x) - \Delta \psi(t,x) =f(x,\psi(t,x)), \qquad (t,x)\in\mathbb{R}\times\mathbb{R}^N.
\end{equation}
Considering nonlinearities of the form $f(x,\psi)=g(x,|\psi|^2)\psi$,
where $g$ is a real-valued function, the time-periodic ansatz
\begin{equation}
\label{psi-ansatz}
\psi(t,x)=e^{-i k t}u(x), \qquad k >0
\end{equation}
leads to the nonlinear Helmholtz equation
\begin{equation}\label{nlh-1}
- \Delta u -k^2 u = f(x,u) \qquad \text{in $\mathbb{R}^N$.}
\end{equation}
Assuming in this model that nonlinear interactions occur only locally in space,
we are led to restrict our attention to nonlinearities $f \in C(\mathbb{R}^N \times \mathbb{C},\mathbb{C})$ with $\lim \limits_{|x| \to \infty}f(x,u)=0$ for every $u \in \mathbb{R}$.
The stationary scattering problem then consists in analyzing solutions of the form $u=\varphi+u_{\text{sc}}$, where $\varphi$ is a solution of the homogeneous Helmholtz equation $-\Delta \varphi - k^2 \varphi=0$ and $u_{\text{sc}}$ obeys the Sommerfeld outgoing radiation condition
\begin{equation}\label{sommerfeld-1}
r^{\frac{N-1}{2}}\left|\frac{\partial u_{\text{sc}}}{\partial r}-iku_{\text{sc}} \right|\to 0\quad\text{as }r=|x| \to\infty
\end{equation}
or a suitable variant of it. The function $\varphi$ represents a given {\em incident free wave} whose interaction with the nonlinear ambient medium gives
rise to a scattered wave $u_{\text{sc}}$. Usually, $\varphi$ is chosen as a plane wave
\begin{equation}
\label{eq:plane-wave}
\varphi(x)=e^{ik\:x\cdot \xi},\qquad \xi\in S^{N-1}
\end{equation}
or as a superposition of plane waves. To justify the notions of incident and scattered wave, let us assume for the moment that the nonlinearity is compactly supported in the space variable $x$. Then $u_{\text{sc}}$ has the asymptotics $u_{\text{sc}}(x)= r^{\frac{1-N}{2}}e^{i kr}g(\frac{x}{|x|})+ o(r^{\frac{1-N}{2}})$ as $r= |x| \to \infty$ with a function $g: S^{N-1} \to \mathbb{C}$ (see \cite[Theorem 2.5]{colton-kress} and \cite[Proposition 2.6]{EW1}). For incident plane waves $\varphi$ as in \eqref{eq:plane-wave}, this leads to the asymptotic expansion
\begin{equation*}
\psi(t,x)=e^{i k (x \cdot \xi-t)}+ r^{\frac{1-N}{2}}e^{i k(r-t)}g(\frac{x}{|x|})
+o(r^{\frac{1-N}{2}}) \qquad \text{as $r= |x| \to \infty$}
\end{equation*}
uniformly in $t \in \mathbb{R}$ for the corresponding time-periodic solution given by the ansatz~\eqref{psi-ansatz}. This expansion clearly shows the asymptotic decomposition of the wave function $\psi$ into two parts, one propagating with constant speed $k$ in the given direction $\xi$ and the other radiating outward in the radial direction. For a more detailed discussion of the connection between the notions of stationary and dynamical scattering, we refer the reader to \cite{komech} and the references therein.
\qquad In the (affine) linear case $f(x,u)=a(x) u+b(x)$, both the forward and the inverse stationary scattering problem
have been extensively studied and are reasonably well understood from a functional analytic point of view (see e.g. \cite{colton-kress} and the references therein). In contrast, the nonlinear setting remains widely unexplored, although it appears in important models driven by applications and has therefore received rapidly growing attention in recent years. Specifically, we mention the modeling of propagation and scattering of electromagnetic waves in localized nonlinear Kerr media as considered e.g. in \cite{fibich-tsynkov:2005,baruch-fibich-tsynkov:2009,wu-zou}. In this context, the nonlinear Helmholtz equation arises from a reduction of Maxwell's equations in the case of a linearly polarized electric field after elimination of the corresponding magnetic field. As noted in \cite{wu-zou}, this leads to a special case of equation (\ref{nlh-1}) given by
$$
- \Delta u -k^2 u = \rho 1_{\Omega}|u|^2 u \qquad \text{in $\mathbb{R}^N$.}
$$
Here $\Omega \subset \mathbb{R}^N$ is the support of the nonlinear Kerr medium and $\rho$ is the Kerr constant, given as the quotient of the Kerr coefficient of the medium and the index of refraction of the ambient homogeneous medium. Both from a theoretical and an applied point of view, it is of great interest to understand self-focusing and scattering effects of laser beams interacting with localized nonlinear media, and computational approaches to these questions have been developed e.g. in \cite{fibich-tsynkov:2005,baruch-fibich-tsynkov:2009,wu-zou}.
From a theoretical point of view, the current understanding of the stationary scattering problem for (\ref{nlh-1}) is
mainly restricted to the case of small incident waves $\varphi$ which
can be reduced to a perturbation of an associated linear problem in suitable function spaces. In this case, existence and well-posedness results have been obtained by Guti\'errez \cite{G}, Jalade \cite{J} and Gell-Redman et al. \cite{Gell-Red}. In \cite{J}, the scattering problem is studied for a small incident
plane wave and a family of compactly supported nonlinearities in dimension $N=3$.
The main result in \cite{G} yields, in dimensions $N=3,4$, the existence of solutions
to the scattering problem with small incident Herglotz wave $\varphi$ and cubic power nonlinearity. We recall that a Herglotz wave is a function of the type
\begin{equation}
\label{eq:18}
x \mapsto \varphi(x):= \int_{S^{N-1}} e^{i k(x\cdot \xi)} g (\xi)\,d \sigma(\xi) \qquad
\text{for some function $g \in L^2(S^{N-1})$.}
\end{equation}
Since plane waves of the form (\ref{eq:plane-wave}) cannot be written in this way, they are not admitted in \cite{G}. On the other hand, no asymptotic decay of the nonlinearity is required for the approach developed in \cite{G}. This is also the case for the approach in \cite{Gell-Red}, where more general nonlinearities are considered, while the class of admissible incident Herglotz waves $\varphi$ is restricted by assuming smallness measured in higher Sobolev norms on $S^{N-1}$.
\qquad The main reason for the smallness assumption in the papers \cite{G,J,Gell-Red} is the use of contraction mappings together with resolvent estimates for the Helmholtz operator.
The main aim of this paper is to remove this smallness assumption by means of different tools from nonlinear analysis and new a priori estimates on the set of solutions. More precisely, for a given solution $\varphi \in L^\infty(\mathbb{R}^N)$ of the homogeneous Helmholtz equation
$\Delta \varphi + k^2 \varphi= 0$, which we shall refer to as an {\em incident free wave} in the following, we wish to find solutions of (\ref{nlh-1}) of the form
$u = \varphi + u_{sc} \in L^\infty(\mathbb{R}^N)$ with $u_{sc}$ satisfying (\ref{sommerfeld-1}) or a suitable variant of this radiation condition. This problem can be reduced to an integral equation involving the
Helmholtz resolvent operator ${\mathcal R}_k$, which is formally given as a convolution ${\mathcal R}_k f= \Phi_k * f$ with the fundamental solution
\begin{equation}\label{eqn:fund_sol}
\Phi_k : \mathbb{R}^N \setminus \{0\} \to \mathbb{C}, \qquad \Phi_k(x)=\frac{i}{4} \Bigl(\frac{k}{2\pi |x|}\Bigr)^{\frac{N-2}{2}}H^{(1)}_{\frac{N-2}{2}}(k|x|)
\end{equation}
associated to (\ref{sommerfeld-1}). Here $H^{(1)}_{\frac{N-2}{2}}$ is the
Hankel function of the first kind of order $\frac{N-2}{2}$, see e.g. \cite{AS}. It is easy to see from the asymptotics of $H^{(1)}_{\frac{N-2}{2}}$ that $\Phi_k$ satisfies (\ref{sommerfeld-1}), and the same is true for $u:= {\mathcal R}_k h= \Phi_k * h$ e.g. in the case where $h \in L^\infty(\mathbb{R}^N)$ has compact support.
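For the reader's convenience, we recall the standard large-argument asymptotics of the Hankel function (see e.g. \cite{AS}) which underlie this observation:
\begin{equation*}
H^{(1)}_{\nu}(t) = \sqrt{\frac{2}{\pi t}}\, e^{i\left(t - \frac{\nu\pi}{2} - \frac{\pi}{4}\right)}\bigl(1 + O(t^{-1})\bigr) \qquad \text{as } t \to \infty,
\end{equation*}
so that $\Phi_k(x) = c_N\, k^{\frac{N-3}{2}} |x|^{\frac{1-N}{2}} e^{ik|x|}\bigl(1 + O(|x|^{-1})\bigr)$ as $|x| \to \infty$ with some constant $c_N \in \mathbb{C}$; together with the analogous expansion for the derivative, this yields~(\ref{sommerfeld-1}) for $\Phi_k$.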
\qquad By the estimate in \cite[Theorem 8]{G} and the remark following it, an integral variant of (\ref{sommerfeld-1}) is available under weaker assumptions on $h$. More precisely, if $N=3,4$ and $1 < p \le \frac{2(N+1)}{N+3}$ or $N \ge 5$ and $\frac{2N}{N+4} \le p \le \frac{2(N+1)}{N+3}$, then, for $h \in L^p(\mathbb{R}^N)$, the function $u= {\mathcal R}_k h$ is a well-defined strong solution of the inhomogeneous Helmholtz equation $-\Deltalta u - k^2 u = h$ satisfying the following variant of the Sommerfeld outgoing radiation condition:
\begin{equation}\label{eqn:sommerfeld1-averaged}
\lim_{R\to\infty}\frac{1}{R} \int_{B_R}\left|\nabla
u(x)-iku(x)\frac{x}{|x|} \right|^2\, dx=0.
\end{equation}
Hence, under appropriate assumptions on the nonlinearity $f$, we are led to study the integral equation
\begin{equation}\label{nlh-1-integral}
u = {\mathcal R}_k(N_f(u))+ \varphi \qquad \text{in $L^\infty(\mathbb{R}^N)$}
\end{equation}
for a given incident free wave $\varphi \in L^\infty(\mathbb{R}^N)$. Here $N_f$ is the substitution operator associated to $f$ given by $N_f(u)(x):= f(x,u(x))$.
\qquad To state our main results we need to introduce some more notation. It is convenient to define $\langle x \rangle = (1+|x|^2)^{\frac{1}{2}}$ for $x \in \mathbb{R}^N$.
For $\alpha\in\mathbb{R}$ and a measurable subset $A \subset \mathbb{R}^N$, we consider the Banach space $L^\infty_\alpha(A)$ of measurable functions $w: A \to \mathbb{C}$ with
$$
\norm{w}_{L^\infty_\alpha(A)}:=\| \langle \,\cdot\, \rangle^{\alpha} w\|_{L^\infty(A)
} <+\infty.
$$
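For instance, for $\beta \in \mathbb{R}$ the function $x \mapsto \langle x \rangle^{-\beta}$ belongs to $L^\infty_\alpha(\mathbb{R}^N)$ if and only if $\beta \ge \alpha$, in which case its $L^\infty_\alpha$-norm equals $1$.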
In particular, $L^\infty(A)=L^\infty_0(A)$. In the case $A= \mathbb{R}^N$, we merely write $\|\cdot\|_{L^\infty_\alpha}$ in place of $\|\cdot\|_{L^\infty_\alpha(\mathbb{R}^N)}$. For subspaces of real-valued functions, we use the notations $L^p(A,\mathbb{R})$ for $1 \le p \le \infty$ and $L^\infty_\alpha(A,\mathbb{R})$. We first note the following preliminary observation regarding properties of the resolvent operator ${\mathcal R}_k$.
\begin{proposition}\label{resolvent-compact-and-continuous}
Let $N\geq2$, $\alpha>\frac{N+1}{2}$ and $\tau(\alpha)$ be defined by
\begin{align} \label{exp 1}
\tau(\alpha) &=
\begin{cases} \alpha-\frac{N+1}{2}\quad&{\rm if}\ \, \frac{N+1}{2}<\alpha<N, \\[1.5mm]
\frac{N-1}{2}\quad&{\rm if}\ \, \alpha\geq N.
\end{cases}
\end{align}
Then we have
\begin{equation}
\label{eq:kappa-sigma-finite}
\kappa_{\alpha}:= \sup \Big\{ \bigl \| |\Phi_k| * w \bigr \|_{L^\infty_{\tau(\alpha)}}:\: w \in L^\infty_\alpha(\mathbb{R}^N),\: \| w\|_{L^\infty_{\alpha }}=1 \Big\}
< \infty,
\end{equation}
so ${\mathcal R}_k$ defines a bounded linear map $L^\infty_\alpha(\mathbb{R}^N) \to L^\infty_{\tau(\alpha)}(\mathbb{R}^N)$. Moreover:
\begin{enumerate}
\item[(i)] The resolvent operator defines a compact linear map ${\mathcal R}_k: L^\infty_\alpha(\mathbb{R}^N) \to L^\infty(\mathbb{R}^N)$.\\
\item[(ii)] If $\alpha > \frac{N(N+3)}{2(N+1)}$ and $h \in L^\infty_{\alpha}(\mathbb{R}^N)$, then the function $u:= {\mathcal R}_k h$ is a strong solution of $-\Delta u - k^2 u = h$ satisfying~(\ref{eqn:sommerfeld1-averaged}). If $\alpha > N$, then $u$ satisfies~(\ref{sommerfeld-1}).
\end{enumerate}
\end{proposition}
\qquad Our first main existence result is concerned with linearly bounded nonlinearities $f$.
\begin{theorem} \label{W teo 1-sublinear}
Let, for some $\alpha>\frac{N+1}{2}$, the nonlinearity $f: \mathbb{R}^N \times \mathbb{C} \to \mathbb{C}$ be a continuous function satisfying
\begin{equation}
\label{eq:assumption-f1}
\sup_{|u|\le M,x \in \mathbb{R}^N} \langle x \rangle^{\alpha}|f(x,u)|< \infty \qquad \text{for all $M >0$.}
\end{equation}
Moreover, suppose that \underline{one} of the following assumptions is satisfied:
\begin{enumerate}
\item[$(f_1)$] The nonlinearity is of the form $f(x,u)= a(x) u + b(x,u)$ with $a \in L^\infty_\alpha(\mathbb{R}^N,\mathbb{R})$ and
$$
\sup \limits_{|u|\le M, x \in \mathbb{R}^N} \langle x \rangle^{\alpha} |b(x,u)|= o(M) \qquad \text{as $M \to +\infty.$}
$$
\item[$(f_2)$] There exist $Q,b \in L^\infty_{\alpha}(\mathbb{R}^N,\mathbb{R})$ with $\|Q\|_{L^\infty_\alpha} < \frac{1}{\kappa_\alpha}$, where $\kappa_\alpha$ is given in (\ref{eq:kappa-sigma-finite}), and
\begin{equation*}
|f(x,u)|\leq Q(x) |u|+b(x)\qquad\text{for all }\, (x,u)\in \mathbb{R}^N \times \mathbb{C}.
\end{equation*}
\end{enumerate}
Then, for any given solution $\varphi \in L^\infty(\mathbb{R}^N)$ of the homogeneous Helmholtz equation $\Delta \varphi + k^2 \varphi = 0$, the equation (\ref{nlh-1-integral}) admits a solution $u \in L^\infty(\mathbb{R}^N)$.
\end{theorem}
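\qquad As a simple illustration of assumption $(f_2)$ (not needed in the remainder of the paper), consider a saturable nonlinearity of the form $f(x,u)= a(x)\frac{u}{1+|u|^2}$ with $a \in L^\infty_\alpha(\mathbb{R}^N,\mathbb{R})$. Then $|f(x,u)| \le |a(x)|$ for all $(x,u)\in \mathbb{R}^N \times \mathbb{C}$, so (\ref{eq:assumption-f1}) and $(f_2)$ hold with $Q \equiv 0$ and $b=|a|$, and Theorem~\ref{W teo 1-sublinear} applies for every incident free wave $\varphi \in L^\infty(\mathbb{R}^N)$.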
\begin{remark}
(i) In many semilinear elliptic problems with asymptotically linear nonlinearities as in assumption $(f_1)$, additional nonresonance conditions have to be assumed to guarantee a priori bounds which eventually lead to the existence of solutions. This is not the case in the present scattering problem. We shall establish a priori bounds merely as a consequence of $(f_1)$ by means of suitable nonexistence results for solutions of the linear Helmholtz equation satisfying the radiation condition~(\ref{eqn:sommerfeld1-averaged}). The key assumption here is that the function $a$ in $(f_1)$ is real-valued.
\qquad $(ii)$ Theorem~\ref{W teo 1-sublinear} leaves open the question of uniqueness of solutions to (\ref{nlh-1-integral}). In fact, under the sole assumptions of Theorem~\ref{W teo 1-sublinear}, uniqueness is not to be expected. If, however, for some $\alpha>\frac{N+1}{2}$, the nonlinearity $f \in C(\mathbb{R}^N \times \mathbb{R}, \mathbb{R})$ satisfies (\ref{eq:assumption-f1}) and the Lipschitz condition
\begin{equation}
\label{eq:assumption-f1-lipschitz}
\ell_\alpha:= \sup \Bigl \{ \langle x \rangle^{\alpha} \, \Bigl|\frac{f(x,u)-f(x,v)}{u-v}\Bigr|\::\: u,v \in \mathbb{R},\: u \neq v, \: x \in \mathbb{R}^N \Bigr\} < \frac{1}{\kappa_\alpha},
\end{equation}
then the contraction mapping principle readily yields the existence of a unique solution $u \in L^\infty(\mathbb{R}^N)$ of (\ref{nlh-1-integral}) for given $\varphi \in L^\infty(\mathbb{R}^N)$, see Theorem~\ref{theo-uniqueness} below.
\end{remark}
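\qquad As a simple example of a nonlinearity fulfilling~(\ref{eq:assumption-f1}) and (\ref{eq:assumption-f1-lipschitz}), one may take $f(x,u)= Q(x)\arctan u$ with $Q \in L^\infty_\alpha(\mathbb{R}^N,\mathbb{R})$ and $\|Q\|_{L^\infty_\alpha} < \frac{1}{\kappa_\alpha}$; since $|\arctan u - \arctan v| \le |u-v|$ for $u,v \in \mathbb{R}$, we then have $\ell_\alpha \le \|Q\|_{L^\infty_\alpha} < \frac{1}{\kappa_\alpha}$.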
\qquad Next we turn our attention to superlinear nonlinearities which do not satisfy $(f_1)$ or $(f_2)$. Assuming additional regularity estimates for $f$, we can still prove the existence of solutions of (\ref{nlh-1-integral}) in the case where $\|\varphi\|_{L^\infty(\mathbb{R}^N)}$ is small. More precisely, we have the following.
\begin{theorem} \label{teo-implicit-function}
Let, for some $\alpha>\frac{N+1}{2}$, the nonlinearity $f: \mathbb{R}^N \times \mathbb{C} \to \mathbb{C}$ be a continuous function satisfying
(\ref{eq:assumption-f1}). Suppose moreover that the function $f(x,\cdot):\mathbb{C} \to \mathbb{C}$ is real differentiable for every $x \in \mathbb{R}^N$, and that $f':= \partial_u f: \mathbb{R}^N \times \mathbb{C} \to {\mathcal L}_{\mathbb{R}}(\mathbb{C},\mathbb{C})$ is a continuous function satisfying
\begin{equation}
\label{eq:assumption-f1-diff}
\sup_{|u|\le M,x \in \mathbb{R}^N} \langle x \rangle^{\alpha}\|f'(x,u)\|_{{\mathcal L}_{\mathbb{R}}(\mathbb{C},\mathbb{C})}< \infty \qquad \text{for all $M>0$.}
\end{equation}
Finally, suppose that $f(x,0)=0$ and $f'(x,0)= 0 \in {\mathcal L}_\mathbb{R}(\mathbb{C},\mathbb{C})$ for all $x \in \mathbb{R}^N$.
\qquad Then there exist open neighborhoods $U,V \subset L^\infty(\mathbb{R}^N)$ of zero with the property that for every $\varphi \in V$ there exists a unique solution $u= u_\varphi \in U$ of (\ref{nlh-1-integral}). Moreover, the map $V \to U$, $\varphi \mapsto u_\varphi$ is of class $C^1$.
\end{theorem}
\qquad The proof of this theorem is very short and merely based on the inverse function theorem, see Section~\ref{sec:proofs-main-results} below. It applies in particular to power-type nonlinearities
\begin{equation}
\label{eq:power-type}
f(x,u)=Q(x)|u|^{p-2}u.
\end{equation}
More precisely, if $p>2$ and $Q \in L^\infty_{\alpha}(\mathbb{R}^N)$ for some $\alpha>\frac{N+1}{2}$, we find that $f(x,\cdot)$ is real differentiable for every $x \in \mathbb{R}^N$, and $f'(x,u)= \partial_u f(x,u) \in {\mathcal L}_{\mathbb{R}}(\mathbb{C},\mathbb{C})$ is given by
$f'(x,u)v = Q(x)\bigl( \frac{p}{2}|u|^{p-2}v + \frac{p-2}{2}|u|^{p-4} u^2 \bar v\bigr)$, which implies that $$
\|f'(x,u)\|_{{\mathcal L}_{\mathbb{R}}(\mathbb{C},\mathbb{C})} \le (p-1)|Q(x)||u|^{p-2} \qquad \text{for $x \in \mathbb{R}^N$, $u \in \mathbb{C}$}.
$$
From this it is easy to deduce that the assumptions of Theorem~\ref{teo-implicit-function} are satisfied in this case. In particular, for given $\varphi \in L^\infty(\mathbb{R}^N)$, Theorem~\ref{teo-implicit-function} yields the existence of $\varepsilon>0$ and a unique local branch $(-\varepsilon,\varepsilon) \to L^\infty(\mathbb{R}^N)$, $\lambda \mapsto u_\lambda$ of solutions of the equation
\begin{equation}\label{nlh-1-integral-parameter-lambda}
u = {\mathcal R}_k(Q|u|^{p-2}u)+ \lambda \varphi \qquad \text{in $L^\infty(\mathbb{R}^N)$.}
\end{equation}
In our next result, we establish the existence of a global continuation of this local branch.
\begin{theorem}\label{thm:rabinowitz-applied}
Let $N\geq 3$, $2<p<2^\ast$, $Q\in L^\infty_\alpha(\mathbb{R}^N,\mathbb{R})\backslash\{0\}$ for some $\alpha > \frac{N+1}{2}$ and $\varphi\in L^\infty(\mathbb{R}^N)$. Moreover, let
$$
{\mathcal S}_\varphi:= \{(\lambda,u)\::\: \lambda \ge 0,\: u \in L^\infty(\mathbb{R}^N),\: \text{$u$ solves (\ref{nlh-1-integral-parameter-lambda})}\}\:\subset \: [0,\infty) \times L^\infty(\mathbb{R}^N),
$$
and let ${\mathcal C}_\varphi \subset {\mathcal S}_\varphi$ denote the connected component of ${\mathcal S}_\varphi$ which contains the point $(0,0)$.
Then ${\mathcal C}_\varphi \setminus \{(0,0)\}$ is an unbounded subset of $(0,\infty) \times L^\infty(\mathbb{R}^N)$.
\end{theorem}
\qquad We note that in general the unboundedness of ${\mathcal C}_\varphi$ does not guarantee that ${\mathcal C}_{\varphi}$ intersects $\{1\} \times L^\infty(\mathbb{R}^N)$, since the branch given by ${\mathcal C}_\varphi$ may blow up in $L^\infty(\mathbb{R}^N)$ at some value $\lambda \in (0,1)$. In particular, under the general assumptions of Theorem~\ref{thm:rabinowitz-applied}, we cannot guarantee the existence of solutions of the equation (\ref{nlh-1-integral}).
For this, additional a priori bounds on the set of solutions are needed. We shall find such a priori bounds in the case where $Q \le 0$ in $\mathbb{R}^N$, which is usually referred to as the {\em defocusing case}. Moreover, we require $Q$ to have compact support with some control of its diameter. In the following, we let $L^\infty_c(\mathbb{R}^N)$ denote the set of functions $Q \in L^\infty(\mathbb{R}^N)$ with compact support $\text{supp}\, Q \subset \mathbb{R}^N$, and we let $L^\infty_c(\mathbb{R}^N, \mathbb{R})$ denote the subspace of real-valued functions in $L^\infty_c(\mathbb{R}^N)$. We then have the following result.
\begin{theorem}\label{thm:unbounded_branch-defocusing}
Let $N\geq 3$, $2<p<2^\ast$, $Q\in L^\infty_c(\mathbb{R}^N,\mathbb{R})\backslash\{0\}$ and $\varphi\in L^\infty(\mathbb{R}^N)$. Assume furthermore that $Q\leq 0$ a.e. in $\mathbb{R}^N$ and $\text{diam}(\text{supp }Q)\le \frac{{\bf z({\text{\tiny $N$}})}}{k}$, where
${\bf z({\text{\tiny $N$}})}$ denotes the first positive zero
of the Bessel function $Y_{\frac{N-2}2}$ of the second kind of order $\frac{N-2}{2}$.
Then the set ${\mathcal C}_\varphi$ given in Theorem~\ref{thm:rabinowitz-applied} intersects $\{\lambda\} \times L^\infty(\mathbb{R}^N)$ for every $\lambda>0$. In particular, (\ref{nlh-1-integral-parameter-lambda}) admits a solution with $\lambda=1$.
\end{theorem}
\qquad To put the assumption on the support of $Q$ into perspective, we note that ${\bf z({\text{\footnotesize $3$}})} = \frac{\pi}{2}$ since $Y_{\frac{1}{2}}(t)=- \sqrt{\frac{2}{\pi t}} \cos t$ for $t>0$. Moreover, ${\bf z({\text{\tiny $N$}})} > {\bf z({\text{\footnotesize $3$}})}$ for $N > 3$, see \cite[Section 9.5]{AS}. Consequently, the assumptions of Theorem~\ref{thm:unbounded_branch-defocusing} are satisfied if $Q\in L^\infty_c(\mathbb{R}^N,\mathbb{R})\backslash\{0\}$ is a nonpositive function with $\text{diam}(\text{supp }Q)< \frac{\pi}{2k}$. We also refer to \cite[p. 467]{AS} for a list of the values of ${\bf z({\text{\tiny $N$}})}$ for $3 \le N \le 15$.
\qquad It seems appropriate to compare our results with recent work on the existence of {\em real-valued (standing wave) solutions} of (\ref{nlh-1}). A large class of such real-valued solutions has been detected and studied extensively in recent years by considering the associated integral equation
\begin{equation}
\label{eq:real-valued-variational}
u = \Psi_k \ast(N_f(u)),
\end{equation}
where $\Psi_k$ is the real part of the fundamental solution $\Phi_k$, see e.g. \cite{MMP,EW0,EW1,EY} and the references therein. In particular, a variational approach to detect and analyze solutions of (\ref{eq:real-valued-variational}) has been set up in \cite{EW1} for the special case where the nonlinearity $f$ is of the form $f(x,u)=Q(x)|u|^{p-2}u$ with nonnegative $Q \in L^\infty(\mathbb{R}^N,\mathbb{R})$ and suitable exponents $p>2$. Variants of this variational approach have been developed further in \cite{MMP,EY} under appropriate assumptions on the nonlinearity. However, the variational methods in these papers are of no use in the context of the integral equation (\ref{nlh-1-integral}), which has no variational structure. The contrast between real standing wave solutions and complex scattering solutions is even more glaring, as we shall see that the related homogeneous equation $u = {\mathcal R}_k [Q|u|^{p-2}u]$ admits only the trivial bounded solution $u\equiv 0$ if $p \ge 2$ and $Q \in L^\infty_\alpha(\mathbb{R}^N,\mathbb{R})$ for some $\alpha> \frac{N+1}{2}$. Indeed, we shall prove this Liouville type result in Proposition \ref{corol-kato} below by adapting a nonexistence result due to Kato \cite{kato59} to the present nonlinear context.
\qquad In the perturbative setting where a priori smallness assumptions are imposed, the detection of real and complex solutions of (\ref{nlh-1-integral}) follows the same strategy of applying contraction mapping arguments in suitable function spaces. In this context, we mention the paper \cite{Mandel1}, where a variant of the contraction mapping argument of Guti\'errez \cite{G} is developed and used to detect continua of small real-valued solutions of (\ref{nlh-1}) for a larger class of nonlinearities than in \cite{G}. More precisely, these continua are found by solving the non-homogeneous variant
$u = \Psi_k \ast(Q|u|^{p-2}u) +\varphi$ of (\ref{eq:real-valued-variational}) for a range of given small real-valued solutions $\varphi$ of the homogeneous Helmholtz equation $-\Delta \varphi -\varphi =0$.
\qquad Due to the lack of a priori smallness assumptions and the lack of a variational structure, our main results given in Theorems 1.2, 1.5 and 1.6 require a different approach than in the above-mentioned papers. As mentioned earlier, this
approach is based on topological fixed point theory, and it therefore requires suitable a priori bounds. With regard to this aspect, the present paper is related to \cite{EW2}, where continuous branches of real-valued standing wave solutions of (\ref{eq:real-valued-variational}) have been constructed. However, while the derivation of suitable a priori bounds is the key step both in \cite{EW2} and in the present paper, these bounds are of a different nature, as they relate to different integral equations and to different classes of solutions. In \cite{EW2}, under suitable additional assumptions on $Q$ and $p$, a priori bounds are derived for real-valued solutions of $u = \Psi_k \ast(Q|u|^{p-2}u)$ which are positive within the support of the nonlinearity $f$. In contrast, here we need a priori bounds for complex solutions of (\ref{nlh-1-integral}), and for this we cannot use positivity properties and local maximum principles. Instead, the approach of the present paper is based on a Liouville theorem relying on Sommerfeld's radiation condition and on combining regularity and test function estimates with local monotonicity properties of the function $\Psi_k$, see Sections \ref{nonexistence} and~\ref{sec:priori-estim-defoc} below.
\qquad The paper is organized as follows. In Section~\ref{sec:estim-helmh-resolv} we establish basic estimates of the resolvent operator ${\mathcal R}_k$, and we prove Proposition~\ref{resolvent-compact-and-continuous}. In Section~\ref{sec:estim-subst-oper}, we show useful estimates and regularity properties of the substitution operator associated with the nonlinearity $f(x,u)$. In order to apply topological fixed point theory, we first need to prove the nonexistence of solutions to linear and superlinear integral equations related to the operator ${\mathcal R}_k$. This will be done in Section~\ref{nonexistence}. In Section~\ref{sec:priori-estim-defoc}, we then prove a priori bounds for solutions of equation \eqref{nlh-1-integral} and related variants under various assumptions on the nonlinearity $f$. The proof of the main theorems is then completed in Section~\ref{sec:proofs-main-results}. Finally, in the appendix, we provide a relative a priori bound based on bootstrap regularity estimates between $L^p$-spaces which is used in the proof of Theorem~\ref{thm:unbounded_branch-defocusing}.
\section{Estimates for the Helmholtz resolvent operator}
\label{sec:estim-helmh-resolv}
\begin{lemma}\label{lm 2.1}
Let $N\geq2$, $k>0$ and for $\alpha>\frac{N+1}{2}$, let $\tau(\alpha)$ be defined by (\ref{exp 1}).
Then for any $ v\in L^\infty_{\alpha } (\mathbb{R}^N)$ and $\alpha>\frac{N+1}{2}$, we have
\[
\||\Phi_k|* v\|_{L^\infty_{\tau(\alpha)}}\leq C\| v\|_{L^\infty_{\alpha }},\quad \||\nabla \Phi_k|* v\|_{L^\infty_{\tau(\alpha)}}\leq C\| v\|_{L^\infty_{\alpha }},
\]
where the constant $C>0$ depends only on $N$, $\alpha$ and $k$.
\end{lemma}
\begin{proof}
In the following, the letter $C>0$ always denotes constants which depend only on $N$, $\alpha$ and $k$. We observe that
\[
|\Phi_k(x)|\leq
\begin{cases}
C\, |x|^{2-N}\quad &\text{if }\ N\geq 3,\\[1.5mm]
C\, \log \frac2{|x|} &\text{if }\ N=2,\,
\end{cases}\qquad |\nabla\Phi_k(x)|\leq C|x|^{1-N}
\quad\,\text{for $0<|x|\leq 1$} \]
and
\[ |\Phi_k(x)|,\, |\nabla\Phi_k(x)|\leq C\, |x|^{\frac{1-N}2} \quad \text{if } |x|>1. \]
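These bounds follow, for instance, from the standard small- and large-argument behavior of the Hankel functions and their derivatives (see \cite{AS}); in particular,
\[
H^{(1)}_{\nu}(t)=O(t^{-\nu}) \ \text{ as } t \to 0^+ \text{ for } \nu>0, \qquad H^{(1)}_{0}(t)=O\bigl(\log \tfrac1t\bigr) \ \text{ as } t \to 0^+,
\]
while $H^{(1)}_{\nu}(t)$ and $\frac{d}{dt}H^{(1)}_{\nu}(t)$ are $O(t^{-\frac12})$ as $t \to \infty$.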
It then follows that
\begin{align*}
&|(|\Phi_k|* v)(x)|\leq \int_{\mathbb{R}^N} |\Phi_k(z)|\,| v(x-z)|\, dz\notag\\
&\leq \begin{cases}
C\| v\|_{L^\infty_\alpha}\,\Big(\int_{B_1(0)}|z|^{2-N}\langle
x-z\rangle^{-\alpha}\, dz + \int_{\mathbb{R}^N\backslash B_1(0)}|z|^{\frac{1-N}2}\, \langle x-z\rangle^{-\alpha}\, dz\Big)\ \ \text{if }\ N\geq3 ,\\[3mm]
C\| v\|_{L^\infty_\alpha}\,\Big(\int_{B_1(0)}\log\frac2{|z|} \langle
x-z\rangle^{-\alpha}\, dz + \int_{\mathbb{R}^N\backslash B_1(0)}|z|^{\frac{1-N}2}\, \langle x-z\rangle^{-\alpha}\, dz\Big)\ \ \text{if }\ N=2.
\end{cases}
\end{align*}
For $|x|\leq 4$, it is easy to see that
\begin{align}\label{2.2}
|(|\Phi_k|* v)(x)|\leq \begin{cases}
C\| v\|_{L^\infty_\alpha}\,\Big(\int_{B_1(0)}|z|^{2-N} \,dz + \int_{\mathbb{R}^N\backslash B_1(0)}|z|^{\frac{1-N}2-\alpha}\, dz\Big)\ \ \text{if }N\geq3 \\[3mm]
C\| v\|_{L^\infty_\alpha}\,\Big(\int_{B_1(0)}\log\frac2{|z|} \, dz + \int_{\mathbb{R}^N\backslash B_1(0)}|z|^{\frac{1-N}2-\alpha}\, dz\Big)\ \ \text{if }N=2,
\end{cases} \end{align}
and
\begin{align}\label{2.2-gradient}
|(|\nabla\Phi_k|* v)(x)|\leq C\| v\|_{L^\infty_\alpha}\,\Big(\int_{B_1(0)}|z|^{1-N} \,dz + \int_{\mathbb{R}^N\backslash B_1(0)}|z|^{\frac{1-N}2-\alpha}\, dz\Big),
\end{align}
where $\frac{1-N}2-\alpha<-N$.
\qquad In the following, we consider $|x|>4$. Since $\alpha>\frac{N+1}{2}$, direct computation shows that
\begin{align*}
I_1&:=\begin{cases} \int_{B_1(0)}|z|^{2-N}\, \langle
x-z\rangle^{-\alpha}\, dz\quad{\rm if}\ N\geq 3 \\[1mm]
\int_{B_1(0)}\log\frac{2}{|z|}\, \langle
x-z\rangle^{-\alpha}\, dz\quad{\rm if}\ N=2
\end{cases}
\\[1.5mm]
& \displaystyle\,\leq C |x|^{-\alpha} \le C \langle x\rangle^{-\alpha}.
\end{align*}
Moreover,
\begin{align*}
I_2&:=\int_{B_{\frac{|x|}2} (0)\setminus B_1(0)} |z|^{\frac{1-N}2}\, \langle
x-z\rangle^{-\alpha}\, dz \leq C|x|^{-\alpha} \int_{B_{\frac{|x|}2} (0)\setminus B_1(0)} |z|^{\frac{1-N}2} dz
\leq C|x|^{-\alpha+\frac{N+1}{2}},\\
I_3&:=\int_{B_{\frac{|x|}2} (x)} |z|^{\frac{1-N}2}\, \langle
x-z\rangle^{-\alpha}\, dz \leq C|x|^{-\frac{N-1}{2}} \int_{B_{\frac{|x|}2} (x) } \langle
x-z\rangle^{-\alpha} dz
\leq C|x|^{-\tau(\alpha)}
\end{align*}
and
\begin{align*} I_4:&=\int_{\mathbb{R}^N\setminus(B_{\frac{|x|}2} (0)\cup B_{\frac{|x|}2} (x)) } |z|^{\frac{1-N}2}\, \langle
x-z\rangle^{-\alpha}\, dz \\
&\le |x|^{-\alpha+\frac{N+1}{2}} \int_{\mathbb{R}^N\setminus(B_{\frac{1}2} (0)\cup B_{\frac{1}2} (\hat x)) } |z|^{\frac{1-N}{2}}|z-\hat x|^{-\alpha}\, dz
\leq C|x|^{-\alpha+\frac{N+1}{2}},
\end{align*}
where $\hat x=\frac{x}{|x|}$. Since $-\tau(\alpha)\geq \max\{-\frac{N-1}{2}, -\alpha, -\alpha+\frac{N+1}{2}\}$, we may combine these estimates with (\ref{2.2}) to see that
\[|(|\Phi_k|*v)(x)|\leq C\| v\|_{L^\infty_{ \alpha}}\,\bigg(\sum^{4}_{j=1}I_j\bigg) \leq C \langle x \rangle^{-\tau(\alpha)} \| v\|_{L^\infty_{ \alpha}} \quad \text{for all $x\in \mathbb{R}^N.$}\]
Moreover, noting that
\begin{align*}
\tilde I_1:= \int_{B_1(0)}|z|^{1-N}\, \langle
x-z\rangle^{-\alpha}\, dz
\leq C |x|^{-\alpha} \leq C \langle x\rangle^{-\alpha}\qquad \text{for $|x|>4$,}
\end{align*}
we find by (\ref{2.2-gradient}) that
\[|(|\nabla \Phi_k|*v)(x)|\leq C\| v\|_{L^\infty_{ \alpha}}\,\bigg(\tilde I_1+\sum^{4}_{j=2}I_j\bigg) \leq C \langle x \rangle^{-\tau(\alpha)}\| v\|_{L^\infty_{ \alpha}}\quad \text{for all $x\in \mathbb{R}^N.$}\]
The proof is thus complete.
\end{proof}
\begin{proof}[Proof of Proposition~\ref{resolvent-compact-and-continuous}]
(i) Clearly, Lemma~\ref{lm 2.1} yields (\ref{eq:kappa-sigma-finite}) and therefore the continuity of the linear resolvent operator ${\mathcal R}_k: L^\infty_\alpha(\mathbb{R}^N) \to L^\infty_{\tau(\alpha)}(\mathbb{R}^N)$, whereas the latter space is continuously embedded in $L^\infty(\mathbb{R}^N)$. To see the compactness of ${\mathcal R}_k$ as a map $L^\infty_\alpha(\mathbb{R}^N) \to L^\infty(\mathbb{R}^N)$, let $(u_n)_n$ be a sequence in
$L^\infty_\alpha(\mathbb{R}^N)$ with
$$
m:= \sup_{n \in \mathbb{N}} \|u_n \|_{L^\infty_{\alpha}}< \infty.
$$
Moreover, let $v_n:= {\mathcal R}_k u_n = \Phi_k * u_n$ for $n \in \mathbb{N}$. By Lemma~\ref{lm 2.1}, we then have
\begin{equation}
\label{eq:proof-est-compactness}
\|v_n\|_{L^\infty_{\tau(\alpha)}} \le C m \quad \text{and}\qquad \|\nabla v_n\|_{L^\infty_{\tau(\alpha)}}=
\|\nabla \Phi_k * u_n\|_{L^\infty_{\tau(\alpha)}} \le C m
\end{equation}
for all $n \in \mathbb{N}$. In particular, the sequence $(v_n)_n$ is bounded in $C^1_{loc}(\mathbb{R}^N)$. By the Arzel\`a-Ascoli theorem, there exists $v \in L^\infty_{loc}(\mathbb{R}^N)$ with
\begin{equation}
\label{eq:proof-locally-uniformly}
\text{$v_n \to v$ locally uniformly on $\mathbb{R}^N$}
\end{equation}
after passing to a subsequence. By (\ref{eq:proof-est-compactness}), it then follows that $v \in L^\infty_{\tau(\alpha)}(\mathbb{R}^N)$ with $\|v\|_{L^\infty_{\tau(\alpha)}} \le C m$.
\qquad Moreover, for given $R>0$ we have, with $A_R:= \mathbb{R}^N \setminus B_R(0)$,
$$
\|v_n-v\|_{L^\infty(A_R)}\le \|v_n\|_{L^\infty(A_R)} + \|v\|_{L^\infty(A_R)} \le R^{-\tau(\alpha)}\Bigl(\|v_n\|_{L^\infty_{\tau(\alpha)}} + \|v\|_{L^\infty_{\tau(\alpha)}}\Bigr) \le 2Cm R^{-\tau(\alpha)}.
$$
Combining this estimate with (\ref{eq:proof-locally-uniformly}), we see that $\limsup \limits_{n \to \infty}\|v_n-v\|_{L^\infty(\mathbb{R}^N)}\le
2Cm R^{-\tau(\alpha)}$ for every $R>0$. Since $\tau(\alpha)>0$, we conclude that $v_n \to v$ in $L^\infty(\mathbb{R}^N)$. This shows the compactness of the operator $L^\infty_\alpha(\mathbb{R}^N) \to L^\infty(\mathbb{R}^N)$.\\
(ii) Let $\alpha > \frac{N(N+3)}{2(N+1)}$ and $h \in L^\infty_\alpha(\mathbb{R}^N)$. It then follows that $h \in L^{\frac{2(N+1)}{N+3}}(\mathbb{R}^N)$. Consequently, \cite[Proposition A.1]{EW1} implies that $u= {\mathcal R}_k h$ is a strong solution of $-\Delta u - k^2 u = h$. Moreover, $u$ satisfies~(\ref{eqn:sommerfeld1-averaged}) by the estimate in \cite[Theorem 8]{G} and the remark following it.
Finally, we suppose that $\alpha > N$. In this case, the linear map
$$
\widetilde{\mathcal R}_k: L^\infty_\alpha(\mathbb{R}^N) \to L^\infty_{\text{\tiny $\frac{N-1}{2}$}}(\mathbb{R}^N), \qquad v \mapsto \widetilde{\mathcal R}_k(v):=\frac{d {\mathcal R}_k v}{dr} -ik {\mathcal R}_k v
$$
is well-defined and bounded by Lemma~\ref{lm 2.1}. Moreover, if $h \in L^\infty(\mathbb{R}^N)$ has compact support, the fact that $\Phi_k$ satisfies~(\ref{sommerfeld-1}) and elementary convolution estimates show that $u= {\mathcal R}_k h$ also satisfies~(\ref{sommerfeld-1}). In the general case $h \in L^\infty_\alpha(\mathbb{R}^N)$, we consider a sequence of functions $h_n \in L^\infty_\alpha(\mathbb{R}^N)$ with compact support and such that $h_n \to h$ in $L^\infty_\alpha(\mathbb{R}^N)$, which then also implies that
\begin{equation}
\label{eq:extra-argument-strong-s-c}
\widetilde{\mathcal R}_k h_n \to \widetilde{\mathcal R}_k h \qquad \text{in $\: L^\infty_{\text{\tiny $\frac{N-1}{2}$}}(\mathbb{R}^N).$}
\end{equation}
Moreover, for every $n \in \mathbb{N}$ we have
\begin{align*}
\limsup_{|x| \to \infty} |x|^{\frac{N-1}{2}}\bigl|[\widetilde{\mathcal R}_k h](x)\bigr| &\le
\limsup_{|x| \to \infty} |x|^{\frac{N-1}{2}}\bigl|[\widetilde{\mathcal R}_k h_n](x)\bigr| + \|\widetilde{\mathcal R}_k h-\widetilde{\mathcal R}_k h_n\|_{L^\infty_{\text{\tiny $\frac{N-1}{2}$}}}\\
&= \|\widetilde{\mathcal R}_k h-\widetilde{\mathcal R}_k h_n\|_{L^\infty_{\text{\tiny $\frac{N-1}{2}$}}},
\end{align*}
and thus
$$
\limsup_{|x| \to \infty} |x|^{\frac{N-1}{2}}\bigl|[\widetilde{\mathcal R}_k h](x)\bigr| \le \lim_{n \to \infty}\|\widetilde{\mathcal R}_k h-\widetilde{\mathcal R}_k h_n\|_{L^\infty_{\text{\tiny $\frac{N-1}{2}$}}} = 0
$$
by (\ref{eq:extra-argument-strong-s-c}). Hence $u= {\mathcal R}_k h$ satisfies~(\ref{sommerfeld-1}).
\end{proof}
\section{Estimates for the substitution operator}
\label{sec:estim-subst-oper}
\begin{lemma}
\label{lem:nemytskii_cont}
Let, for some $\alpha \in \mathbb{R}$, the nonlinearity $f: \mathbb{R}^N \times \mathbb{C} \to \mathbb{C}$ be a continuous function satisfying
\begin{equation}
\label{eq:assumption-f1-1}
S_{f,M,\alpha}:= \sup_{|u|\le M,x \in \mathbb{R}^N} \langle x \rangle^{\alpha}|f(x,u)|< \infty \qquad \text{for all $M>0$.}
\end{equation}
Then the superposition operator
$$
N_f: L^\infty (\mathbb{R}^N) \to L^{\infty}_{\alpha'}(\mathbb{R}^N),\qquad N_f(u)(x):= f(x,u(x))
$$
is well defined, bounded and continuous for every $\alpha'<\alpha$.
\end{lemma}
\begin{proof}
It clearly follows from (\ref{eq:assumption-f1-1}) that $N_f$ is well defined and satisfies the estimate
$$
\|N_f(u)\|_{L^\infty_{\alpha'}}\le \|N_f(u)\|_{L^\infty_\alpha} \le S_{f,M,\alpha} \qquad \text{for $M>0$ and $u \in L^\infty(\mathbb{R}^N)$ with $\|u\|_{L^\infty} \le M$.}
$$
To see the continuity, we consider a sequence $(u_n)_n \subset L^\infty(\mathbb{R}^N)$ with $u_n \to u$ in $L^\infty(\mathbb{R}^N)$, and we put
$M:= \sup \{\|u_n\|_{L^\infty}\::\: n \in \mathbb{N}\}$. For given $R>0$ we have, with $B_R:= B_R(0)$ and
$A_R:= \mathbb{R}^N \setminus B_R$,
\begin{align*}
\|N_f(u_n)-N_f(u)\|_{L^\infty_{\alpha'}(A_R)} & \le \|N_f(u_n)\|_{L^\infty_{\alpha'}(A_R)} +\|N_f(u)\|_{L^\infty_{\alpha'}(A_R)}\\
&\le R^{\alpha'-\alpha}\Bigl(
\|N_f(u_n)\|_{L^\infty_{\alpha}(A_R)} +\|N_f(u)\|_{L^\infty_{\alpha}(A_R)}\Bigr)\\
&\le 2 S_{f,M,\alpha} R^{\alpha'-\alpha}.
\end{align*}
Moreover, since $f$ is uniformly continuous on $D_R:= \{(x,z) \in \mathbb{R}^N \times \mathbb{C}\::\: |x| \le R,\:|z| \le M\}$, we find that
$$
\|N_f(u_n)-N_f(u)\|_{L^\infty(B_R)}= \sup_{|x| \le R}|f(x,u_n(x))-f(x,u(x))| \to 0 \qquad \text{as $n \to \infty$.}
$$
We thus infer that $\limsup \limits_{n \to \infty}\|N_f(u_n)-N_f(u)\|_{L^\infty_{\alpha'}(\mathbb{R}^N)}\le 2 S_{f,M,\alpha} R^{\alpha'-\alpha}$ for every $R>0$. Since $\alpha' < \alpha$ by assumption, we conclude that $N_f(u_n) \to N_f(u)$ in $L^\infty_{\alpha'}(\mathbb{R}^N)$. This shows the continuity of $N_f: L^\infty(\mathbb{R}^N) \to L^\infty_{\alpha'}(\mathbb{R}^N)$.
\end{proof}
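\qquad For the power-type nonlinearity (\ref{eq:power-type}) with $p\ge 2$ and $Q \in L^\infty_\alpha(\mathbb{R}^N)$, for example, condition (\ref{eq:assumption-f1-1}) holds with $S_{f,M,\alpha} = \|Q\|_{L^\infty_\alpha} M^{p-1}$, so Lemma~\ref{lem:nemytskii_cont} applies in particular to this class of nonlinearities.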
\begin{lemma}\label{lem:nemytskii_C-1}
Let, for some $\alpha>\frac{N+1}{2}$, the nonlinearity $f: \mathbb{R}^N \times \mathbb{C} \to \mathbb{C}$ be a continuous function satisfying (\ref{eq:assumption-f1-1}). Suppose moreover that the function $f(x,\cdot):\mathbb{C} \to \mathbb{C}$ is real differentiable for every $x \in \mathbb{R}^N$, and that $f':= \partial_u f: \mathbb{R}^N \times \mathbb{C} \to {\mathcal L}_{\mathbb{R}}(\mathbb{C},\mathbb{C})$ is a continuous function satisfying
\begin{equation}
\label{eq:assumption-f1-1-C-1}
T_{f,M,\alpha}:= \sup_{|u|\le M,x \in \mathbb{R}^N} \langle x \rangle^{\alpha}\|f'(x,u)\|_{{\mathcal L}_\mathbb{R}(\mathbb{C},\mathbb{C})}< \infty \qquad \text{for all $M>0$.}
\end{equation}
Then the superposition operator $N_f: L^\infty (\mathbb{R}^N) \to L^{\infty}_{\alpha'}(\mathbb{R}^N)$ is of class $C^1$ for $\alpha' < \alpha$ with
\begin{equation}
\label{eq:expression-derivative}
N_f'(u) := N_{f'}(u) \qquad \text{for $u \in L^\infty(\mathbb{R}^N)$,}
\end{equation}
where $N_{f'}(u) \in {\mathcal L}_\mathbb{R}(L^\infty(\mathbb{R}^N),L^\infty_{\alpha'}(\mathbb{R}^N))$ is defined by
\begin{equation}
\label{eq:definition-substitution-deriv}
[N_f'(u)v](x):= f'(x,u(x))v(x) \qquad \text{for $v \in L^\infty(\mathbb{R}^N), x \in \mathbb{R}^N$.}
\end{equation}
\end{lemma}
\begin{proof}
For the sake of brevity, we put $X:= L^\infty(\mathbb{R}^N)$ and $Y:= L^{\infty}_{\alpha'}(\mathbb{R}^N)$. By assumption (\ref{eq:assumption-f1-1-C-1}) and a very similar argument as in the proof of Lemma~\ref{lem:nemytskii_cont}, the nonlinear operator
$$
N_{f'}: X \to {\mathcal L}_\mathbb{R}(X,Y)
$$
defined by (\ref{eq:definition-substitution-deriv}) is well-defined, bounded and continuous. Thus, it suffices to show that $N_f$ is G\^ateaux-differentiable, and that (\ref{eq:expression-derivative}) is valid as a directional derivative. So let $u,v \in X$, and let $M:= \|u\|_{L^\infty}+ \|v\|_{L^\infty}$. For $\theta \in \mathbb{R}$ and $x \in \mathbb{R}^N$, we estimate
\begin{align*}
\Bigl|&\frac{N_f(u+\theta v)(x)-N_f(u)(x)}{\theta}-[N_{f'}(u)v](x)\Bigr|\\
&= \Bigl|\frac{f(x,[u+\theta v](x))-f(x,u(x))}{\theta}-f'(x,u(x))v(x)\Bigr|\\
&= \Bigl|\int_0^1 \bigl[f'(x,[u+\xi \theta v](x))-f'(x,u(x))\bigr] v(x)\, d\xi \Bigr| \le |v(x)| g_\theta(x)
\end{align*}
with
$$
g_\theta(x):= \sup_{\xi\in[0,1]}\bigl\|f'(x,[u+\xi \theta v](x))-f'(x,u(x))\bigr\|_{{\mathcal L}_{\mathbb{R}}(\mathbb{C},\mathbb{C})} \qquad \text{for $\theta \in \mathbb{R}$, $x \in \mathbb{R}^N$.}
$$
Since $\|u + \tau v\|_{L^\infty} \le M$ for $\tau \in \mathbb{R}$, $|\tau| \le 1$, we have
$$
|g_\theta(x)|\le \sup_{\tau \in[0,1]}\|f'(x,[u+\tau v](x))\|_{{\mathcal L}_{\mathbb{R}}(\mathbb{C},\mathbb{C})}
+ \|f'(x,u(x))\|_{{\mathcal L}_{\mathbb{R}}(\mathbb{C},\mathbb{C})} \le 2T_{f,M,\alpha}\langle x\rangle^{-\alpha}
$$
for $|\theta| \le 1$, $x \in \mathbb{R}^N$. Similarly as in the proof of Lemma~\ref{lem:nemytskii_cont}, we now define, for given $R>0$, $B_R:= B_R(0)$, $A_R:= \mathbb{R}^N \setminus B_R$, and $D_R:= \{(x,z) \in \mathbb{R}^N \times \mathbb{C}\::\: |x| \le R,\:|z| \le M\}$. From the estimate above, it then follows that
\begin{equation}
\label{diff-substitution-est-1}
\Bigl \| \frac{N_f(u+\theta v)-N_f(u)}{\theta}-N_{f'}(u)v \Bigr\|_{L^\infty_{\alpha'}(A_R)} \le 2 \|v\|_X T_{f,M,\alpha}R^{\alpha'-\alpha}.
\end{equation}
Moreover, since, by assumption, $f'$ is uniformly continuous on the compact set $D_R$, we find that
$$
\|g_\theta\|_{L^\infty(B_R)} \to 0 \qquad \text{as $\theta \to 0$}.
$$
We thus conclude that
$$
\limsup \limits_{\theta \to 0}\, \Bigl \|\frac{N_f(u+\theta v)-N_f(u)}{\theta}-N_{f'}(u)v \Bigr \|_{L^\infty_{\alpha'}(\mathbb{R}^N)} \le 2 \|v\|_X\, T_{f,M,\alpha}\,R^{\alpha'-\alpha}\qquad \text{for every $R>0$.}
$$
Since $\alpha' < \alpha$ by assumption, we conclude that $\frac{N_f(u+\theta v)-N_f(u)}{\theta} \to N_{f'}(u)v$ in $Y$ as $\theta \to 0$. The proof is thus finished.
\end{proof}
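\qquad Continuing the example after Lemma~\ref{lem:nemytskii_cont}, for $f(x,u)=Q(x)|u|^{p-2}u$ with $p>2$ and $Q \in L^\infty_\alpha(\mathbb{R}^N)$, the bound $\|f'(x,u)\|_{{\mathcal L}_{\mathbb{R}}(\mathbb{C},\mathbb{C})} \le (p-1)|Q(x)||u|^{p-2}$ recorded in the introduction gives $T_{f,M,\alpha} \le (p-1)\|Q\|_{L^\infty_\alpha} M^{p-2}$, so Lemma~\ref{lem:nemytskii_C-1} also applies to power-type nonlinearities.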
\section{Nonexistence of outgoing waves for the nonlinear Helmholtz equation}
\label{nonexistence}
To begin this section, we recall the following nonexistence result for eigenfunctions of Schr\"odinger operators with positive eigenvalue.
It is a consequence of a result by Alsholm and Schmidt \cite[Proposition 2 of Appendix 3]{alsholm-schmidt70} extending earlier results due to Kato \cite{kato59}:
\begin{proposition}[see {\cite[Proposition 2]{alsholm-schmidt70}}]\label{prop:kato}
Let $u\in W^{2,2}_{\text{loc}}(\mathbb{R}^N,\mathbb{C})$ solve $-\Delta u +Vu=k^2u$ in $\mathbb{R}^N$, where $V\in L^\infty(\mathbb{R}^N)$
satisfies
\begin{equation}
\label{condition-V}
|V(x)|\leq C\langle x\rangle^{-1-\varepsilon} \qquad \text{for a.e. $x\in\mathbb{R}^N$ with constants $C, \varepsilon>0$.}
\end{equation}
If
$$
\liminf_{R\to\infty}\frac1R\int_{B_R(0)}(|\nabla u|^2+k^2|u|^2)\, dx=0,
$$
then $u$ vanishes identically in $\mathbb{R}^N\backslash B_R(0)$ for some $R>0$.\\
If, moreover, $V$ is real-valued, then $u$ vanishes identically in $\mathbb{R}^N$.
\end{proposition}
\begin{proof}
It has been proved in {\cite[Proposition 2]{alsholm-schmidt70}} that $u$ vanishes identically in $\mathbb{R}^N\backslash B_R(0)$ for some $R>0$.
Assuming in addition that $V$ is real-valued, we then deduce by a unique continuation result that $u$ vanishes identically on $\mathbb{R}^N$.
More precisely, for $u_1=\text{Re}(u)$ and $u_2=\text{Im}(u)$ we have
$|\Delta u_i|\leq C |u_i|$ on $\mathbb{R}^N$ with some constant $C>0$. The strong unique continuation property \cite[Theorem 6.3]{jerison-kenig85}
(see also Remark 6.7 in the same paper) therefore implies $u_1=u_2=0$ on $\mathbb{R}^N$, and this concludes the proof.
\end{proof}
\qquad From Proposition~\ref{prop:kato}, we shall now deduce the following nonexistence result for linear and superlinear variants of the corresponding integral equation involving the Helmholtz resolvent operator.
\begin{proposition}
\label{corol-kato}
Let $N \ge 3$, $2 \le p < \infty$, $\alpha >\frac{N+1}{2}$, and let $u \in L^\infty(\mathbb{R}^N)$ be a solution of
\begin{equation}
\label{eq:corol-kato-eq}
u = {\mathcal R}_k [Q|u|^{p-2}u]
\end{equation}
with a function $Q \in L^\infty_\alpha(\mathbb{R}^N, \mathbb{R})$. Then $u \equiv 0$.
\end{proposition}
\begin{proof}
Let $V:= Q|u|^{p-2}$, so that (\ref{eq:corol-kato-eq}) can be written in the form
\begin{equation}
\label{eq:corol-kato-eq-variant}
u = {\mathcal R}_k [V u].
\end{equation}
We then have $V \in L^\infty_\alpha(\mathbb{R}^N, \mathbb{R})$ and also $Vu \in L^\infty_\alpha(\mathbb{R}^N)$ since $u \in L^\infty(\mathbb{R}^N)$. Therefore Proposition~\ref{resolvent-compact-and-continuous} implies that $u \in L^\infty_{\tau(\alpha)}(\mathbb{R}^N)$ with $\tau(\alpha)$ given in (\ref{exp 1}). It then follows that $Vu \in L^\infty_{\alpha_1}(\mathbb{R}^N)$
with $\alpha_1 = \alpha +\tau(\alpha)$ and hence $u \in L^\infty_{\tau(\alpha_1)}(\mathbb{R}^N)$ again by Proposition~\ref{resolvent-compact-and-continuous}. Defining inductively $\alpha_{k}:= \alpha_{k-1}+\tau(\alpha_{k-1})$ for $k \ge 2$, we may iterate the application of Proposition~\ref{resolvent-compact-and-continuous} to obtain that $u \in L^\infty_{\tau(\alpha_k)}(\mathbb{R}^N)$ for all $k \in \mathbb{N}$. After a finite number of steps, we therefore deduce from (\ref{exp 1}) that
$u \in L^\infty_{\text{\tiny $\frac{N-1}{2}$}}(\mathbb{R}^N)$ and therefore $Vu \in L^\infty_{\alpha+\text{\tiny $\frac{N-1}{2}$}}(\mathbb{R}^N)$. Since $\alpha > \frac{N+1}{2}$ by assumption, this implies that $Vu \in L^\infty(\mathbb{R}^N) \cap L^1(\mathbb{R}^N)$. It then follows e.g. from \cite[Proposition A.1]{EW1} that $u \in W^{2,r}_{\text{loc}}(\mathbb{R}^N) \cap L^{\frac{2(N+1)}{N-1}}(\mathbb{R}^N) \cap L^\infty(\mathbb{R}^N)$ for $r < \infty$, and $u$ is a strong solution of the differential equation
\begin{equation}\label{eqn:nlh_power}
-\Delta u -k^2 u = V u\quad\text{in }\mathbb{R}^N.
\end{equation}
Moreover, by \cite[Theorem 8]{G} and the remark following it, $u$ satisfies the Sommerfeld outgoing radiation condition in the form
given in (\ref{eqn:sommerfeld1-averaged}), i.e.
\begin{equation}
\label{eq:sommerfeld-proof}
\lim_{R\to\infty}\frac{1}{R} \int_{B_R}\left|\nabla
u(x)-iku(x)\frac{x}{|x|} \right|^2\, dx=0.
\end{equation}
We now proceed similarly as in the proof of Corollary 1 in \cite{G}. Expanding the terms in (\ref{eq:sommerfeld-proof}), the condition can be rewritten as
\begin{align}\label{eqn:som2}
\lim_{R\to\infty}\frac1{R}\left\{\int_{B_R}(|\nabla u|^2+k^2|u|^2)\, dx
- 2k \int_0^R \text{Im}\left(\int_{\partial B_\rho}\overline{u}\nabla u\cdot\frac{x}{|x|}\, d\sigma\right)\, d\rho\right\}=0.
\end{align}
Since $u\in W^{2,2}_{\text{loc}}(\mathbb{R}^N)$ solves \eqref{eqn:nlh_power} in the strong sense, the divergence theorem
gives
\begin{align*}
\int_{\partial B_\rho}\overline{u}\nabla u\cdot\frac{x}{|x|}\, d\sigma
&=\int_{B_\rho}|\nabla u|^2\, dx + \int_{B_\rho}\overline{u}\Delta u\, dx\\
&=\int_{B_\rho}|\nabla u|^2\, dx - \int_{B_\rho} (k^2|u|^2 + V|u|^2)\, dx,
\end{align*}
where the right-hand side in the last line is purely real-valued, since by assumption $V= Q|u|^{p-2}$ takes only real values.
Consequently, we find
$$
\text{Im}\left( \int_{\partial B_\rho}\overline{u}\nabla u\cdot\frac{x}{|x|}\, d\sigma \right)=0
$$
for all $\rho>0$, and plugging this into \eqref{eqn:som2} yields
\begin{equation}\label{eqn:som3}
\lim_{R\to\infty}\frac1{R}\int_{B_R}(|\nabla u|^2+k^2|u|^2)\, dx=0.
\end{equation}
Moreover, since $V \in L^\infty_\alpha(\mathbb{R}^N)$ and $\alpha> \frac{N+1}{2}>1$, condition (\ref{condition-V}) is satisfied for $V$. Hence Proposition~\ref{prop:kato} implies that $u \equiv 0$ on $\mathbb{R}^N$.
\end{proof}
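\qquad We remark that the bootstrap in the preceding proof indeed terminates after finitely many steps: as long as $\alpha_{k-1}<N$, definition (\ref{exp 1}) gives $\tau(\alpha_{k-1})=\alpha_{k-1}-\frac{N+1}{2}\ge \alpha-\frac{N+1}{2}>0$, so the weights $\alpha_k$ increase by at least the fixed amount $\alpha-\frac{N+1}{2}$ in each step until $\alpha_k\ge N$, after which $\tau(\alpha_k)=\frac{N-1}{2}$.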
\section{A priori bounds for solutions}
\label{sec:priori-estim-defoc}
The aim of this section is to collect various a priori bounds for solutions of (\ref{nlh-1-integral}) under different assumptions on the nonlinearity $f$.
\subsection{A priori bounds for the case of linearly bounded nonlinearities}
\label{sec-a-priori-linearly-bounded}
In this subsection we focus on linearly bounded nonlinearities, and we prove the following boundedness property.
\begin{proposition}
\label{sec:proof-theorem-refw-1}
Let, for some $\alpha>\frac{N+1}{2}$, the nonlinearity $f$ satisfy the assumption
\begin{equation}
\label{eq:assumption-f1-section}
\sup_{|u|\le M,x \in \mathbb{R}^N}\langle x\rangle^{\alpha}|f(x,u)|< \infty \qquad \text{for all $M>0$}
\end{equation}
and \underline{one} of the assumptions $(f_1)$ or $(f_2)$ from Theorem~\ref{W teo 1-sublinear}.
Moreover, let $\varphi \in L^\infty(\mathbb{R}^N)$, and let ${\mathcal F} \subset L^\infty(\mathbb{R}^N)$ be the set of functions $u$ which solve the equation
\begin{equation}
\label{schaefer-equation}
u = \mu \Bigl({\mathcal R}_k N_f(u) + \varphi\Bigr) \qquad \text{for some $\mu \in [0,1]$.}
\end{equation}
Then ${\mathcal F}$ is bounded in $L^\infty(\mathbb{R}^N)$.
\end{proposition}
\begin{proof}
We first assume $(f_2)$. Let $u \in {\mathcal F}$. By (\ref{schaefer-equation}) and Proposition~\ref{resolvent-compact-and-continuous}, we then have
\begin{align*}
\|u\|_{L^\infty} &\le \|{\mathcal R}_k N_f(u)\|_{L^\infty} + \|\varphi\|_{L^\infty} \le \bigl\| |\Phi_k| * N_f(u) \bigr\|_{L^\infty_{\tau(\alpha)}}
+ \|\varphi\|_{L^\infty}\\
&\le \kappa_{\alpha} \|N_f(u)\|_{L^\infty_{\alpha}}
+ \|\varphi\|_{L^\infty} \le \kappa_{\alpha} \Bigl(\|Q |u|\|_{L^\infty_{\alpha}} + \|b\|_{L^\infty_{\alpha}}\Bigr)+ \|\varphi\|_{L^\infty}\\
&\le \kappa_{\alpha} \|Q\|_{L^\infty_{\alpha}}\|u\|_{L^\infty} + \kappa_{\alpha} \|b\|_{L^\infty_{\alpha}} + \|\varphi\|_{L^\infty}.
\end{align*}
Since $\kappa_{\alpha} \|Q\|_{L^\infty_{\alpha}}<1$ by assumption, we conclude that
$$
\|u\|_{L^\infty} \le \bigl( 1- \kappa_{\alpha} \|Q\|_{L^\infty_{\alpha}}\bigr)^{-1}\bigl(\kappa_{\alpha} \|b\|_{L^\infty_{\alpha}} + \|\varphi\|_{L^\infty}\bigr),
$$
and this shows the boundedness of ${\mathcal F}$.
\qquad Next we assume $(f_1)$. In this case we argue by contradiction, so we assume that there exists a sequence $(u_n)_n$ in ${\mathcal F}$ such that $c_n:= \|u_n\|_{L^\infty} \to \infty$ as $n \to \infty$. Moreover, we let $\mu_n \in [0,1]$ be such that (\ref{schaefer-equation}) holds with $u=u_n$ and $\mu= \mu_n$. We then define $w_n:= \frac{u_n}{c_n} \in L^\infty(\mathbb{R}^N)$, so that $\|w_n\|_{L^\infty}= 1$ and, by assumption $(f_1)$,
\begin{equation}
\label{schaefer-equation-wn-proof}
w_n = \mu_n {\mathcal R}_k (a w_n + g_n) + \frac{\mu_n}{c_n} \varphi \qquad \text{with $g_n \in L^\infty_\alpha(\mathbb{R}^N)$, $g_n(x)= \frac{b(x,c_n w_n(x))}{c_n}$.}
\end{equation}
Passing to a subsequence, we may assume that $\mu_n \to \mu \in [0,1]$. Moreover, by assumption $(f_1)$ we have
$$
g_n \to 0 \qquad \text{in $L^\infty_\alpha(\mathbb{R}^N)$ as $n \to \infty$,}
$$
whereas the sequence $(a w_n)_n$ is bounded in $L^\infty_\alpha(\mathbb{R}^N)$. Since also $\frac{\mu_n}{c_n} \to 0$ as $n \to \infty$, it follows from the compactness of the operator ${\mathcal R}_k:L^\infty_\alpha(\mathbb{R}^N) \to L^\infty(\mathbb{R}^N)$ that, after passing to a subsequence, $w_n \to w \in L^\infty(\mathbb{R}^N)$. From this we then deduce that
$$
a w_n \to a w \qquad \text{in $L^\infty_\alpha(\mathbb{R}^N)$,}
$$
and passing to the limit in (\ref{schaefer-equation-wn-proof}) yields
$$
w = \mu {\mathcal R}_k [a w]= {\mathcal R}_k [\mu a w].
$$
Applying Proposition~\ref{corol-kato} with $p=2$ and $Q:= \mu a$, we conclude that $w \equiv 0$, but this contradicts the fact that $\|w\|_\infty = \lim \limits_{n \to \infty}\|w_n\|_\infty = 1$. Again, we infer the boundedness of ${\mathcal F}$ in $L^\infty(\mathbb{R}^N)$.
\end{proof}
\subsection{A priori bounds in the superlinear and defocusing case}
\label{sec:priori-bounds-superl}
In this subsection we restrict our attention to the case $f(x,u)= Q(x)|u|^{p-2}u$ with $Q \le 0$. In this case, we shall prove the following a priori estimate.
\begin{proposition}\label{prop:apriori_defocusing}
Let $N\geq 3$, $k>0$, $2<p<2^\ast$, $Q\in L^\infty_c(\mathbb{R}^N,\mathbb{R})\backslash\{0\}$ and $\varphi\in L^\infty(\mathbb{R}^N)$.
Assume that
\begin{itemize}
\item[(Q1)] $Q\leq 0$ a.e. in $\mathbb{R}^N$ and
\item[(Q2)] $\text{diam}(\text{supp }Q)\le \frac{{\bf z({\text{\tiny $N$}})}}{k}$,
where ${\bf z({\text{\tiny $N$}})}$ denotes the first positive zero
of the Bessel function $Y_{\frac{N-2}2}$ of the second kind of order $\frac{N-2}{2}$.
\end{itemize}
Then, there exist $C=C(N,k,p,\|Q\|_\infty, |\text{supp }Q|)>0$
and $m=m(N,k,p)\in\mathbb{N}$
such that for any solution $u\in L^\infty(\mathbb{R}^N)$ of
\begin{equation}\label{eqn:fp_complex}
\begin{aligned}
u={\mathcal R}_k\bigl(Q |u|^{p-2}u\bigr) +\varphi
\end{aligned}
\end{equation}
we have
\begin{equation}
\label{eq:apriori-superlinear-defocusing-estimate}
\|u\|_\infty\leq C\left(1+\|\varphi\|_\infty^{(p-1)^m}\right).
\end{equation}
\end{proposition}
\qquad For the proof, we first need two preliminary lemmas. The first lemma gives a sufficient condition for the nonnegativity
of the Fourier transform of a radial function. It is well known in the case $N=3$ (see for example \cite{tuck06}). Since we could
not find any reference for the general case, we give a proof for completeness.
\begin{lemma}\label{lem:FT_rad_positive}
Let $N\geq 3$ and consider $f\in L^1(\mathbb{R}^N)$ radially symmetric, i.e., $f(x)=f(|x|)$, such that $f\geq 0$ on $\mathbb{R}^N$.
If the function $t\mapsto t^{\frac{N-1}2}f(t)$ is nonincreasing on $(0,\infty)$, then $\widehat{f}\geq 0$ on $\mathbb{R}^N$.
\end{lemma}
\betagin{proof}
The Fourier transform of the radial function $f$ is given by
$$
\widehat{f}(\xi)=|\xi|^{-\frac{N-2}2}\int_0^\infty J_{\frac{N-2}2}(s|\xi|)f(s)s^{\frac{N}2}\, ds.
$$
Let $j^{({e}ll)}$, ${e}ll\in N$ denote the positive zeros of the Bessel function $J_{\frac{N-2}2}$ of the first kind of order $\frac{N-2}2$, arranged in increasing order,
and set $j^{(0)}:=0$. Then, it follows that $J_{\frac{N-2}2}>0$ in the interval $\bigl(j^{(2m-2)},j^{(2m-1)}\bigr)$ and
$J_{\frac{N-2}2}<0$ in the interval $\bigl(j^{(2m-1)},j^{(2m)}\bigr)$, $m\in\mathbb{N}$.
For $\xi\neq 0$, we can write therefore
\betagin{align*}
&\int_0^\infty J_{\frac{N-2}2}(s|\xi|)f(s)s^{\frac{N}2}\, ds
= {\mathfrak{su}}m_{{e}ll=1}^\infty \int_{\frac{j^{({e}ll-1)}}{|\xi|}}^{\frac{j^{({e}ll)}}{|\xi|}} s^\frac12J_{\frac{N-2}2}(s|\xi|) s^{\frac{N-1}2}f(s)\, ds\\
&\quad\geq {\mathfrak{su}}m_{m=1}^{\infty}\left(\frac{j^{(2m-1)}}{|\xi|}\right)^{\frac{N-1}2} f\bigl(\frac{j^{(2m-1)}}{|\xi|}\bigr)
\Bigl[ \int_{\frac{j^{(2m-2)}}{|\xi|}}^{\frac{j^{(2m-1)}}{|\xi|}} s^\frac12\bigl|J_{\frac{N-2}2}(s|\xi|)\bigr| ds
-\int_{\frac{j^{(2m-1)}}{|\xi|}}^{\frac{j^{(2m)}}{|\xi|}} s^\frac12\bigl|J_{\frac{N-2}2}(s|\xi|)\bigr| ds \Bigr]\\
&\quad={\mathfrak{su}}m_{m=1}^{\infty}|\xi|^{-\frac32}\left(\frac{j^{(2m-1)}}{|\xi|}\right)^{\frac{N-1}2} f\bigl(\frac{j^{(2m-1)}}{|\xi|}\bigr)
\Bigl[ \int_{j^{(2m-2)}}^{j^{(2m-1)}} t^\frac12\bigl|J_{\frac{N-2}2}(t)\bigr| dt
-\int_{j^{(2m-1)}}^{j^{(2m)}} t^\frac12\bigl|J_{\frac{N-2}2}(t)\bigr| dt \Bigr],
{e}nd{align*}
using the fact that $s\mapsto s^{\frac{N-1}2}f(s)$ is nonincreasing by assumption.
To conclude, an argument which goes back to Sturm \cite{sturm} (see also \cite{lorch-szego63,M}) shows that
\betagin{equation}
\lambdabel{eq:sturm-argument}
\int_{j^{(2m-2)}}^{j^{(2m-1)}} t^\frac12\bigl|J_{\frac{N-2}2}(t)\bigr| dt\geq
\int_{j^{(2m-1)}}^{j^{(2m)}} t^\frac12\bigl|J_{\frac{N-2}2}(t)\bigr| dt,\quad\thetaxt{ for all }m\in\mathbb{N},
{e}nd{equation}
provided $N\geq 3$, and this gives the desired result.
For the reader's convenience, we now give the proof of (\ref{eq:sturm-argument}).
\qquad Consider for $\nu>\frac12$ the function $z(t):=t^\frac12J_\nu(t)$. It satisfies $z(j^{({e}ll)})=0$ and $(-1)^{e}ll z'(j^{({e}ll)})>0$
for all ${e}ll\in\mathbb{N}_0$. Moreover, it solves the differential equation
\betagin{equation}\lambdabel{eqn:equa_diff_bessel}
z''(t) + \Bigl(1-\frac{\nu^2-\frac14}{t^2}\Bigr)z(t)=0\quad \thetaxt{for all $t>0.$}
{e}nd{equation}
For $m\in\mathbb{N}$ and $t$ in the interval $I:=\bigl(j^{(2m-1)},\min\{j^{(2m)}, 2j^{(2m-1)}-j^{(2m-2)}\}\bigr)$,
consider the functions $y_1(t)=-z(t)$ and $y_2(t)=z(2j^{(2m-1)}-t)$.
According to the above remark, we have $y_1, y_2>0$ in $I$
and $y_1(j^{(2m-1)})=y_2(j^{(2m-1)})=0$. Moreover, $y_1'(j^{(2m-1)})=y_2'(j^{(2m-1)})\in(0,\infty)$.
Using the differential equation {e}qref{eqn:equa_diff_bessel}, we find that
\betagin{align*}
\frac{d}{dt}\left(y_1'(t)y_2(t)-y_1(t)y_2'(t)\right)&=y_1''(t)y_2(t)-y_1(t)y_2''(t)\\
&=(\nu^2-\frac14)\left(\frac1{t^2}-\frac1{(2j^{(2m-1)}-t)^2}\right)y_1(t)y_2(t)\\
&<0 \quad\thetaxt{for all }t\in I.
{e}nd{align*}
Hence,
\betagin{equation}\lambdabel{eqn:phi_prime}
y_1'(t)y_2(t)-y_1(t)y_2'(t)<0\quad\thetaxt{ for all }j^{(2m-1)}<t\leq \min\{j^{(2m)}, 2j^{(2m-1)}-j^{(2m-2)}\},
{e}nd{equation}
and since $y_2(2j^{(2m-1)}-j^{(2m-2)})=0$ and $y_2'(2j^{(2m-1)}-j^{(2m-2)})=-z'(j^{(2m-2)})<0$, the positivity
of $y_1$ in $I$ implies that $j^{(2m)}<2j^{(2m-1)}-j^{(2m-2)}$, i.e. $I=\bigl(j^{(2m-1)},j^{(2m)}\bigr)$.
\qquad Moreover, from \eqref{eqn:phi_prime}, we infer that the quotient $\frac{y_1}{y_2}$ is a decreasing
function in $I$ which vanishes at the right boundary of this interval. Consequently, $y_1(t)<y_2(t)$ in $I$,
i.e., $|z(t)|< |z(2j^{(2m-1)}-t)|$ for all $t\in(j^{(2m-1)},j^{(2m)})$ and we conclude that
$$
\int_{j^{(2m-2)}}^{j^{(2m-1)}}|z(t)|\, dt > \int_{j^{(2m-1)}}^{j^{(2m)}}|z(t)|\, dt.
$$
In the case $\nu=\frac12$, we have $z(t)=\sqrt{\frac2\pi}\sin t$ and $j^{(\ell)}=\ell\pi$, $\ell\in\mathbb{N}_0$.
Thus,
$$
\int_{j^{(\ell-1)}}^{j^{(\ell)}}|z(t)|\, dt =\sqrt{\frac2\pi}\int_0^\pi \sin t\, dt=2\sqrt{\frac2\pi}\quad\text{for all }\ell\in\mathbb{N},
$$
and this concludes the proof of (\ref{eq:sturm-argument}).
\end{proof}
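For the reader who wishes to verify \eqref{eq:sturm-argument} numerically, the following minimal sketch (an illustration only, not part of the argument; it assumes Python with NumPy and SciPy, and takes $N=4$ so that the Bessel order $\frac{N-2}{2}=1$ is an integer) compares the integrals over consecutive arcs between zeros:
\begin{verbatim}
import numpy as np
from scipy.special import jv, jn_zeros
from scipy.integrate import quad

order = 1                                             # (N-2)/2 for N = 4
zeros = np.concatenate(([0.0], jn_zeros(order, 20)))  # j^(0)=0, j^(1), j^(2), ...

def arc(a, b):
    # integral of t^(1/2) |J_order(t)| over [a, b]
    val, _ = quad(lambda t: np.sqrt(t) * abs(jv(order, t)), a, b, limit=200)
    return val

for m in range(1, 10):
    left  = arc(zeros[2*m - 2], zeros[2*m - 1])
    right = arc(zeros[2*m - 1], zeros[2*m])
    print(m, left, right, left >= right)   # last column should always be True
\end{verbatim}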
\qquad In our proof of the a priori bound given in Proposition~\ref{prop:apriori_defocusing}, we only need the following corollary of Lemma~\ref{lem:FT_rad_positive}.
\begin{corollary}\label{prop:bilinear_positive}
Let $N\geq 3$, $k>0$ and choose $\delta>0$ such that $k\delta\leq {\bf z({\text{\tiny $N$}})}$, where ${\bf z({\text{\tiny $N$}})}$ denotes
the first positive zero of the Bessel function $Y_{\frac{N-2}{2}}$. Then,
$$
\int_{\mathbb{R}^N}f(x) [(1_{B_\delta} \Psi_k)\ast f](x)\, dx\geq 0 \quad \text{for all }f\in L^{p'}(\mathbb{R}^N,\mathbb{R}), \ 2\leq p\leq 2^\ast,
$$
where $\Psi_k$ denotes the real part of the fundamental solution $\Phi_k$ defined in (\ref{eq:18}).
\end{corollary}
\begin{proof}
Since $1_{B_\delta}\Psi_k\in L^1(\mathbb{R}^N)\cap L^{\frac{N}{N-2}}_{w}(\mathbb{R}^N)$, by the weak Young inequality
there is for each $2\leq p\leq 2^\ast$ a constant $C_p>0$ such that
$$
\left|\int_{\mathbb{R}^N}f(x) [(1_{B_\delta}\Psi_k)\ast f](x)\, dx\right|\leq C_p \|f\|_{p'}^2\quad \text{for all }f\in L^{p'}(\mathbb{R}^N,\mathbb{R}).
$$
Hence, it suffices to prove the conclusion for $f\in{\mathcal S}(\mathbb{R}^N,\mathbb{R})$. For such functions, Parseval's identity gives
\begin{equation}\label{eqn:parseval}
\int_{\mathbb{R}^N}f(x) [(1_{B_\delta}\Psi_k)\ast f](x)\, dx
=(2\pi)^{\frac{N}2}\int_{\mathbb{R}^N} |\widehat{f}(\xi)|^2 {\mathcal F}\bigl(1_{B_\delta}\Psi_k\bigr)(\xi)\, d\xi.
\end{equation}
\qquad It thus remains to show that
\begin{equation}
\label{eq:positivity-Fourier-proof}
{\mathcal F}\bigl(1_{B_\delta}\Psi_k\bigr) \ge 0 \quad \text{on $\mathbb{R}^N$.}
\end{equation}
In the radial variable, the radial function
$1_{B_\delta}\Psi_k$ is given, up to a positive constant factor, by $t \mapsto -t^{\frac{2-N}{2}}1_{[0,\delta]}(t)Y_{\frac{N-2}2}(kt)$. Moreover, for $N\geq 3$ the function
$t\mapsto t^\frac12Y_{\frac{N-2}2}(kt)$ is negative and increasing on $(0,\delta)$. Hence Lemma~\ref{lem:FT_rad_positive} implies (\ref{eq:positivity-Fourier-proof}), and the proof is finished.
\end{proof}
\qquad We can now prove Proposition~\ref{prop:apriori_defocusing}.
\begin{proof}[Proof of Proposition~\ref{prop:apriori_defocusing}]
We write $u:=v+\varphi$ and $u=u_1+iu_2$ with real-valued functions $u_1, u_2\in L^p_\text{loc}(\mathbb{R}^N)$.
Multiplying the equation \eqref{eqn:fp_complex} by $Q|u|^{p-2}\overline{u}$ and
integrating over $\mathbb{R}^N$, we find
\begin{align*}
&\int_{\mathbb{R}^N}Q|u|^p\, dx-\int_{\mathbb{R}^N}Q|u|^{p-2}\varphi\overline{u}\, dx \\
&\quad= \int_{\mathbb{R}^N}Q|u|^{p-2}(u_1-iu_2)[\Phi_k\ast\bigl(Q|u|^{p-2}(u_1+iu_2)\bigr)]\, dx \\
&\quad= \int_{\mathbb{R}^N}Q|u|^{p-2}u_1[\Phi_k\ast\bigl(Q|u|^{p-2}u_1\bigr)]\, dx
+\int_{\mathbb{R}^N}Q|u|^{p-2}u_2[\Phi_k\ast\bigl(Q|u|^{p-2}u_2\bigr)]\, dx \\
&\qquad + i \int_{\mathbb{R}^N}Q|u|^{p-2}u_1[\Phi_k\ast\bigl(Q|u|^{p-2}u_2\bigr)]\, dx
- i\int_{\mathbb{R}^N}Q|u|^{p-2}u_2[\Phi_k\ast\bigl(Q|u|^{p-2}u_1\bigr)]\, dx \\
&\quad=\int_{\mathbb{R}^N}Q|u|^{p-2}u_1[\Phi_k\ast\bigl(Q|u|^{p-2}u_1\bigr)]\, dx
+\int_{\mathbb{R}^N}Q|u|^{p-2}u_2[\Phi_k\ast\bigl(Q|u|^{p-2}u_2\bigr)]\, dx,
\end{align*}
where the symmetry of the convolution has been used in the last step.
Taking real parts on both sides of the equality, we obtain
\begin{equation}\label{eqn:integr_estim1}
\begin{aligned}
\int_{\mathbb{R}^N}Q|u|^p\, dx-\int_{\mathbb{R}^N}Q|u|^{p-2}\text{Re}\left(\varphi\overline{u}\right)\, dx
&=\int_{\mathbb{R}^N}Q|u|^{p-2}u_1[\Psi_k\ast\bigl(Q|u|^{p-2}u_1\bigr)]\, dx\\
&\quad+\int_{\mathbb{R}^N}Q|u|^{p-2}u_2[\Psi_k\ast\bigl(Q|u|^{p-2}u_2\bigr)]\, dx,
\end{aligned}
\end{equation}
where again $\Psi_k$ denotes the real part of $\Phi_k$.
Notice in addition that setting $\delta=\text{diam}(\text{supp }Q)$, the assumption (Q2)
implies $\delta\le \frac{{\bf z({\text{\tiny $N$}})}}{k}$ and hence, for all $f\in L^{p'}_{\text{loc}}(\mathbb{R}^N)$,
$$
\int_{\mathbb{R}^N}Qf[\Psi_k\ast (Qf)]\, dx = \int_{\mathbb{R}^N}Qf[(1_{B_\delta}\Psi_k)\ast(Qf)]\, dx\geq 0,
$$
by Corollary~\ref{prop:bilinear_positive}. Thus, as a consequence of \eqref{eqn:integr_estim1}, we find
$$
\int_{\mathbb{R}^N}Q|u|^p\, dx\geq \int_{\mathbb{R}^N}Q|u|^{p-2}\text{Re}\left(\overline{u}\varphi\right)\, dx,
$$
and, since $Q\leq 0$ on $\mathbb{R}^N$, by (Q1), it follows that
\begin{equation}\label{eqn:first_bound}
\int_{\mathbb{R}^N}|Q|\ |u|^p\, dx \leq \|\varphi\|_\infty \int_{\mathbb{R}^N} |Q|\ |u|^{p-1}\, dx.
\end{equation}
Using H\"older's inequality we then obtain the estimate
\begin{align*}
\int_{\mathbb{R}^N} |Q|\ |u|^{p-1}\, dx & \leq \left(\int_{\mathbb{R}^N} |Q|\, dx\right)^{\frac1p}\left(\int_{\mathbb{R}^N}|Q|\ |u|^{p}\, dx\right)^{\frac1{p'}}\\
& \leq \left(\int_{\mathbb{R}^N} |Q|\, dx\right)^{\frac1p} \left(\|\varphi\|_\infty \int_{\mathbb{R}^N} |Q|\ |u|^{p-1}\, dx\right)^{\frac1{p'}},
\end{align*}
and therefore
$$
\int_{\mathbb{R}^N} |Q|\ |u|^{p-1}\, dx \leq \|\varphi\|_\infty^{p-1}
\int_{\mathbb{R}^N}|Q|\, dx \leq |\Omega|\ \|Q\|_\infty\ \|\varphi\|_\infty^{p-1},
$$
where $\Omega=\{x\in\mathbb{R}^N\, :\, Q(x)\neq 0\}$.
Using again \eqref{eqn:first_bound}, we deduce that
$$
\|\ |Q|^{\frac1{p'}}\ |u|^{p-1}\|_{p'}^{p'}
=\int_{\mathbb{R}^N}|Q|\ |u|^p\, dx
\leq |\Omega|\ \|Q\|_\infty\ \|\varphi\|_\infty^p.
$$
Since the support of $Q$ is compact and since $p<2^\ast$,
H\"older's inequality yields the estimates
\begin{align}
\|Q|u|^{p-1}\|_{(2^\ast)'} \leq |\Omega|^{\frac1{(2^\ast)'}-\frac1{p'}} \|Q|u|^{p-1}\|_{p'}
&\leq |\Omega|^{\frac1{(2^\ast)'}-\frac1{p'}} \|Q\|_\infty^{\frac1p} \|\ |Q|^{\frac1{p'}}|u|^{p-1}\|_{p'} \nonumber\\
&\leq |\Omega|^{\frac1{(2^\ast)'}} \|Q\|_\infty \|\varphi\|_\infty^{p-1}=:D.
\label{eqn:final_bound}
\end{align}
Lemma~\ref{lem:regularity1} with $a=Q$ and the
estimate~{e}qref{eqn:final_bound} imply the
existence of constants
$C=C(N,k,p,\|Q\|_\infty,|\Omega|)>0$ and $m=m(N,p)\in\mathbb{N}$ such that
\begin{align*}
\|v\|_\infty\leq C\left(D+D^{(p-1)^m}+\|\varphi\|_\infty^{p-1} + \|\varphi\|_\infty^{(p-1)^m}\right).
\end{align*}
Making $C>0$ larger if necessary, we thus obtain~(\ref{eq:apriori-superlinear-defocusing-estimate}), as claimed.
\end{proof}
\section{Proofs of the main results}
\label{sec:proofs-main-results}
In this section, we complete the proofs of the main results in the introduction.
\begin{proof}[Proof of Theorem~\ref{W teo 1-sublinear}]
Let $\varphi \in X:= L^\infty(\mathbb{R}^N)$. We write (\ref{nlh-1-integral}) as a fixed point equation
$$
u = {\mathcal A}(u) \qquad \text{in $X$}
$$
with the nonlinear operator
\begin{equation}
\label{eq:A-operator}
{\mathcal A}: X \to X, \qquad {\mathcal A}[w]={\mathcal R}_k (N_f(w))+ \varphi.
\end{equation}
Since $\alpha > \frac{N+1}{2}$, we may fix $\alpha' \in (\frac{N+1}{2}, \alpha)$. By Lemma~\ref{lem:nemytskii_cont}, the nonlinear operator $N_f: X \to L^\infty_{\alpha'}(\mathbb{R}^N)$ is well-defined and continuous. Moreover, ${\mathcal R}_k: L^\infty_{\alpha'}(\mathbb{R}^N) \to X$ is compact by Proposition~\ref{resolvent-compact-and-continuous}. Consequently, ${\mathcal A}$ is a compact and continuous operator. Moreover, the set
$$
{\mathcal F}:=\{u\in X: \, u=\mu {\mathcal A}[u]\ \text{ for some } \mu\in[0,1]\}
$$
is bounded by Proposition~\ref{sec:proof-theorem-refw-1}. Hence Schaefer's fixed point theorem (see e.g. \cite[Chapter 9.2.2.]{Evans}) implies that ${\mathcal A}$ has a fixed point.
\end{proof}
\qquad We continue with the proof of Theorem~\ref{thm:rabinowitz-applied}. For this we recall the following variant of Rabinowitz'
global continuation theorem (see \cite[Theorem 3.2]{rabinowitz71};
see also \cite[Theorem 14.D]{zeidler}).
\begin{theorem}\label{thm:rabinowitz}
Let $(X,\|\cdot\|)$ be a real Banach space, and consider a continuous and compact mapping $G$: $\mathbb{R}\times X$ $\to$ $X$ satisfying $G(0,0)=0$.
Assume that
\begin{itemize}
\item[(a)] $G(0,u)=u$ $\Leftrightarrow$ $u=0$, and
\item[(b)] there exists $r>0$ such that $\text{\em deg}(id-G(0,\cdot),B_r(0),0)\neq 0$,
where $\text{\em deg}$ denotes the Leray-Schauder degree.
\end{itemize}
Moreover, denote by $S$ the set of solutions $(\lambda, u)\in \mathbb{R}\times X$ of the equation
$$
u=G(\lambda,u).
$$
Then the connected components $C^+$ and $C^-$ of $S$ in $[0,\infty)\times X$ and $(-\infty,0]\times X$
which contain $(0,0)$ are both unbounded.
\end{theorem}
\begin{proof}[Proof of Theorem~\ref{thm:rabinowitz-applied} (completed)]
Let $2<p<2^\ast$, $Q\in L^\infty_\alpha(\mathbb{R}^N,\mathbb{R})\setminus\{0\}$ for some $\alpha > \frac{N+1}{2}$,
$\varphi\in X:= L^\infty(\mathbb{R}^N)$
and consider $G$: $\mathbb{R} \times X \to X$ given by
\begin{equation}
\label{eq:def-G-function}
G(\lambda,w)= {\mathcal R}_k\bigl(Q|w|^{p-2}w\bigr)+\lambda\varphi.
\end{equation}
Using Proposition~\ref{resolvent-compact-and-continuous} and Lemma~\ref{lem:nemytskii_cont}, we obtain that the map $G$ is continuous and compact.
Moreover, if $w\in X$ satisfies $w=G(\lambda,w)$, then $w$ is a solution of (\ref{nlh-1-integral-parameter-lambda}).
Furthermore, if $w \in X$ satisfies $w = G(0,w) = {\mathcal R}_k\bigl(Q|w|^{p-2}w\bigr)$, then $w = 0$ by Proposition~\ref{corol-kato}.
\qquad To compute the Leray-Schauder degree, we remark that $G(0,0)=0$ and $\partial_wG(0,0)=0$ by Lemma~\ref{lem:nemytskii_C-1}.
Hence, we can find some radius $r>0$
such that $\|G(0,w)\|_{L^\infty} \leq \frac12 \|w\|_{L^\infty}$
for all $w\in X$ such that $\|w\|_{L^\infty} \leq r$.
Therefore, the compact homotopy $H(t,w)=tG(0,w)$ is admissible in the ball
$B_r(0)\subset X$
and we find that
\begin{align*}
\text{deg}(id-G(0,\cdot),B_r(0),0)=\text{deg}(id-H(1,\cdot),B_r(0),0)
&=\text{deg}(id-H(0,\cdot),B_r(0),0)\\
&=\text{deg}(id,B_r(0),0)=1.
\end{align*}
Theorem~\ref{thm:rabinowitz} therefore applies and
we obtain the existence of an unbounded branch
$C_\varphi \subseteq \bigl\{(\lambda,w)\in \mathbb{R}\times X\, :\,
w=G(\lambda,w)\text{ and } \lambda\geq 0\bigr\}$
which contains $(0,0)$. Moreover, $C_\varphi \setminus \{(0,0)\}$ is a subset of $(0,\infty) \times X$ since $w=G(0,w)$ implies $w=0$ by Proposition~\ref{corol-kato}, as noted above.
\end{proof}
\begin{remark}
\label{remark-rabinowitz}
The application of Theorem~\ref{thm:rabinowitz} to the function $G$ defined in (\ref{eq:def-G-function}) also yields a connected component
$$
C_\varphi^- \subset \bigl\{(\lambda,w)\in \mathbb{R}\times X\, :\,
w=G(\lambda,w)\text{ and } \lambda\le 0\bigr\}
$$
which contains $(0,0)$. However, this component is also obtained by passing from $\varphi$ to $-\varphi$ in the statement of Theorem~\ref{thm:rabinowitz-applied}, since by definition we have $C_\varphi^- = C_{-\varphi}$.
\end{remark}
\qquad We may now also prove Theorem~\ref{thm:unbounded_branch-defocusing}.
\begin{proof}[Proof of Theorem~\ref{thm:unbounded_branch-defocusing}]
Since, by assumption, $Q\leq 0$ in $\mathbb{R}^N$
and $\text{diam}(\text{supp }Q)\le \frac{{\bf z({\text{\tiny $N$}})}}{k}$,
the a priori bounds in Proposition~\ref{prop:apriori_defocusing}
imply that the unbounded branch $C_\varphi$ contains, for each $\lambda \ge 0$,
at least one pair $(\lambda,w)$, as claimed.
\end{proof}
\qquad Next, we turn to the proof of Theorem~\ref{teo-implicit-function}.
\begin{proof}[Proof of Theorem~\ref{teo-implicit-function}]
Let again $X:= L^\infty(\mathbb{R}^N)$, and consider the nonlinear operator ${\mathcal B}: X \to X$, ${\mathcal B}(u):= u- {\mathcal R}_k N_f(u)$. Then ${\mathcal B}(0)=0$, since $N_f(0)=0$ by assumption. Since $N_f: X \to L^\infty_{\alpha'}$ is differentiable by Lemma~\ref{lem:nemytskii_C-1}, ${\mathcal B}$ is differentiable as well. Moreover
$$
{\mathcal B}'(0)= {\rm id} - {\mathcal R}_k N_f'(0) = {\rm id} \in {\mathcal L}_{\mathbb{R}}(X,X),
$$
since $N_f'(0) =N_{f'}(0) =0 \in {\mathcal L}_{\mathbb{R}}(X,L^\infty_{\alpha'})$ by assumption and Lemma~\ref{lem:nemytskii_C-1}. Consequently,
${\mathcal B}$ is a diffeomorphism between open neighborhoods $U,V \subset X$ of zero, and this shows the claim.
\end{proof}
\qquad Finally, we state and prove the unique existence of solutions in the case where $f$ satisfies a suitable Lipschitz condition.
\begin{theorem} \label{theo-uniqueness}
Let, for some $\alpha>\frac{N+1}{2}$, the nonlinearity $f: \mathbb{R}^N \times \mathbb{C} \to \mathbb{C}$ be a continuous function satisfying (\ref{eq:assumption-f1}) and the Lipschitz condition
\begin{equation}
\label{eq:assumption-f1-lipschitz2}
\ell_\alpha:= \sup \Bigl \{ \langle x \rangle^{\alpha} \, \Bigl|\frac{f(x,u)-f(x,v)}{u-v}\Bigr|\::\: u,v \in \mathbb{R}, \: x \in \mathbb{R}^N \Bigr\} < \frac{1}{\kappa_\alpha},
\end{equation}
where $\kappa_\alpha$ is defined in Proposition~\ref{resolvent-compact-and-continuous}.
Then, for any given solution $\varphi \in L^\infty(\mathbb{R}^N)$ of the homogeneous Helmholtz equation $\Delta \varphi + k^2 \varphi = 0$, the equation (\ref{nlh-1-integral}) admits precisely one solution $u \in L^\infty(\mathbb{R}^N)$.
\end{theorem}
\begin{proof}
Let $\varphi \in X:= L^\infty(\mathbb{R}^N)$. As in the proof of Theorem~\ref{W teo 1-sublinear} given above, we write (\ref{nlh-1-integral}) as a fixed point equation
$u = {\mathcal A}(u)$ in $X$ with the nonlinear operator ${\mathcal A}$ defined in (\ref{eq:A-operator}). Assumption (\ref{eq:assumption-f1-lipschitz2}) implies that
$$
\|{\mathcal A}(u)-{\mathcal A}(v)\|_{X} = \bigl \|{\mathcal R}_k \bigl (N_f(u)-N_f(v)\bigr)\bigr\| \le \kappa_{\alpha} \|N_f(u)-N_f(v)\|_{L^\infty_\alpha}\le \kappa_\alpha \ell_\alpha \|u-v\|_X
$$
with $\kappa_\alpha \ell_\alpha<1$. Hence ${\mathcal A}$ is a contraction, and thus it has a unique fixed point in $X$.
\end{proof}
\appendix
\section{Uniform regularity estimates}
\label{sec:unif-regul-estim}
In this section, we wish to prove uniform regularity estimates for solutions of (\ref{nlh-1-integral}) in the case where the nonlinearity $f$ is of the form given in (\ref{eq:power-type}). These estimates, which we used in the proof of the a priori bound given in Proposition~\ref{prop:apriori_defocusing}, allow us to pass from uniform bounds in $L^{(2^*)'}(\mathbb{R}^N)$ to uniform bounds in $L^\infty(\mathbb{R}^N)$.
The proof of the following lemma is similar to a regularity estimate for real-valued solutions given in \cite[Proposition 3.1]{EW2}, but the differences justify including a complete proof in this paper.
In the following, for $q \in [1,\infty]$, we let $L^q_c(\mathbb{R}^N)$ denote the space of functions in $L^q(\mathbb{R}^N)$ with compact support in $\mathbb{R}^N$.
\begin{lemma}\label{lem:regularity1}
Let $N\geq 3$, $2<p<2^\ast$ and consider a function $a\in L^\infty_c(\mathbb{R}^N)$.
For $k>0$ and $\varphi\in L^\infty_{\text{loc}}(\mathbb{R}^N)$, every solution $v\in L^p_{\text{loc}}(\mathbb{R}^N)$ of
$$
v=\Phi_k \ast \bigl(a|v|^{p-2}v \bigr)+ \varphi
$$
satisfies $v\in W^{2,t}(\mathbb{R}^N)$ for all $2_\ast\leq t<\infty$.
In particular, $v\in L^\infty(\mathbb{R}^N)$ and there exist constants
$$
C=C\bigl(N,k,p,\|a\|_\infty\bigr)>0\qquad \text{and}\qquad m=m(N,p)\in\mathbb{N}
$$
independent of $v$ and $\varphi$ such that
\begin{equation}\label{eqn:infty-estimate}
\|v\|_\infty\leq C\left( \|a |\varphi|^{p-1}\|_{(2^\ast)'}+\|a|v|^{p-1}\|_{(2^\ast)'}^{(p-1)^m}
+\|\varphi\|_{\infty}^{p-1}+\|\varphi\|_{\infty}^{(p-1)^m}\right).
\end{equation}
\end{lemma}
\noindent{\bf Proof.} Since, by assumption, $v \in L^p_{\text{loc}}(\mathbb{R}^N)$, and since $a\in L^\infty_c(\mathbb{R}^N)$, it follows that
\begin{equation}\label{eqn:f1_f2_Lpq}
f:=a|v |^{p-2}v\in L^q_c(\mathbb{R}^N), \quad\text{ for all }1\leq q\leq p'.
\end{equation}
Furthermore, since $v=\Phi_k\ast f + \varphi$, we deduce that
\begin{equation}\label{eqn:f1_f2_estim_Rf}
|f|\leq 2^{p-2} |a| \bigl(|\Phi_k\ast f|^{p-1} + |\varphi|^{p-1}\bigr) \quad\text{a.e. in }\mathbb{R}^N.
\end{equation}
\qquad We start by proving that $v\in L^\infty(\mathbb{R}^N)$. For this, we first remark
that $f\in L^{(2^\ast)'}_{c}(\mathbb{R}^N)$, since $p<2^\ast$.
Consequently, the mapping properties of $\Phi_k$ given in \cite[Proposition A.1]{EW1}
yield $\Phi_k\ast f\in L^{2^\ast}(\mathbb{R}^N)\cap W^{2,(2^\ast)'}_{\text{loc}}(\mathbb{R}^N)$ and, for every $0<R<2$, the existence of constants $\tilde{C}_0=\tilde{C}_0(N,k,R)>0$ and $D=D(N,k)>0$ such that
\begin{align*}
\|\Phi_k\ast f\|_{W^{2,(2^\ast)'}(B_{R}(x_0))}
&\leq \tilde{C}_0 \left( \|\Phi_k\ast f\|_{L^{(2^\ast)'}(B_2(x_0))}+\|f\|_{L^{(2^\ast)'}(B_2(x_0))}\right)\\
&\leq \tilde{C}_0(D+1)\|f\|_{(2^\ast)'} \quad\text{for all }x_0\in\mathbb{R}^N.
\end{align*}
\qquad Setting $C_0:=\tilde{C}_0(D+1)$, we consider a strictly decreasing sequence
$2>R_1>R_2>\ldots>R_j>R_{j+1}>\ldots>1$.
From Sobolev's embedding theorem, there is for each $1\leq t\leq 2^\ast$,
a constant $\kappa_t^{(0)}=\kappa_t^{(0)}(N,t)>0$ such that
$$
\|\Phi_k\ast f\|_{L^t(B_{R_1}(x_0))}\leq \kappa_t^{(0)} C_0 \|f\|_{(2^\ast)'},
$$
where $C_0$ is given as above, with $R=R_1$.
Choosing $t_1:=\frac{2^\ast}{p-1}$, we obtain from \eqref{eqn:f1_f2_estim_Rf} that there is some constant $D_2=D_2(N,p)>0$ such that
\begin{align*}
\|f\|_{L^{t_1}(B_{R_1}(x_0))}
&\leq D_2 \|a\|_\infty \bigl(\|\Phi_k\ast f\|_{L^{2^\ast}(B_{R_1}(x_0))}^{p-1}+\|\varphi\|_{L^{2^\ast}(B_{R_1}(x_0))}^{p-1}\bigr)\\
&\leq D_2 \|a\|_\infty\left((\kappa_{2^\ast}^{(0)} C_0)^{p-1} \|f\|_{(2^\ast)'}^{p-1}+|B_{R_1}|^{\frac1{t_1}}\|\varphi\|_\infty^{p-1}\right).
\end{align*}
\qquad It then follows as in \cite[Proof of Proposition A.1(i)]{EW1} from elliptic regularity theory that $\Phi_k\ast f\in W^{2,t_1}_{\text{loc}}(\mathbb{R}^N)$ and for some
constant $\tilde{C}_1=\tilde{C}_1(N,k,p)>0$,
\begin{align*}
\|\Phi_k\ast f&\|_{W^{2,t_1}(B_{R_2}(x_0))}
\leq \tilde{C}_1 \left( \|\Phi_k\ast f\|_{L^{t_1}(B_{R_1}(x_0))}+\|f\|_{L^{t_1}(B_{R_1}(x_0))}\right)\\
&\ \ \leq \tilde{C}_1\Bigl[\kappa_{t_1}^{(0)}C_0\|f\|_{(2^\ast)'}
+D_2 \|a\|_\infty\left((\kappa_{2^\ast}^{(0)} C_0)^{p-1} \|f\|_{(2^\ast)'}^{p-1}
+|B_{R_1}|^{\frac1{t_1}}\|\varphi\|_\infty^{p-1}\right)\Bigr]\\
&\ \ \leq C_1\left( \|f\|_{(2^\ast)'}+\|f\|_{(2^\ast)'}^{p-1}+\|\varphi\|_\infty^{p-1}\right)\qquad\text{for all }x_0\in\mathbb{R}^N,
\end{align*}
where $C_1=C_1\bigl(N,k,p,\|a\|_\infty\bigr)$.
If $t_1\geq \frac{N}{2}$, Sobolev's embedding theorem gives for each $1\leq t<\infty$ the existence of a constant
$\kappa^{(1)}_t=\kappa_t^{(1)}(N,p,t)>0$ such that
$$
\|\Phi_k\ast f\|_{L^t(B_{R_2}(x_0))}\leq \kappa^{(1)}_t C_1\left( \|f\|_{(2^\ast)'}+\|f\|_{(2^\ast)'}^{p-1}+\|\varphi\|_\infty^{p-1}\right).
$$
As a consequence, we obtain
\begin{align*}
\|f\|_{L^t(B_{R_2}(x_0))}
&\leq D_2 \|a\|_\infty\Bigl(3^{p-2}(\kappa_{t(p-1)}^{(1)} C_1)^{p-1} \left(\|f\|_{(2^\ast)'}^{p-1}+\|f\|_{(2^\ast)'}^{(p-1)^2}
+\|\varphi\|_\infty^{(p-1)^2}\right)\\&\quad +|B_{R_2}|^{\frac{p-1}{t}}\|\varphi\|_\infty^{p-1}\Bigr),
\end{align*}
for all $1\leq t<\infty$. As in \cite[Proof of Proposition A.1(i)]{EW1}, it then follows from elliptic regularity theory that
$\Phi_k\ast f\in W^{2,N}_{\text{loc}}(\mathbb{R}^N)$, and since $R_2>1$, there exists some constant $\tilde{C}_2=\tilde{C}_2(N,k)>0$ such that
\begin{align*}
\|\Phi_k\ast f&\|_{W^{2,N}(B_1(x_0))}\leq \tilde{C}_2 \left( \|\Phi_k\ast f\|_{L^N(B_{R_2}(x_0))}+\|f\|_{L^N(B_{R_2}(x_0))}\right)\\
&\leq \tilde{C}_2\Bigl\{\kappa_N^{(1)}C_1\left(\|f\|_{(2^\ast)'}+\|f\|_{(2^\ast)'}^{p-1}+\|\varphi\|_\infty^{p-1}\right)\\
&+D_2 \|a\|_\infty\Bigl(3^{p-2}(\kappa_{N(p-1)}^{(1)} C_1)^{p-1} \left(\|f\|_{(2^\ast)'}^{p-1}+\|f\|_{(2^\ast)'}^{(p-1)^2}
+\|\varphi\|_\infty^{(p-1)^2}\right)\\
&+|B_{R_2}|^{\frac{p-1}N}\|\varphi\|_\infty^{p-1}\Bigr)\Bigr\}\\
&\leq C_2\left( \|f\|_{(2^\ast)'}+\|f\|_{(2^\ast)'}^{(p-1)^2}+\|\varphi\|_\infty^{p-1}+\|\varphi\|_\infty^{(p-1)^2}\right)
\end{align*}
for all $x_0\in\mathbb{R}^N$, where $C_2=C_2\bigl(N,k,p,\|a\|_\infty\bigr)$.
By Sobolev's embedding theorem, there is a constant $\kappa_\infty=\kappa_\infty(N)>0$ such that
$$
\|\Phi_k\ast f\|_{L^\infty(B_1(x_0))}
\leq \kappa_\infty C_2\left( \|f\|_{(2^\ast)'}+\|f\|_{(2^\ast)'}^{(p-1)^2}+\|\varphi\|_\infty^{(p-1)^2}+\|\varphi\|_\infty^{p-1}\right)
$$
for all $x_0\in\mathbb{R}^N$. Therefore, $\Phi_k\ast f\in L^\infty(\mathbb{R}^N)$ and since $v=\Phi_k\ast f+\varphi$, the estimate \eqref{eqn:infty-estimate} holds
with $C=2\kappa_\infty C_2$ and $m=2$.
\qquad If $t_1<\frac{N}{2}$, we infer from Sobolev's embedding theorem that
$$
\|\Phi_k\ast f\|_{L^t(B_{R_2}(x_0))}\leq \kappa^{(1)}_t C_1\left(\|f\|_{(2^\ast)'}
+\|f\|_{(2^\ast)'}^{p-1}+\|\varphi\|_\infty^{p-1}\right)
$$
for each $1\leq t\leq \frac{Nt_1}{N-2t_1}$, where $\kappa_t^{(1)}=\kappa_t^{(1)}(N,p,t)$.
Therefore, setting $t_2:=\frac{Nt_1}{(N-2t_1)(p-1)}$, we obtain from \eqref{eqn:f1_f2_estim_Rf},
\begin{align*}
&\|f\|_{L^{t_2}(B_{R_2}(x_0))}
\\&\leq D_2 \|a\|_\infty\Bigl(3^{p-2}(\kappa_{t_2(p-1)}^{(1)} C_1)^{p-1} \left(\|f\|_{(2^\ast)'}^{p-1}
+\|f\|_{(2^\ast)'}^{(p-1)^2}+\|\varphi\|_\infty^{(p-1)^2}\right)+|B_{R_2}|^{\frac{p-1}{t_2}}\|\varphi\|_\infty^{p-1}\Bigr).
\end{align*}
Using again elliptic regularity theory as before, we find that $\Phi_k\ast f\in W^{2,t_2}_{\text{loc}}(\mathbb{R}^N)$
and for some constant $\tilde{C}_2=\tilde{C}_2(N,k,p)>0$,
\begin{align*}
\|\Phi_k\ast f&\|_{W^{2,t_2}(B_{R_3}(x_0))}\leq \tilde{C}_2 \left( \|\Phi_k\ast f\|_{L^{t_2}(B_{R_2}(x_0))}+\|f\|_{L^{t_2}(B_{R_2}(x_0))}\right)\\
\ \ &\leq \tilde{C}_2\Bigl\{\kappa_{t_2}^{(1)}C_1\left(\|f\|_{(2^\ast)'}+\|f\|_{(2^\ast)'}^{p-1}+\|\varphi\|_\infty^{p-1}\right)\\
& \ \ \quad +D_2 \|a\|_\infty\Bigl(3^{p-2}(\kappa_{t_2(p-1)}^{(1)} C_1)^{p-1} \left(\|f\|_{(2^\ast)'}^{p-1}+\|f\|_{(2^\ast)'}^{(p-1)^2}+\|\varphi\|_\infty^{(p-1)^2}\right)\\
&\ \ \quad +|B_{R_2}|^{\frac{p-1}{t_2}}\|\varphi\|_\infty^{p-1}\Bigr)\Bigr\}\\
&\ \ \leq C_2\left( \|f\|_{(2^\ast)'}+\|f\|_{(2^\ast)'}^{(p-1)^2}+\|\varphi\|_\infty^{p-1}+\|\varphi\|_\infty^{(p-1)^2}\right),
\end{align*}
for all $x_0\in\mathbb{R}^N$, where $C_2=C_2\bigl(N,k,p,\|a\|_\infty\bigr)$.
\qquad Remarking that $t_2>t_1$, since $p<2^\ast$, we may iterate the procedure.
At each step we find some constant $C_j=C_j\bigl(N,k,p,\|a\|_\infty\bigr)$ such that the estimate
$$
\|\Phi_k\ast f\|_{W^{2,t_j}(B_{R_{j+1}}(x_0))}
\leq C_j\left(\|f\|_{(2^\ast)'}+\|f\|_{(2^\ast)'}^{(p-1)^j}+\|\varphi\|_\infty^{p-1}+\|\varphi\|_\infty^{(p-1)^j}\right)
$$
holds and where $t_j$ is defined recursively via $t_0=(2^\ast)'$ and
$t_{j+1}=\frac{Nt_j}{(N-2t_j)(p-1)}$, as long as $t_j<\frac{N}{2}$.
Since $t_{j+1}\geq \frac{t_1}{p'}\,t_j$ and since $t_1>p'$, we reach after finitely many steps
$t_\ell\geq\frac{N}{2}$, where
$\ell$ only depends on $N$ and $p$.
Since $R_j>1$ for all $j$, using the regularity properties of $\Phi_k$
and arguing as above, we obtain $\Phi_k\ast f\in W^{2,N}_{\text{loc}}(\mathbb{R}^N)$ as well as the estimate
$$
\|\Phi_k\ast f\|_{W^{2,N}(B_1(x_0))}\leq C_{\ell+1}\left(\|f\|_{(2^\ast)'}+\|f\|_{(2^\ast)'}^{(p-1)^{\ell+1}}
+\|\varphi\|_\infty^{p-1}+\|\varphi\|_\infty^{(p-1)^{\ell+1}}\right), $$
where $x_0$ is any point of $\mathbb{R}^N$ and $C_{\ell+1}=C_{\ell+1}\bigl(N,k,p,\|a\|_\infty\bigr)$
is independent of $x_0$.
Then, Sobolev's embedding theorem gives a constant $\kappa_\infty=\kappa_\infty(N)$ for which
$$
\|\Phi_k\ast f\|_{L^\infty(B_1(x_0))}
\leq \kappa_\infty C_{\ell+1}\left(\|f\|_{(2^\ast)'}+\|f\|_{(2^\ast)'}^{(p-1)^{\ell+1}}
+\|\varphi\|_\infty^{p-1}+\|\varphi\|_\infty^{(p-1)^{\ell+1}}\right)
$$
holds for all $x_0\in\mathbb{R}^N$. Hence, $\Phi_k\ast f\in L^\infty(\mathbb{R}^N)$ and choosing
$C=\kappa_\infty C_{\ell+1}$ and $m=\ell+1$ concludes the proof of \eqref{eqn:infty-estimate}.
This completes the proof.
$\Box$
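As a purely illustrative aside (not needed for the proof), the recursion $t_0=(2^\ast)'$, $t_{j+1}=\frac{Nt_j}{(N-2t_j)(p-1)}$ used above can be iterated numerically to see how many steps are required before $t_\ell\geq \frac{N}{2}$; a minimal sketch, assuming Python and $2<p<2^\ast$:
\begin{verbatim}
def steps_to_reach_N_over_2(N, p):
    """Iterate t_{j+1} = N t_j / ((N - 2 t_j)(p - 1)) from t_0 = (2*)' = 2N/(N+2)."""
    assert N >= 3 and 2 < p < 2 * N / (N - 2)   # subcritical exponent required
    t, j = 2 * N / (N + 2), 0
    while t < N / 2:
        t = N * t / ((N - 2 * t) * (p - 1))
        j += 1
    return j, t

print(steps_to_reach_N_over_2(3, 4.0))   # e.g. N = 3, p = 4
\end{verbatim}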
\noindent{\bf Acknowledgements:} H. Chen is supported by NNSF of China, No: 12071189, 12001252,
by the Jiangxi Provincial Natural Science Foundation, No: 20202BAB201005, 20202ACBL201001 and by the Alexander von Humboldt Foundation. T. Weth is supported by the German Science Foundation (DFG) within the project WE-2821/5-2.
\begin{thebibliography}{99}
\bibitem{AS} M. Abramowitz and I. A. Stegun,
\newblock
Handbook of mathematical functions with formulas, graphs, and mathematical tables.
\newblock National Bureau of Standards Applied Mathematics Series, 55
\newblock Washington, D.C. 1964 xiv+1046 pp.
\bibitem{alsholm-schmidt70}
P. Alsholm and G. Schmidt, Spectral and scattering theory for {S}chr\"odinger operators, {\em Arch. Rational Mech. Anal.}, 40, 281--311, (1970/1971).
\bibitem{baruch-fibich-tsynkov:2009}
G. Baruch, G. Fibich and S. Tsynkov,
\newblock A high-order numerical method for the nonlinear {H}elmholtz
equation in multidimensional layered media,
\newblock {\em J. Comput. Phys. 228(10)}, 3789--3815 (2009).
\bibitem{colton-kress}
D. Colton and R. Kress,
\newblock {\em Inverse acoustic and electromagnetic scattering theory},
volume~93 of {\em Applied Mathematical Sciences}.
\newblock Springer-Verlag, Berlin, 1992.
\bibitem{Evans} L.C. Evans, \newblock {\em Partial Differential Equations}, {\em Graduate Studies in Mathematics}, Vol. 19
\newblock AMS, Providence, 1991.
\bibitem{EW0} G. Ev\'{e}quoz and T. Weth, Real solutions to the nonlinear Helmholtz equation with local nonlinearity, {\it Arch. Rational Meth. Anal. 211}, 359--388 (2014).
\bibitem{EW1} G. Ev\'{e}quoz and T. Weth, Dual variational methods and nonvanishing for the nonlinear Helmholtz equation, {\it Adv. Math. 280}, 690--728 (2015).
\bibitem{EW2} G. Ev\'{e}quoz and T. Weth, Branch continuation inside the essential spectrum for the nonlinear Schr\"odinger equation, {\it J. Fixed Point Theory Appl. 19}, 475--502 (2017).
\bibitem{EY} G. Ev\'{e}quoz and T. Yesil, Dual ground state solutions for the critical nonlinear Helmholtz equation, {\it Proc. Roy. Soc. Edinburgh Sect. A}, published online, https://doi.org/10.1017/prm.2018.103.
\bibitem{fibich-tsynkov:2005} G. Fibich and S. Tsynkov,
\newblock Numerical solution of the nonlinear {H}elmholtz equation using
nonorthogonal expansions,
\newblock {\em J. Comput. Phys. 210(1)}, 183--224 (2001).
\bibitem{Gell-Red}
J. Gell-Redman, A. Hassell, J. Shapiro and J. Zhang, Existence and asymptotics of nonlinear Helmholtz eigenfunctions, Preprint, https://arxiv.org/abs/1908.04890.
\bibitem{G} S. Guti\'{e}rrez, Non trivial $L^q$ solutions to the Ginzburg-Landau equation,
{\it Math. Ann. 328}, 1-25 (2004).
\bibitem{J} E. Jalade, Inverse problem for a nonlinear Helmholtz equation,
{\it Ann. l'IHP Anal. non lin\'eaire. 21(4)}, 517--531 (2004).
\bibitem{jerison-kenig85} D. Jerison and C.~E. Kenig,
\newblock Unique continuation and absence of positive eigenvalues for
{S}chr\"odinger operators,
\newblock {\em Ann. of Math. 121(3),} 463--494 (1985).
\bibitem{kato59}
T. Kato,
\newblock Growth properties of solutions of the reduced wave equation with a
variable coefficient,
\newblock {\em Comm. Pure Appl. Math. 12,} 403--425 (1959).
\bibitem{komech} A. Komech,
On dynamical justification of quantum scattering cross section,
{\it J. Math. Anal. Appl. 432(1)}, 583--602 (2015).
\bibitem{lorch-szego63}
L. Lorch and P. Szego,
\newblock Higher monotonicity properties of certain {S}turm-{L}iouville
functions,
\newblock {\em Acta Math. 109,} 55--73 (1963).
\bibitem{M} E. Makai, On a monotonic property of certain Sturm-Liouville
functions, {\it Acta Math. Acad. Sci. Hungar. 3}, 163--172 (1952).
\bibitem{Mandel1} R. Mandel, Uncountably many solutions for nonlinear Helmholtz and curl-curl equations, {\it Adv. Nonlinear Stud. 19,}
569--593 (2019).
\bibitem{MMP} R. Mandel, E. Montefusco and B. Pellacci,
Oscillating solutions for nonlinear Helmholtz equations,
{\it Z. Angew. Math. Phys. 68(6)}, Art. 121, 19 pp (2017).
\bibitem{rabinowitz71}
Paul~H. Rabinowitz,
\newblock Some global results for nonlinear eigenvalue problems,
\newblock {\em J. Funct. Anal. 7}, 487--513 (1971).
\bibitem{sturm}
Ch. Sturm,
\newblock Sur les {\'e}quations diff{\'e}rentielles lin{\'e}aires de
deuxi{\`e}me ordre,
\newblock {\em J. Math. Pures Appl. 1}, 106--186 (1836).
\bibitem{tuck06}
E.~O. Tuck,
\newblock On positivity of {F}ourier transforms,
\newblock {\em Bull. Austral. Math. Soc. 74(1)}, 133--138 (2006).
\bibitem{wu-zou} H.~J. Wu and J. Zou,
\newblock Finite element method and its analysis for a nonlinear Helmholtz equation with high wave numbers,
\newblock {\em SIAM J. Numer. Anal. 56(3),} 1338--1359 (2018).
\bibitem{zeidler}
E. Zeidler,
\newblock {\em Nonlinear functional analysis and its applications. {I}
Fixed-point theorems},
\newblock Springer-Verlag, New York, 1986.
\end{thebibliography}
\end{document}
\begin{document}
\pacs{42.65.Lm,42.50.Dv,03.67.Bg}
\title{Modelling and optimization of photon pair sources based on spontaneous parametric down-conversion}
\author{Piotr Kolenderski}
\email{[email protected]}
\affiliation{Institute of
Physics, Nicolaus Copernicus University, Grudziadzka 5, 87-100
Toru{\'n}, Poland}
\author{Wojciech Wasilewski}
\affiliation{Institute of Experimental Physics, Warsaw University,
Ho{\.z}a 69, 00-681 Warsaw, Poland}
\affiliation{Niels Bohr Institute, University of Copenhagen, DK 2100, Denmark}
\affiliation{QUANTOP, Danish National Research Foundation Center for
Quantum Optics}
\author{Konrad Banaszek}
\affiliation{Institute of Physics, Nicolaus Copernicus University, Grudziadzka 5, 87-100 Toru{\'n}, Poland}
\date{\today}
\begin{abstract}
We address the problem of efficient modelling of photon pairs generated in spontaneous parametric down-conversion and coupled into single-mode fibers. It is shown that when the range of relevant transverse wave vectors is restricted by the pump and fiber modes, the computational complexity can be reduced substantially with the help of the paraxial approximation, while retaining the full spectral characteristics of the source. This approach can serve as a basis for efficient numerical calculations, or can be combined with analytically tractable approximations of the phase matching function. We introduce here a cosine-gaussian approximation of the phase matching function which works
for a broader range of parameters than the gaussian model used previously. The developed modelling tools are used to evaluate characteristics of the photon pair sources such as the pair production rate and the spectral purity quantifying frequency correlations. Strategies to generate spectrally uncorrelated photons, necessary in multiphoton interference experiments, are analyzed with respect to trade-offs between parameters of the source.
\end{abstract}
\maketitle
\section{Introduction}
Spontaneous parametric down conversion (SPDC) is a nonlinear process in which a pump photon interacting with a crystal decays into two daughter photons. The process has been successfully employed to demonstrate fundamental aspects of quantum mechanics such as the violation of Bell's inequalities \cite{CHSH1969,Kwiat1995}, and
utilized in implementations of quantum teleportation \cite{Boschi1998,Marcikic2003,Ursin2004}, quantum cryptography \cite{Gisin2002}, linear optical quantum information processing \cite{Kok2007}, and other quantum-enhanced technologies.
Typically, photon pairs emerging from non-linear media are described by a complicated spatio-temporal wave function that exhibits correlations in multiple degrees of freedom. In contrast, many applications of photon pairs require their preparation in single isolated spatio-temporal modes. Single spatial modes can be selected by coupling photons into single-mode fibers (SMFs) that in effect filter heavily the SPDC light. Furthermore, many protocols rely on interference between photons originating from independent sources \cite{Kaltenbaek2006,Riedmatten2003}. Although spatial modes are well defined by SMFs, the interference visibility may be compromised by undesirable spectral correlations within individual pairs. One way to tailor the spectral degree of freedom is to use narrowband interference filters, which is easy to implement in an experiment, but in consequence reduces the useful photon flux. An alternative approach is to adjust the setup parameters to enforce the source to produce spectrally uncorrelated pairs \cite{Dragan2004,URen2005,URen2007,Mosley2008}.
These issues raise the question of how to optimize the useful fraction of photon pairs produced by SPDC sources. A purely experimental approach would be just to try various alignments of the source. In practice, this strategy would be rather burdensome owing to the large number of controllable parameters of the setup, their time-consuming adjustments, and long data acquisition times. A natural alternative is to resort to numerical modeling. This however presents its own challenges, as including all relevant degrees of freedom is computationally demanding.
In this paper we discuss approximate methods that alleviate the numerical load necessary to model faithfully realistic SPDC sources. Our approach is based on an observation that optical fibers collecting photons define a relatively narrow range of wave vectors that need to be included in calculations. This justifies applying the paraxial approximation, which makes a substantial portion of the problem tractable analytically. The paraxial approximation can be also combined with a simplification of the two-photon wave function to an analytically manageable form leading to closed formulas. We exploit these strategies to analyze the performance of SPDC sources in quantum information applications.
Coupling of down-converted photons into SMFs has been the subject of a number of works, especially in the case of cw pumping. Kurtsiefer {\em et al.} \cite{Kurtsiefer2001} gave a simple argument showing that careful matching of the SPDC output with the fiber modes increases the collection efficiency. Mathematical models for a cw-pumped source have been derived and compared with experimental data in Refs.~\cite{Bovino2003,Castelletto2004,Castelletto2005}. The collinear case has been analyzed theoretically in Ref.~\cite{Andrews2004}. Dragan \cite{Dragan2004} used a gaussian approximation to model fiber-coupled sources.
The counterintuitive scaling of the production rates with the crystal length has been pointed out by Lee {\em et al.} \cite{Lee2005}, and a detailed analysis of quasi-phase matched structures has been presented by Ljunggren and Tengner \cite{ljunggren2005,ljunggren2006}. More recently, Ling {\em et al.} \cite{Ling2008} provided a method to estimate the absolute emission rates for cw pumping. In the present paper, we concentrate on pulse-pumped SPDC sources and optimization of their performance parameters.
Our numerical calculations incorporate the exact form of the dispersion relations for the nonlinear medium and use a second-order expansion of the phase mismatch in the transverse wave vectors of the SPDC photons. The modeling is based on two strategies. The first approach resorts to numerical means, but with a minimized computational effort that nevertheless delivers highly accurate results in a broad range of parameters. This method has been used in Refs.~\cite{Wasilewski2006,wasilewski2007} to compare
experimentally measured characteristics of down-conversion sources with theoretical predictions. The second approach will provide expressions
for the biphoton wave function in a closed analytical form
through a further approximation to the phase matching functions. This approach, which we will call
the {\em cosine-gaussian approximation} (CGA), is based on a more accurate analytically integrable model of the phase matching function than the gaussian model studied previously \cite{URen2003,Dragan2004,URen2005}. We compare both approaches with direct numerical calculations in which no paraxial approximation is applied and all integrals are evaluated by numerical means. As an application of the developed tools, we discuss generation of spectrally uncorrelated photons in a type-I $\beta$-barium borate (BBO) crystal. We consider here two strategies to reduce spectral correlation: one method is to adjust carefully the pump pulse and collection modes, while the other one is to restrict the spectrum of the generated photons with the help of interference filters. We compare the source brightness that can be achieved with both methods and relate these results to previous discussions \cite{Dragan2004}.
The paper is organized as follows. In Sec.~\ref{section:TwoPhotonInFreeSpace} we present the setup under consideration and derive the biphoton wave function in free space. Section \ref{section:NumericalApproach} presents basic assumptions about propagation of a pump beam and output photons and the impact of spatial filtering imposed by SMFs. The biphoton wave function within the paraxial approximation is derived. Next in \secref{section:CGA} we present the cosine gaussian approximation and apply it to derive an analytical formula for the wave function of a photon pair coupled into SMFs. The figures of merit are defined in \secref{section:FiguresOfMerit}, and the approximation of perfect phase matching is used to gain some basic intuitions. Next in Sec.~\ref{section:Comparison} we compare the computational effort and applicability of developed methods. Finally, in \secref{section:SpectrallyUncorrelatedPairs} we analyze strategies to reduce spectral correlations within photon pairs.
\section{Two-photon wave function}
\label{section:TwoPhotonInFreeSpace}
In the non-degenerate down-conversion process, the pump field, described by the positive-frequency part of the electric field $E^{(+)}_p(\VV{r},t)$, interacts with quantized signal and idler fields, whose creation-operator parts will be denoted as $\hat{E}_s^{(-)}(\VV{r},t)$ and $\hat{E}_i^{(-)}(\VV{r},t)$. The interaction hamiltonian has the form of an integral over the volume $V$ of the crystal \cite{Louisell1961}:
\begin{multline}\label{intred}
\hat{H}_I(t)=
\frac{\epsilon_0 \chi^{(2)}}{2}\int\limits_{V}^{} d^3\VV{r}\
E^{(+)}_p(\VV{r},t) \hat{E}_s^{(-)}(\VV{r},t)
\hat{E}_i^{(-)}(\VV{r},t) \\ + \text{H.c.},
\end{multline}
where $\epsilon_0$ is the vacuum permittivity and $\chi^{(2)}$ denotes the second-order nonlinear susceptibility coefficient, approximated by a constant.
We will assume that the nonlinear interaction is weak enough to neglect pump
depletion and to justify the first order perturbation theory. We will focus here on type-I phase-matching, when both the down-converted photons have the same polarization direction, perpendicular to that of the pump pulse. The case of type-II phase matching can be analyzed analogously.
We will take the nonlinear crystal to be a thin slab
of thickness $L$ oriented perpendicular to $z$-axis and extending from $z=-L/2$ to $z=L/2$,
as illustrated in Fig.~\ref{SPDC}. The pump pulse propagates along $z$--direction
outside the crystal. Following Rubin et al. \cite{klyshko} we
parameterize the waves using the frequencies $\omega$
and the wave vector components $\kp{}$ perpendicular to $z$. These
quantities are preserved at the crystal-free space interface.
\begin{figure}
\caption{(Color online) The geometry of a photon pair source. A crystal exhibiting $\chi^{(2)}$ nonlinearity.}
\label{SPDC}
\end{figure}
In order to calibrate the pump power,
it will be convenient to introduce a normalized pump pulse amplitude $A_p(\kp{p},\omega_p)$ satisfying $\int \text{d}^2\kp{p} \text{d}\omega_p |A_p(\kp{p}, \omega_p)|^2=1$.
We will assume the pump pulse amplitude in a factorable form, with
no spatiotemporal correlations:
\begin{equation}\label{eq:pumpamplidue}
A_p(\kp{},\omega)= A_p^{\text{temp}}(\omega) A_p^{\text{sp}}(\kp{})
\end{equation}
where $A_p^{\text{temp}}(\omega)$ refers to the temporal part and $A_p^{\text{sp}}(\kp{})$ to the spatial part. Both parts will be taken in a gaussian form:
\begin{eqnarray}
A_p^{\text{temp}}(\omega) &=& \frac{\sqrt{\tau_p}}{\sqrt[4]{\pi}} \exp\left( -\frac{\tau_p^2}{2 }(\omega-2\omega_0)^2\right)\\
A_p^{\text{sp}}(\kp{}) &=& \frac{w_p}{\sqrt{\pi}} \exp\left(-\frac{w_p^2}{2}\kp{}^2 \right)
\end{eqnarray}
where $\tau_p$ stands for the pulse duration, $w_p$ for the pump beam width, and $2\omega_0$ is the central frequency of the pump pulse.
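As a quick consistency check of the normalization $\int \text{d}^2\kp{p} \text{d}\omega_p |A_p(\kp{p}, \omega_p)|^2=1$ for the profiles above, one may integrate them numerically; a minimal sketch (illustration only, assuming Python with NumPy/SciPy and the hypothetical values $\tau_p=w_p=\omega_0=1$ in arbitrary units):
\begin{verbatim}
import numpy as np
from scipy.integrate import quad, dblquad

tau_p, w_p, omega_0 = 1.0, 1.0, 1.0   # arbitrary units, assumed for illustration

A_temp = lambda w: np.sqrt(tau_p) / np.pi**0.25 \
    * np.exp(-tau_p**2 / 2 * (w - 2 * omega_0)**2)
A_sp = lambda kx, ky: w_p / np.sqrt(np.pi) \
    * np.exp(-w_p**2 / 2 * (kx**2 + ky**2))

# both norms should come out very close to 1
norm_temp, _ = quad(lambda w: A_temp(w)**2, -np.inf, np.inf)
norm_sp, _ = dblquad(lambda kx, ky: A_sp(kx, ky)**2,
                     -np.inf, np.inf, lambda _: -np.inf, lambda _: np.inf)
print(norm_temp, norm_sp)
\end{verbatim}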
The positive-frequency part of the pump pulse electric field $E^{(+)}_p(\VV{r},t)$ is the Fourier transform of the spectral amplitude:
\begin{equation}\label{pumppulse}
E^{(+)}_p(\VV{r},t)= \mc{E}_p\int \text{d}^2\kp{p}\text{d} \omega_p A_p (\kp{p},\omega_p) \text{e}^{\text{i}(\kp{p}\VV{r}-\omega_p t )}
\end{equation}
where $\mathcal{E}_p$ characterizes the strength of the pump pulse, with the squared modulus $|\mc{E}_p|^2$ proportional to the pump pulse energy.
Subsequently, we assume the following modal expansion for the signal $s$ and idler $i$
field operators:
\begin{multline}\label{fieldop}
\hat{E}^{(-)}_{\mu}(\VV{r},t) = \mathcal{E}_\mu \int \text{d}^2\kp{\mu}\text{d} \omega_{\mu} \, \text{e}^{-\text{i}\VV{k}_{\mu}\VV{r} + \text{i} \omega_{\mu} t} \hat{a}^\dagger(\kp{\mu},\omega_{\mu}),
\\ \mu=s,i.
\end{multline}
We approximated here the scaling factors defining the zero-point field fluctuations with frequency-independent constants $\mathcal{E}_\mu$.
The biphoton component of the wave function calculated in the first-order perturbation theory takes the form \cite{Rubin1996}:
\begin{multline}\label{wf:vacume}
|\Psi\rangle= \frac{1}{\text{i}\hbar}\int \text{d} t\
\hat{H}_I(t)|\text{vac}\rangle \\
= \int \text{d}^2\kp{s} \text{d}^2\kp{i} \text{d}\omega_s \text{d}\omega_i \Psi(\kp{s},\omega_s;\kp{i},\omega_i) \\
\times \hat{a}^\dagger\left(\kp{s},\omega_{s}\right)\hat{a}^\dagger\left( \kp{i},\omega_{i}\right)|\text{vac}\rangle
\end{multline}
where the probability amplitude reads:
\begin{multline}\label{eq:wf:intz}
\Psi(\kp{s},\omega_s;\kp{i},\omega_i)= \mc{N} \int_{-L/2}^{L/2}\text{d}z A_p(\kp{s}+\kp{i},\omega_s+\omega_i)
\\
\times \exp\left[ \text{i}\Delta k_z(\kp{s},\omega_s;\kp{i},\omega_i) z \right]
\end{multline}
and $\mc{N}={\epsilon_0\chi^{(2)} \mc{E}_p \mc{E}_s
\mc{E}_i }/(2\text{i} \hbar)$. The phase mismatch $\Delta k_z(\kp{s},\omega_s;\kp{i},\omega_i)$
is defined using the $z$ components of the wave vectors
of the interacting fields:
\begin{multline}\label{eq:phasemismatching}
\Delta k_z(\kp{s},\omega_s;\kp{i},\omega_i)= \\
=k_{pz}(\kp{s}+\kp{i},\omega_s+\omega_i) -k_{sz}(\kp{s},\omega_s)-k_{iz}(\kp{i},\omega_i).
\end{multline}
These components are determined by the frequencies $\omega_s,\omega_i$ and the transverse wave vectors $\kp{s},\kp{i}$ \cite{klyshko}. The integral expression in \eq{eq:wf:intz} can be given a meaningful
physical interpretation. Each slice of the crystal contributes to the
biphoton amplitude $\Psi(\kp{s},\omega_s;\kp{i},\omega_i)$. However,
the phase of this contribution changes from slice to slice, so that constructive interference occurs only for certain propagation directions.
The wave function given in \eq{eq:wf:intz} describes the entire field emerging from the crystal into the free space. However, in a typical experiment the signal and idler photons are coupled into SMFs. For SMFs collecting light in the $x$--$z$ plane at angles $\alpha_s$ and $\alpha_i$ with respect to the $z$ axis,
the collected spatial modes can be approximated by gaussians centered at transverse wave vectors $\kp{s 0}=\hat x \omega_s \sin \alpha_s /c$ and $\kp{i 0}=-\hat x \omega_i \sin \alpha_i /c$:
\begin{equation}\label{eq:fibermodes}
u_\mu(\kp{\mu},\omega_\mu)=\frac{w_\mu}{\sqrt{\pi}} \exp\left(-\frac{w_\mu^2}{2}\left(\kp{\mu}-\kp{\mu 0}\right)^2 \right), \quad \mu=s,i
\end{equation}
Here the waists $w_s$ and $w_i$ define the spatial extent of the collected modes, assumed to be constant within the relevant spectral bandwidth.
The wave function $ \Psi(\omega_s,\omega_i)$ for both photons coupled into SMFs is given by an overlap of the wave function in free space $\Psi(\kp{s},\omega_s;\kp{i},\omega_i)$ with the spatial profiles $u_s(\kp{s},\omega_s)$ and $u_i(\kp{i},\omega_i)$ of the fiber modes:
\begin{multline}\label{eq:wf:fiber}
\Psi(\omega_s,\omega_i) = \\
\int \text{d}^2\kp{s} \text{d}^2\kp{i}\, u_s^*(\kp{s},\omega_s) u_i^*(\kp{i},\omega_i) \Psi(\kp{s},\omega_s;\kp{i},\omega_i).
\end{multline}
This object will be used to calculate coincidence count rates and spectral properties of generated photons. For a pump pulse amplitude in a factorable form as that in Eq.~(\ref{eq:pumpamplidue}), it will be convenient to write
\begin{equation}\label{eq:def:theta}
\Psi(\omega_s,\omega_i) = A_p^{\text{temp}}(\omega_s+\omega_i) \Theta(\omega_s,\omega_i).
\end{equation}
Here $\Theta(\omega_s,\omega_i)$ can be viewed as the effective phase matching function for
the collected modes that includes the geometry of the setup and the physical properties of the nonlinear medium. It is explicitly given by:
\begin{multline}\label{eq:epmf:DNI}
\Theta(\omega_s,\omega_i) = \mathcal{N}\int \text{d}^2\kp{s}\ \text{d}^2\kp{i}
\int_{-L/2}^{L/2}\text{d}z \, A_p^{\text{sp}}(\kp{s}+\kp{i}) \\
\times u_s^*(\kp{s},\omega_s)\ u_i^*(\kp{i},\omega_i)
\text{e}^{\text{i}
\Delta k_z(\kp{s},\omega_s;\kp{i},\omega_i) z }.
\end{multline}
One way to simplify the above equation is to evaluate analytically
the integral over the length of the crystal, which yields:
\begin{multline} \label{Eq:empf:D}
\Theta^{(\text{D})}(\omega_s,\omega_i) = \frac{\mc{N} L }{2}
\int \text{d}^2\kp{s}\ \text{d}^2\kp{i} u_s^*(\kp{s},\omega_s)\ u_i^*(\kp{i},\omega_i) \\
\times A_p^{\text{sp}}(\kp{s}+\kp{i}) \text{sinc}\left(\frac{L}{2} \Delta k_z(\kp{s},\omega_s;\kp{i},\omega_i) \right).
\end{multline}
However, the remaining integrals over $\kp{s}$ and $\kp{i}$ are analytically intractable due to the nontrivial form of the phase mismatch $\Delta k_z$, and they must be performed by numerical means. We will refer to this procedure as \emph{direct numerical integration} and denote the corresponding formulas with a superscript $(D)$. The four-dimensional integration task is computationally very demanding, and in the next two sections we will present approximate methods which reduce the computational effort needed to compute the effective phase matching function $\Theta(\omega_s,\omega_i)$.
\section{Paraxial approximation}
\label{section:NumericalApproach}
The expression for the effective phase matching function given in Eq.~(\ref{eq:epmf:DNI}) includes gaussian fiber mode functions $u_s(\kp{s},\omega_s)$ and $u_i(\kp{i},\omega_i)$ that vanish very fast as the transverse wave vectors $\kp{s}$ and $\kp{i}$ depart from the central observation directions $\VV{k}_{s0\perp}$ and $\VV{k}_{i0\perp}$. This implies that little error is introduced when expanding the phase mismatch $\Delta k_z$ given in \eq{eq:phasemismatching} up to the second order in deviations of the transverse wave vectors from $\kp{s0}$ and $\kp{i0}$. After such an expansion the entire integrand in Eq.~(\ref{eq:epmf:DNI}) takes a gaussian form
in $\kp{s}$ and $\kp{i}$, provided that the spatial pump profile is gaussian as well. Consequently, one can perform all the integrals over transverse wave vectors analytically. This is a great simplification of the computational complexity of the problem, as we are now left only with a one-dimensional integral over $z$ which needs to be performed numerically. We will call this method {\em paraxial approximation} in analogy to the standard description of paraxial wave propagation in classical optics.
It will be convenient to introduce the following notation for the expansion of the wave vector mismatch:
\begin{multline}\label{eq:approx}
\Delta k_z(\kp{s},\omega_s;\kp{i},\omega_i) \approx \\ \vec{D}_{0}(\omega_s,\omega_i)+\mathbf{D}_{1}^T(\omega_s,\omega_i)\boldsymbol{\kappa} +{\boldsymbol{\kappa}}^T \mathbf{D}_{2}(\omega_s,\omega_i)\boldsymbol{\kappa},
\end{multline}
where
\begin{equation}\label{eq:def:kappa}
\boldsymbol{\kappa}=(\kp{s}-\kp{s0},\kp{i}-\kp{i0})^T
\end{equation}
is a four-element vector of deviations from the central observation directions. The Taylor expansion coefficients can be grouped into
a scalar in the zeroth order
\begin{equation}
\vec{D}_{0}(\omega_s,\omega_i) = \Delta k_z(\kp{s0},\omega_{s};\kp{i0},\omega_{i})
\end{equation}
a vector in the first order
\begin{equation}
\label{Eq:D1}
\vec{D}_{1}(\omega_s,\omega_i) = \left(\begin{array}{c} \mathbf{d}_s(\omega_s,\omega_i)\\ \mathbf{d}_i(\omega_s,\omega_i)
\end{array}
\right)
\end{equation}
and a matrix in the second order:
\begin{equation}
\vec{D}_{2}(\omega_s,\omega_i) =
\left( \begin{array}{cc} \mathbf{d}_{ss}(\omega_s,\omega_i), & \mathbf{d}_{si}(\omega_s,\omega_i) \\ \mathbf{d}_{si}(\omega_s,\omega_i), & \mathbf{d}_{ii}(\omega_s,\omega_i) \end{array}\right)
\end{equation}
We wrote the vector $\vec{D}_{1}$ and the matrix $\vec{D}_{2}$ in a block form with entries given by:
\begin{equation}
\mathbf{d}_{\mu}(\omega_s,\omega_i) = \left.\left(\frac{\partial \Delta k_z}{\partial k_{\mu x}},\frac{\partial \Delta k_z}{\partial k_{\mu y}}\right)^T\right|_{\scriptsize\begin{array}{c}\kp{s}=\kp{s0}\\ \kp{i}=\kp{i0}\end{array}},
\end{equation}
and
\begin{multline}
\mathbf{d}_{\mu\nu}(\omega_s,\omega_i) = \\
=\frac{1}{2}\left.\left(
\begin{array}{cc} \displaystyle
\frac{\partial^2 \Delta k_z}{\partial k_{\mu x} \partial k_{\nu x}}, & \displaystyle \frac{\partial^2 \Delta k_z}{\partial k_{\mu x} \partial k_{\nu y}} \\ \displaystyle \frac{\partial^2 \Delta k_z}{\partial k_{\mu y} \partial k_{\nu x}}, & \displaystyle
\frac{\partial^2 \Delta k_z}{\partial k_{\mu y} \partial k_{\nu y}} \end{array}\right)\right|_{\scriptsize\begin{array}{c}\kp{s}=\kp{s0}\\ \kp{i}=\kp{i0}\end{array}},
\end{multline}
where $\mu, \nu = s,i$.
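In a numerical implementation these Taylor coefficients need not be computed by hand: given a routine evaluating $\Delta k_z$ from the dispersion relations, they can be approximated by finite differences. A minimal sketch (illustration only, assuming Python with NumPy and a user-supplied function \texttt{delta\_kz} for the phase mismatch):
\begin{verbatim}
import numpy as np

def taylor_coefficients(delta_kz, k_s0, k_i0, omega_s, omega_i, h=1e-4):
    """Finite-difference D0 (scalar), D1 (4-vector), D2 (4x4 matrix) of
    delta_kz(kperp_s, omega_s, kperp_i, omega_i) around k_s0, k_i0.
    D2 includes the factor 1/2 of the second-order Taylor term."""
    def F(kappa):                      # mismatch as a function of the deviation vector
        return delta_kz(k_s0 + kappa[:2], omega_s, k_i0 + kappa[2:], omega_i)

    D0 = F(np.zeros(4))
    D1, D2, e = np.zeros(4), np.zeros((4, 4)), np.eye(4)
    for a in range(4):
        D1[a] = (F(h*e[a]) - F(-h*e[a])) / (2*h)
        for b in range(4):
            D2[a, b] = (F(h*(e[a]+e[b])) - F(h*(e[a]-e[b]))
                        - F(h*(e[b]-e[a])) + F(-h*(e[a]+e[b]))) / (8*h**2)
    return D0, D1, D2
\end{verbatim}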
In order to write a compact formula for the effective phase matching function in the paraxial approximation, it will be helpful to represent the product of the fiber mode functions $u_s^\ast (\kp{s},\omega_s) u_i^\ast(\kp{i},\omega_i)$ and the pump beam profile $A_p^{\text{sp}}(\kp{s}+\kp{i})$ as an exponent of a quadratic expression:
\begin{multline}
u_s^\ast (\kp{s},\omega_s) u_i^\ast(\kp{i},\omega_i) A_p^{\text{sp}}(\kp{s}+\kp{i})=
\\
=\exp\left(-\vec{B}_0-\vec{B}_1^T\boldsymbol \kappa -\boldsymbol \kappa^T \vec{B}_2 \boldsymbol \kappa\right).
\end{multline}
where $ \boldsymbol \kappa$ is a four-element vector of deviations from central observation directions defined in \eqref{eq:def:kappa}. The coefficients of the quadratic expression are a scalar
\begin{equation}
\vec{B}_0
=
\frac{w_p^2}{2}\left(\vec{k}_{s0\perp}+\vec{k}_{i0\perp}\right)^2
\end{equation}
a four-component vector
\begin{equation}
\vec{B}_1
=w_p^2 \left(
\begin{array}{c}
\vec{k}_{s0\perp}+\vec{k}_{i0\perp} \\
\vec{k}_{s0\perp}+\vec{k}_{i0\perp}
\end{array}
\right)
\end{equation}
and a $4\times 4$ matrix
\begin{equation}
\vec{B}_2=
\frac{1}{2} \left(
\begin{array}{cc}
\displaystyle (w_p^2+w_s^2)\vec{I} & \displaystyle w_p^2\vec{I}\\
\displaystyle w_p^2\vec{I} &\displaystyle (w_p^2+w_i^2)\vec{I}
\end{array}
\right),
\end{equation}
where $\vec{I}$ denotes a two dimensional identity matrix. This notation allows us to write the result of four-dimensional gaussian integration of Eq.~(\ref{eq:epmf:DNI}) over the transverse wave vectors as:
\begin{multline}\label{eq:epmf:N}
\Theta^{(\text{P})}(\omega_s,\omega_i)=
\int_{-L/2}^{L/2}\text{d}z\, \frac{\mc{N} w_s w_i w_p}{\sqrt{\pi\det\vec{M}_2(z)}} \\
\times
{\exp\left(-\vec{M}_0(z) -\frac{1}{4} \vec{M}^T_1(z) \vec{M}^{-1}_2(z) \vec{M}_1(z)\right)}
\end{multline}
where the superscript $(P)$ stands for the paraxial approximation. Here we introduced
\begin{equation}
\vec{M}_j(z)=\vec{B}_j-\text{i}z\vec{D}_{j}, \qquad j=0,1,2
\end{equation}
and for notational simplicity we suppressed dependence on frequencies $\omega_s$ and $\omega_i$. The integral
over the crystal length in Eq.~(\ref{eq:epmf:N}) needs to be calculated numerically, which is substantially faster than direct numerical integration of Eq.~(\ref{Eq:empf:D}).
It is worthwhile to note that in Eq.~(\ref{eq:epmf:N}) the effects of spectral dispersion are fully taken into account, as no expansion in the signal and idler frequencies $\omega_s$ and $\omega_i$ has been applied. As we will see in \secref{section:Comparison}, this makes numerical results based on the paraxial approximation very precise.
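For illustration, the remaining one-dimensional integral in Eq.~(\ref{eq:epmf:N}) is straightforward to evaluate numerically; a minimal sketch (assuming Python with NumPy/SciPy, the coefficients $\vec{B}_j$ and $\vec{D}_j$ already computed for fixed $\omega_s,\omega_i$, and $|\mc{N}|=1$):
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

def theta_paraxial(B0, B1, B2, D0, D1, D2, L, ws, wi, wp):
    """Effective phase matching Theta^(P) for fixed frequencies, |N| = 1.
    B1, D1 are length-4 vectors; B2, D2 are 4x4 matrices."""
    def integrand(z):
        M0 = B0 - 1j * z * D0
        M1 = B1 - 1j * z * D1
        M2 = B2 - 1j * z * D2
        pref = ws * wi * wp / np.sqrt(np.pi * np.linalg.det(M2))
        return pref * np.exp(-M0 - 0.25 * M1 @ np.linalg.solve(M2, M1))
    # quad handles real integrands, so integrate real and imaginary parts separately
    re, _ = quad(lambda z: integrand(z).real, -L/2, L/2)
    im, _ = quad(lambda z: integrand(z).imag, -L/2, L/2)
    return re + 1j * im
\end{verbatim}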
\section{Cosine-Gaussian approximation}
\label{section:CGA}
The numerical effort to calculate the effective phase matching function can be reduced further at the cost of the accuracy. The basic idea is to replace the sinc term appearing in Eq.~(\ref{Eq:empf:D}) by an analytically tractable expression. Previous works \cite{Dragan2004,URen2005} introduced the {\em gaussian approximation} (GA), which approximated the sinc term by a gaussian function, thus enabling analytical integration. We will consider here a more general expression of the form:
\begin{equation}\label{eq:sincapprox}
\mathop{\text{sinc}} x\approx \exp(- \xi{x^2} ) \cos
\left(\zeta x \right) = {\textstyle\frac{1}{2}} \exp(- \xi{x^2} + \text{i} \zeta x) + \text{c.c.}
\end{equation}
As seen in \figref{fig:sinc}, using the parameters $\xi=\frac{1}{20}$ and $\zeta=\frac{1}{2}$ yields a more accurate approximation to the sinc function than the GA corresponding to the choice of parameters $\xi=\frac{1}{5}$ and $\zeta=0$.
\begin{figure}
\caption{(Color online) A comparison of the $\mathop{\text{sinc}}$ function with its gaussian and cosine-gaussian approximations.}
\label{fig:sinc}
\end{figure}
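The comparison shown in Fig.~\ref{fig:sinc} is easy to reproduce; a minimal sketch (illustration only, assuming Python with NumPy):
\begin{verbatim}
import numpy as np

x = np.linspace(-10, 10, 2001)
sinc = np.sinc(x / np.pi)                    # numpy's sinc is sin(pi x)/(pi x)
ga   = np.exp(-x**2 / 5)                     # gaussian approximation (xi=1/5, zeta=0)
cga  = np.exp(-x**2 / 20) * np.cos(x / 2)    # cosine-gaussian (xi=1/20, zeta=1/2)

print("max |sinc - GA| :", np.max(np.abs(sinc - ga)))
print("max |sinc - CGA|:", np.max(np.abs(sinc - cga)))
\end{verbatim}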
The above observation leads us to the idea of {\em cosine-gaussian approximation} (CGA). Specifically, in \eq{Eq:empf:D} we replace the sinc function with \eqref{eq:sincapprox} and expand the phase mismatch $\Delta k_z$ up to the linear term in transverse wave vectors around central observation directions:
\begin{multline}
\text{sinc}\left(\frac{L}{2} \Delta k_z (\kp{s},\omega_s;\kp{i},\omega_i)\right) \approx \\
{\textstyle\frac{1}{2}}\exp\left(-{\textstyle\frac{1}{4}}\xi(\vec{D}_0+\vec{D}_1^T \V{\kappa})^2 L^2+
\frac{\text{i}}{2}\zeta (\vec{D}_0+\vec{D}_1^T \V{\kappa})L\right)+\text{c.c.}
\end{multline}
We used here the parametrization introduced in Eqs.~(\ref{eq:def:kappa})--(\ref{Eq:D1}).
After inserting the above expression into \eqref{Eq:empf:D}, the integrals over transverse wave vectors can be evaluated analytically as long as the pump and fiber modes are gaussian. This yields an expression for the effective phase matching function of the form:
\begin{multline}\label{eq:epmf:CGA}
\Theta^{(\text{C})}(\omega_s,\omega_i)=
\Gamma(\omega_s,\omega_i) e^{-f(\omega_s,\omega_i)} \cos[g(\omega_s,\omega_i)].
\end{multline}
The three functions appearing in the above formula
are given by:
\begin{eqnarray}
\Gamma(\omega_s,\omega_i)&=& \frac{\pi^2 \mathcal{N}}{\sqrt{\det \vec{K}}} \\
g(\omega_s,\omega_i) &=& \frac{1}{2}\zeta L \vec{D}_0+\frac{1}{16}\zeta L \vec{D}_1^T \V{K}^{-1}\left(\vec{B}_1+\frac{L^2}{2}\xi \vec{D}_0 \vec{D}_1\right)\\
f(\omega_s,\omega_i)&=& \vec{B}_0 + \frac{1}{4} \xi L^2 \vec{D}_0^2+\frac{1}{16}\vec{N}^T \vec{K}^{-1} \vec{N}
\end{eqnarray}
where we defined:
\begin{eqnarray}
\vec{K} &=& \vec{B}_2+\frac{1}{4}\xi L^2\vec{D}_1 \vec{D}_1^T\\
\vec{N} &=& \vec{B}_1+\frac{L}{2}\left(\xi L \vec{D}_0 +\zeta \right) \vec{D}_1.
\end{eqnarray}
For the sake of brevity we have omitted the frequency dependence. The expression for the effective
phase matching function in the gaussian approximation is easily obtained by inserting
$\xi= \frac{1}{5}$ and $\zeta=0$.
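For completeness, the closed-form expression (\ref{eq:epmf:CGA}) translates directly into a few lines of code; a minimal sketch (illustration only, assuming Python with NumPy, the coefficients $\vec{B}_j$, $\vec{D}_0$, $\vec{D}_1$ supplied for fixed frequencies, and $|\mc{N}|=1$):
\begin{verbatim}
import numpy as np

def theta_cga(B0, B1, B2, D0, D1, L, xi=1/20, zeta=1/2):
    """Cosine-gaussian approximation of the effective phase matching function,
    with |N| = 1.  Use xi=1/5, zeta=0 to recover the plain gaussian approximation."""
    K = B2 + 0.25 * xi * L**2 * np.outer(D1, D1)
    Nv = B1 + 0.5 * L * (xi * L * D0 + zeta) * D1
    Gamma = np.pi**2 / np.sqrt(np.linalg.det(K))
    g = 0.5 * zeta * L * D0 \
        + (zeta * L / 16) * D1 @ np.linalg.solve(K, B1 + 0.5 * xi * L**2 * D0 * D1)
    f = B0 + 0.25 * xi * L**2 * D0**2 + (1/16) * Nv @ np.linalg.solve(K, Nv)
    return Gamma * np.exp(-f) * np.cos(g)
\end{verbatim}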
In order to analyze the applicability of CGA, it is convenient to view the biphoton wave function given in \eq{eq:wf:fiber} as an integral over $\kp{s}$ and $\kp{i}$ of a product of two factors. The first one is the phase matching term $\text{sinc}[\Delta k_z(\kp{s},\omega_s;\kp{i},\omega_i) L /2]$, while the second one, which we will call here the beam term, is a triple product of the pump pulse spatio-temporal profile $A_p(\kp{s}+\kp{i},\omega_s+\omega_i)$ and the fiber mode profiles $u_s(\kp{s},\omega_s)$ and $u_i(\kp{i},\omega_i)$. The beam term defines the range of transverse wave vectors and frequencies for which the cosine-gaussian approximation of the phase matching term should be accurate. This condition is satisfied when the sinc argument $\Delta k_z(\kp{s},\omega_s;\kp{i},\omega_i) L /2$ does not exceed approximately $3\pi/2$.
Let us analyze this condition more closely. For the profiles assumed throughout this paper, the beam term takes a gaussian form:
\begin{multline}
A_p(\kp{s}+\kp{i},\omega_s+\omega_i)u_s(\kp{s},\omega_s)u_i(\kp{i},\omega_i) \propto \\
\exp\left( - \frac{w_p^2 \alpha^2}{2 c^2}(\nu_s-\nu_i)^2- \frac{\tau_p^2}{2}(\nu_s+\nu_i)^2
-\boldsymbol{\kappa}^T \vec{B}_2 \boldsymbol{\kappa}\right),
\end{multline}
where $\nu_\mu = \omega_\mu - \omega_0$ are detunings from the central frequency and we assumed
that the photons are collected at identical angles $\alpha_s=\alpha_i=\alpha$. In the exponent, we neglected the cross-term correlating wave vectors with frequencies.
The characteristic width of the gaussian function defines the relevant range of parameters. Thus the sum of the detunings is restricted by $|\nu_s+\nu_i| \lesssim \tau_p^{-1}$. Similarly, the range of relevant transverse wave vectors can be crudely characterized by the smallest eigenvalue of the matrix $\vec{B}_2$, which is equal to $w_s^2$ in the case of symmetric coupling $w_s=w_i$; the corresponding range of wave vectors is $|\boldsymbol{\kappa}|\lesssim 1/w_s$. In the case of perfect phase matching for the central wave vectors $\kp{s0}$, $\kp{i0}$ at the frequency $\omega_0$ of the down-converted photons, we estimate the argument of the sinc function by expanding
the wave vector mismatch $\Delta k_z$ up to the first order:
\begin{equation}\label{eq:CGA:phasemismatch}
\Delta k_z \approx \vec{D}_1(\omega_0,\omega_0)\V{\kappa}+ \beta(\nu_s+\nu_i) ,
\end{equation}
where $\beta= \left.\frac{\partial k_{pz}}{\partial \omega_p}\right|_{\omega_p=2\omega_0}- \left.\frac{\partial k_{sz}}{\partial \omega}\right|_{\omega=\omega_0} $. Thus we see that the CGA will be valid if, within the ranges defined by the beam term,
$|\V{\kappa}|$ and $|\nu_s+\nu_i|$ yield a sinc argument $\lesssim 3\pi/2$. This gives:
\begin{equation}
\tau_p \gtrsim \beta L
\end{equation}
and
\begin{equation}
w_s \gtrsim L |\vec{D}_1 |
\end{equation}
As the right hand sides in the above formulas are estimates, we rounded up numerical factors to simpler forms.
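To make the last step explicit: inserting the beam-term ranges $|\V{\kappa}|\lesssim 1/w_s$ and $|\nu_s+\nu_i|\lesssim \tau_p^{-1}$ into \eqref{eq:CGA:phasemismatch} bounds the sinc argument by
\begin{equation*}
\frac{L}{2}\left|\Delta k_z\right| \lesssim \frac{L\,|\vec{D}_1|}{2 w_s} + \frac{\beta L}{2\tau_p},
\end{equation*}
and demanding that each contribution stays below $3\pi/2$ reproduces the two conditions above, up to the rounded numerical factors.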
\section{Figures of merit}\label{section:FiguresOfMerit}
We will employ the computational methods presented in the preceding sections to analyze two parameters characterizing the usefulness of SPDC sources. The first one is the brightness, proportional to the probability of producing a
fiber-coupled photon pair by a single pump pulse:
\begin{equation}\label{eq:def:rc}
R_c = \int \text{d}\omega_s \text{d}\omega_i\, | \Psi(\omega_s,\omega_i)|^2.
\end{equation}
We will set the brightness unit by putting the multiplicative factor appearing in
\eq{eq:wf:intz} to be $|\mc{N}|=1$.
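For illustration, the brightness defined in \eq{eq:def:rc} can be estimated numerically by sampling the biphoton wave function on a frequency grid and applying a quadrature rule. The Python sketch below is a minimal example in which a placeholder gaussian stands in for $\Psi(\omega_s,\omega_i)$; it is not the code used to generate the results reported later.
\begin{verbatim}
# Minimal sketch: brightness R_c as a 2D integral of |Psi|^2 (Eq. def:rc),
# estimated with a Riemann sum.  The wave function psi below is a placeholder
# gaussian; in practice it is obtained with one of the methods described in
# the preceding sections.
import numpy as np

def brightness(psi, d_omega_s, d_omega_i):
    """Riemann-sum estimate of R_c = int |psi|^2 d(omega_s) d(omega_i)."""
    return np.sum(np.abs(psi)**2) * d_omega_s * d_omega_i

omega0 = 2.4e15                         # ~780 nm central frequency [rad/s]
nu = np.linspace(-5e13, 5e13, 64)       # detunings [rad/s]
dnu = nu[1] - nu[0]
ws, wi = np.meshgrid(omega0 + nu, omega0 + nu, indexing="ij")
sigma = 1e13                            # placeholder spectral width [rad/s]
psi = np.exp(-((ws - omega0)**2 + (wi - omega0)**2) / (2 * sigma**2))

print("R_c estimate:", brightness(psi, dnu, dnu))
\end{verbatim}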
The second important property of photon pairs is their suitability for multiphoton interference experiments. When interfering photons from independent sources, their spectral amplitudes cannot carry any distinguishing information about the origin of the photons. This means that the biphoton wave function for each pair should be factorable. The degree of factorability can be quantified with the help of the
Schmidt decomposition, which for the normalized wave function
$\Psi(\omega_s,\omega_i)/\sqrt{R_c}$ takes the form \cite{law2000}:
\begin{equation}\label{eq:schmodt}
\frac{1}{\sqrt{R_c}} \Psi(\omega_s,\omega_i)=\sum_{n=0}^\infty
\sqrt{\varsigma_n} \phi_n^s(\omega_s)\phi_n^i(\omega_i).
\end{equation}
In the above expression, $\phi_n^s(\omega_s)$ and $\phi_n^i(\omega_i)$ are two orthonormal sets
of mode functions for the signal and the idler photons. The nonnegative parameters $\varsigma_n$ characterize the contribution of each pair of modes to the superposition. They satisfy the normalization constraint $\sum_{n=0}^{\infty}\varsigma_n = 1$ and it is convenient to arrange them in decreasing order. Perfect factorability thus corresponds to the condition $\varsigma_0 =1$.
The degree of factorability can be quantified by the visibility of two-photon interference. Suppose that two heralded signal photons produced by identical sources are superposed on a 50:50 beamsplitter and the depth of the Hong-Ou-Mandel dip \cite{Hong1987} is measured. The depth is given by the nonnegative expression
\begin{equation}\label{eq:def:purity}
\mathcal{P}=\sum_{n=0}^\infty \varsigma_n^2,
\end{equation}
which will be called the \emph{purity parameter} of a photon pair.
In general $\mathcal{P}\le 1$ and the equality sign holds only for a factorable biphoton wave function. The purity parameter is the inverse of the cooperativity parameter introduced in Ref.~\cite{Huang1993}.
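The purity parameter is straightforward to evaluate numerically: discretizing $\Psi(\omega_s,\omega_i)$ on a grid and performing a singular value decomposition yields the Schmidt coefficients, as in Ref.~\cite{law2000}; this is the procedure used in Sec.~\ref{section:SpectrallyUncorrelatedPairs}. A minimal Python sketch, with a placeholder correlated gaussian as input:
\begin{verbatim}
# Minimal sketch: purity parameter P = sum_n varsigma_n^2 obtained from the
# singular value decomposition of a discretized biphoton wave function.
import numpy as np

def purity(psi_grid):
    """psi_grid[i, j] ~ Psi(omega_s_i, omega_i_j) on a uniform grid."""
    s = np.linalg.svd(psi_grid, compute_uv=False)
    varsigma = s**2 / np.sum(s**2)        # normalized Schmidt coefficients
    return np.sum(varsigma**2)

# Placeholder: a correlated gaussian on a 32 x 32 grid of detunings.
nu = np.linspace(-3.0, 3.0, 32)
ns, ni = np.meshgrid(nu, nu, indexing="ij")
rho = 0.8                                 # degree of frequency correlation
psi = np.exp(-(ns**2 + ni**2 - 2 * rho * ns * ni) / (2 * (1 - rho**2)))
print("purity parameter P:", purity(psi))  # equals 1 only for factorable psi
\end{verbatim}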
Typically, photon pairs are spectrally filtered in order to improve their characteristics and
to lower the background count rates. The effects of spectral filtering can be taken into account by multiplying the two-photon wave function by spectral amplitude transmissions $\Lambda_\mu(\omega_\mu)$ characterizing the filters:
\begin{equation}\label{eq:subst:bi}
\Psi(\omega_s,\omega_i)\rightarrow {\Lambda_s(\omega_s)\Lambda_i(\omega_i)}\Psi(\omega_s,\omega_i)
\end{equation}
Note that the above substitution correctly takes into account the decrease in count rates resulting from spectral filtering. We will model spectral filters using gaussian profiles with respective widths $\sigma_s$ and
$\sigma_i$, assuming perfect transmission at the peak frequency $\omega_0$:
\begin{equation}\label{eq:subst:one}
\Lambda_\mu(\omega)=\exp\left(-\frac{(\omega-\omega_0)^2}{ 2 \sigma_\mu^2 }\right),\quad \mu=s,i
\end{equation}
It is worthwhile to stress that the spatial filtering imposed by SMFs and the spectral filtering implemented with interference filters are of a different nature. The SMFs perform coherent filtering at the field level, i.e.\ they add field amplitudes, while spectral filters transmit each frequency component independently.
Before discussing characteristics of realistic sources, it is insightful to consider the
limit of perfect phase matching, based on an assumption that
$\Delta k_z(\kp{s},\omega_s;\kp{i},\omega_i) L/2 \approx 0$ over the relevant range of frequencies and wave vectors. This approximation means that we can put $\vec{D}_0= \vec{D}_1=\vec{D}_2=0$, which makes the integrand in \eqref{eq:epmf:N} independent of $z$ and leads to a very simple formula for the fiber-coupled biphoton wave function:
\begin{multline}\label{eq:wf:zero}
\Psi^{(0)}(\omega_s,\omega_i)= \\
4\mathcal{N}\sqrt[4]{\pi}\frac{ L \bar w^2 \sqrt{\tau_p}} {w_i w_p w_s}
\exp\left(-\frac{ n_o^2(\omega_0) \bar{w}^2 }{2c^2} (\omega_s\alpha_s-\omega_i\alpha_i)^2 \right)
\\
\times \exp \left(-\frac{\tau_p^2}{2}(\omega_s +\omega_i
-2\omega_0)^2\right)
\end{multline}
where by the superscript $(0)$ we indicated the approximation of perfect phase matching. We also took the refractive indices at the central frequency $n_o(\omega_s)\approx n_o(\omega_i)\approx n_o(\omega_0)$ and denoted
\begin{equation}
\bar{w}=\left(\frac{1}{w_s^2}+\frac{1}{w_i^2}+\frac{1}{w_p^2} \right)^{-1/2}.
\end{equation}
Let us note that the assumption $\Delta k_z L/2 \approx 0$ implies a specific geometry of the setup. First, it means that the pump, signal and idler beams maintain good spatial overlap through the entire length of the crystal. Secondly, the length $L$ of the crystal must be much shorter than the characteristic Rayleigh range of the beams.
The wave function given in \eqref{eq:wf:zero} is gaussian, which leads to closed analytical formulas for parameters of interest.
The brightness can be easily calculated to be equal to:
\begin{equation}\label{eq:zero:rc}
R_c^{(0)}= \frac{16 \pi^{3/2} c L^2}{n_0(\omega_0)(\alpha_s+\alpha_i)}\frac{\bar w^3}{ w_s^2 w_i^2 w_p^2}
\end{equation}
It is instructive to analyze the scaling of the pair production rate in the parameters involved. The quadratic dependence on the crystal length $L$ is a result of a coherent summation of the probability amplitudes of generating a photon pair over the entire range of $-L/2 \le z \le L/2$.
Assuming that the waists of the pump, signal, and idler beams are of the same order characterized by $w$, the pair production rate scales as $1/w^3$. This scaling can be interpreted as a result of an interplay of two effects. The first one is the dependence of the nonlinear process on the transverse spatial dimension of the interacting modes. Suppose that the modes are confined to a transverse area of the order of $w^2$. Then their normalization includes a factor $1/w$ for each of the modes. As the probability amplitude for pair generation involves an integral of a product of three mode functions over an area of size $w^2$, this gives its scaling as $1/w$. Squaring this result gives the probability of pair generation scaling as $1/w^2$. The second effect is the broadening of the spectrum of the produced photons with decreasing waists seen in the first exponent in \eqref{eq:wf:zero}, which yields an additional factor of $1/w$.
The expression calculated in \eqref{eq:zero:rc} enables us to optimize the pair production rate with respect to some parameters of the setup. For example, suppose that the waists $w_s$ and $w_i$ of the collection modes are fixed. An easy calculation shows that the maximum production rate is achieved for the pump beam waist $w_p$ given by:
\begin{equation}\label{eq:zero:optimalwaist}
w_p=\frac{w_s w_i}{\sqrt{2(w_s^2 +w_i^2)}}
\end{equation}
which reduces to $w_p=w_s/2$ for equal waists of the collection modes. We will use this coupling strategy throughout the rest of the article. Note that in the case of a monochromatic pump, in the crude approximation of perfect phase matching the condition for optimal brightness for short crystal lengths takes the form $w_p=w_s/\sqrt{2}$ \cite{Ling2008}.
As noted in Refs.~\cite{URen2003, URen2005}, in the approximation of perfect phase matching spectral decorrelation
within a photon pair is achieved when
\begin{equation}\label{eq:zero:dec}
\tau_p=\frac{w_p\, \alpha_s \alpha_i}{c}.
\end{equation}
A more general analytical condition can be derived using the gaussian approximation \cite{Dragan2004}. Within this model the biphoton wave function takes the following form:
\begin{equation}\label{eq:c1}
\Psi^{(\text{G})}(\omega_s,\omega_i)=\sqrt{\frac{\tau_p}{\sqrt{\pi}}}\Gamma(\omega_s,\omega_i) e^{-f(\omega_s,\omega_i)-{\tau_p^2}(\omega_s +\omega_i -2\omega_0)^2/2}
\end{equation}
Taking $\Gamma(\omega_s,\omega_i)\approx\Gamma(\omega_0,\omega_0)$ and expanding $f(\omega_s,\omega_i)$ up to the second order in frequencies around $\omega_0$ yields a gaussian expression in detunings. Spectral decorrelation corresponds to the vanishing cross-term $(\omega_s-\omega_0)( \omega_i-\omega_0)$ in the exponent, which gives:
\begin{equation}\label{eq:tpdec}
\tau_p^2=\left.2\frac{ \partial^2 f (\omega_s,\omega_i)}{\partial\omega_s\partial\omega_i}\right|_{\omega_s=\omega_i=\omega_0}.
\end{equation}
More accurate models of the effective phase matching function in Eqs.~(\ref{eq:epmf:N}) and (\ref{eq:epmf:CGA})
do not yield a decorrelation condition in a closed analytical form.
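Even when a closed-form expression is not available, the second mixed derivative entering \eqref{eq:tpdec} can be estimated numerically, for instance with a central finite difference applied to the function $f(\omega_s,\omega_i)$ of the chosen model. A minimal Python sketch, with a placeholder $f$ standing in for the model function:
\begin{verbatim}
# Minimal sketch: decorrelating pump duration from Eq. (tpdec),
# tau_p^2 = 2 * d^2 f / (d omega_s d omega_i) at (omega_0, omega_0),
# estimated with a second-order central finite difference.
import numpy as np

def mixed_derivative(f, w0, h):
    """Central-difference estimate of d^2 f / (dws dwi) at (w0, w0)."""
    return (f(w0 + h, w0 + h) - f(w0 + h, w0 - h)
            - f(w0 - h, w0 + h) + f(w0 - h, w0 - h)) / (4.0 * h**2)

# Placeholder f with known mixed derivative a = 1e-26 s^2 at (omega0, omega0).
omega0, a = 2.4e15, 1e-26
f_demo = lambda ws, wi: a * (ws - omega0) * (wi - omega0)

tau_p = np.sqrt(2.0 * mixed_derivative(f_demo, omega0, 1e12))
print("decorrelating tau_p [s]:", tau_p)   # sqrt(2e-26) ~ 1.4e-13 s
\end{verbatim}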
\section{Comparison}\label{section:Comparison}
Let us now compare the computational methods introduced in the preceding sections for typical experimental settings. In \figref{fig:epmf} we depict the effective phase matching function $\Theta(\omega_s,\omega_i)$ for two exemplary lengths of the nonlinear medium calculated using direct numerical integration, the paraxial approximation,
the cosine-gaussian approximation and the gaussian approximation. Calculations were carried out for a beta-barium borate crystal with its optical axis lying in the plane of the collected modes and cut at $\theta_c=30^\circ$ with respect to the $z$ axis. This corresponds to a symmetric cone half-opening angle equal to $\alpha=2.2^\circ$ for frequency-degenerate photons at $780$~nm. The beam waists were set to rather low values $w_s=w_i=2w_p=70~\mu$m to test the applicability limits of the paraxial approximation.
As seen in \figref{fig:epmf}, the main qualitative difference between the computational methods is the reproduction of the side lobes. The impact of the side lobes on observable quantities depends on the spectral width of the pump pulse. If the spectral bandwidth is narrower than the width of the central peak, then all the models can be expected to yield similar results. Because the characteristic width of $\Theta(\omega_s,\omega_i)$ along the axis $\omega_s = \omega_i$ decreases with a longer crystal length,
this regime corresponds to sufficiently narrow spectral bandwidths and short crystals. Beyond this regime, the CGA can be expected to yield more accurate results than the GA in the intermediate range of crystal lengths, as it correctly reproduces the lobes closest to the central peak.
\begin{figure}
\caption{The effective phase matching function $\Theta(\omega_s,\omega_i)$ calculated using (a, b) direct numerical integration; (c, d) paraxial approximation; (e, f) cosine-gaussian approximation and (g, h) gaussian approximation for the crystal length
(a, c, e, g) $L=100~\mu$m and (b, d, f, h) $L=1$~mm. The pump and collecting beam waists were set to $w_s=w_i=2w_p=70~\mu$m. The angular frequencies $\omega_s$ and $\omega_i$ are labelled with the corresponding wavelengths.}
\label{fig:epmf}
\end{figure}
These predictions are confirmed by the calculation of the brightness $R_c$ as a function of the crystal length using different models, with the results shown in \figref{fig:comaprisonRCRS}. The full width at half maximum of the gaussian pump pulse was taken equal to $\tau_p^{\text{FWHM}}=\tau_p \sqrt{\ln 2} = 100$~fs.
The brightness has been calculated through two-dimensional numerical integration of $|\Psi(\omega_s,\omega_i)|^2$ over the signal and the idler frequencies on a $32 \times 32$ square grid centered at $\omega_0$ and covering the relevant frequency range where the wave function is non-negligible. We have found that a further increase of the grid density to $64\times 64$ did not change the results noticeably. In the paraxial approximation, the effective phase matching function $\Theta^{(\text{P})}$ was evaluated at each point of the grid using Gauss-Kronrod quadrature with three-digit precision. Results based on numerical integration of $|\Psi(\omega_s,\omega_i)|^2$ involving the CGA and GA expressions for the effective phase matching function have been labeled respectively as \emph{numerical CGA} and \emph{numerical GA}.
In addition, we present results of applying a further simplification to CGA and GA, labeled as \emph{analytical CGA} and \emph{analytical GA}. The simplification consists in expanding
the functions $f(\omega_s,\omega_i)$ and $g(\omega_s,\omega_i)$ that appear in \eqref{eq:epmf:CGA} around the central frequency $\omega_0$ up to the second order and replacing $\Gamma(\omega_s,\omega_i)$ by its value at
$\omega_s=\omega_i=\omega_0$. After this expansion the squared absolute value of the biphoton wave function becomes a sum of three gaussian components and the integration over the frequencies $\omega_s$ and $\omega_i$ can be carried out analytically.
\figref{fig:comaprisonRCRS} shows that for short crystals all the models give similar results. Furthermore, in this regime the brightness $R_c$ exhibits quadratic dependence on the crystal length, which agrees with \eqref{eq:zero:rc} derived under the assumption of perfect phase matching. As expected, with an increasing crystal length the GA model departs earlier from the numerical results than the CGA model.
\begin{figure}
\caption{(Color online) The source brightness $R_c$ calculated using different numerical methods, specified in the inset, as a function of the crystal length $L$, for the waists $w_s=w_i=2w_p=70~\mu$m and the pump pulse duration $\tau_p^{\text{FWHM}}=100$~fs.}
\label{fig:comaprisonRCRS}
\end{figure}
A more thorough way to compare the paraxial approximation with direct numerical integration is to evaluate two quantities: the scalar product between the normalized biphoton wave functions $\Phi^{(\text{P})}$ and $\Phi^{(\text{D})}$ obtained using both methods and the ratio of the corresponding pair production rates $R_c^{(\text{P})}/R_c^{(\text{D})}$. We carried out these calculations in an unfavorable regime of a long crystal $L=2$~mm, ultrashort pump pulses $\tau_p^{\text{FWHM}}=20$~fs,
and strong focusing $w_s=w_i=2w_p=40~\mu$m. We found that both quantities differed from one by less than $10^{-3}$. It should be noted that the computational effort required by the paraxial approximation was reduced in our calculations by a factor of $\sim 10^4$ compared to the direct numerical integration.
Finally, let us analyze the coincidence count rate $R_c$ as a function of the pump beam waist $w_p$ and the fiber mode waists in a symmetric setup, when $w_s=w_i$. In \figref{fig:Comparizon:Method} we depict results obtained using the paraxial approximation for two exemplary lengths of the crystal. It is seen that for a fixed waist of the fiber modes the brightness has a well pronounced maximum in $w_p$. This maximum is located to a good approximation at $w_p=w_s/2$, which is in agreement with the result derived within the elementary model of perfect phase matching in \eqref{eq:zero:optimalwaist}. This motivated the choice of $w_s=w_i=2w_p$ in the presented examples.
\begin{figure}
\caption{(Color online) The natural logarithm of the brightness $\ln R_c$ as a function of pump beam waist $w_p$ and fiber mode waists $w_s=w_i$ for the crystal length (a) $L=1$~mm and (b) $L=100~\mu$m.
The dashed (red online) lines depict the condition $w_p= w_s/2$ specified in \eqref{eq:zero:optimalwaist}.}
\label{fig:Comparizon:Method:L01}
\label{fig:Comparizon:Method:L2}
\label{fig:Comparizon:Method}
\end{figure}
\section{Spectrally uncorrelated pairs}\label{section:SpectrallyUncorrelatedPairs}
A necessary condition for high-visibility multiphoton interference is the lack of distinguishing information about the origin of the photons, which means that each photon should be prepared in an identical pure wavepacket. The most obvious way to achieve this regime is to insert interference filters whose bandwidth is smaller than the characteristic scale of spectral correlations within photon pairs. An intriguing alternative has been presented in Ref.~\cite{URen2005}, which proposed to remove spectral correlations by exploiting geometric effects in SPDC. The purity of the produced photons needs to be analyzed in conjunction with other characteristics of the source, such as the pair production rate. In this section we will employ our computational tools to compare properties of spectrally decorrelated pairs generated by different methods.
Let us first analyze the geometric approach of Ref.~\cite{URen2005}. The underlying physics can be understood intuitively by looking at the biphoton wave function in the perfect phase matching approximation given by \eqref{eq:wf:zero}. The spectral pump amplitude introduces anticorrelations between frequencies of the down-converted photons, while the pump beam waist and emission angles define the degree of positive correlations. By balancing these two effects one can obtain a factorable biphoton wave function. More generally, without the approximation of perfect phase matching, one needs to analyze correlations introduced by the function $\Theta(\omega_s,\omega_i)$ defined in \eqref{eq:def:theta} combined with the spectral pump amplitude. As the nonlinear medium we considered a BBO crystal in the same configuration as discussed in Sec.~\ref{section:Comparison}. As the basic tool, we chose the paraxial method developed in \secref{section:NumericalApproach} due to its high precision and computational effectiveness. In order to evaluate the purity parameter $\mathcal{P}$ measuring the degree of spectral correlations, we used the approach presented by Law \emph{et al.}~\cite{law2000}. The method is based on the singular value decomposition of the matrix representation of the biphoton wave function $\Psi(\omega_s,\omega_i)$ on a sufficiently fine discrete grid. The normalized singular values approximate the Schmidt coefficients $\varsigma_n$ and as such are used to evaluate the purity parameter $\mathcal{P}$. We found it sufficient to take a $32\times32$ grid; a further increase of the grid density did not make any noticeable difference.
\begin{figure}
\caption{Contour plots of (a, b) the purity parameter $\mathcal{P}$ and (c, d) the source brightness $R_c$ as functions of the pump pulse duration $\tau_p^{\text{FWHM}}$ and the collecting mode waist $w_s=w_i=2w_p$, for two crystal lengths.}
\label{fig:ksimap}
\end{figure}
In \figref{fig:ksimap}(a,b) we present the purity parameter for two typical lengths of the crystal as a function of the pulse duration $\tau_p^{\text{FWHM}}$ and the collecting mode waist $w_s$. We assumed that the waists of the fiber modes and pump beam are $w_s = w_i = 2w_p$, which is motivated by the results presented in \figref{fig:Comparizon:Method}. The contour plots exhibit a clear relation between $\tau_p^{\text{FWHM}}$ and $w_s$ that leads to minimized spectral correlations between photons. For a comparison, \figref{fig:ksimap}(a) and (b) depict also the purity condition derived in \eqref{eq:tpdec} using the GA model, as well as the predictions of the perfect phase matching approximation given in \eqref{eq:zero:dec}. It is seen that for the shorter crystal length $L=100$~$\mu$m the simple analytical formula of \eqref{eq:zero:dec} gives accurate results. This is because the spectral anticorrelations are predominantly defined by the pump bandwidth rather than the phase matching of the crystal. This is no longer valid for the length $L=1$~mm, where the effective bandwidth of the down-conversion process becomes strongly affected by the phase matching. These observations are consistent with results presented in \figref{fig:comaprisonRCRS}: for $L=100$~$\mu$m the pair production rate is accurately given by the perfect phase matching approximation, while for $L=1$~mm effects of finite phase matching bandwidth are clearly seen.
The relation between the collecting mode waist $w_s=w_i$ and the pump pulse duration $\tau_p^{\text{FWHM}}$ that leads to minimized spectral correlations gives us some flexibility to optimize the source with respect to other parameters. In \figref{fig:ksimap}(c,d) we present the source brightness $R_c$ as a function of $w_s$ and $\tau_p$. Note that in our calculations we constrain the pump beam waist by imposing $w_s=w_i=2w_p$. It is seen that $R_c$ can be increased by reducing the fiber mode waist $w_s$. As Figs.~\ref{fig:ksimap}(c) and \ref{fig:ksimap}(d) depict the pair production rate in the same units, we can compare the brightness for the two crystal lengths. Assuming that we have no restrictions on the pump pulse duration, a shorter crystal can produce more uncorrelated photon pairs. This is because for $L=1$~mm stronger spectral anticorrelations overwhelm the benefit of a longer nonlinear medium. However, in a realistic situation there is usually a technical minimum on the pump pulse duration. For concreteness, let us assume it to be
$\tau_p^{\text{FWHM}} = 100$~fs. An inspection of \figref{fig:ksimap} shows that under the condition of nearly ideal decorrelation, defined by the value of the purity parameter $\mathcal{P}\approx 0.99$, a higher brightness, approximately equal to $R_c \approx 0.046$, is obtained when the fiber mode waist is $w_s\simeq 1$~mm and the crystal length is $L=1$~mm. We found that for even longer crystals decorrelation can be reached only using longer, less focused pump pulses, which lowers the source brightness.
These limitations raise the question whether a more efficient strategy may rely on collecting tightly focused modes and removing spectral correlations with interference filters. Let us consider the same pump pulse duration $\tau_p^{\text{FWHM}} = 100$~fs and crystal length $L=1$~mm as before, but tighten the fiber mode waists to $w_s=100~\mu$m.
The result is significantly increased brightness, but at the cost of introducing spectral correlations. The effects of inserting interference filters into such a setup are shown in \figref{fig:StrongFocus}, where we depict the brightness $R_c$ and
the purity parameter $\mathcal{P}$ as a function of the spectral filter bandwidth. It is seen that for the bandwidth $\sigma \approx 2.6$~nm the purity parameter reaches the value $\mathcal{P} \approx 0.99$, while the brightness is $R_c \approx 3.8$, which is significantly higher than before. Thus the benefit of increased brightness is retained despite spectral filtering.
\begin{figure}
\caption{(Color online) The brightness $R_c$ (dashed blue line, left vertical scale) and the purity parameter $\mathcal{P}$ (right vertical scale) as a function of the spectral filter bandwidth.}
\label{fig:StrongFocus}
\end{figure}
In order to gain more insight into the trade-off between the source brightness and spectral correlations, we calculated the maximum filter bandwidth that gives the purity $\mathcal{P} \simeq 0.99$ for a range of pump beam waists $w_p$, while keeping other parameters of the setup identical as in previous examples. The results are shown in \figref{fig:ScanTpandWp}. It is seen that the filter bandwidth across the analyzed range does not deviate significantly from the value $\sigma \cong 2.7$~nm, while the brightness increases substantially with tighter focusing. This can be explained by the fact that the spectral filter bandwidth is defined by the requirement to remove frequency anticorrelations which depend primarily on the crystal length and the pump pulse duration rather than the beam waist.
\begin{figure}
\caption{(Color online) The brightness $R_c$ (solid red line, left vertical scale) as a function of the collected mode waist $w_s$ obtained for the maximum filter bandwidth (dashed blue line, right vertical scale) which yields the purity parameter above $\mathcal{P}=0.99$.}
\label{fig:ScanTpandWp}
\end{figure}
\section{Conclusions}
In this paper we introduced and utilized approximate methods that alleviate the numerical load necessary to model SPDC sources while retaining the accuracy of the results in physically relevant regimes. Our approach was based on an observation that optical fibers collecting photons effectively define a relatively narrow range of wave vectors that needs to be included in calculations. This justified applying the paraxial approximation, which made a substantial portion of the problem tractable analytically and significantly reduced the remaining numerical effort. The paraxial approximation can be also combined with a simplification of the two-photon wave function to an analytically manageable form that led to closed formulas. We exploited these strategies to analyze performance parameters that characterize the usefulness of SPDC sources for quantum information applications, such as the pair production rate and the spectral purity parameter that is critical in multiphoton interference experiments involving multiple sources.
The choice of a computation method depends on the range of the setup parameters. The most difficult regime to deal with is that of very broadband, tightly focused pump pulses and long crystals. It is then necessary to include with high precision the phase matching function over a wide range of frequencies and transverse wave vectors. The most universal method is then direct numerical integration, which however requires a tremendous computational effort. In practical situations, the paraxial approximation delivers highly accurate results with a significantly reduced numerical load for typical setup parameters. The validity of the paraxial approximation can be checked with relatively low effort by comparing it with direct numerical integration only at the edges of the region of interest that correspond to the most unfavorable cases. Such a confirmation allows one to apply the paraxial approximation throughout the entire region of interest, reducing the overall computational cost. For the examples studied in Sec.~\ref{section:SpectrallyUncorrelatedPairs}, the paraxial approximation has been verified to yield results that did not differ by more than a few percent from direct numerical integration. In more restricted scenarios, one may consider using the cosine-gaussian approximation, which extends the validity of the previously used gaussian approximation. Results obtained with these methods can be used as a starting point for designing source characteristics with more elaborate and precise tools. We also discussed a crude approximation of perfect phase matching, which gives simple, qualitative insights into the roles played by various source parameters.
The numerical methods presented in this work can be used to analyze various aspects of down-conversion sources that are relevant to experimental implementations of quantum information processing protocols. We discussed here spectral decorrelation, which is a necessary condition to achieve high-visibility multiphoton interference between independent sources, in connection with the pair production rate. For exemplary settings
chosen for the analysis, we found that spectral filtering combined with tight focusing of the pump beam can deliver higher brightness than balancing the spectral correlations using the geometry of the setup. The paraxial approximation can also be extended to analyze properties of an individual photon generated in the down-conversion process, with the degrees of freedom of the conjugate photon traced out. This approach has been successfully applied to model the results of a measurement of the single-photon density matrix in the spectral domain reported in Ref.~\cite{wasilewski2007}. Theoretical details of this work will be presented elsewhere \cite{Kolenderski2009a}. Furthermore, the single photon count rates allow us to calculate the heralding efficiency, defined as the ratio of the pair production rate to the count rate on the trigger detector. This is another important parameter characterizing the usefulness of down-conversion sources \cite{Mosley2008} that can be efficiently submitted to numerical optimization using the paraxial approximation. We aim to make this the subject of a separate publication.
The numerical results presented in this paper have been obtained using a Mathematica code which can be downloaded from \footnote{{http://www.fizyka.umk.pl/$\sim$kolenderski/}}.
\end{document}
\begin{document}
\title{Peaks and jumps reconstruction with B-splines scaling functions}
\author{Luis Ortiz-Gracia}
\address{Centre de Recerca Matem\`{a}tica,
Campus de Bellaterra, Edifici C,
08193 Bellaterra
(Barcelona), Spain}
\email{[email protected]}
\author{Josep J. Masdemont}
\address{Departament de Matem\`{a}tica Aplicada I,
Universitat Polit\`{e}cnica de Catalunya,
Diagonal 647,
08028 Barcelona, Spain}
\email{[email protected]}
\date{October 2012}
\begin{abstract}
We consider a methodology based on B-spline scaling functions to numerically
invert Fourier or Laplace transforms of functions in the space $L^2(\mathbb{R})$. The original function is approximated
by a finite combination of B-spline basis functions of order $j$, and we provide
analytical expressions for the recovered coefficients. The methodology is particularly well
suited when the original function or its derivatives present peaks or jumps due to discontinuities
in the domain. We show in the numerical experiments the robustness and accuracy of the method.
\end{abstract}
\maketitle
\renewcommand{\refname}{References}
\renewcommand{\figurename}{Figure}
\section{Introduction}
Fourier and Laplace transforms are useful in a wide number of applications in
science and engineering. As is well known, the Fourier transform is
closely related to the Laplace transform for functions vanishing on the
negative time axis.
There is a strong interest in the efficient numerical inversion of Laplace
transforms (\cite{Abate1996},\cite{Abate2000}) and Fourier transforms, due to
the fact that the solutions to some problems are known in the transform domain
rather than in the original domain. An increasing number of papers have recently
appeared to invert Fourier transforms with wavelets, like \cite{Gao2003} and
\cite{Gao2005} with coiflets wavelets and \cite{Greene2008} with Mexican,
Morlet, Poisson and Battle-Lemari\'e wavelets. In particular in the Financial
Engineering context, \cite{Haven2009} inverts a Laplace transform by means of
B-splines wavelets of order $1$.
Recently, a new method called the COS method, developed in
\cite{Fang2008} for solving the inverse Fourier integral, has proved
capable of accurately recovering a function from its Fourier transform in a
short CPU time, while also being very easy to implement. However, when the
function to be recovered presents discontinuities or is highly peaked, a
large number of terms in the expansion must be considered to reduce the approximation
error.
In this paper we present a novel approximation based on B-spline scaling
functions to numerically invert Fourier transforms that is particularly
suited
for functions that exhibit peaks and/or jumps in their domain. There are some
properties that, at first glance, make these basis functions particularly
suited to approximate such non-smooth functions. B-splines are the
most regular scaling functions with the shortest support for a given
polynomial degree. Another important fact is their explicit formulation in the
time (or space) domain as well as in the frequency domain. However, a slight
drawback of these basis functions is that the system they form is
semi-orthogonal, i.e., the scaling functions are orthogonal among different
scales but not necessarily at the same scale.
In previous work \cite{Masdemont2011} the authors numerically inverted the Laplace transform of a distribution
function in the interval $[0,1]$ making use of the Haar scaling functions. Now, in the present paper,
we consider the problem of inverting the Fourier transform to recover an
$L^2(\mathbb{R})$ function by approximating it by a finite sum of B-splines
scaling functions of order $j$ (where $j=0$ is the particular case of the Haar
system). We also provide a list of the different errors accumulating
within the numerical procedure. So, here, the
Fourier inversion is carried out with B-splines scaling functions rather than with
wavelet functions (\cite{Greene2008}) or a combination of both
(\cite{Gao2003},\cite{Gao2005}). We fix the scale parameter in the wavelet expansion, so that only the translation
parameter remains, which facilitates the inversion procedure.
Furthermore, we provide an analytical
expression for the coefficients of the approximation.
As will be shown in the section devoted to numerical examples, the Wavelet
Approximation (WA) that we present is well suited to detecting jumps or peaks
produced by discontinuities in the function itself or in its first derivatives.
In contrast, the COS method is better at approximating analytic functions.
The paper is organized as follows. In Section 1.1 we give a short literature review regarding the Laplace transform inversion.
Section 2 gives a brief introduction
concerning multiresolution analysis and B-splines scaling functions. In Section 3 we present
the Wavelet Approximation method to recover functions from their Fourier
transforms by means of B-spline scaling functions. Section 4 is devoted to the COS method, while numerical experiments comparing the
Wavelet Approximation and the COS method are shown in Section 5.
Finally, Section 6 concludes.
\subsection{Laplace transform inversion}
Suppose that $f$ is a real- or complex-valued function of the variable $x>0$ and $s$ is a real or complex parameter. We define the \emph{Laplace transform} of $f$ as,
\begin{equation} \label{LT_definition}
\widetilde{f}(s)=\int_0^{+\infty} e^{-sx} f(x)dx=\lim_{\tau \rightarrow +\infty} \int_{0}^{\tau} e^{-sx}f(x)dx,
\end{equation}
whenever the limit exists (as a finite number).
We state the inverse transform as a theorem (see \cite{Dyke2001} for a detailed proof).
\begin{teor} \label{LT_inversion_teor}
(Bromwich inversion integral) If the Laplace transform of $f(x)$ exists, then,
\begin{equation} \label{LT_inversion_formula}
f(x)=\lim_{k \rightarrow +\infty} \frac{1}{2 \pi i} \int_{\sigma-ik}^{\sigma+ik} \widetilde{f}(s)e^{sx}ds, \quad x>0,
\end{equation}
where $\left |f(x) \right | \le e^{\Sigma x}$ for some positive real number $\Sigma$ and $\sigma$ is any other real number such that $\sigma > \Sigma$.
\end{teor}
The usual way of evaluating this integral is via the residue method, taking a closed contour often called the Bromwich contour.
In this section we present some numerical algorithms to invert the Laplace transform.
A natural starting point for the numerical inversion of Laplace transforms is the Bromwich inversion integral stated in
Theorem \ref{LT_inversion_teor}.
If we choose a specific contour and perform the change of variables $s=\sigma+iu$ in (\ref{LT_inversion_formula}), we obtain an integral of a
real-valued function of a real variable. Then, after algebraic manipulation and application of the Trapezoidal Rule, we obtain,
\begin{equation} \label{LT_inversion_formula_real_trap}
f(x) \simeq f_h(x)=\frac{he^{\sigma x}}{\pi} \Re \left(\widetilde{f}(\sigma) \right )+ \frac{2he^{\sigma x}}{\pi} \sum_{k=1}^{+\infty} \Re \left(\widetilde{f}(\sigma+ikh) \right ) \cos(khx),
\end{equation}
where $\Re(z)$ denotes the real part of $z$.
A detailed analysis of the errors can be found in \cite{Abate2000}.
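Once the series is truncated after sufficiently many terms, formula (\ref{LT_inversion_formula_real_trap}) is straightforward to implement. A minimal Python sketch follows; the test transform $\widetilde{f}(s)=1/(s+1)$, whose inverse is $e^{-x}$, is our own illustrative choice.
\begin{verbatim}
# Minimal sketch of (LT_inversion_formula_real_trap): trapezoidal-rule
# inversion of a Laplace transform, truncated after n_terms terms.
# Test case: F(s) = 1/(s+1), whose inverse transform is exp(-x).
import numpy as np

def bromwich_trapezoid(F, x, sigma=0.5, h=0.05, n_terms=5000):
    k = np.arange(1, n_terms + 1)
    series = np.sum(np.real(F(sigma + 1j * k * h)) * np.cos(k * h * x))
    return (h * np.exp(sigma * x) / np.pi) * (np.real(F(sigma)) + 2.0 * series)

F = lambda s: 1.0 / (s + 1.0)
for x in (0.5, 1.0, 2.0):
    print(x, bromwich_trapezoid(F, x), np.exp(-x))  # compare with exact value
\end{verbatim}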
As pointed out in \cite{Abate2000}, the Bromwich inversion integral is not the
only inversion formula and there are quite different numerical inversion algorithms.
We refer the readers to the Laguerre series representation given in \cite{Abate1996}, which is known to be an efficient method for smooth functions.
However, if $f$ fails to be smooth at even a single value of $x$, then the
Laguerre method has difficulties at every value of $x$.
In the context of numerical Laplace inversion, \cite{Gao2003} recovers the function $f$ with a procedure based on wavelets.
They consider $s=\beta+i\omega$
in expression (\ref{LT_inversion_formula}), where $\omega$ is a real variable and $\beta$ is a real constant
that fulfills $f(x)e^{-\beta x} \in L^2 \left ( \mathbb{R} \right)$,
assuming that $f(x)=0$ when $x<0$. Then, equation (\ref{LT_definition}) can be rewritten as,
\begin{equation*}
\widetilde{f}(\beta+i\omega)=\int_{-\infty}^{+\infty} e^{-\beta x}e^{-i \omega x}f(x)dx.
\end{equation*}
Defining,
\begin{equation} \label{cambio_fh}
h(x)=f(x)e^{-\beta x}, \; \text{then} \; \hat{h}(\omega)=\widetilde{f}(\beta+i \omega),
\end{equation}
where $\hat{h}$ denotes the Fourier transform of $h$.
The authors expand $\hat{h}(\omega)$ in terms of Coiflet wavelets,
\begin{equation} \label{hhat_expansion}
\hat{h}(\omega)=\sum_{k=-\infty}^{+\infty} c_{m,k} \phi_{m,k}(\omega) + \sum_{j=m}^{+\infty} \sum_{k=-\infty}^{+\infty} d_{j,k} \psi_{j,k}(\omega),
\end{equation}
where
$\phi_{m,k}(\omega)=2^{m/2}\phi(2^m \omega -k)$ and $\psi_{j,k}(\omega)=2^{j/2}\psi(2^j \omega -k)$, with $\phi$ and $\psi$ the scaling and wavelet functions,
respectively.
The next step consists in inverting the expression (\ref{hhat_expansion}) by means of the Fourier inversion formula.
Finally, considering the expression (\ref{cambio_fh}), the formulae of Laplace inversion become,
\begin{equation*}
\begin{split}
& f_m(x)=\frac{e^{\beta x}}{2^{m+1} \pi} \hat{\phi} \left (-\frac{x}{2^m} \right ) \sum_{k=-\infty}^{+\infty} \widetilde{f} \left (\beta+ i\frac{M_1+k}{2^m} \right ) e^{ixk/2^m}, \\
& f(x)=\lim_{m \rightarrow +\infty} f_m(x).
\end{split}
\end{equation*}
One drawback of this approximation is that the wavelet approach involves an infinite product of complex series and the computation of the Fourier transform of
some scaling functions. This can look intimidating for practical applications and may also take a relatively long computational time.
Based on operational matrices and Haar wavelets, the author in \cite{Chen2001} presents a new method for performing numerical inversion of
the Laplace transform where only matrix multiplications and ordinary algebraic operations are involved. However,
the essential step in the method consists in
expressing the Laplace transform in terms of $\frac{1}{s}$, which is impossible when the transform is known only numerically.
Another drawback of
this method is that the matrices become very large for larger scales.
\section{Multiresolution analysis and cardinal B-splines}
A natural and convenient way to introduce wavelets is through the notion of
multiresolution analysis (MRA). Here we provide the basic definitions and
properties regarding MRA and B-spline wavelets, for further information
see \cite{Mallat1989, Chui1992}.
\begin{defi}
A countable set $\{f_n\}$ of a Hilbert space is a Riesz basis if every
element $f$ of the space can be uniquely written as $f=\sum_n c_nf_n$, and
there exist positive constants $A$ and $B$ such that,
\begin{equation*}
A\lVert f \rVert^2 \le \sum_n \left|c_n \right|^2 \le B\lVert f \rVert^2.
\end{equation*}
\end{defi}
\begin{defi}
A function $\phi \in L^2(\mathbb{R})$ is called a scaling function, if the
subspaces $V_m$ of $L^2(\mathbb{R})$, defined by,
\begin{equation*}
V_m=clos_{L^2(\mathbb{R})} \left\{ \phi_{m,k}: k\in \mathbb{Z} \right\},
\quad m \in \mathbb{Z},
\end{equation*}
where $\phi_{m,k}=2^{m/2}\phi(2^mx-k)$, satisfy the properties,
\begin{enumerate}[(i)]
\item $\cdots \subset V_{-1} \subset V_0 \subset V_1 \subset \cdots$.
\item $clos_{L^2} \left ( \bigcup_{m \in \mathbb{Z}} V_m \right )=L^2(\mathbb{R})$.
\item $\bigcap_{m \in \mathbb{Z}} V_m=\{0\}$.
\item For each $m$, $\{ \phi_{m,k}: k\in \mathbb{Z} \}$ is a Riesz
(or unconditional) basis of $V_m$.
\end{enumerate}
We also say that the scaling function $\phi$ generates a multiresolution
analysis $\{V_m\}$ of $L^2(\mathbb{R})$.
\end{defi}
The $j^\text{th}$ order cardinal $B$-spline function, $N_j(x)$,
is defined recursively by a convolution:
\begin{equation*}
N_j(x)=\int_{-\infty}^{\infty} N_{j-1}(x-t)N_0(t)dt=\int_0^1 N_{j-1}(x-t)dt, \quad j \ge 1,
\end{equation*}
where,
\begin{equation*}
N_0(x)=\chi_{[0,1)}(x)=\begin{cases} 1 & \text{if } x\in [0,1) \\ 0 & \text{otherwise.} \end{cases}
\end{equation*}
Alternatively,
\begin{equation*}
N_j(x)=\frac{x}{j}N_{j-1}(x)+\frac{j+1-x}{j}N_{j-1}(x-1), \quad j \ge 1.
\end{equation*}
We note that cardinal $B$-spline functions are compactly supported, since the
support of the $j^\text{th}$ order $B$-spline function $N_j$ is $[0,j+1]$,
and their Fourier transform is,
\begin{equation*}
\widehat{N_j}(w)=\left( \frac{1-e^{-iw}}{iw} \right)^{j+1}.
\end{equation*}
In this paper we consider $\phi^j=N_j$ as the scaling function which generates
an MRA (see Figure \ref{fig-B_splines_0}). Clearly, for $j=0$ we have the scaling function of the Haar wavelet
system. We also remark that from the previous discussions, for every function
$f_m \in V_m$, there exists a unique sequence
$\{ c_{m,k}^j \}_{k \in \mathbb{Z}} \in l^2(\mathbb{Z})$, such that,
\begin{equation*}
f_m(x)=\sum_{k \in \mathbb{Z}} c_{m,k}^j \phi_{m,k}^j(x).
\end{equation*}
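For completeness, the recursion and the Fourier transform above can be coded directly. A minimal Python sketch (our own illustration):
\begin{verbatim}
# Minimal sketch: the cardinal B-spline N_j via the recursion above, and its
# Fourier transform ((1 - exp(-i*w)) / (i*w))^(j+1).
import numpy as np

def N(j, x):
    """Cardinal B-spline of order j, supported on [0, j+1]."""
    x = np.asarray(x, dtype=float)
    if j == 0:
        return np.where((x >= 0.0) & (x < 1.0), 1.0, 0.0)
    return (x / j) * N(j - 1, x) + ((j + 1 - x) / j) * N(j - 1, x - 1.0)

def N_hat(j, w):
    """Fourier transform of N_j (valid for w != 0)."""
    return ((1.0 - np.exp(-1j * w)) / (1j * w))**(j + 1)

x = np.linspace(-1.0, 5.0, 7)
print(N(3, x))        # nonzero only inside the support [0, 4]
print(N_hat(3, 1.0))
\end{verbatim}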
\begin{figure}
\caption{Cardinal B-splines of orders $j=0,1,2,3$.}
\label{fig-B_splines_0}
\end{figure}
\section{The wavelet approximation method}
Let us now consider a function $f \in L^2(\mathbb{R})$ and its Fourier
transform, whenever it exists:
\begin{equation*}
\widehat{f}(w)=\int_{-\infty}^{+\infty} e^{-iwx}f(x)dx.
\end{equation*}
Since $f \in L^2(\mathbb{R})$ we can expect that $f$ decays to zero, so it can be well approximated in a finite interval $[a,b]$ by,
\begin{equation}
f^c(x)=
\begin{cases}
f(x) & \text{if $x \in [a,b]$,} \\
0 & \text{otherwise.}
\end{cases}
\end{equation}
Let us approximate $f^c(x) \simeq f^c_{m,j}(x)$ for all $x \in [a,b]$, where,
\begin{equation}
f^c_{m,j}(x)=\sum_{k=0}^{(j+1)\cdot(2^m-1)} c_{m,k}^j \phi_{m,k}^j \left((j+1) \cdot \frac{x-a}{b-a} \right), \quad j \ge 0,
\end{equation}
with convergence in $L^2$-norm. Note that we are not considering the left and right boundary scaling functions (we refer
the reader to Section 3 in \cite{Maleknejad2010} for a detailed description of scaling functions on a bounded interval).
The main idea behind the Wavelet Approximation method is to approximate
$\widehat{f}$ by $\widehat{f}^c_{m,j}$ and then to compute the coefficients
$c_{m,k}^j$ by inverting the Fourier Transform. Proceeding this way,
\begin{equation*}
\begin{split}
\widehat{f}(w)&=\int_{-\infty}^{+\infty} e^{-iwx}f(x)dx \simeq \int_{-\infty}^{+\infty} e^{-iwx}f^c_{m,j}(x)dx \\
& =\sum_{k=0}^{(j+1)\cdot(2^m-1)} c_{m,k}^j \left( \int_{-\infty}^{+\infty} e^{-iwx} \phi_{m,k}^j\left((j+1)\cdot\frac{x-a}{b-a}\right)dx \right).
\end{split}
\end{equation*}
Introducing the change of variables $y=(j+1)\cdot \frac{x-a}{b-a}$ gives us,
\begin{equation*}
\begin{split}
\widehat{f}(w) &\simeq \frac{b-a}{j+1} \cdot e^{-iaw} \sum_{k=0}^{(j+1) \cdot (2^m-1)} c_{m,k}^j \int_{-\infty}^{+\infty} e^{-iw\frac{b-a}{j+1}y} \phi_{m,k}^j(y)dy \\
&= \frac{b-a}{j+1} \cdot e^{-iaw} \sum_{k=0}^{(j+1) \cdot (2^m-1)} c_{m,k}^j \widehat{\phi}_{m,k}^j \left(\frac{b-a}{j+1} \cdot w \right).
\end{split}
\end{equation*}
Finally, taking into account that $\widehat{\phi}_{m,k}^j(\xi)=2^{-\frac{m}{2}}\widehat{\phi^j}(\frac{\xi}{2^m})e^{-i\frac{k}{2^m}\xi}$ and
performing the change
of variables $z=e^{-i \frac{b-a}{2^m (j+1)}w}$, we have,
\begin{equation*}
\widehat{f}\left(\frac{2^m (j+1)}{b-a}i\cdot \log(z) \right) \simeq 2^{-\frac{m}{2}} \frac{b-a}{j+1} \cdot z^{\frac{2^m(j+1)a}{b-a}} \widehat{\phi^j} \left(i\cdot \log(z)\right) \sum_{k=0}^{(j+1) \cdot (2^m-1)} c_{m,k}^j z^k.
\end{equation*}
If we consider,
\begin{equation*}
P_{m,j}(z)=\sum_{k=0}^{(j+1) \cdot (2^m-1)} c_{m,k}^j z^k \quad \text{and} \quad Q_{m,j}(z)=\frac{2^{\frac{m}{2}} (j+1) z^{-\frac{2^m(j+1)a}{b-a}} \widehat{f}\left(\frac{2^m (j+1)}{b-a}i\cdot \log(z) \right)}{(b-a) \widehat{\phi^j} \left(i\cdot \log(z)\right)},
\end{equation*}
then, according to the previous formula, we have,
\begin{equation} \label{P_approx_Q}
P_{m,j}(z) \simeq Q_{m,j}(z).
\end{equation}
Since $P_{m,j}(z)$ is a polynomial, it is (in particular) analytic inside any disc of the complex plane
$\{z \in \mathbb{C}:\left|z\right|<r\}$, $r>0$. We can obtain expressions for the
coefficients $c_{m,k}^j$ by means of Cauchy's integral formula, that is,
\begin{align*}
c_{m,k}^j &= \frac{1}{2\pi i}\int_{\gamma}\frac{P_{m,j}(z)}{z^{k+1}}dz, \quad k=0,...,(j+1) \cdot (2^{m}-1),
\end{align*}
where $\gamma$ denotes a circle of radius $r$, $r>0$, about the origin.
Considering now the change of variables $z=re^{iu}$, $r>0$, gives us,
\begin{equation} \label{coefs_con_P}
\begin{split}
c_{m,k}^j & = \frac{1}{2\pi r^k}\int_{0}^{2\pi}\frac{P_{m,j}(re^{iu})}{e^{iku}}du
= \frac{1}{2\pi r^k}\int_{0}^{2\pi}\left[ \Re(P_{m,j}(re^{iu}))\cos(ku)+\Im(P_{m,j}(re^{iu}))\sin(ku) \right]du,
\end{split}
\end{equation}
where $k=0,...,(j+1) \cdot (2^{m}-1)$, and $\Re(z)$ and $\Im(z)$ stand for the real and imaginary parts of $z$,
respectively.
Note that if $k \ne 0$ then the expression above can be simplified further to,
\begin{equation} \label{coefs_con_P2}
c_{m,k}^j=\frac{2}{\pi r^k}\int_{0}^{\pi}\Re(P_{m,j}(re^{iu}))\cos(ku)du.
\end{equation}
On the other hand, since $\widehat{\phi^j} \left(i\cdot \log(z)\right)=\left(\frac{z-1}{\log(z)}\right)^{j+1}$, we have,
\begin{equation}
Q_{m,j}(z)=\frac{2^{\frac{m}{2}} (j+1) z^{-\frac{2^m(j+1)a}{b-a}} \widehat{f}\left(\frac{2^m (j+1)}{b-a}i\cdot \log(z) \right) (\log(z))^{j+1}}{(b-a)(z-1)^{j+1}},
\end{equation}
and, as written, it cannot be evaluated directly at $z=1$.
Finally, making use of (\ref{P_approx_Q}) and taking into account the former
observation, we can exchange $P_{m,j}$ by $Q_{m,j}$ in (\ref{coefs_con_P}) and (\ref{coefs_con_P2}) to
obtain, respectively,
\begin{equation} \label{trapecios00}
c_{m,0}^j \simeq \frac{1}{2\pi}\int_{0}^{2\pi}\Re(Q_{m,j}(re^{iu}))du,
\end{equation}
and,
\begin{align} \label{trapecios0}
c_{m,k}^j &\simeq \frac{2}{\pi r^k}\int_{0}^{\pi}\Re(Q_{m,j}(re^{iu}))\cos(ku)du, \quad k=1,...,(j+1) \cdot (2^{m}-1),
\end{align}
where $r \neq 1$ is a positive real number.
In practice, both integrals in (\ref{trapecios00}) and (\ref{trapecios0}) are computed by means of the Trapezoidal Rule, and we can define,
\begin{align*}
I(k) &=\int_{0}^{\pi} \Re(Q_{m,j}(re^{iu}))\cos(ku)du, \\
I(k;h) &=\frac{h}{2} \left( Q_{m,j}(r)+(-1)^kQ_{m,j}(-r)+2 \sum_{s=1}^{M-1} \Re(Q_{m,j}(re^{ih_s}))\cos(kh_s) \right),
\end{align*}
where $h=\frac{\pi}{M}$ and $h_s=sh$ for all $s=0,\dots,M$. Proceeding this
way we find,
\begin{equation} \label{coeffs_calculation1}
\begin{split}
c_{m,k}^j &\simeq \frac{2}{\pi r^k} I(k) \simeq \frac{2}{\pi r^k} I(k;h) \\
&= \frac{1}{M r^k}\left( Q_{m,j}(r)+(-1)^kQ_{m,j}(-r)+2 \sum_{s=1}^{M-1} \Re(Q_{m,j}(re^{ih_s}))\cos(kh_s) \right),
\end{split}
\end{equation}
where $k=1,...,(j+1) \cdot (2^{m}-1)$.
Let us summarize four sources of error in our procedure to compute the
numerical Fourier transform inversion using cardinal B-spline scaling functions. These
are:
\begin{enumerate}[(A)]
\item Truncation of the integration range,
\begin{equation*}
\mathcal{E}_1(x):=f(x)-f^c(x), \quad x \in \mathbb{R} \backslash [a,b].
\end{equation*}
\item The approximation error at scale $m$,
\begin{equation*}
\mathcal{E}_2(x):=f^c(x)-f^c_{m,j}(x), \quad x \in [a,b].
\end{equation*}
\item The discretization error, which results when approximating the integral $I(k)$ by $I(k;h)$ using the trapezoidal rule.
We can apply the formula for the error of the compound trapezoidal rule considering,
$$
q_{m,k}^j(u)=\Re(Q_{m,j}(re^{iu}))\cos(ku), \quad
\mathcal{E}_3:=I(k)-I(k;h),
$$
and assuming that $q^j_{m,k} \in C^2([0,\pi])$. Then,
\begin{equation} \label{error_typeC}
\left |\mathcal{E}_3 \right |= \frac{\pi^3}{12M^2} \left| (q_{m,k}^j(\mu))^{\prime \prime} \right|, \quad \mu \in (0,\pi).
\end{equation}
\item The roundoff error. If we can calculate the sum in expression (\ref{coeffs_calculation1}) with a precision of $10^{-\eta}$,
then the roundoff error after multiplying by a factor $\frac{1}{M r^k}$ is approximately $\mathcal{E}_4:=\frac{1}{M r^k} \cdot 10^{-\eta}$.
Hence, the roundoff error increases as $r$ approaches $0$.
\end{enumerate}
\subsection{Choice of the parameter $r$}
As mentioned before, the choice of the parameter $r$ may influence both the discretization and the roundoff errors.
In this section
we present a detailed analysis of the errors listed above in order to determine the optimal value of $r$.
To do this, let us consider the step function $f_1(x)=\chi_{[1/2,1)}(x)$ defined on $[0,1]$, where $\chi$ represents the indicator
function, and its Fourier transform $\widehat{f_1}(w)=\frac{e^{-iw/2}-e^{-iw}}{iw}$.
Due to the shape of $f_1$ it seems that
the best B-spline basis to perform the approximation is the one based on the Haar scaling
functions (B-splines of order $0$). In this particular case
$\Re(Q_{m,j}(re^{iu}))$ and its derivatives up to order $2$ can be computed
relatively straightforwardly, so that we will be able to calculate the optimal
value of the parameter $r$ in order to minimize the discretization and the
roundoff errors. We also demonstrate that the approximation error
(type (B)) is $0$ in this case. To do so, we consider,
\begin{equation*}
Q_{m,0}(re^{iu})=\frac{r^{2^m} \left( \cos(2^mu)+i\sin(2^mu) \right)-r^{2^{m-1}} \left( \cos(2^{m-1}u)+i\sin(2^{m-1}u) \right)}{2^{m/2}(r\cos(u)+ir\sin(u)-1)},
\end{equation*}
where $u \in [0,\pi]$, and,
\begin{equation*}
\begin{split}
&\Re(Q_{m,0}(re^{iu}))=\\
&=\frac{r^{2^m+1}\cos(2^mu-u)-r^{2^m}\cos(2^mu)-r^{2^{m-1}+1}\cos(2^{m-1}u-u)+r^{2^{m-1}}\cos(2^{m-1}u)}{2^{m/2}(r^2-2r\cos(u)+1)}.
\end{split}
\end{equation*}
Now, we must choose an appropriate $r$ value to control both the
discretization and the round-off errors. First of all, we consider the
discretization error which can be estimated by means of expression
(\ref{error_typeC}). We note that $q_{m,k}^0 \in C^2([0,\pi])$
since,
\begin{equation*}
0<(r-1)^2 \le r^2-2r\cos(u)+1 \le (r+1)^2, \quad \forall u \in [0,\pi], r \neq 1.
\end{equation*}
So we have,
\begin{equation*}
\begin{split}
\frac{d}{du}q_{m,k}^0(u)&=\frac{d}{du}\Re(Q_{m,0}(re^{iu}))\cos(ku)-k\Re(Q_{m,0}(re^{iu}))\sin(ku), \\
\frac{d^2}{du^2} q_{m,k}^0(u)&=\frac{d^2}{du^2}\Re(Q_{m,0}(re^{iu}))\cos(ku)-2k\frac{d}{du}\Re(Q_{m,0}(re^{iu}))\sin(ku)-k^2\Re(Q_{m,0}(re^{iu}))\cos(ku).
\end{split}
\end{equation*}
and,
\begin{equation} \label{ReQ_app}
\left |\Re(Q_{m,0}(re^{iu}) \right| \le \frac{r^{2^m+1}+r^{2^m}+r^{2^{m-1}+1}+r^{2^{m-1}}}{2^{m/2}(r-1)^2} \simeq r^{2^{m-1}}+o(r^{2^{m-1}}),
\end{equation}
where the last approximation holds for suitably small values of the parameter $r$.
For the sake of simplicity, we consider only the terms with the smallest exponents in the parameter $r$ for the expressions $\frac{d}{du}\Re(Q_{m,0}(re^{iu}))$ and
$\frac{d^2}{du^2}\Re(Q_{m,0}(re^{iu}))$. Then,
\begin{equation} \label{dReQ_app}
\left |\frac{d}{du}\Re(Q_{m,0}(re^{iu})) \right | \le \frac{2^{(m-2)/2}r^{2^{m-1}}+A(r)}{(r-1)^4} \simeq r^{2^{m-1}}+o(r^{2^{m-1}}),
\end{equation}
and,
\begin{equation} \label{ddReQ_app}
\left |\frac{d^2}{du^2}\Re(Q_{m,0}(re^{iu})) \right | \le \frac{2^{(3m-4)/2}r^{2^{m-1}}+B(r)}{(r-1)^8} \simeq r^{2^{m-1}}+o(r^{2^{m-1}}),
\end{equation}
where $A(r)$ and $B(r)$ are polynomials in $r$ with degree greater than $2^{m-1}$, and the approximations in (\ref{dReQ_app}) and (\ref{ddReQ_app})
hold for suitably small values of the parameter $r$. Finally, taking into account
expressions (\ref{error_typeC}),(\ref{ReQ_app}),(\ref{dReQ_app}) and (\ref{ddReQ_app}) we have,
\begin{equation*}
\left |\mathcal{E}_3 \right | \le \frac{\pi^3}{12M^2} \left (r^{2^{m-1}}+2kr^{2^{m-1}}+k^2r^{2^{m-1}} \right)+o(r^{2^{m-1}})
=\frac{\pi^3}{12M^2} \left (k^2+2k+1 \right)r^{2^{m-1}}+o(r^{2^{m-1}}).
\end{equation*}
We note that $ \left |\mathcal{E}_3 \right | \rightarrow 0$ as $r \searrow 0$, while the roundoff error increases as $r$
approaches zero.
Then,
the total error should be approximately minimized when the two estimates are equal. This leads to the equation,
\begin{equation*}
\frac{1}{M r^k} \cdot 10^{-\eta}=\frac{\pi^3}{12M^2} \left (k^2+2k+1 \right)r^{2^{m-1}}.
\end{equation*}
After algebraic manipulation, we find,
\begin{equation} \label{optimal_r}
r_{m,k}=\left( \frac{12M \cdot 10^{-\eta}}{\pi^3(k^2+2k+1)} \right)^\frac{1}{2^{m-1}+k}, \quad k=0,\dots,2^m-1.
\end{equation}
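In practice, expression (\ref{optimal_r}) is evaluated directly. A minimal Python sketch follows; the choice of $\eta$ reflects the working precision of the computed sums and is an assumption of the example.
\begin{verbatim}
# Minimal sketch of (optimal_r): the value of r balancing the estimated
# discretization and roundoff errors for given m, k and precision 10^(-eta).
import math

def optimal_r(m, k, eta, M=None):
    M = 2**m if M is None else M
    return (12.0 * M * 10.0**(-eta)
            / (math.pi**3 * (k + 1)**2))**(1.0 / (2**(m - 1) + k))

print(optimal_r(m=8, k=2**8 - 1, eta=16))
\end{verbatim}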
As mentioned before, the roundoff error arises when multiplying the sum in expression (\ref{coeffs_calculation1}) by the
pre-factor $(Mr^k)^{-1}$.
Let us consider $M=2^m$; then the $k$ of interest is $k=2^m-1$, which is the greatest value that this parameter can take
(small values of $k$ do not cause roundoff errors).
The left plot of Figure \ref{fig-prefactor} represents the pre-factor for values of $r \ge 0.9$, while the right plot shows
the pre-factor values for $r \ge 0.999$. We also display in Table \ref{parameter-r} the pre-factor values
$(Mr^k)^{-1}$ for different values of $r$ and scales $m=8$, $m=9$ and $m=10$.
\begin{figure}
\caption{Pre-factor $(Mr^k)^{-1}$ for $M=2^m$ and $k=2^m-1$, as a function of $r$, for $r \ge 0.9$ (left) and $r \ge 0.999$ (right).}
\label{fig-prefactor}
\end{figure}
\begin{table}[ht]\footnotesize
\begin{center}
\begin{tabular}{|c| c | c | c|}
\hline
& \multicolumn{3}{|c|}{Scale} \\
\cline{2-4}
$r$ & $m=8$ & $m=9$ & $m=10$ \\
\hline\hline
$0.9000$ & $1.8194 \cdot 10^9$ & $4.7077 \cdot 10^{20}$ & $6.3040 \cdot 10^{43}$ \\
$0.9100$ & $1.0869 \cdot 10^8$ & $1.6618 \cdot 10^{18}$ & $7.7688 \cdot 10^{38}$ \\
$0.9200$ & $6.6968 \cdot 10^6$ & $6.2395 \cdot 10^{15}$ & $1.0833 \cdot 10^{34}$ \\
$0.9300$ & $4.2522 \cdot 10^5$ & $2.4885 \cdot 10^{13}$ & $1.7047 \cdot 10^{29}$ \\
$0.9400$ & $2.7807 \cdot 10^4$ & $1.0529 \cdot 10^{11}$ & $3.0193 \cdot 10^{24}$ \\
$0.9500$ & $1.8717 \cdot 10^3$ & $4.7203 \cdot 10^{8}$ & $6.0042 \cdot 10^{19}$ \\
$0.9600$ & $1.2960 \cdot 10^2$ & $2.2394 \cdot 10^{6}$ & $1.3373 \cdot 10^{15}$ \\
$0.9700$ & $9.2550 \cdot 10^{0}$ & $1.1230 \cdot 10^{4}$ & $3.3283 \cdot 10^{10}$ \\
$0.9800$ & $6.7470 \cdot 10^{-1}$ & $5.9457 \cdot 10^{1}$ & $9.2347 \cdot 10^{5}$ \\
$0.9900$ & $5.0674 \cdot 10^{-2}$ & $3.3201 \cdot 10^{-1}$ & $2.8503 \cdot 10^{1}$ \\
$0.9990$ & $5.0415 \cdot 10^{-3}$ & $3.2566 \cdot 10^{-3}$ & $2.7177 \cdot 10^{-3}$ \\
$0.9991$ & $4.9145 \cdot 10^{-3}$ & $3.0942 \cdot 10^{-3}$ & $2.4532 \cdot 10^{-3}$ \\
$0.9993$ & $4.6699 \cdot 10^{-3}$ & $2.7934 \cdot 10^{-3}$ & $1.9990 \cdot 10^{-3}$ \\
$0.9995$ & $4.4376 \cdot 10^{-3}$ & $2.5219 \cdot 10^{-3}$ & $1.6289 \cdot 10^{-3}$ \\
$0.9997$ & $4.2169 \cdot 10^{-3}$ & $2.2768 \cdot 10^{-3}$ & $1.3274 \cdot 10^{-3}$ \\
$0.9999$ & $4.0072 \cdot 10^{-3}$ & $2.0555 \cdot 10^{-3}$ & $1.0818 \cdot 10^{-3}$ \\
\hline
\end{tabular}
\end{center}
\caption{Pre-factor $(Mr^k)^{-1}$ for $M=2^m$, $k=2^m-1$ and scales $m=8$, $m=9$ and $m=10$.}\label{parameter-r} \centering
\end{table}
On the one hand, it is clear that we must concentrate on this second interval in order to obtain a reasonable roundoff error; on
the other hand, the discretization error grows when $r$ is close to $1$. Later, in the numerical examples section, we will confirm
the theory developed above for the step function.
Since $f_1$ is compactly supported, we have $\mathcal{E}_1=0$.
Note that in the case that $j=0$,
\begin{equation*}
f^c(x)=\sum_{k=0}^{2^m-1}c_{m,k}^0\phi_{m,k}^0 \left(\frac{x-a}{b-a}\right)+\sum_{l=m}^{+\infty}\sum_{k=0}^{2^l-1}d_{l,k}^0\psi_{l,k}^0 \left(\frac{x-a}{b-a}\right),
\end{equation*}
where,
$\{\phi_{m,k}^0\}_{k=0,\dots,2^m-1} \cup \{\psi_{l,k}^0\}_{l \ge m,k=0,\cdots,2^l-1}$ is the orthonormal Haar basis of $L^2 \left([0,1]\right)$. Then,
\begin{equation} \label{error_aproximacion_Haar}
\left \| \mathcal{E}_2 \right \|_{L^2 \left( [a,b] \right)}^2=\left \| f^c-f_m^c \right \|_{L^2 \left( [a,b] \right)}^2 = \left \| \sum_{l=m}^{+\infty}\sum_{k=0}^{2^l-1}d_{l,k}^0\psi_{l,k}\left(\frac{x-a}{b-a}\right) \right \|_{L^2 \left( [a,b] \right)}^2
=(b-a) \cdot \sum_{l=m}^{+\infty}\sum_{k=0}^{2^l-1}|d_{l,k}^0|^2,
\end{equation}
since $\left \| \psi_{l,k}^0\left(\frac{x-a}{b-a}\right) \right \|_{L^2 \left( [a,b] \right)}^2=(b-a)\cdot \left \| \psi_{l,k}^0\right \|_{L^2 \left( [0,1] \right)}^2=b-a$. Then, the approximation error depends on the length of the interval $[a,b]$ and the detail coefficients,
\begin{equation} \label{detail_coeff}
d_{l,k}^0=\int_{\mathbb{R}} f^c(x) \cdot \psi_{l,k}(x)dx.
\end{equation}
Furthermore, since the detail coefficients (\ref{detail_coeff}) are zero, we also have $\mathcal{E}_2=0$, and the approximation
is exact at any scale level $m$.
\section{The COS method} \label{cos_method}
For completeness, we present here the methodology developed in \cite{Fang2008}
for solving the inverse Fourier integral\footnote{In order to maintain the
notation used by the authors, here \begin{equation} \label{FT_Oosterlee}
\xi(w)=\int_{\mathbb{R}} e^{iwx}f(x)dx, \end{equation} represents the
characteristic function, and hence the Fourier transform of a density function
$f(x)$.}.
The main idea is to reconstruct the whole integral from its Fourier-cosine
series expansion extracting the series coefficients directly from the
integrand. Fourier-cosine series expansions usually give an optimal
approximation of functions with a finite support \cite{Boyd1989}. In fact, the
cosine expansion of $f(x)$ in $x$ equals the Chebyshev series expansion of
$f(\cos^{-1}(t))$ in $t$.
For a function supported on $[0,\pi]$, the cosine expansion reads,\\
\begin{equation*}
f(\theta)=\frac{A_0}{2}+\sum_{k=1}^{+\infty} A_k \cos(k\theta),
\end{equation*}
with $A_k=\frac{2}{\pi} \int_0^{\pi}f(\theta)\cos(k\theta)d\theta$. For functions supported on any other finite interval $[a,b] \subset \mathbb{R}$, the Fourier-cosine series expansion can easily be obtained via a change of variables,
\begin{equation*}
\theta:=\frac{x-a}{b-a}\pi, \quad x=\frac{b-a}{\pi}\theta+a.
\end{equation*}
It then reads,
\begin{equation*}
f(x)=\frac{A_0}{2}+\sum_{k=1}^{+\infty} A_k \cos(k \pi \frac{x-a}{b-a}),
\end{equation*}
with,
\begin{equation} \label{coeff_approximation}
A_k=\frac{2}{b-a}\int_a^b f(x) \cos(k \pi \frac{x-a}{b-a})dx.
\end{equation}
Since any real function has a cosine expansion when it is finitely supported, the derivation starts with a truncation of the infinite integration range in (\ref{FT_Oosterlee}). Due to the conditions for the existence of a Fourier transform, the integrands in (\ref{FT_Oosterlee}) have to decay to zero at $\pm \infty$ and we can truncate the integration range in a proper way without losing accuracy.
Suppose $[a,b] \subset \mathbb{R}$ is chosen such that the truncated integral approximates the infinite counterpart very well, i.e.,
\begin{equation} \label{approximation_CF}
\xi_1(w):=\int_a^b e^{iwx}f(x)dx \simeq \int_{\mathbb{R}} e^{iwx}f(x)dx=\xi(w).
\end{equation}
Here, $\xi_1$ denotes a numerical approximation.
Comparing equation (\ref{approximation_CF}) with the cosine series coefficients of $f(x)$ on $[a,b]$ in (\ref{coeff_approximation}), we find that,
\begin{equation*}
A_k \equiv \frac{2}{b-a} \Re \left( \xi_1 \left( \frac{k \pi}{b-a} \right) e^{-i \frac{ka \pi}{b-a}} \right),
\end{equation*}
where $\Re$ denotes the real part of the argument. It then follows from (\ref{approximation_CF}) that $A_k \simeq F_k$ with,
\begin{equation*}
F_k \equiv \frac{2}{b-a} \Re \left( \xi \left( \frac{k \pi}{b-a} \right) e^{-i \frac{ka \pi}{b-a}} \right).
\end{equation*}
We now replace $A_k$ by $F_k$ in the series expansion of $f(x)$ on $[a,b]$, i.e.,
\begin{equation} \label{series_cos}
f_1(x)=\frac{F_0}{2}+\sum_{k=1}^{+\infty} F_k \cos(k \pi \frac{x-a}{b-a}),
\end{equation}
and truncate the series summation such that,
\begin{equation} \label{series_cos_trunc}
f_2(x)=\frac{F_0}{2}+\sum_{k=1}^{N-1} F_k \cos(k \pi \frac{x-a}{b-a}).
\end{equation}
The resulting error in $f_2(x)$ consists of two parts: a series truncation error from (\ref{series_cos})
to (\ref{series_cos_trunc}) and an error originating from the approximation of $A_k$ by $F_k$. An error analysis that takes
these different approximations into account is presented in \cite{Fang2008}.
The COS method performs well when approximating smooth functions, but many terms are needed when the function or
its first derivative has discontinuities in the approximation domain.
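For concreteness, a minimal Python sketch of the truncated expansion (\ref{series_cos_trunc}) is given below; the standard normal characteristic function $e^{-w^2/2}$ is used purely as an assumed test case, not as one of the examples of the next section.
\begin{verbatim}
import numpy as np

def cos_inversion(cf, a, b, N, x):
    # Recover f on [a, b] from its characteristic function cf via the
    # truncated Fourier-cosine expansion.
    k = np.arange(N)
    u = k * np.pi / (b - a)
    F = 2.0 / (b - a) * np.real(cf(u) * np.exp(-1j * u * a))
    F[0] *= 0.5                              # first term weighted by 1/2
    return np.cos(np.outer(x - a, u)) @ F

# Assumed test case: standard normal density recovered on [-10, 10]
x = np.linspace(-3.0, 3.0, 7)
f = cos_inversion(lambda w: np.exp(-0.5 * w ** 2), -10.0, 10.0, 64, x)
\end{verbatim}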
\section{Numerical examples}
The aim here is to show the accuracy of B-splines to invert Fourier
transforms of extremely peaked or discontinuous functions with finite support.
We will consider the B-spline scaling functions of order $j=0,1,2$. We denote by WAi-j the Wavelet Approximation method with
B-splines of order $i$ at scale $j$, which involves $(2^j(i+1)-i)$ coefficients,
and by COS-N the COS approximation method with $N$ terms.
To compute the coefficients of the Fourier inversion in expression (\ref{coeffs_calculation1}), we must set the parameters.
For this purpose we consider $M=(j+1)\cdot2^m$, where $j$ is the order of the B-spline considered and $m$ is the scale parameter.
Observe that if we take $M=2k$ instead of $M=(j+1)\cdot2^m$ in (\ref{coeffs_calculation1}) this leads to the following expression,
\begin{equation}
\begin{split}
c_{m,k} & \simeq \frac{1}{2k r^k}\left( Q_{m,j}(r)+(-1)^kQ_{m,j}(-r)+2 \sum_{s=1}^{2k-1} \Re(Q_{m,j}(re^{is\frac{\pi}{2k}}))\cos\left(ks\frac{\pi}{2k}\right) \right) \\
&= \frac{1}{2k r^k}\left( Q_{m,j}(r)+(-1)^kQ_{m,j}(-r)+2 \sum_{s=1}^{k-1} (-1)^s \Re(Q_{m,j}(re^{is\frac{\pi}{k}})) \right),
\end{split}
\end{equation}
for $k=1,...,(j+1)\cdot(2^{m}-1)$, so the computation time reduces for large scale approximations.
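A sketch of this reduced computation is given below; here $Q$ is assumed to be a user-supplied callable implementing $Q_{m,j}(z)$ for complex $z$, so $m$ and $j$ enter only through $Q$.
\begin{verbatim}
import numpy as np

def wa_coefficient(Q, k, r):
    # c_{m,k} via the M = 2k form above (k >= 1):
    # only k - 1 interior evaluations of Q are needed.
    interior = sum((-1) ** s * np.real(Q(r * np.exp(1j * s * np.pi / k)))
                   for s in range(1, k))
    return (Q(r) + (-1) ** k * Q(-r) + 2.0 * interior) / (2.0 * k * r ** k)
\end{verbatim}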
Finally, we set $r=0.9995$. Although the only formal restrictions are $r>0$ and $r \neq 1$,
we must take into account the two types of errors (C) (discretization)
and (D) (roundoff) listed previously, which may
influence the computation of the coefficients. Since the
function $\Re(Q_{m,j}(re^{iu}))$ is in general intractable from an analytical
point of view, we carried out intensive simulations in order
to understand the influence of the parameter $r$. We
did these simulations considering the test functions $f_1$ and $f_2(x)=e^{-\alpha |x|}, \alpha >0$ at
different scale levels and used B-splines scaling functions of order
$j=0,1,2$. To show just an example, we have plotted the results for $f_2,
\alpha=50$ and B-splines of order $1$ at scale $m=5$
(Figure \ref{fig-r_exp_sc5_a50}) and $m=9$ (Figure \ref{fig-r_exp_sc9_a50}).
The colors represent the magnitude of the logarithm of the absolute errors.
As we can observe, the absolute error remains constant for values
$|r-1| \le \epsilon$. When $|r-1| > \epsilon$, the error increases
for high values of $x$ (i.e. high values of the translation parameter $k$)
and decreases for low values of $x$ (i.e. low values of the translation
parameter $k$). It is worth mentioning that for high scales the error grows
very rapidly when $|r-1| > \epsilon$ (empirically, $\epsilon \simeq 0.05$;
for this reason we take $r$ approximately at the midpoint of the interval,
avoiding $r=1$), while this effect diminishes for lower scales.
In fact, as we will see later with the step function, the roundoff error
almost disappears at very low scales (for instance $m=1$), and the
discretization error tends to zero when $r$ tends to zero. These facts
confirm the high impact of the roundoff error at high scale levels,
whereas at very low scales only the discretization error matters.
\begin{figure}
\caption{Logarithm of the absolute error for the approximation of the function $f_2$ ($\alpha=50$) with B-splines of order $1$ at scale $m=5$.}
\label{fig-r_exp_sc5_a50}
\end{figure}
\begin{figure}
\caption{Logarithm of the absolute error for the approximation of the function $f_2$ ($\alpha=50$) with B-splines of order $1$ at scale $m=9$.}
\label{fig-r_exp_sc9_a50}
\end{figure}
For comparison, we gave in Section \ref{cos_method} a brief overview of
the COS method developed by Fang and Oosterlee (\cite{Fang2008}) that is
based on a Fourier-cosine series expansion and which usually gives optimal
approximations of functions with finite support (\cite{Boyd1989}).
The COS method is state of the art and has been applied to efficiently
recover density functions from their Fourier transforms in order to solve
important problems arising in Computational Finance.
Let us consider the step function presented before. Observe that if we take $m=1$, there is almost no roundoff error and the only
remaining error is the discretization error $\mathcal{E}_3$. If we assume that $\eta=16$, then according to (\ref{optimal_r})
$r_{1,0}=7.740368 \cdot 10^{-17}$, $r_{1,1}=4.398968 \cdot 10^{-9}$. The left plot of Figure \ref{fig-f3} shows the approximation to
the step function $f_1$ with the COS and the WA methods with $r=0.9995$. The right plot of Figure \ref{fig-f3} represents the
absolute error of the approximation with the COS and the WA methods, the latter with $r=0.9995$ and with the optimal $r$ computed from expression (\ref{optimal_r}). Note that with just $2$ coefficients the WA method is capable of accurately approximating the step function, while the COS method suffers from the Gibbs phenomenon even when a large number of terms (we show up to $2048$) is added to the COS expansion.
Note that if we consider the approximation to the step function at scale $m=10$, then the values $r_{10,k}$ computed for
the parameter $r$ all lie in a neighborhood of $0.95$, in accordance with the intensive simulations performed before.
The conclusion in that case is that the COS method performs poorly around the jump and exhibits the Gibbs phenomenon, while Haar
wavelets are naturally capable of dealing with this discontinuity.
\begin{figure}
\caption{Zoom of the approximation (left) and absolute error of the approximation (right) to the function $f_1$.}
\label{fig-f3}
\end{figure}
\subsection{Exponential function}
\label{ex1}
Let us consider the exponential function, $f_2(x)=e^{-\alpha |x|}, \alpha >0$ and its Fourier transform $\widehat{f_2}(w)=\frac{2\alpha}{\alpha^2+w^2}$.
Observe that this function becomes extremely peaked when $\alpha \gg 1$. We will test the inversion with $\alpha=50$ and $\alpha=500$
on the interval $[a,b]=[-1,1]$. We carry out the
comparison between both methods using approximately the same number of terms
in the approximation. It is worth mentioning that the computation of wavelet
coefficients is more time consuming than the calculation of COS coefficients.
Moreover, the COS method is easier to implement. Results are reported in
Table~\ref{tabla-f1} and plotted in Figure~\ref{fig-f1}. We observe that
linear B-splines are the most suitable basis functions to approximate highly
peaked functions like the exponential we are considering. While adding
many more terms to the COS expansion improves the approximation only slightly,
considering B-splines of order $1$ at higher scales makes the approximation
error decay much faster. The WA method with B-splines of order $1$
performs better than the WA method with B-splines of orders $0$ and $2$.
\begin{table}[ht]\footnotesize
\begin{center}
\begin{tabular}{|l| c c| c c|}
\hline \hline
\multirow{2}{*}{Method} & \multicolumn{2}{|c|}{$\alpha=50$} & \multicolumn{2}{|c|}{$\alpha=500$} \\
& $\min \log|\text{error}|$ & $\max \log|\text{error}|$ & $\min \log|\text{error}|$ & $\max \log|\text{error}|$ \\
\hline \hline
WA0-6 & $-2.928931$ & $-0.374819$ & $-2.523483$ & $-0.040001$ \\
WA1-5 & $-7.719564$ & $-0.857977$ & $-8.119854$ & $-0.083319$ \\
WA1-9 & $-12.704332$ & $-3.107169$ & $-9.290222$ & $-1.194033$ \\
WA2-4 & $-5.129268$ & $-0.367390$ & $-5.624545$ & $-0.036573$ \\
COS-64 & $-5.673413$ & $-0.526046$ & $-5.825399$ & $-0.057691$ \\
COS-128 & $-5.483920$ & $-0.805937$ & $-5.305269$ & $-0.120147$ \\
COS-256 & $-6.093322$ & $-1.102053$ & $-5.898013$ & $-0.244112$ \\
COS-512 & $-7.488787$ & $-1.402252$ & $-6.433619$ & $-0.450188$ \\
COS-1024 & $-7.454768$ & $-1.703285$ & $-6.500191$ & $-0.716606$ \\
\hline
\end{tabular}
\end{center}
\caption{Approximation errors to the function $f_2$ in the interval $[-1,1]$.}
\label{tabla-f1} \centering
\end{table}
\begin{figure}
\caption{Absolute error of the approximation to the function $f_2$ with $\alpha=50$ (top) and $\alpha=500$ (bottom).}
\label{fig-f1}
\end{figure}
In addition, we consider the following representative examples.
\subsection{B-spline basis function}
\label{ex3}
We consider the function $f_3(x)=2\phi_{1,0}^1(x)$ in the interval $[a,b]=[0,2]$.
We aim to recover the original function through the Wavelet Approximation inversion method. We also apply the COS method. As in the previous example, the errors of type (A) and (B) do not have to be considered for this function, since $f_3$ is compactly supported and we carry out the approximation by means of B-splines of order $1$. However, errors of type (C) and (D) remain.
As before, we consider $r=0.9995$. The recovered coefficients are $c_{1,0}^1=2$, $c_{1,1}^1=-5.605010\cdot 10^{-8}$ and $c_{1,2}^1=4.645665\cdot 10^{-8}$, showing high accuracy in the approximation.
Figure \ref{fig-f4} shows a zoom of the approximation with the COS method to the function $f_3$ in a neighborhood of $0.5$ with $64$ and $128$ terms, while the absolute error is plotted in the right part of the figure. As expected, the error grows at the non-smooth parts of the function, that is, near the points $x=0$, $x=0.5$ and $x=1$.
\begin{figure}
\caption{Zoom of the approximation (left) and absolute error of the approximation (right) to the function $f_3$.}
\label{fig-f4}
\end{figure}
\subsection{Combination of B-spline basis functions}
\label{ex4}
We consider the function $f_4(x)=\sum_{k=0}^6 e^{-k} \phi_{2,k}^1(x+1)$ in the interval $[a,b]=[-1,1]$, that is, a finite sum of B-spline basis functions of order one with coefficients which exhibit exponential decay. Again, the coefficients can be accurately recovered by the Wavelet Approximation method with B-splines of order one at scale $2$. The maximum absolute error is,
$$\max_{k=0,\dots,6}|e^{-k}-c_{2,k}^1|=2.681058\cdot10^{-8}.$$
The left plot in Figure \ref{fig-f5} represents the approximation to the function with the COS method, while the right plot shows the absolute error of the approximation in log scale. As before, the problematic points for the approximation are the points of non-differentiability.
\begin{figure}
\caption{Approximation (left) and absolute error of the approximation (right) to the function $f_4$.}
\label{fig-f5}
\end{figure}
\subsection{Gaussian function}
\label{ex5}
Finally, let us consider the Gaussian function, $f_5(x)=\frac{1}{\sigma \sqrt{2\pi}}e^{-\frac{x^2}{2\sigma^2}}$ and its Fourier
transform $\widehat{f_5}(w)=e^{-\frac{w^2\sigma^2}{2}}$.
Since the function $f_5$ is analytic, the COS method performs much better than the Wavelet Approximation. We can see the results in
Table \ref{tabla-f5}. As expected, quadratic B-splines behave better than Haar or linear B-splines, since the maximum error decreases when
the order of the B-spline increases.
The left plot in Figure \ref{fig-f6} shows the graph of the exponential function, while the right plot represents the graph of the Gaussian
function. It is worth observing the relation between the continuity of the function or its derivatives and the order of the B-spline
that fits best in the approximation: Haar wavelets for functions with a jump discontinuity, first-order B-splines for functions with
a jump discontinuity in the first derivative and, finally, quadratic B-splines for more regular functions.
\begin{table}[ht]\footnotesize
\begin{center}
\begin{tabular}{|l| c c|}
\hline \hline
\multirow{2}{*}{Method} & \multicolumn{2}{|c|}{$\sigma=0.1$} \\
& $\min \log|\text{error}|$ & $\max \log|\text{error}|$ \\
\hline \hline
WA0-6 & $-16.090813$ & $-0.428258$ \\
WA1-5 & $-11.687910$ & $-1.482131$ \\
WA2-4 & $-9.285967$ & $-2.450365$ \\
COS-32 & $-9.392864$ & $-5.390718$ \\
COS-64 & $-17.699992$ & $-7.530656$ \\
\hline
\end{tabular}
\end{center}
\caption{Approximation errors to the function $f_5$ in the interval $[-1,1]$.}
\label{tabla-f5} \centering
\end{table}
\begin{figure}
\caption{The exponential function $f_2$ with $\alpha=50$ (left) and the Gaussian function $f_5$ with $\sigma=0.1$ (right).}
\label{fig-f6}
\end{figure}
\section{Conclusions}
We have investigated a numerical procedure, the Wavelet Approximation method, to invert the Fourier transform of functions
with finite support by means of B-spline scaling functions. First, the function is truncated to a sufficiently
wide interval and then approximated by a finite combination of B-spline wavelets of order up to $2$;
in this way the function is recovered from its Fourier transform.
We have tested and compared the accuracy of this method versus the COS method for a set of heterogeneous functions in terms of the
continuity and smoothness.
We have considered a continuous function with a jump discontinuity in its first derivative and a function with a
jump discontinuity in its domain of definition. Additionally, combinations of B-splines of order one have also been considered as
the functions to be recovered. With these last two examples we are able to assess the discretization and roundoff errors of the
Wavelet Approximation method, since no errors of type (A) and (B) take place. Finally, we have also considered the Gaussian function.
The COS method performs better for infinitely differentiable functions, like the Gaussian, due to the fact that the coefficients of
the expansion decay exponentially fast. On the contrary, the WA method is more suitable for functions that exhibit peaks or discontinuities
in their domain. For the function with a jump discontinuity, B-splines of order $0$ (Haar) are better, while B-splines of order $1$
accurately fit the continuous function with a jump discontinuity in its first derivative. Furthermore, for these last two functions
little improvement is achieved when adding a large number of terms to the COS expansion.
Due to the fact that COS coefficients are easier to compute in terms of CPU time, hybrid methods involving COS and WA, or
simply combinations of the WA method at different scales and/or with different orders of the B-splines, could be investigated in the future.
B-splines are a semi-orthogonal system, so it would also be advisable to explore the orthogonal Daubechies or Battle-Lemari\'e scaling
functions. Furthermore, it is well known that standardized B-splines tend to the Gaussian function when the order tends to infinity,
so they could be very useful to recover Gaussian functions from their Fourier transforms.
\end{document}
|
\begin{document}
\title{Dzyaloshinskii-Moriya interaction as an agent to free the bound entangled states}
\author{Kapil K. Sharma$^\ast$ and S. N. Pandey$^\dagger$ \\
\textit{Department of Physics, Motilal Nehru National Institute of Technology,\\ Allahabad 211004, India.} \\
E-mail: $^\ast$[email protected], $^\dagger$[email protected]}
\textit{PACS Numbers: 03.65.Yz, 03.67.-a, 03.67.Lx}
\begin{abstract}
In the present paper, we investigate the efficacy of the Dzyaloshinskii-Moriya (DM) interaction to convert bound entangled states into free entangled states. We consider a tripartite hybrid system consisting of a pair of non-interacting qutrits, initially prepared in a bound entangled state, and one auxiliary qubit. Here we consider two types of bound entangled states investigated by Horodecki. The auxiliary qubit interacts with one of the qutrits of the pair through the DM interaction. We show that by tuning the probability amplitude of the auxiliary qubit and the DM interaction strength one can free the bound entangled states, which can then be further distilled. We use the reduction criterion to find the ranges of the probability amplitude of the auxiliary qubit and of the DM interaction strength for which the states are distillable. The realignment criterion and the negativity have been used for the detection and quantification of entanglement.
\end{abstract}
\maketitle
\section{Introduction}
Quantum entanglement \cite{EPR1935,Neilsen2000} is a physical phenomenon which takes place when particles interact in the microscopic world in such a way that the quantum state of each particle cannot be described independently of the others. The entanglement phenomenon is expected to be a useful resource for future quantum technologies. Many applications based on entanglement have been investigated, like quantum teleportation \cite{CBennet1933}, quantum imaging \cite{qi}, quantum game theory \cite{qs}, secure quantum key transmission \cite{AkEkert1991}, etc. To develop quantum technologies based on entanglement, the quantum community needs quantum systems that remain entangled for long times and are
free from noise. Quantum systems are fragile: they may lose their entanglement through interactions with the external environment and can undergo entanglement sudden death (ESD) in finite time \cite{YuEberly2004,YuEberly2009}. Hence, the dynamics of entanglement and its control under various environmental interactions conceptually underpin quantum information processing. Dynamics in various quantum spin chains have been studied under the Dzyaloshinskii-Moriya (DM) interaction \cite{DMmoriya,Tmoriya2_1960}. The DM interaction is a useful resource in quantum information processing to entangle and disentangle quantum systems. Recently, Zang \textit{et al.} studied the entanglement dynamics of a two-qubit pair by taking a third qubit or qutrit which interacts with one qubit of the pair through the DM interaction \cite{ZQiang1,ZQiang2,ZQiang3}. They studied the dynamics of entanglement by taking a third qubit as a controller qubit, and further by taking a third qutrit as a controller qutrit. They proposed that, by manipulating the probability amplitude of the third qubit or qutrit and the DM interaction strength, one can control the entanglement between the two qubits induced by the DM interaction and hence in various quantum spin chains \cite{Hisenberg}. At this point, we mention that the same method may not only be used to control entanglement, but may also be used to free the bound entanglement \cite{Horodecki_11,Horodecki_12,Horodecki_21,Horodecki_22} in various bound entangled states. Once the bound entangled states are free, they can be distilled \cite{Dist}.
Quantum states in dimension $3\otimes3$ and higher have been classified into two categories: free entangled states and bound entangled states. Free entangled states are distillable states, or in other words noise-free states. On the other hand, bound entangled states are noisy states from which no pure entanglement can be obtained by local operations and classical communication. It is difficult to use bound entangled states directly for practical quantum information processing. However, by providing additional resources, bound entanglement can be activated to increase the fidelity of quantum teleportation \cite{Activation_1,Activation_2,Activation_3}. Up to now we do not have satisfactory tools to quantify and detect bound entanglement. Recently, the production of free entanglement from bound entangled states has been proposed by using an ancillary system coupled to the initial bound entangled state via appropriate weak measurements \cite{Activation_4}.
In the present work, we consider the qutrit-qutrit bound entangled bipartite states proposed by Horodecki \cite{Horodecki_11,Horodecki_12,Horodecki_21,Horodecki_22}. The main goal of the present work is to show that the DM interaction can be used as a useful agent to free bound entangled states in a two-qutrit system. Once the states are free, they can be further distilled. We have used the reduction criterion to check the distillability of these states; using this criterion, one can ensure that the states are distillable. Further, we have used the realignment criterion to detect bound entanglement and the negativity to measure free entanglement. The motivation of this study comes from our recent works \cite{KK1,KK2,KK3,KK4}.
The plan of the paper is as follows. In Sect. 2, we present the Hamiltonian of the system. Sect. 3 is devoted to the description of Horodecki's bound entangled states and the reduced system dynamics. In Sect. 4, we discuss the reduction criterion, the realignment criterion and the negativity. Sect. 5 is devoted to the time evolution of the negativity and the realignment criterion. Lastly, in Sect. 6, we report our conclusion.
\begin{figure*}
\caption{Plot of entanglement and realignment criterion with $D = 0.0$. Green color graph represents negativity (N) and red color graph represents the realignment criterion (R).}
\label{1}
\end{figure*}
\section{Hamiltonian of the system}
We consider a qutrit (A)-qutrit (B) pair and an auxiliary qubit (C) which interacts with one of the qutrits of the pair through the DM interaction. Here, we assume that the auxiliary qubit (C) interacts with the qutrit (B) of the pair. Now the Hamiltonian of the system can be written as
\begin{equation}
H=H_{AB}+H_{BC}^{int}, \label{eq:17}
\end{equation}
where $H_{AB}$ is the Hamiltonian of qutrit (A) and qutrit (B) and $H_{BC}^{int}$ is the interaction Hamiltonian of qutrit (B) and qubit (C). We consider uncoupled qutrit (A) and qutrit (B), so $H_{AB}$ is zero. Now the Hamiltonian becomes
\begin{eqnarray}
H=H_{BC}^{int}=\vec{D}.(\vec{\sigma_B} \times \vec{\sigma_C}), \label{eq:18}
\end{eqnarray}
where $\vec{D}$ is the DM interaction vector between qutrit (B) and qubit (C), and
$\vec{\sigma_B}$, $\vec{\sigma_C}$ are the associated operator vectors of qutrit (B) and qubit (C), respectively.
We assume that the DM interaction exists along the $z$-direction only. In this case, the Hamiltonian can be simplified as\\
\begin{equation}
H=D.(\sigma_B^X \otimes \sigma_C^Y-\sigma_B^Y \otimes \sigma_C^X), \label{eq:19} \\
\end{equation}
where $\sigma_B^X$ and $\sigma_B^Y$
are Gell-Mann matrices for qutrit (B), and $\sigma_C^X$ and $\sigma_C^Y$ are the X and Y Pauli matrices of qubit (C), respectively. The above Hamiltonian is a $6\times 6$ matrix. Taking its tensor product with the three-dimensional identity matrix associated with qutrit (A), we obtain an $18\times 18$ matrix. The Hamiltonian matrix is easy to diagonalize by eigendecomposition, and the unitary time evolution operator is easily computable as
\begin{eqnarray}
U(t)=e^{-i H t}, \label{eq:20}
\end{eqnarray}
which is also an $18 \times 18$ matrix. This matrix is used to obtain the time evolution of the density matrix of the system.
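A minimal numerical sketch of this construction is given below (Python); the identification of $\sigma_B^{X}$, $\sigma_B^{Y}$ with the Gell-Mann matrices $\lambda_1$, $\lambda_2$ is our assumption.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

lam1 = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]], dtype=complex)     # sigma_B^X
lam2 = np.array([[0, -1j, 0], [1j, 0, 0], [0, 0, 0]], dtype=complex)  # sigma_B^Y
sx = np.array([[0, 1], [1, 0]], dtype=complex)                        # sigma_C^X
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)                     # sigma_C^Y

def evolution_operator(D, t):
    # H acts on qutrit (B) and qubit (C); qutrit (A) carries the identity.
    h_bc = D * (np.kron(lam1, sy) - np.kron(lam2, sx))   # 6 x 6 interaction
    h = np.kron(np.eye(3, dtype=complex), h_bc)          # 18 x 18 Hamiltonian
    return expm(-1j * h * t)                             # U(t) = exp(-i H t)
\end{verbatim}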
\begin{figure*}
\caption{Plot of negativity $(N)$ and realignment criterion $(R)$ for state 2 with the parameter values $c_{0}=0$ and $Dt=0$.}
\label{4}
\end{figure*}
\section{Horodecki's bound entangled states and reduced system dynamics}
In this section, we describe two known bipartite Horodecki bound entangled states \cite{Horodecki_11,Horodecki_12,Horodecki_21,Horodecki_22} in $3 \otimes 3$ dimension. In subsection 3.1 we present state 1, and in subsection 3.2 state 2 is presented.
\subsection{State 1.} \label{s1}
The Horodecki's bound entangled state \cite{Horodecki_11,Horodecki_12} in $3\otimes 3$ dimension is given by
\begin{eqnarray}
\rho_{\alpha}(0)=\frac{2}{7}P+\frac{\alpha}{21}Q+\frac{(5-\alpha)}{21}R, \quad 2\leq \alpha \leq 5, \label{eq:b1}
\end{eqnarray}
where
\begin{align}
& P=|\psi\rangle\langle\psi|, \quad |\psi\rangle=\frac{1}{\sqrt{3}}(|00\rangle+|11\rangle+|22\rangle) \\
& Q=(|01\rangle\langle 01|+|12\rangle \langle 12|+|20\rangle\langle 20|), \\
& R=(|10\rangle\langle 10|+|21\rangle \langle 21|+|02\rangle\langle 02|). \label{eq:con}
\end{align}
Horodecki demonstrated that \\
\begin{center}
\begin{equation}
\rho_{\alpha}(0)=
\begin{cases}
\text{Separable for} \quad 2\leq \alpha \leq 3, \\
\text{Bound entangled for} \quad 3< \alpha \leq 4, \\
\text{Free entangled for} \quad 4< \alpha \leq 5.
\end{cases} \label{eq:condition}
\end{equation}
\end{center}
\subsection{State 2.} \label{case2}
Another well known bound entangled state investigated by Horodecki \cite{Horodecki_21,Horodecki_22} in $3 \otimes 3$ dimension is given as\\
\begin{center}
\begin{equation}
\varrho_\alpha={1 \over 8\alpha + 1}
\left[ \begin{array}{ccccccccc}
\alpha &0&0&0&\alpha &0&0&0& \alpha \\
0&\alpha& 0&0&0&0&0&0&0 \\
0&0&\alpha&0&0&0&0&0&0 \\
0&0&0&\alpha&0&0&0&0&0 \\
\alpha &0&0&0&\alpha&0&0&0& \alpha \\
0&0&0&0&0&\alpha&0&0&0 \\
0&0&0&0&0&0&{1+\alpha \over 2}&0&{\sqrt{1-\alpha^2} \over 2}\\
0&0&0&0&0&0&0&\alpha&0 \\
\alpha &0&0&0&\alpha&0&{\sqrt{1-\alpha^2} \over 2}&0&{1+\alpha \over 2}\\
\end{array}
\right ], \ \ \
\end{equation} \label{b22}
\end{center}
where $0<\alpha<1$.
\subsection{Reduced system dynamics}
In this subsection, we obtain reduced density matrix of qutrit-qutrit system. To begin with, let us consider the auxiliary qubit (C) in pure state as
\begin{equation}
|\phi\rangle=c_0|0\rangle+c_1|1\rangle \label{eq:21}
\end{equation}
with normalization condition
\begin{equation}
|c_0|^2+|c_1|^2=1 \label{eq:normal}
\end{equation}
where the probability amplitudes $c_{0}$ and $c_{1}$ are complex in general. \\
The qutrit (A)-qutrit (B) pair is initially prepared in a bound entangled state. The density matrix of the bound entangled state before the interaction with qubit (C) is given by (\ref{eq:b1}). Now we can obtain the composite density matrix of the open system, $\rho_{comp.}(0)$, as
\begin{equation}
\rho_{comp.}(0)=\rho_{\alpha}(0)\otimes \rho_c, \label{eq:25}
\end{equation}
where $\rho_c$ is the density matrix of the auxiliary qubit (C). The density matrix after the interaction at time $t$ is given by
\begin{equation}
\rho_{comp.}(t)=U(t)\rho_{comp.}(0)U^\dagger(t), \label{eq:26}
\end{equation}
where $U(t)$ is the unitary time evolution operator given by (\ref{eq:20}). All the matrices involved in Eq. (\ref{eq:26}) are of the same order, $18\times 18$: $\rho_{comp.}(0)$ and $U(t)$ are both $18\times 18$ matrices, and so is $\rho_{comp.}(t)$. Taking the partial trace of $\rho_{comp.}(t)$ over the basis of the auxiliary qubit (C), we get the reduced density matrix $\rho^{AB}$ of dimension $9\times 9$ as,
\begin{equation}
\rho^{AB}=\mathrm{Tr}_{C}[\rho_{comp.}(t)]. \label{eq:reduce_density}
\end{equation}
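As a cross-check of (\ref{eq:25})--(\ref{eq:reduce_density}), the following self-contained Python sketch evolves an arbitrary initial $9\times 9$ state and traces out the auxiliary qubit; the qutrit (A) $\otimes$ qutrit (B) $\otimes$ qubit (C) ordering and the Gell-Mann choice for $\sigma_B^{X,Y}$ are our assumptions.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

def reduced_qutrit_qutrit(rho_ab0, c0, c1, D, t):
    # rho_ab0: 9x9 initial qutrit-qutrit state; (c0, c1): amplitudes of qubit (C).
    lam1 = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]], dtype=complex)
    lam2 = np.array([[0, -1j, 0], [1j, 0, 0], [0, 0, 0]], dtype=complex)
    sx = np.array([[0, 1], [1, 0]], dtype=complex)
    sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
    h = np.kron(np.eye(3), D * (np.kron(lam1, sy) - np.kron(lam2, sx)))  # 18x18
    U = expm(-1j * h * t)
    rho_c = np.outer([c0, c1], np.conj([c0, c1]))
    rho_t = U @ np.kron(rho_ab0, rho_c) @ U.conj().T
    # Partial trace over the last (2-dimensional) factor, i.e. qubit (C).
    return rho_t.reshape(9, 2, 9, 2).trace(axis1=1, axis2=3)
\end{verbatim}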
Now we obtain the reduced density matrix for bound entangled state proposed by the Horodecki given by Eq. (\ref{eq:b1}). The reduced density matrix is given below
\begin{center}
\begin{equation}
\rho^{AB}=\left[\begin{array}{ccccccccc}
X_{11} & 0 & X_{13} & 0 & X_{15} & 0 & X_{17} & 0 & X_{19}\\
0 & X_{22} & 0 & X_{24} & 0 & 0 & 0 & 0 & 0 \\
X_{31} & 0 & X_{33} & 0 & X_{35} & 0 & 0 & 0 & X_{39} \\
0 & X_{42} & 0 & X_{44} & 0 & 0 & 0 & 0 & 0 \\
X_{51} & 0 & X_{53} & 0 & X_{55} & 0 & X_{57} & 0 & X_{59} \\
0 & 0 & 0 & 0 & 0 & X_{66} & 0 & X_{68} & 0 \\
X_{71} & 0 & 0 & 0 & X_{75} & 0 & X_{77} & 0 & X_{79} \\
0 & 0 & 0 & 0 & 0 & X_{86} & 0 & X_{88} & 0 \\
X_{91} & 0 & X_{93} & 0 & X_{95} & 0 & X_{97} & 0 & X_{99} \end{array}
\right ], \nonumber
\end{equation}
\end{center}
where
\begin{align}
&X_{11}=X_{19}=X_{91}=X_{99}=\frac{2}{21}, &&X_{13}=-X_{17}=X_{31}=-X_{39}=X_{71}=X_{79} \nonumber \\ && &=X_{93}=-X_{97}=\frac{2}{21}p, \nonumber \\
&X_{15}=X_{51}=X_{59}=X_{95}=\frac{2}{21}q, &&X_{22}=\frac{1}{21}[\alpha c_{0}^2-(\alpha -5)\frac{p^2}{c_{1}^{2}}+\alpha c_{1}^{2} q^2] , \nonumber \\
& X_{24}=X_{42}=-\frac{1}{21}[\alpha+(-5+\alpha)q]p, &&X_{33}=\frac{(7-\alpha)}{42}-(\alpha -3)\frac{s}{c_{0} c_{1}}-2(\alpha -5)c_{1}^{2}, \nonumber \\
&X_{35}=X_{53}=(-3+\alpha)r, &&X_{44}=-\frac{1}{21}(\alpha -5)c_{0}^{2}+\frac{(5-2\alpha)s c_{1}}{c_{0}}+\frac{5}{42}\nonumber \\
&X_{55}=\frac{1}{42}[(\alpha+2)c_{0}^{2}-(\alpha-7)c_{1}^{2}]-s, && X_{57}=X_{75}=(-2+\alpha)r, \nonumber \\
&X_{66}=\frac{5 c_{0}^{2}}{42}+\frac{(2\alpha-5)s c_{0}}{c_{1}}+\frac{1}{21}\alpha c_{1}^{2}, &&X_{68}=X_{86}=-\frac{1}{21}[-5+\alpha+\alpha q]p, \nonumber \\
&X_{77}=\frac{1}{21}\alpha c_{0}^{2}+[\frac{(\alpha+2)}{42} c_{1}^{2}+\frac{(\alpha-2)s c_{1}}{c_{0}}] , &&X_{88}=\frac{1}{21}[5c_{1}^2-((-5+\alpha)c_{0}^2+\alpha c_{1}^2)q^2] \nonumber \\ \label{reduced}
\end{align}
and \\
$p=\sin[\sqrt{2}Dt]c_{0}c_{1}$, $q=\cos[\sqrt{2}Dt]$, $r=\frac{1}{42}\sin[2\sqrt{2} Dt]c_{0}c_{1}$, $s=\frac{1}{42}\cos[2\sqrt{2} Dt]c_{0}c_{1}$. \\
Observing the reduced density matrix, we conclude that $c_{0}$, $D$ and $t$ are the key parameters which influence the entanglement between the two qutrits. Similarly, with the help of Eq. (\ref{eq:reduce_density}) we obtain the reduced density matrix corresponding to the other bound entangled state, given in subsection \ref{b22}.
\begin{figure*}
\caption{Plot of eigenvalues (Eig.) of reduction criterion with the parameters range $0\leq Dt \leq 5$ and $0 \leq \alpha \leq 1$.}
\label{2}
\end{figure*}
\begin{figure*}
\caption{Contour plot of Fig. 2; the state is distillable in the parameter ranges $1.59 \leq Dt \leq 3.75$ and $0 \leq \alpha \leq 3.439$.}
\label{3}
\end{figure*}
\section{Reduction, realignment criterion and negativity}
In this section, we discuss the reduction criterion and its ability to detect the separability and distillability of bipartite states. For the separability of a composite system AB, the following conditions should be satisfied
\begin{eqnarray}
\rho_{A}\otimes I-\rho \geq 0 \quad \text{and} \quad
I \otimes \rho_{B}-\rho \geq 0, \label{r1}
\end{eqnarray}
where $\rho_{A}=Tr_{B}(\rho)$, $\rho_{B}=Tr_{A}(\rho)$ and $\rho$ is the composite density matrix of the system AB. The states which violate condition (\ref{r1}) can be distilled. So, distillability is guaranteed whenever the following condition is satisfied \cite{Red}.
\begin{eqnarray}
\rho_{A}\otimes I-\rho \not\geq 0 \quad \text{or} \quad I\otimes \rho_{B}-\rho \not\geq 0. \label{r2}
\end{eqnarray}
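A direct numerical check of condition (\ref{r2}), i.e. whether one of the two operators has a negative eigenvalue, can be sketched in Python as follows (the qutrit-qutrit dimensions are assumed as defaults):
\begin{verbatim}
import numpy as np

def reduction_criterion_violated(rho, dA=3, dB=3):
    # True if rho_A (x) I - rho or I (x) rho_B - rho has a negative
    # eigenvalue, i.e. condition (r1) is violated and rho is distillable.
    r4 = rho.reshape(dA, dB, dA, dB)
    rhoA = r4.trace(axis1=1, axis2=3)        # Tr_B
    rhoB = r4.trace(axis1=0, axis2=2)        # Tr_A
    m1 = np.kron(rhoA, np.eye(dB)) - rho
    m2 = np.kron(np.eye(dA), rhoB) - rho
    return min(np.linalg.eigvalsh(m1).min(),
               np.linalg.eigvalsh(m2).min()) < 0
\end{verbatim}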
Further, we obtain the partial transpose and realigned matrix of the reduced density matrices to obtain the negativity and realignment by using the following matrices
\begin{eqnarray}
(\rho_{ij,kl}^{T})=\rho_{il,kj} \label{t}\\
(\rho_{ij,kl}^{R})=\rho_{ik,jl}. \label{r}
\end{eqnarray}
These matrices have been used to calculate the quantities defined as
\begin{align}
&N=\frac{||\rho^{T}||-1}{2}, &&R=\frac{||\rho^{R}||-1}{2}, \label{formulae}
\end{align}
where $||\cdot||$ denotes the trace norm of a matrix. The first quantity corresponds to the negativity \cite{locc,negativity,Perse1996,A.Peres,schidmt} and has been used to measure the free entanglement, while the second one has been used to detect the bound entanglement in the qutrit-qutrit system. Either $N>0$ or $R>0$ implies that the state is entangled. $N=0$ and $R>0$ implies that the state is PPT bound entangled, and $N>0$ corresponds to a free entangled state. However, there is no evidence for the existence of NPT bound entangled states \cite{NPT} as yet, but we cannot rule out this possibility.
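The quantities in (\ref{t})--(\ref{formulae}) are straightforward to evaluate numerically; a minimal Python sketch, with state 1 of subsection 3.1 as an assumed test input, is given below.
\begin{verbatim}
import numpy as np

def trace_norm(m):
    return np.sum(np.linalg.svd(m, compute_uv=False))

def negativity_and_realignment(rho, d=3):
    r4 = rho.reshape(d, d, d, d)                           # rho_{ij,kl}
    pt = r4.transpose(0, 3, 2, 1).reshape(d * d, d * d)    # rho_{il,kj}
    re = r4.transpose(0, 2, 1, 3).reshape(d * d, d * d)    # rho_{ik,jl}
    return (trace_norm(pt) - 1) / 2, (trace_norm(re) - 1) / 2

# Assumed test input: Horodecki state 1 with alpha = 3.5 (bound entangled range)
alpha, e = 3.5, np.eye(3)
psi = sum(np.kron(e[i], e[i]) for i in range(3)) / np.sqrt(3)
P = np.outer(psi, psi)
Q = sum(np.outer(np.kron(e[i], e[j]), np.kron(e[i], e[j]))
        for i, j in [(0, 1), (1, 2), (2, 0)])
S = sum(np.outer(np.kron(e[i], e[j]), np.kron(e[i], e[j]))
        for i, j in [(1, 0), (2, 1), (0, 2)])
rho = 2 / 7 * P + alpha / 21 * Q + (5 - alpha) / 21 * S
print(negativity_and_realignment(rho))   # expect N = 0 and R > 0 here
\end{verbatim}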
\section{Time evolution of negativity and realignment criterion}
\label{dist}
In this section, we first verify the calculation of the reduced density matrices at $Dt=0$ for state 1. The reduced density matrix of the two qutrits corresponding to state 1 is given by Eq. (\ref{reduced}).
Setting the parameters $Dt$ and $c_{0}$ to zero, the reduced density matrix maps to the initial state of the two qutrits given by Eq. (\ref{eq:b1}), which verifies the correctness of the reduced density matrix obtained in the calculation. Further, we plot the negativity and the realignment criterion; the result is shown in Fig. \ref{1}. The green graph represents the negativity (N), while the red graph represents the realignment criterion (R). The realignment criterion is negative for $2\leq \alpha \leq 3$, consistent with the separability of the state. For $3< \alpha \leq 4$ the realignment criterion is positive, which shows that the state is bound entangled. Further, for $4< \alpha \leq 5$ the negativity is positive, which shows that the state is free entangled. These results match the classification given by Eq. (\ref{eq:condition}).
Similarly, we verify the reduced density matrix obtained for state 2 by putting the parameter values $Dt=0$ and $c_{0}=0$. The reduced density matrix maps to the initial density matrix of state 2 given in subsection \ref{b22}, which proves that the reduced density matrix obtained is correct. Now, we plot both the negativity and the realignment criterion in the absence of DM interaction in Fig. \ref{4}. The realignment criterion achieves positive values for $0\leq \alpha \leq 1$ while the negativity is zero. These results show that initially state 2 of the two qutrits is bound entangled for $0\leq \alpha \leq 1$.
\begin{figure*}
\caption{Plot of negativity (N) and realignment criterion (R) for state 2 with the parameter value $c_{0}=0.2$.}
\label{5}
\end{figure*}
Further, in this section we detect the distillability of the Horodecki bound entangled states 1 and 2, by using the reduction criterion with the reduced density matrices of two qutrits in the presence of DM interaction. Next, we study the evolution of free entanglement and realignment criterion obtained by using Eq. (\ref{formulae}) for the bound entangled states 1 and 2 in these two cases. In case 1, we consider the bound entangled state described under the subsection 3.1 and in case 2, we do the same for another bound entangled state given in subsection 3.2.
\subsection{Case 1}
In this case, we consider state 1 given in subsection 3.1. Here, we express $c_{1}^{2}$ in Eq. (\ref{formulae}) in terms of $c_{0}^{2}$ by using the normalization condition given in Eq. (\ref{eq:normal}). Next, we check the distillability of the state by using the reduction criterion given in Eq. (\ref{r2}). The matrices in the reduction criterion, i.e., $(\rho_{A}\otimes I-\rho)$ and $(I\otimes \rho_{B}-\rho)$, incorporate the parameters $Dt$, $c_{0}$ and $\alpha$. The eigenvalues of these matrices have been calculated over the ranges $3\leq \alpha \leq 4$ and $0\leq Dt \leq 5$, with the parameter $c_{0}$ varying in the range $0\leq c_{0}\leq 1$. It is found that all eigenvalues remain positive, even for the maximum value of $c_{0}$ (i.e. $c_{0}=1$), so the reduction criterion given in Eq. (\ref{r2}) is not satisfied and the distillability of state 1 cannot be established.
\begin{figure*}
\caption{Plot of negativity (N) and realignment criterion (R) for state 2 with the parameter value $c_{0}=0.4$.}
\label{6}
\end{figure*}
\begin{figure*}
\caption{Plot of negativity (N) and realignment criterion (R) for state 2 with the parameter value $c_{0}=0.7$.}
\label{7}
\end{figure*}
\subsection{Case 2}
In this case, we consider state 2 given in subsection 3.2. Here, we again express $c_{1}^{2}$ in Eq. (\ref{formulae}) in terms of $c_{0}^{2}$ by using the normalization condition given in Eq. (\ref{eq:normal}). Further, we check the distillability of the state by using the reduced density matrix corresponding to this state in the presence of DM interaction. The distillability of the state is detected by using the reduction criterion given by Eq. (\ref{r2}). The matrices involved on the left-hand side of the reduction criterion, i.e., $(\rho_{A}\otimes I-\rho)$ and $(I\otimes \rho_{B}-\rho)$, incorporate the parameters $Dt$, $c_{0}$ and $\alpha$. The eigenvalues of these matrices have been calculated over the ranges $0\leq \alpha \leq 1$ and $0\leq Dt \leq 5$, with the parameter $c_{0}$ varying in the range $0\leq c_{0} \leq 1$. It is found that the eigenvalues of these matrices achieve negative values, and hence the reduction criterion given by Eq. (\ref{r2}) is satisfied for state 2. Our goal is to find the maximum range of the parameters $Dt$ and $\alpha$ for which the eigenvalues are negative. By simulation, we found these maximum ranges corresponding to the parameter value $c_{0}=0.7$. These eigenvalues are plotted in Fig. \ref{2} for the parameter ranges $0\leq Dt\leq 5$ and $0\leq \alpha\leq 1.0$, corresponding to the parameter value $c_{0}=0.7$. The corresponding contour plot of Fig. \ref{2} is depicted in Fig. \ref{3}.
With the help of this contour plot, we obtained the maximum ranges of the parameters $Dt$ and $\alpha$ for which state 2 is distillable. These ranges are $1.59\leq Dt\leq 3.75$ and $0\leq \alpha \leq 3.439$. The distillability of the state proves that the states are free entangled. Only free entangled states can be distilled into Bell-like pairs, which may further be used for quantum information applications. So, based on the parameter ranges $1.59\leq Dt\leq 3.75$ and $0\leq \alpha \leq 3.439$, we plot the negativity (N) and the realignment criterion (R) obtained from Eq. (\ref{formulae}) for different values of the parameter $c_{0}$ in Figs. \ref{5}, \ref{6} and \ref{7}. First, we fix the parameter $c_{0}=0.2$, vary
$Dt$ within the range $1.5 \leq Dt \leq 3.7$ and $\alpha$ within $0\leq \alpha \leq 3.439$, and plot the results in Fig. \ref{5}. Observing Fig. \ref{5}, it is concluded that for $\alpha=0$ the realignment criterion achieves positive values, so initially the state is bound entangled, but as the value of $\alpha$ advances the states become free and hence can easily be distilled. As the value of the parameter $Dt$ increases, the initial amplitudes of both the realignment criterion and the negativity fluctuate between $0.04$ and $0.05$. At a particular value $\alpha=0.02$, the realignment criterion becomes negative, so it fails to detect the bound entanglement. However, in the region of negative realignment criterion, we cannot rule out the possibility of NPT bound entangled states. We observe that for the parameter values $c_{0}=0.2$, $Dt=3.0$ the free entanglement in the states vanishes after $\alpha=0.12$.
Next, we increase the value of the parameter $c_{0}$ to $0.4$ and sketch the graphs against $Dt$ and $\alpha$ in Fig. \ref{6}. We find that as the value of the parameter $c_{0}$ increases from $0.2$ to $0.4$, the initial amplitudes of both the realignment criterion and the negativity increase and fluctuate within the limits $0.08$ to $0.10$. Initially the states are bound entangled, but as the value of the parameter $Dt$ increases the states become free and hence can be distilled. Again, we find that for some values of $\alpha$ the realignment criterion fails to detect the bound entangled states; these values are $(\alpha=0.07,0.04,0.08,0.06)$. Beyond these values the realignment criterion becomes negative and the possibility of NPT bound entangled states cannot be ruled out.
Further, we continue our study for $c_{0}=0.7$ and the results are shown in Fig. \ref{7}. Initially the states are bound entangled, as the realignment criterion is positive for $\alpha=0$. As the value of the parameter $\alpha$ advances, the states become free and hence distillable. The amplitudes of both the negativity and the
realignment criterion fluctuate within the range $0.12$ to $0.20$. We have found situations in which the states become completely free and hence fully distillable; the corresponding parameter values are $(c_{0}=0.7, Dt=1.5)$, $(c_{0}=0.7, Dt=2.5)$ and $(c_{0}=0.7, Dt=2.8)$. However, we detect values of the parameter $\alpha$ where the realignment criterion becomes negative; these values are $\alpha=0.07, 0.24, 0.14$. Beyond these values of $\alpha$ the realignment criterion fails, and in the negative portion of the realignment criterion the possibility of NPT bound entangled states cannot be ruled out.
\section{Conclusion}
In this article, we presented a method to free and distill bound entangled states by using the DM interaction. We consider two qutrits initially prepared in Horodecki bound entangled states and one auxiliary qubit. The auxiliary qubit is prepared in a pure state and interacts with one of the qutrits through the DM interaction. By varying the probability amplitude of the auxiliary qubit and the DM interaction strength, the states can be freed and further distilled. We have used the reduction criterion, whose violation guarantees distillability, and obtained the ranges of the parameters $c_{0}$ and $Dt$ for which the states are distillable. The realignment criterion is used to detect the bound entangled nature of the states, and the negativity is used to measure the free entanglement in the states. By varying the values of the parameters $c_{0}$, $\alpha$ and $Dt$, the Horodecki bound entangled states can be converted into free entangled states, which are easily distillable. We have found that the DM interaction can be used to free the Horodecki bound entangled state 2. For certain parameter values of $c_{0}$ and $Dt$ it is completely free and hence distillable; these values are $(c_{0}=0.7, Dt=1.5)$, $(c_{0}=0.7, Dt=2.5)$ and $(c_{0}=0.7, Dt=2.8)$. As the value of the parameter $c_{0}$ increases, the initial amplitudes of both the negativity and the realignment criterion increase. We also found that the realignment criterion fails at particular values of $\alpha$ for given values of $c_{0}$ and $Dt$, so in the negative portion of the realignment criterion the possibility of NPT bound entangled states \cite{NPT} exists. We hope that this method of converting bound to free entanglement can be useful in quantum information processing and that a variety of bound entangled states can be checked under the DM interaction.
\section*{Acknowledgments}
We would like to thank the anonymous reviewers for going through the manuscript very carefully and suggesting many changes which have greatly enhanced the clarity and presentation of the results.
\end{document}
|
\begin{document}
\preprint{APS/123-QED}
\title{A global estimation of the lower bound of the privacy amplification term for
decoy-state quantum key distribution
}
\author{Haodong Jiang}
\affiliation{State Key Laboratory of Mathematical Engineering and Advanced Computing, Zhengzhou, Henan, China }
\author{Ming Gao}
\email{[email protected]}
\affiliation{State Key Laboratory of Mathematical Engineering and Advanced Computing, Zhengzhou, Henan, China }
\author{Hong Wang}
\affiliation{State Key Laboratory of Mathematical Engineering and Advanced Computing, Zhengzhou, Henan, China }
\author{Hongxin Li}
\affiliation{State Key Laboratory of Mathematical Engineering and Advanced Computing, Zhengzhou, Henan, China }
\author{Zhi Ma}
\email{ma\[email protected]}
\affiliation{State Key Laboratory of Mathematical Engineering and Advanced Computing, Zhengzhou, Henan, China }
\date{\today}
\begin{abstract}
The privacy amplification term, whose lower bound needs to be estimated with the decoy-state method, plays a positive role in the secure key rate formula for decoy-state quantum key distribution.
In previous work, the yield and the bit error rate of the single-photon state are estimated separately to obtain this lower bound.
In this work, we for the first time take the privacy amplification term as a whole when estimating this lower bound. The mathematical description of the correlation between the yield and the bit error rate of the single-photon state is given with just two unknown variables.
Based on this, we obtain the global estimation of this lower bound for both the BB84 protocol and the measurement-device-independent protocol.
The results of numerical simulation show that the global estimation can significantly improve the performance of quantum key distribution.
\begin{description}
\item[PACS numbers]
03.67.Dd, 42.81.Gs, 03.67.Hk
\end{description}
\end{abstract}
\pacs{Valid PACS appear here}
\maketitle
\section{\label{sec:level1}Introduction}
Quantum key distribution (QKD), based on the laws of quantum physics, can theoretically provide unconditionally secure communication \cite{bennett1984quantum,mayers2001unconditional,ekert1991quantum}.
However, there is a gap between its theory and practice due to the imperfections in real-life implementations. In particular, the eavesdropper (Eve) can launch attacks aiming at the imperfect single-photon source and the limited detector efficiency in practical QKD systems \cite{brassard2000limitations,pns2002quantum,zhao2008quantum,xu2010experimental,weier2011quantum,jain2011device}.
By utilizing the decoy-state method \cite{hwang2003quantum,lo2005decoy,wang2005beating}, the practical QKD setups with an imperfect single-photon source can be still secure.
To deal with the threat coming from the detectors \cite{lydersen2010hacking}, several approaches have been proposed. One is device-independent QKD (DI-QKD) \cite{acin2007device}, whose security is based on the violation of a Bell inequality. However, DI-QKD cannot be applied to existing practical systems because a loophole-free Bell test is still unavailable at the moment. Another one is measurement-device-independent quantum key distribution (MDI-QKD) \cite{braunstein2012side,lo2012measurement}, based on the idea of entanglement swapping, which can remove all detector side-channel attacks.
The security of the BB84 protocol with imperfect devices is analyzed in \cite{GLLP2004security,inamori2007unconditional,scarani2008quantum,cai2009finite,lim2014concise}. The security of the MDI-QKD protocol is studied in \cite{lo2012measurement,tomamichel2012tight,curty2014finite}. Useful formulas are given to calculate the secure key rate for the practical BB84 and MDI-QKD protocols. The privacy amplification term makes a positive contribution to these secure key rate formulas, but it cannot be measured directly in the experiment.
In the asymptotic case, the yield of the single-photon state is basis independent \cite{wei2013decoy,wang2013three,yu2013decoy}. Then the privacy amplification term can be calculated in just one basis.
In previous work \footnote{For simplicity, the analysis in Sec. \ref{sec:level1} is for the BB84 protocol. The same analysis for the MDI-QKD protocol is presented in Sec. \ref{sec:level3}.}, the lower bound of this term is obtained by estimating the lower bound of the yield \(Y_1\) of the single-photon state and the upper bound of the bit error rate \(e_1\) of the single-photon state separately.
The lower bound of the yield \(Y_1\) is estimated from the gain equations, while the upper bound of the bit error rate \(e_1\) is estimated from the quantum bit error rate (QBER) equations.
The yield \(Y_i\) of the \(i\)-photon state, which appears in both the gain equations and the QBER equations, is the link between the estimation of the lower bound of \(Y_1\) and that of the upper bound of \(e_1\).
The minimum of \(Y_1\) is reached when \(Y_i\) takes one particular value, but the maximum of \(e_1\) may be reached when \(Y_i\) takes another value.
That is to say, the lower bound of \(Y_1\) and the upper bound of \(e_{1}\) may not be reached simultaneously.
Thus, the separate estimation can only give a lower bound of the privacy amplification term instead of its minimum.
Inspired by Wang's method \cite{wang2005beating,wang2013three,yu2013three,zhou2014tightened}, we give a mathematical description of the correlation between \(Y_1\) and \(e_{1}\) with just two unknown variables.
In particular, we will show that globally estimating the lower bound of the privacy amplification term is equivalent to finding the minimum of a bivariate continuous function on a closed area. Thus the minimum of the privacy amplification term can be attained with the global estimation and a higher secure key rate can be achieved.
The article is organized as follows. Section \ref{sec:level2} introduces the global estimation of the lower bound of the privacy amplification term for BB84 protocol. The global estimation for MDI-QKD protocol will be discussed in section \ref{sec:level3}. We conclude our work in section \ref{sec:level4}.
\section{\label{sec:level2} The global estimation of the lower bound of the privacy amplification term for BB84 protocol}
The privacy amplification term for the BB84 protocol is given by \({Y_1}[1 - H({e_1})]\), where $Y_1$ and $e_1$ are, respectively, the yield and the bit error rate of the single-photon state. In this section, we first mathematically characterize the correlation between $Y_1$ and $e_1$. Then the minimum of \({Y_1}[1 - H({e_1})]\) is given with the method of global estimation. Lastly, a numerical simulation is performed to compare the performance of the QKD protocol under the global estimation and under the separate estimation.
\subsection{\label{sec:level2A} The correlation between $Y_1$ and $e_1$}
Given a weak coherent state source which sends three different kinds of optical pulses with intensities \(\omega\), \(\upsilon\) and \(\mu\) \((0 = \omega < \upsilon < \mu )\), the overall gains, i.e. the probabilities for Bob to obtain a detection event per pulse, are given by the following three equations,
\begin{eqnarray}\label{eq:gains}
{Q_\mu } &&= \sum\limits_{i = 0}^\infty {{e^{-\mu} }\frac{{{\mu ^i}}}{{i!}}{Y_i}}, \\
{Q_\upsilon } &&= \sum\limits_{i = 0}^\infty {{e^{-\upsilon} }\frac{{{\upsilon ^i}}}{{i!}}{Y_i}}, \\
&&{Q_\omega } = {Y_0},
\end{eqnarray}
where \(Q_\nu\) and \(Y_i\) are, respectively, the overall gain with intensity \(\nu\) \((\nu \in \{ \omega ,\upsilon ,\mu \} )\) and the yield of \(i\)-photon state.
We denote \(E_\nu\) to be the overall QBER with intensity \(\nu\), \(e_i\) to be the bit error rate of \(i\)-photon state. The overall QBER equations can be given by
\begin{eqnarray}\label{eq:QBERs}
{E_\mu}{Q_\mu } &&= \sum\limits_{i = 0}^\infty {{e^{-\mu} }\frac{{{\mu ^i}}}{{i!}}{e_i}{Y_i}}, \\
{E_\upsilon}{Q_\upsilon } &&= \sum\limits_{i = 0}^\infty {{e^{-\upsilon} }\frac{{{\upsilon ^i}}}{{i!}}{e_i}{Y_i}}, \\
&&{E_\omega }{Q_\omega } ={e_0} {Y_0}.
\end{eqnarray}
It is important to note that \(Y_0\) is equal to the gain \(Q_\omega\) when Alice does not send any optical pulse, which includes the detector dark count and other background contributions. As the background is random, we assume that \({E_\omega }={e_0}=0.5\).
As three equations can only fix three variables, we temporarily take \(Y_i\) (\(i\ge 3\)) as known variables. Then three gain equations can be solved according to Cramer's rule. \(Y_1\) is given by
\begin{eqnarray}\label{eq:Y1old}
{Y_1} = &&\frac{\mu }{{\upsilon (\mu - \upsilon )}}({e^\upsilon }{Q_\upsilon } - {Y_0}) - \frac{\upsilon }{{\mu (\mu - \upsilon )}}({e^\mu }{Q_\mu } - {Y_0}) +\nonumber \\
&&\sum\limits_{i = 3}^\infty {\frac{{({\mu ^{i - 1}}\upsilon - {\upsilon ^{i - 1}}\mu )}}{{i!(\mu - \upsilon )}}} {Y_i}.
\end{eqnarray}
Similarly, \({e_1}{Y_1}\) can be gained by
\begin{eqnarray}\label{eq:e1Y1old}
{e_1}{Y_1}&& = \frac{\mu }{{\upsilon (\mu - \upsilon )}}({e^\upsilon }{E_\upsilon }{Q_\upsilon } - {e_0}{Y_0}) - \frac{\upsilon }{{\mu (\mu - \upsilon )}}\nonumber \\
&&({e^\mu }{E_\mu }{Q_\mu } - {e_0}{Y_0}) +
\sum\limits_{i = 3}^\infty {\frac{{({\mu ^{i - 1}}\upsilon - {\upsilon ^{i - 1}}\mu )}}{{i!(\mu - \upsilon )}}{e_i}} {Y_i}.
\end{eqnarray}
From equations (\ref{eq:Y1old}) and (\ref{eq:e1Y1old}), we see that infinitely many variables \(Y_i\) (\(i\ge 3\)) simultaneously influence the values of \(Y_1\) and \({e_1}{Y_1}\), so the privacy amplification term is influenced by infinitely many variables.
It is computationally infeasible to find the minimum of a function of infinitely many variables.
Fortunately, inspired by Wang's method \cite{wang2005beating,wang2013three}, we find a way to reduce the number of unknown variables to two.
We define a state of which the density operator is \(\rho = \sum\limits_{i = 3}^\infty {\frac{{({\mu ^{i - 1}}\upsilon - {\upsilon ^{i - 1}}\mu )}}{{\Omega i!(\mu - \upsilon )}}} \left| i \right\rangle \left\langle i \right|\) \((\Omega = \sum\limits_{i = 3}^\infty {\frac{{({\mu ^{i - 1}}\upsilon - {\upsilon ^{i - 1}}\mu )}}{{i!(\mu - \upsilon )}}} >0)\). The yield and the bit error rate of this state can be given by
\begin{eqnarray}
{Y_\rho } &&= \sum\limits_{i = 3}^\infty {\frac{{({\mu ^{i - 1}}\upsilon - {\upsilon ^{i - 1}}\mu )}}{{i!(\mu - \upsilon )\Omega }}} {Y_i},\label{eq:YP}\\
{e_\rho }{Y_\rho } &&= \sum\limits_{i = 3}^\infty {\frac{{({\mu ^{i - 1}}\upsilon - {\upsilon ^{i - 1}}\mu )}}{{i!(\mu - \upsilon )\Omega }}} {e_i}{Y_i}.\label{eq:epYP}
\end{eqnarray}
Then equation (\ref{eq:Y1old}) and equation (\ref{eq:e1Y1old}) can be rewritten as
\begin{eqnarray}
{Y_1} = &&\frac{\mu }{{\upsilon (\mu - \upsilon )}}({e^\upsilon }{Q_\upsilon } - {Y_0}) - \frac{\upsilon }{{\mu (\mu - \upsilon )}}({e^\mu }{Q_\mu } - {Y_0}) \nonumber \\
&&+{\Omega}{Y_\rho },\label{eq:Y1}\\
{e_1}{Y_1}&& = \frac{\mu }{{\upsilon (\mu - \upsilon )}}({e^\upsilon }{E_\upsilon }{Q_\upsilon } - {e_0}{Y_0}) - \frac{\upsilon }{{\mu (\mu - \upsilon )}}\nonumber \\
&&({e^\mu }{E_\mu }{Q_\mu } - {e_0}{Y_0}) +{\Omega}{e_\rho }{Y_\rho }.\label{eq:e1Y1}
\end{eqnarray}
Thus ${Y_1}$ and ${e_1}{Y_1}$ are determined by the gains and QBERs, which can be measured in the experiment, together with the yield and the bit error rate of the state \(\rho\), which cannot.
The state \(\rho\) is the link between the calculations of ${Y_1}$ and ${e_1}{Y_1}$. The yield ${Y_\rho }$ of the state \(\rho\), as an unknown variable, simultaneously influences the estimations of both ${Y_1}$ and ${e_1}$. In \cite{hayashi2007general}, ${Y_\rho }$ is set to 0 to get the lower bound of ${Y_1}$, while ${e_\rho }$ and ${Y_\rho }$ are both set to 1 to get the upper bound of ${e_1}$. This leads to a contradiction: ${Y_\rho }$ cannot be 0 and 1 simultaneously.
The privacy amplification term is \({Y_1}[1-H(e_1)]\), which is a continuous bivariate function of ${Y_\rho }$ and ${e_\rho }$. The minimum of a continuous function on a closed region is attained. This is one reason why we should consider the global lower bound of \({Y_1}[1-H(e_1)]\) instead of calculating the lower bound of ${Y_1}$ and the upper bound of $e_1$ separately. In previous work \cite{lo2005decoy,wang2005beating,ma2005practical,hayashi2007general}, the lower bound of ${Y_1}$ is obtained from the gain equations alone. In fact, ${Y_1}$ also appears in the QBER equations, where its information is not extracted. This is another motivation for considering the global lower bound of \({Y_1}[1-H(e_1)]\).
\subsection{\label{sec:level2B} The global lower bound of \({Y_1}[1-H(e_1)]\)}
According to previous work \cite{lo2005decoy,wang2005beating,ma2005practical,hayashi2007general}, the most accurate estimations of \(Y_1\) and \(e_1\) are given by
\begin{eqnarray}
{Y_1}\ge{Y_1^L} = &&\frac{\mu }{{\upsilon (\mu - \upsilon )}}({e^\upsilon }{Q_\upsilon } -{Y_0})\nonumber\\
&& - \frac{\upsilon }{{\mu (\mu - \upsilon )}}({e^\mu }{Q_\mu } - {Y_0}) ,\label{eq:Y1L}\\
{e_1}\le{e_1^U} &&= \frac{({e^\upsilon }{E_\upsilon }{Q_\upsilon }- {e_0}{Y_0})}{{\upsilon}{Y_1^L}}.\label{eq:e1Y1U}
\end{eqnarray}
According to the corollary in the appendix, the global lower bound of \({Y_1}[1-H(e_1)]\) is given by
\begin{eqnarray}\label{eq:globalLBB84}
{Y_1}[1-H&&(e_1)]\ge({Y_1^L}+\theta)[1-H(\frac{{e_1^U}{Y_1^L}}{{Y_1^L}+\theta})],\nonumber\\
\theta=&&\frac{1}{{\mu (\mu - \upsilon )}}[\upsilon ({e^\mu }{E_\mu }{Q_\mu } - {e_0}{Y_0})\nonumber\\
&&-\mu ({e^\upsilon }{E_\upsilon }{Q_\upsilon } - {e_0}{Y_0})]>0.
\end{eqnarray}
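As an illustration, equation (\ref{eq:globalLBB84}) is straightforward to evaluate numerically once \(Y_1^L\), \(e_1^U\) and \(\theta\) are known. The following is a minimal Python sketch; the function names are ours and are used only for illustration:
\begin{verbatim}
from math import log2

def H(x):
    # binary Shannon entropy H(x) = -x*log2(x) - (1-x)*log2(1-x)
    if x <= 0.0 or x >= 1.0:
        return 0.0
    return -x*log2(x) - (1.0 - x)*log2(1.0 - x)

def global_lower_bound(y1_low, e1_up, theta):
    # global bound: (Y1^L + theta) * [1 - H(e1^U * Y1^L / (Y1^L + theta))]
    y1_g = y1_low + theta
    e1_g = e1_up * y1_low / y1_g
    return y1_g * (1.0 - H(e1_g))
\end{verbatim}
The separate estimation corresponds to \texttt{global\_lower\_bound(y1\_low, e1\_up, 0)}; any \(\theta>0\) strictly increases the bound.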
To make a clear comparison, we denote \((Y_1^G,e_1^G)\) as the point where the minimum is achieved. Corresponding to equation (\ref{eq:Y1L}) and equation (\ref{eq:e1Y1U}), \(Y_1^G\) and \(e_1^G\) are given by
\begin{eqnarray}
{Y_1^G} = {Y_1^L}+\theta,\label{eq:Y1G}\\
{e_1^G} = \frac{{e_1^U}{Y_1^L}}{{Y_1^L}+\theta}.\label{eq:e1G}
\end{eqnarray}
Here \(\theta\) can be interpreted as the information about \(Y_1\) contained in the QBER equations, which is discarded in the separate estimation. By considering the lower bound of the privacy amplification term globally, we successfully extract it.
\subsection{\label{sec:level2C} Numerical simulation for BB84 protocol}
With the observed gains and error rates, the final secure key rate can be calculated \cite{GLLP2004security} by
\begin{eqnarray}
{R}\ge{p_1^{\mu}}Y_1[1-H(e_1)]-{Q_\mu}fH(E_\mu)\label{eq:key1},
\end{eqnarray}
where \(p_1^{\mu}\) is the probability that Alice sends a single-photon state pulse corresponding to signal state \(\mu\); \(f\) is the error correction inefficiency; \(H(x)=- x{\log _2}(x) - (1 - x){\log _2}(1 - x)\) is the binary Shannon entropy function.
For a fair comparison, we use the same parameters as in \cite{yu2013three,zhou2014tightened}, summarized in table \ref{tab:table1}. For simplicity, the detection efficiency is absorbed into the overall channel transmission, so we only need to assume a 100\% detection efficiency at Bob's side.
\begin{table}[htbp!]
\caption{\label{tab:table1}
List of parameters for numerical simulation}
\begin{ruledtabular}
\begin{tabular}{cccc}
\textrm{\(e_0\)}&
\textrm{\(f\)}&
\textrm{\({p_d}\)}&
\textrm{\(e_d\)}\\
\colrule
0.5 & 1.16 & \text{$3\times{10^{-6}}$} & \text{$1.5\%$}\\
\end{tabular}
\end{ruledtabular}
\end{table}
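For readers who wish to reproduce the curves qualitatively, the following sketch evaluates the key rate (\ref{eq:key1}) with both the separate and the global estimation, reusing \texttt{H} and \texttt{global\_lower\_bound} from the sketch in section \ref{sec:level2B}. The channel model (transmittance \(\eta\), yields \(Y_i=1-(1-Y_0)(1-\eta)^i\)) and the intensities \(\mu=0.6\), \(\upsilon=0.1\) are our assumptions for illustration; only the parameters of table \ref{tab:table1} are taken from the text:
\begin{verbatim}
from math import exp, factorial

e0, f, pd, ed = 0.5, 1.16, 3e-6, 0.015   # table 1
mu, ups = 0.6, 0.1                       # assumed signal/decoy intensities
eta, Y0 = 0.1, 2*pd                      # assumed transmittance and background yield

def gain_qber(intensity):
    # asymptotic decoy-state channel model (our assumption):
    # Y_i = 1-(1-Y0)(1-eta)^i,  e_i = (e0*Y0 + ed*(Y_i-Y0))/Y_i,  Poissonian source
    Q, EQ = 0.0, 0.0
    for i in range(50):
        Yi = 1.0 - (1.0 - Y0)*(1.0 - eta)**i
        ei = (e0*Y0 + ed*(Yi - Y0))/Yi
        pi = exp(-intensity)*intensity**i/factorial(i)
        Q += pi*Yi
        EQ += pi*ei*Yi
    return Q, EQ/Q

Qmu, Emu = gain_qber(mu)
Qup, Eup = gain_qber(ups)
Y1L = mu/(ups*(mu-ups))*(exp(ups)*Qup - Y0) - ups/(mu*(mu-ups))*(exp(mu)*Qmu - Y0)
e1U = (exp(ups)*Eup*Qup - e0*Y0)/(ups*Y1L)
theta = (ups*(exp(mu)*Emu*Qmu - e0*Y0) - mu*(exp(ups)*Eup*Qup - e0*Y0))/(mu*(mu-ups))
p1 = exp(-mu)*mu                         # single-photon probability of the signal state
R_sep = p1*Y1L*(1 - H(e1U)) - Qmu*f*H(Emu)
R_glo = p1*global_lower_bound(Y1L, e1U, theta) - Qmu*f*H(Emu)
print(R_sep, R_glo)                      # R_glo >= R_sep
\end{verbatim}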
The ratios of the estimations of \(Y_1\) with two methods (equation (\ref{eq:Y1L}) and equation (\ref{eq:Y1G})) to the asymptotic limit calculated with the infinite-intensity decoy-state method are shown in figure \ref{fig:Y1}. The ratios of the asymptotic limit of \(e_1\) to the estimations with two methods (equation (\ref{eq:e1Y1U}) and equation (\ref{eq:e1G})) are shown in figure \ref{fig:e1}. The ratios of the secure key rates computed with two methods (separate estimation and global estimation) to the asymptotic limit are shown in figure \ref{fig:key1}.
From the results, we can see that tighter estimations of \(Y_1\) and \(e_1\) are obtained with the global estimation method. Thus, higher secure key rates are achieved.
\begin{figure}
\caption{\label{fig:Y1} Ratios of the estimations of \(Y_1\) obtained with the separate estimation (equation (\ref{eq:Y1L})) and the global estimation (equation (\ref{eq:Y1G})) to the asymptotic limit.}
\end{figure}
\begin{figure}
\caption{\label{fig:e1} Ratios of the asymptotic limit of \(e_1\) to the estimations obtained with the separate estimation (equation (\ref{eq:e1Y1U})) and the global estimation (equation (\ref{eq:e1G})).}
\end{figure}
\begin{figure}
\caption{\label{fig:key1} Ratios of the secure key rates computed with the separate estimation and the global estimation to the asymptotic limit.}
\end{figure}
\section{\label{sec:level3} The global estimation of the lower bound of the privacy amplification term for MDI-QKD protocol}
For MDI-QKD protocol, the secure key rate is gained \cite{lo2012measurement} by
\begin{eqnarray}\label{eq:MDIsecure key rate}
R \ge p_{11}^zY_{11}^z[1 - H(e_{11}^x)] - Q_{\mu_a \mu_b }^zfH(E_{\mu_a \mu_b }^z),
\end{eqnarray}
where \(p_{11}^z\) is the probability that Alice and Bob simultaneously send single-photon state pulses corresponding to signal state in \(z\) basis; \(Q_{\mu_a \mu_b }^z\) and \(E_{\mu_a \mu_b }^z\) are the gain and QBER when Alice and Bob simultaneously send signal state pulses; \(Y_{11}^z\) and \(e_{11}^x\) are the yield in \(Z\) basis and the bit error rate in \(X\) basis when Alice and Bob simultaneously send single-photon state pulses.
All quantities in (\ref{eq:MDIsecure key rate}) can be measured in the experiment except \(Y_{11}^z\) and \(e_{11}^x\).
So the major task in the calculation of secure key rate is estimating the lower bound of \(Y_{11}^z[1-H(e_{11}^x)]\). In previous work, to get the lower bound of \(Y_{11}^z[1-H(e_{11}^x)]\), the lower bound of \(Y_{11}^z\) and the upper bound of \(e_{11}^x\) are calculated separately.
In fact, \({Y_{11}^z}\) is equal to \(Y_{11}^x\) in the asymptotic setting according to \cite{wang2013three}.
As a result, we will temporarily not distinguish the basis of $Y_{11}$ and $e_{11}$.
We will consider the lower bound of \(Y_{11}[1-H(e_{11})]\) as a whole.
Similarly, in this section we will first introduce the mathematical description of the correlation between $Y_{11}$ and $e_{11}$. Then the global lower bound of \({Y_{11}}[1-H(e_{11})]\) is calculated. Lastly, the results of the numerical simulation are given. The following work is based on the three-intensity decoy-state MDI-QKD protocol \cite{yu2013three}.
\subsection{\label{sec:level3A} The correlation between $Y_{11}$ and $e_{11}$}
For the MDI-QKD protocol, the gain and QBER when Alice (Bob) sends a pulse with intensity \(q_a\) (\(q_b\)) are given by
\begin{eqnarray}
{Q_{{q_a} {q_b} }} &&= \sum\limits_{i,j = 0}^\infty {{e^{ - ({q_a} + {q_b} )}}\frac{{{{q_a} ^i}{{q_b} ^j}}}{{i!j!}}} {Y_{ij}},\label{eq:Quv}\\
{E_{{q_a} {q_b} }}{Q_{{q_a} {q_b} }} &&= \sum\limits_{i,j = 0}^\infty {{e^{ - ({q_a} + {q_b})}}\frac{{{{q_a} ^i}{{q_b} ^j}}}{{i!j!}}} {e_{ij}}{Y_{ij}},\label{eq:EuvQuv}
\end{eqnarray}
where \(Y_{ij}\) and \(e_{ij}\) are the yield and the bit error rate when Alice (Bob) sends an \(i\)-photon (\(j\)-photon) state pulse.
Given two weak coherent state sources which send three different kinds of optical pulses with intensities \((0={\omega_a}<{\upsilon_a}<{\mu_a})\) and \((0={\omega_b}<{\upsilon_b}<{\mu_b})\), we eliminate the unknown variables \(Y_{0i}\) and \(Y_{j0}\) and obtain
\begin{eqnarray}\label{eq:Qu1v1}
{e^{(\mu_a + \mu_b )}}{{\tilde Q}_{\mu_a \mu_b }} = \sum\limits_{i,j = 1}^\infty {\frac{{{\mu_a ^i}{\mu_b ^j}}}{{i!j!}}} {Y_{ij}},\\
{e^{(\mu_a + \upsilon_b )}}{{\tilde Q}_{\mu_a \upsilon_b }} = \sum\limits_{i,j = 1}^\infty {\frac{{{\mu_a ^i}{\upsilon_b ^j}}}{{i!j!}}} {Y_{ij}},\label{eq:Quv1:2}\\
{e^{(\upsilon_a + \mu_b )}}{{\tilde Q}_{\upsilon_a \mu_b }} = \sum\limits_{i,j = 1}^\infty {\frac{{{\upsilon_a ^i}{\mu_b ^j}}}{{i!j!}}} {Y_{ij}},\label{eq:Quv1:3}\\
{e^{(\upsilon_a + \upsilon_b )}}{{\tilde Q}_{\upsilon_a \upsilon_b }} = \sum\limits_{i,j = 1}^\infty {\frac{{{\upsilon_a ^i}{\upsilon_b ^j}}}{{i!j!}}} {Y_{ij}},\label{eq:Quv1:4}
\end{eqnarray}
where \({{\tilde Q}_{{\mu _1}{\mu _2}}}\ ({\mu _1} \in \{ \mu_a ,\upsilon_a \},{\mu _2}\in\{\mu_b ,\upsilon_b \} )\) is given by
\begin{eqnarray}\label{eq:Qu1v12}
{{\tilde Q}_{{\mu _1}{\mu _2}}} =&& {Q_{{\mu _1}{\mu _2}}} + {e^{ - ({\mu _1} + {\mu _2})}}{Q_{\omega_a \omega_b }} \nonumber\\
&&-{e^{ - {\mu _1}}}{Q_{\omega_a {\mu _2}}} - {e^{ - {\mu _2}}}{Q_{{\mu _1}\omega_b }}.
\end{eqnarray}
According to \cite{yu2013three}, \(Y_{11}\) can be solved from equations (\ref{eq:Quv1:2})--(\ref{eq:Quv1:4}),
\begin{eqnarray}\label{eq:Y11}
&&{Y_{11}} ={Y_{11}^L}+ \sum\limits_{(i + j) \ge 4} {\frac{{{\Upsilon _{i,j}}{Y_{i,j}}}}{{i!j!({\mu _a} - {\upsilon _a})({\mu _b} - {\upsilon _b})}}},\\
&&{\Upsilon _{i,j}} = \upsilon _a^{i - 1}\mu _b^{j - 1}{\upsilon _b}({\mu _a} - {\upsilon _a}) + \mu _a^{i - 1}\upsilon _b^{j - 1}{\upsilon _a}({\mu _b} - {\upsilon _b}) \label{Y11}\nonumber\\
&&- \upsilon _a^{i - 1}\upsilon _b^{j - 1}({\mu _a}{\mu _b} - {\upsilon _a}{\upsilon _b})>0,\nonumber\\
&&{Y_{11}^L}=\frac{1}{{({\mu _a} - {\upsilon _a})({\mu _b} - {\upsilon _b})}}(\frac{{{e^{({\upsilon _a} + {\upsilon _b})}}({\mu _a}{\mu _b} - {\upsilon _a}{\upsilon _b})}}{{{\upsilon _a}{\upsilon _b}}}{\tilde Q_{{\upsilon _a}{\upsilon_b}}}- \nonumber\\
&& \frac{{{e^{({\mu _a} + {\upsilon _b})}}{\upsilon _a}({\mu _b} - {\upsilon _b})}}{{{\mu _a}{\upsilon _b}}}{\tilde Q_{{\mu _a}{\upsilon _b}}}
- \frac{{{e^{({\upsilon _a} + {\mu _b})}}{\upsilon _b}({\mu _a} - {\upsilon _a})}}{{{\upsilon _a}{\mu _b}}} {\tilde Q_{{\upsilon _a}{\mu _b}}}).\nonumber\\\label{Y11L}
\end{eqnarray}
Similarly, \(e_{11}\) can be solved from the corresponding QBER equations,
\begin{eqnarray}\label{eq:e11}
&&{e_{11}}{Y_{11}} = ({e_{11}}{Y_{11}})^L+ \sum\limits_{(i + j) \ge 4} {\frac{{{e_{i,j}}{\Upsilon _{i,j}}{Y_{i,j}}}}{{i!j!({\mu _a} - {\upsilon _a})({\mu _b} - {\upsilon _b})}}},\\
&&({e_{11}}{Y_{11}})^L=\frac{1}{{({\mu _a} - {\upsilon _a})({\mu _b} - {\upsilon _b})}}(\frac{{{e^{({\upsilon _a} + {\upsilon _b})}}({\mu _a}{\mu _b} - {\upsilon _a}{\upsilon _b})}}{{{\upsilon _a}{\upsilon _b}}}\nonumber\\
&&{\tilde Q_{{\upsilon _a}{\upsilon_b}}}{\tilde E_{{\upsilon _a}{\upsilon_b}}} - \frac{{{e^{({\mu _a} + {\upsilon _b})}}{\upsilon _a}({\mu _b} - {\upsilon _b})}}{{{\mu _a}{\upsilon _b}}}{\tilde E_{{\mu _a}{\upsilon _b}}}{\tilde Q_{{\mu _a}{\upsilon _b}}}
- \nonumber\\
&&\frac{{{e^{({\upsilon _a} + {\mu _b})}}{\upsilon _b}({\mu _a} - {\upsilon _a})}}{{{\upsilon _a}{\mu _b}}}{\tilde E_{{\upsilon _a}{\mu _b}}}{\tilde Q_{{\upsilon _a}{\mu _b}}}).
\end{eqnarray}
\({{\tilde E}_{{\mu _1}{\mu _2}}}{{\tilde Q}_{{\mu _1}{\mu _2}}}\ ({\mu _1} \in \{ \mu_a ,\upsilon_a \},{\mu _2}\in\{\mu_b ,\upsilon_b \} )\) is given by
\begin{eqnarray}\label{eq:EQu1v12}
{{\tilde E}_{{\mu _1}{\mu _2}}}{\tilde Q}_{{\mu _1}{\mu _2}}=&& {E_{{\mu _1}{\mu _2}}}{Q_{{\mu _1}{\mu _2}}} + {e^{ - ({\mu _1} + {\mu _2})}}{E_{\omega_a \omega_b }}{Q_{\omega_a \omega_b }}- \nonumber\\
{e^{ - {\mu _1}}}&&{E_{\omega_a {\mu _2}}}{Q_{\omega_a {\mu _2}}} - {e^{ - {\mu _2}}}{E_{{\mu _1}\omega_b }}{Q_{{\mu _1}\omega_b }}.
\end{eqnarray}
It is easy to verify that \({\Upsilon _{i,j}}\) is positive when \((i + j) \ge 4\). So we can define a state of which the density operator is \(\psi= \sum\limits_{\scriptstyle(i+j)\ge4
} {\frac{{{\Upsilon _{i,j}}}}{{i!j!{{(\mu_a - \upsilon_a)(\mu_b - \upsilon_b)}}\Pi}}}{(\left| i \right\rangle \left\langle i \right| \otimes \left| j \right\rangle \left\langle j \right|)}\),
where \(\Pi\) is equal to \(\sum\limits_{\scriptstyle(i+j)\ge4
} {\frac{{{\Upsilon _{i,j}}}}{{i!j!{{(\mu_a - \upsilon_a )(\mu_b - \upsilon_b)}}}}}\).
Then equation (\ref{eq:Y11}) and equation (\ref{eq:e11}) can be rewritten as
\begin{eqnarray}
{Y_{11}} = &&{Y_{11}^L}+\Pi{Y_\psi},\label{eq:Y11our}\\
{e_{11}}{Y_{11}} = &&({e_{11}}{Y_{11}})^L+ \Pi{e_\psi}{Y_\psi},\label{eq:e11our}
\end{eqnarray}
where \({Y_\psi}\) and \({e_\psi}\) are the yield and the bit error rate of the state \(\psi\).
Thus \(Y_{11}\) and \(e_{11}\) are linked by the state \(\psi\). \({Y_{11}}[1-H(e_{11})]\) is a continuous bivariate function of the two parameters
\({Y_\psi}\) and \({e_\psi}\). The lower bound of \(Y_{11}\) is obtained by setting \({Y_\psi}\) to 0, while the upper bound of \(e_{11}\) is obtained by setting \({Y_\psi}\) and \({e_\psi}\) to 1. Thus the lower bound of \({Y_{11}}[1-H(e_{11})]\) cannot be reached with the separate estimation. The minimum of \({Y_{11}}[1-H(e_{11})]\) can be attained with the global estimation.
\subsection{\label{sec:level3B} The global lower bound of \({Y_{11}}[1-H(e_{11})]\)}
In \cite{yu2013three}, the lower bound of \({Y_{11}}\) given in equation (\ref{Y11L}) is obtained by setting the last term in equation (\ref{eq:Y11}) to 0.
The upper bound of \(e_{11}\) is obtained by setting the terms \({e_{ij}}{Y_{ij}}\) with \((i+j)\ge2\) in \({{\tilde E}_{{\upsilon_a}{\upsilon_b}}}{{\tilde Q}_{{\upsilon_a}{\upsilon_b}}}\) to 0,
\begin{eqnarray}\label{e11U}
{e_{11}}\le{e_{11}^U} = \frac{{{e^{\upsilon_a+\upsilon_b }}{{\tilde E}_{\upsilon_a \upsilon_b }}{{\tilde Q}_{\upsilon_a \upsilon_b }}}}{{{\upsilon_a}{\upsilon_b}{Y_{11}^{L}}}}.
\end{eqnarray}
According to equations (\ref{eq:Y11our}), (\ref{eq:e11our}) and (\ref{e11U}) and the corollary in the appendix, the global lower bound of \({Y_{11}}[1-H(e_{11})]\) is given by
\begin{eqnarray}\label{eq:Y11e11globallowerbound}
{Y_{11}}[1-H(e_{11})]&&\ge{({Y_{11}^L}+\delta)[1-H(\frac{{e_{11}^U}{Y_{11}^L}}{{Y_{11}^L+\delta}})]},\\
\delta&&={e_{11}^U}{Y_{11}^L}-({e_{11}}{Y_{11}})^L>0.\nonumber
\end{eqnarray}
To make a clear comparison, we denote by \((Y_{11}^G,e_{11}^G)\) the point where the minimum is attained. Corresponding to equation (\ref{Y11L}) and equation (\ref{e11U}), \(Y_{11}^G\) and \(e_{11}^G\) are given by
\begin{eqnarray}
{Y_{11}^G} = {Y_{11}^L}+\delta,\label{eq:Y11G}\\
{e_{11}^G} = \frac{{e_{11}^U}{Y_{11}^L}}{{Y_{11}^L}+\delta}.\label{eq:e11G}
\end{eqnarray}
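The numerical evaluation is identical in form to the BB84 case: the helper \texttt{global\_lower\_bound} sketched in section \ref{sec:level2B} applies verbatim with \((Y_{11}^L,e_{11}^U,\delta)\) in place of \((Y_1^L,e_1^U,\theta)\); only the estimation of the inputs differs between the two protocols. For example:
\begin{verbatim}
# arbitrary illustrative values; in practice Y11L, e11U and delta are obtained
# from Eqs. (Y11L), (e11U) and the expression for delta above
Y11L, e11U, delta = 2.0e-5, 0.03, 4.0e-7
pa_term = global_lower_bound(Y11L, e11U, delta)
\end{verbatim}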
\subsection{\label{sec:level3C} Numerical simulation for MDI-QKD protocol}
Numerical simulations are performed with the parameters in table \ref{tab:table1}.
The ratios of the estimations of \(Y_{11}\) with two methods (equation (\ref{Y11L}) and equation (\ref{eq:Y11G})) to the asymptotic limit obtained with the infinite-intensity decoy-state method are shown in figure \ref{fig:Y11}. The ratios of the asymptotic limit of \(e_{11}\) to the estimations with two methods (equation (\ref{e11U}) and equation (\ref{eq:e11G})) are shown in figure \ref{fig:e11}. The ratios of the secure key rates calculated with two methods (separate estimation and global estimation) to the asymptotic limit are shown in figure \ref{fig:key2}.
From the results, we can see that tighter estimations of \(Y_{11}\) and \(e_{11}\) are obtained with the global estimation. Thus, higher secure key rates are reached.
\begin{figure}
\caption{\label{fig:Y11} Ratios of the estimations of \(Y_{11}\) obtained with the separate estimation (equation (\ref{Y11L})) and the global estimation (equation (\ref{eq:Y11G})) to the asymptotic limit.}
\end{figure}
\begin{figure}
\caption{\label{fig:e11} Ratios of the asymptotic limit of \(e_{11}\) to the estimations obtained with the separate estimation (equation (\ref{e11U})) and the global estimation (equation (\ref{eq:e11G})).}
\end{figure}
\begin{figure}
\caption{\label{fig:key2} Ratios of the secure key rates calculated with the separate estimation and the global estimation to the asymptotic limit.}
\end{figure}
\section{\label{sec:level4}Conclusion}
The global estimation of the privacy amplification term for both the BB84 protocol and the MDI-QKD protocol has been investigated in this paper. The conventional separate estimation discards the information about the yield of the single-photon state contained in the QBER equations. With the global estimation of the privacy amplification term, this information is extracted and the minimum of the privacy amplification term is attained. Compared with the separate estimation, more accurate estimations of the yield and the bit error rate of the single-photon state are obtained, which significantly improves the performance of quantum key distribution for both the BB84 protocol and the MDI-QKD protocol.
Additionally, a more accurate separate estimation leads to a smaller domain of the bivariate function, which in turn helps to obtain a tighter global estimation.
\appendix
\section*{appendix}
\textit{\textbf{Theorem:}} For the continuous bivariate function \(f(x,y) = (A + Cy)[1 - H(\frac{{B + Cxy}}{{A + Cy}})]\) \((A>0,C>0)\) with domain \(\{ (x,y):0 \le x \le 1,0 \le y \le 1,{\frac{{B + Cxy}}{{A + Cy}}<0.5}\} \), the minimum is attained on the boundary.
\textit{\textbf{Proof:}} First, the partial derivatives of the function \(f(x,y)\) are given by
\begin{equation}\label{eq:fx}
{f_x} = - (A + Cy)H(\frac{{B + Cxy}}{{(A + Cy)}})'\frac{{Cy}}{{A + Cy}},
\end{equation}
\begin{eqnarray}\label{eq:fy}
{f_y} = &&C[1 - H(\frac{{B + Cxy}}{{A + Cy}})] - (A + Cy)H(\frac{{B + Cxy}}{{A + Cy}})'\nonumber\\
&&\frac{{(ACx - BC)}}{{{{(A + Cy)}^2}}}.
\end{eqnarray}
If there were an extreme point \((x_0,y_0)\) with \(0<x_0<1\) and \(0<y_0<1\), then \(H(\frac{{B + Cxy}}{{A + Cy}})'\) would have to be 0 by the constraint \({f_x} =0\). Combining this with the constraint \({f_y} =0\), we get \(C[1 - H(\frac{{B + Cxy}}{{A + Cy}})]=0\). This contradicts our initial assumption.
For fixed \(y\), \(f(x,y)\) is a decreasing function of \(x\). So the minimum is reached at \(x=1\). The problem is thus converted to searching for the minimum of the
univariate continuous function \(g(y) = (A + y)[1 - H(\frac{{B + y}}{{A + y}})]\) \((0 \le y \le C)\). Calculating the derivative of \(g(y)\), we find
\begin{eqnarray}\label{eq:gy}
{g_y}&& = 1 - H(\frac{{B + y}}{{A + y}}) - (A + y)H(\frac{{B + y}}{{A + y}})'\frac{{(A - B)}}{{{{(A + y)}^2}}}\nonumber\\
&&= 1 + (\frac{{B + y}}{{A + y}})\log (\frac{{B + y}}{{A + y}}) + (\frac{{A - B}}{{A + y}})\log (\frac{{A - B}}{{A + y}})\nonumber\\
&& - (\frac{{A - B}}{{A + y}})\log (\frac{{A - B}}{{B + y}})\nonumber\\
&&= 1 + \log (\frac{{B + y}}{{A + y}}).
\end{eqnarray}
As we assume \(\frac{{B + y}}{{A + y}} < 1/2\), we have \({g_y}<0\). That is to say, \(g(y)\) is a decreasing function of \(y\), so its minimum is attained at the right endpoint of the interval. Hence the minimum of \(f(x,y)\) is attained on the boundary, which proves the theorem.
\textit{\textbf{Corollary:}} For the continuous bivariate function \(f(x,y) = (A + Cy)[1 - H(\frac{{B + Cxy}}{{A + Cy}})]\) \((A>0,C>0)\) with domain \(\{ (x,y):0 \le x \le 1,0 \le y \le 1,{\frac{{B + Cxy}}{{A + Cy}}<0.5}, {(B + Cxy)}<D, {(A+Cy)}>E\}\), the nonzero minimum is obtained in the following three cases.
Case 1: when \((D-B)<C\) and \((D-B)>(E-A)\),
the minimum is \(f(1,\frac{{D - B}}{C})=(A+D-B)[1-H(\frac{D}{A+D-B})]\).
Case 2: when \((D-B)<C\) and \((D-B)<(E-A)\),
the minimum is \(f(\frac{D-B}{E-A},\frac{{E- A}}{C})=E[1-H(\frac{D}{E})]\).
Case 3: when \((D-B)\ge C\),
the minimum is \(f(1,1)=(A + C)[1 - H(\frac{{B + C}}{{A + C}})]\).
\textit{\textbf{Proof:}} If we set \({(B + Cxy)}=D\), the function \(f(x,y)\) is converted to a univariate continuous increasing function \((A + Cy)[1 - H(\frac{D}{{A + Cy}})]\). The corollary is then easily verified by combining this with the proof of the theorem.
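A quick numerical sanity check of the theorem and the corollary can be performed by brute force on a grid. The sketch below uses arbitrarily chosen values of \(A\), \(B\), \(C\), \(D\), \(E\) (which here fall into case 1) and compares the grid minimum with the corresponding case formula:
\begin{verbatim}
from math import log2

def H(x):
    return 0.0 if x <= 0.0 or x >= 1.0 else -x*log2(x) - (1.0-x)*log2(1.0-x)

def f(x, y, A, B, C):
    return (A + C*y)*(1.0 - H((B + C*x*y)/(A + C*y)))

A, B, C, D, E = 1.0, 0.02, 0.5, 0.3, 1.2   # arbitrary test values (case 1)
N, best = 400, None
for i in range(N + 1):
    for j in range(N + 1):
        x, y = i/N, j/N
        u = (B + C*x*y)/(A + C*y)
        if u < 0.5 and (B + C*x*y) < D and (A + C*y) > E:
            v = f(x, y, A, B, C)
            best = v if best is None or v < best else best

# case 1: D-B < C and D-B > E-A, minimum approaches (A+D-B)*[1-H(D/(A+D-B))]
print(best, (A + D - B)*(1.0 - H(D/(A + D - B))))
\end{verbatim}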
\end{document}
\begin{document}
\title{Regularization of ultraviolet divergence for a particle interacting with a scalar quantum field}
\author{O.\ D.\ Skoromnik}
\email[Corresponding author: ]{[email protected]}
\affiliation{Max Planck Institute for Nuclear Physics, Saupfercheckweg 1, 69117 Heidelberg, Germany}
\author{I.\ D.\ Feranchuk}
\affiliation{Belarusian State University, 4 Nezavisimosty Ave., 220030, Minsk, Belarus}
\author{D. V. Lu}
\affiliation{Belarusian State University, 4 Nezavisimosty Ave., 220030, Minsk, Belarus}
\author{C. H. Keitel}
\affiliation{Max Planck Institute for Nuclear Physics, Saupfercheckweg 1, 69117 Heidelberg, Germany}
\begin{abstract}
When a non-relativistic particle interacts with a scalar quantum field, the standard perturbation theory leads to a dependence of the energy of its ground state on an undefined parameter---``momentum cut-off''---due to the ultraviolet divergence. We show that the use of non-asymptotic states of the system results in a calculation scheme in which all observable quantities remain finite and continuously depend on the coupling constant without any additional parameters. It is furthermore demonstrated that the divergence of traditional perturbation series is caused by the energy being a function with a logarithmic singularity for small values of the coupling constant.
\end{abstract}
\pacs{11.10.-z, 11.10.Gh, 11.15.Bt, 11.15.Tk, 63.20.kd}
\keywords{quantum field theory; non-perturbative theory; renormalization; divergences}
\maketitle
\section{Introduction}
\label{sec:introduction}
A characteristic property of the majority of quantum field theories (QFT) is the divergence of integrals appearing in the perturbation theory for the calculation of physical quantities such as mass and charge of the interacting particles. The divergences appear in the integrations over momenta in intermediate states both on the lower limit (infrared divergence) and on the upper limit (ultraviolet divergence). In order to circumvent this difficulty, a renormalization procedure is used, which allows the redefinition of the initial parameters of the system through their observable values. The renormalization scheme was firstly developed for quantum electrodynamics (QED) \cite{PhysRev.75.1736,*PhysRev.85.631,*PhysRev.95.1300,*Stueckelberg1953} and later generalized to other QFT models \cite{'tHooft1971173,*Hooft1971167,*'tHooft1972189}. These schemes can be used for so-called renormalizable theories, for which the reconstructed perturbation theory can be built in a way that the infinite values are included in the definition of ``physical'' charge and mass and, therefore, do not appear in other observables of the system. However, even the founders of QED anticipated ``that the renormalization theory is simply a way to sweep the difficulties of the divergences in electrodynamics under the rug.'' (R. P. Feynman \cite{Feynman12081966}). In many papers P. A. M. Dirac wrote that this approach was in contradiction with logical principles of quantum mechanics \cite{Dirac1981,Dirac1981book}.
Accordingly, the question arises whether these divergences are an intrinsic property of quantum field models or they are caused by the application of perturbation theory for the calculation of physical quantities, which are non-analytical functions of the coupling constant such as in the theory of superconductivity \cite{PhysRev.108.1175}. A large number of works is devoted to this problem, nevertheless a solution still has not been found up to now. However, this question is of great importance for a correct mathematical formulation of fundamental physical theories and is essential for examining the applicability of non-renormalizable theories \cite{PhysRevD.88.125014,*PhysRevD.87.065024} for the description of real physical systems.
Let us recall that in standard perturbation theory the Hamiltonian of non-interacting fields is used as a zeroth-order approximation, while Fock states of the free fields are employed for the calculation of the transition matrix elements in the subsequent corrections to observable characteristics of the system. This approach is based on the assumption of an asymptotic switch off of the interaction between the fields \cite{LandauQED}. However, in a series of works it has been shown \cite{PhysRev.140.B1110,Faddeev1970,PhysRevD.11.3481,PhysRev.173.1527,*PhysRev.174.1882,*PhysRev.175.1624} that the infrared divergence arises just because of the use of asymptotically free field states. As follows from reference \cite{PhysRev.140.B1110} and the subsequent publication \cite{Faddeev1970}, the infrared divergence disappears in all orders of perturbation theory in QED if, in the zeroth-order approximation, the coherent states of the electromagnetic field bound to the particle are used and the parameters of these states are appropriately chosen.
At first glance, this may contradict representation theory in quantum mechanics, in accordance to which the result of the calculation of the observable characteristics of the system should not depend on the choice of basis states, provided that those form a full basis in a Hilbert space as for free field states. However, this statement is correct only for the exact solution of the problem, whereas individual terms of the perturbation series can change with a different choice of basis in zeroth-order approximation. As was demonstrated in reference \cite{Feranchuk2015,Feranchuk1995370} the transition from one basis to another corresponds to the partial summation of a divergent series within standard perturbation theory and allows for the non-perturbative calculation of subsequent corrections in the form of a convergent sequence. A good example of how the basis choice influences the approximate calculation of the characteristics of the quantum system with a continuous spectrum is given by the scattering at a Coulomb potential \cite{PhysRevA.82.052703,*PhysRevA.70.052701}. In this well known case, the wave function of the system has no singularities. In contrast, Born's scattering amplitude, approximately calculated via an asymptotically free basis, displays a singularity for scattering at small angles. As was demonstrated in references \cite{PhysRevA.82.052703,*PhysRevA.70.052701}, this singularity does not appear with the use of non-asymptotic wave functions.
The main goal of our work is to investigate whether a proper choice of basis in zeroth-order approximation allows one to construct a calculation scheme free of ultraviolet divergences. In order not to overload the proposed approach with details related to the internal degrees of freedom and to render all calculations as transparent as possible, we investigate as a representative example a model system, which consists of a non-relativistic particle without spin interacting with a scalar quantum field. In this case, the standard perturbation series does not exhibit an infrared divergence; it does, however, contain an ultraviolet divergence. This results in a dependence of the energy of the ground state on the undefined momentum cut-off, which is required for the calculation of high-order corrections. Consequently, our task is to prove that the energy of the ground state of the considered model system can be calculated without any additional parameters such as a momentum cut-off. At the same time, it is important to show that the energy of the system is a non-analytical function of the coupling constant and consequently cannot be represented as a series in the framework of conventional perturbation theory.
With the inclusion of a field polarization our employed model coincides with non-relativistic QED \cite{Healy1982}; if the field is scalar, it has a physical realization in solids \cite{Toyozawa01071961}, where, however, due to the discrete structure of a crystal, a natural momentum cut-off intrinsically appears, defined via the Brillouin-zone boundary. In free space this regularization is not present and has to be artificially included, e.g., via lattice models \cite{Smit2002}, where the boundary momentum is defined through an artificial lattice period. In contrast, in our formulation we will consider a system in free space without either a natural or an artificial cut-off.
In addition, in a series of works \cite{Spohn1998,*Spohn1989,Amann1991414,*Arai1997455,jmp.5.1190.1964,Bach1998299,*Bach2007426,*Chen20082555} an analogous model of a particle interacting with a scalar quantum field with the momentum cut-off was used for the investigation of the fundamental mathematical problem of the existence of the solutions of the Schr\"{o}dinger equation.
The article is organized in the following way. In section \ref{sec:model_description} the model of a non-relativistic particle without spin interacting with a scalar quantum field is described and its parameters
are calculated in the framework of conventional perturbation theory.
In section \ref{sec:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system}
the basis of non-asymptotic states is investigated and the iteration scheme of the calculations is presented.
The zeroth-order approximation, which is found to be free of ultraviolet divergence for the energy and effective mass is then worked out using this basis.
In section \ref{sec:second_order_iteration_for_the_energy_convergence}
the proposed iteration scheme is employed for computing the correction to the zeroth-order approximation of the energy. The convergence of all integrals is demonstrated and the character of the singularity
of the energy as a non-analytic function of the coupling constant is determined in the weak coupling limit. In addition, the details of all calculations are presented in appendices.
\section{Model description}
\label{sec:model_description}
Let us examine the Hamiltonian of the system consisting of a non-relativistic particle interacting with a scalar quantum field
\begin{align}
\opa H &= \opa H_0+\opa H_{\text{int}}, \label{eq:model_description1}
\\
\opa H_0 &= \frac{\opA p^2}{2}+\sum_{\boldsymbol k}\omega_{\boldsymbol k} \opa a^\dag_{\boldsymbol k}\opa a_{\boldsymbol k}, \label{eq:model_description2}
\\
\opa H_{\text{int}} &=\frac{g}{\sqrt{2\Omega}}\sum_{\boldsymbol k}A_{\boldsymbol k} \left(e^{\ri\thp{k}{r}}\opa a_{\boldsymbol k}+e^{-\ri\thp{k}{r}}\opa a^\dag_{\boldsymbol k}\right). \label{eq:model_description3}
\end{align}
Here, we select the system of units in which $m = 1$, $\hbar = c = 1$, the momentum operator $\opA p=-\ri \nabla$, normalization volume $\Omega$, vertex function $A_{\boldsymbol k} = 1/{\sqrt{\omega_{\boldsymbol k}}}$, creation (annihilation) operators $\opa a_{\boldsymbol k}^\dag$ ($\opa a_{\boldsymbol k}$) of the field mode with the frequency $\omega_{\boldsymbol k}=k=\vert{\boldsymbol k}\vert$, and the coupling constant $g$. The real physical system, which is described via Hamiltonian (\ref{eq:model_description1}) corresponds to an electron interacting with acoustic phonons in a continuous model of a crystal \cite{Toyozawa01071961}. If we choose $\omega_{\boldsymbol k} = 1$, $A_{\boldsymbol k} = 1/k$, and $g = \sqrt{8\pi \alpha}$, operator (\ref{eq:model_description1}) corresponds to the Fr\"ohlich Hamiltonian \cite{Froehlich1954}, which describes the interaction of an electron with optical phonons in a crystal, i.e., the so-called ``polaron'' problem \cite{RevModPhys.63.63,*Mitra198791,PhysRev.97.660,Spohn1987278}.
The total momentum operator
\begin{align}
\opA P = -\ri \nabla + \sum_{\boldsymbol k}\boldsymbol k\opa a^\dag_{\boldsymbol k}\opa a_{\boldsymbol k}\label{eq:model_description4}
\end{align}
commutes with the Hamiltonian of the system (\ref{eq:model_description1}) and consequently the eigenvalues $E(\boldsymbol P)$ and eigenfunctions $|\Psi_{\boldsymbol P}\rangle$ are defined as solutions of the following system of equations
\begin{align}
\opa H|\Psi_{\boldsymbol P}\rangle &= E(\boldsymbol P)|\Psi_{\boldsymbol P}\rangle, \label{eq:model_description5}
\\
\opA P|\Psi_{\boldsymbol P}\rangle &= \boldsymbol P |\Psi_{\boldsymbol P}\rangle. \label{eq:model_description6}
\end{align}
In the conventional perturbation expansion over the coupling constant in the zeroth-order approximation the solution of the stationary Schr\"odinger equation with Hamiltonian (\ref{eq:model_description2}) is simply determined and corresponds to the free particle with momentum $\boldsymbol p$ and Fock states of the phonon field with the set of occupation numbers $\{n_{\boldsymbol k_1},n_{\boldsymbol k_2},\ldots\}\equiv \{n_{\boldsymbol k}\}$:
\begin{align}
|\Psi^{(0)}_{\boldsymbol p,\{n_{\boldsymbol k}\}}\rangle &= \frac{e^{\ri \thp{p}{r}}}{\sqrt{\Omega}}|\{n_{\boldsymbol k}\}\rangle,\quad \sum_{\boldsymbol k} \opa a^\dag_{\boldsymbol k}\opa a_{\boldsymbol k}|\{ n_{\boldsymbol k}\}\rangle =\sum_{\boldsymbol k}n_{\boldsymbol k} |\{ n_{\boldsymbol k}\}\rangle, \label{eq:model_description7}
\\
E^{(0)}(\boldsymbol P, \{ n_{\boldsymbol k}\}) &= \frac{1}{2}\left(\boldsymbol P - \sum_{\boldsymbol k}\boldsymbol k n_{\boldsymbol k}\right)^2 + \sum_{\boldsymbol k}\omega_{\boldsymbol k} n_{\boldsymbol k},\quad \boldsymbol P = \boldsymbol p + \sum_{\boldsymbol k}\boldsymbol k n_{\boldsymbol k}. \label{eq:model_description8}
\end{align}
Let us suppose that the system is in the ground state of the phonon field $\{n_{\boldsymbol k}\} = 0$, which leads to the following eigenfunction and eigenvalue
\begin{align}
|\Psi^{(0)}_{\boldsymbol P, 0}\rangle &= \frac{e^{\ri \thp{P}{r}}}{\sqrt{\Omega}} |0\rangle \label{eq:model_description9}
\\
E^{(0)}(\boldsymbol P,0) &= \frac{P^2}{2}, \quad \boldsymbol P = \boldsymbol p. \label{eq:model_description10}
\end{align}
The first non-vanishing correction to the system energy arises in the second order of perturbation theory (single-phonon intermediate transitions) and corresponds to the self-energy diagram, which defines the mass operator $\Sigma(\boldsymbol P)$ and is determined as
\begin{align}
\label{eq:model_description11}
\Sigma (\boldsymbol P) &= \Delta E^{(2)}(\boldsymbol P,0) = - \frac{g^2}{2 \Omega} \sum_{\boldsymbol k} \frac{1}{\omega_{\boldsymbol k}} \frac{1}{k^2/2 - \thp{P}{k} + \omega_k} = - \frac{g^2}{16 \pi^3} \int \frac{d \boldsymbol k}{k [k^2/2 - \thp{P}{k} + k]}.
\end{align}
In order to select bound state energy $E_b = E^{(2)}(0,0)$ and effective mass $m^*$ of a particle we expand the energy in a series over $\boldsymbol P$ up to second order
\begin{align}
E^{(0)}(\boldsymbol P,0) + \Delta E^{(2)}(\boldsymbol P,0) \approx E_b + \frac{P^2}{2 m^*} \equiv - \frac{g^2}{16 \pi^3} \int \frac{d \boldsymbol k}{k [k^2/2 + k]} + \frac{P^2}{2} - \frac{g^2}{16 \pi^3} \int \frac{d \boldsymbol k}{k [k^2/2 + k]^3}(\thp{P}{k})^2.\label{eq:model_description12}
\end{align}
The first integral in equation (\ref{eq:model_description12}) logarithmically diverges, that is the bound state energy depends on the momentum cut-off $K$
\begin{align}
E_b = - \frac{g^2}{2 \pi^2}\ln \left(\frac{K}{2} +1\right),\label{eq:model_description13}
\end{align}
and becomes infinite when $K\rightarrow\infty$, such that the correction to the energy is undefined in the framework of the perturbation theory for our model \cite{Messiah1981}.
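For completeness, the angular integration behind equation (\ref{eq:model_description13}) is elementary,
\begin{align*}
\int \frac{d \boldsymbol k}{k [k^2/2 + k]} = 4\pi \int_0^{K} \frac{k^2\,dk}{k^2\,(k/2 + 1)} = 8\pi \ln\left(\frac{K}{2} +1\right),
\end{align*}
so that $\Delta E^{(2)}(0,0) = -\frac{g^2}{16\pi^3}\, 8\pi \ln(K/2+1)$ reproduces equation (\ref{eq:model_description13}).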
At the same time, the corrected mass is well defined and equal to
\begin{align}
\frac{1}{m^*} \simeq 1 - \frac{g^2}{6 \pi^2}; \quad m^* \simeq 1 + \frac{g^2}{6 \pi^2 }. \label{eq:model_description14}
\end{align}
In contrast to this, in the polaron problem all integrals are convergent because they contain an additional power of $k$ in the denominator. The corresponding quantities for the polaron problem read as
\cite{RevModPhys.63.63,*Mitra198791,PhysRev.97.660,Spohn1987278}
\begin{align}
E_b \simeq - \alpha; \quad m^* \simeq 1 + \frac{\alpha}{6 }.\label{eq:model_description15}
\end{align}
It is important to stress here that in our model, the interaction energy between particle and field is observable and consequently, the infinite energy (\ref{eq:model_description12}) can not be included in the mass renormalization. Thus, we can conclude that the use of perturbation theory for two physically close quantum-field models leads to qualitatively different results. Therefore, a modification of the calculation method of subsequent corrections to the energy for our model is required and appears achievable.
\section{Iteration scheme, basis choice and zeroth-order approximation of the system's energy}
\label{sec:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system}
In order to build an iteration scheme outside the framework of perturbation theory we will employ the operator method (OM) for the solution of the Schr\"odinger equation, which was introduced in reference \cite{Feranchuk1982211}; a detailed explanation is given in the monographs \cite{Feranchuk2015,Feranchuk1995370}. Let us briefly review here the basics of this method. Suppose the eigenvalues $E_\mu$ and eigenvectors $|\Psi_{\mu}\rangle$ with a set of quantum numbers $\mu$ of the stationary Schr\"odinger equation need to be found:
\begin{align}
\opa H |\Psi_{\mu}\rangle = E_{\mu}|\Psi_{\mu}\rangle.\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_1}
\end{align}
In contrast to perturbation theory, where the Hamiltonian $\opa H$ of the system is split into the zeroth-order approximation and perturbation parts, according to OM the total Hamiltonian is taken into account as is, while however, the state vector is probed via an approximate state:
\begin{align*}
|\Psi_{\mu}\rangle \approx |\psi_{\mu}(\omega_{\mu})\rangle,
\end{align*}
which depends on a set of variational parameters $\omega_{\mu}$. Then, the exact solution can be represented as a series
\begin{align}
|\Psi_{\mu}\rangle = |\psi_{\mu}(\omega_{\mu})\rangle + \sum_{\nu \neq \mu} C_{\mu \nu}|\psi_{\nu} (\omega_{\mu} )\rangle. \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_2}
\end{align}
Here we want to pay attention to the fact that for a given set of quantum numbers $\mu$, the set $\omega_{\mu}$ is fixed. By plugging the expansion (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_2}) into Schr\"odinger's equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_1}) and projecting on different states $|\psi_{\mu}(\omega_{\mu})\rangle$ and $|\psi_{\nu}(\omega_{\mu})\rangle$ one obtains the equations for the energies $E_{\mu}$ and coefficients $C_{\mu \nu}$:
\begin{align}
E_{\mu} &= \left[1 + \sum_{\nu \neq \mu} C_{\mu \nu}I_{\mu \nu}\right]^{-1} \left[H_{\mu \mu} + \sum_{\nu \neq \mu} C_{\mu \nu}H_{\mu \nu}\right]; \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_3}
\\
C_{\mu \gamma} &= \left[E_{\mu} - H_{\gamma \gamma}\right]^{-1}\left[ H_{\gamma \mu} - E_{\mu}I_{\gamma \mu} + \sum_{\nu \neq \mu \neq \gamma}C_{\mu \nu} (H_{\gamma \nu} - E_{\mu}I_{\gamma \nu})\right]; \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_4}
\\
H_{\mu \nu} &\equiv \langle\psi_{\mu} (\omega_{\mu} )| \opa H | \psi_{\nu} (\omega_{\mu})\rangle; \quad I_{\mu \nu} \equiv \langle\psi_{\mu} (\omega_{\mu} )| \psi_{\nu} (\omega_{\mu})\rangle. \nonumber
\end{align}
It is important to stress here that all matrix elements are calculated with the \emph{full} Hamiltonian of the system and the set of vectors $|\psi_{\mu}(\omega_{\mu})\rangle$ can be normalized, while not necessarily being mutually orthogonal. The system of equations (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_3}), (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_4}) is the exact representation of the Schr\"odinger equation. For the approximate solution of this system, in accordance with OM the following concept is used: the closer the zeroth-order approximation of the state vector is to the exact solution, the closer the matrix $H_{\mu \nu}$ becomes to the diagonal one. Therefore, an iteration scheme for the solution of the system (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_3}), (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_4}) can be built, for which convergence is determined with the ratios of non-diagonal elements $H_{\mu \nu}$ to the diagonal ones $H_{\mu \mu}$ in the representation of the state vectors $|\psi_{\mu}(\omega_{\mu})\rangle$. A sufficiently detailed discussion of the convergence of the iteration scheme for different physical systems is given in the monograph \cite{Feranchuk2015}. Consequently, we find the system of recurrent equations
\begin{align}
E^{(s)}_{\mu} &= \left[1 + \sum_{\nu \neq \mu} C^{(s-1)}_{\mu \nu}I_{\mu \nu}\right]^{-1} \left[H_{\mu \mu} + \sum_{\nu \neq \mu} C^{(s-1)}_{\mu \nu}H_{\mu \nu}\right]; \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_5}
\\
C^{(s)}_{\mu \gamma} &= \left[E^{(s-1)}_{\mu} - H_{\gamma \gamma}\right]^{-1}\left[ H_{\gamma \mu} - E^{(s-1)}_{\mu}I_{\gamma \mu} + \sum_{\nu \neq \mu \neq \gamma}C^{(s-1)}_{\mu \nu} (H_{\gamma \nu} - E^{(s-1)}_{\mu}I_{\gamma \nu})\right]; \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_6}
\\
C^{(-1)}_{\mu \nu} &= C^{(0)}_{\mu \nu} = 0; \quad E^{(0)}_{\mu} = H_{\mu \mu}. \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_7}
\end{align}
As opposed to conventional perturbation theory, where the exact solution is defined as a sum of corrections of all orders, in OM the exact value of the energy of the system is given as a limit of a sequence
\begin{align}
E_{\mu} = \lim_{s \rightarrow \infty} E^{(s)}_{\mu}; \quad s = 0,1,\ldots.\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_8}
\end{align}
In particular, for the first two iterations one can find
\begin{align}
E^{(1)}_{\mu} &= E^{(0)}_{\mu} = H_{\mu \mu} ; \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_9}
\\
E^{(2)}_{\mu} &= \left[1 + \sum_{\nu \neq \mu} \frac{(H_{\nu \mu} - E^{(0)}_{\mu}I_{\nu \mu})I_{\mu \nu}}{E^{(0)}_{\mu} - H_{\nu \nu} }\right]^{-1} \left[H_{\mu \mu} + \sum_{\nu \neq \mu} \frac{(H_{\nu \mu} - E^{(0)}_{\mu}I_{\nu \mu})H_{\mu \nu}}{E^{(0)}_{\mu} - H_{\nu \nu}}\right]. \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_10}
\end{align}
The last equation is analogous to the second-order correction of perturbation theory; the main difference lies in the denominators of equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_10}), where the matrix element $H_{\nu \nu}$ is calculated with the full Hamiltonian of the system, whereas the perturbation theory relations merely involve the diagonal element of the unperturbed Hamiltonian. As will be shown below, this is exactly what determines the convergence of integrals over intermediate states.
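To make the recurrence (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_5})--(\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_7}) concrete, the following minimal Python sketch applies it to a small Hermitian matrix in an orthonormal basis, so that $I_{\mu\nu}=\delta_{\mu\nu}$; the matrix is an arbitrary toy example and has nothing to do with the Hamiltonian of the present model:
\begin{verbatim}
import numpy as np

def om_iterations(Hm, mu=0, steps=8):
    # operator-method recurrence for level `mu`, assuming an orthonormal basis
    n = Hm.shape[0]
    others = [g for g in range(n) if g != mu]
    C_prev = np.zeros(n)            # C^(s-1); the entry C[mu] is unused
    E_prev = Hm[mu, mu]             # E^(0) = H_{mu mu}
    energies = [E_prev]
    for _ in range(steps):
        # E^(s) from C^(s-1)
        E_s = Hm[mu, mu] + sum(C_prev[nu]*Hm[mu, nu] for nu in others)
        # C^(s) from E^(s-1) and C^(s-1)
        C_s = np.zeros(n)
        for g in others:
            num = Hm[g, mu] + sum(C_prev[nu]*Hm[g, nu] for nu in others if nu != g)
            C_s[g] = num/(E_prev - Hm[g, g])
        E_prev, C_prev = E_s, C_s
        energies.append(E_s)
    return energies

H = np.array([[1.0, 0.3, 0.1],
              [0.3, 2.0, 0.2],
              [0.1, 0.2, 3.5]])
print(om_iterations(H)[-1], np.linalg.eigvalsh(H)[0])   # converges to the lowest eigenvalue
\end{verbatim}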
Before proceeding with the application of the iteration scheme, let us discuss the choice of the parameters $\{\omega_{\mu}\} = \{\omega_{\mu}^1,\ldots,\omega_{\mu}^n,\ldots\}$ in more detail. For this we note that the representation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_3}-\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_4}) is exactly equivalent to the Schr\"{o}dinger equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_1}), provided that the set of states $\{|\psi_{\mu}(\{\omega_{\mu}\})\rangle\}$ is a complete one. It is evident that if the state vectors $\{|\psi_{\mu}(\{\omega_{\mu}\})\rangle\}$ coincide with the exact eigenstates $|\Psi_{\mu}\rangle$ of the full Hamiltonian, the matrix $H_{\mu\nu}$ is a diagonal one, i.e. $H_{\mu\nu} = E_{\mu}\delta_{\mu\nu}$ and the coefficients $C_{\mu\nu} = 0$. The eigenvalues $E_{\mu}$ are determined exactly and are independent of the set of parameters $\{\omega_{\mu}\}$. Therefore, the relation
\begin{align}
\frac{\partial E_{\mu}}{\partial\omega_{\mu}^n} \equiv 0, \quad n=\{1,2,\ldots\} \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_rev1}
\end{align}
holds identically.
According to our initial assumption we choose the trial parameters $\{\omega_{\mu}\}$ in the basis states $|\psi_{\mu}(\{\omega_{\mu}\})\rangle$ such that they determine the best possible approximation for the exact solution $|\Psi_{\mu}\rangle$ in the chosen class of functions. This is equivalent to the supposition that the off-diagonal elements of the matrix $H_{\mu \nu}$ are small numbers such that the ratios $H_{\mu \nu}/H_{\mu \mu}$ are proportional to some effective small parameter $\epsilon$. Therefore, the zeroth-order approximation of the operator method is chosen as
\begin{align}
E_{\mu}^{(0)}(\{\omega_{\mu}\}) = H_{\mu \mu}(\{\omega_{\mu}\}), \quad C_{\mu \nu}^{(0)} = 0, \quad |\Psi_{\mu}^{(0)}\rangle = |\psi_{\mu}(\{\omega_{\mu}\})\rangle.\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_rev2}
\end{align}
However, the matrix $H_{\mu \nu}$ contains small off-diagonal elements, which need to be taken into account. Hence, the subsequent approximations read
\begin{align}
E_{\mu} &= H_{\mu \mu}(\{\omega_{\mu}\}) + \sum_{s = 1}^{\infty}\epsilon^s E_{\mu}^{(s)}(\{\omega_{\mu}\}), \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_rev3}
\\
C_{\mu \nu}(\{\omega_{\mu}\}) &= \sum_{s=1}^{\infty}\epsilon^s C_{\mu \nu}^{(s)}(\{\omega_{\mu}\}),\quad \mu\neq \nu.\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_rev4}
\end{align}
As the left-hand side of equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_rev3}) does not depend on the parameters $\{\omega_{\mu}\}$ it is natural to require that in each order in $\epsilon$ the right-hand side also does not depend on $\{\omega_{\mu}\}$:
\begin{align}
\frac{\partial E_{\mu}^{(s)}}{\partial\omega_{\mu}^n} = 0, \quad s=\{0,1,\ldots\}\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_rev5}
\end{align}
for each $\omega_{\mu}^n$. In the monograph \cite{Feranchuk2015} it was demonstrated that the recalculation of the parameters $\{\omega_{\mu}\}$ in every order in $\epsilon$ speeds up the convergence of the iteration scheme, however does not change the qualitative behaviour of the energy levels of the system. For this reason, in all calculations below we will fix the parameters $\{\omega_{\mu}\}$ via the zeroth-order approximation:
\begin{align}
\frac{\partial E_{\mu}^{(0)}}{\partial\omega_{\mu}^n} = 0.\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_rev6}
\end{align}
In what follows we apply the above approach to the description of our model. In accordance with OM we choose a variational state vector, which incorporates the qualitative peculiarities of the system. From a physical point of view a field can be considered as a system of an infinite number of harmonic oscillators. Due to the interaction with a particle the equilibrium positions of these harmonic oscillators are modified. In a representation of creation and annihilation operators the shift of equilibrium positions corresponds to the displacement of a classical component $u_{\boldsymbol k}$ on these operators \cite{Scully1997}, i.e. $\opa a_{\boldsymbol k}^{\dag} \rightarrow \opa a_{\boldsymbol k}^{\dag}+u_{\boldsymbol k}^{*}$ and $\opa a_{\boldsymbol k} \rightarrow \opa a_{\boldsymbol k}+u_{\boldsymbol k}$, such that we choose a basis of field oscillators consisting of coherent states. As a result a so-called localized state of a particle in the field of these classical components arises. This means that during its existence the particle becomes ``dressed'', i.e. somewhat smeared out while still localized. Moreover, this ``dressed'' state should be an eigenstate of the total momentum operator $\opA P$, since $\opA P$ commutes with $\opa H$. Concluding, we formulate the following conditions for the state vectors: i) representation in the basis of coherent states; ii) imposing the localization of the particle state; iii) use of variational state vectors as eigenstates of $\opA P$ (\ref{eq:model_description4}).
In order to incorporate the first two conditions in the state vector, we choose it as the product of the square integrable wave function of a particle, localized near an arbitrary point $\boldsymbol R$ in space, and a coherent state of the field, analogous to the polaron problem \cite{RevModPhys.63.63,*Mitra198791,0022-3719-17-24-012}:
\begin{equation}
\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_11}
\vert\Psi({\boldsymbol r},{\boldsymbol R})\rangle= \phi({\boldsymbol r}-{\boldsymbol R})\exp\left(\sum_{\boldsymbol k}\Bigl(u^*_{\boldsymbol k}e^{-\ri \thp{k}{R}} \opa a^\dag_{\boldsymbol k}-u_{\boldsymbol k}e^{\ri \thp{k}{R}} \opa a_{\boldsymbol k}\Bigr)\right)\vert 0\rangle.
\end{equation}
In the state (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_11}) the classical component of the field $u_{\boldsymbol k}$ and the wave function $\phi(\boldsymbol r - \boldsymbol R)$ can be considered as the variational parameters $\{\omega_{\mu}\}$ of OM. In accordance with the above described procedure of the choice of the parameters $\{\omega_{\mu}\}$, the functional derivatives of the functional $\langle \Psi({\boldsymbol r},{\boldsymbol R})\vert \opa H \vert\Psi({\boldsymbol r},{\boldsymbol R})\rangle$ with respect to these parameters should be equal to zero. This yields an equation for the classical components of the field $u_{\boldsymbol k}$ and the wave function $\phi(\boldsymbol r - \boldsymbol R)$:
\begin{align}
\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_12}
\frac{\delta}{\delta u_{\boldsymbol k}} &\left[ \langle \Psi({\boldsymbol r},{\boldsymbol R})\vert \opa H \vert\Psi({\boldsymbol r},{\boldsymbol R})\rangle\right] = \frac{\delta}{\delta \phi({\boldsymbol r}-{\boldsymbol R})} \left[ \langle \Psi({\boldsymbol r},{\boldsymbol R})\vert \opa H \vert\Psi({\boldsymbol r},{\boldsymbol R})\rangle\right] = 0.
\end{align}
By calculating the functional with the Hamiltonian (\ref{eq:model_description1}) and corresponding derivatives one obtains the connection between the classical components of the field and the wave function of the particle:
\begin{align}
\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_13}
u_{\boldsymbol k} &=-\frac{g}{\sqrt{2\Omega\omega^3_{\boldsymbol k}}}\int d \boldsymbol r |\phi ({\boldsymbol r})|^2 e^{-\ri\thp{k}{r}}.
\end{align}
In the general case, the second equation in (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_12}) leads to the integral equation for the function $\phi_{\boldsymbol P}(\boldsymbol r)$. However, according to reference \cite{Feranchuk2015}, the convergence of the iteration scheme of OM does not depend on the particular choice of variational parameters, under the condition that the approximate state vector takes into account qualitative characteristics of the system. Therefore, for the analytical investigation of the energy $E_L^{(0)}(\boldsymbol P,g)$ we replace the exact numerical solution with a trial wave function, which depends on the single parameter $\lambda$ and is equal to
\begin{align}
\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_27}
\phi(\boldsymbol r) = \frac{\lambda^{\frac{3}{2}}}{\pi^{\frac{3}{4}}}e^{-\frac{\lambda^2 r^2}{2}}.
\end{align}
We notice that in the polaron problem the application of OM with the trial wave function (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_27}) yields an accuracy of the order of $1\%$ in the calculation of the bound state energy and the effective mass \cite{0022-3719-17-24-012}. With this choice of wave function, we proceed to calculate the classical component of the field $u_{\boldsymbol k}$ and the Fourier transform of the wave function (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_27}), which will be required below:
\begin{align}
u_{\boldsymbol k} &= -\frac{g}{\sqrt{2 \Omega}}\frac{1}{\sqrt{k^3}}\int d\boldsymbol r |\phi(\boldsymbol r)|^2 e^{-\ri\thp{k}{r}} = -\frac{g}{\sqrt{2 \Omega}}\frac{e^{-\frac{k^2}{4 \lambda^2}}}{\sqrt{k^3}}; \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_28}
\\
\phi_{\boldsymbol k} &= \int d\boldsymbol r \phi(\boldsymbol r)e^{-\ri\thp{k}{r}} = 2\sqrt{2}\frac{\pi^{\frac{3}{4}}}{\lambda^{\frac{3}{2}}} e^{-\frac{k^2}{2 \lambda^2}} = \phi_0 e^{-\frac{k^2}{2 \lambda^2}}. \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_29}
\end{align}
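The closed forms (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_28}) and (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_29}) are standard Gaussian integrals and can be checked numerically with a radial quadrature; in the sketch below $\lambda=1.3$ and $k=0.7$ are arbitrary test values:
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

lam, k = 1.3, 0.7    # arbitrary test values

def radial_ft(f, k):
    # 3D Fourier transform of a radial function: (4 pi / k) * int_0^inf r f(r) sin(k r) dr
    val, _ = quad(lambda r: r*f(r)*np.sin(k*r), 0.0, np.inf)
    return 4.0*np.pi*val/k

phi = lambda r: lam**1.5/np.pi**0.75*np.exp(-lam**2*r**2/2.0)

# Eq. (29): 2*sqrt(2)*pi^(3/4)/lam^(3/2) * exp(-k^2/(2 lam^2))
print(radial_ft(phi, k), 2.0*np.sqrt(2.0)*np.pi**0.75/lam**1.5*np.exp(-k**2/(2.0*lam**2)))

# Eq. (28) without the prefactor -g/sqrt(2 Omega k^3): exp(-k^2/(4 lam^2))
print(radial_ft(lambda r: phi(r)**2, k), np.exp(-k**2/(4.0*lam**2)))
\end{verbatim}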
Furthermore, the states (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_11}) are not the eigenstates of the total momentum operator $\opA P$ of the system, i.e. they are not translationally invariant. Moreover, these states are degenerate, as they do not depend on the localization point $\boldsymbol R$ of the particle in space. The choice of the correct linear combination of these states allows one to build a set of states which are not degenerate and are eigenstates of the total momentum operator $\opA P$:
\begin{align}
|\Psi^{(0)}_{\boldsymbol P_1, n_{\boldsymbol k}}\rangle &= \frac{1}{N_{\boldsymbol P_1,n_{\boldsymbol k}}\sqrt{\Omega}}\int d \boldsymbol R \phi_{\boldsymbol P_1}(\boldsymbol r - \boldsymbol R)\exp\left\{\ri(\boldsymbol P_1 - \boldsymbol k n_{\boldsymbol k})\cdot \boldsymbol R \right\}\exp\left\{\sum_{\boldsymbol q}(u_{\boldsymbol q} e^{- \ri\boldsymbol q \cdot \boldsymbol R}\opa a_{\boldsymbol q}^\dag - u_{\boldsymbol q}^* e^{ \ri\boldsymbol q \cdot \boldsymbol R}\opa a_{\boldsymbol q})\right\}|n_{\boldsymbol k}\rangle,\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_14}
\\
\opA P |\Psi^{(0)}_{\boldsymbol P_1,n_{\boldsymbol k}}\rangle &= \boldsymbol P_1 |\Psi^{(0)}_{\boldsymbol P_1,n_{\boldsymbol k}}\rangle. \nonumber
\end{align}
Here $\Omega$ is the normalization volume and $\boldsymbol P_1$ the total momentum of the system, $|n_{\boldsymbol k}\rangle$ are Fock field states with occupation number $n_{\boldsymbol k}$, $\phi_{\boldsymbol P_1}(\boldsymbol r - \boldsymbol R)$ is the wave function of the particle localized at point $\boldsymbol r = \boldsymbol R$ and the classical component of the field $u_{\boldsymbol k}$ is defined via equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_13}). The normalization constant for the state $|\Psi^{(0)}_{\boldsymbol P_1, 1_{\boldsymbol k}}\rangle$ is defined as
\begin{align}
\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_15}
|N_{\boldsymbol P_1,1_{\boldsymbol k}}|^2 = \int d \boldsymbol R_1 d \boldsymbol \rho \phi_{\boldsymbol P_1}^*(\boldsymbol \rho)\phi_{\boldsymbol P_1}(\boldsymbol \rho - \boldsymbol R_1) e^{\ri(\boldsymbol P_1 - \boldsymbol k)\cdot \boldsymbol R_1 + \sum_k |u_k|^2(e^{-\ri\thp{k}{R_1}}-1)}\left(2|u_k|^2(\cos\thp{k}{R_1}-1)+1\right).
\end{align}
In addition, the set of states (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_14}) forms a complete and orthonormal basis in Hilbert space. The completeness of these states follows from the fact that they are eigenstates of a Hermitian operator $\opA{P}$, with an explicit proof given in Appendix~A. Concluding, the set of states (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_14}) takes into account physical peculiarities of the system, forms a complete set of states in Hilbert space for arbitrary functions $\phi_{\boldsymbol P}(\boldsymbol r)$ and $u_{\boldsymbol k}$, which are analogs of the parameters $\{\omega_{\mu}\}$, and can therefore be used in the iteration scheme (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_5}-\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_7}).
The zeroth-order approximation for the ground state vector following the above procedure then reads as
\begin{align}
\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_16}
|\Psi^{(L)}_{\boldsymbol P}\rangle &= \frac{1}{N_{\boldsymbol P}\sqrt{\Omega}} \int d{\boldsymbol R}\,\phi_{\boldsymbol P}({\boldsymbol r}-{\boldsymbol R})\exp \left(\ri\thp{P}{R}+ \sum_{\boldsymbol k}\Bigl(u^*_{\boldsymbol k}e^{-\ri \thp{k}{R}} \opa a^\dag_{\boldsymbol k}-u_{\boldsymbol k}e^{\ri \thp{k}{R}} \opa a_{\boldsymbol k}\Bigr)\right)| 0 \rangle,
\end{align}
whereas equations (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_9}), (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_10}) look like
\begin{align}
E^{(2)} &= \frac{E_L^{(0)} + \sum_{\boldsymbol P_1, \{n_k \neq 0\}}C^{(1)}_{\boldsymbol P_1, \{n_{\boldsymbol k}\}}\langle\Psi^{(L)}_{\boldsymbol P}| \opa H |\Psi_{\boldsymbol P_1, \{ n_{\boldsymbol k}\}}\rangle}{1 + \sum_{\boldsymbol P_1, \{n_k\neq 0\}}C^{(1)}_{\boldsymbol P_1, \{n_{\boldsymbol k}\}}\langle\Psi^{(L)}_{\boldsymbol P}| \Psi_{\boldsymbol P_1, \{ n_{\boldsymbol k}\}}\rangle};\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_17}
\\
C^{(1)}_{\boldsymbol P_1, \{n_{\boldsymbol k}\}} &= \frac{E_L^{(0)} \langle \Psi_{\boldsymbol P_1, \{ n_{\boldsymbol k}\}} |\Psi^{(L)}_{\boldsymbol P}\rangle - \langle \Psi_{\boldsymbol P_1, \{ n_{\boldsymbol k}\}}| \opa H |\Psi^{(L)}_{\boldsymbol P}\rangle }{H_{\boldsymbol P_1, \{n_{\boldsymbol k}\}; \boldsymbol P_1, \{n_{\boldsymbol k}\}} - E_L^{(0)}}; \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_18}
\\
H_{\boldsymbol P_1, \{n_{\boldsymbol k}\}; \boldsymbol P_2, \{n_{1\boldsymbol k}\}} &= \langle \Psi_{\boldsymbol P_1, \{ n_{\boldsymbol k}\}}| \opa H |\Psi_{\boldsymbol P_2, \{ n_{1\boldsymbol k}\}}\rangle, \quad E_L^{(0)} = \langle \Psi^{(L)}_{\boldsymbol P}| \opa H |\Psi^{(L)}_{\boldsymbol P}\rangle. \nonumber
\end{align}
We want to emphasize once more that all matrix elements are calculated with the full Hamiltonian of the system
\begin{align}
\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_19}
\opa H = \frac{1}{2}\left( \boldsymbol{P}^2 - 2 \sum_{\boldsymbol k} \opa a_{\boldsymbol k}^\dag \opa a_{\boldsymbol k} \boldsymbol k \cdot \boldsymbol{P} +\left(\sum_{\boldsymbol k} \opa a_{\boldsymbol k}^\dag \opa a_{\boldsymbol k} \boldsymbol k\right)^2\right)+\sum_{\boldsymbol k}\omega_{\boldsymbol k} \opa a^\dag_{\boldsymbol k}\opa a_{\boldsymbol k}+ \frac{g}{\sqrt{\Omega}}\sum_{\boldsymbol k}\frac{1}{\sqrt{2\omega_{\boldsymbol k}}} \left(e^{\ri\boldsymbol k\boldsymbol r}\opa a_{\boldsymbol k}+e^{-\ri\boldsymbol k\boldsymbol r}\opa a^\dag_{\boldsymbol k}\right).
\end{align}
Let us calculate the ground state energy $E^{(0)}_L$ of the system in this basis. The details of the calculation can be found in appendix C. The ground state energy reads
\begin{align}
&E_{L}^{(0)}(\boldsymbol P,g)= \frac{P^2}{2} - \thp{P}{Q} + G + E_{\text{f}}(\boldsymbol P) + E_{\text{int}}(\boldsymbol P), \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_20}
\end{align}
with
\begin{align*}
\boldsymbol Q &= \frac{1}{|N_{\boldsymbol P}|^2} \sum_{\boldsymbol k}\boldsymbol k |u_{\boldsymbol k}|^2\int d{\boldsymbol R} d{\boldsymbol r}\,\phi_{\boldsymbol P}^*({\boldsymbol r})\phi_{\boldsymbol P}({\boldsymbol r - \boldsymbol R}) e^{\Phi (\boldsymbol R) + \ri (\boldsymbol P - \boldsymbol k)\cdot \boldsymbol R};
\\
G &=\frac{1}{2} \frac{1}{|N_{\boldsymbol P}|^2}\sum_{\boldsymbol m,\boldsymbol l}\thp{m}{l}|u_{\boldsymbol m}|^2 |u_{\boldsymbol l}|^2\int d\boldsymbol r d\boldsymbol R \phi^*(\boldsymbol r)\phi(\boldsymbol r - \boldsymbol R)e^{\ri\thp{P}{R}+\Phi(\boldsymbol R)-\ri(\boldsymbol m + \boldsymbol l)\cdot \boldsymbol R};
\\
E_{\text{f}}(\boldsymbol P) &= \frac{1}{|N_{\boldsymbol P}|^2} \sum_{\boldsymbol k}\left(k + \frac{k^2}{2}\right)|u_{\boldsymbol k}|^2\int d{\boldsymbol R} d{\boldsymbol r}\,\phi_{\boldsymbol P}^*({\boldsymbol r})\phi_{\boldsymbol P}({\boldsymbol r-\boldsymbol R}) e^{\Phi (\boldsymbol R) + \ri (\boldsymbol P - \boldsymbol k)\!\cdot\!ot \boldsymbol R};
\\
E_{\text{int}}(\boldsymbol P) &= \frac{g}{|N_{\boldsymbol P}|^2 } \sum_{\boldsymbol k}\frac{u_{\boldsymbol k}}{\sqrt{2 k \Omega}} \int d{\boldsymbol R} d{\boldsymbol r}\left(\phi_{\boldsymbol P}^*({\boldsymbol r}+\boldsymbol R)\phi_{\boldsymbol P}({\boldsymbol r})+\phi_{\boldsymbol P}^*({\boldsymbol r})\phi_{\boldsymbol P}({\boldsymbol r}-{\boldsymbol R})\right)e^{\Phi (\boldsymbol R) + \ri (\thp{P}{R} + \thp{k}{r})};
\\
\Phi (\boldsymbol R)&= \sum_{\boldsymbol k}|u_{\boldsymbol k}|^2(e^{-\ri\thp{k}{R}}-1);
\\
|N_{\boldsymbol P}|^2 &= \int d{\boldsymbol R} d{\boldsymbol r}\,\phi_{\boldsymbol P}^*({\boldsymbol r})\phi_{\boldsymbol P}({\boldsymbol r}-{\boldsymbol R}) e^{\Phi(\boldsymbol R) + \ri \thp{P}{R}}.
\end{align*}
Actually, the iteration scheme (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_5}), (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_6}), (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_7}) can be used for arbitrary coupling constants \cite{Feranchuk2015}. However, as was described above, in the framework of our model we are interested in the behavior of the ground state energy $E_L^{(0)}$ in the weak coupling limit. In this limit we can neglect the function
\begin{align*}
\Phi(\boldsymbol R) = \sum_{\boldsymbol m}|u_{\boldsymbol m}|^2 \left(e^{-\ri\thp{m}{R}}-1\right)\sim g^2,
\end{align*}
in the exponents of all integrals in equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_20}), since $g\ll1$.
First of all, we investigate the situation of a particle at rest, i.e. $\boldsymbol P = 0$. In this case, in the weak coupling limit, the integrals in equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_20}) can be expressed through the Fourier transforms of the wave function $\phi(\boldsymbol r)$:
\begin{align}
\int d\boldsymbol R_1 d \boldsymbol \rho \phi^*(\boldsymbol \rho)\phi(\boldsymbol \rho - \boldsymbol R_1) e^{-\ri\thp{k}{R_1}} =\int d \boldsymbol \rho \phi^*(\boldsymbol \rho)e^{-\ri\thp{k}{\rho}}\int d \boldsymbol R \phi(\boldsymbol R)e^{\ri\thp{k}{R}} = \phi^*_{\boldsymbol k}\phi_{-\boldsymbol k} &= \phi_{\boldsymbol k}^2, \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_30}
\\
\int d\boldsymbol R_1 d \boldsymbol \rho \phi^*(\boldsymbol \rho)\phi(\boldsymbol \rho - \boldsymbol R_1) = |\phi_0|^2 &= \phi_0^2, \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_31}
\\
\int d \boldsymbol R_1 d \boldsymbol \rho \phi^*(\boldsymbol \rho)\phi(\boldsymbol \rho - \boldsymbol R_1) e^{-\ri\thp{k}{\rho}} = \phi^*_{\boldsymbol k}\phi_0 &= \phi_{\boldsymbol k}\phi_0. \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_32}
\end{align}
With the use of equations (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_20}), (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_30}-\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_32}) we can rewrite the energy of the ground state in a form
\begin{align}
E^{(0)}_L(0, g) = \frac{1}{2}\sum_{\boldsymbol m,\boldsymbol l}\thp{m}{l}|u_{\boldsymbol m}|^2 |u_{\boldsymbol l}|^2\,\frac{\phi_{\boldsymbol l+ \boldsymbol m}^2}{\phi_0^2}+\sum_{\boldsymbol k}\left(k + \frac{k^2}{2}\right)|u_{\boldsymbol k}|^2 \frac{\phi_{\boldsymbol k}^2}{\phi_0^2} + \frac{2g}{\sqrt{2 \Omega}}\sum_{\boldsymbol k}\frac{u_{\boldsymbol k}}{\sqrt{k}}\frac{\phi_{\boldsymbol k}}{\phi_0}, \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_33}
\end{align}
which up to fourth order in $g$ yields
\begin{align}
E^{(0)}_L(0, g) = \frac{g^2}{24\pi^2}\left(\lambda(-4+\sqrt{2})\sqrt{3\pi}+\lambda^2\right) + O (g^4).\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_34}
\end{align}
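For completeness, we sketch how equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_34}) follows from equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_33}); this is a minimal outline, assuming the standard replacement $\sum_{\boldsymbol k}\to\Omega\int d^3k/(2\pi)^3$ and the explicit forms of $u_{\boldsymbol k}$ and $\phi_{\boldsymbol k}$ from equations (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_28}-\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_29}):
\begin{align*}
\frac{2g}{\sqrt{2\Omega}}\sum_{\boldsymbol k}\frac{u_{\boldsymbol k}}{\sqrt{k}}\frac{\phi_{\boldsymbol k}}{\phi_0} &= -\frac{g^2}{(2\pi)^3}\int \frac{d\boldsymbol k}{k^2}\,e^{-\frac{3k^2}{4\lambda^2}} = -\frac{g^2\lambda}{2\pi^2}\sqrt{\frac{\pi}{3}} = -\frac{4\,g^2\lambda\sqrt{3\pi}}{24\pi^2},
\\
\sum_{\boldsymbol k}\left(k+\frac{k^2}{2}\right)|u_{\boldsymbol k}|^2\frac{\phi_{\boldsymbol k}^2}{\phi_0^2} &= \frac{g^2}{4\pi^2}\int_0^\infty dk\left(1+\frac{k}{2}\right)e^{-\frac{3k^2}{2\lambda^2}} = \frac{g^2}{24\pi^2}\left(\sqrt{2}\,\lambda\sqrt{3\pi}+\lambda^2\right),
\end{align*}
while the first (double) sum in equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_33}) is of order $g^4$; adding the two contributions reproduces equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_34}).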
By minimizing the energy with respect to $\lambda$ one finds
\begin{align}
E^{(0)}_L(0, g) = - g^2\frac{(-4+\sqrt{2})^2}{32\pi}; \quad \lambda = \frac{\sqrt{3\pi}}{2}(4 - \sqrt{2}).\label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_35}
\end{align}
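As a quick consistency check of equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_35}), equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_34}) can be written as $E^{(0)}_L(0,g)=\frac{g^2}{24\pi^2}\bigl(a\lambda\sqrt{3\pi}+\lambda^2\bigr)$ with $a=\sqrt{2}-4$; the stationarity condition then gives
\begin{align*}
\frac{\partial E^{(0)}_L}{\partial\lambda} = \frac{g^2}{24\pi^2}\left(a\sqrt{3\pi}+2\lambda\right)=0
\;\Rightarrow\;
\lambda = \frac{\sqrt{3\pi}}{2}\left(4-\sqrt{2}\right),
\qquad
E^{(0)}_L\Big|_{\min} = -\frac{g^2\,(4-\sqrt{2})^2}{32\pi},
\end{align*}
in agreement with equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_35}).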
In the weak-coupling limit it is also possible to obtain a renormalization for the mass of the particle. This is accomplished by expanding the energy (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_20}) in a series over $\boldsymbol P$ up to second order. The details of the calculation can be found in appendix D. The result reads
\begin{align}
E^{(0)}_L(P, g)\approx E^{(0)}_L(0, g) + \frac{P^2}{2}\left[1-\frac{g^2}{9\pi^2}\frac{17-\sqrt{2}}{21}\right]; \quad m^{(0)*} = 1+\frac{g^2}{9\pi^2}\frac{17-\sqrt{2}}{21}. \label{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_36}
\end{align}
From this equation we conclude that the factor determining the corrected mass is half of the leading second-order correction obtained from perturbation theory, see e.g. equation (\ref{eq:model_description14}).
\section{Second order iteration for the energy and convergence}
\langlebel{sec:second_order_iteration_for_the_energy_convergence}
In the previous section we have found the energy of the ground state and the renormalized mass, which are proportional to the square of the coupling constant in zeroth-order approximation. However, the correction to the energy coming from single-phonon intermediate transitions is of the same order with respect to the coupling constant. Consequently, its contribution should also be taken into account, thus requiring the calculation of the energy of the system in the second iteration (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_17}).
In order to calculate the second order iteration for the energy we notice (appendix E) that the matrix elements $\langle \Psi_{\boldsymbol P_1, \{ n_{\boldsymbol k}\}} |\Psi^{(L)}_{\boldsymbol P}\rangle$ and $\langle \Psi_{\boldsymbol P_1, \{ n_{\boldsymbol k}\}}| \opa H |\Psi^{(L)}_{\boldsymbol P}\rangle$, which enter equations (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_17}), (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_18}), are proportional to the delta function of the total momentum of the system, $\delta(\boldsymbol P_1 - \boldsymbol P)$. Therefore, during the evaluation of the sum over $\boldsymbol P_1$ in equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_17}) for the energy we have used the usual procedure \cite{LandauQED}: one of the delta functions in its square was replaced by the normalization volume $\Omega$, and the integration over the remaining one yields $\boldsymbol P = \boldsymbol P_1$, thus expressing the conservation of momentum.
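For clarity, this step can be sketched schematically; the distribution of the $(2\pi)^3$ factors depends on the convention adopted for the momentum delta functions, and $M_{\boldsymbol P_1}$ denotes a generic reduced matrix element introduced here only for illustration. Writing $\langle \Psi_{\boldsymbol P_1, \{ n_{\boldsymbol k}\}}| \opa H |\Psi^{(L)}_{\boldsymbol P}\rangle = M_{\boldsymbol P_1}\,\delta(\boldsymbol P_1 - \boldsymbol P)$, one has
\begin{align*}
\sum_{\boldsymbol P_1}\big|M_{\boldsymbol P_1}\big|^2\,\left[\delta(\boldsymbol P_1-\boldsymbol P)\right]^2
\;\to\;
\Omega\int \frac{d^3 P_1}{(2\pi)^3}\,\big|M_{\boldsymbol P_1}\big|^2\,\frac{\Omega}{(2\pi)^3}\,\delta(\boldsymbol P_1-\boldsymbol P)
\;=\;\frac{\Omega^2}{(2\pi)^6}\,\big|M_{\boldsymbol P}\big|^2,
\end{align*}
so that the sum collapses onto $\boldsymbol P_1=\boldsymbol P$, the remaining volume factors being compensated by the normalization of the states.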
First, we consider the case when the particle is at rest, i.e. $\boldsymbol P = 0$. The results, expressed through the Fourier components of the particle wave function in the weak coupling limit, read:
\begin{align}
E^{(2)}(0,g) = \frac{A}{B},\label{eq:second_order_iteration_for_the_energy_convergence_1}
\end{align}
where
\begin{align}
A = E_L^{(0)}&+\sum_{\boldsymbol k}\frac{1}{\phi_{\boldsymbol k}^2 \phi_0^2}\left[-u_{\boldsymbol k} \phi_{\boldsymbol k}^2\left(\frac{k^2}{2}+k\right)-\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k} \phi_0}{\sqrt{k}}-u_{\boldsymbol k} \phi_{\boldsymbol k}^2\bigg(g^2 I_{\boldsymbol k}+g^4 J_{\boldsymbol k}-E_L^{(0)}\bigg) \right] \nonumber
\\
&\mspace{40mu}\times\left[u_{\boldsymbol k} \phi_{\boldsymbol k}^2\left(\frac{k^2}{2}+k\right)+\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k} \phi_0}{\sqrt{k}}+u_{\boldsymbol k} \phi_{\boldsymbol k}^2\bigg(g^2 I_{\boldsymbol k}+g^4 J_{\boldsymbol k}\bigg) - E_L^{(0)} u_{\boldsymbol k} \phi_0^2\right] \nonumber
\\
&\mspace{40mu}\times \left[\left(\frac{k^2}{2}+k\right)+g^2 I_{\boldsymbol k}+g^4 J_{\boldsymbol k}-E_L^{(0)}\right]^{-1}, \label{eq:second_order_iteration_for_the_energy_convergence_2}
\end{align}
and
\begin{align}
B = 1 + \sum_{\boldsymbol k}\frac{1}{\phi_{\boldsymbol k}^2 \phi_0^2}\frac{\left[-u_{\boldsymbol k} \phi_{\boldsymbol k}^2\left(\frac{k^2}{2}+k\right)-\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k} \phi_0}{\sqrt{k}}-u_{\boldsymbol k} \phi_{\boldsymbol k}^2\bigg(g^2 I_{\boldsymbol k}+g^4 J_{\boldsymbol k}-E_L^{(0)}\bigg) \right]u_{\boldsymbol k}(\phi_{\boldsymbol k}^2-\phi_{0}^2)}{\left(\frac{k^2}{2}+k\right)+g^2 I_{\boldsymbol k}+g^4 J_{\boldsymbol k}-E_L^{(0)}}.\label{eq:second_order_iteration_for_the_energy_convergence_3}
\end{align}
In equations (\ref{eq:second_order_iteration_for_the_energy_convergence_2}-\ref{eq:second_order_iteration_for_the_energy_convergence_3}) we have introduced the following notations
\begin{align}
\sum_{\boldsymbol m}\boldsymbol m |u_{\boldsymbol m}|^2 \phi_{\boldsymbol m+\boldsymbol k}^2 &\equiv g^2 \phi_{\boldsymbol k}^2 \boldsymbol I^{(1)}_{\boldsymbol k}; \quad
\boldsymbol I^{(1)}_{\boldsymbol k} =
\frac{\boldsymbol k}{k^2}\frac{\lambda^2}{32\pi^2} \frac{4k-e^{\frac{2}{3}\frac{k^2}{\lambda^2}}\sqrt{6\pi}\lambda \text{Erf}\frac{\sqrt{\frac{2}{3}}k}{\lambda}}{k}; \label{eq:second_order_iteration_for_the_energy_convergence_4}
\\
\sum_{\boldsymbol m}\left(\frac{m^2}{2}+m\right)|u_{\boldsymbol m}|^2 \phi_{\boldsymbol m+\boldsymbol k}^2 &\equiv g^2 \phi_{\boldsymbol k}^2 I^{(2)}_{\boldsymbol k}; \quad I^{(2)}_{\boldsymbol k} = \frac{ \lambda^2}{96 \pi^2} \frac{\sqrt{6\pi}\lambda e^{\frac{2}{3}\frac{k^2}{\lambda^2}}\text{Erf}\frac{\sqrt{\frac{2}{3}}k}{\lambda} + 6 \pi \text{Erfi}\frac{\sqrt{\frac{2}{3}}k}{\lambda}}{k}; \label{eq:second_order_iteration_for_the_energy_convergence_5}
\\
\frac{g \phi_{\boldsymbol k}}{\sqrt{2 \Omega}}\sum_{\boldsymbol m}\frac{u_{\boldsymbol m}}{\sqrt{m}}(\phi_{\boldsymbol m+\boldsymbol k}+\phi_{\boldsymbol m - \boldsymbol k}) &\equiv g^2 \phi_{\boldsymbol k}^2 I^{(3)}_{\boldsymbol k}; \quad I^{(3)}_{\boldsymbol k} = - \frac{ \lambda^2}{4\pi}\frac{\text{Erfi}\frac{k}{\sqrt{3}\lambda}}{k}; \label{eq:second_order_iteration_for_the_energy_convergence_6}
\\
I_{\boldsymbol k} &= \boldsymbol k \cdot \boldsymbol I_{\boldsymbol k}^{(1)}+I_{\boldsymbol k}^{(2)}+I_{\boldsymbol k}^{(3)}; \label{eq:second_order_iteration_for_the_energy_convergence_7}
\\
\frac{1}{2}\sum_{\boldsymbol l,\boldsymbol m}\thp{l}{m}|u_{\boldsymbol l}|^2|u_{\boldsymbol m}|^2 \phi_{\boldsymbol l+\boldsymbol m+\boldsymbol k}^2 &\equiv g^4 \phi_{\boldsymbol k}^2 J_{\boldsymbol k}; \quad J_{\boldsymbol k} \approx \frac{5^{1/2}\lambda^2}{4(2\pi)^3 3^5}e^{\frac{4}{5}\frac{k^2}{\lambda^2}}\frac{\frac{2}{15}\frac{k^2}{\lambda^2} - 1}{(1+\frac{4}{45}\frac{k^2}{\lambda^2})^3}, \label{eq:second_order_iteration_for_the_energy_convergence_8}
\end{align}
where $\text{Erf}(x) = 2/\sqrt{\pi}\int_0^x e^{-z^2}dz$ and $\text{Erfi}(x) = -\ri\,\text{Erf}(\ri x)$ are the error function and the imaginary error function, respectively. When we calculated the energy of the ground state, we dropped all terms of order higher than $g^2$. Consequently, we can neglect the term $g^4 J_{\boldsymbol k}$ in comparison with $g^2 I_{\boldsymbol k}$, which can be confirmed by a direct numerical calculation of the integral.
Prior to the numerical evaluation of the integrals (\ref{eq:second_order_iteration_for_the_energy_convergence_2}) and (\ref{eq:second_order_iteration_for_the_energy_convergence_3}), let us understand their structure through an approximate analytical calculation. We investigate the behavior of the numerator and denominator of the quantities $A$ and $B$. We start by breaking the integration region into two parts, namely $[0,k_0]$ and $[k_0,\infty)$. The value $k_0$ will be fixed below. Let us work out the behavior of the quantity $I_{\boldsymbol k}$ for small and large values of $k$. First of all we notice that $g^2 I_{0}$ gives exactly the ground state energy $E_L^{(0)}$. For small values of $k$ one has $g^2 I_{\boldsymbol k}\sim -g^2 k^2/(18\pi^2) +E_L^{(0)}$, i.e. with increasing $k$ this quantity grows quadratically in absolute value while being negative. Therefore, due to the presence of $g^2$, this term is small in comparison with $k^2/2+k$ for small values of $k$, so that, in the denominator of the quantity $A$, the leading term is $k^2/2+k$. For large values of $k$, the quantity $g^2 I_{\boldsymbol k}$ grows exponentially, as $I_{\boldsymbol k}\sim e^{\frac{2}{3}\frac{k^2}{\lambda^2}}/k$, and becomes the leading contribution in comparison with $k^2/2+k$, despite the higher power of $g$.
In analogy, we can analyze the numerator of the quantity $A$. For small values of $k$ we can neglect in every square bracket in equation (\ref{eq:second_order_iteration_for_the_energy_convergence_2}) the large powers of $g$, i.e. terms with exponents larger than $1$. Consequently, for small values of $k$, the integrand within $A$ looks like
\begin{align}
\label{eq:second_order_iteration_for_the_energy_convergence_9}
-\frac{\left[u_{\boldsymbol k} \phi_{\boldsymbol k}^2\left(\frac{k^2}{2}+k\right)+\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k} \phi_0}{\sqrt{k}}\right]^2}{\phi_{\boldsymbol k}^2 \phi_0^2 \left(\frac{k^2}{2}+k\right)}.
\end{align}
For large values of $k$, the numerator is exponentially decreasing, with the leading term being $(-\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k} \phi_0}{\sqrt{k}})(-E_L^{(0)} u_{\boldsymbol k})$. This follows from the fact that $u_{\boldsymbol k}\sim e^{-\frac{k^2}{4 \lambda^2}}$ and $\phi_{\boldsymbol k}\sim e^{-\frac{k^2}{2 \lambda^2}}$. Consequently, the integrand for large values of $k$ can be presented as
\begin{align}
\label{eq:second_order_iteration_for_the_energy_convergence_10}
\frac{(-\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k} \phi_0}{\sqrt{k}})(-E_L^{(0)} u_{\boldsymbol k})}{g^2\phi_{\boldsymbol k}^2 I_{\boldsymbol k}}.
\end{align}
Combining all together, we find that the quantity $A$ can be approximately calculated as
\begin{align}
A \approx E_L^{(0)} + \sum_{\boldsymbol k<\boldsymbol k_0}\frac{-\left(u_{\boldsymbol k} \frac{\phi_{\boldsymbol k}}{\phi_0}\left(\frac{k^2}{2}+k\right)+\frac{g}{\sqrt{2 \Omega}}\frac{1}{\sqrt{k}}\right)^2}{(\frac{k^2}{2}+k)}+\sum_{\boldsymbol k>\boldsymbol k_0}\frac{(-\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k} \phi_0}{\sqrt{k}})(-E_L^{(0)} u_{\boldsymbol k})}{\phi_{\boldsymbol k}^2 g^2 I_{\boldsymbol k}}. \label{eq:second_order_iteration_for_the_energy_convergence_11}
\end{align}
In this expression both sums are well defined and remain finite. In particular, the sum over the region $k>k_0$ converges because the ratio of the numerator to the denominator of its summand decreases exponentially, as $e^{-\frac{5k^2}{12 \lambda^2}}$.
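The quoted exponent can be read off directly from the large-$k$ asymptotics used above, $u_{\boldsymbol k}\sim e^{-\frac{k^2}{4\lambda^2}}$, $\phi_{\boldsymbol k}\sim e^{-\frac{k^2}{2\lambda^2}}$ and $I_{\boldsymbol k}\sim e^{\frac{2}{3}\frac{k^2}{\lambda^2}}/k$: up to powers of $k$, the summand of the second sum in equation (\ref{eq:second_order_iteration_for_the_energy_convergence_11}) behaves as
\begin{align*}
\frac{\phi_{\boldsymbol k}\,u_{\boldsymbol k}}{\phi_{\boldsymbol k}^2\, I_{\boldsymbol k}}
\sim \frac{e^{-\frac{k^2}{2\lambda^2}}\,e^{-\frac{k^2}{4\lambda^2}}}{e^{-\frac{k^2}{\lambda^2}}\,e^{\frac{2}{3}\frac{k^2}{\lambda^2}}}
= e^{-\left(\frac{3}{4}-1+\frac{2}{3}\right)\frac{k^2}{\lambda^2}}
= e^{-\frac{5}{12}\frac{k^2}{\lambda^2}}.
\end{align*}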
In equation (\ref{eq:second_order_iteration_for_the_energy_convergence_11}) the point $k_0$ is determined as a solution of the equation
\begin{align}
\frac{k^2}{2} + k + g^2 I_{\boldsymbol k} - E_L^{(0)} = 0, \label{eq:second_order_iteration_for_the_energy_convergence_12}
\end{align}
or employing the asymptotic behavior for the function $I_{\boldsymbol k}$ (appendix F)
\begin{align}
\frac{k_0^2}{2} + k_0 = g^2\frac{\lambda^3 \sqrt{6\pi}}{48\pi^2}\frac{e^{\frac{2}{3}\frac{k_0^2}{\lambda^2}}}{k_0},
\label{eq:second_order_iteration_for_the_energy_convergence_13}
\end{align}
and, after taking the logarithm of both sides,
\begin{align}
\ln\frac{(\frac{k_0^2}{2}+k_0)k_0}{a} &= -2|\ln g|+\frac{2}{3}\frac{k_0^2}{\lambda^2},\label{eq:second_order_iteration_for_the_energy_convergence_14}
\end{align}
with
\begin{align*}
a &= \frac{\lambda^3 \sqrt{6\pi}}{48\pi^2}.
\end{align*}
In the limit of extremely small $g$, we can construct the solution of equation (\ref{eq:second_order_iteration_for_the_energy_convergence_14}) via iterations, thus yielding
\begin{align}
k_0 \sim \lambda\sqrt{3|\ln g|}.\label{eq:second_order_iteration_for_the_energy_convergence_15}
\end{align}
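To illustrate how quickly the asymptotic form (\ref{eq:second_order_iteration_for_the_energy_convergence_15}) is approached, the transcendental equation (\ref{eq:second_order_iteration_for_the_energy_convergence_14}) can be solved by a simple fixed-point iteration. The following is a minimal numerical sketch; the value $\lambda=\frac{\sqrt{3\pi}}{2}(4-\sqrt{2})$ from equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_35}) and the function name are assumptions made purely for illustration.
\begin{verbatim}
import numpy as np

lam = np.sqrt(3 * np.pi) / 2 * (4 - np.sqrt(2))    # lambda, taken from Eq. (35) for illustration
a = lam**3 * np.sqrt(6 * np.pi) / (48 * np.pi**2)   # constant entering Eq. (14)

def k0_iterative(g, n_iter=100):
    """Solve ln((k0^2/2 + k0) k0 / a) = -2|ln g| + (2/3) k0^2 / lam^2 by fixed-point iteration."""
    k0 = lam * np.sqrt(3 * abs(np.log(g)))           # asymptotic starting guess, Eq. (15)
    for _ in range(n_iter):
        rhs = 3 * abs(np.log(g)) + 1.5 * np.log((k0**2 / 2 + k0) * k0 / a)
        k0 = lam * np.sqrt(rhs)
    return k0

for g in [1e-2, 1e-4, 1e-8]:
    print(f"g = {g:.0e}:  k0 = {k0_iterative(g):.3f},"
          f"  asymptotic = {lam * np.sqrt(3 * abs(np.log(g))):.3f}")
\end{verbatim}
The iteration starts from the asymptotic guess and refines it; the relative correction to equation (\ref{eq:second_order_iteration_for_the_energy_convergence_15}) shrinks as $g\to0$, since the logarithmic term on the right-hand side grows only like $\ln|\ln g|$.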
The estimation of quantity $B$ can be performed in a similar fashion and one finds
\begin{align}
B \approx 1+ \sum_{\boldsymbol k<\boldsymbol k_0}\frac{-\left(u_{\boldsymbol k} \frac{\phi_{\boldsymbol k}^2}{\phi_0^2}\left(\frac{k^2}{2}+k\right)+\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k}}{\phi_0\sqrt{k}}\right)u_{\boldsymbol k}(\phi_{\boldsymbol k}^2-\phi_0^2)}{\phi_{\boldsymbol k}^2(\frac{k^2}{2}+k)}+ \sum_{\boldsymbol k>\boldsymbol k_0}\frac{(-\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k} \phi_0}{\sqrt{k}})u_{\boldsymbol k}(\frac{\phi_{\boldsymbol k}^2}{\phi_0^2}-1)}{\phi_{\boldsymbol k}^2 g^2 I_{\boldsymbol k}}.\label{eq:second_order_iteration_for_the_energy_convergence_16}
\end{align}
At first sight, it may appear that the quantity $B$ features an infrared divergence, because the term $\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k} u_{\boldsymbol k}}{\phi_0\sqrt{k}}/(\phi_{\boldsymbol k}^2(\frac{k^2}{2}+k)) \sim 1/k^3$ as $k\rightarrow 0$. However, this additional power of $k$ in the denominator is cancelled by the difference $\phi_{\boldsymbol k}^2 - \phi_0^2 \sim k^2/\lambda^2$. The convergence at infinity is ensured by the exponential decrease of the integrand, $\sim e^{-\frac{5k^2}{12 \lambda^2}}$.
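Explicitly, using $u_{\boldsymbol k}\sim k^{-3/2}$ and $\phi_{\boldsymbol k}^2-\phi_0^2\simeq-\phi_0^2\,k^2/\lambda^2$ for $k\to0$, the potentially dangerous part of the summand in equation (\ref{eq:second_order_iteration_for_the_energy_convergence_16}) behaves as
\begin{align*}
\frac{g}{\sqrt{2\Omega}}\frac{\phi_{\boldsymbol k}}{\phi_0\sqrt{k}}\,
\frac{u_{\boldsymbol k}\left(\phi_{\boldsymbol k}^2-\phi_0^2\right)}{\phi_{\boldsymbol k}^2\left(\frac{k^2}{2}+k\right)}
\;\sim\; \frac{1}{\sqrt{k}}\cdot\frac{1}{k^{3/2}}\cdot\frac{k^2}{k}
= \frac{1}{k},
\end{align*}
so that, together with the phase-space factor $d^3k\sim k^2\,dk$, the integrand vanishes linearly at small $k$.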
By plugging into equations (\ref{eq:second_order_iteration_for_the_energy_convergence_11}) and (\ref{eq:second_order_iteration_for_the_energy_convergence_16}) the values of $\phi_{\boldsymbol k}$ and $u_{\boldsymbol k}$, which are defined in equations (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_28}-\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_29}), and calculating the integrals (appendix F) we find the approximate analytical formula for the second iteration for the ground state energy
\begin{align}
A &\approx E_L^{(0)}-\left[\frac{g^2\lambda}{24 \pi ^2}\left(\sqrt{6 \pi } \text{Erf}\left(\frac{\sqrt{\frac{3}{2}} k_0}{\lambda }\right)+\lambda -\lambda e^{-\frac{3k_0^2}{2 \lambda ^2}}\right)-\frac{g^2\lambda}{2 \sqrt{3}\pi^{3/2}}\text{Erf}\left(\frac{\sqrt{3}k_0}{2\lambda }\right)\right]-\frac{g^2}{2\pi^2}\ln\left(\frac{k_0}{2}+1\right)+E_L^{(0)}\frac{12\sqrt{6\pi}}{5 \lambda \pi}e^{-\frac{5k_0^2}{12 \lambda^2}}, \label{eq:second_order_iteration_for_the_energy_convergence_17}
\\
B &\approx 1+\frac{g^2}{12\pi^2}(1-e^{-\frac{3}{2}\frac{k_0^2}{\lambda^2}})-g^2f\left(\frac{k_0}{\lambda}\right) - \frac{144\sqrt{6\pi}}{25 \lambda \pi}\left(1+\frac{5}{12}\frac{k_0^2}{\lambda^2}\right)e^{-\frac{5k_0^2}{12 \lambda^2}}, \label{eq:second_order_iteration_for_the_energy_convergence_18}
\\
f(x) &= \frac{1}{4\pi^2}\int_0^x \frac{tdt}{1+t/2}e^{-\frac{3}{4}t^2}.\nonumber
\end{align}
Within the accuracy of the approximate formulas, we can set $B\approx1$. Therefore, one finally obtains
\begin{align}
E^{(2)}(0,g) \approx A. \label{eq:second_order_iteration_for_the_energy_convergence_19}
\end{align}
The use of our simple analytical expressions allows us to establish the behavior of the energy as a function of the coupling constant and, consequently, to determine the character of the singularity. In order to isolate the singularity, we investigate the limit
\begin{align*}
\lim_{g\rightarrow0}E^{(2)}(0,g).
\end{align*}
In this limit, the value of $k_0$ grows logarithmically. Consequently, we can approximately set $k_0 \rightarrow \infty$ both in the expression in square brackets and in the last term of equation (\ref{eq:second_order_iteration_for_the_energy_convergence_17}). In this way, the square bracket becomes equal to the energy of the ground state (appendix F) and cancels $E_L^{(0)}$. The last term, being exponentially small, also does not contribute to the energy. Consequently, only one term remains; it determines the character of the singularity exactly and is equal to
\begin{align}
E^{(2)}(0,g) &\underset{g\rightarrow0}{\longrightarrow}-\frac{g^2}{2\pi^2}\ln\left(\frac{k_0}{2}+1\right);
\label{eq:second_order_iteration_for_the_energy_convergence_20}
\\
k_0 &\approx \lambda\sqrt{3|\ln g|}. \label{eq:second_order_iteration_for_the_energy_convergence_21}
\end{align}
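Combining equations (\ref{eq:second_order_iteration_for_the_energy_convergence_20}) and (\ref{eq:second_order_iteration_for_the_energy_convergence_21}), the leading small-$g$ behavior can be written explicitly as
\begin{align*}
E^{(2)}(0,g)\;\underset{g\to0}{\simeq}\;-\frac{g^2}{2\pi^2}\ln\left(\frac{\lambda\sqrt{3|\ln g|}}{2}+1\right),
\end{align*}
which makes the non-analyticity at $g=0$ manifest: the coefficient of $g^2$ itself grows like $\ln|\ln g|$ as $g\to0$, so no Taylor expansion in $g$ around the origin exists.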
We observe that this term exactly coincides with the result obtained via perturbation theory, i.e. equation (\ref{eq:model_description13}), however now with a well specified ``cut-off''. Moreover, most of the contributions to the integral in the energy arise from the region $k<k_0$, and this is exactly the reason for the natural ``cut-off'', which is determined self-consistently and is directly related to the only parameter of the Hamiltonian, namely the coupling constant. Let us mention here that the corrections to the energy of the system (\ref{eq:second_order_iteration_for_the_energy_convergence_20}) arise in the subsequent iteration and are related to the transitions into intermediate states with two phonons. These contributions are proportional to $g^4$.
In addition, we note here that the absence of an ultraviolet divergence in the energy of the ground state in equations (\ref{eq:second_order_iteration_for_the_energy_convergence_1}, \ref{eq:second_order_iteration_for_the_energy_convergence_2}, \ref{eq:second_order_iteration_for_the_energy_convergence_3}) is due to the fact that the dressed wave functions (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_14}) were used both in the zeroth-order approximation and, in the second-order iteration, in the resolvent $[E^{(0)}_{\mu} - H_{\nu \nu}]^{-1}$ of equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_10}). This leads to the effective momentum cut-off $k_0(g)$, which is determined as the solution of equation (\ref{eq:second_order_iteration_for_the_energy_convergence_12}). This cut-off is a function of the coupling constant and not a phenomenological parameter that has to be introduced in order to remove the ultraviolet divergence. Moreover, as follows from equation (\ref{eq:second_order_iteration_for_the_energy_convergence_20}), the energy of the ground state has a logarithmic singularity as $g\rightarrow 0$. It is clearly seen that this dependence cannot be obtained in the framework of perturbation theory, which yields a power series in the coupling constant $g$.
\begin{figure}
\caption{(Color online) (a) The dependence on the coupling constant of the ratio of the exact numerical evaluation to the approximate analytical formula of the second iteration for the energy. The value $k_0$ in the analytical approximation is equal to $k_0 = \lambda\sqrt{3|\ln g|}$.}
\label{fig1}
\end{figure}
In order to ensure that our interpretation is correct, we have evaluated the integrals numerically and have found in the limit of extremely small $g$ the ratio of the results via exact numerical versus analytical evaluations. This ratio is almost constant and is approximately equal to one, as presented in Figure~\ref{fig1}. Therefore, we can conclude that the main reason why conventional perturbation theory fails is related to the fact that the energy of the system is a non-analytical function of the coupling constant and consequently can not be expanded in a series over $g$ near a singular point.
The second interesting consequence of the numerical evaluation of the integral is related to the fact that the energy of the system contains a small imaginary part, which means that the state has a finite lifetime and is quasi-stationary. To prove this, we have calculated the transition probability to the state $|\Psi_{\boldsymbol P_1,1_{\boldsymbol k}}\rangle$ for the case when a particle is at rest, i.e.
\begin{align}
\frac{w_{0\rightarrow1}}{2} &= \pi \int |\langle \Psi_{\boldsymbol P_1, \{ n_{\boldsymbol k}\}}| \opa H |\Psi^{(L)}_{\boldsymbol P}\rangle|^2 \delta\left(H_{\boldsymbol P_1, 1_{\boldsymbol k}; \boldsymbol P_1, 1_{\boldsymbol k}}-E_L^{(0)}\right)\frac{\Omega d\boldsymbol k}{(2\pi)^3} \nonumber
\\
&=\frac{\Omega}{2\pi}\frac{k^2}{|k+1+g^2 I^\prime_{\boldsymbol k}|}\frac{\left[u_{\boldsymbol k} \phi_{\boldsymbol k}^2\left(\frac{k^2}{2}+k\right)+\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k} \phi_0}{\sqrt{k}}+u_{\boldsymbol k} \phi_{\boldsymbol k}^2 g^2 I_{\boldsymbol k} - E_L^{(0)} u_{\boldsymbol k} \phi_0^2\right]^2}{\phi_0^2 \phi_{\boldsymbol k}^2}\Bigg|_{\frac{k^2}{2}+k+g^2 I_{\boldsymbol k} - E_L^{(0)}=0}. \langlebel{eq:second_order_iteration_for_the_energy_convergence_22}
\end{align}
The result of the evaluation is presented in Figure~\ref{fig1}. As can be seen from the figure, the two curves coincide exactly. This can be interpreted via the diagram technique \cite{LandauQED}. The second order iteration for the energy of the particle can be represented by the diagram depicted in Figure~\ref{fig2}. If the diagram is split by the dashed line, the imaginary part will correspond to the transition probability to the state $|\Psi_{\boldsymbol P_1,1_{\boldsymbol k}}\rangle$.
\begin{figure}
\caption{(Color online) Feynman diagram of the process. If the diagram is split by the dashed line, the imaginary part will correspond to the transition probability to the state $|\Psi_{\boldsymbol P_1,1_{\boldsymbol k}}\rangle$.}
\label{fig2}
\end{figure}
Here we need to stress that, contrary to standard perturbation theory, in our formulation the conservation of energy is governed not by the free Hamiltonian $\opa H_0$ but by the expectation value of the total Hamiltonian $H_{\boldsymbol P_1 1_{\boldsymbol k};\boldsymbol P_1 1_{\boldsymbol k}}$. Therefore, for certain values of $k$ and for certain coupling constants $g$ this energy level might appear to be below the energy $E_L^{(0)}$, featuring a so-called quasi-intersection of energy levels. If the transition probability to the state $|\Psi_{\boldsymbol P_1,1_{\boldsymbol k}}\rangle$ were large, the description of the system with the state vectors (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_14}) would not be applicable, and a reconstruction of the states, taking into account the degeneracy between the energies $E_L^{(0)}$ and $H_{\boldsymbol P_1 1_{\boldsymbol k};\boldsymbol P_1 1_{\boldsymbol k}}$, would need to be performed. In our case, however, the transition probability is small, and consequently the description with a complex energy, with a small imaginary part, is valid, in analogy to the theory of the natural line width of atomic states or to the anharmonic oscillator $p^2/2+x^2/2-\mu x^4$, with $\mu>0$.
In order to conclude our formulation we have calculated the renormalized mass in the second iteration. In terms of the introduced abbreviations the second order iteration for the particle energy can be written as
\begin{align}
E^{(2)}(\boldsymbol P,g) = \frac{\frac{P^2}{2} + \tilde E_{L}^{(0)}(\boldsymbol P,g) + A_{\boldsymbol P}}{B_{\boldsymbol P}}, \label{eq:second_order_iteration_for_the_energy_convergence_24}
\end{align}
with
\begin{align}
\tilde E_L^{(0)}(\boldsymbol P,g) &= - \boldsymbol P\cdot\sum_{\boldsymbol m}\boldsymbol m |u_{\boldsymbol m}|^2 \frac{\phi_{\boldsymbol P - \boldsymbol m}^2}{\phi_{\boldsymbol P}^2} + \sum_{\boldsymbol m}\left(\frac{m^2}{2} + m\right)|u_{\boldsymbol m}|^2 \frac{\phi_{\boldsymbol P - \boldsymbol m}^2}{\phi_{\boldsymbol P}^2} + \frac{2g}{\sqrt{2 \Omega}}\sum_{\boldsymbol m}\frac{u_{\boldsymbol m}}{\sqrt{m}}\frac{\phi_{\boldsymbol P - \boldsymbol m}}{\phi_{\boldsymbol P}}.\label{eq:second_order_iteration_for_the_energy_convergence_25}
\end{align}
The quantity $A_{\boldsymbol P}$ reads as
\begin{align}
A_{\boldsymbol P} &= \sum_{\boldsymbol k}\Bigg[\frac{P^2}{2}u_{\boldsymbol k}\left(\phi_{\boldsymbol P - \boldsymbol k}^2 - \phi_{\boldsymbol P}^2\right)+\left(\frac{k^2}{2}+k-\thp{P}{k}\right)u_{\boldsymbol k}\phi_{\boldsymbol P - \boldsymbol k}^2 + \frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol P - \boldsymbol k}\phi_{\boldsymbol P}}{\sqrt{k}} + g^2 u_{\boldsymbol k} \phi_{\boldsymbol P - \boldsymbol k}^2 I_{\boldsymbol P - \boldsymbol k}-\tilde E_{L}^{(0)}(\boldsymbol P,g)u_{\boldsymbol k}\phi_{\boldsymbol P}^2\Bigg] \nonumber
\\
&\mspace{29mu}\times\left[\tilde E_{L}^{(0)}(\boldsymbol P,g)u_{\boldsymbol k}\phi_{\boldsymbol P-\boldsymbol k}^2-\left(\left(\frac{k^2}{2}+k-\thp{P}{k}\right)u_{\boldsymbol k}\phi_{\boldsymbol P - \boldsymbol k}^2 + \frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol P - \boldsymbol k}\phi_{\boldsymbol P}}{\sqrt{k}} + g^2 u_{\boldsymbol k} \phi_{\boldsymbol P - \boldsymbol k}^2 I_{\boldsymbol P - \boldsymbol k}\right)\right]\frac{1}{\phi_{\boldsymbol P - \boldsymbol k}^2 \phi_{\boldsymbol P}^2} \nonumber
\\
&\mspace{29mu}\times \left[\left(\frac{k^2}{2}+k-\thp{P}{k}\right) + g^2 I_{\boldsymbol P-\boldsymbol k} - \tilde E_L^{(0)}(\boldsymbol P,g)\right]^{-1}, \label{eq:second_order_iteration_for_the_energy_convergence_26}
\end{align}
and the quantity $B_{\boldsymbol P}$ is equal to
\begin{align}
B_{\boldsymbol P} &=1+ \sum_{\boldsymbol k}\left[\tilde E_{L}^{(0)}(\boldsymbol P,g)u_{\boldsymbol k}\phi_{\boldsymbol P-\boldsymbol k}^2-\left(\left(\frac{k^2}{2}+k-\thp{P}{k}\right)u_{\boldsymbol k}\phi_{\boldsymbol P - \boldsymbol k}^2 + \frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol P - \boldsymbol k}\phi_{\boldsymbol P}}{\sqrt{k}} + g^2 u_{\boldsymbol k} \phi_{\boldsymbol P - \boldsymbol k}^2 I_{\boldsymbol P - \boldsymbol k}\right)\right]\frac{u_{\boldsymbol k}\left(\phi_{\boldsymbol P - \boldsymbol k}^2 - \phi_{\boldsymbol P}^2\right)}{\phi_{\boldsymbol P - \boldsymbol k}^2 \phi_{\boldsymbol P}^2} \nonumber
\\
&\mspace{29mu}\times \left[\left(\frac{k^2}{2}+k-\thp{P}{k}\right) + g^2 I_{\boldsymbol P-\boldsymbol k} - \tilde E_L^{(0)}(\boldsymbol P,g)\right]^{-1}. \label{eq:second_order_iteration_for_the_energy_convergence_27}
\end{align}
To proceed, we again break the integration region into two parts; however, now we know that the main contribution to the energy of the system comes from the region $[0,k_0]$. In this region we again drop all terms with a power of $g$ larger than one. We recall that the classical component of the field $u_{\boldsymbol k}$ is proportional to $g$ and that the energy $E_{L}^{(0)}(\boldsymbol P,g)\sim g^2$. In addition, the limit $P\ll 1$ is considered. Moreover, since the quantity $B_{\boldsymbol P}$, after expansion over the momentum $\boldsymbol P$, has the form $B_{\boldsymbol P} = 1-g^2 F(P^2)$, we can set $\boldsymbol P=0$ in $B_{\boldsymbol P}$ in order to preserve the same accuracy.
In this approximation, the quantity $A_{\boldsymbol P}$ takes the form
\begin{align}
A_{\boldsymbol P} = &-\frac{P^2}{2}\sum_{\boldsymbol k<\boldsymbol k_0}\left\{u_{\boldsymbol k}\frac{\left(\phi_{\boldsymbol P - \boldsymbol k}^2 - \phi_{\boldsymbol P}^2\right)}{\phi_{\boldsymbol P - \boldsymbol k}\phi_{\boldsymbol P}}\left[u_{\boldsymbol k}\frac{\phi_{\boldsymbol P - \boldsymbol k}}{\phi_{\boldsymbol P}} + \frac{g}{\sqrt{2 \Omega}}\frac{1}{\sqrt{k}} \left(\frac{k^2}{2}+k-\thp{P}{k}\right) ^{-1}\right]\right\} \nonumber
\\
&-\sum_{\boldsymbol k<\boldsymbol k_0}\left[\left(\frac{k^2}{2}+k-\thp{P}{k}\right)u_{\boldsymbol k}\frac{\phi_{\boldsymbol P - \boldsymbol k}}{\phi_{\boldsymbol P}} + \frac{g}{\sqrt{2 \Omega}}\frac{1}{\sqrt{k}} \right]^2\left(\frac{k^2}{2}+k-\thp{P}{k}\right)^{-1} \label{eq:second_order_iteration_for_the_energy_convergence_28}
\end{align}
and
\begin{align}
B_{\boldsymbol P} = B_0 = 1 -\sum_{\boldsymbol k < \boldsymbol k_0}\frac{\left(u_{\boldsymbol k} \frac{\phi_{\boldsymbol k}^2}{\phi_0^2}\left(\frac{k^2}{2}+k\right)+\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k}}{\phi_0\sqrt{k}}\right)u_{\boldsymbol k}(\phi_{\boldsymbol k}^2-\phi_0^2)}{\phi_{\boldsymbol k}^2(\frac{k^2}{2}+k)}. \label{eq:second_order_iteration_for_the_energy_convergence_29}
\end{align}
If the definition of $\tilde E_{L}^{(0)}(\boldsymbol P,g)$ (\ref{eq:second_order_iteration_for_the_energy_convergence_25}), together with equations (\ref{eq:second_order_iteration_for_the_energy_convergence_28}) and (\ref{eq:second_order_iteration_for_the_energy_convergence_29}), is used in equation (\ref{eq:second_order_iteration_for_the_energy_convergence_24}), the second iteration for the energy of a moving particle is obtained
\begin{align}
E^{(2)}(\boldsymbol P,g) &= E^{(2)}(0,g) -\frac{g^2}{2 \Omega}\sum_{\boldsymbol k<\boldsymbol k_0}\frac{(\thp{P}{k})^2}{k(k^2/2+k)^3} \nonumber
\\
&\mspace{50mu}+\frac{P^2}{2}\left\{1-\sum_{\boldsymbol k < \boldsymbol k_0} \left[u_{\boldsymbol k}^2 \left(\frac{\phi_{\boldsymbol k}^2}{\phi_{0}^2}-1\right)+\frac{g}{\sqrt{2 \Omega}}\frac{u_{\boldsymbol k}}{\sqrt{k}(k^2/2+k)}\frac{\left(\phi_{\boldsymbol k}^2 - \phi_{0}^2\right)}{\phi_{\boldsymbol k}\phi_{0}}\right]\right\}\nonumber
\\
&\mspace{50mu}\times\left(1-\sum_{\boldsymbol k < \boldsymbol k_0} \left[u_{\boldsymbol k}^2 \left(\frac{\phi_{\boldsymbol k}^2}{\phi_{0}^2}-1\right)+\frac{g}{\sqrt{2 \Omega}}\frac{u_{\boldsymbol k}}{\sqrt{k}(k^2/2+k)}\frac{\left(\phi_{\boldsymbol k}^2 - \phi_{0}^2\right)}{\phi_{\boldsymbol k}\phi_{0}}\right]\right)^{-1},\label{eq:second_order_iteration_for_the_energy_convergence_30}
\end{align}
or after simplification, taking into account the fact that the sum in the denominator is proportional to $g^2$, one finally obtains
\begin{align}
E^{(2)}(\boldsymbol P,g) &= E^{(2)}(0,g) +\frac{P^2}{2}-\frac{g^2}{2 \Omega}\sum_{\boldsymbol k<\boldsymbol k_0}\frac{(\thp{P}{k})^2}{k(k^2/2+k)^3}.\label{eq:second_order_iteration_for_the_energy_convergence_31}
\end{align}
From here, we see that the second iteration for the renormalized mass
\begin{align}
m^{(2)*} \approx 1+\frac{g^2}{6\pi^2}\label{eq:second_order_iteration_for_the_energy_convergence_32}
\end{align}
coincides with the one via perturbation theory.
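The coefficient in equation (\ref{eq:second_order_iteration_for_the_energy_convergence_32}) can be checked directly from the last sum in equation (\ref{eq:second_order_iteration_for_the_energy_convergence_31}): with the replacement $\sum_{\boldsymbol k}\to\Omega\int d^3k/(2\pi)^3$, the angular average $\overline{(\thp{P}{k})^2}=P^2k^2/3$ and the upper limit extended to infinity (the integrand decays as $k^{-3}$), one finds
\begin{align*}
\frac{g^2}{2\Omega}\sum_{\boldsymbol k<\boldsymbol k_0}\frac{(\thp{P}{k})^2}{k\left(\frac{k^2}{2}+k\right)^3}
\;\simeq\;\frac{g^2P^2}{12\pi^2}\int_0^{\infty}\frac{dk}{\left(\frac{k}{2}+1\right)^3}
=\frac{g^2P^2}{12\pi^2},
\end{align*}
so that $E^{(2)}(\boldsymbol P,g)\approx E^{(2)}(0,g)+\frac{P^2}{2}\bigl(1-\frac{g^2}{6\pi^2}\bigr)$, which reproduces equation (\ref{eq:second_order_iteration_for_the_energy_convergence_32}).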
\section{Conclusion}
\langlebel{sec:conclusion}
In current methods of renormalization in QFT, the momentum cut-off plays an important role \cite{Collins1984}; it is in fact an additional and undefined parameter of the theory. Usually, the inclusion of such a parameter for a concrete model is justified with the argument that the theory becomes incorrect on a small scale, where a more general theory must be used instead. For example, in the case of QED it is widely accepted that on a small scale the Standard Model, with its own characteristic length, should rather be used. However, in the Standard Model, as in its possible generalizations, a cut-off is required for the renormalization of perturbation theory. Consequently, we arrive at the requirement of including some ``fundamental length'' or unobservable parameter in any QFT.
However, the Fr\"ohlich Hamiltonian demonstrates the absence of a cut-off in the polaron theory. In this QFT all corrections are determined through convergent integrals and, consequently, the cut-off is not required. Here we considered a more general QFT than the one associated with the polaron problem, for which standard perturbation theory gives rise to divergences. The main result of the present work consists in the construction of a calculation scheme for this more general QFT that only leads to convergent integrals. In addition to that, the regularization of all integrals is related to the effective-cut-off momentum, which is defined through the parameters of the system itself. Moreover, the divergences of standard perturbation theory are explained through the energy being a non-analytical function of the coupling constant, of a form $\ln(\sqrt{|\ln g|}/2+1)$, around zero, and, therefore, can not be represented as a power series around this singular point. It is also important that the character of the singularity, defined in equation (\ref{eq:second_order_iteration_for_the_energy_convergence_20}) in the weak coupling limit does not depend on the particular choice of the wave functions $\phi_{\boldsymbol P}(\boldsymbol r)$ of the zeroth-oder approximation.
From a formal point of view, the convergence of all integrals is explained as follows: i) the use of the decomposition (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_2}), i.e. of special state vectors which are the product of the wave function of a localized particle and a coherent state of the field, and ii) the calculation of the energy of the system with the iteration scheme (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_10}), in which the resolvent $[H_{kk}-E^{(0)}]^{-1}$ contains the matrix elements of the full Hamiltonian of the system. In standard perturbation theory the Hamiltonian of the non-interacting fields is used in the analogous expressions.
From a physical point of view, argument i) corresponds to avoiding an adiabatic switch-off of the interaction. This means that during its entire existence a particle is considered ``dressed'', i.e. to be in a localized state which is created by the interaction between the particle and the field. Argument ii) leads to the ``cutting'' of all integrals at large momentum due to the reconstruction of the localized state in intermediate states, caused by the quasi-intersection of the ground and single-phonon states.
Our approach does not pretend to be the full solution of the renormalization problem in QFT, in particular because of the use of a simple model that is not relativistically covariant. Nevertheless, it demonstrates an alternative that succeeds without introducing any phenomenological momentum cut-off.
\begin{acknowledgments}
The authors are grateful to S. Cavaletto for useful discussions.
\end{acknowledgments}
\section*{Appendix A: Proof that the states (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_14}) are eigenstates of the total momentum operator}
\langlebel{sec:appendix}
In this appendix we present an explicit proof that the total momentum operator
\begin{align*}
\opA P = -\ri \nabla_{\boldsymbol r} + \sum_{\boldsymbol k}\boldsymbol k \opa a_{\boldsymbol k}^\dag \opa a_{\boldsymbol k}
\end{align*}
commutes with the Hamiltonian
\begin{align*}
\opa H = -\frac{1}{2}\Delta + \sum_{\boldsymbol k}k \opa a_{\boldsymbol k}^\dag \opa a_{\boldsymbol k} + \frac{g}{\sqrt{2\Omega}}\sum_{\boldsymbol k}A_{\boldsymbol k} \left(e^{\ri\thp{k}{r}}\opa a_{\boldsymbol k}+e^{-\ri\thp{k}{r}}\opa a^\dag_{\boldsymbol k}\right)
\end{align*}
of the system and that the states (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_14}) are eigenstates of $\opA P$
\begin{align}
\opA P |\Psi^{(0)}_{\boldsymbol P_1,n_{\boldsymbol k}}\rangle &= \boldsymbol P_1 |\Psi^{(0)}_{\boldsymbol P_1,n_{\boldsymbol k}}\rangle,\label{A1}
\end{align}
consequently forming a complete set in the Hilbert space.
Let us begin with the commutator:
\begin{align}
[\opa H, \opA P] &= \left[\frac{g}{\sqrt{2\Omega}}\sum_{\boldsymbol k}A_{\boldsymbol k} \left(e^{\ri\thp{k}{r}}\opa a_{\boldsymbol k}+e^{-\ri\thp{k}{r}}\opa a^\dag_{\boldsymbol k}\right), -\ri \nabla\right] \nonumber
\\
&\mspace{90mu}+\left[\frac{g}{\sqrt{2\Omega}}\sum_{\boldsymbol k}A_{\boldsymbol k} \left(e^{\ri\thp{k}{r}}\opa a_{\boldsymbol k}+e^{-\ri\thp{k}{r}}\opa a^\dag_{\boldsymbol k}\right),\sum_{\boldsymbol k}\boldsymbol k \opa a_{\boldsymbol k}^\dag \opa a_{\boldsymbol k}\right] \nonumber
\\
&=\frac{g}{\sqrt{2 \Omega}}\sum_{\boldsymbol k}A_{\boldsymbol k}\left(\opa a_{\boldsymbol k}e^{\ri \thp{k}{r}}(-\boldsymbol k)+\opa a_{\boldsymbol k}^\dag e^{-\ri \thp{k}{r}}\boldsymbol k\right) \nonumber
\\
&\mspace{90mu}+\frac{g}{\sqrt{2 \Omega}}\sum_{\boldsymbol k}A_{\boldsymbol k}\left(\opa a_{\boldsymbol k}e^{\ri \thp{k}{r}}\boldsymbol k+\opa a_{\boldsymbol k}^\dag e^{-\ri \thp{k}{r}}(-\boldsymbol k)\right) = 0,\label{A2}
\end{align}
which was to be proven.
Now we will demonstrate that the relation (\ref{A1}) holds. Also, we will introduce the notations
\begin{align}
\opa D(\boldsymbol R) &= \exp\left\{\sum_{\boldsymbol q}(u_{\boldsymbol q} e^{- \ri\boldsymbol q \cdot \boldsymbol R}\opa a_{\boldsymbol q}^\dag - u_{\boldsymbol q}^* e^{ \ri\boldsymbol q \cdot \boldsymbol R}\opa a_{\boldsymbol q})\right\}, \text{ with} \label{A3}
\\
&\opa D^\dag(\boldsymbol R)\opa D(\boldsymbol R) = \opa D(\boldsymbol R)\opa D^\dag(\boldsymbol R) = 1,\label{A4}
\\
&\opa D^\dag(\boldsymbol R)\opa a_{\boldsymbol k}\opa D(\boldsymbol R) = \opa a_{\boldsymbol k} +u_{\boldsymbol k}e^{-\ri\thp{k}{R}},\label{A5}
\\
&\opa D^\dag(\boldsymbol R)\opa a^\dag_{\boldsymbol k}\opa D(\boldsymbol R) = \opa a^\dag_{\boldsymbol k} + u^*_{\boldsymbol k}e^{\ri\thp{k}{R}},\label{A6}
\\
&\ri\frac{\partial \opa D(\boldsymbol R)}{\partial \boldsymbol R} = \opa D(\boldsymbol R)\sum_{\boldsymbol q}\boldsymbol q\left(u_{\boldsymbol q}e^{-\ri\thp{q}{R}}\opa a_{\boldsymbol q}^\dag+u^*_{\boldsymbol q}e^{\ri\thp{q}{R}}\opa a_{\boldsymbol q}\right).\label{A7}
\end{align}
Consequently, with the help of equations (\ref{A3}-\ref{A7}) we may write
\begin{align}
\opA P |\Psi^{(0)}_{\boldsymbol P_1,n_{\boldsymbol k}}\rangle &= \left(-\ri \nabla_{\boldsymbol r} + \sum_{\boldsymbol q}\boldsymbol q \opa a_{\boldsymbol q}^\dag \opa a_{\boldsymbol q}\right)\frac{1}{N_{\boldsymbol P_1,n_{\boldsymbol k}}\sqrt{\Omega}}\int d \boldsymbol R \phi_{\boldsymbol P_1}(\boldsymbol r - \boldsymbol R)\exp\left\{\ri(\boldsymbol P_1 - \boldsymbol k n_{\boldsymbol k})\!\cdot\!ot \boldsymbol R \right\}\opa D(\boldsymbol R)|n_{\boldsymbol k}\rangle \nonumber
\\
&=\frac{1}{N_{\boldsymbol P_1,n_{\boldsymbol k}}\sqrt{\Omega}}\int d \boldsymbol R (-\ri \nabla_{\boldsymbol r})(\phi_{\boldsymbol P_1}(\boldsymbol r - \boldsymbol R))\exp\left\{\ri(\boldsymbol P_1 - \boldsymbol k n_{\boldsymbol k})\!\cdot\!ot \boldsymbol R \right\}\opa D(\boldsymbol R)|n_{\boldsymbol k}\rangle \nonumber
\\
&+\frac{1}{N_{\boldsymbol P_1,n_{\boldsymbol k}}\sqrt{\Omega}}\int d \boldsymbol R \phi_{\boldsymbol P_1}(\boldsymbol r - \boldsymbol R)\exp\left\{\ri(\boldsymbol P_1 - \boldsymbol k n_{\boldsymbol k})\!\cdot\!ot \boldsymbol R \right\}\sum_{\boldsymbol q}\boldsymbol q \opa a_{\boldsymbol q}^\dag \opa a_{\boldsymbol q}\opa D(\boldsymbol R)|n_{\boldsymbol k}\rangle. \langlebel{A8}
\end{align}
By noticing that $-\ri \nabla_{\boldsymbol r}(\phi_{\boldsymbol P_1}(\boldsymbol r - \boldsymbol R)) = \ri \nabla_{\boldsymbol R}(\phi_{\boldsymbol P_1}(\boldsymbol r - \boldsymbol R))$ and transforming $\sum_{\boldsymbol q}\boldsymbol q \opa a_{\boldsymbol q}^\dag \opa a_{\boldsymbol q}\opa D(\boldsymbol R) = \opa D(\boldsymbol R)\opa D^\dag(\boldsymbol R)\sum_{\boldsymbol q}\boldsymbol q \opa a_{\boldsymbol q}^\dag \opa a_{\boldsymbol q}\opa D(\boldsymbol R)$ one obtains
\begin{align}
\opA P |\Psi^{(0)}_{\boldsymbol P_1,n_{\boldsymbol k}}\rangle &= \frac{1}{N_{\boldsymbol P_1,n_{\boldsymbol k}}\sqrt{\Omega}}\int d \boldsymbol R (\ri \nabla_{\boldsymbol R})\left[\phi_{\boldsymbol P_1}(\boldsymbol r - \boldsymbol R)\exp\left\{\ri(\boldsymbol P_1 - \boldsymbol k n_{\boldsymbol k})\!\cdot\!ot \boldsymbol R \right\}\opa D(\boldsymbol R)\right]|n_{\boldsymbol k}\rangle \nonumber
\\
&-\frac{1}{N_{\boldsymbol P_1,n_{\boldsymbol k}}\sqrt{\Omega}}\int d \boldsymbol R \phi_{\boldsymbol P_1}(\boldsymbol r - \boldsymbol R)(\ri \nabla_{\boldsymbol R})\left[\exp\left\{\ri(\boldsymbol P_1 - \boldsymbol k n_{\boldsymbol k})\!\cdot\!ot \boldsymbol R \right\}\opa D(\boldsymbol R)\right]|n_{\boldsymbol k}\rangle \nonumber
\\
&+\frac{1}{N_{\boldsymbol P_1,n_{\boldsymbol k}}\sqrt{\Omega}}\int d \boldsymbol R \phi_{\boldsymbol P_1}(\boldsymbol r - \boldsymbol R)\exp\left\{\ri(\boldsymbol P_1 - \boldsymbol k n_{\boldsymbol k})\!\cdot\!ot \boldsymbol R \right\}\opa D(\boldsymbol R) \nonumber
\\
&\mspace{350mu}\times\sum_{\boldsymbol q}\boldsymbol q \left(\opa a_{\boldsymbol q}^\dag+u_{\boldsymbol q}^* e^{\ri\thp{q}{R}}\right) \left(\opa a_{\boldsymbol q}+u_{\boldsymbol q} e^{-\ri\thp{q}{R}}\right)|n_{\boldsymbol k}\rangle. \langlebel{A9}
\end{align}
The first term in equation (\ref{A9}) vanishes due to the square-integrability of the function $\phi_{\boldsymbol P_1}(\boldsymbol r-\boldsymbol R)$. The derivative in the second term is equal to
\begin{align}
(\ri \nabla_{\boldsymbol R})\left[\exp\left\{\ri(\boldsymbol P_1 - \boldsymbol k n_{\boldsymbol k})\!\cdot\!ot \boldsymbol R \right\}\opa D(\boldsymbol R)\right] &= -(\boldsymbol P_1 - \boldsymbol k n_k)\exp\left\{\ri(\boldsymbol P_1 - \boldsymbol k n_{\boldsymbol k})\!\cdot\!ot \boldsymbol R \right\}\opa D(\boldsymbol R) \nonumber
\\
&+\exp\left\{\ri(\boldsymbol P_1 - \boldsymbol k n_{\boldsymbol k})\!\cdot\!ot \boldsymbol R \right\}\opa D(\boldsymbol R)\sum_{\boldsymbol q}\boldsymbol q\left(u_{\boldsymbol q}e^{-\ri\thp{q}{R}}\opa a_{\boldsymbol q}^\dag+u^*_{\boldsymbol q}e^{\ri\thp{q}{R}}\opa a_{\boldsymbol q}\right) \langlebel{A10}
\end{align}
and, therefore, the terms which are not proportional to $\boldsymbol P_1$ in equation (\ref{A10}) cancel the last term in equation (\ref{A9}). As a result, equation (\ref{A9}) transforms into
\begin{align}
\opA P |\Psi^{(0)}_{\boldsymbol P_1,n_{\boldsymbol k}}\rangle = \boldsymbol P_1 |\Psi^{(0)}_{\boldsymbol P_1,n_{\boldsymbol k}}\rangle, \label{A11}
\end{align}
which was to be proven.
According to reference \cite{landau1965quantum}, the eigenstates of a Hermitian operator form a complete and orthogonal set of functions in the Hilbert space. As the functions (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_14}) are eigenstates of the Hermitian operator $\opA P$, they form a complete orthogonal set for arbitrary generalized parameters $\phi_{\boldsymbol P_1}(\boldsymbol r - \boldsymbol R)$ and $u_{\boldsymbol k}$.
\section*{Appendix B: Matrix elements calculation}
\langlebel{sec:matrix_elements_calculation}
In all subsequent calculations, matrix elements of the type
\begin{align}
\label{1}
\langle n_{\boldsymbol j}|&\exp\left(-\sum_{\boldsymbol m}\opa a_{\boldsymbol m}^\dag u_{\boldsymbol m}e^{-\ri\thp{m}{R^\prime}}-\opa a_{\boldsymbol m}u^*_{\boldsymbol m}e^{\ri\thp{m}{R^\prime}}\right)\sum_{\boldsymbol l}f(\opa a_{\boldsymbol l},\opa a^\dag_{\boldsymbol l})\nonumber
\\
&\times\exp\left(\sum_{\boldsymbol m}\opa a_{\boldsymbol m}^\dag u_{\boldsymbol m}e^{-\ri\thp{m}{R}}-\opa a_{\boldsymbol m} u^*_{\boldsymbol m}e^{\ri\thp{m}{R}}\right)|n_{\boldsymbol k}\rangle
\end{align}
need to be evaluated. By using the identities
\begin{align}
\opa D &= e^{\beta \opa a^\dag - \beta^* \opa a} = e^{-|\beta|^2/2}e^{\beta \opa a^\dag}e^{-\beta^* \opa a} = e^{|\beta|^2 /2}e^{-\beta^* \opa a}e^{\beta \opa a^\dag}, \label{2}
\\
\opa D^{-1}\opa a \opa D &= \opa a +\beta, \quad \opa D^{-1}\opa a^\dag \opa D= \opa a^\dag + \beta^*,\label{3}
\end{align}
equation (\ref{1}) can be transformed into the form
\begin{align}
&\frac{\exp(\sum_{\boldsymbol m}|u_{\boldsymbol m}|^2 (e^{-\ri\boldsymbol m \cdot (\boldsymbol R - \boldsymbol R^\prime)}-1))}{\sqrt{n_{\boldsymbol k}!n_{\boldsymbol j}!}}\nonumber
\\
&\times\langle0|(\opa a_{\boldsymbol j} - u_{\boldsymbol j}e^{-\ri\thp{j}{R^\prime}}+u_{\boldsymbol j}e^{-\ri\thp{j}{R}})^{n_{\boldsymbol j}}\sum_{\boldsymbol l}f\left(\opa a_{\boldsymbol l}+u_{\boldsymbol l} e^{-\ri\thp{l}{R}},\opa a^\dag_{\boldsymbol l}+u_{\boldsymbol l}^* e^{\ri\thp{l}{R^\prime}}\right)\nonumber
\\
&\times(\opa a_{\boldsymbol k}^\dag + u_{\boldsymbol k}^* e^{\ri\thp{k}{R^\prime}}-u_{\boldsymbol k}^* e^{\ri\thp{k}{R}})^{n_{\boldsymbol k}}|0\rangle. \label{4}
\end{align}
The evaluation of equation (\ref{4}) is performed in the usual manner, i.e., by noticing that $\opa a|0\rangle = \langle0|\opa a^\dag = 0$ and that the vacuum average is non-zero only if the number of creation operators is equal to the number of annihilation operators, i.e. the total number of operators is even.
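As a minimal illustration of this counting, for single-phonon states $n_{\boldsymbol j}=n_{\boldsymbol k}=1$ and $f\equiv1$ the vacuum average appearing in equation (\ref{4}) reduces to
\begin{align*}
\langle0|\left(\opa a_{\boldsymbol j}+u_{\boldsymbol j}\bigl(e^{-\ri\thp{j}{R}}-e^{-\ri\thp{j}{R^\prime}}\bigr)\right)
\left(\opa a_{\boldsymbol k}^\dag+u_{\boldsymbol k}^*\bigl(e^{\ri\thp{k}{R^\prime}}-e^{\ri\thp{k}{R}}\bigr)\right)|0\rangle
=\delta_{\boldsymbol j,\boldsymbol k}
+u_{\boldsymbol j}u_{\boldsymbol k}^*\bigl(e^{-\ri\thp{j}{R}}-e^{-\ri\thp{j}{R^\prime}}\bigr)\bigl(e^{\ri\thp{k}{R^\prime}}-e^{\ri\thp{k}{R}}\bigr),
\end{align*}
where the operator cross terms vanish because $\opa a|0\rangle=\langle0|\opa a^\dag=0$ and $\langle0|\opa a_{\boldsymbol j}\opa a_{\boldsymbol k}^\dag|0\rangle=\delta_{\boldsymbol j,\boldsymbol k}$; the exponential prefactor in equation (\ref{4}) multiplies this expression.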
\section*{Appendix C: Ground state energy}
\langlebel{sec:ground_state_energy}
According to equation (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_18}) of the manuscript, the ground state energy is defined as
\begin{align}
E_L^{(0)} = \langle \Psi^{(L)}_{\boldsymbol P}|\opa H|\Psi^{(L)}_{\boldsymbol P}\rangle \label{5}
\end{align}
with the wave function
\begin{align}
\label{6}
|\Psi^{(L)}_{\boldsymbol P}\rangle &= \frac{1}{N_{\boldsymbol P}\sqrt{\Omega}} \int d{\boldsymbol R}\,\phi_{\boldsymbol P}({\boldsymbol r}-{\boldsymbol R})\exp \left(\ri\thp{P}{R}+ \sum_{\boldsymbol k}\left(u_{\boldsymbol k}\opa a^\dag_{\boldsymbol k}e^{-\ri\thp{k}{R}}-\frac{1}{2}u^2_{\boldsymbol k} \right)\right)| 0 \rangle
\end{align}
and Hamiltonian
\begin{align}
\opa H &= \frac{1}{2}\left(\opA{P}^2 -2 \sum_k \opa a_k^\dag \opa a_k \boldsymbol k \!\cdot\!ot \opA{P}+\left(\sum_k \opa a_k^\dag \opa a_k \boldsymbol k\right)^2\right)+\sum_{\boldsymbol k}\omega_k \opa a^+_{\boldsymbol k}\opa a_{\boldsymbol k}\nonumber
\\
&+ \frac{g}{\sqrt{\Omega}}\sum_{\boldsymbol k}\frac{1}{\sqrt{2\omega_k}} \left(e^{\ri\boldsymbol k\boldsymbol r}\opa a_{\boldsymbol k}+e^{-\ri\boldsymbol k\boldsymbol r}\opa a^+_{\boldsymbol k}\right). \langlebel{7}
\end{align}
The normalization constant $N_{\boldsymbol P}$ is found from the condition
\begin{align}
\label{8}
\langle\Psi^{(L)}_{\boldsymbol P}|\Psi^{(L)}_{\boldsymbol P}\rangle = 1.
\end{align}
In order to evaluate equation (\ref{8}), we use equation (\ref{4}), in which $n_{\boldsymbol k} = n_{\boldsymbol j} = 0$ and $f(\opa a_{\boldsymbol l},\opa a_{\boldsymbol l}^\dag) = \delta_{\boldsymbol l,0}$. This gives immediately the result
\begin{align}
|N_{\boldsymbol P}|^2 = \int d{\boldsymbol R}\int d{\boldsymbol r}\,\phi_{\boldsymbol P}^*({\boldsymbol r})\phi_{\boldsymbol P}({\boldsymbol r}-{\boldsymbol R}) \exp\left(\sum_{\boldsymbol k}|u_{\boldsymbol k}|^2(e^{-\ri\thp{k}{R}}-1) + \ri \thp{P}{R}\right).\label{9}
\end{align}
The expectation value of the energy is performed in exactly the same way. First of all, the matrix elements of the field states are calculated with the help of equation (\ref{4}). For example, if the function $f$ is chosen as $f = \boldsymbol l \opa a_{\boldsymbol l}^\dag \opa a_{\boldsymbol l}$, $n_{\boldsymbol k} = n_{\boldsymbol j} = 0$, we immediately find
\begin{align}
\boldsymbol Q &= \langle \sum_{\boldsymbol l}\boldsymbol l \opa a_{\boldsymbol l}^\dag \opa a_{\boldsymbol l}\rangle \nonumber
\\
&=\int d\boldsymbol R d\boldsymbol R^\prime d\boldsymbol r \phi^*(\boldsymbol r- \boldsymbol R^\prime)\phi(\boldsymbol r - \boldsymbol R) \sum_{\boldsymbol l}\boldsymbol l |u_{\boldsymbol l}|^2 \nonumber
\\
&\times\exp\left(\sum_{\boldsymbol m}|u_{\boldsymbol m}|^2 (e^{-\ri\boldsymbol m \cdot (\boldsymbol R - \boldsymbol R^\prime)}-1)+\ri(\boldsymbol P - \boldsymbol l)\cdot(\boldsymbol R - \boldsymbol R^\prime)\right).\label{10}
\end{align}
Then by carrying out the change of variables $\boldsymbol R - \boldsymbol R^\prime = \boldsymbol R_1$ and $\boldsymbol r - \boldsymbol R^\prime = \boldsymbol \rho$, we obtain
\begin{align}
\boldsymbol Q &= \frac{1}{|N_{\boldsymbol P}|^2} \sum_{\boldsymbol k}\boldsymbol k |u_{\boldsymbol k}|^2\int d{\boldsymbol R} d{\boldsymbol r}\,\phi_{\boldsymbol P}^*({\boldsymbol r})\phi_{\boldsymbol P}({\boldsymbol r - \boldsymbol R}) e^{\Phi (\boldsymbol R) + \ri (\boldsymbol P - \boldsymbol k)\cdot \boldsymbol R},\label{11}
\\
\Phi (\boldsymbol R)&= \sum_{\boldsymbol k}|u_{\boldsymbol k}|^2(e^{-\ri\thp{k}{R}}-1). \nonumber
\end{align}
All other matrix elements are evaluated in exactly the same fashion. Consequently, we obtain expression (\ref{eq:iteration_scheme_basis_zero_order_approximation_to_the_energy_of_the_system_20}) of the manuscript:
\begin{align}
&E_{L}^{(0)}(\boldsymbol P,g)= \frac{P^2}{2} - \thp{P}{Q} + G + E_{\text{f}}(\boldsymbol P) + E_{\text{int}}(\boldsymbol P); \label{12}
\\
&\boldsymbol Q = \frac{1}{|N_{\boldsymbol P}|^2} \sum_{\boldsymbol k}\boldsymbol k |u_{\boldsymbol k}|^2\int d{\boldsymbol R} d{\boldsymbol r}\,\phi_{\boldsymbol P}^*({\boldsymbol r})\phi_{\boldsymbol P}({\boldsymbol r - \boldsymbol R}) e^{\Phi (\boldsymbol R) + \ri (\boldsymbol P - \boldsymbol k)\cdot \boldsymbol R}; \nonumber
\\
& G =\frac{1}{2} \frac{1}{|N_{\boldsymbol P}|^2}\sum_{\boldsymbol m,\boldsymbol l}\thp{m}{l}|u_{\boldsymbol m}|^2 |u_{\boldsymbol l}|^2\int d\boldsymbol r d\boldsymbol R \phi^*(\boldsymbol r)\phi(\boldsymbol r - \boldsymbol R)e^{\ri\thp{P}{R}+\Phi(\boldsymbol R)-\ri(\boldsymbol m + \boldsymbol l)\cdot \boldsymbol R};\nonumber
\\
&E_{\text{f}}(\boldsymbol P) = \frac{1}{|N_{\boldsymbol P}|^2} \sum_{\boldsymbol k}\left(k + \frac{k^2}{2}\right)|u_{\boldsymbol k}|^2\int d{\boldsymbol R} d{\boldsymbol r}\,\phi_{\boldsymbol P}^*({\boldsymbol r})\phi_{\boldsymbol P}({\boldsymbol r-\boldsymbol R}) e^{\Phi (\boldsymbol R) + \ri (\boldsymbol P - \boldsymbol k)\cdot \boldsymbol R};\nonumber
\\
&E_{\text{int}}(\boldsymbol P) = \frac{g}{|N_{\boldsymbol P}|^2 } \sum_{\boldsymbol k}\frac{u_{\boldsymbol k}}{\sqrt{2 k \Omega}} \int d{\boldsymbol R} d{\boldsymbol r}\left(\phi_{\boldsymbol P}^*({\boldsymbol r}+\boldsymbol R)\phi_{\boldsymbol P}({\boldsymbol r})+\phi_{\boldsymbol P}^*({\boldsymbol r})\phi_{\boldsymbol P}({\boldsymbol r}-{\boldsymbol R})\right)e^{\Phi (\boldsymbol R) + \ri (\thp{P}{R} + \thp{k}{r})};\nonumber
\\
&\Phi (\boldsymbol R)= \sum_{\boldsymbol k}|u_{\boldsymbol k}|^2(e^{-\ri\thp{k}{R}}-1);\nonumber
\\
&|N_{\boldsymbol P}|^2 = \int d{\boldsymbol R} d{\boldsymbol r}\,\phi_{\boldsymbol P}^*({\boldsymbol r})\phi_{\boldsymbol P}({\boldsymbol r}-{\boldsymbol R}) e^{\Phi(\boldsymbol R) + \ri \thp{P}{R}}.\nonumber
\end{align}
We recall once more that the function $\phi(\boldsymbol r)$ reads
\begin{align}
\phi(\boldsymbol r) = \frac{\lambda^{\frac{3}{2}}}{\pi^{\frac{3}{4}}}e^{-\frac{\lambda^2 r^2}{2}}, \label{13}
\end{align}
while the classical component of the field and the Fourier transform of $\phi(\boldsymbol r)$ read
\begin{align}
u_{\boldsymbol k} &= -\frac{g}{\sqrt{2 \Omega}}\frac{1}{\sqrt{k^3}}\int d\boldsymbol r |\phi(\boldsymbol r)|^2 e^{-\ri\thp{k}{r}} = -\frac{g}{\sqrt{2 \Omega}}\frac{e^{-\frac{k^2}{4 \lambda^2}}}{\sqrt{k^3}}; \label{14}
\\
\phi_{\boldsymbol k} &= \int d\boldsymbol r \phi(\boldsymbol r)e^{-\ri\thp{k}{r}} = 2\sqrt{2}\frac{\pi^{\frac{3}{4}}}{\lambda^{\frac{3}{2}}} e^{-\frac{k^2}{2 \lambda^2}} = \phi_0 e^{-\frac{k^2}{2 \lambda^2}}. \label{15}
\end{align}
In order to calculate the energy, we first neglect the function
\begin{align}
\Phi(\boldsymbol R) = \sum_{\boldsymbol k}|u_{\boldsymbol k}|^2 (e^{-\ri\thp{k}{R}}-1)=g^2 \frac{1}{4\pi^2}\int_0^\infty dt \frac{e^{-\frac{t^2}{2}}}{t}\left(\frac{\sin \lambda R t}{\lambda R t}-1\right)\sim g^2 \label{16}
\end{align}
in equation (\ref{12}). The remaining quantities can be rewritten employing the Fourier transform of the function $\phi(\boldsymbol r)$. For example,
\begin{align}
\int d\boldsymbol R_1 d \boldsymbol \rho \phi^*(\boldsymbol \rho)\phi(\boldsymbol \rho - \boldsymbol R_1) e^{-\ri\thp{k}{R_1}} =\int d \boldsymbol \rho \phi^*(\boldsymbol \rho)e^{-\ri\thp{k}{\rho}}\int d \boldsymbol R \phi(\boldsymbol R)e^{\ri\thp{k}{R}} = \phi^*_{\boldsymbol k}\phi_{-\boldsymbol k} &= \phi_{\boldsymbol k}^2, \label{17}
\\
\int d\boldsymbol R_1 d \boldsymbol \rho \phi^*(\boldsymbol \rho)\phi(\boldsymbol \rho - \boldsymbol R_1) = |\phi_0|^2 &= \phi_0^2, \label{18}
\\
\int d \boldsymbol R_1 d \boldsymbol \rho \phi^*(\boldsymbol \rho)\phi(\boldsymbol \rho - \boldsymbol R_1) e^{-\ri\thp{k}{\rho}} = \phi^*_{\boldsymbol k}\phi_0 &= \phi_{\boldsymbol k}\phi_0, \label{19}
\end{align}
and by plugging equations (\ref{17}-\ref{19}) into equation (\ref{12}), one finds
\begin{align}
E^{(0)}_L(0, g) = \frac{1}{2}\sum_{\boldsymbol m,\boldsymbol l}\thp{m}{l}|u_{\boldsymbol m}|^2 |u_{\boldsymbol l}|^2\,\frac{\phi_{\boldsymbol l+ \boldsymbol m}^2}{\phi_0^2}+\sum_{\boldsymbol k}\left(k + \frac{k^2}{2}\right)|u_{\boldsymbol k}|^2 \frac{\phi_{\boldsymbol k}^2}{\phi_0^2} + \frac{2g}{\sqrt{2 \Omega}}\sum_{\boldsymbol k}\frac{u_{\boldsymbol k}}{\sqrt{k}}\frac{\phi_{\boldsymbol k}}{\phi_0}. \label{20}
\end{align}
By insertion of the definitions of the classical component of the field $u_{\boldsymbol k}$ and the Fourier transform of the function $\phi_{\boldsymbol k}$, defined in equations (\ref{14}) and (\ref{15}), we find
\begin{align}
E^{(0)}_L&(0, g) = \frac{g^4}{8(2\pi)^6}\int d\boldsymbol l d\boldsymbol m \frac{\thp{m}{l}}{m^3 l^3}e^{-\frac{3}{2}\frac{m^2}{\lambda^2}-\frac{3}{2}\frac{l^2}{\lambda^2}-\frac{2\thp{m}{l}}{\lambda^2}}+\frac{g^2}{2(2\pi)^3}\int \frac{d\boldsymbol k}{k^2}\left(1+\frac{k}{2}\right)e^{-\frac{3}{2}\frac{k^2}{\lambda^2}} \nonumber
\\
&- \frac{g^2}{(2\pi)^3}\int \frac{d\boldsymbol k}{k^2}e^{-\frac{3}{4}\frac{k^2}{\lambda^2}} =\frac{g^4}{8(2\pi)^5}\int \frac{d\boldsymbol l}{l^2}e^{-\frac{3}{2}\frac{l^2}{\lambda^2}}\int dm e^{-\frac{3}{2}\frac{m^2}{\lambda^2}}\frac{-2lm \lambda^2 \cosh\frac{2lm}{\lambda^2}+\lambda^4\sinh\frac{2lm}{\lambda^2}}{2l^2m^2} \nonumber
\\
&+\frac{g^2}{24\pi^2}\left(\lambda(-4+\sqrt{2})\sqrt{3\pi}+\lambda^2\right) =\frac{g^4 \lambda^2}{16(2\pi)^4}\int_0^\infty \frac{du}{u^2}\left(4u-e^{\frac{2}{3}u^2}\sqrt{6\pi}\text{Erf}\left(\sqrt{\frac{2}{3}}u\right)\right)e^{-\frac{3}{2}u^2}\nonumber
\\
&+\frac{g^2}{24\pi^2}\left(\lambda(-4+\sqrt{2})\sqrt{3\pi}+\lambda^2\right) \nonumber
\\
&=-\frac{g^4 \lambda^2}{2^8 \pi^4}\alpha + \frac{g^2}{24\pi^2}\left(\lambda(-4+\sqrt{2})\sqrt{3\pi}+\lambda^2\right),\label{21}
\end{align}
where $\alpha \approx 0.736559$.
To find $\lambda$, we minimize the energy with respect to $\lambda$; the derivative reads
\begin{align}
\frac{\partial E^{(0)}_L(0, g)}{\partial \lambda} = -\frac{2g^4 \lambda }{2^8 \pi^4}\alpha + \frac{g^2}{24\pi^2}\left((-4+\sqrt{2})\sqrt{3\pi}+2\lambda\right),\label{22}
\end{align}
Setting this derivative to zero, we find
\begin{align}
\lambda = -\frac{\sqrt{3\pi}}{2}(-4+\sqrt{2})\frac{1}{1-\frac{3 \alpha g^2}{32\pi^2}}\approx -\frac{\sqrt{3\pi}}{2}(-4+\sqrt{2})\left(1+\frac{3 \alpha g^2}{32\pi^2}\right)\label{23}
\end{align}
and by plugging this $\lambda$ into equation (\ref{21}) we obtain
\begin{align}
E^{(0)}_L(0, g) = -g^2\frac{(-4+\sqrt{2})^2}{32\pi} - \frac{3 \alpha g^4(-4+\sqrt{2})^2}{2^{10}\pi^3} + O(g^6). \label{24}
\end{align}
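As a purely numerical sanity check on the steps above, the following short Python sketch evaluates the $u$-integral defining $\alpha$ in equation (\ref{21}) and the weak-coupling energy (\ref{24}). The variable names and the sample value of $g$ are illustrative assumptions, not part of the derivation; the text quotes $\alpha \approx 0.736559$.
\begin{verbatim}
import numpy as np
from scipy.integrate import quad
from scipy.special import erf

def integrand(u):
    # Integrand of the u-integral in Eq. (21); the exponentials are combined
    # so the expression stays finite for large u.
    return (4.0 * u * np.exp(-1.5 * u**2)
            - np.sqrt(6.0 * np.pi) * erf(np.sqrt(2.0 / 3.0) * u)
              * np.exp(-5.0 * u**2 / 6.0)) / u**2

I, _ = quad(integrand, 0.0, np.inf)
alpha = -I  # sign convention of Eq. (21)
print("alpha =", alpha)

g = 0.5  # illustrative weak coupling
E0 = (-g**2 * (-4.0 + np.sqrt(2.0))**2 / (32.0 * np.pi)
      - 3.0 * alpha * g**4 * (-4.0 + np.sqrt(2.0))**2 / (2**10 * np.pi**3))
print("E_L^(0)(0, g) =", E0)  # weak-coupling expansion, Eq. (24)
\end{verbatim}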
\section*{Appendix D: Mass renormalization in zeroth-order approximation}
\label{sec:mass_renormalization_in_zeroth_order_approximation}
In the weak-coupling limit we determine the renormalized mass in the zeroth-order approximation. For this purpose, we rewrite the energy in terms of Fourier components for the case $\boldsymbol P\neq0$. This yields
\begin{align}
E^{(0)}_L(\boldsymbol P, g)&= \frac{P^2}{2} - \thp{P}{Q} + G + E_{\text{f}}(\boldsymbol P) + E_{\text{int}}(\boldsymbol P); \label{25}
\\
\boldsymbol Q &= \frac{1}{|N_{\boldsymbol P}|^2} \sum_{\boldsymbol k}\boldsymbol k |u_{\boldsymbol k}|^2 \phi_{\boldsymbol P - \boldsymbol k}^2; \label{26}
\\
G &= \frac{1}{2} \frac{1}{|N_{\boldsymbol P}|^2} \sum_{\boldsymbol m,\boldsymbol l}\thp{m}{l}|u_{\boldsymbol m}|^2 |u_{\boldsymbol l}|^2 \phi_{\boldsymbol P - \boldsymbol l - \boldsymbol m}^2;\label{27}
\\
E_{\text{f}}(\boldsymbol P) &= \frac{1}{|N_{\boldsymbol P}|^2} \sum_{\boldsymbol k}\left(k + \frac{k^2}{2}\right)|u_{\boldsymbol k}|^2 \phi_{\boldsymbol P - \boldsymbol k}^2; \label{28}
\\
E_{\text{int}}(\boldsymbol P) &= \frac{g}{|N_{\boldsymbol P}|^2 } \sum_{\boldsymbol k}\frac{u_{\boldsymbol k}}{\sqrt{2 k \Omega}} (\phi_{\boldsymbol P}\phi_{\boldsymbol P -\boldsymbol k}+\phi_{\boldsymbol P} \phi_{\boldsymbol P + \boldsymbol k}); \label{29}
\\
|N_{\boldsymbol P}|^2 &= \phi_{\boldsymbol P}^2.\label{30}
\end{align}
From here we can immediately conclude that the quantity $G$ yields only a correction of the order of $g^4$ and can be neglected. Let us expand the Fourier transform of the function $\phi(\boldsymbol r)$ into a Taylor series in $\boldsymbol P$ up to second order
\begin{align}
\phi^2_{\boldsymbol P} &=\phi_0^2 e^{-\frac{P^2}{\lambda^2}} \approx \phi_0^2 \left(1-\frac{P^2}{\lambda^2}\right), \label{31}
\\
\phi_{\boldsymbol P - \boldsymbol k}^2 &= \phi_0^2 e^{-\frac{(\boldsymbol P - \boldsymbol k)^2}{\lambda^2}} = \phi_0^2\left(e^{-\frac{k^2}{\lambda^2}}+e^{-\frac{k^2}{\lambda^2}}\frac{2\thp{P}{k}}{\lambda^2}+e^{-\frac{k^2}{\lambda^2}}\frac{2(\thp{P}{k})^2}{\lambda^4}-e^{-\frac{k^2}{\lambda^2}} \frac{P^2}{\lambda^2}\right),\label{32}
\\
\phi_{\boldsymbol P - \boldsymbol k} &= \phi_0 e^{-\frac{(\boldsymbol P - \boldsymbol k)^2}{2\lambda^2}} = \phi_0\left(e^{-\frac{k^2}{2\lambda^2}}+e^{-\frac{k^2}{2\lambda^2}}\frac{\thp{P}{k}}{\lambda^2}+e^{-\frac{k^2}{2\lambda^2}}\frac{(\thp{P}{k})^2}{2\lambda^4}-e^{-\frac{k^2}{2\lambda^2}} \frac{P^2}{2\lambda^2}\right).\label{33}
\end{align}
By plugging equations (\ref{31}-\ref{33}) into equations (\ref{26}-\ref{30}) and taking into account only the terms of the order of $P^2$, we find for the vector $\boldsymbol Q$
\begin{align}
\boldsymbol Q &= \frac{1}{1-P^2/\lambda^2}\frac{2}{\lambda^2}\sum_{\boldsymbol k}\boldsymbol k |u_{\boldsymbol k}|^2 e^{-\frac{k^2}{\lambda^2}}(\thp{P}{k}) = \frac{1}{1-P^2/\lambda^2}\frac{g^2}{\lambda^2}\frac{\boldsymbol P}{4\pi^2}\int_0^\infty dk k e^{-\frac{3}{2}\frac{k^2}{\lambda^2}}\int_{-1}^1 t^2 dt \nonumber
\\
&= \frac{g^2}{9\pi^2}\frac{\boldsymbol P}{2} \label{34}
\end{align}
and for the field energy
\begin{align}
E_{\text{f}} = \frac{1}{1-P^2/\lambda^2} \sum_{\boldsymbol k}\left(k + \frac{k^2}{2}\right)|u_{\boldsymbol k}|^2 \left(e^{-\frac{k^2}{\lambda^2}}+e^{-\frac{k^2}{\lambda^2}}\frac{2\thp{P}{k}}{\lambda^2}+e^{-\frac{k^2}{\lambda^2}}\frac{2(\thp{P}{k})^2}{\lambda^4}-e^{-\frac{k^2}{\lambda^2}} \frac{P^2}{\lambda^2}\right). \label{35}
\end{align}
In this expression, the first and the last terms in the round brackets cancel each other once the normalization constant is expanded in a Taylor series in $\boldsymbol P$. The result reads
\begin{align}
E_{\text{f}} &= E_{\text{f}}(0)+\frac{2}{\lambda^2}\sum_{\boldsymbol k}\left(k + \frac{k^2}{2}\right)|u_{\boldsymbol k}|^2e^{-\frac{k^2}{\lambda^2}}(\thp{P}{k})^2 \nonumber
\\
&=E_{\text{f}}(0)+\frac{g^2}{\lambda^4}\frac{P^2}{4\pi^2}\int_0^\infty k \left(k + \frac{k^2}{2}\right)e^{-\frac{3}{2}\frac{k^2}{\lambda^2}}dk\int_{-1}^1 t^2 dt \nonumber
\\
&=E_{\text{f}}(0) + \frac{P^2}{2}\frac{g^2}{9\pi^2}\frac{1}{6\lambda}(\sqrt{6\pi}+2\lambda). \label{36}
\end{align}
The remaining energy is calculated in exactly the same way
\begin{align}
E_{\text{int}} = \frac{g}{\sqrt{2 \Omega}}\frac{1}{1-P^2/(2\lambda^2)}\sum_{\boldsymbol k}\frac{u_{\boldsymbol k}}{\sqrt{k}}\Bigg[e^{-\frac{k^2}{2\lambda^2}}+e^{-\frac{k^2}{2\lambda^2}}\frac{\thp{P}{k}}{\lambda^2}+e^{-\frac{k^2}{2\lambda^2}}\frac{(\thp{P}{k})^2}{2\lambda^4}-e^{-\frac{k^2}{2\lambda^2}} \frac{P^2}{2\lambda^2} \nonumber
\\
+e^{-\frac{k^2}{2\lambda^2}}-e^{-\frac{k^2}{2\lambda^2}}\frac{\thp{P}{k}}{\lambda^2}+e^{-\frac{k^2}{2\lambda^2}}\frac{(\thp{P}{k})^2}{2\lambda^4}-e^{-\frac{k^2}{2\lambda^2}} \frac{P^2}{2\lambda^2}\Bigg]. \label{37}
\end{align}
In full analogy with the field energy $E_{\text{f}}$, the first and the last terms cancel. The remaining terms are
\begin{align}
E_{\text{int}} &= E_{\text{int}}(0) + \frac{g}{\sqrt{2 \Omega}}\frac{1}{\lambda^4} \sum_{\boldsymbol k}\frac{u_{\boldsymbol k}}{\sqrt{k}}e^{-\frac{k^2}{2\lambda^2}}(\thp{P}{k})^2 \nonumber
\\
&=E_{\text{int}}(0) -\frac{g^2}{\lambda^4}\frac{P^2}{8\pi^2}\int_0^\infty k^2 e^{-\frac{3}{4}\frac{k^2}{\lambda^2}}dk\int_{-1}^1 t^2 dt \nonumber
\\
&=E_{\text{int}}(0) - \frac{g^2}{9\pi^2}\frac{1}{\lambda}\sqrt{\frac{\pi}{3}}\frac{P^2}{2}. \label{38}
\end{align}
Combining all results, we find the total energy of the system with a renormalized mass in the zeroth-order approximation:
\begin{align}
E^{(0)}_L(\boldsymbol P, g) &= E^{(0)}_L(0, g) + \frac{P^2}{2}\left[1-\frac{g^2}{9\pi^2}\left(1+\frac{1}{\lambda}\sqrt{\frac{\pi}{3}} - \frac{(\sqrt{6\pi}+2\lambda)}{6\lambda}\right)\right] \nonumber
\\
&=E^{(0)}_L(0, g) + \frac{P^2}{2}\left[1-\frac{g^2}{9\pi^2}\left(\frac{2}{3}+\frac{1}{\lambda}\frac{\sqrt{6\pi}(\sqrt{2}-1)}{6}\right)\right], \label{39}
\end{align}
or, by plugging in $\lambda$ according to equation (\ref{23}), we finally obtain
\begin{align}
E^{(0)}_L(\boldsymbol P, g) = E^{(0)}_L(0, g) + \frac{P^2}{2}\left[1-\frac{g^2}{9\pi^2}\frac{17-\sqrt{2}}{21}\right]. \label{40}
\end{align}
In conclusion, the renormalized mass equals
\begin{align}
m^{*(0)} = 1+\frac{g^2}{9\pi^2}\frac{17-\sqrt{2}}{21}. \label{41}
\end{align}
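For orientation, the following minimal Python snippet evaluates the zeroth-order renormalized mass (\ref{41}) at a sample coupling; the chosen value of $g$ is an illustrative assumption only.
\begin{verbatim}
import numpy as np

def renormalized_mass(g):
    # Zeroth-order renormalized mass, Eq. (41): m* = 1 + g^2/(9 pi^2) (17 - sqrt(2))/21
    return 1.0 + g**2 / (9.0 * np.pi**2) * (17.0 - np.sqrt(2.0)) / 21.0

print(renormalized_mass(0.5))  # small correction above 1 in the weak-coupling regime
\end{verbatim}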
\section*{Appendix E: Calculation of matrix elements in the second iteration for the energy of the system}
\label{sec:calculation_of_matrix_elements_in_the_second_iteration_for_the_energy_of_the_system}
The calculation of the second iteration of the energy of the system requires the evaluation of the transition matrix elements $\langle\Psi^{(L)}_{\boldsymbol P}| \opa H |\Psi_{\boldsymbol P_1, \{ n_{\boldsymbol k}\}}\rangle$, $\langle\Psi^{(L)}_{\boldsymbol P}| \Psi_{\boldsymbol P_1, \{ n_{\boldsymbol k}\}}\rangle$ and $\langle \Psi_{\boldsymbol P_1, \{ n_{\boldsymbol k}\}}| \opa H |\Psi_{\boldsymbol P_1, \{ n_{\boldsymbol k}\}}\rangle$ from the full Hamiltonian of the system, equation (\ref{7}), with the function
\begin{align}
|\Psi^{(0)}_{\boldsymbol P_1, n_{\boldsymbol k}}\rangle &= \frac{1}{N_{\boldsymbol P_1,1_{\boldsymbol k}}\sqrt{\Omega}}\int d \boldsymbol R \phi_{\boldsymbol P_1}(\boldsymbol r- \boldsymbol R) \exp\left\{\ri (\boldsymbol P_1 - \boldsymbol k n_{\boldsymbol k})\!\cdot\!ot \boldsymbol R\right\}\nonumber
\\
&\times\exp\left[\sum_{\boldsymbol k}(u_{\boldsymbol k} e^{-\ri\thp{k}{R}}\opa a_{\boldsymbol k}^\dag - u_{\boldsymbol k}^* e^{\ri\thp{k}{R}}\opa a_{\boldsymbol k})\right]|n_{\boldsymbol k}\rangle \nonumber
\\
&=\frac{1}{N_{\boldsymbol P_1,1_{\boldsymbol k}}\sqrt{\Omega}}\int d \boldsymbol R \phi_{\boldsymbol P_1}(\boldsymbol r- \boldsymbol R) \exp\left\{\ri (\boldsymbol P_1 - \boldsymbol k n_{\boldsymbol k})\!\cdot\!ot \boldsymbol R - \frac{1}{2}\sum_{{\boldsymbol k}}|u_{\boldsymbol k}|^2 + \sum_{\boldsymbol k} u_{\boldsymbol k} e^{-\ri\thp{k}{R}}\opa a_{\boldsymbol k}^\dag \right\}\nonumber
\\
&\times\frac{(\opa a_{\boldsymbol k}^\dag - u_{\boldsymbol k}^* e^{\ri\thp{k}{R}})^{n_{\boldsymbol k}}}{\sqrt{n_{\boldsymbol k}!}}|0\rangle. \label{42}
\end{align}
The normalization constant in equation (\ref{42}) is calculated with the help of equation (\ref{4}) and has the form
\begin{align}
|N_{\boldsymbol P_1,1_{\boldsymbol k}}|^2 &= \int d \boldsymbol R_1 d \boldsymbol \rho \phi_{\boldsymbol P_1}^*(\boldsymbol \rho)\phi_{\boldsymbol P_1}(\boldsymbol \rho - \boldsymbol R_1) e^{\ri(\boldsymbol P_1 - \boldsymbol k)\!\cdot\!ot \boldsymbol R_1 + \sum_{\boldsymbol k} |u_{\boldsymbol k}|^2(e^{-\ri\thp{k}{R_1}}-1)}\nonumber
\\
&\times\left(2|u_{\boldsymbol k}|^2(\cos\thp{k}{R_1}-1)+1\right).
\end{align}
The calculation of the transition matrix element is performed with the help of equation (\ref{4}):
\begin{align}
\langle \Psi_{\boldsymbol P_1,n_{\boldsymbol k}}&|\opa H|\Psi_{\boldsymbol P}^{(L)}\rangle = \frac{(2\pi)^3 \delta(\boldsymbol P - \boldsymbol P_1)}{N_{\boldsymbol P_1,1_{\boldsymbol k}}^* N_{\boldsymbol P} \Omega} \int d \boldsymbol R_1 d \boldsymbol \rho \phi_{\boldsymbol P_1}^*(\boldsymbol \rho)\phi_{\boldsymbol P}(\boldsymbol \rho - \boldsymbol R_1) \nonumber
\\
&\times e^{\ri\thp{P}{R_1} + \sum_{\boldsymbol k} |u_{\boldsymbol k}|^2(e^{-\ri\thp{k}{R_1}}-1)}\frac{u_{\boldsymbol k}^{n_{\boldsymbol k}} (e^{-\ri\thp{k}{R_1}}-1)^{n_{\boldsymbol k}}}{\sqrt{n_{\boldsymbol k}!}} \nonumber
\\
&\times\Bigg[\frac{P_1^2}{2} + \left(\frac{1}{2}k^2+k- \thp{P_1}{k}\right) n_{\boldsymbol k} e^{-\ri\thp{k}{R_1}}(e^{-\ri\thp{k}{R_1}}-1)^{-1} \nonumber
\\
&\mspace{35mu}+\frac{g}{\sqrt{2 \Omega}}\frac{e^{-\ri\thp{k}{\rho}} }{\sqrt{\omega_k}}n_{\boldsymbol k} u_{\boldsymbol k}^{-1}(e^{-\ri\thp{k}{R_1}}-1)^{-1} \nonumber
\\
&\mspace{35mu}+\sum_{\boldsymbol m} \boldsymbol m |u_{\boldsymbol m}|^2 e^{-\ri\thp{m}{R_1}}\cdot \left(-\boldsymbol P_1+\boldsymbol k n_{\boldsymbol k} e^{-\ri\thp{k}{R_1}}(e^{-\ri\thp{k}{R_1}}-1)^{-1}\right) \nonumber
\\
&\mspace{35mu}+ \sum_{\boldsymbol m} \left(\frac{1}{2}m^2+m\right) |u_{\boldsymbol m}|^2 e^{-\ri\thp{m}{R_1}}+ \frac{1}{2}k^2 n_{\boldsymbol k}(n_{\boldsymbol k}-1)e^{-2\ri\thp{k}{R_1}}(e^{-\ri\thp{k}{R_1}}-1)^{-2} \nonumber
\\
&\mspace{35mu}+\frac{1}{2}\left(\sum_{\boldsymbol m} \boldsymbol m |u_{\boldsymbol m}|^2 e^{-\ri\thp{m}{R_1}}\right)^2+\frac{g}{\sqrt{2 \Omega}}\sum_{\boldsymbol m} \frac{u_{\boldsymbol m} e^{\ri\thp{m}{\rho}}}{\sqrt{\omega_m}}(e^{-\ri\thp{m}{R_1}}+1)\Bigg]. \label{44}
\end{align}
We further obtain the overlap integral
\begin{align}
\langle \Psi_{\boldsymbol P_1,n_{\boldsymbol k}}|\Psi_{\boldsymbol P}^{(L)}\rangle &= \frac{(2\pi)^3 \delta(\boldsymbol P - \boldsymbol P_1)}{N_{\boldsymbol P_1,1_{\boldsymbol k}}^* N_{\boldsymbol P} \Omega} \int d \boldsymbol R_1 d \boldsymbol \rho \phi_{\boldsymbol P_1}^*(\boldsymbol \rho)\phi_{\boldsymbol P}(\boldsymbol \rho - \boldsymbol R_1) e^{\ri\thp{P}{R_1} + \sum_{\boldsymbol k} |u_{\boldsymbol k}|^2(e^{-\ri\thp{k}{R_1}}-1)}\nonumber
\\
&\times\frac{u_{\boldsymbol k}^{n_{\boldsymbol k}} (e^{-\ri\thp{k}{R_1}}-1)^{n_{\boldsymbol k}}}{\sqrt{n_{\boldsymbol k}!}} \label{45}
\end{align}
and the expectation value of the Hamiltonian
\begin{align}
\langle \Psi_{\boldsymbol P_1,1_{\boldsymbol k}}|\opa H|\Psi_{\boldsymbol P_1,1_{\boldsymbol k}}\rangle &= \frac{P_1^2}{2} + \frac{1}{|N_{\boldsymbol P_1,1_{\boldsymbol k}}|^2 } \int d \boldsymbol R_1 d \boldsymbol \rho \phi_{\boldsymbol P_1}^*(\boldsymbol \rho)\phi_{\boldsymbol P_1}(\boldsymbol \rho - \boldsymbol R_1) e^{\ri(\boldsymbol P_1 - \boldsymbol k)\!\cdot\!ot \boldsymbol R_1 + \sum_{\boldsymbol k} |u_{\boldsymbol k}|^2(e^{-\ri\thp{k}{R_1}}-1)} \nonumber
\\
&\mspace{-105mu}\times\Bigg\{k^2 |u_{\boldsymbol k}|^2e^{-\ri\thp{k}{R_1}}\nonumber
\\
&\mspace{-75mu}+\left(2|u_{\boldsymbol k}|^2(\cos\thp{k}{R_1}-1)+1\right)\Bigg[ - \sum_{\boldsymbol m} \thp{P_1}{m} |u_{\boldsymbol m}|^2 e^{-\ri\thp{m}{R_1}} + \sum_{\boldsymbol m} \left(\frac{m^2}{2}+m\right)|u_{\boldsymbol m}|^2 e^{-\ri\thp{m}{R_1}}\nonumber
\\
&\mspace{90mu}+\frac{1}{2}\left(\sum_{\boldsymbol m} \boldsymbol m |u_{\boldsymbol m}|^2 e^{-\ri\thp{m}{R_1}}\right)^2 + \frac{g}{\sqrt{2 \Omega}}\sum_{\boldsymbol m} \frac{u_{\boldsymbol m} e^{\ri\thp{m}{\rho}}}{\sqrt{\omega_m}}(e^{-\ri\thp{m}{R_1}}+1)\Bigg]\nonumber
\\
&\mspace{-75mu}+\left(2|u_{\boldsymbol k}|^2(e^{-\ri\thp{k}{R_1}}-1)+1\right)\Bigg[-\thp{k}{P_1} + \sum_{\boldsymbol m} \thp{k}{m}|u_{\boldsymbol m}|^2 e^{-\ri\thp{m}{R_1}}+\frac{k^2}{2}+k\Bigg]\nonumber
\\
&\mspace{-75mu}+\frac{g}{\sqrt{2 \Omega}}\Bigg[\frac{u_{\boldsymbol k} e^{\ri\thp{k}{\rho}}}{\sqrt{\omega_{\boldsymbol k}}}(e^{-\ri\thp{k}{R_1}}-1) + \frac{u_{\boldsymbol k}^* e^{-\ri\thp{k}{\rho}}}{\sqrt{\omega_{\boldsymbol k}}}(1-e^{\ri\thp{k}{R_1}})\Bigg]\Bigg\}. \label{46}
\end{align}
Equations (\ref{44}-\ref{46}) are valid for arbitrary coupling constants. However, in the weak coupling limit they simplify significantly, as they can be expressed via Fourier transforms. Another significant simplification comes from the fact that the action of one field mode on the system is inversely proportional to the square root of the normalization volume $\Omega$, that is $u_{\boldsymbol k}\sim 1/\sqrt{\Omega}$. Consequently, such terms need to be kept only within sums. Within this approximation, equations (\ref{44}-\ref{46}) can be rewritten as
\begin{align}
\langle \Psi_{\boldsymbol P_1,n_k}|\Psi_{\boldsymbol P}^{(L)}\rangle = \frac{(2\pi)^3 \delta(\boldsymbol P - \boldsymbol P_1)}{\Omega}\frac{u_{\boldsymbol k}(\phi_{\boldsymbol P - \boldsymbol k}^2- \phi_{\boldsymbol P}^2)}{\phi_{\boldsymbol P} \phi_{\boldsymbol P - \boldsymbol k}},\label{47}
\end{align}
and
\begin{align}
\langle \Psi_{\boldsymbol P_1,n_k}|\opa H|\Psi_{\boldsymbol P}^{(L)}\rangle &= \frac{(2\pi)^3 \delta(\boldsymbol P - \boldsymbol P_1)}{\Omega}\frac{1}{\phi_{\boldsymbol P}\phi_{\boldsymbol P - \boldsymbol k}} \nonumber
\\
&\times \Bigg[\frac{P_1^2}{2} u_{\boldsymbol k}(\phi_{\boldsymbol P - \boldsymbol k}^2- \phi_{\boldsymbol P}^2) + \left(\frac{k^2}{2}+k- \thp{P_1}{k}\right)u_{\boldsymbol k} \phi_{\boldsymbol P - \boldsymbol k}^2+\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol P - \boldsymbol k} \phi_{\boldsymbol P}}{\sqrt{k}}\nonumber
\\
&-u_{\boldsymbol k}(\boldsymbol P_1 - \boldsymbol k)\sum_{\boldsymbol m}\boldsymbol m |u_{\boldsymbol m}|^2 \phi_{\boldsymbol P - \boldsymbol m - \boldsymbol k}^2 \nonumber
\\
&+ u_{\boldsymbol k}\sum_{\boldsymbol m}\left(\frac{m^2}{2}+m\right)|u_{\boldsymbol m}|^2 \phi_{\boldsymbol P - \boldsymbol m - \boldsymbol k}^2 +\frac{u_{\boldsymbol k}}{2}\sum_{\boldsymbol l,\boldsymbol m}\thp{l}{m}|u_{\boldsymbol l}|^2|u_{\boldsymbol m}|^2 \phi_{\boldsymbol P - \boldsymbol l - \boldsymbol m - \boldsymbol k}^2 \nonumber
\\
&+\frac{g}{\sqrt{2 \Omega}}u_{\boldsymbol k}\sum_{\boldsymbol m}\frac{u_{\boldsymbol m}}{\sqrt{m}}\phi_{\boldsymbol P - \boldsymbol k}(\phi_{\boldsymbol P - \boldsymbol k - \boldsymbol m}+\phi_{\boldsymbol P - \boldsymbol k + \boldsymbol m}) - u_{\boldsymbol k}\phi_{\boldsymbol P}^2 \tilde E_L^{(0)}\Bigg], \label{48}
\end{align}
as well as
\begin{align}
\langle \Psi_{\boldsymbol P_1,n_k}|\opa H|\Psi_{\boldsymbol P_1,n_k}\rangle &= \frac{P_1^2}{2}+\frac{1}{\phi_{\boldsymbol P - \boldsymbol k}^2}\Bigg[\phi_{\boldsymbol P -\boldsymbol k}^2\left(-\thp{k}{P_1}+\frac{k^2}{2}+k\right) - (\boldsymbol P_1-\boldsymbol k)\cdot\sum_{\boldsymbol m}\boldsymbol m |u_{\boldsymbol m}|^2\phi_{\boldsymbol P - \boldsymbol m - \boldsymbol k}^2\nonumber
\\
&+\sum_{\boldsymbol m}\left(\frac{m^2}{2}+m\right)|u_{\boldsymbol m}|^2\phi_{\boldsymbol P - \boldsymbol m - \boldsymbol k}^2 +\frac{g}{\sqrt{2 \Omega}}\sum_{\boldsymbol m}\frac{u_{\boldsymbol m}}{\sqrt{m}}\phi_{\boldsymbol P - \boldsymbol k}\left(\phi_{\boldsymbol P - \boldsymbol m-\boldsymbol k}+\phi_{\boldsymbol P + \boldsymbol m-\boldsymbol k}\right)\nonumber
\\
&+\frac{1}{2}\sum_{\boldsymbol l,\boldsymbol m}\thp{l}{m}|u_{\boldsymbol l}|^2|u_{\boldsymbol m}|^2 \phi_{\boldsymbol P - \boldsymbol m - \boldsymbol l - \boldsymbol k}^2 \Bigg]. \label{49}
\end{align}
In equations (\ref{47}-\ref{49}) we have used expressions for the normalization constants
\begin{align}
N_{\boldsymbol P} = \phi_{\boldsymbol P}, \quad N_{\boldsymbol P_1,1_{\boldsymbol k}} = \phi_{\boldsymbol P_1 - \boldsymbol k} \label{50}
\end{align}
and separated out the energy of the zeroth-order approximation
\begin{align}
\tilde E_L^{(0)} &= - \boldsymbol P\sum_{\boldsymbol m}\boldsymbol m |u_{\boldsymbol m}|^2 \frac{\phi_{\boldsymbol P - \boldsymbol m}^2}{\phi_{\boldsymbol P}^2} + \frac{1}{2}\sum_{\boldsymbol l,\boldsymbol m}\thp{m}{l}|u_{\boldsymbol m}|^2 |u_{\boldsymbol l}|^2 \frac{\phi_{\boldsymbol P - \boldsymbol m - \boldsymbol l}^2}{\phi_{\boldsymbol P}^2} \nonumber
\\
&+\sum_{\boldsymbol m}\left(\frac{m^2}{2} + m\right)|u_{\boldsymbol m}|^2 \frac{\phi_{\boldsymbol P - \boldsymbol m}^2}{\phi_{\boldsymbol P}^2} + \frac{g}{\sqrt{2 \Omega}}\sum_{\boldsymbol m}\frac{u_{\boldsymbol m}}{\sqrt{m}}\phi_{\boldsymbol P}\frac{(\phi_{\boldsymbol P - \boldsymbol m}+\phi_{\boldsymbol P + \boldsymbol m})}{\phi_{\boldsymbol P}}. \label{51}
\end{align}
\section*{Appendix F: Second iteration for the energy of the system with particle at rest}
\label{sec:second_iteration_for_the_energy_of_the_system_with_particle_at_rest}
The evaluation of the second iteration for the particle energy requires knowledge of the behavior of the different terms in expressions (\ref{47}-\ref{49}). In order to determine them, we carry out the summations over $\boldsymbol m$. For the first sum we can write
\begin{align}
\boldsymbol I^{(1)}_{\boldsymbol k} &= \frac{1}{g^2\phi_{\boldsymbol k}^2}\sum_{\boldsymbol m}\boldsymbol m |u_{\boldsymbol m}|^2 \phi_{\boldsymbol m+\boldsymbol k}^2 =\frac{\phi_0^2}{g^2\phi_{\boldsymbol k}^2} \frac{g^2}{2(2\pi)^3}\int d\boldsymbol m \left(\begin{aligned}
&m\sin \theta \cos \phi
\\
&m\sin \theta \sin \phi
\\
&m \cos \theta
\end{aligned}\right)\frac{e^{-\frac{m^2}{2 \langlembda^2}}}{m^3}e^{-\frac{m^2 + k^2 + 2\thp{m}{k}}{\langlembda^2}} \nonumber
\\
&=\frac{1}{8\pi^2}\frac{\boldsymbol k}{k}\int dm e^{-\frac{3}{2}\frac{m^2}{\langlembda^2}}\frac{-2km \langlembda^2 \cosh \frac{2km}{\langlembda^2} + \langlembda^4 \sinh \frac{2km}{\langlembda^2}}{2k^2 m^2}\nonumber
\\
&=\frac{\langlembda^2}{32\pi^2}\frac{\boldsymbol k}{k}\frac{4k-e^{\frac{2}{3}\frac{k^2}{\langlembda^2}}\sqrt{6\pi}\langlembda \text{Erf}\frac{\sqrt{\frac{2}{3}}k}{\langlembda}}{k^2} \underset{k\rightarrow\infty}{\longrightarrow}-\frac{\langlembda^2}{32\pi^2}\frac{\boldsymbol k}{k^3}\left(-4k+e^{\frac{2}{3}\frac{k^2}{\langlembda^2}}\sqrt{6\pi}\langlembda\right) \nonumber
\\
&\sim - \frac{\phi_0^2}{\phi_{\boldsymbol k}^2} \frac{\langlembda^3 \sqrt{6\pi}}{32\pi^2}\frac{e^{-\frac{1}{3}\frac{k^2}{\langlembda^2}}}{k^3}\boldsymbol k \langlebel{52}
\end{align}
and for the second
\begin{align}
I^{(2)}_{\boldsymbol k} &= \frac{1}{g^2 \phi_{\boldsymbol k}^2}\sum_{\boldsymbol m}\left(\frac{m^2}{2}+m\right)|u_{\boldsymbol m}|^2 \phi_{\boldsymbol m+\boldsymbol k}^2 = \frac{1}{2(2\pi)^3}\int \frac{d \boldsymbol m}{m^2}\left(1+\frac{m}{2}\right)e^{-\frac{3}{2}\frac{m^2}{\langlembda^2}-\frac{2\thp{m}{k}}{\langlembda^2}} \nonumber
\\
&=\frac{\langlembda^2}{8\pi^2}\int dm\left(1+\frac{m}{2}\right)e^{-\frac{3}{2}\frac{m^2}{\langlembda^2}} \frac{\sinh \frac{2km}{\langlembda^2}}{km}\nonumber
\\
&=\frac{\langlembda^2}{96k}\left(\frac{\sqrt{6\pi}\langlembda e^{\frac{2}{3}\frac{k^2}{\langlembda^2}}\text{Erf}\frac{\sqrt{\frac{2}{3}}k}{\langlembda}}{\pi^2}+\frac{6\text{Erfi}\frac{\sqrt{\frac{2}{3}}k}{\langlembda}}{\pi}\right) \nonumber
\\
&\underset{k\rightarrow\infty}{\longrightarrow} \frac{\langlembda^3}{96k^2 \pi^{\frac{3}{2}}}\left(\sqrt{6}e^{\frac{2}{3}\frac{k^2}{\langlembda^2}}(3+k)-\frac{6\ri k\sqrt{\pi}}{\langlembda}\right) \sim \frac{\phi_0^2}{\phi_{\boldsymbol k}^2} \frac{\langlembda^3\sqrt{6\pi}}{96\pi^2}\frac{e^{-\frac{1}{3}\frac{k^2}{\langlembda^2}}}{k} \langlebel{53}
\end{align}
and for the third
\begin{align}
I^{(3)}_{\boldsymbol k} &= \frac{1}{g^2 \phi_{\boldsymbol k}^2}\phi_{\boldsymbol k}\frac{g}{\sqrt{2 \Omega}}\sum_{\boldsymbol m}\frac{u_{\boldsymbol m}}{\sqrt{m}}(\phi_{\boldsymbol m+\boldsymbol k}+\phi_{\boldsymbol m - \boldsymbol k}) = - \frac{1}{2(2\pi)^3}\int \frac{d\boldsymbol m}{m^2}e^{-\frac{3}{4}\frac{m^2}{\langlembda^2}}\left(e^{-\frac{\thp{m}{k}}{\langlembda^2}}+e^{\frac{\thp{m}{k}}{\langlembda^2}}\right) \nonumber
\\
&= -\frac{\langlembda^2}{2\pi^2}\int dm e^{-\frac{3}{4}\frac{m^2}{\langlembda^2}}\frac{\sinh\frac{km}{\langlembda^2}}{km} = -\frac{\langlembda^2}{4\pi}\frac{\text{Erfi}\frac{k}{\sqrt{3}\langlembda}}{k} \nonumber
\\
&\underset{k\rightarrow\infty}{\longrightarrow} -\frac{\langlembda^2}{4k^2 \pi^{\frac{3}{2}}}(-\ri k \sqrt{\pi}+\sqrt{3}\langlembda e^{\frac{k^2}{3 \langlembda^2}}) \sim -\frac{\phi_0^2}{\phi_{\boldsymbol k}^2}\frac{\langlembda^3 \sqrt{3\pi}}{4\pi^2}\frac{e^{-\frac{2}{3}\frac{k^2}{\langlembda^2}}}{k^2}. \langlebel{54}
\end{align}
In the above expressions $\text{Erf}(x) = 2/\sqrt{\pi}\int_0^x e^{-z^2}dz$ and $\text{Erfi}(x) = -\ri\,\text{Erf}(\ri x)$ are the error function and the imaginary error function, respectively.
Consequently, we can introduce the abbreviations which were used in equations (\ref{eq:second_order_iteration_for_the_energy_convergence_4}-\ref{eq:second_order_iteration_for_the_energy_convergence_8}) of the manuscript, namely
\begin{align}
I_{\boldsymbol k} &= \boldsymbol k \cdot \boldsymbol I_{\boldsymbol k}^{(1)}+I_{\boldsymbol k}^{(2)}+I_{\boldsymbol k}^{(3)}\underset{k\rightarrow\infty}\sim-\frac{\lambda^3\sqrt{6\pi}}{48\pi^2}\frac{e^{\frac{2k^2}{3 \lambda^2}}}{k}. \label{55}
\end{align}
After the determination of the asymptotic behavior of the different terms, we can find the second iteration for the energy of the system
\begin{align}
E^{(2)} = \frac{A}{B}. \label{56}
\end{align}
For the numerator we have
\begin{align}
A = E_L^{(0)} &+ \sum_{\boldsymbol P_1, \{n_k\}}C^{(1)}_{\boldsymbol P_1, \{n_{\boldsymbol k}\}}\langle\Psi^{(L)}_{\boldsymbol P}| \opa H |\Psi_{\boldsymbol P_1, \{ n_{\boldsymbol k}\}}\rangle \nonumber
\\
&=E_L^{(0)} + \sum_{\boldsymbol k<\boldsymbol k_0}\frac{-\left(u_{\boldsymbol k} \frac{\phi_{\boldsymbol k}}{\phi_0}\left(\frac{k^2}{2}+k\right)+\frac{g}{\sqrt{2 \Omega}}\frac{1}{\sqrt{k}}\right)^2}{(\frac{k^2}{2}+k)}+\sum_{\boldsymbol k>\boldsymbol k_0}\frac{(-\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k} \phi_0}{\sqrt{k}})(-E_0 u_{\boldsymbol k})}{\phi_{\boldsymbol k}^2 I_{\boldsymbol k}g^2} \langlebel{57}
\end{align}
and for the denominator we obtain
\begin{align}
B = 1 &+ \sum_{\boldsymbol P_1, \{n_k\}}C^{(1)}_{\boldsymbol P_1, \{n_{\boldsymbol k}\}}\langle\Psi^{(L)}_{\boldsymbol P}| \Psi_{\boldsymbol P_1, \{ n_{\boldsymbol k}\}}\rangle \nonumber
\\
&=1+ \sum_{\boldsymbol k<\boldsymbol k_0}\frac{-\left(u_{\boldsymbol k} \frac{\phi_{\boldsymbol k}^2}{\phi_0^2}\left(\frac{k^2}{2}+k\right)+\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k}}{\phi_0\sqrt{k}}\right)u_{\boldsymbol k}(1-\frac{\phi_0^2}{\phi_{\boldsymbol k}^2})}{(\frac{k^2}{2}+k)}+ \sum_{\boldsymbol k>\boldsymbol k_0}\frac{(-\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k} \phi_0}{\sqrt{k}})u_{\boldsymbol k}(\frac{\phi_{\boldsymbol k}^2}{\phi_0^2}-1)}{\phi_{\boldsymbol k}^2 I_{\boldsymbol k}g^2 }. \langlebel{58}
\end{align}
Further explicit calculations yield
\begin{align}
A = E_L^{(0)} &+ \sum_{\boldsymbol k<\boldsymbol k_0}\frac{-\left(u_{\boldsymbol k} \frac{\phi_{\boldsymbol k}}{\phi_0}\left(\frac{k^2}{2}+k\right)+\frac{g}{\sqrt{2 \Omega}}\frac{1}{\sqrt{k}}\right)^2}{(\frac{k^2}{2}+k)} +\sum_{\boldsymbol k>\boldsymbol k_0}\frac{(-\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k} \phi_0}{\sqrt{k}})(-E_0 u_{\boldsymbol k})}{\phi_{\boldsymbol k}^2 I_{\boldsymbol k}g^2 } \nonumber
\\
&=E_L^{(0)}-\sum_{\boldsymbol k<\boldsymbol k_0}\left(u_{\boldsymbol k}^2\left(\frac{\phi_{\boldsymbol k}}{\phi_0}\right)^2\left(\frac{k^2}{2}+k\right) +\frac{2g}{\sqrt{2 \Omega}}\frac{u_{\boldsymbol k}}{\sqrt{k}}\frac{\phi_{\boldsymbol k}}{\phi_0}+\frac{g^2}{2 \Omega}\frac{1}{k^2(k/2+1)}\right)\nonumber
\\
&+\frac{g}{\sqrt{2 \Omega}}\frac{E_L^{(0)}}{g^2} \sum_{\boldsymbol k>\boldsymbol k_0}\frac{\phi_{\boldsymbol k}\phi_0 u_{\boldsymbol k}}{\sqrt{k}\phi_{\boldsymbol k}^2 I_{\boldsymbol k}} \nonumber
\\
&=E_L^{(0)}-\sum_{\boldsymbol k<\boldsymbol k_0}\left(u_{\boldsymbol k}^2\left(\frac{\phi_{\boldsymbol k}}{\phi_0}\right)^2\left(\frac{k^2}{2}+k\right) +\frac{2g}{\sqrt{2 \Omega}}\frac{u_{\boldsymbol k}}{\sqrt{k}}\frac{\phi_{\boldsymbol k}}{\phi_0}\right)-\frac{g^2}{2\pi^2}\ln\left(\frac{k_0}{2}+1\right)\nonumber
\\
&+\frac{E_L^{(0)}}{2(2\pi)^3} \int_{\boldsymbol k_0}^\infty d\boldsymbol k \frac{e^{-\frac{k^2}{4 \langlembda^2}}}{k^2}\frac{e^{-\frac{k^2}{2 \langlembda^2}}}{\frac{\langlembda^3 \sqrt{6\pi}}{48\pi^2}\frac{e^{-\frac{1}{3}\frac{k^2}{\langlembda^2}}}{k}} \nonumber
\\
&=E_L^{(0)}-\sum_{\boldsymbol k<\boldsymbol k_0}\left(u_{\boldsymbol k}^2\left(\frac{\phi_{\boldsymbol k}}{\phi_0}\right)^2\left(\frac{k^2}{2}+k\right) +\frac{2g}{\sqrt{2 \Omega}}\frac{u_{\boldsymbol k}}{\sqrt{k}}\frac{\phi_{\boldsymbol k}}{\phi_0}\right)-\frac{g^2}{2\pi^2}\ln\left(\frac{k_0}{2}+1\right)\nonumber
\\
&+E_{L}^{(0)}\frac{12}{\langlembda^3 \sqrt{6\pi}} \int_{k_0}^\infty dk k e^{-\frac{5}{12}\frac{k^2}{\langlembda^2}} \nonumber
\\
&=E_L^{(0)}-\left[\frac{g^2\langlembda}{24 \pi ^2}\left(\sqrt{6 \pi } \text{Erf}\left(\frac{\sqrt{\frac{3}{2}} k_0}{\langlembda }\right)+\langlembda -\langlembda e^{-\frac{3k_0^2}{2 \langlembda ^2}}\right)-\frac{g^2\langlembda}{2 \sqrt{3}\pi^{3/2}}\text{Erf}\left(\frac{\sqrt{3}k_0}{2\langlembda }\right)\right]\nonumber
\\
&-\frac{g^2}{2\pi^2}\ln\left(\frac{k_0}{2}+1\right)+E_L^{(0)}\frac{12\sqrt{6\pi}}{5 \lambda \pi}e^{-\frac{5k_0^2}{12 \lambda^2}} \label{59}
\end{align}
and
\begin{align}
B &= 1+ \sum_{\boldsymbol k<\boldsymbol k_0}\frac{-\left(u_{\boldsymbol k} \frac{\phi_{\boldsymbol k}^2}{\phi_0^2}\left(\frac{k^2}{2}+k\right)+\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k}}{\phi_0\sqrt{k}}\right)u_{\boldsymbol k}(1-\frac{\phi_0^2}{\phi_{\boldsymbol k}^2})}{(\frac{k^2}{2}+k)} + \sum_{\boldsymbol k>\boldsymbol k_0}\frac{(-\frac{g}{\sqrt{2 \Omega}}\frac{\phi_{\boldsymbol k} \phi_0}{\sqrt{k}})u_{\boldsymbol k}(\frac{\phi_{\boldsymbol k}^2}{\phi_0^2}-1)}{\phi_{\boldsymbol k}^2 I_{\boldsymbol k}g^2}\nonumber
\\
&=1+\frac{g^2}{2(2\pi)^3}\int_0^{\boldsymbol k_0} d\boldsymbol k \frac{k^2}{\langlembda^2}\left[\frac{e^{-\frac{3k^2}{2 \langlembda^2}}}{k^3}-\frac{e^{-\frac{3k^2}{4 \langlembda^2}}}{k^2(k^2/2+k)}\right]-\frac{12}{\langlembda^5 \sqrt{6\pi}}\int_{k_0}^\infty dk k^3 e^{-\frac{5}{12}\frac{k^2}{\langlembda^2}} \nonumber
\\
&=1+\frac{g^2}{12\pi^2}(1-e^{-\frac{3}{2}\frac{k_0^2}{\lambda^2}})-g^2f\left(\frac{k_0}{\lambda}\right)- \frac{144\sqrt{6\pi}}{25 \lambda \pi}\left(1+\frac{5}{12}\frac{k_0^2}{\lambda^2}\right)e^{-\frac{5k_0^2}{12 \lambda^2}} \label{60}
\end{align}
with
\begin{align*}
f(x) = \frac{1}{4\pi^2}\int_0^x\frac{tdt}{1+t/2}e^{-\frac{3}{4}t^2}.
\end{align*}
\end{document}
\begin{document}
\maketitle
\begin{abstract}
In this paper, we consider a non-convex loss-minimization problem of learning Supervised PageRank models, which can account for some properties not considered by classical approaches such as the classical PageRank model. We propose gradient-based and random gradient-free methods to solve this problem.
Our algorithms are based on the concept of an inexact oracle and, unlike the state-of-the-art gradient-based method, we provide theoretical convergence rate guarantees for both of them. In particular, under the assumption of local convexity of the loss function, our random gradient-free algorithm guarantees a decrease of the expected loss function value.
At the same time, we theoretically justify that, without a convexity assumption on the loss function, our gradient-based algorithm allows one to find a point where the stationarity condition is fulfilled with a given accuracy.
For both proposed optimization algorithms, we find the settings of hyperparameters which give the lowest complexity (i.e., the number of arithmetic operations needed to achieve the given accuracy of the solution of the loss-minimization problem). The resulting complexity estimates are also provided.
Finally, we apply the proposed optimization algorithms to the web page ranking problem and compare the proposed and state-of-the-art algorithms in terms of the considered loss function.
\end{abstract}
\section{\uppercase{Introduction}}
\label{intro}
The most acknowledged methods of measuring importance of nodes in graphs are based on random walk models. Particularly, PageRank~\cite{page}, HITS~\cite{kleinberg}, and their variants~\cite{haveliwala1, haveliwala2, richardson} are originally based on a discrete-time Markov random walk on a link graph.
According to the PageRank algorithm, the score of a node equals its probability in the stationary distribution of a Markov process which models a random walk on the graph.
Despite undeniable advantages of PageRank and its mentioned modifications,
these algorithms miss important aspects of the graph that are not described by its structure.
In contrast, a number of approaches allow one to account for different properties of nodes and edges between them by encoding them in restart and transition probabilities (see~\cite{dai, eiron, gao, jeh, liu, zhukovskii1, zhukovskii2}). These properties may include, e.g., the statistics about users' interactions with the nodes (in web graphs~\cite{liu} or graphs of social networks~\cite{backstrom}), types of edges (such as URL redirecting in web graphs~\cite{zhukovskii1}) or histories of nodes' and edges' changes~\cite{zhukovskii3}.
Particularly, the transition probabilities in BrowseRank algorithm~\cite{liu} are proportional to weights of edges which are equal to numbers of users' transitions.
In the general ranking framework called Supervised PageRank~\cite{zhukovskii2}, weights of nodes and edges in a graph are linear combinations of their features with coefficients as the model parameters. The existing optimization method~\cite{zhukovskii2} for learning these parameters and the optimization methods proposed in this paper have two levels. On the lower level, the following problem is solved: estimate the value of the loss function (in the case of a zero-order oracle) and its derivatives (in the case of a first-order oracle) for a given parameter vector.
On the upper level, the estimates obtained on the lower level of the optimization methods (which we also call inexact oracle information) are used for tuning the parameters by an iterative algorithm.
Following~\cite{gao}, the authors of Supervised PageRank consider a non-convex loss-minimization problem for learning the parameters and solve it by a two-level gradient-based method. On the lower level of this algorithm, an estimation of the stationary distribution of the considered Markov random walk is obtained by classical power method and estimations of derivatives w.r.t. the parameters of the random walk are obtained by power method introduced in~\cite{andrew1, andrew2}. On the upper level, the obtained gradient of the stationary distribution is exploited by the gradient descent algorithm.
As both power methods give imprecise values of the stationary distribution and its derivatives, there was no proof of the convergence of the state-of-the-art gradient-based method to a local optimum (for locally convex loss functions) or to the stationary point (for not locally convex loss functions).
The considered constrained non-convex loss-minimization problem from~\cite{zhukovskii2} cannot be solved by existing optimization methods which require exact values of the objective function, such as~\cite{nesterov3} and~\cite{ghadimi}, due to the presence of constraints on the parameter vector and the impossibility of calculating the exact value of the loss function and its gradient. Moreover, standard global optimization methods cannot be applied to solve it, because they need access to some stochastic approximation of the loss-function value which in expectation coincides with the true value of the loss function.
In our paper, we propose two two-level methods to solve the loss-minimization problem from~\cite{zhukovskii2}. On the lower level of these methods, we use the linearly convergent method from~\cite{nesterov4} to calculate an approximation to the stationary distribution of Markov random walk.
We analyze other methods from~\cite{gasnikov} and show that the chosen method is the most suitable, since it allows us to approximate the value of the loss function with any given accuracy and has the lowest complexity estimate among them.
The upper level of the first method is gradient-based.
The main obstacle which we have overcome is that the state-of-the-art methods for constrained non-convex optimization assume that the gradient is known exactly, which is not the case in our problem. We develop a gradient method for general constrained non-convex optimization problems with inexact oracle and estimate its convergence rate to the stationary point of the problem.
One of the advantages of our method is that it does not require knowledge of the Lipschitz constant of the gradient of the goal function, which is usually used to define the stepsize of a gradient algorithm. In order to calculate the approximation of the gradient which is used in the upper-level method, we generalize the linearly convergent method from~\cite{nesterov4} (and use it as part of the lower-level method). We prove that it has a linear rate of convergence as well.
The upper level of our second method is random gradient-free.
As for the gradient-based method, we encounter the problem that the existing gradient-free optimization methods~\cite{ghadimi, nesterov3} require exact values of the objective function.
Our contribution to the gradient-free methods framework consists in adapting the approach of~\cite{nesterov3} to the case when the value of the function is calculated with some known accuracy. We prove a convergence theorem for this method and exploit it on the upper level of the two-level algorithm for solving the problem of learning Supervised PageRank.
Another contribution consists in investigating both for the gradient and gradient-free methods the trade-off between the accuracy of the lower-level algorithm, which is controlled by the number of iterations of method in~\cite{nesterov4} and its generalization (for derivatives estimation), and the computational complexity of the two-level algorithm as a whole. Finally, we estimate the complexity of the whole two-level algorithms
for solving the loss-minimization problem with a given accuracy.
In the experiments, we apply our algorithms to learning Supervised PageRank on real data (we consider the problem of web pages' ranking). We show that both two-level methods outperform the state-of-the-art gradient-based method from~\cite{zhukovskii2}
in terms of the considered loss function.
Summing up, unlike the state-of-the-art method, our algorithms have theoretically proven convergence rate estimates and outperform it in ranking quality (as we show experimentally). The main advantages of the first, gradient-based algorithm are the following. There is no need to assume that the function is locally convex in order to guarantee that it converges to the stationary point.
This algorithm also has a smaller number of input parameters than the gradient-free one, because it does not need the Lipschitz constant of the gradient of the loss function.
The main advantage of the second, gradient-free algorithm is that it avoids calculating the derivative of each element of a large matrix.
The remainder of the paper is organized as follows. In Section~\ref{model}, we describe the random walk model. In Section~\ref{optimal}, we define the loss-minimization problem and discuss its properties.
In Section~\ref{S:f_nf_calc}, we state two technical lemmas about the numbers of iterations of Nesterov--Nemirovski method (and its generalization) needed to achieve any given accuracy of the loss function (and its gradient).
In Section~\ref{S:gradient_free_method} and Section~\ref{S:gradient_method} we describe the framework of random gradient-free and gradient-based optimization methods respectively, generalize them to the case when the objective function values and gradients are inaccurate and propose two-level algorithms for the stated loss-minimization problem. Proofs of all our results can be found in Appendix. The experimental results are reported in Section~\ref{Experimental results}. In Section~\ref{Conclusion}, we summarize the outcomes of our study, discuss its benefits and directions of future work.
\section{\uppercase{Model description}}
\label{model}
Let $\Gamma=(V,E)$ be a directed graph. Let
$$
\mathcal{F}_1=\{F(\varphi_1,\cdot): V \rightarrow\mathbb{R}\},\,\,\,
\mathcal{F}_2=\{G(\varphi_2,\cdot): E \rightarrow\mathbb{R}\}
$$
be two classes of functions parametrized by $\varphi_1\in\mathbb{R}^{m_1}, \varphi_2\in\mathbb{R}^{m_2}$ respectively, where $m_1$ is the number of nodes' features, $m_2$ is the number of edges' features. As in~\cite{zhukovskii2}, we suppose that for any $i\in V$ and any $\tilde i\rightarrow i\in E$, a vector of node's features $\mathbf{V}_i\in\mathbb{R}^{m_1}_+$ and a vector of edge's features $\mathbf{E}_{\tilde i i}\in\mathbb{R}^{m_2}_+$ are given. We set
\begin{equation}
F(\varphi_1,i)=\langle\varphi_1,\mathbf{V}_i\rangle, \quad G(\varphi_2,\tilde i\rightarrow i)=\langle\varphi_2,\mathbf{E}_{\tilde i i}\rangle.
\label{eq:F_q_G_q_def}
\end{equation}
We denote $m=m_1+m_2$, $p=|V|$.
Let us describe the random walk on the graph $\Gamma$, which was considered in~\cite{zhukovskii2}.
A surfer starts a random walk from a random page $i\in U$ ($U$ is some subset in $V$ called {\it seed set}, $|U|=n$).
We assume that $\varphi_1$ and node features are chosen in such way that $\sum_{\tilde i\in U} F(\varphi_1,\tilde i)$ is non-zero. The initial probability of being at vertex $i \in V$ is called the {\it restart probability} and equals
\begin{equation}
[\pi^0(\varphi)]_i=\frac{F(\varphi_1, i)}{\sum_{\tilde i\in U}F(\varphi_1, \tilde i)}, \quad i \in U
\label{restart}
\end{equation}
and $[\pi^0(\varphi)]_i=0$ for $i \in V\setminus U$.
At each step, the surfer (with a current position $\tilde i\in V$) either chooses with probability $\alpha\in(0,1)$ (originally~\cite{page}, $\alpha=0.15$), which is called the {\it damping factor}, to go to any vertex from $V$ in accordance with the distribution $\pi^0(\varphi)$ (makes a {\it restart}) or chooses to traverse an outgoing edge (makes a {\it transition}) with probability $1-\alpha$.
We assume that $\varphi_2$ and edges features are chosen in such way that $\sum_{j: \tilde{i} \to j}G(\varphi_2,\tilde{i} \to j)$ is non-zero for all $\tilde{i}$ with non-zero outdegree.
For $\tilde{i}$ with non-zero outdegree, the probability
\begin{equation}
[P(\varphi)]_{\tilde i,i}=\frac{G(\varphi_2, \tilde{i} \to i)}{\sum_{j: \tilde{i} \to j}G(\varphi_2,\tilde{i} \to j)}
\label{transition}
\end{equation}
of traversing an edge $\tilde i\rightarrow i\in E$ is called the {\it transition probability}.
If an outdegree of $\tilde i$ equals $0$, then we set $[P(\varphi)]_{\tilde i,i}=[\pi^0(\varphi)]_i$ for all $i\in V$ (the surfer with current position $\tilde i$ makes a restart with probability $1$). Finally, by Equations \ref{restart} and~\ref{transition} the total probability of choosing vertex $i\in V$ conditioned by the surfer being at vertex $\tilde i$ equals $\alpha[\pi^0(\varphi)]_i+(1-\alpha)[P(\varphi)]_{\tilde i, i}$.
Denote by $\pi \in \mathbb R^{p}$ the stationary distribution of the described Markov process.
It can be found as a solution of the system of equations
\begin{equation}
[\pi]_i = \alpha [\pi^0(\varphi)]_i + (1-\alpha)\sum_{\tilde{i}: \tilde{i} \to i \in E}[P(\varphi)]_{\tilde i,i}
[\pi]_{\tilde i}.
\label{pi_general}
\end{equation}
In this paper, we learn the ranking algorithm, which orders the vertices $i$ by their probabilities $[\pi]_{i}$ in the stationary distribution $\pi$.
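To make the random walk model concrete, the following small Python sketch builds a toy stationary distribution by iterating equation (\ref{pi_general}); the graph, restart vector and transition matrix below are hypothetical illustrations (with the features already folded in), not data used in this paper.
\begin{verbatim}
import numpy as np

def stationary_distribution(pi0, P, alpha=0.15, n_iter=200):
    # Fixed-point iteration for Eq. (pi_general): pi = alpha*pi0 + (1-alpha)*P^T pi.
    # pi0: restart distribution over the p nodes; P: p x p row-stochastic matrix.
    pi = pi0.copy()
    for _ in range(n_iter):
        pi = alpha * pi0 + (1.0 - alpha) * P.T @ pi
    return pi

pi0 = np.array([0.5, 0.5, 0.0])          # toy seed set of two nodes
P = np.array([[0.0, 1.0, 0.0],
              [0.5, 0.0, 0.5],
              [0.5, 0.5, 0.0]])
pi = stationary_distribution(pi0, P)
print(pi, pi.sum())                      # ranking scores; they sum to 1
\end{verbatim}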
\section{\uppercase{Loss-minimization problem statement}}
\label{optimal}
Let $Q$ be a set of queries and, for any $q\in Q$, let a set of nodes $V_q$ which are relevant to $q$ be given. We are also provided with a ranking algorithm which assigns nodes ranking scores $[\pi_q]_i$, $i \in V_q$, $\pi_q=\pi_q(\varphi)$, as its output. For example, in web search, the score $[\pi_q]_i$ may represent the relevance of the page $i$ w.r.t. the query $q$. Our goal is to find the parameter vector $\varphi$ which minimizes the discrepancy of the ranking scores from the ground truth scoring defined by assessors. For each $q\in Q$, there is a set of nodes in $V_q$ manually judged and grouped by relevance labels $1,\ldots,k$. We denote by $V^j_q$ the set of documents annotated with label $k+1-j$ (i.e., $V_q^1$ is the set of all nodes with the highest relevance score). For any two nodes $i_1\in V_q^{j_1},i_2\in V_q^{j_2}$, let $h(j_1,j_2,[\pi_q]_{i_2}-[\pi_q]_{i_1})$ be the value of the loss function. If it is non-zero, then the position of the node $i_1$ according to our ranking algorithm is higher than the position of the node $i_2$ although $j_1>j_2$. We consider square loss with margins $b_{j_1j_2}\geq 0$, where $1\leq j_2<j_1\leq k$:
$h(j_1,j_2,x)=(\min\{x+b_{j_1j_2},0\})^2$ as it was done in previous studies~\cite{liu, zhukovskii2, zhukovskii3}. Finally, we minimize
\begin{equation}
\frac{1}{|Q|} \sum_{q=1}^{|Q|}\sum\limits_{1\leq j_2<j_1\leq k}\sum\limits_{i_1\in V_q^{j_1},i_2\in V_q^{j_2}}h(j_1,j_2,[\pi_q]_{i_2}-[\pi_q]_{i_1})
\label{eq:f_phi_def_1}
\end{equation}
as a function of $\varphi$ over some set of feasible values, which may depend on the ranking model, in order to learn our model using the data given by assessors.
We consider the ranking algorithm from the previous section. Namely, $\pi_q$ is the stationary distribution~\eqref{pi_general} of the Markov random walk on a graph $\Gamma_q=(V_q,E_q)$ (the set of edges $E_q$ is given; its elements represent some relations between nodes which depend on the ranking problem solved by the random walk algorithm). Feature vectors (and, consequently, weights of nodes and edges $F_q:=F$ and $G_q:=G$) $\mathbf{V}_i^q$, $i\in V_q$, $\mathbf{E}_{\tilde i i}^q$, $\tilde i\rightarrow i\in E_q$, depend on $q$ as well. For example, vertices in $V_q$ may represent web pages which were visited by users after submitting a query $q$, and features may reflect different properties of the query--page pair. For a fixed $q\in Q$, given the graph $\Gamma_q$ and the features $\mathbf{V}_i^q$, $i\in V_q$, $\mathbf{E}_{\tilde i i}^q$, $\tilde i\rightarrow i\in E_q$, we use the notation of the previous section with an added index $q$: $U_q:=U$, $\pi_q^0:=\pi^0$, $P_q:=P$, $p_q:=p$, $n_q:=n$, $\pi_q:=\pi$. The parameters $\alpha$ and $\varphi = (\varphi_1, \varphi_2)^T$ of the model do not depend on $q$. We also denote $p=\max_{q \in Q} p_q$, $n=\max_{q \in Q} n_q$. Also, let $s=\max_{q \in Q} s_q$, where $s_q=\max_{i \in V_q} |\{j: i \to j \in E_q\}|$ is the {\it sparsity parameter}, the maximum number of outgoing links from a node in $V_q$.
In order to guarantee that the probabilities in \eqref{restart} and \eqref{transition} are non-negative and do not blow up due to a zero value of the denominator, we need to choose appropriately the set $\Phi$ of possible values of the parameters $\varphi$. Recalling \eqref{eq:F_q_G_q_def}, it is natural to choose some $\hat{\varphi}$ and $R >0 $ such that the set $\Phi$ (which we call the feasible set of parameters) defined as $\Phi = \{ \varphi \in \mathbb R^m: \|\varphi-\hat{\varphi}\|_2 \leq R \}$ lies in the set of vectors with positive components $\mathbb R^m_{++}$\footnote{As the probabilities $[\pi^0_q(\varphi)]_i$, $i\in V_q$, $[P_q(\varphi)]_{\tilde i,i}$, $\tilde i\rightarrow i\in E_q$, are scale-invariant ($\pi^0_q(\lambda\varphi)=\pi^0_q(\varphi)$, $P_q(\lambda\varphi)=P_q(\varphi)$), in our experiments we consider the set $\Phi=\{ \varphi \in \mathbb R^m: \|\varphi-e_m\|_2 \leq 0.99 \}$, where $e_m \in \mathbb R^m$ is the vector of all ones, which has a large intersection with the simplex $\{\varphi \in \mathbb R^m_{++}: \|\varphi\|_1=1\}$}.
We denote by $\pi_q(\varphi)$ the solution of the equation
\begin{equation}
\pi_q = \alpha \pi^0_q(\varphi) + (1-\alpha) P_q^T(\varphi) \pi_q
\label{eq:pi_Phi_P2}
\end{equation}
which is Equation~\eqref{pi_general} rewritten for fixed $q \in Q$ in the vector form.
From \eqref{eq:pi_Phi_P2} we obtain the following equation for the $p_q \times m$ matrix $\frac{d \pi_q(\varphi)}{d \varphi^T}$, which is the derivative of the stationary distribution $\pi_q(\varphi)$ with respect to $\varphi$:
\begin{align}
& \frac{d \pi_q(\varphi)}{d \varphi^T} = \Pi^0_q(\varphi) + (1-\alpha) P_q^T(\varphi) \frac{d \pi_q(\varphi)}{d \varphi^T},
\label{eq:pi_q_full_der}
\end{align}
where
\begin{equation}
\Pi^0_q(\varphi) = \alpha \frac{d \pi^0_q(\varphi)}{d \varphi^T} + (1-\alpha) \sum_{i=1}^{p_q} \frac{ d p_i(\varphi)}{d \varphi^T} [\pi_q(\varphi)]_i
\label{eq:Pi_q_0_def}
\end{equation}
and $p_i(\varphi)$ is the $i$-th column of the matrix $P_q^T(\varphi)$.
Let us rewrite the function defined in~\eqref{eq:f_phi_def_1} as
\begin{equation}
f(\varphi)= \frac{1}{|Q|} \sum_{q=1}^{|Q|} \|(A_q \pi_q (\varphi) +b_q)_{+} \|^2_2,
\label{eq:f_phi_def_2}
\end{equation}
where the vector $x_+$ has components $[x_+]_i=\max\{x_i,0\}$, the matrices $A_q \in \mathbb R^{r_q \times p_q}$, $q \in Q$, represent the assessors' view of the relevance of pages to the query $q$, the vectors $b_q$, $q \in Q$, are composed of the thresholds $b_{j_1,j_2}$ in \eqref{eq:f_phi_def_1} with fixed $q$, and $r_q$ is the number of summands in \eqref{eq:f_phi_def_1} with fixed $q$. We denote $r=\max_{q \in Q} r_q$. Then the gradient of the function $f(\varphi)$ is easy to derive:
\begin{equation}
\nabla f(\varphi) = \frac{2}{|Q|} \sum_{q=1}^{|Q|} \left(\frac{d \pi_q(\varphi)}{d \varphi^T} \right)^T A_q^T (A_q \pi_q (\varphi) +b_q)_{+}.
\label{eq:nf}
\end{equation}
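For illustration, the following Python sketch shows how the value \eqref{eq:f_phi_def_2} and the gradient \eqref{eq:nf} are assembled once (approximations of) $\pi_q(\varphi)$ and $\frac{d \pi_q(\varphi)}{d \varphi^T}$ are available; the list-based data layout is an assumption made for the example only.
\begin{verbatim}
import numpy as np

def loss_and_grad(pi_list, dpi_list, A_list, b_list):
    # f(phi) and grad f(phi) as in Eqs. (eq:f_phi_def_2) and (eq:nf),
    # given per-query data: pi_list[q] of length p_q, dpi_list[q] of shape
    # (p_q, m), A_list[q] of shape (r_q, p_q), b_list[q] of length r_q.
    Q = len(pi_list)
    f, g = 0.0, 0.0
    for pi_q, dpi_q, A_q, b_q in zip(pi_list, dpi_list, A_list, b_list):
        residual = np.maximum(A_q @ pi_q + b_q, 0.0)   # (A_q pi_q + b_q)_+
        f += residual @ residual
        g = g + 2.0 * dpi_q.T @ (A_q.T @ residual)
    return f / Q, g / Q
\end{verbatim}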
Finally, the loss-minimization problem which we solve in this paper is as follows
\begin{equation}
\min_{ \varphi \in \Phi} f(\varphi), \Phi = \{ \varphi \in \mathbb R^m: \|\varphi-\hat{\varphi}\|_2 \leq R \}.
\label{eq:prob_form}
\end{equation}
To solve this problem, we use gradient-free methods which are based only on $f(\varphi)$ calculations (zero-order oracle) and gradient methods which are based on $f(\varphi)$ and $\nabla f(\varphi)$ calculations (first-order oracle). We do not use methods with oracle of higher order since the loss function is not convex and we assume that $m$ is large.
\section{\uppercase{Numerical calculation of the value and the gradient of} $f(\varphi)$}
\label{S:f_nf_calc}
One of the main difficulties in solving Problem \ref{eq:prob_form} is that calculating the value of the function $f(\varphi)$ requires calculating $|Q|$ vectors $\pi_q(\varphi)$ which solve \eqref{eq:pi_Phi_P2}. In our setting, this vector has huge dimension $p_q$ and hence it is computationally very expensive to find it exactly. Moreover, in order to calculate $\nabla f(\varphi)$ one needs to calculate the derivative of each of these huge-dimensional vectors, which is also computationally very expensive to do exactly. At the same time, our ultimate goal is to provide methods for solving Problem \ref{eq:prob_form} with an estimated rate of convergence and complexity. Due to the expensiveness of calculating exact values of $f(\varphi)$ and $\nabla f(\varphi)$, we have to use the framework of optimization methods with inexact oracle, which requires controlling the accuracy of the oracle; otherwise the convergence is not guaranteed. This means that we need to be able to calculate an approximation to the value of the function $f(\varphi)$ (inexact zero-order oracle) with a given accuracy for gradient-free methods and an approximation to the pair $(f(\varphi),\nabla f(\varphi))$ (inexact first-order oracle) with a given accuracy for gradient methods. Hence we need some numerical scheme which allows us to calculate approximations of $\pi_q(\varphi)$ and $\frac{d \pi_q(\varphi)}{d \varphi^T}$ for every $q\in Q$ with a given accuracy.
Motivated by the last requirement we have analysed state-of-the-art methods for finding the solution of Equation \ref{eq:pi_Phi_P2} in huge dimension summarized in the review~\cite{gasnikov} and power method, used in~\cite{page, backstrom, zhukovskii2}.
Only four methods allow us to make the difference $\|\pi_q(\varphi)-\tilde{\pi}_q\|$, where $\tilde{\pi}_q$ is the approximation, small for some norm $\|\cdot\|$, which is crucial to estimate the error in the approximation of the value of the function $f(\varphi)$. These methods are: Markov Chain Monte Carlo (MCMC), Spillman's, Nesterov-Nemirovski's (NN) and the power method. Spillman's algorithm and the power method converge in the infinity norm, which is usually $p_q$ times larger than the 1-norm. MCMC converges in the 2-norm, which is usually $\sqrt{p_q}$ times larger than the 1-norm. Also, MCMC is randomized and converges only in average, which makes it hard to control the accuracy of the approximation $\tilde{\pi}_q$. Apart from the other three, NN is deterministic and converges in the 1-norm, which gives a minimum $\sqrt{p_q}$ times better approximation.
At the same time, to the best of our knowledge, NN method is the only method that admits a generalization which, as we prove in this paper, calculates the derivative $\frac{d \pi_q(\varphi)}{d \varphi^T}$ with any given accuracy.
The method by~\cite{nesterov4} for approximation of $\pi_q(\varphi)$ for any fixed $q\in Q$ constructs a sequence $\pi_k$
by the following rule
\begin{equation}
\pi_0 = \pi_q^0(\varphi), \quad \pi_{k+1} = P_q^T(\varphi) \pi_k.
\label{eq:pi_k+1_def}
\end{equation}
The output of the algorithm (for some fixed non-negative integer $N$) is
\begin{equation}
\tilde{\pi}^N_q (\varphi) = \frac{\alpha}{1-(1-\alpha)^{N+1}} \sum_{k=0}^{N} {(1-\alpha)^k \pi_k}.
\label{eq:tpi_def}
\end{equation}
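The averaging scheme \eqref{eq:pi_k+1_def}--\eqref{eq:tpi_def} is straightforward to implement; the following Python sketch (with a dense matrix for brevity, whereas in practice $P_q$ is sparse) is only meant to illustrate the recursion.
\begin{verbatim}
import numpy as np

def nn_stationary(pi0, P, alpha, N):
    # Nesterov--Nemirovski averaging, Eqs. (eq:pi_k+1_def)-(eq:tpi_def):
    # pi_{k+1} = P^T pi_k starting from pi_0 = pi^0(phi), then a geometric average.
    pi_k = pi0.copy()
    acc = pi_k.copy()                      # k = 0 term, weight (1-alpha)^0
    for k in range(1, N + 1):
        pi_k = P.T @ pi_k
        acc += (1.0 - alpha) ** k * pi_k
    return alpha / (1.0 - (1.0 - alpha) ** (N + 1)) * acc
\end{verbatim}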
\begin{Lm}
Assume that for some $\delta_1>0$ Method \ref{eq:pi_k+1_def}, \ref{eq:tpi_def} with
$
N = \left\lceil \frac{1}{\alpha} \ln \frac{8r}{\delta_1} \right\rceil - 1
$
is used to calculate the vector $\tilde{\pi}^N_q (\varphi)$ for every $q\in Q$. Then
\begin{equation}
\tilde{f}(\varphi,\delta_1) = \frac{1}{|Q|} \sum_{q=1}^{|Q|} \|(A_q \tilde{\pi}_q^{N}(\varphi) +b_q)_{+} \|^2_2
\label{eq:tfN1_def}
\end{equation}
satisfies
\begin{equation}
|\tilde{f}(\varphi,\delta_1) - f(\varphi)|\leq \delta_1.
\label{eq:tf_error}
\end{equation}
Moreover, the calculation of $\tilde{f}(\varphi,\delta_1)$ requires not more than
$
|Q|(3 mps + 3 psN+6r)
$
a.o.
\label{Lm:f_compl}
\end{Lm}
The proof of Lemma~\ref{Lm:f_compl} can be found in Appendix A.1.
Our generalization of the method~\cite{nesterov4} for calculation of $\frac{d \pi_q(\varphi)}{d \varphi^T}$ for any $q\in Q$ is the following.
Choose some non-negative integer $N_1$ and calculate $\tilde{\pi}^{N_1}_q(\varphi)$ using \eqref{eq:pi_k+1_def}, \eqref{eq:tpi_def}. Start with the initial point
\begin{equation}
\Pi_0 = \alpha \frac{d \pi^0_q(\varphi)}{d \varphi^T} + (1-\alpha) \sum_{i=1}^{p_q} \frac{ d p_i(\varphi)}{d \varphi^T} [\tilde{\pi}^{N_1}_q(\varphi)]_i.
\label{eq:Pi_0_def}
\end{equation}
Iterate
\begin{equation}
\Pi_{k+1} = P_q^T(\varphi) \Pi_k.
\label{eq:Pik+1_def}
\end{equation}
The output is (for some fixed non-negative integer $N_2$)
\begin{equation}
\tilde{\Pi}_q^{N_2}(\varphi) = \frac{1}{1-(1-\alpha)^{N_2+1}} \sum_{k=0}^{N_2} (1-\alpha)^k \Pi_k.
\label{eq:tPi_def}
\end{equation}
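Under the same illustrative conventions, a minimal sketch of \eqref{eq:Pi_0_def}--\eqref{eq:tPi_def} is given below; \texttt{dpi0} stands for $\frac{d \pi^0_q(\varphi)}{d \varphi^T}$, \texttt{dP\_rows} for the list of matrices $\frac{d p_i(\varphi)}{d \varphi^T}$, $i=1,\dots,p_q$, and \texttt{pi\_tilde} for $\tilde{\pi}^{N_1}_q(\varphi)$; all names are placeholders rather than a definitive implementation.
\begin{verbatim}
def nn_approx_dpi(P_q, dP_rows, dpi0, pi_tilde, alpha, N2):
    # initial point: alpha * dpi0 + (1-alpha) * sum_i dP_rows[i] * pi_tilde[i]
    Pi_k = alpha * dpi0 + (1.0 - alpha) * sum(
        dP_rows[i] * pi_tilde[i] for i in range(len(dP_rows)))
    acc = Pi_k.copy()         # k = 0 term of the weighted sum
    w = 1.0
    for _ in range(N2):
        Pi_k = P_q.T @ Pi_k   # Pi_{k+1} = P_q^T(phi) Pi_k
        w *= 1.0 - alpha
        acc += w * Pi_k
    return acc / (1.0 - (1.0 - alpha) ** (N2 + 1))
\end{verbatim}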
In what follows, we use the following norm on the space of matrices $A \in \mathbb R^{n_1\times n_2}$: $\|A\|_1 = \max_{j = 1,...,n_2} \sum_{i=1}^{n_1} |a_{ij}|$.
\begin{Lm}
Let $\beta_1$ be a number (explicitly computable, see Appendix A.2 Equation~\ref{beta_1}) such that for all $\varphi \in \Phi$
\begin{equation}
\|\Pi^0_q(\varphi)\|_1 \leq \alpha \left\|\frac{d \pi^0_q(\varphi)}{d \varphi^T} \right\|_1 + (1-\alpha) \sum_{i=1}^{p_q} \left\|\frac{ d p_i(\varphi)}{d \varphi^T} \right\|_1 \leq \beta_1.
\label{eq:Pi_0_est}
\end{equation}
Assume that Method \ref{eq:pi_k+1_def}, \ref{eq:tpi_def} with
$
N_1= \left\lceil \frac{1}{\alpha} \ln \frac{24\beta_1 r}{\alpha \delta_2} \right\rceil - 1
$
is used for every $q \in Q$ to calculate the vector $\tilde{\pi}_q^{N_1}(\varphi)$ \eqref{eq:tpi_def} and Method \ref{eq:Pi_0_def}, \ref{eq:Pik+1_def}, \ref{eq:tPi_def}
with
$
N_2=\left\lceil \frac{1}{\alpha} \ln \frac{8\beta_1 r}{\alpha \delta_2} \right\rceil - 1
$
is used for every $q \in Q$ to calculate the matrix $\tilde{\Pi}_q^{N_2}(\varphi)$ \eqref{eq:tPi_def}.
Then the vector
\begin{equation}
\tilde{g}(\varphi,\delta_2) = \frac{2}{|Q|} \sum_{q=1}^{|Q|} \left(\tilde{\Pi}_q^{N_2}(\varphi) \right)^T A_q^T (A_q \tilde{\pi}_q^{N_1}(\varphi) +b_q)_{+}
\label{eq:tnfN2_def}
\end{equation}
satisfies
\begin{equation}
\left\| \tilde{g}(\varphi,\delta_2) - \nabla f(\varphi)\right\|_{\infty} \leq \delta_2.
\label{eq:tnf_error}
\end{equation}
Moreover the calculation of $\tilde{g}(\varphi,\delta_2)$ requires not more than
$
|Q| (10 mps + 3 psN_1+ 3mpsN_2 + 7r)
$
a.o.
\label{Lm:nf_compl}
\end{Lm}
The proof of Lemma \ref{Lm:nf_compl} can be found in Appendix A.2.
\section{\uppercase{Random gradient-free optimization methods}}
\label{S:gradient_free_method}
In this section, we first describe a general framework of random gradient-free methods with inexact oracle and then apply it to Problem \ref{eq:prob_form}. Lemma \ref{Lm:f_compl} allows us to control the accuracy of the inexact zero-order oracle and hence to apply random gradient-free methods with inexact oracle.
\subsection{\uppercase{General framework}}
Below we extend the framework of random gradient-free methods~\cite{agarwal, nesterov3, ghadimi} to the situation where the value of the objective function in a general optimization problem is available only with a uniformly bounded error of unknown nature.
In contrast to~\cite{nesterov3}, we consider randomization on a Euclidean sphere, which seems to give better large-deviation bounds and does not require the assumption that the objective function can be calculated at any point of $\mathbb R^m$.
Let $E$ be an $m$-dimensional vector space. In this subsection, we consider a general function $f(\cdot): E \to \mathbb R$ and denote its argument by $x$ or $y$ to avoid confusion with other sections. We choose some norm $\|\cdot\|$ in $E$ and say that $f \in C^{1,1}_{L} (\|\cdot\|)$ iff
\begin{equation}
|f(x)-f(y) - \langle \nabla f(y) ,x-y \rangle| \leq \frac{L}{2}\|x-y\|^2, \quad \forall x,y \in E.
\label{eq:fLipSm}
\end{equation}
The problem of interest is to find $\min_{x\in X} f(x)$, where $f \in C^{1,1}_{L}(\|\cdot\|)$, $X$ is a closed convex set, and there exists a number $D \in (0,+\infty)$ such that ${\rm diam}\, X := \max_{x,y \in X}\|x-y\| \leq D$. We also assume that the inexact zero-order oracle for $f(x)$ returns a value $\tilde{f}(x,\delta)=f(x) + \tilde{\delta}(x)$, where $\tilde{\delta}(x)$ is an error satisfying $|\tilde{\delta}(x)| \leq \delta$ for all $x \in X$ with some known $\delta >0$.
Let $x^* \in \arg \min_{x\in X} f(x)$. Denote $f^* = \min_{x\in X} f(x)$.
In contrast to \cite{nesterov3}, we define the biased gradient-free oracle
\begin{equation}
g_{\mu}(x,\delta) = \frac{m}{\mu}(\tilde{f}(x+\mu \xi,\delta) -\tilde{f}(x,\delta)) \xi,
\notag
\end{equation}
where $\xi$ is a random vector uniformly distributed over the unit sphere ${\mathcal S} =\{ t \in \mathbb R^m : \|t\|_2 = 1\}$ and $\mu$ is a smoothing parameter.
Algorithm~\ref{alg:GFPGM} below is a variation of the gradient descent method. Here $\Pi_{X}(x)$ denotes the Euclidean projection of a point $x$ onto the set $X$.
\begin{algorithm}[h!]
\caption{Gradient-type method}
\label{alg:GFPGM}
\begin{algorithmic}
\STATE {\bfseries Input:} Point $x_0 \in X$, stepsize $h>0$, number of steps $M$.
\STATE Set $k=0$.
\REPEAT
\STATE Generate $\xi_k$ and calculate corresponding $g_{\mu}(x_k,\delta)$.
\STATE Calculate $x_{k+1} = \Pi_{X}(x_k- h g_{\mu}(x_k,\delta))$.
\STATE Set $k=k+1$.
\UNTIL{$k>M$}
\STATE {\bfseries Output:} The point $\hat{x}_M = \arg \min_x \{ f(x): x \in \{ x_0, \dots, x_M\}\}$.
\end{algorithmic}
\end{algorithm}
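To make the oracle and the update explicit, here is a minimal sketch of one iteration of Algorithm \ref{alg:GFPGM} for the Euclidean-ball case; it is illustrative only, \texttt{f\_tilde} is assumed to be a user-supplied inexact zero-order oracle, and all names are placeholders.
\begin{verbatim}
import numpy as np

def gradient_free_step(x, f_tilde, mu, h, center, radius):
    m = x.size
    xi = np.random.randn(m)
    xi /= np.linalg.norm(xi)      # uniform direction on the unit sphere
    g = (m / mu) * (f_tilde(x + mu * xi) - f_tilde(x)) * xi
    y = x - h * g                 # gradient-type step
    # Euclidean projection onto X = {x : ||x - center||_2 <= radius}
    d = y - center
    nd = np.linalg.norm(d)
    return y if nd <= radius else center + radius * d / nd
\end{verbatim}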
The next theorem gives the convergence rate of Algorithm \ref{alg:GFPGM}.
Denote by $\mathcal{U}_k=(\xi_0, \dots, \xi_k)$ the history of realizations of the vector $\xi$ generated on each iteration of the algorithm.
\begin{Th}
Let $f \in C^{1,1}_{L} (\|\cdot\|_2)$ be convex. Assume that $x^*\in {\rm int}\, X$, and that the sequence $x_k$ is generated by Algorithm \ref{alg:GFPGM} with $h=\frac{1}{8mL}$.
Then for any $M \geq 0$, we have
\begin{align}
& {\mathbb E}_{\mathcal{U}_{M-1}} f(\hat{x}_M) - f^*\leq \notag \\
& \leq \frac{8mL D^2}{M+1} + \frac{\mu^2 L (m+8)}{8} + \frac{\delta m D}{4 \mu } + \frac{\delta^2 m}{L \mu^2}.
\label{eq:rtSmth}
\end{align}
\label{th_1}
\end{Th}
The full proof of the theorem is in Appendix B.
It is easy to see that to make the right-hand side of \eqref{eq:rtSmth} less than a desired accuracy $\varepsilon$ it is sufficient to choose
\begin{align}
&M = \left\lceil \frac{32mLD^2}{\varepsilon}\right\rceil, \quad \mu = \sqrt{\frac{2 \varepsilon}{L (m+8)}},\quad \delta \leq \frac{\varepsilon^{\frac32}\sqrt{2}}{8mD\sqrt{L(m+8)}}.
\label{eq:Alg_GFPGM_param}
\end{align}
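For example, under the assumptions of Theorem \ref{th_1} these parameters can be computed directly from $\varepsilon$, $L$, $D$ and $m$; the sketch below merely evaluates \eqref{eq:Alg_GFPGM_param} (the function name is a placeholder).
\begin{verbatim}
import math

def gfpgm_parameters(eps, L, D, m):
    M = math.ceil(32 * m * L * D ** 2 / eps)
    mu = math.sqrt(2 * eps / (L * (m + 8)))
    delta_max = eps ** 1.5 * math.sqrt(2) / (8 * m * D * math.sqrt(L * (m + 8)))
    return M, mu, delta_max
\end{verbatim}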
\subsection{\uppercase{Solving the learning problem}}
\label{learn}
In this subsection, we apply the results of the previous subsection to solve Problem \ref{eq:prob_form} in the following way.
We assume that the set $\Phi$ is a small vicinity of some local minimum $\varphi^*$ and that the function $f(\varphi)$ is convex in this vicinity (generally speaking, the function defined in \eqref{eq:f_phi_def_2} is nonconvex). We choose the desired accuracy $\varepsilon$ of approximation of the optimal value $f^*$ in this problem. In accordance with \eqref{eq:Alg_GFPGM_param}, this accuracy gives us the number of steps of Algorithm~\ref{alg:GFPGM}, the value of the parameter $\mu$, and the required accuracy $\delta$ of the inexact zero-order oracle.
Knowing the value $\delta$ and using Lemma~\ref{Lm:f_compl}, we choose the number of steps $N$ of Algorithm~\ref{eq:pi_k+1_def},~\ref{eq:tpi_def} and calculate an approximation $\tilde{f}(\varphi,\delta)$ of the value of $f(\varphi)$ with accuracy $\delta$. Then we use the inexact zero-order oracle $\tilde{f}(\varphi,\delta)$ to make a step of Algorithm~\ref{alg:GFPGM}. Theorem \ref{th_1} and the fact that the feasible set $\Phi$ is a Euclidean ball make it natural to choose the $\|\cdot\|_2$-norm in the space $\mathbb R^m$ of the parameter $\varphi$. It is easy to see that in this norm ${\rm diam}\, \Phi \leq 2R$. Algorithm \ref{alg:RGFGM_learn} is a formal record of these ideas. To the best of our knowledge, this is the first time that the idea of random gradient-free optimization methods has been combined with an efficient method for huge-scale optimization using the concept of an inexact zero-order oracle.
\begin{algorithm}[h!]
\caption{Gradient-free method for Problem \ref{eq:prob_form}}
\label{alg:RGFGM_learn}
\begin{algorithmic}
\STATE {\bfseries Input:} Point $\varphi_0 \in \Phi$, $L$ -- Lipschitz constant for the function $f(\varphi)$ on $\Phi$, accuracy $\varepsilon >0$.
\STATE Define $M=\left\lceil128 m \frac{LR^2}{\varepsilon}\right\rceil$, $\delta= \frac{\varepsilon^{\frac32}\sqrt{2}}{16mR\sqrt{L(m+8)}} $, $\mu = \sqrt{\frac{ 2\varepsilon}{L (m+8)}}$.
\STATE Set $k=0$.
\REPEAT
\STATE Generate random vector $\xi_k$ uniformly distributed over a unit Euclidean sphere ${\mathcal S}$ in $\mathbb R^m$.
\STATE Calculate $\tilde{f}(\varphi_k+ \mu \xi_k,\delta)$, $\tilde{f}(\varphi_k,\delta)$ using Lemma \ref{Lm:f_compl} with $\delta_1=\delta$.
\STATE Calculate $g_{\mu}(\varphi_k,\delta) = \frac{m}{\mu}(\tilde{f}(\varphi_k+ \mu \xi_k,\delta)-\tilde{f}(\varphi_k,\delta))\xi_k$.
\STATE Calculate $\varphi_{k+1} = \Pi_{\Phi}\left(\varphi_k- \frac{1}{8 m L} g_{\mu}(\varphi_k,\delta)\right)$.
\STATE Set $k=k+1$.
\UNTIL{$k>M$}
\STATE {\bfseries Output:} The point $\hat{\varphi}_M = \arg \min_{\varphi} \{ f(\varphi): \varphi \in \{ \varphi_0, \dots, \varphi_M\}\}$.
\end{algorithmic}
\end{algorithm}
The computationally hardest operations at each iteration of the main cycle of this method are the calculations of $\tilde{f}(\varphi_k+ \mu \xi_k,\delta)$ and $\tilde{f}(\varphi_k,\delta)$. Using Lemma \ref{Lm:f_compl}, we obtain that each iteration of Algorithm \ref{alg:RGFGM_learn} needs not more than
$$
2|Q|\left(3 mps + \frac{3 ps}{\alpha}\ln \frac{128mrR\sqrt{L(m+8)}}{\varepsilon^{3/2}\sqrt{2}} + 6r\right)
$$
a.o. So, we obtain the following result, which gives the complexity of Algorithm \ref{alg:RGFGM_learn}.
\begin{Th}
Assume that the set $\Phi$ in \eqref{eq:prob_form} is chosen in such a way that $f(\varphi)$ is convex on $\Phi$ and some $\varphi^* \in \arg \min_{\varphi \in \Phi}f(\varphi)$ belongs also to ${\rm int}\, \Phi$. Then the mean total number of arithmetic operations of Algorithm \ref{alg:RGFGM_learn} for the accuracy $\varepsilon$ (i.e. for the inequality ${\mathbb E}_{\mathcal{U}_{M-1}} f(\hat{\varphi}_M) - f(\varphi^*) \leq \varepsilon$ to hold) is not more than
$$
768 mps|Q| \frac{LR^2}{\varepsilon} \left( m + \frac{1}{\alpha} \ln \frac{128mrR\sqrt{L(m+8)}}{\varepsilon^{3/2}\sqrt{2}}+6r\right).
$$
\label{Th:RGFGM_learn_compl}
\end{Th}
\section{\uppercase{Gradient-based optimization methods}}
\label{S:gradient_method}
In this section, we first develop a general framework of gradient methods with inexact oracle for non-convex problems from a rather general class and then apply it to the particular Problem \ref{eq:prob_form}. Lemma \ref{Lm:f_compl} and Lemma \ref{Lm:nf_compl} allow us to control the accuracy of the inexact first-order oracle and hence to apply the proposed framework.
\subsection{\uppercase{General framework}}
\label{S:gradient_method_general}
In this subsection, we generalize the approach of~\cite{ghadimi} to constrained non-convex optimization problems. Our main contribution consists in developing this framework for an inexact first-order oracle and an unknown ``Lipschitz constant'' of this oracle.
Let $E$ be a finite-dimensional real vector space and $E^*$ be its dual. We denote the value of a linear function $g \in E^*$ at $x\in E$ by $\langle g, x \rangle$. Let $\|\cdot\|$ be some norm on $E$ and $\|\cdot\|_*$ be its dual.
Our problem of interest in this subsection is a {\it composite optimization} problem of the form
\begin{equation}
\min_{x \in X} \{ \psi(x) := f(x) + h(x)\},
\label{eq:PrStateInit}
\end{equation}
where $X \subset E$ is a closed convex set and $h(x)$ is a simple convex function, e.g. $\|x\|_1$. We assume that $f(x)$ is a general function endowed with an inexact first-order oracle in the following sense. There exists a number $L \in (0,+\infty)$ such that for any $\delta \geq 0$ and any $x\in X$ one can calculate $\tilde{f}(x,\delta) \in \mathbb R$ and $\tilde{g}(x,\delta) \in E^*$ satisfying
\begin{equation}
|f(y)-(\tilde{f}(x,\delta) + \langle\tilde{g}(x,\delta) ,y-x \rangle)| \leq \frac{L}{2}\|x-y\|^2 + \delta
\label{eq:dL_or_def}
\end{equation}
for all $y \in X$. The constant $L$ can be considered as a ``Lipschitz constant'' because for the exact first-order oracle of a function $f \in C_L^{1,1} (\|\cdot\|)$ Inequality~\ref{eq:dL_or_def} holds with $\delta = 0$. This is a generalization of the concept of the $(\delta,L)$-oracle considered in \cite{Devolder2013} for convex problems.
We choose a {\it prox-function} $d(x)$ which is continuously differentiable and $1$-strongly convex on $X$ with respect to $\|\cdot\|$.
This means that for any $x,y \in X$
\begin{equation}
d(y)-d(x) -\langle \nabla d(x) ,y-x \rangle \geq \frac12\|y-x\|^2.
\label{eq:sc_def}
\end{equation}
We define also the corresponding {\it Bregman distance}:
\begin{equation}
V (x, z) = d(x) - d(z) - \langle \nabla d(z), x - z \rangle.
\label{eq:BrDistDef}
\end{equation}
Let us define for any $\bar{x} \in E$, $g \in E^*$, $\gamma > 0$
\begin{equation}
x_X (\bar{x},g,\gamma) = \arg \min_{x \in X} \left\{\langle g,x \rangle + \frac{1}{\gamma}V(x,\bar{x}) +h(x) \right\},
\label{eq:x_Q}
\end{equation}
\begin{equation}
g_X (\bar{x},g,\gamma) = \frac{1}{\gamma}(\bar{x}-x_X (\bar{x},g,\gamma)).
\label{eq:g_Q}
\end{equation}
We assume that the set $X$ is {\it simple} in the sense that the vector $x_X (\bar{x},g,\gamma)$ can be calculated explicitly or very efficiently for any $\bar{x} \in X$, $g \in E^*$, $\gamma > 0$.
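For instance, in the Euclidean setting with $d(x)=\frac12\|x\|_2^2$, so that $V(x,z)=\frac12\|x-z\|_2^2$, and with $h \equiv 0$ and $X$ a Euclidean ball, the mappings \eqref{eq:x_Q} and \eqref{eq:g_Q} reduce to a projected gradient step and the corresponding gradient mapping. A minimal illustrative sketch follows; all names are placeholders.
\begin{verbatim}
import numpy as np

def project_ball(y, center, radius):
    d = y - center
    nd = np.linalg.norm(d)
    return y if nd <= radius else center + radius * d / nd

def x_X(x_bar, g, gamma, center, radius):
    # argmin_x <g, x> + (1/(2*gamma)) ||x - x_bar||_2^2 over the ball
    return project_ball(x_bar - gamma * g, center, radius)

def g_X(x_bar, g, gamma, center, radius):
    return (x_bar - x_X(x_bar, g, gamma, center, radius)) / gamma
\end{verbatim}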
\begin{algorithm}[h!]
\caption{Adaptive projected gradient algorithm}
\label{alg:gen_PG_LA_2}
\begin{algorithmic}
\STATE {\bfseries Input:} Point $x_0 \in X$, number $L_0 >0$.
\STATE Set $k=0$, $z=+\infty$.
\REPEAT
\STATE Set $M_k=L_k$, ${\rm flag}=0$.
\REPEAT
\STATE Set $\delta = \frac{\varepsilon}{16M_k}$.
\STATE Calculate $\tilde{f}(x_k,\delta)$ and $\tilde{g}(x_k,\delta)$.
\STATE Find
\begin{equation}
w_k=x_X \left(x_k,\tilde{g}(x_k,\delta),\frac{1}{M_k}\right)
\label{eq:w_k_2}
\end{equation}
\STATE Calculate $\tilde{f}(w_k,\delta)$.
\STATE If the inequality
\begin{align}
&\tilde{f}(w_k,\delta) \leq \tilde{f}(x_k,\delta) + \langle \tilde{g}(x_k,\delta) ,w_k - x_k \rangle + \notag \\
& +\frac{M_k}{2}\|w_k - x_k\|^2 + \frac{\varepsilon}{8M_k}
\label{eq:gen_PG_LA_2_main}
\end{align}
holds, set ${\rm flag}=1$. Otherwise set $M_k=2M_k$.
\UNTIL{${\rm flag}=1$}
\STATE Set $x_{k+1} = w_k$, $L_{k+1}=\frac{M_k}{2}$, $\tilde{g}_k = \tilde{g}(x_k,\delta)$.
\STATE If $\left\|g_X \left(x_k,\tilde{g}_k,\frac{1}{M_k}\right)\right\| < z$, set $z=\left\|g_X \left(x_k,\tilde{g}_k,\frac{1}{M_k}\right)\right\|$, $\hat{k}=k$.
\STATE Set $k=k+1$.
\UNTIL{$z \leq \varepsilon$}
\STATE {\bfseries Output:} The point $x_{\hat{k}+1}$.
\end{algorithmic}
\end{algorithm}
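The inner cycle of Algorithm \ref{alg:gen_PG_LA_2} is a backtracking search for a workable constant $M_k$. The sketch below shows one outer iteration in the Euclidean case with $h\equiv 0$; it is illustrative only, \texttt{oracle(x, delta)} is assumed to return the pair $(\tilde f(x,\delta),\tilde g(x,\delta))$, and \texttt{prox} is a prox-mapping such as the one sketched above with the feasible-set parameters already bound.
\begin{verbatim}
import numpy as np

def adaptive_step(x_k, oracle, prox, L_k, eps):
    M_k = L_k
    while True:
        delta = eps / (16.0 * M_k)
        f_x, g_x = oracle(x_k, delta)
        w_k = prox(x_k, g_x, 1.0 / M_k)   # w_k = x_X(x_k, g(x_k), 1/M_k)
        f_w, _ = oracle(w_k, delta)
        rhs = (f_x + g_x @ (w_k - x_k)
               + 0.5 * M_k * np.dot(w_k - x_k, w_k - x_k)
               + eps / (8.0 * M_k))
        if f_w <= rhs:                    # sufficient-decrease test
            return w_k, M_k / 2.0         # next iterate and L_{k+1}
        M_k *= 2.0
\end{verbatim}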
\begin{Th}
Assume that $f(x)$ is endowed with an inexact first-order oracle in the sense of \eqref{eq:dL_or_def} and that there exists a number $\psi^* > -\infty$ such that $\psi(x) \geq \psi^*$ for all $x \in X$. Then after $M$ iterations of Algorithm \ref{alg:gen_PG_LA_2} it holds that
\begin{equation}
\left\|g_X \left(x_{\hat{k}},\tilde{g}_{\hat{k}},\frac{1}{M_{\hat{k}}}\right)\right\|^2 \leq \frac{4L(\psi(x_0)-\psi^*)}{M+1} + \frac{\varepsilon}{2}.
\label{eq:pg_la_2_rate}
\end{equation}
Moreover, the total number of checks of Inequality \ref{eq:gen_PG_LA_2_main} is not more than $M+\log_2\frac{2L}{L_0}$.
\label{Th:pg_la_2_rate}
\end{Th}
The full proof of the theorem is in Appendix C.
It is easy to show that if $\left\|g_X \left(x_{\hat{k}},\tilde{g}_{\hat{k}},\frac{1}{M_{\hat{k}}}\right)\right\|^2 \leq \varepsilon$ for small $\varepsilon$, then for all $x \in X$ it holds that $\left\langle \nabla f(x_{\hat{k}+1}) + p , x-x_{\hat{k}+1} \right\rangle \geq - c \sqrt{\varepsilon}$, where $c >0$ is a constant and $p$ is some subgradient of $h(x)$ at $x_{\hat{k}+1}$. This means that at the point $x_{\hat{k}+1}$ the necessary condition of a local minimum is fulfilled with good accuracy, i.e. $x_{\hat{k}+1}$ is a good approximation of a stationary point.
\subsection{\uppercase{Solving the learning problem}}
In this subsection, we return to Problem~\ref{eq:prob_form} and apply the results of the previous subsection. For this problem, $h(\cdot) \equiv 0$. It is easy to show that in the 1-norm ${\rm diam}\, \Phi \leq 2 R \sqrt{m}$. For any $\delta >0$, Lemma \ref{Lm:f_compl} with $\delta_1 = \frac{\delta}{2}$ allows us to obtain $\tilde{f}(\varphi,\delta_1)$ such that Inequality~\ref{eq:tf_error} holds, and Lemma~\ref{Lm:nf_compl} with $\delta_2=\frac{\delta}{4 R \sqrt{m}}$ allows us to obtain $\tilde{g}(\varphi,\delta_2)$ such that Inequality~\ref{eq:tnf_error} holds. Similarly to \cite{Devolder2013}, since $f \in C_L^{1,1} (\|\cdot\|_2)$, these two inequalities lead to Inequality~\ref{eq:dL_or_def} with $\tilde{f}(\varphi,\delta_1)$ in the role of $\tilde{f}(x,\delta)$, $\tilde{g}(\varphi,\delta_2)$ in the role of $\tilde{g}(x,\delta)$ and $\|\cdot\|_2$ in the role of $\|\cdot\|$.
We choose the desired accuracy $\varepsilon$ of approximating a stationary point of Problem \ref{eq:prob_form}. This accuracy determines the required accuracy $\delta$ of the inexact first-order oracle for $f(\varphi)$ at each step of the inner cycle of Algorithm \ref{alg:gen_PG_LA_2}.
Knowing the value $\delta_1=\frac{\delta}{2}$ and using Lemma \ref{Lm:f_compl}, we choose the number of steps $N$ of Algorithm \ref{eq:pi_k+1_def}, \ref{eq:tpi_def} and thus approximate $f(\varphi)$ with the required accuracy $\delta_1$ by $\tilde{f}(\varphi,\delta_1)$.
Knowing the value $\delta_2=\frac{\delta}{4R\sqrt{m}}$ and using Lemma \ref{Lm:nf_compl}, we choose the number of steps $N_1$ of Algorithm \ref{eq:pi_k+1_def}, \ref{eq:tpi_def} and the number of steps $N_2$ of Algorithm \ref{eq:Pi_0_def}, \ref{eq:Pik+1_def}, \ref{eq:tPi_def} and obtain the approximation $\tilde{g}(\varphi,\delta_2)$ of $\nabla f(\varphi)$ with the required accuracy $\delta_2$. Then we use the inexact first-order oracle $(\tilde{f}(\varphi,\delta_1), \tilde{g}(\varphi,\delta_2))$ to perform a step of Algorithm \ref{alg:gen_PG_LA_2}.
Since $\Phi$ is a Euclidean ball, it is natural to set $E = \mathbb R^m$ and $\|\cdot\|= \|\cdot\|_2$, and to choose the prox-function $d(\varphi)=\frac12\|\varphi\|_2^2$. Then the Bregman distance is $V(\varphi,\omega)=\frac{1}{2}\|\varphi-\omega\|_2^2$.
Algorithm \ref{alg:NCGM_learn} is a formal record of the above ideas. To the best of our knowledge, this is the first time that the idea of gradient optimization methods has been combined with an efficient method for huge-scale optimization using the concept of an inexact first-order oracle.
\begin{algorithm}[h!]
\caption{Adaptive gradient method for Problem~\ref{eq:prob_form}}
\label{alg:NCGM_learn}
\begin{algorithmic}
\STATE {\bfseries Input:} Point $\varphi_0 \in \Phi$, number $L_0 >0$, accuracy $\varepsilon >0$.
\STATE Set $k=0$, $z=+ \infty$.
\REPEAT
\STATE Set $M_k=L_k$, ${\rm flag}=0$.
\REPEAT
\STATE Set $\delta_1 = \frac{\varepsilon}{32M_k}$, $\delta_2 = \frac{\varepsilon}{64M_kR\sqrt{m}}$.
\STATE Calculate $\tilde{f}(\varphi_k, \delta_1)$ using Lemma \ref{Lm:f_compl} and $\tilde{g}(\varphi_k, \delta_2)$ using Lemma \ref{Lm:nf_compl}.
\STATE Find
\begin{equation}
\omega_k= \arg \min_{\varphi \in \Phi} \left\{\langle \tilde{g}(\varphi_k, \delta_2),\varphi \rangle + \frac{M_k}{2}\|\varphi-\varphi_k\|_2^2 \right\}.
\notag
\end{equation}
\STATE Calculate $\tilde{f}(\omega_k, \delta_1)$ using Lemma \ref{Lm:f_compl}.
\STATE If the inequality
\begin{align}
&\tilde{f}(\omega_k, \delta_1) \leq \tilde{f}(\varphi_k, \delta_1) + \langle \tilde{g}(\varphi_k, \delta_2) ,\omega_k - \varphi_k \rangle +\frac{M_k}{2}\|\omega_k - \varphi_k\|_2^2 + \frac{\varepsilon}{8M_k}
\notag
\end{align}
holds, set ${\rm flag}=1$. Otherwise set $M_k=2M_k$.
\UNTIL{${\rm flag}=1$}
\STATE Set $\varphi_{k+1} = \omega_k$, $L_{k+1}=\frac{M_k}{2}$.
\STATE If $\left\|g_\Phi\left(\varphi_{k},\tilde{g}(\varphi_k, \delta_2),\frac{1}{M_k}\right)\right\|_2 < z$, set $z=\left\|g_\Phi\left(\varphi_{k},\tilde{g}(\varphi_k, \delta_2),\frac{1}{M_k}\right)\right\|_2$, $\hat{k}=k$.
\STATE Set $k=k+1$.
\UNTIL{$z\leq \varepsilon$}
\STATE {\bfseries Output:} The point $\varphi_{\hat{k}+1}$.
\end{algorithmic}
\end{algorithm}
The computationally most expensive operations of the inner cycle of Algorithm \ref{alg:NCGM_learn} are the calculations of $\tilde{f}(\varphi_k, \delta_1)$, $\tilde{f}(\omega_k, \delta_1)$ and $\tilde{g}(\varphi_k, \delta_2)$. Using Lemma \ref{Lm:f_compl} and Lemma \ref{Lm:nf_compl}, we obtain that each inner iteration of Algorithm \ref{alg:NCGM_learn} needs not more than
$$
7r|Q| + \frac{6mps|Q|}{\alpha} \ln \frac{1024\beta_1 r R L \sqrt{m}}{ \alpha \varepsilon }
$$
a.o.
Using Theorem \ref{Th:pg_la_2_rate}, we obtain the following result, which gives the complexity of Algorithm \ref{alg:NCGM_learn}.
\begin{Th}
The total number of arithmetic operations in Algorithm \ref{alg:NCGM_learn} for the accuracy $\varepsilon$ (i.e. for the inequality $\left\|g_\Phi\left(\varphi_{\hat{k}},\tilde{g}(\varphi_{\hat{k}},\delta_2),\frac{1}{M_{\hat{k}}}\right)\right\|_2^2 \leq \varepsilon$ to hold) is not more than
\begin{align}
&\left(\frac{8L(f(\varphi_0)-f^*)}{\varepsilon}+\log_2\frac{2L}{L_0}\right) \left(7r|Q| + \frac{6mps|Q|}{\alpha} \ln \frac{1024\beta_1 r R L \sqrt{m}}{ \alpha \varepsilon } \right). \notag
\end{align}
\label{Th:NCGM_learn_compl}
\end{Th}
\section{\uppercase{Experimental results}}
\label{Experimental results}
We apply different learning techniques (our gradient-free and gradient-based methods and a state-of-the-art gradient-based method) to the web page ranking problem and compare their performances. In the next subsection, we describe the graph which we exploit in our experiments (the user browsing graph). In Section~\ref{data} and Section~\ref{results}, we describe the dataset and the results of the experiments, respectively.
\subsection{\uppercase{User browsing graph}}
\label{graph}
In this subsection, we define the user web browsing graph (first considered in~\cite{liu}). We choose the user browsing graph instead of a link graph in order to make the model query-dependent.
Let $q$ be any query from the set $Q$. A user session $S_q$ (see~\cite{liu}), which is started from $q$, is a sequence of pages $(i_1,i_2,...,i_k)$ such that, for each $j\in\{1,2,...,k-1\}$, the element $i_j$ is a web page and there is a record $i_j\rightarrow i_{j+1}$ made by the toolbar. The session finishes if the user types a new query or if more than 30 minutes have passed since the last user activity. We call pages $i_j,i_{j+1}$, $j\in\{1,\ldots,k-1\}$, {\it the neighboring elements} of the session $S_q$.
We define user browsing graphs $\Gamma_q=(V_q,E_q)$, $q\in Q$, as follows. The set of vertices $V_q$ consists of all the distinct elements from all the sessions which are started from a query $q\in Q$. The set of directed edges $E_q$ represents all the ordered pairs of neighboring elements $(\tilde i,i)$ from such sessions. We add a page $i$ in the seed set $U_q$ if and only if there is a session which is started from $q$ and contains $i$ as its first element. Moreover, we set $\tilde i\rightarrow i\in E_q$ if and only if there is a session which is started from $q$ and contains the pair of neighboring elements $\tilde i,i$.
\subsection{\uppercase{Data}}
\label{data}
All experiments are performed with pages and links crawled by a popular commercial search engine.
We randomly choose the set of queries $Q$ the user sessions start from, which contains $600$ queries. There are $\approx11.7$K vertices and $\approx7.5$K edges in graphs $\Gamma_q$, $q\in Q$, in total. For each query, a set of pages was judged by professional assessors hired by the search engine. Our data contains $\approx1.7$K judged query--document pairs.
The relevance score is selected from among 5 labels.
We divide our data into two parts. On the first part $Q_1$ ($50\%$ of the set of queries $Q$) we train the parameters and on the second part $Q_2$ we test the algorithms.
To define weights of nodes and edges we consider a set of $m_1=26$ query--document features.
For any $q\in Q$ and $i\in V_q$, the vector $\mathbf{V}^q_i$ contains values of all these features for query--document pair $(q,i)$.
The vector of $m_2=52$ features $\mathbf{E}_{\tilde i i}^q$ for an edge $\tilde i\rightarrow i\in E_q$ is obtained simply by concatenation of the feature vectors of pages $\tilde i$ and $i$.
To study the dependency between the efficiency of the algorithms and the sizes of the graphs, we sort the sets $Q_1,Q_2$ in ascending order of the sizes of the respective graphs. The sets $Q_j^1$, $Q_j^2$, $Q_j^3$ contain the first (in terms of this order) $100,200,300$ elements, respectively, for $j\in\{1,2\}$.
\subsection{\uppercase{Performances of the optimization algorithms}}
\label{results}
We find the optimal values of the parameters $\varphi$ by all the considered methods (our gradient-free method GFN (Algorithm~\ref{alg:RGFGM_learn}), our gradient-based method GBN (Algorithm~\ref{alg:NCGM_learn}), and the state-of-the-art gradient-based method GBP), which solve Problem \ref{eq:f_phi_def_2}.
The hyperparameters which are exploited by the optimization methods (and not tuned by them) are the following: the Lipschitz constant $L=10^{-4}$ in GFN (and $L_0=10^{-4}$ in GBN), the accuracy $\varepsilon=10^{-6}$ (in both GBN and GFN), and the radius $R=0.99$ (in both GBN and GFN). On all sets of queries, we compare the final values of the loss function for GBN when $L_0\in\{10^{-4},10^{-3},10^{-2},10^{-1},1\}$. The differences are less than $10^{-7}$. We choose $L$ in GFN to be equal to the $L_0$ chosen for GBN. In Figure~\ref{Fig:others}, we show how the choice of $L$ influences the output of the gradient-free algorithm. Moreover, we evaluate both our gradient-based and gradient-free algorithms for different values of the accuracies. The outputs of the algorithms differ insignificantly on all test sets $Q_2^i$, $i\in\{1,2,3\}$, when $\varepsilon\leq 10^{-6}$. On the lower level of the state-of-the-art gradient-based algorithm, the stochastic matrix and its derivative are raised to the powers $N_1$ and $N_2$ respectively. We choose $N_1=N_2=100$, since the outputs of the algorithm differ insignificantly on all test sets when $N_1\geq 100$, $N_2\geq 100$. We evaluate GBP for different values of the step size ($50,100,200,500$). We stop the GBP algorithms when the difference between the values of the loss function at the next step and at the current step is less than $-10^{-5}$ on the test sets. In Figure~\ref{Fig:sets}, we give the outputs of the optimization algorithms on each iteration of the upper levels of the learning processes on the test sets.
\begin{figure}[!ht]
\centering
\includegraphics[width=0.52\textwidth]{set100.png} \\
\includegraphics[width=0.52\textwidth]{set200.png} \\
\includegraphics[width=0.52\textwidth]{set300.png} \\
\caption{\small Values of the loss function on each iteration of the optimization algorithms on the test sets.}
\label{Fig:sets}
\end{figure}
In Table~\ref{Table:comparison}, we present the performances of the optimization algorithms in terms of the loss function $f$~\eqref{eq:f_phi_def_1}. We also compare the algorithms with the untuned Supervised PageRank ($\varphi=\varphi_0=e_m$).
\begin{table}[!ht]
\centering
\begin{tabular}{|c|c|c|c|c|c|c|}
\hline
& \multicolumn{2}{|c|}{$Q_2^1$} & \multicolumn{2}{|c|}{$Q_2^2$} & \multicolumn{2}{|c|}{$Q_2^3$} \\
\cline{2-7}
\small{Meth.} & \small{loss} & \small{steps} & \small{loss} & \small{steps} & \small{loss} & \small{steps} \\
\hline
\small{PR} & \small{$.00357$} & \small{$0$} & \small{$.00354$} & \small{$0$} & \small{$.0033$} & \small{$0$} \\
\hline
\small{GBN} & \small{$.00279$} & \small{$12$} & \small{$.00305$} & \small{$12$} & \small{$.00295$} & \small{$12$} \\
\hline
\small{GFN} & \small{$.00274$} & \small{$10^6$} & \small{$.00297$} & \small{$10^6$} & \small{$.00292$} & \small{$10^6$} \\
\hline
\small{GBP} & \small{$.00282$} & \small{$16$} & \small{$.00307$} & \small{$31$} & \small{$.00295$} & \small{$40$} \\
\small{$50$s.} & & & & & & \\
\hline
\small{GBP} & \small{$.00282$} & \small{$8$} & \small{$.00307$} & \small{$16$} & \small{$.00295$} & \small{$20$} \\
\small{$100$s.} & & & & & & \\
\hline
\small{GBP} & \small{$.00283$} & \small{$4$} & \small{$.00308$} & \small{$7$} & \small{$.00295$} & \small{$9$} \\
\small{$200$s.} & & & & & & \\
\hline
\small{GBP} & \small{$.00283$} & \small{$2$} & \small{$.00308$} & \small{$2$} & \small{$.00295$} & \small{$3$} \\
\small{$500$s.} & & & & & & \\
\hline
\end{tabular}
\caption{\small Comparison of the algorithms on the test sets.}
\label{Table:comparison}
\end{table}
\begin{figure}[!ht]
\centering
\begin{tabular}{ll}
\hspace*{-0.04\textwidth}
\includegraphics[width=0.5\textwidth]{approximations.png} &
\hspace*{-0.05\textwidth}
\includegraphics[width=0.5\textwidth]{lipshitz.png} \\
\end{tabular}
\caption{\small Comparison of convergence rates of the power method and the method of Nesterov and Nemirovski (on the left) \& loss function values on each iteration of GFN with different values of the parameter $L$ on the train set $Q_1^1$.} \label{Fig:others}
\end{figure}
GFN significantly outperforms the state-of-the-art algorithms on all test sets. GBN significantly outperforms the state-of-the-art algorithm on $Q_2^1$ (we obtain the $p$-values of paired $t$-tests for all the above differences on the test sets of queries; all these values are less than 0.005). However, GBN requires fewer iterations of the upper level (until it stops) than GBP with step sizes $50$ and $100$ on $Q_2^2,Q_2^3$.
Finally, we show that the Nesterov--Nemirovski method converges to the stationary distribution faster than the power method. In Figure~\ref{Fig:others}, we demonstrate the dependence of the value of the loss function on $Q_1^1$ on the iteration number for both methods of computing the untuned Supervised PageRank ($\varphi=\varphi_0=e_m$).
\section{\uppercase{Discussions and conclusions}}
\label{Conclusion}
Let us note that Theorem \ref{th_1} allows us to estimate the probability of large deviations using the obtained mean rate of convergence for Algorithm \ref{alg:GFPGM} (and hence Algorithm \ref{alg:RGFGM_learn}) in the following way. If $f(x)$ is $\tau$-strongly convex, then we prove (see Appendix) a geometric mean rate of convergence: the inequality ${\mathbb E}_{\mathcal{U}_{M-1}}f(x_{M}) - f^* \leq \varepsilon$ holds after $O\left(m\frac{L}{\tau}\ln\left(\frac{LD^2}{\varepsilon}\right)\right)$ iterations.
Using Markov's inequality, we obtain that after $O\left(m\frac{L}{\tau}\ln \left(\frac{LD^2}{\varepsilon\sigma}\right)\right)$ iterations the inequality $ f(x_M) - f^{*} \leq \varepsilon$ holds with probability greater than $1 - \sigma$, where $\sigma\in(0,1)$ is a desired confidence level. If the function $f(x)$ is convex but not strongly convex, then we can introduce regularization with the parameter $\tau = \varepsilon/D^2$ and minimize the function $f(x) + \frac{\tau}{2}\|x-\hat{x}\|_2^2$ ($\hat{x}$ is some point in the set $X$), which is strongly convex. This gives us that after $O\left(m\frac{LD^2}{\varepsilon}\ln \left(\frac{LD^2}{\varepsilon\sigma}\right)\right)$ iterations the inequality $f(x_M)- f^{*} \leq \varepsilon$ holds with probability greater than $1 - \sigma$.
We consider the problem of learning the parameters of Supervised PageRank models, which are based on calculating the stationary distributions of Markov random walks with transition probabilities depending on the parameters. Since the derivatives of the stationary distributions with respect to the parameters cannot be calculated exactly, we propose two two-level loss-minimization methods with inexact oracle to solve this problem instead of the previous gradient-based approach. For both proposed optimization algorithms, we find the settings of hyperparameters which give the lowest complexity (i.e., the number of arithmetic operations needed to achieve a given accuracy of the solution of the loss-minimization problem).
We apply our algorithms to the web page ranking problem by considering a discrete-time Markov random walk on the user browsing graph. Our experiments show that our gradient-free method outperforms the state-of-the-art gradient-based method. On one of the considered test sets, our gradient-based method outperforms the state-of-the-art method as well. On the other test sets, the differences in the values of the loss function are insignificant. Moreover, we prove that, under the assumption of local convexity of the loss function, our random gradient-free algorithm guarantees a decrease of the expected loss function value. At the same time, we theoretically justify that, without the convexity assumption, our gradient-based algorithm allows one to find a point where the stationarity condition is fulfilled with a given accuracy.
In the future, it would be interesting to apply our algorithms to other ranking problems.
\appendix
\setcounter{Lm}{0}
\renewcommand{\theLm}{\thesection.\arabic{Lm}}
\renewcommand{\theTh}{\thesection.\arabic{Th}}
\section{Missed proofs for Section \ref{S:f_nf_calc}}
\subsection{Proof of Lemma \ref{Lm:f_compl}}
\begin{Lm}
Let us fix some $q \in Q$. Let functions $F_q$, $G_q$ be defined in \varepsilonqref{eq:F_q_G_q_def}, $\pi^0_q(\varphi)$ be defined in \varepsilonqref{restart}, matrices $P_q(\varphi)$ be defined in \varepsilonqref{transition}. Assume that Method \ref{eq:pi_k+1_def}, \ref{eq:tpi_def} with
$$
N = \left\lceil \frac{1}{\alpha} \ln \frac{2}{\Delta_1} \right\rceil - 1
$$
is used to calculate the approximation $\tilde{\pi}_q^N (\varphi)$ to the ranking vector $\pi_q (\varphi)$ which is the solution of Equation~\ref{eq:pi_Phi_P2}. Then the vector $\tilde{\pi}_q^N (\varphi)$ satisfies
\begin{equation}
\| \tilde{\pi}^N_q (\varphi) - \pi_q (\varphi)\|_1 \leq \Delta_1
\langlebel{eq:Delta}
\varepsilonnd{equation}
and its calculation requires not more than
\begin{equation}
3 mp_qs_q + 3 p_qs_qN
\notag
\varepsilonnd{equation}
a.o.
and not more than
\begin{equation}
2p_q s_q
\notag
\end{equation}
memory items in addition to the memory needed to store all the data about the features and the matrices $A_q,b_q$, $q \in Q$.
\label{Lm:tpiN_compl}
\end{Lm}
{\bf Proof.\ }
As it is shown in~\cite{nesterov4} the vector $\tilde{\pi}^N_q (\varphi)$ \varepsilonqref{eq:tpi_def} satisfies
\begin{equation}
\| \tilde{\pi}^N_q (\varphi) - \pi_q (\varphi) \|_1 \leq 2 (1-\alpha)^{N+1}.
\langlebel{eq:tpiN_err}
\varepsilonnd{equation}
Since for any $\alpha \in (0,1]$ it holds that $\alpha \leq \ln \frac{1}{1-\alpha}$ we have from the lemma assumption that
$$
N+1 \geq \frac{1}{\alpha} \ln \frac{2}{\Delta_1} \geq \frac{\ln \frac{2}{\Delta_1}}{\ln \frac{1}{1-\alpha}}.
$$
This gives us that $2 (1-\alpha)^{N+1} \leq \Delta_1$ which in combination with \varepsilonqref{eq:tpiN_err} gives \varepsilonqref{eq:Delta}.
Let us estimate the number of a.o. and the amount of memory used for the calculations. We go through Method \ref{eq:pi_k+1_def}, \ref{eq:tpi_def} step by step and estimate from above the number of a.o. for each step. Since we need an upper bound on the total number of a.o. used by the whole algorithm, we update this upper bound (denoted by ${\rm TAO}$) by adding at each step the obtained upper bound on the number of a.o. for this step. At each step we also estimate from above (and denote this estimate by ${\rm MM}$) the maximum amount of memory used by Method \ref{eq:pi_k+1_def}, \ref{eq:tpi_def} up to the end of this step. Finally, at the end of each step we estimate from above by ${\rm UM}$ the amount of memory which is still occupied after the step is finished.
\begin{enumerate}
\item The first iteration of this method requires calculating $\pi = \pi_q^0$. The variable $\pi$ will store the current (in terms of steps in $k$) iterate $\pi_k$, which potentially has $p_q$ non-zero elements. In accordance with its definition \eqref{restart} and Equalities \ref{eq:F_q_G_q_def}, one has for all $i \in U_q$
$$
[\pi_q^0]_i=\frac{\langle\varphi_1,\mathbf{V}^q_i\rangle}{\sum_{j\in U_q}\langle\varphi_1,\mathbf{V}^q_{j}\rangle}
$$
\begin{enumerate}
\item We calculate $\langle\varphi_1,\mathbf{V}^q_i\rangle$ for all $i \in U_q$ and store the results. This requires $2 m_1 n_q $ a.o. and not more than $p_q$ memory items since $|U_q|=n_q \leq p_q$ and $\mathbf{V}^q_{i} \in \mathbb R^{m_1}$ for all $i \in U_q$.
\item We calculate $\frac{1}{\sum_{j\in U_q}\langle\varphi_1,\mathbf{V}^q_{j}\rangle}$, which requires $n_q$ a.o. and 2 memory items.
\item We calculate $\frac{\langle\varphi_1,\mathbf{V}^q_i\rangle}{\sum_{j\in U_q}\langle\varphi_1,\mathbf{V}^q_{j}\rangle}$ for all $i \in U_q$. This needs $n_q$ a.o. and no additional memory.
\end{enumerate}
So after this stage ${\rm MM}=p_q+2$, ${\rm UM} = p_q$, ${\rm TAO} = 2m_1 n_q + 2n_q$.
\item We need to calculate the elements of the matrix $P_q(\varphi)$. In accordance with \eqref{transition} and \eqref{eq:F_q_G_q_def}, one has
$$
[P_q(\varphi)]_{ij}=\frac{\langle\varphi_2,\mathbf{E}^q_{ij}\rangle}{\sum_{l:i \to l}\langle\varphi_2,\mathbf{E}^q_{il}\rangle}.
$$
This means that one needs to calculate $p_q$ vectors like $\pi_q^0$ on the previous step, but each with not more than $s_q$ non-zero elements and with the dimension of $\varphi_2$ equal to $m_2$. Thus we need $p_q(2m_2 s_q + 2 s_q)$ a.o. and not more than $p_q s_q + 2$ memory items in addition to the $p_q$ memory items already used. At the end of this stage we have ${\rm TAO} = 2m_1 n_q + 2n_q + p_q(2m_2 s_q + 2 s_q)$, ${\rm MM}= p_q + 2+p_q s_q$ and ${\rm UM}= p_q + p_q s_q$, since we store $\pi$ and $P_q(\varphi)$ in memory.
\item We set $\tilde{\pi}_q^N=\pi_q^0$ (this variable will store the current approximation of $\tilde{\pi}_q^N$, which potentially has $p_q$ non-zero elements). This requires $n_q$ a.o. and $p_q$ memory items. Also we set $a=(1-\alpha)$. At the end of this step we have ${\rm TAO} = 2m_1 n_q + 2n_q + p_q(2m_2 s_q + 2 s_q) + n_q + 1$, ${\rm MM}= p_q + 2+p_q s_q + p_q $ and ${\rm UM}=p_q + p_q s_q + p_q + 1$.
\item For every step from 1 to $N$
\begin{enumerate}
\item We set $\pi_1 = P_q^T(\varphi) \pi$. This requires not more than $2p_qs_q$ a.o. since the number of non-zero elements in the matrix $P_q^T(\varphi)$ is not more than $p_qs_q$ and we need to multiply each element by some element of $\pi$ and add it to the sum. Also we need $p_q$ memory items to store $\pi_1$.
\item We set $\tilde{\pi}_q^N=\tilde{\pi}_q^N+a\pi_1$ which requires $2p_q$ a.o.
\item We set $a=(1-\alpha)a$.
\end{enumerate}
At the end of this step we have ${\rm TAO} = 2m_1 n_q + 2n_q + p_q(2m_2 s_q + 2 s_q) + n_q + 1 + N (2p_qs_q +2p_q+1) $, ${\rm MM}= p_q + 2+p_q s_q + p_q +p_q $ and ${\rm UM}=p_q + p_q s_q + p_q + 1 + p_q$.
\item Set $\tilde{\pi}_q^N=\frac{\alpha}{1-(1-\alpha)a}\tilde{\pi}_q^N$. This takes $3+p_q$ a.o.
\end{enumerate}
So at the end we get
${\rm TAO} = 2m_1 n_q + 2n_q + p_q(2m_2 s_q + 2 s_q) + n_q + 1 + N (2p_qs_q +2p_q+1) +p_q+3 \leq 3 mp_qs_q + 3 p_qs_qN $, ${\rm MM}= p_q + 2+p_q s_q + p_q +p_q \leq 2p_q s_q $ and ${\rm UM}=p_q$.
\begin{Rem}
\label{Rm:1}
Note that we can also store in memory all the calculated quantities $\langle\varphi_1,\mathbf{V}^q_i\rangle$ for all $i \in U_q$, $\langle\varphi_2,\mathbf{E}^q_{ij}\rangle$ for all $i,j=1,\dots,p_q$ s.t. $i \to j \in E_q$, $\sum_{j\in U_q}\langle\varphi_1,\mathbf{V}^q_{j}\rangle$, $\sum_{l:i \to l}\langle\varphi_2,\mathbf{E}^q_{il}\rangle$ in case we need them later. This requires not more than $n_q+p_qs_q+1+p_q$ memory items.
\end{Rem}
\begin{Lm}
\langlebel{Lm:A_pi_b_err}
Assume that $\pi_1,\pi_2 \in S_{p_q}(1) = \{ \pi \in \mathbb R^{p_q}_+: e_{p_q}^T \pi =1\}$. Assume also that inequality $\|\pi_1-\pi_2\|_k \leq \Delta_1$ holds
for some $k \in \{1,2,\infty\}$. Then
\begin{equation}
|\|(A_q \pi_1 +b_q)_{+} \|_2 - \|(A_q \pi_2 +b_q)_{+} \|_2 | \leq 2 \Delta_1 \sqrt{r_q}
\langlebel{eq:A_pi_b_2_err}
\varepsilonnd{equation}
\begin{equation}
\|(A_q \pi_1 +b_q)_{+} - (A_q \pi_2 +b_q)_{+} \|_{\infty} \leq 2 \Delta_1
\langlebel{eq:A_pi_b_inf_err}
\varepsilonnd{equation}
\begin{equation}
\|(A_q \pi_1 +b_q)_{+} \|_2 \leq \sqrt{r_q}
\langlebel{eq:A_pi_b_2_est}
\varepsilonnd{equation}
\begin{equation}
\|(A_q \pi_1 +b_q)_{+} \|_{\infty} \leq 1
\langlebel{eq:A_pi_b_inf_est}
\varepsilonnd{equation}
\varepsilonnd{Lm}
{\bf Proof.\ }
Note that for any $k \in \{1,2,\infty\}$ it holds that $|[\pi_1]_i-[\pi_2]_i| \leq \Delta_1$ for all $i\in 1,\dots,{p_q}$. Using the Lipschitz continuity with constant 1 of the 2-norm, we get
\begin{equation}
|\|(A_q \pi_1 +b_q)_{+} \|_2 - \|(A_q \pi_2 +b_q)_{+} \|_2| \leq \|(A_q \pi_1 +b_q)_{+} - (A_q \pi_2 +b_q)_{+} \|_2
\langlebel{eq:Lm:A_pi_b_err_1}
\varepsilonnd{equation}
Note that every row of the matrix $A_q$ contains one 1 and one -1 and all other elements in the row are equal to zero. Using Lipschitz continuity with constant 1 of the function $(\cdot)_+$ we obtain for all $i\in 1,\dots,{r_q}$.
$$
|[(A_q \pi_1 +b_q)_{+}]_i - [(A_q \pi_2 +b_q)_{+}]_i| \leq |[\pi_1]_k-[\pi_1]_j - [\pi_2]_k+[\pi_2]_j| \leq 2 \Delta_1,
$$
where $k: [A_q]_{ik} = 1$, $j: [A_q]_{ij} = -1$.
This with \varepsilonqref{eq:Lm:A_pi_b_err_1} leads to \varepsilonqref{eq:A_pi_b_2_err}. Similarly one obtains \varepsilonqref{eq:A_pi_b_inf_err}.
Now let us fix some $i\in 1,\dots,{r_q}$. Then $|[(A_q \pi_1 +b_q)_{+}]_i| = |([\pi_1]_k-[\pi_1]_j + b_i)_+|$. Since $\pi_1 \in S_{p_q}(1)$ it holds that $[\pi_1]_k-[\pi_1]_j \in [-1,1]$. This together with inequalities $0<b_{i}<1$ leads to estimate $|([\pi_1]_k-[\pi_1]_j + b_i)_+| \leq 1$. Now \varepsilonqref{eq:A_pi_b_2_est} and \varepsilonqref{eq:A_pi_b_inf_est} become obvious.
\begin{Lm}
Assume that vectors $\tilde{\pi}_q, q \in Q$ satisfy the following inequalities
$$
\|\tilde{\pi}_q-\pi_q(\varphi)\|_k \leq \Delta_1, \quad \forall q=1,...,|Q|,
$$
for some $k \in \{1,2,\infty\}$. Then
\begin{equation}
\tilde{f}(\varphi) = \frac{1}{|Q|} \sum_{q=1}^{|Q|} \|(A_q \tilde{\pi}_q +b_q)_{+} \|^2_2
\langlebel{eq:fdvpDef}
\varepsilonnd{equation}
satisfies $|\tilde{f}(\varphi) - f(\varphi)| \leq 4 r \Delta_1$, where $f(\varphi)$ is defined in \varepsilonqref{eq:f_phi_def_2}.
\langlebel{Lm:Delta_to_delta}
\varepsilonnd{Lm}
{\bf Proof.\ }
For fixed $q \in Q$ we have
\begin{align}
&|\|(A_q \tilde{\pi}_q +b_q)_{+} \|^2_2 - \|(A_q \pi_q(\varphi) +b_q)_{+} \|^2_2| = \notag \\
& = |\|(A_q \tilde{\pi}_q +b_q)_{+} \|_2 - \|(A_q \pi_q(\varphi) +b_q)_{+} \|_2| \cdot \left(\|(A_q \tilde{\pi}_q +b_q)_{+} \|_2 + \|(A_q \pi_q(\varphi) +b_q)_{+} \|_2 \right) \stackrel{\varepsilonqref{eq:A_pi_b_2_err}, \varepsilonqref{eq:A_pi_b_2_est}}{\leq} \notag \\
& \leq 4 \Delta_1 r_q. \notag
\varepsilonnd{align}
Using \varepsilonqref{eq:f_phi_def_2} and \varepsilonqref{eq:fdvpDef} we obtain the statement of the lemma.
\noindent \textbf{The proof of Lemma \ref{Lm:f_compl}.}
Inequality~\ref{eq:tf_error} follows from Lemma~\ref{Lm:tpiN_compl} and Lemma \ref{Lm:Delta_to_delta}.
We use the same notations ${\rm TAO}$, ${\rm MM}$, $\mathcal{U}M$ as in the proof of Lemma \ref{Lm:tpiN_compl}.
\begin{enumerate}
\item We reserve the variable $a$ to store the current (in terms of steps in $q$) sum of summands in \eqref{eq:tfN1_def}, the variable $b$ to store the next summand in this sum, and the vector $\pi$ to store the approximation of $\tilde{\pi}_q^{N}(\varphi)$ for the current $q \in Q$. So ${\rm TAO}=0$, ${\rm MM}={\rm UM} = 2+p_q$.
\item For every $q \in Q$ repeat.
\begin{enumerate}[label*=\arabic*.]
\item Set $\pi =\tilde{\pi}_q^{N}(\varphi)$. According to Lemma \ref{Lm:tpiN_compl}, we obtain ${\rm TAO}=3 mp_qs_q + 3 p_qs_qN$, ${\rm MM}=2p_q s_q+p_q+2$, ${\rm UM} = p_q+2$.
\item Calculate $u=(A_q \tilde{\pi}_q^{N}(\varphi) +b_q)_{+} $. This requires additionally $3r_q$ a.o. and $r_q$ memory items.
\item Set $b=\|u\|_2^2$. This requires additionally $2r_q$ a.o.
\item Set $a=a+b$. This requires additionally $1$ a.o.
\end{enumerate}
\item Set $a=\frac{1}{|Q|}a$. This requires additionally $1$ a.o.
\item At the end we have ${\rm TAO}=\sum_{q\in Q}(3 mp_qs_q + 3 p_qs_qN+5r_q+1)+1 \leq |Q|(3 mps + 3 psN+6r)$, ${\rm MM}=\max_{q\in Q}(2p_q s_q+p_q)+2 \leq 3ps $, ${\rm UM} = 1$.
\end{enumerate}
\subsection{The proof of Lemma \ref{Lm:nf_compl}}
We use the following norms on the space of matrices $A \in \mathbb R^{n_1\times n_2}$
$$
\|A\|_1 = \max \{ \|Ax\|_1 : x \in \mathbb R^{n_2}, \|x\|_1 = 1 \} = \max_{j = 1,...,n_2} \sum_{i=1}^{n_1} |a_{ij}|,
$$
where the 1-norm of the vector $x \in \mathbb R^{n_2}$ is $\|x\|_1 = \sum_{i=1}^{n_2} |x_i|$.
$$
\|A\|_{\infty} = \max \{ \|Ax\|_{\infty} : x \in \mathbb R^{n_2}, \|x\|_{\infty} = 1 \} = \max_{i = 1,...,n_1} \sum_{j=1}^{n_2} |a_{ij}|,
$$
where the $\infty$-norm of the vector $x \in \mathbb R^{n_2}$ is $\|x\|_{\infty} = \max_{i = 1,...,n_2} |x_i|$.
Note that both matrix norms possess submultiplicative property
\begin{equation}
\|AB\|_1 \leq \|A\|_1 \|B\|_1, \quad \|AB\|_{\infty} \leq \|A\|_{\infty} \|B\|_{\infty}
\label{eq:sub_mult}
\end{equation}
for any pair of compatible matrices $A,B$.
\begin{Lm}
Let us fix some $q\in Q$. Let $\Pi^0_q(\varphi)$ be defined in \varepsilonqref{eq:Pi_q_0_def}, $\pi^0_q(\varphi)$ be defined in \varepsilonqref{restart}, $p_i(\varphi)^T$, $i \in 1,\dots,p_q$ be the $i$-th row of the matrix $P_q(\varphi)$ defined in \varepsilonqref{transition}. Then for the chosen functions $F_q$, $G_q$ \varepsilonqref{eq:F_q_G_q_def} and set $\Phi$ in \varepsilonqref{eq:prob_form} the following inequality holds.
\begin{equation}
\|\Pi^0_q(\varphi)\|_1 \leq \alpha \left\|\frac{d \pi^0_q(\varphi)}{d \varphi^T} \right\|_1 + (1-\alpha) \sum_{i=1}^{p_q} \left\|\frac{ d p_i(\varphi)}{d \varphi^T} \right\|_1 \leq \beta_1 \quad \forall \varphi \in \Phi,
\langlebel{eq:Pi_0_est}
\varepsilonnd{equation}
where
\begin{align}
& \beta_1 = 2\alpha \frac{\left\langle \hat{\varphi}_{1}, \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\rangle + R \left\| \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\|_2}{\left( \left\langle \hat{\varphi}_{1}, \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\rangle - R \left\| \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\|_2\right)^2} \max_{j \in 1,...,m_1} \left[\sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i}\right]_j + \notag \\
& + 2(1-\alpha) \sum_{i=1}^{p_q} \frac{\left\langle \hat{\varphi}_{2}, \sum_{\tilde i\in N_q(i)}\mathbf{E}^q_{i \tilde i} \right\rangle + R \left\| \sum_{\tilde i\in N_q(i)}\mathbf{E}^q_{ i \tilde i} \right\|_2}{\left( \left\langle \hat{\varphi}_{2}, \sum_{\tilde i\in N_q(i)}\mathbf{E}^q_{i \tilde i} \right\rangle - R \left\| \sum_{\tilde i\in N_q(i)}\mathbf{E}^q_{i \tilde i} \right\|_2\right)^2} \max_{j \in 1,...,m_2} \left[\sum_{\tilde i\in N_q(i)}\mathbf{E}^q_{i \tilde i}\right]_j \langlebel{beta_1}
\varepsilonnd{align}
and $N_q(i)=\{j\in V_q: i \to j \in E_q \}$, $ \hat{\varphi}_{1} \in \mathbb R^{m_1}$ -- first $m_1$ components of the vector $\hat{\varphi}$, $ \hat{\varphi}_{2} \in \mathbb R^{m_2}$ -- second $m_2$ components of the vector $\hat{\varphi}$.
\langlebel{Lm:Pi_0_est}
\varepsilonnd{Lm}
{\bf Proof.\ }
The first inequality follows from the definition of $\Pi^0_q(\varphi)$ \eqref{eq:Pi_q_0_def}, the triangle inequality for the matrix norm, and the inequalities $|[\pi_q(\varphi)]_i|\leq 1$, $i=1,\dots,p_q$.
Let us now estimate $\left\|\frac{d \pi^0_q(\varphi)}{d \varphi^T} \right\|_1$. Note that $\varphi=(\varphi_1,\varphi_2)$. From \eqref{eq:F_q_G_q_def}, \eqref{restart} we know that $\frac{d \pi^0_q(\varphi)}{d \varphi_2^T} = 0$. First we estimate the absolute value of the element in the $i$-th row and $j$-th column of the matrix $\frac{d \pi^0_q(\varphi)}{d \varphi_1^T}$. We use the fact that $\varphi > 0$ for all $\varphi \in \Phi$ and that for all $i\in U_q$ the vectors $\mathbf{V}^q_{i}$ are non-negative and have at least one positive component.
\begin{align}
& \left|\frac{d \left[\pi^0_q(\varphi)\right]_i}{d [\varphi_1]_j} \right| = \left|\frac{1}{\sum_{\tilde i\in U_q}\langlengle\varphi_1,\mathbf{V}^q_{\tilde i}\ranglengle} \left[\mathbf{V}^q_i\right]_j - \frac{\langlengle\varphi_1,\mathbf{V}^q_{i}\ranglengle}{\left(\sum_{\tilde i\in U_q}\langlengle\varphi_1,\mathbf{V}^q_{\tilde i}\ranglengle \right)^2} \left[\sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i}\right]_j \right| = \notag \\
& = \frac{1}{\left(\sum_{\tilde i\in U_q}\langlengle\varphi_1,\mathbf{V}^q_{\tilde i}\ranglengle \right)^2} \left|\sum_{\tilde i\in U_q}\langlengle\varphi_1,\mathbf{V}^q_{\tilde i}\ranglengle \left[\mathbf{V}^q_i\right]_j - \langlengle\varphi_1,\mathbf{V}^q_{i}\ranglengle \left[\sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i}\right]_j \right| \leq \notag \\
& \leq \frac{1}{\left( \left\langle \hat{\varphi}_{1}, \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\rangle - R \left\| \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\|_1\right)^2} \left\{ \left( \sum_{\tilde i\in U_q}\langlengle\varphi_1,\mathbf{V}^q_{\tilde i}\ranglengle \right) \left[\mathbf{V}^q_i\right]_j + \langlengle\varphi_1,\mathbf{V}^q_{i}\ranglengle \left[\sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i}\right]_j \right\}. \notag
\varepsilonnd{align}
Here we used the fact that
$$
\min_{\varphi \in \Phi} \sum_{\tilde i\in U_q}\langlengle\varphi_1,\mathbf{V}^q_{\tilde i}\ranglengle = \left\langle \hat{\varphi}_{1}, \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\rangle - R \left\| \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\|_2.
$$
Then the 1-norm of the $j$-th column of the matrix $\frac{d \pi^0_q(\varphi)}{d \varphi_1^T}$ satisfies
\begin{align}
& \sum_{i \in U_q} \left|\frac{d \left[\pi^0_q(\varphi)\right]_i}{d [\varphi_1]_j} \right| \leq \frac{2}{\left( \left\langle \hat{\varphi}_{1}, \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\rangle - R \left\| \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\|_2\right)^2} \left( \sum_{\tilde i\in U_q}\langlengle\varphi_1,\mathbf{V}^q_{\tilde i}\ranglengle \right) \left[\sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i}\right]_j \leq \notag \\
& \leq 2 \frac{\left\langle \hat{\varphi}_{1}, \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\rangle + R \left\| \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\|_2}{\left( \left\langle \hat{\varphi}_{1}, \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\rangle - R \left\| \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\|_2\right)^2} \left[\sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i}\right]_j. \notag
\varepsilonnd{align}
Here we used the fact that
$$
\max_{\varphi \in \Phi} \sum_{\tilde i\in U_q}\langlengle\varphi_1,\mathbf{V}^q_{\tilde i}\ranglengle = \left\langle \hat{\varphi}_{1}, \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\rangle + R \left\| \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\|_2.
$$
Now we have
$$
\left\|\frac{d \pi^0_q(\varphi)}{d \varphi^T} \right\|_1 \leq 2 \frac{\left\langle \hat{\varphi}_{1}, \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\rangle + R \left\| \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\|_2}{\left( \left\langle \hat{\varphi}_{1}, \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\rangle - R \left\| \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\|_2\right)^2} \max_{j \in 1,...,m_1} \left[\sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i}\right]_j.
$$
In the same manner we obtain the following estimate
$$
\left\|\frac{d p_i(\varphi)}{d \varphi^T} \right\|_1 \leq 2 \frac{\left\langle \hat{\varphi}_{2}, \sum_{\tilde i\in N_q(i)}\mathbf{E}^q_{i \tilde i} \right\rangle + R \left\| \sum_{\tilde i\in N_q(i)}\mathbf{E}^q_{ i \tilde i} \right\|_2}{\left( \left\langle \hat{\varphi}_{2}, \sum_{\tilde i\in N_q(i)}\mathbf{E}^q_{i \tilde i} \right\rangle - R \left\| \sum_{\tilde i\in N_q(i)}\mathbf{E}^q_{i \tilde i} \right\|_2\right)^2} \max_{j \in 1,...,m_2} \left[\sum_{\tilde i\in N_q(i)}\mathbf{E}^q_{i \tilde i}\right]_j,
$$
where $ N_q(i) = \{k \in V_q: i \to k \in E_q\}$.
Finally we have that
\begin{align}
& \|\Pi^0_q(\varphi)\|_1 \leq 2\alpha \frac{\left\langle \hat{\varphi}_{1}, \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\rangle + R \left\| \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\|_2}{\left( \left\langle \hat{\varphi}_{1}, \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\rangle - R \left\| \sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i} \right\|_2\right)^2} \max_{j \in 1,...,m_1} \left[\sum_{\tilde i\in U_q}\mathbf{V}^q_{\tilde i}\right]_j + \notag \\
& + 2(1-\alpha) \sum_{i=1}^{p_q} \frac{\left\langle \hat{\varphi}_{2}, \sum_{\tilde i\in N_q(i)}\mathbf{E}^q_{i \tilde i} \right\rangle + R \left\| \sum_{\tilde i\in N_q(i)}\mathbf{E}^q_{ i \tilde i} \right\|_2}{\left( \left\langle \hat{\varphi}_{2}, \sum_{\tilde i\in N_q(i)}\mathbf{E}^q_{i \tilde i} \right\rangle - R \left\| \sum_{\tilde i\in N_q(i)}\mathbf{E}^q_{i \tilde i} \right\|_2\right)^2} \max_{j \in 1,...,m_2} \left[\sum_{\tilde i\in N_q(i)}\mathbf{E}^q_{i \tilde i}\right]_j. \notag
\varepsilonnd{align}
This finishes the proof.
Let us assume that we have some approximation $\tilde{\pi} \in S_{p_q}(1)$ to the vector $\pi_q(\varphi)$. We define
\begin{equation}
\tilde{\Pi}_0 = \alpha \frac{d \pi^0_q(\varphi)}{d \varphi^T} + (1-\alpha) \sum_{i=1}^{p_q} \frac{ d p_i(\varphi)}{d \varphi^T} [\tilde{\pi}]_i
\langlebel{eq:tPi_0_def}
\varepsilonnd{equation}
and consider the Method \ref{eq:Pik+1_def}, \ref{eq:tPi_def} with the starting point $\Pi_0=\tilde{\Pi}_0$. Then Method \ref{eq:Pi_0_def}, \ref{eq:Pik+1_def}, \ref{eq:tPi_def} is a particular case of this general method with $\tilde{\pi}_q^N(\varphi)$ used as approximation $\tilde{\pi}$.
\begin{Lm}
Let us fix some $q \in Q$. Let $\Pi^0_q(\varphi)$ be defined in \varepsilonqref{eq:Pi_q_0_def} and $\tilde{\Pi}_0$ be defined in \varepsilonqref{eq:tPi_0_def}, where $\pi^0_q(\varphi)$ is defined in \varepsilonqref{restart}, $p_i(\varphi)^T$, $i \in 1,\dots,p_q$ is the $i$-th row of the matrix $P_q(\varphi)$ defined in \varepsilonqref{transition}. Assume that the vector $\tilde{\pi}$ satisfies $\|\tilde{\pi}-\pi_q(\varphi)\|_1 \leq \Delta_1$. Then for the chosen functions $F_q$, $G_q$ \varepsilonqref{eq:F_q_G_q_def} and set $\Phi$ it holds that.
\begin{equation}
\|\tilde{\Pi}_0 - \Pi_q^0(\varphi)\|_1 \leq \beta_1 \Delta_1 \quad \forall \varphi \in \Phi,
\langlebel{eq:tPi_0-Piq_0_est}
\varepsilonnd{equation}
where $\beta_1$ is defined in \varepsilonqref{beta_1}.
\langlebel{Lm:tPi_0-Piq_0_est}
\varepsilonnd{Lm}
{\bf Proof.\ }
\begin{align}
&\|\tilde{\Pi}_0 - \Pi_q^0(\varphi)\|_1 \stackrel{\varepsilonqref{eq:pi_q_full_der},\varepsilonqref{eq:tPi_0_def}}{=} (1-\alpha) \left\| \sum_{i=1}^{p_q} \frac{ d p_i(\varphi)}{d \varphi^T} \left(\tilde{\pi}_i-[\pi_q(\varphi)]_i\right) \right\|_1 \leq \notag \\
& \leq (1-\alpha) \sum_{i=1}^{p_q} \left\| \frac{ d p_i(\varphi)}{d \varphi^T}\right\|_1 \left|\tilde{\pi}_i-[\pi_q(\varphi)]_i\right| \stackrel{\varepsilonqref{eq:Pi_0_est}}{\leq} \beta_1 \Delta_1 . \notag
\varepsilonnd{align}
\begin{Lm}
Let us fix some $q \in Q$. Let $\tilde{\Pi}_0$ be defined in \varepsilonqref{eq:tPi_0_def}, where $\pi^0_q(\varphi)$ is defined in \varepsilonqref{restart}, $p_i(\varphi)^T$, $i \in 1,\dots,p_q$ is the $i$-th row of the matrix $P_q(\varphi)$ defined in \varepsilonqref{transition}, $\tilde{\pi} \in S_{p_q}(1)$. Let the sequence $\Pi_k$, $k \geq 0$ be defined in \varepsilonqref{eq:Pik+1_def}, \varepsilonqref{eq:tPi_def} with starting point $\Pi_0=\tilde{\Pi}_0$. Then for the chosen functions $F_q$, $G_q$ \varepsilonqref{eq:F_q_G_q_def} and set $\Phi$ for all $k \geq 0$ it holds that
\begin{equation}
\|\Pi_k \|_1 \leq \beta_1, \quad \forall \varphi \in \Phi,
\langlebel{eq:tPi_k_est}
\varepsilonnd{equation}
\begin{equation}
\left\|\left[ P_q^T(\varphi) \right]^k \Pi^0_q(\varphi)\right\|_1 \leq \beta_1 , \quad \forall \varphi \in \Phi.
\label{eq:Pk_Pi_0_est}
\end{equation}
Here $\Pi_q^0(\varphi)$ is defined in \eqref{eq:Pi_q_0_def}, $\beta_1$ is defined in \eqref{beta_1}.
\label{Lm:tPi_k_est}
\end{Lm}
{\bf Proof.\ }
Similarly to Lemma \ref{Lm:Pi_0_est}, one can prove that $\|\tilde{\Pi}_0 \|_1 \leq \beta_1$.
Note that all elements of the matrix $P_q^T(\varphi)$ are nonnegative for all $\varphi \in \Phi$. Also the matrix $P_q(\varphi)$ is row-stochastic: $P_q(\varphi)e_{p_q}=e_{p_q}$. Hence the maximum column 1-norm of $P_q^T(\varphi)$ equals 1, i.e. $\|P_q^T(\varphi)\|_1=1$. Using the submultiplicative property \eqref{eq:sub_mult} of the matrix 1-norm we obtain by induction that
$$
\|\Pi_{k+1} \|_1 = \|P_q^T(\varphi) \Pi_{k} \|_1 \leq \|P_q^T(\varphi)\|_1 \| \Pi_{k} \|_1 \leq \beta_1.
$$
Inequality \ref{eq:Pk_Pi_0_est} is proved in the same way, using Lemma \ref{Lm:Pi_0_est} as the induction basis.
\begin{Lm}
Let the assumptions of Lemma \ref{Lm:tPi_k_est} hold. Then for any $N > 1$
\begin{equation}
\|\tilde{\Pi}_q^N(\varphi) \|_1 \leq \frac{\beta_1}{\alpha}, \quad \forall \varphi \in \Phi,
\label{eq:tPi_N_est}
\end{equation}
where $\tilde{\Pi}_q^N(\varphi)$ is calculated by Method \ref{eq:Pik+1_def}, \ref{eq:tPi_def} with the starting point $\Pi_0=\tilde{\Pi}_0$, $\beta_1$ is defined in \eqref{beta_1}.
\label{Lm:tPi_N_est}
\end{Lm}
{\bf Proof.\ }
Using the triangle inequality for the matrix 1-norm we obtain
$$
\|\tilde{\Pi}_q^N(\varphi) \|_1 = \left\|\frac{1}{1-(1-\alpha)^{N+1}} \sum_{k=0}^N (1-\alpha)^k \Pi_k \right\|_1 \leq \frac{1}{1-(1-\alpha)^{N+1}} \sum_{k=0}^N (1-\alpha)^k \|\Pi_k \|_1 \stackrel{\eqref{eq:tPi_k_est}}{\leq} \frac{\beta_1}{\alpha}.
$$
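The last bound uses the exact value of the finite geometric sum:
$$
\frac{1}{1-(1-\alpha)^{N+1}} \sum_{k=0}^N (1-\alpha)^k = \frac{1}{1-(1-\alpha)^{N+1}} \cdot \frac{1-(1-\alpha)^{N+1}}{\alpha} = \frac{1}{\alpha},
$$
so the normalising prefactor and the sum cancel exactly.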
\begin{Lm}
Let us fix some $q \in Q$. Let $\tilde{\Pi}_q^N(\varphi)$ be calculated by Method \ref{eq:Pik+1_def}, \ref{eq:tPi_def} with starting point $\Pi_0=\tilde{\Pi}_0$ and $\frac{d \pi_q(\varphi)}{d \varphi^T}$ be given in \eqref{eq:pi_q_full_der}, where $\pi^0_q(\varphi)$ is defined in \eqref{restart}, $p_i(\varphi)^T$, $i \in 1,\dots,p_q$ is the $i$-th row of the matrix $P_q(\varphi)$ defined in \eqref{transition}. Assume that the vector $\tilde{\pi} \in S_{p_q}(1)$ in \eqref{eq:tPi_0_def} satisfies $\|\tilde{\pi}-\pi_q(\varphi)\|_1 \leq \Delta_1$. Then for the chosen functions $F_q$, $G_q$ \eqref{eq:F_q_G_q_def} and set $\Phi$, for all $N > 1$ it holds that
\begin{equation}
\left\| \tilde{\Pi}_q^N(\varphi) - \frac{d \pi_q(\varphi)}{d \varphi^T} \right\|_1 \leq \frac{\beta_1 \Delta_1}{\alpha} + \frac{2\beta_1}{\alpha} (1-\alpha)^{N+1}, \quad \forall \varphi \in \Phi,
\label{eq:npi_est}
\end{equation}
where $\beta_1$ is defined in \eqref{beta_1}.
\label{Lm:npi_est}
\end{Lm}
{\bf Proof.\ }
Using \eqref{eq:tPi_0-Piq_0_est} as the induction basis and using the same arguments as in the proof of Lemma \ref{Lm:tPi_k_est}, we obtain for every $k \geq 0$
\begin{align}
& \left\|\Pi_{k+1} - [P_q^T(\varphi)]^{k+1} \Pi_q^0(\varphi) \right\|_1 = \left\|P_q^T(\varphi) \left( \Pi_{k} - [P_q^T(\varphi)]^{k} \Pi_q^0(\varphi)\right) \right\|_1 \leq \notag \\
& \leq \left\|P_q^T(\varphi)\right\|_1 \left\|\Pi_{k} - [P_q^T(\varphi)]^{k} \Pi_q^0(\varphi)\right\|_1 \leq \beta_1 \Delta_1.\notag
\end{align}
Since $\|(1-\alpha) P_q^T(\varphi)\|_1 = 1-\alpha < 1$, Equation \ref{eq:pi_q_full_der} can be rewritten as the convergent Neumann series
\begin{equation}
\frac{d \pi_q(\varphi)}{d \varphi^T} = \left[I - (1-\alpha) P_q^T(\varphi) \right]^{-1} \Pi^0_q(\varphi) = \sum_{k=0}^\infty (1-\alpha)^k\left[ P_q^T(\varphi) \right]^k \Pi^0_q(\varphi).
\label{eq:npi_q_def}
\end{equation}
Using this equality and the previous inequality we obtain
\begin{align}
& \left\| \sum_{k=0}^\infty (1-\alpha)^k \Pi_k - \frac{d \pi_q(\varphi)}{d \varphi^T} \right\|_1 =
\left\| \sum_{k=0}^\infty (1-\alpha)^k \Pi_k -\sum_{k=0}^\infty (1-\alpha)^k\left[ P_q^T(\varphi) \right]^k \Pi^0_q(\varphi) \right\|_1 \leq \notag \\
& \leq \sum_{k=0}^\infty (1-\alpha)^k \left\|\Pi_k - \left[ P_q^T(\varphi) \right]^k \Pi^0_q(\varphi) \right\|_1 \leq \frac{\beta_1 \Delta_1}{\alpha}.
\label{eq:Th:npi_est_1}
\end{align}
On the other hand
\begin{align}
& \left\| \tilde{\Pi}_q^N(\varphi) - \sum_{k=0}^\infty (1-\alpha)^k\Pi_k \right\|_1 \stackrel{\eqref{eq:tPi_def}}{=} \notag \\
& = \left\| \frac{1}{1-(1-\alpha)^{N+1}} \sum_{k=0}^N (1-\alpha)^k \Pi_k - \sum_{k=0}^\infty (1-\alpha)^k\Pi_k \right\|_1 = \notag \\
& = \left\| \frac{(1-\alpha)^{N+1}}{1-(1-\alpha)^{N+1}} \sum_{k=0}^N (1-\alpha)^k \Pi_k - \sum_{k=N+1}^\infty (1-\alpha)^k\Pi_k \right\|_1 \stackrel{\eqref{eq:tPi_k_est}}{\leq } \notag \\
& \leq \frac{\beta_1 (1-\alpha)^{N+1}}{1-(1-\alpha)^{N+1}} \sum_{k=0}^N (1-\alpha)^k + \beta_1 \sum_{k=N+1}^\infty (1-\alpha)^k = \frac{2\beta_1}{\alpha} (1-\alpha)^{N+1}. \notag
\end{align}
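In the last step both geometric sums are evaluated explicitly:
$$
\frac{(1-\alpha)^{N+1}}{1-(1-\alpha)^{N+1}} \sum_{k=0}^N (1-\alpha)^k = \frac{(1-\alpha)^{N+1}}{\alpha}, \qquad \sum_{k=N+1}^\infty (1-\alpha)^k = \frac{(1-\alpha)^{N+1}}{\alpha},
$$
which together give the factor $\frac{2\beta_1}{\alpha}(1-\alpha)^{N+1}$.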
This inequality together with \eqref{eq:Th:npi_est_1} gives \eqref{eq:npi_est}.
\begin{Lm}
Assume that for every $q \in Q$ an approximation $\tilde{\pi}_q(\varphi)$ to the ranking vector, satisfying $\|\tilde{\pi}_q(\varphi)-\pi_q(\varphi)\|_1 \leq \Delta_1$, is available. Assume also that for every $q \in Q$ an approximation $\tilde{\Pi}_q(\varphi)$ to the full derivative of the ranking vector $\frac{d \pi_q(\varphi)}{d \varphi^T}$, given as the solution of \eqref{eq:pi_q_full_der} and satisfying
$$
\left\|\tilde{\Pi}_q(\varphi)-\frac{d \pi_q(\varphi)}{d \varphi^T}\right\|_1 \leq \Delta_2
$$
is available.
Let us define
\begin{equation}
\tilde{\nabla} f(\varphi) = \frac{2}{|Q|} \sum_{q=1}^{|Q|} \left(\tilde{\Pi}_q(\varphi) \right)^T A_q^T (A_q \tilde{\pi}_q(\varphi) +b_q)_{+}.
\label{eq:tnf_def}
\end{equation}
Then
\begin{equation}
\left\| \tilde{\nabla} f(\varphi) - \nabla f(\varphi)\right\|_{\infty} \leq 2r \Delta_2 + 4r \Delta_1 \max_{q\in Q} \left\| \tilde{\Pi}_q(\varphi) \right\|_1,
\label{eq:tnf-nf_est}
\end{equation}
where $\nabla f(\varphi)$ is the gradient \eqref{eq:nf} of the function $f(\varphi)$ \eqref{eq:f_phi_def_2}.
\label{Lm:tnf}
\end{Lm}
{\bf Proof.\ }
Let us fix any $q \in Q$. Then we have
\begin{align}
& \left\| \left(\tilde{\Pi}_q(\varphi) \right)^T A_q^T (A_q \tilde{\pi}_q(\varphi) +b_q)_{+} - \left(\frac{d \pi_q(\varphi)}{d \varphi^T} \right)^T A_q^T (A_q \pi_q(\varphi) +b_q)_{+} \right\|_{\infty} \leq \notag \\
& \leq \left\| \left(\tilde{\Pi}_q(\varphi) \right)^T A_q^T (A_q \tilde{\pi}_q(\varphi) +b_q)_{+} - \left(\tilde{\Pi}_q(\varphi) \right)^T A_q^T (A_q \pi_q(\varphi) +b_q)_{+} \right\|_{\infty} + \notag \\
& + \left\| \left(\tilde{\Pi}_q(\varphi) \right)^T A_q^T (A_q \pi_q(\varphi) +b_q)_{+}
- \left(\frac{d \pi_q(\varphi)}{d \varphi^T} \right)^T A_q^T (A_q \pi_q(\varphi) +b_q)_{+} \right\|_{\infty} \leq \notag \\
& \leq \left\| \tilde{\Pi}_q(\varphi) \right\|_1 \left\| A_q \right\|_1 \left\| (A_q \pi_q(\varphi) +b_q)_{+} - (A_q \tilde{\pi}_q(\varphi) +b_q)_{+} \right\|_{\infty} + \notag \\
& + \left\| \tilde{\Pi}_q(\varphi) - \frac{d \pi_q(\varphi)}{d \varphi^T} \right\|_1 \left\| A_q \right\|_1 \left\|(A_q \pi_q(\varphi) +b_q)_{+} \right\|_{\infty} \stackrel{\eqref{eq:A_pi_b_inf_err},\eqref{eq:A_pi_b_inf_est}}{\leq} \left\| \tilde{\Pi}_q(\varphi) \right\|_1 \cdot r \cdot 2 \Delta_1 + \Delta_2 \cdot r \cdot 1 . \notag
\end{align}
Here we used that $A_q \in \mathbb R^{r_q \times p_q}$ has elements equal to either 0 or 1, that $r_q \leq r$ for all $q \in Q$, and that $\|M^T\|_{\infty}=\|M\|_{1}$ for any matrix $M \in \mathbb R^{n_1 \times n_2}$.
Using this inequality and definitions \eqref{eq:nf}, \eqref{eq:tnf_def} we obtain \eqref{eq:tnf-nf_est}.
\noindent \textbf{Proof of Lemma \ref{Lm:nf_compl}}
Let us first prove Inequality \ref{eq:tnf_error}.
According to Lemma \ref{Lm:tpiN_compl}, the calculated vector $\tilde{\pi}_q^{N_1}(\varphi)$ satisfies
\begin{equation}
\|\tilde{\pi}_q^{N_1}(\varphi) - \pi_q(\varphi)\|_1 \leq \frac{\alpha \delta_2}{12 \beta_1 r} , \quad \forall q \in Q.
\label{eq:Lm:f_nf_compl_1}
\end{equation}
This together with Lemma \ref{Lm:npi_est} with $\tilde{\pi}_q^{N_1}(\varphi)$ in the role of $\tilde{\pi}$ for all $q \in Q$ gives
$$
\left\| \tilde{\Pi}_q^{N_2}(\varphi) - \frac{d \pi_q(\varphi)}{d \varphi^T} \right\|_1 \leq \frac{\beta_1 \frac{\alpha \delta_2}{12 \beta_1 r}}{\alpha} + \frac{2\beta_1}{\alpha} (1-\alpha)^{N_2+1} \leq \frac{\delta_2}{12 r} + \frac{\beta_1}{\alpha}\frac{\alpha \delta_2}{4 \beta_1 r} = \frac{\delta_2}{3 r}.
$$
This inequality together with \eqref{eq:Lm:f_nf_compl_1}, Lemma \ref{Lm:tPi_N_est} with $\tilde{\pi}_q^{N_1}(\varphi)$ in the role of $\tilde{\pi}$ for all $q \in Q$ and Lemma \ref{Lm:tnf} with $\tilde{\pi}_q^{N_1}(\varphi)$ in the role of $\tilde{\pi}_q(\varphi)$ and $\tilde{\Pi}_q^{N_2}(\varphi)$ in the role of $\tilde{\Pi}_q(\varphi)$ for all $q \in Q$ gives
$$
\left\| \tilde{g}(\varphi,\delta_2) - \nabla f(\varphi)\right\|_{\infty} \leq 2r \frac{\delta_2}{3 r} + 4r \frac{\alpha \delta_2}{12 \beta_1 r} \frac{\beta_1 }{\alpha} = \delta_2.
$$
Let us now estimate the number of a.o. and the amount of memory needed to calculate $\tilde{g}(\varphi,\delta_2)$.
We use the same notation ${\rm TAO}$, ${\rm MM}$, $\mathcal{U}M$ as in the proof of Lemma \ref{Lm:tpiN_compl}.
\begin{enumerate}
\item We reserve a vector $g_1 \in \mathbb R^m$ to store the current (in terms of steps in $q$) approximation of $\tilde{g}(\varphi,\delta_2)$ and a vector $g_2 \in \mathbb R^m$ to store the next summand in the sum \eqref{eq:tnfN2_def}. So ${\rm TAO}=0$, ${\rm MM}=\mathcal{U}M = 2m$.
\item For every $q \in Q$ repeat.
\begin{enumerate}[label*=\arabic*.]
\item Set $\pi =\tilde{\pi}_q^{N_1}(\varphi)$. Also save in memory $\langle\varphi_1,\mathbf{V}^q_{j}\rangle$ for all $j\in U_q$; $\langle\varphi_2,\mathbf{E}^q_{il}\rangle$ for all $i \in V_q$, $l:i \to l$; $\sum_{j\in U_q}\langle\varphi_1,\mathbf{V}^q_{j}\rangle$ and $\sum_{l:i \to l}\langle\varphi_2,\mathbf{E}^q_{il}\rangle$ for all $i \in V_q$ and the matrix $P_q(\varphi)$. All this data was calculated during the calculation of $\tilde{\pi}_q^{N_1}(\varphi)$, see the proof of Lemma \ref{Lm:tpiN_compl}. According to Lemma \ref{Lm:tpiN_compl} and the memory used to save the listed objects we obtain ${\rm TAO}=3 mp_qs_q + 3 p_qs_qN_1$, ${\rm MM}=2m+2p_q s_q+n_q+p_qs_q+1+p_q \leq 2m+4 p_q s_q$, $\mathcal{U}M = 2m+ p_q + n_q+p_qs_q+1+p_q +p_qs_q \leq 2m+ 3p_qs_q$.
\item Now we need to calculate $\tilde{\Pi}_q^{N_2}(\varphi)$. We reserve variables $G_t,G_1,G_2 \in \mathbb R^{p_q \times m}$ to store, respectively, the sum in \eqref{eq:tPi_def}, $\Pi_k$ and $\Pi_{k+1}$ for the current $k\in 1,\dots,N_2$. Hence ${\rm TAO}=3 mp_qs_q + 3 p_qs_qN_1$, ${\rm MM}= 2m+ 4 p_q s_q + 3 m p_q$, $\mathcal{U}M = 2m+ 3p_qs_q+ 3 m p_q$.
\begin{enumerate}[label*=\arabic*.]
\item The first iteration of this method requires calculating
$$
\tilde{\Pi}_0 = \alpha \frac{d \pi^0_q(\varphi)}{d \varphi^T} + (1-\alpha) \sum_{i=1}^{p_q} \frac{ d p_i(\varphi)}{d \varphi^T} [\tilde{\pi}_q^{N_1}]_i.
$$
\begin{enumerate}[label*=\arabic*.]
\item We first calculate $G_1=\alpha \frac{d \pi^0_q(\varphi)}{d \varphi^T}$. In accordance with its definition \eqref{restart} and Equalities \ref{eq:F_q_G_q_def}, one has for all $i \in U_q$, $l=1,\dots,m_1$
$$
\left[\alpha\frac{d [\pi_q^0(\varphi)]_i}{d\varphi}\right]_l=\left[\frac{\alpha \mathbf{V}^q_i}{\sum_{j\in U_q}\langle\varphi_1,\mathbf{V}^q_{j}\rangle} - \frac{\alpha \langle\varphi_1,\mathbf{V}^q_i\rangle}{\left(\sum_{j\in U_q}\langle\varphi_1,\mathbf{V}^q_{j}\rangle\right)^2} \sum_{j\in U_q}\mathbf{V}^q_{j}\right]_l
$$
and $\left[\alpha\frac{d [\pi_q^0(\varphi)]_i}{d\varphi}\right]_l=0$ for $l=m_1+1,\dots,m$. We set $a= \frac{\alpha }{\sum_{j\in U_q}\langle\varphi_1,\mathbf{V}^q_{j}\rangle}$ and $b=\frac{a}{\sum_{j\in U_q}\langle\varphi_1,\mathbf{V}^q_{j}\rangle}$, $v=\sum_{j\in U_q}\mathbf{V}^q_{j}$. This requires $2+m_1 n_q$ a.o. and $2+m_1$ memory items. Now the calculation of all non-zero elements of $\alpha \frac{d \pi^0_q(\varphi)}{d \varphi^T}$ takes $4m_1n_q$ a.o. since for fixed $i,l$ we need 4 a.o. We obtain ${\rm TAO}=3 mp_qs_q + 3 p_qs_qN_1 + 5 m_1n_q+2$, ${\rm MM}= 2m+ 4 p_q s_q + 3 m p_q + m_1+2 $, $\mathcal{U}M = 2m+ 3p_qs_q+ 3 m p_q$.
\item Now we calculate $\tilde{\Pi}_0$. For every $i =1,\dots,p_q$ the matrix $(1-\alpha)\frac{ d p_i(\varphi)}{d \varphi^T} [\tilde{\pi}_q^{N_1}]_i \in \mathbb R^{p_q \times m}$ is calculated in the same way as the matrix $\alpha \frac{d \pi^0_q(\varphi)}{d \varphi^T}$, with the obvious modifications, since $\frac{ d p_i(\varphi)}{d \varphi_1^T} =0$ and the number of non-zero elements in the vector $p_i(\varphi)$ is not more than $s_q$. We also use additional a.o. and memory to calculate and save $(1-\alpha) [\tilde{\pi}_q^{N_1}]_i$. We save the result for the current $i$ in $G_2$. So for fixed $i$ we need additionally $3+5m_2 s_q$ a.o. and $3+m_2$ memory items. Also on every step we set $G_1=G_1+G_2$, which requires not more than $m_2 s_q$ a.o. since at every step $G_2$ has not more than $m_2 s_q$ non-zero elements. We set $G_t=G_1$. Note that $G_t$ always has a block of $(p_q-n_q) \times m_1$ zero elements and hence has not more than $m_2 p_q + m_1 n_q$ non-zero elements. At the end we obtain ${\rm TAO}=3 mp_qs_q + 3 p_qs_qN_1 + 5 m_1n_q+2 + p_q (3+5m_2 s_q+m_2 s_q) + m_2 p_q + m_1 n_q$, ${\rm MM}= 2m+ 4 p_q s_q + 3 m p_q + m_1+2 + m_2+3 \leq 3m+ 4 p_q s_q + 3 m p_q +5$, $\mathcal{U}M = 2m+ p_q s_q + 3 m p_q + p_q$ (since we need to store in memory only $g_1,g_2, G_t, G_1,G_2, P_q^T(\varphi), \pi$).
\end{enumerate}
\item Set $a=(1-\alpha)$.
\item For every step $k$ from 1 to $N_2$
\begin{enumerate}[label*=\arabic*.]
\item We set $G_2 = P_q^T(\varphi) G_1$. In this operation each of the $p_qs_q$ non-zero elements of the matrix $P_q^T(\varphi)$ potentially needs to be multiplied by $m$ elements of the matrix $G_1$, and each such multiplication is coupled with one addition. So in total we need $2mp_qs_q$ a.o.
\item We set $G_t=G_t+aG_1$. This requires $2m_1n_q+2m_2p_q$ a.o.
\item We set $a=(1-\alpha)a$.
\item In total every step requires not more than $2mp_qs_q + 2m_1n_q+2m_2p_q +1$ a.o.
\end{enumerate}
\item At the end of this stage we have ${\rm TAO}=3 mp_qs_q + 3 p_qs_qN_1 + 5 m_1n_q+2 + p_q (3+5m_2 s_q+m_2 s_q) + m_2 p_q + m_1 n_q + N_2(2mp_qs_q + 2m_1n_q+2m_2p_q +1)$, ${\rm MM}= 3m+ 4 p_q s_q + 3 m p_q +5$, $\mathcal{U}M = 2m+ m p_q + p_q$ (since we need to store in memory only $g_1,g_2, G_t,\pi$).
\item Set $G_t=\frac{\alpha}{1-(1-\alpha)a}G_t$. This takes $3+m_2 p_q + m_1 n_q$ a.o.
\item At the end of this stage we have ${\rm TAO}=3 mp_qs_q + 3 p_qs_qN_1 + 5 m_1n_q+2 + p_q (3+5m_2 s_q+m_2 s_q) + m_2 p_q + m_1 n_q + N_2(2mp_qs_q + 2m_1n_q+2m_2p_q +1) + 3+m_2 p_q + m_1 n_q$, ${\rm MM}= 3m+ 4 p_q s_q + 3 m p_q +5$, $\mathcal{U}M = 2m+ m p_q + p_q$ (since we need to store in memory only $g_1,g_2, G_t,\pi$).
\end{enumerate}
\item Calculate $u=(A_q \tilde{\pi}_q^{N_1}(\varphi) +b_q)_{+} $. This requires additionally $3r_q$ a.o. and $r_q$ memory.
\label{it:step}
\item Calculate $\pi = A_q^T u$. This requires additionally $4r_q$ a.o.
\item Calculate $g_2= G_t ^T \pi$. This requires additionally $2m_1n_q+2m_2p_q$ a.o.
\item Set $g_1=g_1+g_2$. This requires additionally $m$ a.o.
\item At the end we have ${\rm TAO}=3 mp_qs_q + 3 p_qs_qN_1 + 5 m_1n_q+2 + p_q (3+5m_2 s_q+m_2 s_q) + m_2 p_q + m_1 n_q + N_2(2mp_qs_q + 2m_1n_q+2m_2p_q +1) + 3+m_2 p_q + m_1 n_q + 7r_q + 2m_1n_q+2m_2p_q + m$, ${\rm MM}= 3m+ 4 p_q s_q + 3 m p_q +5 +r_q$, $\mathcal{U}M = 2m$ (since we need to store in memory only $g_1,g_2$).
\end{enumerate}
\item Set $g_1=\frac{2}{|Q|}g_1$. This requires additionally $m+1$ a.o.
\item At the end we have ${\rm TAO}=\sum_{q\in Q} (3 m p_qs_q + 3 p_qs_qN_1 + 5 m_1n_q+2 + p_q (3+5m_2 s_q+m_2 s_q) + m_2 p_q + m_1 n_q + N_2(2mp_qs_q + 2m_1n_q+2m_2p_q +1) + 3+m_2 p_q + m_1 n_q + 7r_q + 2m_1n_q+2m_2p_q + m) + m + 1\leq |Q| (10 mps + 3 psN_1+ 3mpsN_2 + 7r) $, ${\rm MM}= 3m+5+ \max_{q \in Q}( 4 p_q s_q + 3 m p_q +r_q) \leq 4ps+4mp+r$, $\mathcal{U}M = m$ (since we need to store in memory only $g_1$).
\end{enumerate}
\section{Omitted proofs for Section \ref{S:gradient_free_method}}
\label{S:GF_proofs}
Consider the smoothed counterpart of the function $f(x)$:
\begin{equation}
f_{\mu}(x) = {\mathbb E} f(x+\mu\zeta) = \frac{1}{V_{{\mathcal B}}} \int_{{\mathcal B}} f(x+\mu \zeta) d\zeta,
\notag
\end{equation}
where $\zeta$ is a random vector uniformly distributed over the unit ball ${\mathcal B}=\{ t \in \mathbb R^m : \|t\|_2 \leq 1\}$, $V_{{\mathcal B}}$ is the volume of the unit ball ${\mathcal B}$, and $\mu \geq 0$ is a smoothing parameter. This type of smoothing is well known.
It is easy to show that
\begin{itemize}
\item If $f$ is convex, then $f_{\mu}$ is also convex
\item If $f \in C^{1,1}_{L}(\|\cdot\|_2)$, then $f_{\mu} \in C^{1,1}_{L}(\|\cdot\|_2)$.
\item If $f \in C^{1,1}_{L}(\|\cdot\|_2)$, then $ f(x) \leq f_{\mu}(x) \leq f(x) + \frac{L \mu^2}{2}$ for all $x \in \mathbb R^m$.
\end{itemize}
The random gradient-free oracle is usually defined as follows
\begin{equation}
g_{\mu}(x) = \frac{m}{\mu}(f(x+\mu \xi) -f(x)) \xi,
\notag
\end{equation}
where $\xi$ is a vector uniformly distributed over the unit sphere ${\mathcal S} =\{ t \in \mathbb R^m : \|t\|_2 = 1\}$. It can be shown that ${\mathbb E} g_{\mu} (x) = \nabla f_{\mu}(x)$.
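This identity is standard; briefly, for differentiable $f$ the divergence theorem gives
$$
\nabla f_{\mu}(x) = \frac{1}{V_{{\mathcal B}}} \int_{{\mathcal B}} \nabla f(x+\mu\zeta)\, d\zeta = \frac{1}{\mu V_{{\mathcal B}}} \int_{{\mathcal S}} f(x+\mu\xi)\,\xi\, d\sigma(\xi) = \frac{m}{\mu}\, {\mathbb E}_\xi \big[ f(x+\mu\xi)\,\xi \big],
$$
since the surface area of ${\mathcal S}$ equals $m V_{{\mathcal B}}$; subtracting the constant $f(x)$ does not change the expectation because ${\mathbb E}_\xi\, \xi = 0$, whence ${\mathbb E}_\xi g_{\mu}(x) = \nabla f_{\mu}(x)$.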
Since we can use only an inexact zeroth-order oracle, we also define the counterpart of the above random gradient-free oracle which can actually be computed:
\begin{equation}
g_{\mu}(x,\delta) = \frac{m}{\mu}(\tilde{f}(x+\mu \xi, \delta) -\tilde{f}(x,\delta)) \xi.
\notag
\end{equation}
The idea is to use a gradient-type method with the oracle $g_{\mu}(x,\delta)$ instead of the real gradient in order to minimize $f_{\mu}(x)$. Since $f_{\mu}(x)$ is uniformly close to $f(x)$, we can obtain a good approximation to the minimum value of $f(x)$.
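For example, since $f(x) \leq f_{\mu}(x) \leq f(x) + \frac{L \mu^2}{2}$ for all $x$, any point $\hat{x}$ with $f_{\mu}(\hat{x}) - \min_{x \in X} f_{\mu}(x) \leq \varepsilon/2$ satisfies $f(\hat{x}) - \min_{x \in X} f(x) \leq \frac{\varepsilon}{2} + \frac{L \mu^2}{2}$, so choosing $\mu \leq \sqrt{\varepsilon/L}$ guarantees accuracy $\varepsilon$ for the original function.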
We will need the following lemma.
\begin{Lm}
Let $\xi$ be a random vector uniformly distributed over the unit sphere ${\mathcal S} \subset \mathbb R^m$. Then
\begin{equation}
{\mathbb E}_\xi(\langle \nabla f(x), \xi \rangle)^2 = \frac{1}{m}\|\nabla f(x)\|^2_2.
\label{expnfss}
\end{equation}
\end{Lm}
{\bf Proof.\ }
We have ${\mathbb E}_\xi(\langle \nabla f(x), \xi\rangle)^2 = \frac{1}{S_m(1)} \int_{{\mathcal S}} (\langle \nabla f(x), \xi \rangle)^2 d \sigma(\xi)$, where $S_m(r)$ is the surface area of the sphere of radius $r$, i.e. of the boundary of the ball of radius $r$ in $\mathbb R^m$, and $\sigma(\xi)$ is the unnormalized spherical measure. Note that $S_m(r)=S_m(1) r^{m-1}$.
Let $\varphi$ be the angle between $\nabla f(x)$ and $\xi$. Then
\begin{align}
&\frac{1}{S_m(1)} \int_{{\mathcal S}} (\langle \nabla f(x), \xi \rangle)^2 d \sigma(\xi) = \frac{1}{S_m(1)} \int_{0}^{\pi} \|\nabla f(x)\|^2_2 \cos^2 \varphi S_{m-1}( \sin \varphi) d \varphi = \notag \\
&= \frac{S_{m-1}(1)}{S_m(1)} \|\nabla f(x)\|^2_2 \int_{0}^{\pi}\cos^2 \varphi \sin^{m-2} \varphi d \varphi.
\notag
\end{align}
Changing the variable first via $x=\cos \varphi$ and then via $t=x^2$, we obtain
\begin{align}
&\int_{0}^{\pi}\cos^2 \varphi \sin^{m-2} \varphi d \varphi = \int_{-1}^{1}x^2 (1-x^2)^{(m-3)/2}d x = \int_{0}^{1}t^{1/2} (1-t)^{(m-3)/2}d t = \notag \\
& =B\left(\frac32,\frac{m-1}{2} \right)= \frac{\sqrt{\pi} \Gamma\left(\frac{m-1}{2}\right)}{2 \Gamma\left(\frac{m+2}{2}\right)}, \notag
\end{align}
where $\Gamma(\cdot)$ is the Gamma-function and $B$ is the Beta-function.
Also we have
\begin{equation}
\frac{S_{m-1}(1)}{S_m(1)} = \frac{m-1}{m \sqrt{\pi}} \frac{\Gamma\left(\frac{m+2}{2}\right)}{\Gamma\left(\frac{m+1}{2}\right)}.
\notag
\end{equation}
Finally using the relation $\Gamma(m+1) = m \Gamma(m)$, we obtain
\begin{align}
& {\mathbb E}(\langle \nabla f(x), \xi \rangle)^2 = \|\nabla f(x)\|^2_2 \left(1-\frac{1}{m}\right) \frac{ \Gamma\left(\frac{m-1}{2}\right)}{ 2\Gamma\left(\frac{m+1}{2}\right)}=\|\nabla f(x)\|^2_2 \left(1-\frac{1}{m}\right) \frac{ \Gamma\left(\frac{m-1}{2}\right)}{2 \frac{m-1}{2}\Gamma\left(\frac{m-1}{2}\right)} = \notag \\
& =\frac{1}{m}\|\nabla f(x)\|^2_2. \notag
\end{align}
\begin{Lm}
Let $f \in C^{1,1}_{L}(\|\cdot\|_2)$. Then, for any $x,y \in \mathbb R^m$,
\begin{align}
& {\mathbb E} \| g_{\mu}(x,\delta ) \|^2_2 \leq m^2 \mu^2 L^2 + 4 m \|\nabla f(x) \|^2_2 + \frac{8\delta^2 m^2}{\mu^2}, \label{expgmd} \\
& - {\mathbb E} \langle g_{\mu}(x,\delta ), x-y \rangle \leq - \langle \nabla f_{\mu}(x) , x-y \rangle + \frac{\delta m}{\mu} \|x-y\|_2.
\label{expgmdxmx}
\end{align}
\label{Lm:gmud}
\end{Lm}
{\bf Proof.\ }
Using \eqref{eq:fLipSm} we obtain
\begin{align}
& (\tilde{f}(x+\mu \xi, \delta) - \tilde{f}(x,\delta))^2 = \notag \\
& (f(x+\mu \xi) - f(x) - \mu \langle \nabla f(x), \xi \rangle + \mu \langle \nabla f(x), \xi \rangle + \tilde{\delta}(x+\mu \xi) - \tilde{\delta}(x))^2 \leq \notag \\
& 2 (f(x+\mu \xi) - f(x) - \mu \langle \nabla f(x), \xi \rangle + \mu \langle \nabla f(x), \xi \rangle)^2 + 2(\tilde{\delta}(x+\mu \xi) - \tilde{\delta}(x))^2 \leq \notag \\
& 4 \left(\frac{\mu^2}{2}L \|\xi\|^2\right)^2 + 4 \mu^2 (\langle \nabla f(x), \xi \rangle)^2 + 8 \delta^2 = \mu^4 L^2 \|\xi\|^4 + 4 \mu^2 (\langle \nabla f(x), \xi \rangle)^2 + 8 \delta^2 \notag
\end{align}
Using \eqref{expnfss}, we get
\begin{align}
& {\mathbb E}_\xi \| g_{\mu}(x,\delta) \|^2_2 \leq \frac{m^2}{\mu^2 V_s} \int_{{\mathcal S}} \left(\mu^4 L^2 \|\xi\|^4 + 4 \mu^2 (\langle \nabla f(x), \xi \rangle)^2 + 8 \delta^2 \right) \|\xi\|^2_2 d\sigma (\xi) = \notag \\
& = m^2 \mu^2 L^2 + 4 m \|\nabla f(x) \|^2_2 + \frac{8\delta^2 m^2}{\mu^2}.\notag
\end{align}
Using the equality ${\mathbb E}_\xi g_{\mu} (x) = \nabla f_{\mu}(x)$, we have
\begin{align}
& - {\mathbb E}_\xi \langle g_{\mu}(x,\delta), x-y \rangle = - \frac{m}{\mu V_s} \int_{{\mathcal S}} (\tilde{f}(x+\mu \xi,\delta) - \tilde{f}(x,\delta)) \langle \xi, x-y \rangle d\sigma (\xi) = \notag \\
& = - \frac{m}{\mu V_s} \int_{{\mathcal S}} (f(x+\mu \xi) - f(x)) \langle \xi, x-y \rangle d\sigma (\xi) - \notag \\
& - \frac{m}{\mu V_s} \int_{{\mathcal S}} (\tilde{\delta}(x+\mu \xi) - \tilde{\delta}(x)) \langle \xi, x-y \rangle d\sigma (\xi) \leq - \langle \nabla f_{\mu}(x) , x-y \rangle + \frac{\delta m}{\mu} \|x-y\|_2.
\notag
\end{align}
Let us denote $\psi_0 = f(x_0)$, and $\psi_k= {\mathbb E}_{\mathcal{U}_{k-1}}f(x_{k})$, $k \geq 1$.
We say that a smooth function $f$ is strongly convex with parameter $\tau \geq 0$ if and only if for any $x,y \in \mathbb R^m$ it holds that
\begin{equation}
f(x) \geq f(y) + \langle \nabla f(y) ,x-y \rangle + \frac{\tau}{2} \|x-y\|^2.
\label{eq:fStrConv}
\end{equation}
\textbf{Theorem 1} (extended)
\textit{Let $f \in C^{1,1}_{L} (\|\cdot\|_2)$ be convex. Assume that $x^* \in {\rm int}\, X$ and that the sequence $x_k$ is generated by Algorithm \ref{alg:GFPGM} with $h=\frac{1}{8mL}$.
Then for any $M \geq 0$, we have
\begin{align}
& {\mathbb E}_{\mathcal{U}_{M-1}} f(\hat{x}_M) - f^* \leq \frac{8mL D^2}{M+1} + \frac{\mu^2 L (m+8)}{8} + \frac{ \delta m D}{ 4\mu } + \frac{\delta^2 m}{L \mu^2},
\notag
\end{align}
where $f^*$ is the optimal value of the problem $\min_{x \in X} f(x)$.
If, moreover, $f$ is strongly convex with constant $\tau$, then
\begin{equation}
\psi_M - f^{*} \leq \frac12 L \left(\delta_{\mu} + \left(1-\frac{\tau }{16 m L} \right)^M(D^2 - \delta_{\mu}) \right),
\label{eq:rtSmthSC}
\end{equation}
where $\delta_{\mu}=\frac{ \mu^2 L (m+8)}{4 \tau} + \frac{4m \delta D}{\tau \mu } + \frac{2 m \delta^2}{\tau \mu^2 L } $.
}
{\bf Proof.\ }
We extend the proof in~\cite{nesterov3} for the case of randomization on a sphere (instead of randomization based on normal distribution) and for the case when one can calculate the function value only with some error of unknown nature.
Consider the point $x_k$, $k\geq 0$ generated by the method on the $k$-th iteration. Denote $r_k=\|x_k-x^*\|_2$. Note that $r_k \leq D$. We have:
\begin{align}
&r_{k+1}^2 = \|x_{k+1}-x^*\|_2^2 \leq \|x_{k}-x^*-h g_{\mu}(x_k,\delta)\|_2^2 = \notag \\
& = \|x_{k}-x^*\|_2^2 - 2h \langle g_{\mu}(x_k,\delta), x_k-x^* \rangle + h^2 \|g_{\mu}(x_k,\delta) \|_2^2.
\notag
\end{align}
Taking the expectation with respect to $\xi_k$ we get
\begin{align}
& {\mathbb E}_{\xi_k} r_{k+1}^2 \stackrel{\eqref{expgmd},\eqref{expgmdxmx}}{\leq} r_k^2 - 2h\langle \nabla f_{\mu}(x_k) , x_k-x^* \rangle + \frac{2 \delta m h }{\mu} r_k + \notag \\
& + h^2 \left( m^2 \mu^2 L^2 + 4 m \|\nabla f(x_k) \|_2^2 + \frac{8\delta^2 m^2}{\mu^2} \right) \leq \notag \\
& \leq r_k^2 -2h (f(x_k )- f_{\mu}(x^*)) + \frac{ \delta m h D}{4 \mu} + \notag \\
& + h^2 \left( m^2 \mu^2 L^2 + 8 m L (f(x_k) - f^*) + \frac{8\delta^2 m^2}{\mu^2} \right) \leq \notag \\
& \leq r_k^2 -2h (1-4h m L) (f(x_k )- f^*) + \frac{ \delta m h D}{4\mu} + \notag \\
&+ m^2 h^2 \mu^2 L^2 + h L \mu^2 + \frac{8\delta^2 m^2 h^2}{\mu^2} \leq \notag \\
& \leq r_k^2 + \frac{D \delta }{4 \mu L} - \frac{f(x_k )- f^*}{8mL} + \frac{\mu^2(m+8)}{64 m} + \frac{\delta^2 }{8\mu^2L^2}.
\label{ThalgSmthPr1}
\end{align}
Taking expectation with respect to $\mathcal{U}_{k-1}$ and defining $\rho_{k+1} \stackrel{\rm def}{=}{\mathbb E}_{\mathcal{U}_{k}} r_{k+1}^2$ we obtain
\begin{equation}
\rho_{k+1} \leq \rho_k- \frac{\psi_k- f^*}{8mL}+ \frac{\mu^2(m+8)}{64 m} + \frac{D \delta }{ 4\mu L} + \frac{\delta^2 }{8\mu^2L^2}.
\notag
\end{equation}
Summing up these inequalities from $k=0$ to $k=M$ and dividing by $M+1$ we obtain \eqref{eq:rtSmth}.
Estimate \ref{eq:rtSmth} also holds for $\hat{\psi}_M \stackrel{\rm def}{=} {\mathbb E}_{\mathcal{U}_{M-1}} f(\hat{x}_M)$, where $\hat{x}_M = \arg \min_x \{ f(x): x \in \{ x_0, \dots, x_M\}\}$.
Now assume that the function $f(x)$ is strongly convex. From \eqref{ThalgSmthPr1} we get
\begin{equation}
{\mathbb E}_{\xi_k} r_{k+1}^2 \stackrel{\eqref{eq:fStrConv}}{\leq} \left(1-\frac{\tau}{16mL} \right) r_k^2 + \frac{D \delta }{4 \mu L} + \frac{\mu^2(m+8)}{64 m} + \frac{\delta^2 }{8\mu^2L^2}
\notag
\end{equation}
Taking expectation with respect to $\mathcal{U}_{k-1}$ we obtain
\begin{equation}
\rho_{k+1} \leq \left(1-\frac{\tau}{16mL} \right) \rho_k + \frac{D \delta }{4 \mu L} + \frac{\mu^2(m+8)}{64 m} + \frac{\delta^2 }{8\mu^2L^2}
\notag
\end{equation}
and
\begin{align}
& \rho_{k+1} - \delta_{\mu} \leq \left(1-\frac{\tau}{16mL} \right) ( \rho_k - \delta_{\mu}) \leq \notag \\
& \leq \left(1-\frac{\tau}{16mL} \right)^{k+1} ( \rho_0 - \delta_{\mu}). \notag
\end{align}
Using the fact that $\rho_0 \leq D^2$ and $\psi_k - f^* \leq \frac12 L \rho_k$ we obtain \eqref{eq:rtSmthSC}.
\section{Omitted proofs for Section \ref{S:gradient_method}}
We will need the following two results obtained in \cite{ghadimi}.
\begin{Lm}
Let $x_X (\bar{x},g,\gamma)$ be defined in \eqref{eq:x_Q} and $g_X (\bar{x},g,\gamma)$ be defined in \eqref{eq:g_Q}. Then, for any $\bar{x} \in X$, $g \in E^*$ and $\gamma > 0$, it holds
\begin{equation}
\langle g, g_X(\bar{x},g,\gamma) \rangle \geq \|g_X (\bar{x},g,\gamma)\|^2 + \frac{1}{\gamma}(h(x_X (\bar{x},g,\gamma))-h(\bar{x})).
\label{eq:gr_map_pr_1}
\end{equation}
\label{Lm:gr_map_pr_1}
\end{Lm}
\begin{Lm}
Let $g_X (\bar{x},g,\gamma)$ be defined in \eqref{eq:g_Q}. Then, for any $g_1, g_2 \in E^*$, it holds
\begin{equation}
\|g_X (\bar{x},g_1,\gamma)-g_X (\bar{x},g_2,\gamma)\| \leq \|g_1-g_2\|_*
\label{eq:gr_map_lip}
\end{equation}
\label{Lm:gr_map_lip}
\end{Lm}
\noindent \textbf{Proof of Theorem \ref{Th:pg_la_2_rate}.}
First of all, let us show that the procedure of searching for a point $w_k$ satisfying \eqref{eq:w_k_2}, \eqref{eq:gen_PG_LA_2_main} is finite. This follows from the fact that for $M_k \geq L$, \eqref{eq:dL_or_def} yields
\begin{align}
&\tilde{f}(w_k,\delta) - \frac{\varepsilon}{16M_k} \stackrel{\eqref{eq:dL_or_def}}{\leq} f(w_k) \stackrel{\eqref{eq:dL_or_def}}{\leq} \tilde{f}(x_k,\delta) + \langle \tilde{g}(x_k,\delta),w_k - x_k \rangle + \frac{L}{2}\|w_k - x_k\|_2^2 + \frac{\varepsilon}{16M_k}
\notag
\end{align}
which is \eqref{eq:gen_PG_LA_2_main}.
Let us now obtain the rate of convergence. Using the definition of $x_{k+1}$ and \eqref{eq:gen_PG_LA_2_main} we obtain for any $k=0,\dots,N$
\begin{align}
& f(x_{k+1}) - \frac{\varepsilon}{16M_k} = f(w_k) - \frac{\varepsilon}{16M_k} \stackrel{\eqref{eq:dL_or_def}}{\leq} \tilde{f}(w_k,\delta) \stackrel{\eqref{eq:gen_PG_LA_2_main}}{\leq} \tilde{f}(x_k,\delta) + \notag \\
& + \langle \tilde{g}(x_k,\delta) , x_{k+1}-x_k \rangle +\frac{M_k}{2} \|x_{k+1}-x_k\|^2 + \frac{\varepsilon}{8M_k} \stackrel{\eqref{eq:g_Q},\eqref{eq:w_k_2}}{=} \notag \\
& = \tilde{f}(x_k,\delta) - \frac{1}{M_k} \left\langle \tilde{g}(x_k,\delta) , g_X\left(x_k, \tilde{g}(x_k,\delta) ,\frac{1}{M_k}\right) \right\rangle + \notag \\
& + \frac{1}{2M_k} \left\|g_X \left(x_k, \tilde{g}(x_k,\delta) ,\frac{1}{M_k}\right)\right\|^2 + \frac{\varepsilon}{8M_k} \stackrel{\eqref{eq:dL_or_def},\eqref{eq:gr_map_pr_1}}{\leq} \notag \\
& \leq f(x_k) + \frac{\varepsilon}{16M_k} - \left[\frac{1}{M_k} \left\|g_X \left(x_k, \tilde{g}(x_k,\delta) ,\frac{1}{M_k}\right)\right\|^2 + h(x_{k+1})-h(x_k)\right]+ \notag \\
& + \frac{1}{2M_k} \left\|g_X \left(x_k, \tilde{g}(x_k,\delta) ,\frac{1}{M_k}\right)\right\|^2 + \frac{\varepsilon}{8M_k} \notag.
\end{align}
This leads to
$$
\psi(x_{k+1}) \leq \psi(x_k) - \frac{1}{2M_k} \left\|g_X \left(x_k,\tilde{g}(x_k,\delta),\frac{1}{M_k}\right)\right\|^2 + \frac{\varepsilon}{4M_k}.
$$
for all $k=0,\dots,N$.
Summing up these inequalities for $k=0,\dots,N$ we get
\begin{align}
&\left\|g_X \left(x_{\hat{k}},\tilde{g}_{\hat{k}},\frac{1}{M_{\hat{k}}}\right)\right\|^2
\sum_{k=0}^N \frac{1}{2M_k} \leq \sum_{k=0}^N \frac{1}{2M_k}\left\|g_X \left(x_k,\tilde{g}_k,\frac{1}{M_k}\right)\right\|^2 \leq \notag \\
& \leq \psi(x_0) - \psi(x_{N+1}) + \frac{\varepsilon}{4} \sum_{k=0}^N \frac{1}{M_k}
\notag
\end{align}
Hence, using the fact that $M_k \leq 2L$ for all $k\geq 0$ (which easily follows from the first part of the proof) and that $\psi(x) \geq \psi^* > -\infty$ for all $x\in X$, we obtain
\begin{align}
&\left\|g_X \left(x_{\hat{k}},\tilde{g}_{\hat{k}},\frac{1}{M_{\hat{k}}}\right)\right\|^2 \leq \frac{1}{\sum_{k=0}^N \frac{1}{2M_k}} \left( \psi(x_0) - \psi^* + \frac{\varepsilon}{4} \sum_{k=0}^N \frac{1}{M_k} \right) \stackrel{M_k \leq 2L}{\leq} \notag \\
& \frac{4L( \psi(x_0) - \psi^*)}{N+1} + \frac{\varepsilon}{2}, \notag
\end{align}
which is \eqref{eq:pg_la_2_rate}.
The estimate for the number of checks of Inequality~\ref{eq:gen_PG_LA_2_main} is proved in the same way as in \cite{Nesterov2006}.
\end{document}
\begin{document}
\author{Anthony Lee \footnote{Oxford-Man Institute, 9 Alfred Street, Oxford OX1 4EH, UK, and University of Oxford, Department of Statistics, 1 South Parks Road, Oxford OX1 3TG, UK. Email: [email protected]}
\and Christopher Yau \footnote{University of Oxford, Department of Statistics, 1 South Parks Road, Oxford OX1 3TG, UK. Email: [email protected]}
\and Michael B. Giles \footnote{University of Oxford, Mathematical Institute, 24-29 St Giles, Oxford OX1 3LB, UK, and Oxford-Man Institute, 9 Alfred Street, Oxford OX1 4EH, UK. Email: [email protected]}
\and Arnaud Doucet \footnote{Institute of Statistical Mathematics, 4-6-7 Minami-Azabu, Minato-ku, Tokyo 106-8569, Japan. Email: [email protected]}
\and Christopher C. Holmes \footnote{University of Oxford, Department of Statistics, 1 South Parks Road, Oxford OX1 3TG, UK, and Oxford-Man Institute, 9 Alfred Street, Oxford OX1 4EH, UK. Email: [email protected]}
}
\date{\today}
\title{On the utility of graphics cards to perform massively parallel simulation of advanced Monte Carlo methods}
\maketitle
\begin{abstract}
We present a case-study on the utility of graphics cards to perform massively parallel simulation of advanced Monte Carlo methods. Graphics cards, containing multiple Graphics Processing Units (GPUs), are self-contained parallel computational devices that can be housed in conventional desktop and laptop computers. For certain classes of Monte Carlo algorithms they offer massively parallel simulation, with the added advantage over conventional distributed multi-core processors that they are cheap, easily accessible, easy to maintain, easy to code, dedicated local devices with low power consumption. On a canonical set of stochastic simulation examples including population-based Markov chain Monte Carlo methods and Sequential Monte Carlo methods, we find speedups from 35 to 500 fold over conventional single-threaded computer code. Our findings suggest that GPUs have the potential to facilitate the growth of statistical modelling into complex data rich domains through the availability of cheap and accessible many-core computation. We believe the speedup we observe should motivate wider use of parallelizable simulation methods and greater methodological attention to their design.
\end{abstract}
{\small \textbf{Keywords:} Sequential Monte Carlo, Population-Based Markov Chain Monte Carlo, General Purpose Computation on Graphics Processing Units, Many-Core Architecture, Stochastic Simulation, Parallel Processing }
\section{Introduction}
We describe a case-study in the utility of graphics cards involving Graphics Processing Units (GPUs) to perform local, dedicated, massively parallel stochastic simulation. GPUs were originally developed as dedicated devices to aid in real-time graphics rendering. However recently there has been an emerging literature on their use for scientific computing as they house multi-core processors. Examples include \cite{mm} and \cite{mds}, which discuss their use in molecular modelling and dynamics.
To gain an understanding of the potential benefits to statisticians we have investigated speedups on a canonical set of examples taken from the advanced Monte Carlo literature. These include Bayesian inference for a Gaussian mixture model computed using a population-based Markov chain Monte Carlo (MCMC) method and a sequential Monte Carlo (SMC) sampler and sequential Bayesian inference for a multivariate stochastic volatility model implemented using a standard SMC method, also known as a particle filter in this context. In these examples we report substantial speedups from the use of GPUs over conventional CPUs.
The potential of parallel processing to aid in statistical computing is well documented (see e.g. \cite{hpcs}). However, previous studies have relied on distributed multi-core clusters of CPUs for implementation. In contrast, graphics cards for certain generic types of computation offer parallel processing speedups with advantages on a number of fronts, including:
\begin{itemize}
\item Cost: graphics cards are relatively cheap. At time of writing the cards used in our study retail at around 200 US dollars.
\item Accessibility: graphics cards are readily obtainable from high street computer stores or over the internet.
\item Maintenance: the devices are self-contained and can be hosted on conventional desktop and laptop computers.
\item Speed: in line with multi-core CPU clusters, graphics cards offer significant speedup, albeit for a restricted class of scientific computing algorithms.
\item Power: GPUs are low energy consumption devices compared to clusters of traditional computers, with a graphics card requiring around 200 Watts. While improvements in energy efficiency are application-specific, it is reasonable in many situations to expect a GPU to use around 10 per cent of the energy of an equivalent CPU cluster.
\item Dedicated and local: the graphics cards slot into conventional computers offering the user ownership without the need to transport data externally.
\end{itemize}
The idea of splitting the computational effort of parallelizable algorithms amongst processors is certainly not new to statisticians. In fact, distributed systems and clusters of computers have been around for decades. Previous work on parallelization of MCMC methods on a group of networked computers includes, among others, \cite{rosenthal} and \cite{brockwell}. \cite{rosenthal} discusses how to deal with computers running at different speeds and potential computer failure while \cite{brockwell} discusses the parallel implementation of a standard single chain MCMC algorithm by pre-computing acceptance ratios. The latency and bandwidth of communication in these systems make them suitable only in cases where communication between streams of computation, or threads, is infrequent and low in volume. In other words, while many algorithms involve computation that could theoretically be distributed amongst processors, the overhead associated with distributing the work erases any speedup. In contrast, many-core processor communication has very low latency and very high bandwidth due to high-speed memory that is shared amongst the cores. Low latency here means the time for a unit of data to be accessed or written to memory by a processor is low whilst high bandwidth means that the amount of data that can be sent in a unit of time is high. For many algorithms, this makes parallelization viable where it previously was not. In addition, the energy efficiency of a many-core computation compared to a single-core or distributed computation can be improved. This is because the computation can both take less time and require less overhead.
We choose to investigate the speed up for the simulation of random variates from complex distributions, a common computational task when performing inference using Monte Carlo (see e.g. \cite{mcsm}). In particular, we focus on population-based MCMC methods and SMC methods for producing random variates as these are not algorithms that typically see significant speedup on clusters due to the need for frequent, high-volume communication between computing nodes. For the examples we consider, we find that computation time can be significantly lowered for all applications, and drastically lowered in some cases. This means that we can obtain the samples we want in seconds instead of hours and minutes instead of days. The types of speedup observed are dependent on the ability of the methods to be parallelized. In particular, speedup increases with the number of auxiliary distributions in population-based MCMC and the number of particles in SMC until some device-specific capacity is reached.
The algorithms are implemented for the Compute Unified Device Architecture (CUDA) and make use of GPUs which support this architecture. CUDA offers a fairly mature development environment via an extension to the C programming language. We estimate that a programmer proficient in C should be able to code effectively in CUDA within a few weeks of dedicated study. For our applications we use CUDA version 2.1 with an NVIDIA GTX 280 as well as an NVIDIA 8800 GT. The GTX 280 has 30 multiprocessors while the 8800 GT has 14 multiprocessors. For all current NVIDIA cards, a multiprocessor comprises 8 arithmetic logic units (ALUs), 2 special units for transcendental functions, a multithreaded instruction unit and on-chip shared memory. In addition to having more multiprocessors than the 8800 GT, each GTX 280 multiprocessor itself has more registers, can support more active threads and includes one double-precision ALU. For single-precision floating point computation, one can think of the GTX 280 as having 240 ($30 \times 8$) and the 8800 GT as having 112 ($14 \times 8$) single processors respectively. At present, the retail price of the GTX 280 is just over double that of the 8800GT and it requires just over twice the power.
\section{GPUs for Parallel Processing}
\begin{figure}
\caption{Schematic of the link between the host (CPU) and the graphics card (GPU), indicating the number of processing cores and the relative bandwidths of the memory buses.}
\label{fig:gpu_cpu_link}
\end{figure}
GPUs have evolved into many-core processing units, currently with up to 30 multiprocessors per card, in response to commercial demand for real-time graphics rendering, independently of demand for many-core processors in the scientific computing community. As such, the architecture of GPUs is very different to that of conventional central processing units (CPUs). An important difference is that GPUs devote proportionally more transistors to ALUs and less to caches and flow control in comparison to CPUs. This makes them less general purpose but highly effective for data-parallel computation with high arithmetic intensity, i.e. computations where the same instructions are executed on different data elements and where the ratio of arithmetic operations to memory operations is high. This single instruction, multiple data (SIMD) architecture puts a heavy restriction on the types of computation that optimally utilize the GPU but in cases where the architecture is suitable it reduces overhead.
Figure \ref{fig:gpu_cpu_link} gives a visualization of the link between a host machine and the graphics card, emphasizing the data bandwidth characteristics of the links and the number of processing cores. A program utilizing a GPU is hosted on a CPU with both the CPU and the GPU having their own memory. Data is passed between the host and the device via a standard memory bus, similar to how data is passed between main memory and the CPU. The memory bus between GPU memory and the GPU cores is both wider and has a higher clock rate than a standard bus, enabling much more data to be sent to the cores than over the equivalent link to the host. This type of architecture is ideally suited to data-parallel computation since large quantities of data can be loaded into registers for the cores to process in parallel. In contrast, typical computer architectures use a cache to speed up memory accesses using locality principles that are generally good but do not fully apply to data-parallel computations, with the absence of temporal locality most notable.
\subsection{Programming with Graphics Cards}
\label{section:programming_gpus}
CUDA provides the interface to compliant GPUs by extending the C programming language. Programs compiled with CUDA allow computation to be split between the CPU and the GPU. In this sense, the GPU can be treated as an additional, specialized processor for data-parallel computation. In the following text, host code refers to code that is executed on the CPU whilst device code is code that is executed on the GPU. We present a simple example in Figures \ref{code:is_global}-\ref{code:is_host}, explained below, that computes a classical importance sampling estimate (see Section \ref{section:methods}). In the code snippets, keywords in the C language are in bold face whilst CUDA keywords are both bold and italicized. A line beginning with a ``{\tt //}'' is a comment and is ignored by the compiler.
\begin{figure}
\caption{Kernel that evaluates an importance weight and test function}
\label{code:is_global}
\end{figure}
CUDA allows users to define special functions, called kernels, that are called by the host code to be executed in parallel on the GPU by a collection of threads. Figure \ref{code:is_global} shows an example of a kernel function, which can be invoked in host code using the syntax
\begin{verbatim}
importance_sample<<<nb,nt>>>(N, d_array, d_array_out);
\end{verbatim}
where {\tt nb} is the number of blocks of threads and {\tt nt} is the number of threads per block. The total number of threads created by this call is the product of {\tt nb} and {\tt nt}, and one can think of a thread as being a single stream of computation. For most kernels, the numbers of threads and blocks can be changed to tune performance on different cards or with different data. A more detailed description of blocks and threads and their relation to the hardware is given in Section \ref{section:blocks_threads}.
\begin{figure}
\caption{Device functions for evaluating the target density, the proposal density and the test function. The target is an equally weighted, two-component mixture of normals with equal variances of 0.25 and means at -1 and 1.5 while the proposal is a standard normal distribution. The test function squares its input so that the integral that is estimated is the expectation of the second moment of a random variable distributed according to the target density.}
\label{code:is_device}
\end{figure}
A kernel is defined with the {\tt \_\_global\_\_} qualifier. Kernels are special in that they are always invoked in parallel with the numbers of blocks and threads specified and have a void return type. As such, program correctness depends only on how the threads invoked in the kernel call modify memory on the graphics card. In particular, code must be written that guarantees the correct modifications to memory once all threads have completed, especially when threads interact during execution by reading and writing from shared memory locations. In Figure \ref{code:is_global}, a kernel is defined that takes as input an array of random values sampled from a proposal distribution and places, for each value, the product of the test function and the importance weight at that value in a separate array. One can see that each thread is responsible for ${\tt N} / {\tt tt}$ values, assuming ${\tt N}$ is a multiple of {\tt tt}. Within a kernel, special functions can be called that have been defined with the {\tt \_\_device\_\_} qualifier. These functions can only be called by {\tt \_\_global\_\_} functions or {\tt \_\_device\_\_} functions themselves. In Figure \ref{code:is_global}, {\tt target\_pdf}, {\tt proposal\_pdf} and {\tt phi} are examples of this, and their definitions are provided in Figure \ref{code:is_device}. In this particular kernel we see that each thread first computes its absolute thread identifier {\tt tid} and the total number of threads {\tt tt}. It then computes an importance weight and evaluates the test function for each value in {\tt d\_array} it is responsible for and stores the result in {\tt d\_array\_out}. Since there is no thread interaction in this example kernel, it is reasonably straightforward to verify its correctness.
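For convenience, the following is a minimal sketch, consistent with the description above, of what the device functions and the kernel could look like; it is an illustration only and may differ in detail (indexing, qualifiers, constants) from the actual listings in Figures \ref{code:is_device} and \ref{code:is_global}.
\begin{verbatim}
// Device functions: target density, proposal density and test function.
// The target is an equally weighted mixture of N(-1, 0.25) and
// N(1.5, 0.25); the proposal is a standard normal; phi squares its input.
__device__ float target_pdf(float x) {
    float s2 = 0.25f;
    float a = expf(-(x + 1.0f) * (x + 1.0f) / (2.0f * s2));
    float b = expf(-(x - 1.5f) * (x - 1.5f) / (2.0f * s2));
    return 0.5f * (a + b) / sqrtf(2.0f * 3.14159265f * s2);
}

__device__ float proposal_pdf(float x) {
    return expf(-0.5f * x * x) / sqrtf(2.0f * 3.14159265f);
}

__device__ float phi(float x) {
    return x * x;   // second moment
}

// Kernel: each thread handles N / tt of the values.
__global__ void importance_sample(int N, float *d_array, float *d_array_out) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;  // absolute thread id
    int tt  = gridDim.x * blockDim.x;                 // total number of threads
    for (int i = tid; i < N; i += tt) {
        float x = d_array[i];
        float w = target_pdf(x) / proposal_pdf(x);    // importance weight
        d_array_out[i] = phi(x) * w;                  // weighted test function
    }
}
\end{verbatim}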
\begin{figure}
\caption{Host code}
\label{code:is_host}
\end{figure}
Figure \ref{code:is_host} gives a snippet of code that is run on the host and completes our example. First, memory is allocated on both the host and the graphics card using the {\tt malloc} and {\tt cudaMalloc} functions respectively. The host function {\tt populate\_randn} then puts {\tt N} standard normal random variates in {\tt array}. These values are copied into the GPU array, {\tt d\_array}, via the {\tt cudaMemcpy} function. In Figure \ref{fig:gpu_cpu_link}, this is a transfer along the memory bus that connects host and graphics card memory. At this point, the kernel is called with 64 blocks of 128 threads per block. The {\tt reduce} function is a CPU function that returns the sum of the elements in a GPU array. Of course, this function can itself invoke a GPU kernel. Finally, the importance sampling estimate is obtained by dividing this sum by {\tt N} and memory is freed. Note that this code has been written so as to expose the most common functions that are used in GPU programming using CUDA. For example, it would be faster to create the random variates on the GPU itself but this would not have allowed any memory transfer operations to be shown here.
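A corresponding sketch of the host code follows; {\tt populate\_randn} and {\tt reduce} stand for the host helper functions named above, with assumed (illustrative) signatures, and the exact listing in Figure \ref{code:is_host} may differ in detail.
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>

// Assumed signatures for the helpers described in the text.
extern void  populate_randn(float *a, int n);  // fills a with N(0,1) draws
extern float reduce(int n, float *d_a);        // sums a device array

int main(void) {
    int N = 1 << 20;
    size_t size = N * sizeof(float);
    float *array, *d_array, *d_array_out;

    array = (float *) malloc(size);                 // host memory
    cudaMalloc((void **) &d_array, size);           // device memory
    cudaMalloc((void **) &d_array_out, size);

    populate_randn(array, N);                       // N standard normal draws
    cudaMemcpy(d_array, array, size, cudaMemcpyHostToDevice);

    importance_sample<<<64, 128>>>(N, d_array, d_array_out);

    float estimate = reduce(N, d_array_out) / N;    // importance sampling estimate
    printf("estimate = %f\n", estimate);

    free(array);
    cudaFree(d_array);
    cudaFree(d_array_out);
    return 0;
}
\end{verbatim}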
This basic example highlights the most important characteristics of CUDA programs: memory management, kernel specification and kernel invocation. Memory management is a key component in algorithm design using graphics cards since there is often need for transfer between CPU and GPU memory as standard host functions can only access CPU memory and kernels can only access GPU memory. The fundamental memory operations are {\tt cudaMalloc}, {\tt cudaMemcpy} and {\tt cudaFree}, which are GPU analogues to the standard C functions {\tt malloc}, {\tt memcpy} and {\tt free}. Kernel specification requires ensuring that correct output will be given once all threads have returned. In the above example, it is clear that all concurrent threads will be executing the same instructions in parallel because there is no conditional branching, which occurs when different instructions are executed in concurrent threads based on the result of a data-dependent runtime comparison. Indeed, while it is possible to specify arbitrary conditional branches within a kernel, this can lead to slow performance since threads in a SIMD architecture execute sequentially when they are not executing the same instructions, which can be devastating to performance. An important constraint on kernel code that is not illustrated explicitly in the above code is that neither recursive functions nor function pointers can be defined in device code. This is due to the fact that kernel functions are completely determined at compile time with {\tt \_\_device\_\_} functions simply inlined, or inserted into the kernel, during compilation. With respect to kernel invocation, the number of threads and blocks assigned to each kernel can be decided at runtime in host code. This is useful since computation time can depend strongly on these numbers and optimal configurations will vary across graphics cards and features of the data. A final remark is that the level of abstraction provided by CUDA is close to the hardware operations on the device. This ensures that programmers are acutely aware of the benefits of writing, for example, kernels with minimal interaction between threads and avoiding branching.
\subsection{Blocks and Threads}
\label{section:blocks_threads}
CUDA abstracts the hardware of the GPU into blocks and threads to simultaneously provide a relatively simple view of the architecture to developers while still allowing a low-level abstraction of the hardware for performance reasons. One can generally think of each thread as being computed on a virtual processor. The block abstraction is necessary to provide the concept of a virtual microprocessor. Threads within a block are capable of more interaction than threads in separate blocks, mainly due to the fact that all threads in a block will be executed on the same microprocessor. As such, they have access to very fast, dynamically allocated, on-chip memory and can perform simple barrier synchronization. In Section \ref{section:programming_gpus}, this advanced functionality is not required by the example kernel.
It is important to note that blocks and threads are still very much virtual constructs. At runtime, multiple blocks may be executed concurrently on the same multiprocessor. With respect to ALU execution, operations are performed on groups of 32 threads at a time, which allows each of the 8 scalar processors to perform 4 identical instructions in quick succession in an ideal setting. The group of 32 threads that executes simultaneously is called a warp.
\subsection{Single Precision Issues}
The current generation of GPUs is 4-8 times faster at single precision arithmetic than double precision. Although this ratio will decrease in the future, there will probably remain a factor 2 difference in speed, the same as for Intel CPUs when using SSE instructions. This raises the question of whether single precision arithmetic is adequate for statistical applications.
There are two particular areas in which care must be taken. The first concerns the much more limited range of single precision floating point numbers. Because of their 8-bit exponent, their magnitude must lie in the approximate range $[10^{-38}, 10^{+38}]$, whereas the magnitude of double precision variables is in the approximate range $[10^{-308}, 10^{+308}]$. Consequently, when working in single precision it is often necessary to work with log-likelihoods, rather than the likelihoods themselves though this is rarely a restriction for statisticians.
The second area of potential issues concerns the averaging of $N$ floating point values, for $N\!\gg\! 1$. The simplest implementation uses an accumulator, to which the values are added one at a time. However, this may lead to a large increase in the error due to finite machine precision. When all of the values are of the same sign, the relative error is amplified by factor $O(N)$ in the worst case, and $O(\sqrt{N})$ in the more typical case where the rounding error at each step can be modelled as a random variable with zero mean. This behaviour is well understood \cite{higham93, higham02} and the growth can be reduced to $O(\log N)$ by using a binary tree summation algorithm in which the values are summed in pairs, and then those new values are summed in pairs, and the process is repeated until a single value is obtained. This is the natural approach for the parallel implementation of a reduction operation. Example code is provided by NVIDIA on their CUDA website, and the implementation in our code is based on this.
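To illustrate the pairing idea itself (this is a host-side illustration, not the NVIDIA reduction kernel on which our implementation is based), a pairwise summation can be written recursively as follows.
\begin{verbatim}
// Pairwise (binary tree) summation: values are summed in pairs, the
// partial sums are again summed in pairs, and so on, so the worst-case
// rounding error grows like O(log N) rather than O(N).
float pairwise_sum(const float *x, int n) {
    if (n == 1) return x[0];
    int half = n / 2;
    return pairwise_sum(x, half) + pairwise_sum(x + half, n - half);
}
\end{verbatim}
A parallel reduction applies the same pairing across threads, typically accumulating partial sums in shared memory within each block.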
Despite these concerns, single precision seems perfectly sufficient for the applications in this paper. The statistical variability due to the use of random numbers within the algorithms exceeds the perturbations due to finite machine precision.
\subsection{GPU Parallelizable Algorithms}
In general, if a computing task is well-suited to SIMD parallelization then it will be well-suited to computation on a GPU. In particular, data-parallel computations with high arithmetic intensity (computations where the ratio of arithmetic operations to memory operations is high) are able to attain maximum performance from a GPU. This is because the volume of very fast arithmetic instructions can hide the relatively slow memory accesses. It is crucial to determine whether a particular computation is data-parallel on the instruction level when determining suitability. From a statistical simulation perspective, integration via classical Monte Carlo or importance sampling are ideal computational tasks in a SIMD framework. This is because each computing node can produce and weight a sample in parallel, assuming that the sampling procedure and the weighting procedure have no conditional branches. If these methods do branch, speedup can be compromised by many computing nodes running idle while others finish their tasks. This can occur, for example, if the sampling procedure uses rejection sampling.
In contrast, if a computing task is not well-suited to SIMD parallelization then it will not be well-suited to computation on a GPU. In particular, task-parallel computations where one executes different instructions on the same or different data cannot utilize the shared flow control hardware on a GPU and often end up running sequentially. Even when a computation is data-parallel, it might not give large performance improvements on a GPU due to memory constraints. This can be due to the number of registers required by each thread (see Sections \ref{section:fsv} and \ref{section:discussion}) or due to the size and structure of the data necessary for the computation requiring large amounts of memory to be transferred between the host and the graphics card. The latter issue is analogous to the issue of thrashing in virtual memory systems and can occur, for example, when an algorithm iterates over a block of data that will not fit in memory.
There are also many computational tasks in statistical computing that are just difficult to parallelize. For example, standard Metropolis-Hastings MCMC with a single chain is difficult to parallelize in the general case because it is a naturally sequential algorithm. Parallelization of this type of method usually involves parallelization of the target density evaluation as in \cite{suchard_rambaut}, the sampling from or evaluation of the proposal density or computation of multiple possible execution paths as in \cite{brockwell} as opposed to parallelization of the general algorithm itself.
The availability of new hardware suited to parallel computation motivates use of a new model of computation for developing and analyzing the efficacy of statistical algorithms. In some cases, existing algorithms will require little modification to take advantage of this technology whilst in others major changes will have to be made. There is also the potential for previously impractical and novel algorithms to become important tools for statisticians.
\subsection{Parallel Random Number Generation}
One important aspect of any Monte Carlo simulation is the generation of pseudorandom numbers. Fortunately, many uniform pseudorandom number generators can be implemented efficiently in parallel. The key idea is that each thread computes a contiguous block of numbers within a single overall stream. The thread can jump to the start of its block of numbers using a ``skip-ahead'' algorithm which enables it to skip $n$ places in $O(\log n)$ operations (e.g. see \cite{esck02}). The uniform pseudorandom numbers can then be transformed to match various different output distributions as needed. In our applications we use a parallelized version of the multiple recursive generator MRG32k3a presented in \cite{ecuyer} as well as a parallelized version of a xorshift random number generator \cite{xorshift}. In the case of the xorshift random number generator, more time must be spent to compute the seeds for each thread before any computation is done but the random number generation itself is faster and the initialization can be done offline.
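To illustrate the skip-ahead idea on a much simpler generator than those used in our applications (the generator below is a generic 64-bit linear congruential generator, introduced purely for illustration), note that $n$ steps of $x_{k+1} = (a x_k + c) \bmod 2^{64}$ form an affine map whose coefficients can be built up by repeated squaring in $O(\log n)$ operations.
\begin{verbatim}
#include <stdint.h>

// Skip the LCG  x <- a*x + c (mod 2^64)  ahead by n steps in O(log n)
// operations.  After n steps x_n = A*x_0 + C (mod 2^64); (A, C) is built
// by repeatedly squaring the one-step affine map.  Unsigned 64-bit
// overflow provides the reduction mod 2^64.
uint64_t lcg_skip(uint64_t x, uint64_t a, uint64_t c, uint64_t n) {
    uint64_t A = 1, C = 0;            // identity map
    while (n > 0) {
        if (n & 1) {                  // fold in the current power of the map
            A = a * A;
            C = a * C + c;
        }
        c = (a + 1) * c;              // square the map: (a, c) -> (a^2, (a+1)c)
        a = a * a;
        n >>= 1;
    }
    return A * x + C;
}
\end{verbatim}
Each thread can call such a routine once, with $n$ equal to the offset of its block of numbers; generators such as MRG32k3a admit an analogous skip-ahead based on powers of their transition matrices (see \cite{esck02}).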
\section{Parallelizable Sampling Methods}
\label{section:methods}
In this section we consider a number of sampling methods for which parallel implementations can be produced without significant modification. There is an abundance of statistical problems that are essentially computational in nature, especially in Bayesian inference. In many such cases, the problem can be distilled into one of sampling from a probability distribution whose density $\pi$ we can compute pointwise and up to a normalizing constant, i.e. we can compute $\pi^{\ast}(\cdot)$ where $\pi(\bx)=\pi^{\ast}(\bx)/Z$. A common motivation for wanting samples from $\pi$ is so we can compute expectations of certain functions. If we denote such a function by $\phi$, the expectation of interest is
\[
I\eqdef\int_{\bx\in\X}\phi(\bx)\pi(\bx)d\bx
\]
The Monte Carlo estimate of this quantity is given by
\[
\hat{I}_{MC}\eqdef\frac{1}{N}\sum_{i=1}^{N}\phi(\bx^{(i)})
\]
where $\{\bx^{(i)}\}_{i=1}^{N}$ are samples from $\pi$.
Clearly, we need samples from $\pi$ in order to compute this estimate. In practice, however, we often cannot sample from $\pi$ directly. There are two general classes of methods for dealing with this. The first are importance sampling methods, where we generate weighted samples from $\pi$ by generating $N$ samples according to some importance density $\gamma$ proportional to $\gamma^{\ast}$ and then estimating $I$ via
\[
\hat{I}_{IS}\eqdef\sum_{i=1}^{N}W^{(i)}\phi(\bx^{(i)})
\]
where $W^{(i)}$ are normalized importance weights
\[
W^{(i)}=\frac{w(\bx^{(i)})}{\sum_{j=1}^{N}w(\bx^{(j)})}\text{ and } w(\bx^{(i)})=\frac{\pi^{\ast}(\bx^{(i)})}{\gamma^{\ast}(\bx^{(i)})}
\]
The asymptotic variance of this estimate is given by $C(\phi,\pi,\gamma)/N$, i.e. a constant over $N$. For many problems, however, it is difficult to come up with an importance density $\gamma$ such that $C(\phi,\pi,\gamma)$ is small enough for us to attain reasonable variance with practical values of $N$.
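For concreteness, the self-normalized estimator above can be sketched in a few lines of serial Python; the target and importance densities below are placeholders supplied by the user, and this is an illustration rather than our GPU code.
\begin{verbatim}
import numpy as np

def snis_estimate(log_pi_star, log_gamma_star, sample_gamma, phi, N, rng):
    # Self-normalized importance sampling: draw N samples from gamma,
    # weight by w = pi*/gamma*, normalize and average phi.
    x = sample_gamma(N, rng)                     # samples x^(1),...,x^(N)
    log_w = log_pi_star(x) - log_gamma_star(x)   # unnormalized log-weights
    log_w -= log_w.max()                         # stabilize exponentiation
    w = np.exp(log_w)
    W = w / w.sum()                              # normalized weights W^(i)
    return np.sum(W * phi(x))
\end{verbatim}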
The second general class of methods are MCMC methods, in which we construct an ergodic $\pi$-stationary Markov chain sequentially. Once the chain has converged, we can use all the dependent samples to estimate $I$. The major issue with MCMC methods is that their convergence rate can be prohibitively slow in some applications.
There are many ways to parallelize sampling methods that are not the focus of this work. For example, naive importance sampling, like classical Monte Carlo, is intrinsically parallel. Therefore, in applications where we have access to a good importance density $\gamma$ we can get linear speedup with the number of processors available. Similarly, in cases where MCMC converges rapidly we can parallelize the estimation of $I$ by running separate chains on each processor. While these situations are hoped for, they are not particularly interesting from a parallel architecture standpoint because they can run equally well in a distributed system. Finally, this paper is not concerned with problems for which the computation of individual MCMC moves or importance weights are very expensive but themselves parallelizable. While the increased availability of parallel architectures will almost certainly be of help in such cases, the focus here is on potential speedups by parallelizing general sampling methods. An example of recent work in this area can be found in \cite{suchard_rambaut}, in which speedup is obtained by parallelizing evaluation of individual likelihoods.
Much work in recent years has gone into dealing with the large constants in the variance of importance sampling estimates and slow convergence rates in MCMC and it is in these `advanced' Monte Carlo methods that we direct our interest. This is mainly because while they are parallelizable, they are not trivially so and stand to benefit enormously from many-core architectures. In the remainder of this section we briefly review three such methods: population-based MCMC, SMC and SMC samplers.
\subsection{Population-Based MCMC}
A common technique in facilitating sampling from a complex distribution $\pi$ with support in $\X$ is to introduce an auxiliary variable ${\bf a} \in \A$ and sample from a higher-dimensional distribution $\bar{\pi}$ with support in the joint space $\A \times \X$, such that $\bar{\pi}$ admits $\pi$ as a marginal distribution. With such samples, one can discard the auxiliary variables and be left with samples from $\pi$.
This idea is utilized in population-based MCMC, which attempts to speed up convergence of an MCMC chain for $\pi$ by instead constructing a Markov chain on a joint space $\X^{M}$ using $M-1$ auxiliary variables each in $\X$. In general, we have $M$ parallel `subchains' each with stationary distribution $\pi_{i},i\in\M\eqdef\{1,\ldots,M\}$ and $\pi_{M}=\pi$. Associated with each subchain $i$ is an MCMC kernel $L_{i}$ that leaves $\pi_{i}$ invariant, and which we run at every time step. Of course, without any further moves, the stationary distribution of the joint chain is
\[
\bar{\pi}(\bx_{1:M})\eqdef\prod_{i=1}^{M}\pi_{i}(\bx_{i})
\]
and so $\bx_{M}\sim\pi$. This scheme does not affect the convergence rate of the independent chain $M$. However, since we can cycle mixtures of $\bar{\pi} $-stationary MCMC kernels without affecting the stationary distribution of the joint chain \cite{tierney}, we can allow certain types of interaction between the subchains which can speed up convergence \citep{geyer,hukushima_nemoto}. In general, we apply a series of kernels that act on subsets of the variables. For the sake of clarity, let us denote the number of second-stage kernels by $R$ and the kernels themselves as $K_{1},\ldots,K_{R}$, where kernel $K_{j}$ operates on variables with indices in $\I_{j}\subset\M$. The idea is that the $R$ kernels are executed sequentially and it is required that each $K_{j}$ leave $\prod_{i\in\I_{j}}\pi_{i}$ invariant.
Given $\pi$, there are a wide variety of possible choices for $M$, $\pi_{1:M-1}$, $L_{1:M}$, $R$, $\I_{1:R}$ and $K_{1:R}$ which will affect the convergence rate of the joint chain. For those interested, \cite{ajay} gives a review of some of these. It is clear that the first stage of moves involving $L_{1:M}$ is trivially parallelizable. However, the second stage is sequential in nature. For a parallel implementation, it is beneficial for the $\I_{j}$'s to be disjoint as this allows the sequence of exchange kernels to be run in parallel. Of course, this implies that $\I_{1:R}$ should vary with time since otherwise there will be no interaction between the disjoint subsets of chains. Furthermore, if the parallel architecture used is SIMD (Single Instruction Multiple Data) in nature, it is desirable to have the $K_{j}$'s be nearly identical algorithmically. The last consideration for parallelization is that while speedup is generally larger when more computational threads can be run in parallel, it is not always helpful to increase $M$ arbitrarily as this can affect the convergence rate of the chain. However, in situations where a suitable choice of $M$ is dwarfed by the number of computational threads available, one can always increase the number of chains with target $\pi$ to produce more samples.
\subsection{Sequential Monte Carlo}
SMC methods are a powerful extension of importance sampling methodology that are particularly popular for sampling from a sequence of probability distributions. In the context of state-space models, these methods are known as particle filtering methods; \cite{smc_tut} and \cite{liu} give recent surveys of the field. In this context, let $\left\{ \bx_{t}\right\} _{t\geq0}$ be an unobserved Markov process of initial density $\bx_{0}\sim p_{0}(\cdot)$ and transition density $\bx_{t}\sim f(\cdot|\bx_{t-1})$ for $t\geq1$. We only have access to an observation process $\left\{ \by_{t}\right\} _{t\geq0}$; the observations are conditionally independent given $\left\{\bx_{t}\right\} _{t\geq0}$, with marginal density $\by_{t}\sim g(\by_{t} |\bx_{t})$ for $t\geq1$. SMC\ methods are used to approximate recursively in time the filtering densities $p(\bx_{0:t}|\by_{0:t})$ which are proportional to $p(\bx_{0:t},\by_{0:t})\eqdef p_{0}(\bx_{0})\prod_{i=1}^{t}f(\bx_{i} |\bx_{i-1})\prod_{i=1}^{t}g(\by_{i}|\bx_{i})$ for $t=1,\ldots,T$. These distributions are approximated with a set of random samples called particles through use of a sequential version of importance sampling and a special particle-interaction step known as resampling.
Parallelization of SMC methods is reasonably straightforward. The importance sampling step used at each time step is trivially parallelizable as it involves only the local state of a particle. The resampling step, in which some particles are replicated and others destroyed depending on their normalized importance weights, comprises the construction of an empirical cumulative distribution function for the particles based on their importance weights followed by sampling from this $N$ times, where $N$ is the fixed number of particles used throughout the computation. While neither of these tasks is trivially parallelizable, they can benefit moderately from parallelization. However, the bulk of the speedup will generally come from the parallelization of the evolution and weighting steps. Therefore, using criteria like effective sample size \citep{liu_chen} to avoid resampling at every time step can also improve speedup.
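As a point of reference, for multinomial resampling the empirical-CDF construction and lookup described above amount to a cumulative sum followed by $N$ binary searches; the serial Python sketch below is illustrative only, a parallel implementation would replace the cumulative sum by a parallel scan.
\begin{verbatim}
import numpy as np

def multinomial_resample(W, rng):
    # W: normalized importance weights of the N particles.
    # Build the empirical CDF (an inclusive scan), then map N uniforms
    # into it to obtain the indices of the particles to replicate.
    cdf = np.cumsum(W)
    cdf[-1] = 1.0                       # guard against round-off
    u = rng.uniform(size=len(W))
    return np.searchsorted(cdf, u)      # ancestor index for each offspring
\end{verbatim}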
\subsection{Sequential Monte Carlo Samplers}
SMC samplers \citep{smcs} are a more general class of methods that utilize a sequence of auxiliary distributions $\pi_{0},\ldots,\pi_{T}$, much like population-based MCMC as discussed in \cite{ajay}. However, in contrast to population-based MCMC, SMC samplers start from an auxiliary distribution $\pi_{0}$ and recursively approximate each intermediate distribution in turn until finally $\pi_{T}=\pi$ is approximated. The algorithm has the same general structure as classical SMC, with differences only in the types of proposal distributions, target distributions and weighting functions used in the algorithm. As such, parallelization of SMC samplers closely follows that of SMC.
The difference between population-based MCMC and SMC samplers is subtle but practically important. Both can be viewed as population-based methods on a similarly defined joint space since many samples are generated at each time step in parallel. However, in population-based MCMC the samples generated at each time each have different stationary distributions and the samples from a particular chain over time provide an empirical approximation of that chain's target distribution. In SMC samplers, the weighted samples generated at each time approximate one auxiliary target distribution and the true target distribution is approximated at the last time step. This difference is further discussed in Section \ref{section:comparison}.
\section{Canonical Examples}
To demonstrate the types of speed increase one can attain by utilizing GPUs, we apply each method to a representative statistical problem. We use Bayesian inference for a Gaussian mixture model as an application of the population-based MCMC and SMC samplers, while we use a factor stochastic volatility state-space model to gauge the speedup of our parallel SMC method. We ran our parallel code on a computer equipped with an NVIDIA 8800 GT GPU, a computer equipped with an NVIDIA GTX 280 GPU and we ran reference single-threaded code on a Xeon E5420 / 2.5 GHz processor. The resulting processing times and speedups are given in Tables \ref{tab:pop_mcmc} - \ref{tab:smc}.
The applications we discuss here are representative of the types of problems that these methods are commonly used to solve. In particular, while the distribution of mixture means given observations is only one example of a multimodal distribution, it can be thought of as a canonical distribution with multiple well-separated modes. Therefore, the ability to sample points from this distribution is indicative of the ability to sample points from a wide range of multimodal distributions. Similarly, performance of a latent variable sampler in dealing with observations from a factor stochastic volatility model is indicative of performance on observations from reasonably well-behaved but non-linear and non-Gaussian continuous state-space models.
\subsection{Mixture Modelling}
Finite mixture models are a very popular class of statistical models as they provide a flexible way to model heterogeneous data \citep{fmm}. Let $\by=y_{1:m}$ denote i.i.d. observations where $y_{j}\in\mathbb{R}$ for $j\in\{1,\ldots,m\}$. A univariate Gaussian mixture model with $k$ components states that each observation is distributed according to the mixture density
\[
p(y_{j}|\mu_{1:k},\sigma_{1:k},w_{1:k-1})=\sum_{i=1}^{k}w_{i}f(y_{j}|\mu
_{i},\sigma_{i})
\]
where $f$ denotes the density of the univariate normal distribution. The density of $\by$ is then equal to $\prod_{j=1}^{m}p(y_{j}|\mu_{1:k}, \sigma_{1:k}, w_{1:k-1})$.
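Each evaluation of this likelihood is a short product-sum over observations and components; the serial Python sketch of the log-likelihood below is illustrative only and is not the CUDA kernel used in our experiments.
\begin{verbatim}
import numpy as np

def mixture_log_likelihood(mu, y, sigma, w):
    # log p(y | mu) = sum_j log sum_i w_i N(y_j; mu_i, sigma_i^2)
    # mu, sigma, w: arrays of length k; y: array of length m.
    z = (y[:, None] - mu[None, :]) / sigma                  # (m, k)
    log_comp = (np.log(w) - np.log(sigma)
                - 0.5 * np.log(2 * np.pi) - 0.5 * z ** 2)   # log w_i + log N
    return np.sum(np.logaddexp.reduce(log_comp, axis=1))
\end{verbatim}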
For simplicity, we assume that $k$, $w_{1:k-1}$ and $\sigma_{1:k}$ are known and that the prior distribution on $\mu$ is uniform on the $k$-dimensional hypercube $[-10,10]^{k}$. We set $k=4$, $\sigma_{i}=\sigma=0.55$, $w_{i}=w=1/k$ for $i\in\{1,\ldots,k\}$. We simulate $m=100$ observations for ${\bm\mu}=\mu_{1:4}=(-3,0,3,6)$. The resulting posterior distribution for ${\bm\mu}$ is given by
\[
p({\bm\mu}|\by)\propto p(\by|{\bm\mu})\ind({\bm\mu}\in\lbrack-10,10]^{4})
\]
The main computational challenge associated to Bayesian inference in finite mixture models is the nonidentifiability of the components. As we have used exchangeable priors for the parameters $\mu_{1:4}$, the posterior distribution $p({\bm\mu}|\by)$ is invariant to permutations in the labelling of the parameters. Hence this posterior admits $k!=24$ symmetric modes.
Generating $N$ samples from such a posterior is a popular method for determining the ability of samplers to explore a high-dimensional space with multiple well-separated modes, which should all be represented in the samples. Basic random-walk MCMC and importance sampling methods typically fail to provide a correct approximation of the posterior for practical values of $N$ \citep{celeux}. It should be noted that while it might not be necessary to sample from all the symmetric modes in the case of a mixture model, the successful traversal of all the modes suggests that the sampler would succeed in traversing non-symmetric modes in other distributions, so long as symmetry is not exploited by the sampler.
\subsubsection{Population-Based MCMC}
\label{section:pb_mcmc}
We select the auxiliary distributions $\pi_{1:M-1}$ following the parallel tempering methodology, i.e. $\pi_{i}(\bx) \propto\pi(\bx)^{\beta_{i}}$ with $0 < \beta_{1} < \cdots< \beta_{M} = 1$ and use $M = 200$. This class of auxiliary distributions is motivated by the fact that MCMC converges more rapidly when the target distribution is flatter. For this problem, we use the cooling schedule $\beta_{i} = (i/M)^{2}$ and a standard $\N({\bm 0},I_{k})$ random walk Metropolis-Hastings kernel for the first stage moves.
For the second stage moves, we use only the basic exchange move \citep{geyer,hukushima_nemoto}: chains $i$ and $j$ swap their values with probability $\min\{1,\alpha_{ij}\}$ where
\[
\alpha_{ij}=\frac{\pi_{i}(\bx_{j})\pi_{j}(\bx_{i})}{\pi_{i}(\bx_{i})\pi_{j}(\bx_{j})}
\]
Further, we allow exchanges to take place only between adjacent chains so that all moves can be done in parallel. We use $R=M/2$ and $\I_{1:R}$ is either $\{\{1,2\},\{3,4\},\ldots,\{M-1,M\}\}$ or $\{\{2,3\},\{4,5\},\ldots,\{M-2,M-1\},\{M,1\}\}$, each with probability half. While use of permutation or crossover moves would be appropriate for this particular model, we felt that they would detract from the ability to generalize our results to the case where the likelihood is not invariant to permutations of the labels.
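A serial Python sketch of one such exchange stage for tempered targets $\pi_{i}\propto\pi^{\beta_{i}}$ is given below; it is an illustration only, the wrap-around pair $\{M,1\}$ is omitted for brevity, and in the parallel implementation each pair is handled by its own thread.
\begin{verbatim}
import numpy as np

def exchange_stage(x, log_pi_star, betas, offset, rng):
    # Pair adjacent chains (i, i+1) starting at `offset` (0 or 1) and swap
    # each pair with probability min(1, alpha_ij), where
    # alpha_ij = pi_i(x_j) pi_j(x_i) / (pi_i(x_i) pi_j(x_j)).
    # For tempered targets this reduces to
    # log alpha_ij = (beta_i - beta_j) * (log pi*(x_j) - log pi*(x_i)).
    M = len(betas)
    lp = np.array([log_pi_star(xi) for xi in x])
    for i in range(offset, M - 1, 2):            # disjoint pairs
        j = i + 1
        log_alpha = (betas[i] - betas[j]) * (lp[j] - lp[i])
        if np.log(rng.uniform()) < log_alpha:
            x[i], x[j] = x[j], x[i]
            lp[i], lp[j] = lp[j], lp[i]
    return x
\end{verbatim}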
\begin{table}[htp]
\small
\caption{Running times for the Population-Based MCMC Sampler for various numbers of chains $M$.
}
\label{tab:pop_mcmc}
\centering
\begin{tabular}{| c | c | c | c | c | c |}
\hline
$M$ & CPU (mins) & 8800GT (secs) & Speedup & GTX280 (secs) & Speedup\\
\hline
8 & 0.0166 & 0.887 & 1.1 & 1.083 & 0.9 \\
\hline
32 & 0.0656 & 0.904 & 4 & 1.098 & 4 \\
\hline
128 & 0.262 & 0.923 & 17 & 1.100 & 14 \\
\hline
512 & 1.04 & 1.041 & 60 & 1.235 & 51 \\
\hline
2048 & 4.16 & 1.485 & 168 & 1.427 & 175 \\
\hline
8192 & 16.64 & 4.325 & 230 & 2.323 & 430 \\
\hline
32768 & 66.7 & 14.957 & 268 & 7.729 & 527 \\
\hline
131072 & 270.3 & 58.226 & 279 & 28.349 & 572 \\
\hline
\end{tabular}
\end{table}
To test the computational time required by our algorithms we allow the number of chains to vary but fix the number of points we wish to sample from the marginal density $\pi_{M}=\pi$ at 8192. As such, an increase in the number of chains leads to a proportional increase in the total number of points sampled. Processing times for our code are given in Table \ref{tab:pop_mcmc}, in which one can see that using $131072$ chains is impractical on the CPU but entirely reasonable using the GPU. Figure \ref{fig:modes_12_mcmc} shows the estimated posterior density $p(\mu _{1:2}|\by)$ from a set of $2^{20}$ MCMC samples from $\pi_{M}$ with $M=32768$, which is nearly identical to the estimated marginal posterior densities of any other pair of components of ${\bm\mu}$. This marginal density has 12 well-separated modes in $\mathbb{R}^{2}$ but it is worth noting that the joint density $p(\mu_{1:4}|\by)$ has 24 well-separated modes in $\mathbb{R}^{4}$. Figure \ref{fig:bar_modes_mcmc} shows the number of points from each mode for various values of $M$. We also computed the average number of iterations taken for the samplers to traverse all modes for the different values of $M$. For $M = 1$ and $M = 2$, the sampler did not traverse all the modes at all, while for values of $M$ between 4 and 32 the traversal time decreased from 80000 to 10000, after which it was unchanged with increases in $M$. These numbers should be compared to $24\times H_{24} \approx 91$ - the expected number of samples required to cover every mode if one could sample independently from $\pi$ - where $H_{i}$ is the $i$th harmonic number.
\begin{figure}
\caption{Estimated marginal posterior density $p(\mu_{1:2}|\by)$}
\label{fig:modes_12_mcmc}
\end{figure}
\begin{figure}
\caption{Number of MCMC samples from each mode}
\label{fig:bar_modes_mcmc}
\end{figure}
\subsubsection{SMC Sampler}
As with population-based MCMC, we use a tempering approach and the same cooling schedule, i.e. $\pi_{t}(\bx)\propto\pi(\bx)^{\beta_{t}}$ with $\beta_{t}=(t/M)^{2}$ and $M=200$. We use the uniform prior on the hypercube to generate the samples $\{\bx_{0}^{(1:N)}\}$ and perform 10 MCMC steps with the standard $\N({\bm0},I_{k})$ random walk Metropolis-Hastings kernel at every time step. We use the generic backwards kernel suggested in \cite{crooks}, \cite{ais} and \cite{smcs} for the case where each kernel is $\pi_{t}$-stationary so that the unnormalized incremental importance weights are of the form $\pi_{t}(\bx_{t-1})/\pi_{t-1}(\bx_{t-1})$.
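Concretely, if the unnormalized tempered densities are taken to be $\pi_{t}^{\ast}=(\pi^{\ast})^{\beta_{t}}$ (an inessential convention, since constants cancel upon weight normalization), the incremental log-weight of a particle $\bx_{t-1}$ reduces to
\[
\log\frac{\pi_{t}^{\ast}(\bx_{t-1})}{\pi_{t-1}^{\ast}(\bx_{t-1})}=(\beta_{t}-\beta_{t-1})\log\pi^{\ast}(\bx_{t-1}),
\]
so each weighting step requires a single evaluation of $\log\pi^{\ast}$ per particle.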
\begin{table}[htp]
\small
\caption{Running times for the Sequential Monte Carlo Sampler for various values of $N$.
}
\label{tab:smcs}
\centering
\begin{tabular}{| c | c | c | c | c | c |}
\hline
$N$ & CPU (mins) & 8800GT (secs) & Speedup & GTX280 (secs) & Speedup\\
\hline
8192 & 4.44 & 1.192 & 223.5 & 0.597 & 446 \\
\hline
16384 & 8.82 & 2.127 & 249 & 1.114 & 475 \\
\hline
32768 & 17.7 & 3.995 & 266 & 2.114 & 502 \\
\hline
65536 & 35.3 & 7.889 & 268 & 4.270 & 496 \\
\hline
131072 & 70.6 & 15.671 & 270 & 8.075 & 525 \\
\hline
262144 & 141 & 31.218 & 271 & 16.219 & 522 \\
\hline
\end{tabular}
\end{table}
We also ran the SMC sampler with no resampling at all, which for these settings corresponds to the annealed importance sampling (AIS) method proposed in \cite{ais}; see also \cite{crooks} for a similar method in physics. The resulting samples were less successful at characterizing the full multi-modality of the posterior distribution. This is consistent with the numerical findings and theoretical results discussed in \cite[Sections 3.5 and 4.2.3]{smcs}: when the MCMC\ kernels used at every time step mix reasonably well, it is very beneficial to resample. In addition, when the effective sample size (ESS) is used as a threshold for resampling in the SMC sampler, the resampling step takes very little time compared to the evolution and weighting of the particles simply because it happens so infrequently. As such, the running times of the SMC sampler and AIS are practically identical. Processing times for our code are given in Table \ref{tab:smcs}. Figure \ref{fig:modes_12_smcs} shows the estimated posterior density $p(\mu_{1:2}|\by)$ from the SMC sampler with $N=65536$. Figure \ref{fig:bar_modes_smcs} shows the number of points from each mode for various values of $N$.
\begin{figure}
\caption{Estimated marginal posterior density $p(\mu_{1:2}|\by)$}
\label{fig:modes_12_smcs}
\end{figure}
\begin{figure}
\caption{Effective number of SMC samples from each mode}
\label{fig:bar_modes_smcs}
\end{figure}
\subsubsection{Comparison}
\label{section:comparison}
While both methods are capable of exploring the posterior distribution for ${\bm \mu}$, there are important differences in how the methods make use of parallelization. In particular, the SMC sampler parallelizes across particles approximating the same auxiliary distribution whilst the MCMC sampler parallelizes across auxiliary distributions at the same iteration. As such, to make full use of the graphics card the SMC sampler requires many particles while the MCMC sampler requires many auxiliary distributions. In most cases, however, one will be happy to use in excess of 8192 particles for SMC but one may not want to use in excess of 32768 auxiliary distributions. Indeed, for the application described above there seems to be no benefit in increasing the number of chains beyond 128, although this might also be due to the choice of cooling schedule and random walk variances. Furthermore, we utilized only the simplest information exchange and proposal moves in our samplers so as not to trivialize the problem. It should be noted, however, that there are situations in which a large number of intermediate temperatures are required for exchange acceptance probabilities to be greater than some preset value, for example when the dimension of the distribution of interest increases \cite{predescu}. As mentioned in Section \ref{section:pb_mcmc}, we would like to emphasize that the use of very large numbers of chains is possible using these parallel methods with only a modest increase in computation time.
The SMC sampler appears to be more efficient than the MCMC sampler for this problem. Indeed, with only 8192 particles the SMC sampler gives a reasonable representation of the posterior, taking only 597ms. The MCMC sampler requires around $2^{20}$ samples to give a reasonably uniform number of samples per mode, and this takes just over 2 minutes. In addition, although we have not done so here, it is possible with both the SMC and MCMC approaches to use the samples from the auxiliary distributions to estimate integrals of interest by computing appropriate importance weights. An interesting recent proposal on how to effectively combine estimates using such samples can be found in \cite{gramacy}.
For Bayesian inference in mixture models, there are many ways of dealing with the identifiability of the mixture parameters; \cite{jasra_thesis} includes a review of these. It is worth mentioning that for this type of model, we can permute samples as a post-processing step or within an MCMC kernel so traversal of the modes can be achieved trivially. The speedup of both methods is unaffected by the use of such mechanisms. In addition, the speedup is unaffected by increases in the number of observations since this only increases the amount of computation that each thread must do by a constant factor. Increasing the number of observations also has little effect on the difficulty for the sampler to move between modes since the modes are already well separated. Similarly, the speedup observed is robust to changes in the number of mixture components. The computation of each likelihood requires memory that is linear in the number of components, while the memory required per thread dictates the number of threads that can be run in parallel. However, as the number of components increases we usually have more observations, providing an opportunity to parallelize the computation of a single likelihood across multiple threads. This allows the amount of memory per thread to be significantly lower than if the complete likelihood was calculated by each thread and thus still allows many threads to be run in parallel.
\subsection{Factor Stochastic Volatility}
\label{section:fsv}
Many financial time series exhibit changing variance. A simple multivariate volatility model that allows us to capture the changing cross-covariance patterns of time series consists of using a dynamic latent factor model. In such models, all the variances and covariances are modelled through a low dimensional stochastic volatility structure driven by common factors \citep{fsv_liu,tvc_fsv}. We consider here a factor stochastic volatility model most similar to that proposed in \cite{fsv_liu}:
\begin{align*}
\by_{t} & \sim\N(\mathbf{B}\mathbf{f}_{t},{\bm\Psi})\\
\mathbf{f}_{t} & \sim\N(\mathbf{0},\mathbf{H}_{t})\\
{\bx}_{t} & \sim\N({\bm\Phi}{\bx}_{t-1},\mathbf{U})
\end{align*}
where
\begin{align*}
{\bm\Psi} & \eqdef\text{diag}(\psi_{1},\ldots,\psi_{M})\\
\mathbf{H}_{t} & \eqdef\text{diag}(\exp({\bx}_{t}))\\
{\bm\Phi} & \eqdef\text{diag}(\phi_{1},\ldots,\phi_{K})
\end{align*}
Here, $\mathbf{f}_{t}$ is $K$-dimensional, $\by_{t}$ is $M$-dimensional and $\mathbf{B}$ is an $M \times K$ factor loading matrix with zero entries above the diagonal for reasons of identifiability. The latent variable at each time step $t$ is the $K$-dimensional vector ${\bx}_{t}$. The likelihood of the data, $\by_{t}$, given ${\bx}_{t}$ is Gaussian with
\begin{align*}
\by_{t} | {\bx}_{t} \sim\N(\mathbf{0},\mathbf{B} \mathbf{H}_{t}
\mathbf{B}^{T} + {\bm \Psi})
\end{align*}
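For reference, a serial Python sketch of the corresponding per-particle log-weight is given below, using SciPy's multivariate normal density; the weighting kernel in our experiments is written in CUDA rather than calling such a routine.
\begin{verbatim}
import numpy as np
from scipy.stats import multivariate_normal

def fsv_log_weight(x_t, y_t, B, Psi):
    # log p(y_t | x_t) for the factor SV model:
    # y_t | x_t ~ N(0, B H_t B' + Psi) with H_t = diag(exp(x_t)).
    H_t = np.diag(np.exp(x_t))
    cov = B @ H_t @ B.T + Psi
    return multivariate_normal.logpdf(y_t, mean=np.zeros(len(y_t)), cov=cov)
\end{verbatim}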
We generate data for times $t = 1,\ldots,T = 200$, $M = 5$, $K = 3$, ${\bx}_{0} = \mathbf{0},\psi_{i} = 0.5, i \in\{1,\ldots,M\}$, $\phi_{i} = 0.9, i \in\{1,\ldots,K\}$,
\[
\mathbf{B} = \left(
\begin{array}
[c]{ccc}
1 & 0 & 0\\
0.5 & 1 & 0\\
0.5 & 0.5 & 1\\
0.2 & 0.6 & 0.3\\
0.8 & 0.7 & 0.5
\end{array}
\right) \text{ and } \mathbf{U} = \left(
\begin{array}
[c]{ccc}
0.5 & 0.2 & 0.1\\
0.2 & 0.5 & 0.2\\
0.1 & 0.2 & 0.5
\end{array}
\right)
\]
This is a simple example of a multivariate, non-linear and non-Gaussian continuous state-space model for which particle filters are commonly employed to sample from the posterior $p({\bx}_{0:T}|\by_{1:T})$. Processing times for our code are given in Table \ref{tab:smc}. In Figure \ref{fig:fsv_alpha} we plot the filter means for each component of ${\bx}$ with $\pm1$ sample standard deviations alongside the true values of ${\bx}$ used to generate the observations.
\begin{table}[htp]
\small
\caption{Running time (in seconds) for the Sequential Monte Carlo method for various values of $N$.
}
\label{tab:smc}
\centering
\begin{tabular}{| c | c | c | c | c | c |}
\hline
$N$ & CPU & 8800GT & Speedup & GTX280 & Speedup \\
\hline
8192 & 2.167 & 0.263 & 8 & 0.082 & 26 \\
\hline
16384 & 4.325 & 0.493 & 9 & 0.144 & 30 \\
\hline
32768 & 8.543 & 0.921 & 9 & 0.249 & 34 \\
\hline
65536 & 17.425 & 1.775 & 10 & 0.465 & 37 \\
\hline
131072 & 34.8 & 3.486 & 10 & 0.929 & 37 \\
\hline
\end{tabular}
\end{table}
\begin{figure}
\caption{Estimated and real values of $\bx$}
\label{fig:fsv_alpha}
\end{figure}
The speedups obtained in this application are considerably less than for the mixture model inference problem. This can be explained by lower arithmetic intensity, higher space complexity in each thread and an increased resampling rate as compared to the SMC sampler example above. The mixture model likelihood calculation contains a compute-intensive product-sum operation involving 104 values whilst the factor stochastic volatility likelihood consists mainly of matrix operations. In the latter case, the speedup is independent of $T$ but not of the dimension of the observations, since the amount of memory required per thread increases quadratically in the dimension of each observation. For example, we attained a speedup of 80 on the GTX 280 when running a particle filter for a multivariate stochastic volatility model with $M = K = 2$. The frequency of resampling is an issue with respect to speedup because the resampling step can typically only attain around a 10 to 20 fold speedup for practical values of $N$, mainly due to the parallel scan operation. This potentially gives rise to tradeoffs in speedup between the transition and weighting steps and the time between resampling steps for some models, since more sophisticated proposal distributions that parallelize less cleanly might reduce the resampling rate. This type of performance, however, still provides considerable speedup and may be more representative of the type of speedup practitioners can expect in general.
\subsection{Floating Point Precision}
For all three algorithms discussed above, we ran identical algorithms with the same random numbers on the CPU using double precision floating point numbers and the resulting estimates of expectations of interest were affected by an order of magnitude less than the Monte Carlo variance of the estimates. The actual paths sampled, of course, were different due to the sensitivity of all of the algorithms to perturbations but this did not affect the ability of the samples to approximate the target distribution.
\section{Discussion}
\label{section:discussion}
The speedup for the population-based MCMC algorithm and the SMC sampler is tremendous. In particular, the evaluation of $p(\by|{\bm \mu})$ for the mixture-modelling application has high arithmetic intensity since it consists of a product-sum operation with 400 Gaussian log-likelihood evaluations involving only 104 values. In fact, because of the low register and memory requirements, so many threads can be run concurrently that SIMD calculation of this likelihood can be sped up by 500 times on the 8800 GT and 800 times on the GTX 280. However, the speedup attained for the standard SMC algorithm may be more representative of the kinds of gains one can expect in most applications with only reasonable arithmetic intensity. Even so, speedups of 10 to 35 make many problems tractable that previously were not by reducing a week's worth of computation to a few hours. For example, estimation of static parameters in continuous state-space models or the use of SMC proposals within MCMC can require thousands of runs, so a speedup of this scale can substantially reduce the computation time of such approaches (see e.g. \cite{pmcmc}). It is worth noting also that we can expect speedups in the vicinity of 500 with SMC if few resampling steps are required and each weighting step has small space complexity and moderate time complexity.
The GTX 280 GPU outperforms the 8800 GT GPU by a factor of about 2 in all situations in which the GPU is used to capacity. This is the case in all but the population-based MCMC algorithm, in which the number of threads is determined by the number of auxiliary distributions. The reason for this is simple: the algorithms presented are register-bound on the inputs given, in that the number of registers required by each thread is the critical quantity that bounds the number of threads that can be run concurrently. The GTX 280 has twice the number of registers per multiprocessor and more than twice the multiprocessors compared to the 8800 GT. Hence, one could expect more speedup on many-core chips with even more registers. In fact, further improvements could be made using multiple cards with large amounts of memory, configurations of which are now available in NVIDIA's Tesla line. These Tesla `personal supercomputers' comprise 3 or more high-performance GPUs, each with 4GB of memory and a CPU with at least as much memory as the GPUs' combined. It is also possible to design algorithms that are memory-bound, though we have not encountered this in the context of Monte Carlo simulation. It is certainly possible that both register and memory limitations can affect the parallelizability of the mentioned algorithms when facing very high-dimensional problems. However, in such cases it is possible that alternative uses of many-core architecture can provide speedup in these situations.
The acceleration of the Monte Carlo methods discussed here has practical benefits not only for computation time but also for energy efficiency. A general purpose CPU allocates extra circuits, and hence power, to flow control and caching, which is unnecessary for the types of computation described here. As such, reasonable decreases in power consumption can be realized by using specialized many-core architectures like SIMD instead.
It should be noted that while we have used CUDA to implement the parallel components of algorithms, the results are not necessarily specific to this framework or to GPUs. It is expected that the many-core processor market will grow and there will be a variety of different devices and architectures to take advantage of. However, the SIMD parallelization technique and the sacrifice of caching and flow control for arithmetic processing is unlikely to disappear, particularly because when it is well-suited to a problem it will nearly always deliver considerable speedup. In addition, GPUs are affordable, off-the-shelf components that can be easily installed on a personal computer. Of particular interest is an emerging framework, the Open Computing Language (OpenCL), which provides a uniform programming environment for developers that enables them to write portable code for a variety of parallel devices, including GPUs and CPUs. For users who would like to see moderate speedup with very little effort, there is work being done to develop libraries that will take existing code and automatically generate code that will run on a GPU. An example of this is the Jacket engine for MATLAB code, created by Accelereyes.
The speedups attainable with many-core architectures have broad implications in the design, analysis and application of SMC and population-based MCMC methods. With respect to SMC, it allows more particles to be used for the same or even less computation time, which can make these samplers viable where they previously were not. When faced with designing a population-based MCMC sampler, the results expectedly show that there is little cost associated with increasing the number of auxiliary distributions until the GPU reaches the critical limit of threads it can run concurrently. After this, there is a doubling in the computation time when the number of chains is doubled. In our application, this does not occur until we have around 4096 auxiliary distributions. One might notice that this number is far larger than the number of processors on the GPU. This is due to the fact that even with many processors, significant speedup can be attained by having a full pipeline of instructions on each processor to hide the relatively slow memory reads and writes. Of course, we can expect this application-specific number to decrease when dealing with higher-dimensional distributions or those whose density evaluations require more registers or memory. Nevertheless, practitioners have more freedom to increase the number of auxiliary distributions to achieve a faster rate of convergence as the computation time associated with each step is not as closely tied to this value as it is on a single-core processor. In both SMC and MCMC, it is also clear from this case-study that it is beneficial for each thread to use as few registers as possible since this determines the number of threads that can be run simultaneously. This may be of interest to the methodology community since it creates a space-time tradeoff that might be exploited in some applications.
A consequence of the space-time tradeoff mentioned above is that methods which require large numbers of registers per thread are not necessarily suitable for parallelization using GPUs. For example, operations on large, dense matrices that are unique to each thread can restrict the number of threads that can run in parallel and hence dramatically affect potential speedup. In cases where data is shared across threads, however, this is not an issue. For example, a mixture model with large amounts of data does not affect the number of registers required whilst increasing the number of components increases the number of registers required only linearly. In contrast, increasing the number of observed assets in a factor stochastic volatility model leads to a quadratic increase in the number of registers required, substantially affecting scalability in this regard. An increase in the number of observations itself has no effect on speedup. In principle, it is not the size of the data that matters but the space complexity of the algorithm in each thread that dictates how scalable the parallelization is.
The parallelization of the advanced Monte Carlo methods described here opens up challenges for both practitioners and algorithm designers. There is already an abundance of statistical problems being solved computationally, and technological advances, if taken advantage of by the community, can serve to make previously impractical solutions eminently reasonable and motivate the development of new methods.
\section*{Acknowledgments}
The authors acknowledge support from the Oxford-Man Institute for Quantitative Finance and the Medical Research Council. Anthony Lee is additionally funded by a Clarendon Fund Scholarship and Christopher Yau is funded by a UK Medical Research Council Specialist Training Fellowship in Biomedical Informatics (Ref No. G0701810).
\appendix
\section*{Appendix: Web Resource}
We have created a website resource at \url{http://www.oxford-man.ox.ac.uk/gpuss/} for the statistics community with the code used in these examples as well as useful information on GPU programming for statistical computing with links to tutorials and relevant papers.
\end{document}
\begin{document}
\title[]{Multifractal analysis of some multiple ergodic averages}
\author{Ai-Hua FAN, J\"{o}rg SCHMELING and Meng WU}
\date{
}
\address{A. F. Fan: LAMFA, UMR 7352 (ex 6140) CNRS, Universit\'e de Picardie, 33 rue
Saint Leu, 80039 Amiens, France.
E-mail: [email protected]}
\address{J. Schmeling: MCMS,
Lund Institute of Technology, Lund University Box 118 SE-221 00
Lund, Sweden.
E-mail: [email protected]}
\address{M. Wu: LAMFA, UMR 7352 (ex 6140) CNRS, Universit\'e de Picardie, 33 rue
Saint Leu, 80039 Amiens, France.
E-mail: [email protected]}
\subjclass{}
\keywords{Multifractal, multiple ergodic average, Hausdorff dimension}
\maketitle
\begin{abstract}
In this paper we study the multiple ergodic averages $$
\frac{1}{n}\sum_{k=1}^n \varphi(x_k, x_{kq}, \cdots, x_{k q^{\ell-1}}),
\qquad (x_n) \in \Sigma_m
$$ on the
symbolic space $\Sigma_m =\{0, 1, \cdots, m-1\}^{\mathbb{N}^*}$ where $m\ge 2, \ell\ge 2, q\ge 2$ are integers. We give
a complete solution to the problem of multifractal analysis of the limit of the above multiple ergodic averages.
Actually we develop a non-invariant
and non-linear version of thermodynamic formalism which is of independent interest. We study a large class of measures (called telescopic measures); the special case of telescopic measures defined by the fixed points of some non-linear transfer operators plays a crucial role in studying our multiplicatively invariant sets. These measures share many properties with Gibbs measures in the classical thermodynamic formalism. Our work also concerns the variational principle, the pressure function and the Legendre transform in this new setting.
\end{abstract}
\markboth{Multifractal analysis of some multiple ergodic averages}{Ai-Hua FAN, J\"{o}rg SCHMELING and Meng WU}
\section{Introduction}\label{int}
Let $(X,T)$ be a topological dynamical system where $T$ is a
continuous map on a compact metric space $X$. F\"{u}rstenberg had
initiated the study of the {\em multiple ergodic average}:
\begin{equation}\label{Furstenberg}
\frac{1}{n}\sum_{k=1}^nf_1(T^kx) f_2(T^{2k}x)\cdots f_s(T^{s k}x)
\end{equation}
where $f_1,\cdots ,f_s$ are $s$ continuous functions on $X$ with
$s\geq 2$ when he gave a proof of the existence of arithmetic sequences of arbitrary length amongst
sets of integers with positive density (\cite{Furstenberg}). Later on, this kind of average has
attracted a lot of attention (see e.g. \cite{Bergelson,Bour,Assani,HK}).
The authors in \cite{FLM} have recently proposed to analyze such
multiple ergodic averages from the point of view of
multifractal analysis. They have succeeded in a very special case
where $(X,T)$ is the shift dynamics on symbolic space and $f_1,
\cdots, f_s$ are Rademacher functions on the symbolic space viewed
as an additive group. It is a challenge to solve the problem in its generality.
In the present paper, we shall consider the problem for the shift dynamics
and for a class of functions $f_1, \cdots, f_s$. The setting is
as follows. Let $S=\{0,\cdots,m-1\}$ be a set of $m$ symbols \
($m\geq 2$). Consider the shift map $T$ on the symbolic space
$X=\Sigma_m=S^\mathbb N$. Fix two integers $q\geq 2$ and $\ell\ge 2$. For
any given $\ell$ continuous functions $g_1, g_2, \cdots, g_\ell$
defined on $X$, we consider the multiple ergodic average
$$A_n(g_1,g_{2}\cdots, g_{\ell})(x)=\frac{1}{n}\sum_{k=1}^n
g_1(T^kx)g_{2}(T^{kq}x)\cdots g_{\ell}(T^{kq^{\ell-1}}x).$$
This is a special case of (\ref{Furstenberg}) with $s=q^{\ell-1}$,
$f_{q^{j-1}} =g_{j}$ for $1\le j\le \ell$ and $f_k =1$ for the other $k$'s. Furthermore
we assume that the functions $g_1,g_{2},\cdots, g_{\ell}$ depend
only on the first coordinate of $x=(x_k)_{k\ge 0} \in \Sigma_m$.
So, under this assumption on the $g_j$'s we have
\begin{equation}\label{MEA1}
A_n(g_1,g_{2}\cdots,
g_{\ell})(x)=\frac{1}{n}\sum_{k=1}^n
g_1(x_k)g_{2}(x_{kq})\cdots g_{\ell}(x_{kq^{\ell-1}}).
\end{equation}
For the time being, there is no idea for the multifractal analysis of (\ref{Furstenberg})
in its general form. So we are content with investigating
the special case (\ref{MEA1}). Actually we can do a little more.
Given a function $\varphi: S^\ell \rightarrow \mathbb{R}$ we shall study
\begin{equation}\label{GMEA}
A_n\varphi(x)=\frac{1}{n}\sum_{k=1}^n
\varphi(x_k, x_{qk}, \cdots, x_{q^{\ell-1}k}).
\end{equation}
The average in (\ref{MEA1}) corresponds to the special case of (\ref{GMEA}) with $\varphi=g_1\otimes\cdots \otimes g_{\ell}$.
For $\alpha\in \mathbb R$, we define
$$E(\alpha)=\left\{x\in \Sigma_m : \lim_{n\rightarrow\infty}A_n\varphi(x)=\alpha\right\}.$$
Our problem is to determine the Hausdorff dimension of $E(\alpha)$.
The problem is classical when $\ell=1$ and the answers are well
known (see e.g. \cite{FFW,FLP,BSS,Barreira}). Let
$$\alpha_{\min}=\min_{a_1,\cdots,a_{\ell}\in
S }\varphi(a_1,\cdots,a_{\ell}), \quad
\alpha_{\max}=\max_{a_1,\cdots,a_{\ell}\in
S}\varphi(a_1,\cdots,a_{\ell}).$$ We assume that
$\alpha_{\min}<\alpha_{\max}$ (otherwise $\varphi$ is constant and
the problem is trivial).
Let $\mathcal{F}(S^{\ell-1}, \mathbb{R}^+)$ be the cone of
functions defined on $S^{\ell-1}$ taking non-negative real values. For any $s \in
\mathbb{R}$, consider the transfer operator $\mathcal{L}_s$ defined on
$\mathcal{F}(S^{\ell-1}, \mathbb{R}^+)$ by
\begin{equation}\label{transer-operator}
\mathcal{L}_s \psi (a)
= \sum_{j \in S} e^{s \varphi(a, j)}
\psi (Ta, j)
\end{equation}
where $T: \ S^{\ell-1}\rightarrow S^{\ell-2}$ is defined by $T(a_1,\cdots,a_{\ell-1})=(a_2,\cdots,a_{\ell-1})$.
We also consider the non-linear operator $\mathcal{N}_s$ on $\mathcal{F}(S^{\ell-1}, \mathbb{R}^+)$ defined by
$$
\mathcal{N}_s \psi (a)= (\mathcal{L}_s \psi (a))^{1/q}.
$$
We shall
prove that the equation
\begin{equation}\label{transer_equation}
\mathcal{N}_s \psi_s = \psi_s
\end{equation}
admits a unique strictly positive solution $\psi_s=\psi_s^{(\ell -1)} : S^{\ell-1}\rightarrow
\mathbb{R}_+^*$ (see Section \ref{section nonlinear equa}, Theorem \ref{existence-unicity trans-equ}).
The function $\psi_s$ is defined on $S^{\ell-1}$. We extend it on $S^k$ for all $1\le k \le \ell -2$ by induction:
\begin{equation}\label{transer_equation 2}
\psi_s^{(k)} (a)=\left(\sum_{j \in S} \psi_s^{(k+1)} (a, j)\right)^{\frac{1}{q}}, \ \ (a\in S^{k}).
\end{equation}
For simplicity, we will simply write $\psi_s(a)=\psi_s^{(k)}(a)$ for $a\in S^k$ with $1\leq k\leq \ell-1$. So, $a\mapsto \psi_s(a)$ is not only defined on $S^{\ell-1}$ but on $\bigcup_{1\leq k\leq \ell-1}S^k$.
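For orientation, consider the simplest case $\ell=2$ (an illustrative specialization, not needed in the sequel). Then $\psi_s$ is a function on $S$ and the fixed point equation (\ref{transer_equation}) reads
$$
\psi_s(a)^q=\sum_{j\in S}e^{s\varphi(a,j)}\psi_s(j) \qquad (a\in S).
$$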
Then we define the pressure function by
\begin{equation}\label{pressure function}
P_{\varphi}(s) = (q-1)q^{\ell-2} \log \sum_{j\in S}\psi_s(j).
\end{equation}
Throughout this paper, $\log$ means the natural logarithm.
We will prove that $P_{\varphi}(s)$ is an analytic convex function of $s\in
\mathbb{R}$ and even strictly convex since $\alpha_{\min}
<\alpha_{\max}$. The Legendre transform of $P_{\varphi}$ is defined
as $$ P^*_{\varphi}(\alpha)=\inf_{s\in \mathbb{R}}(-s\alpha+P_{\varphi}(s)).
$$
We denote by $L_{\varphi}$ the set of $\alpha\in \mathbb R$ such that
$E(\alpha)\neq \emptyset$. One of the main results of the paper is stated as follows.
\begin{theorem}\label{thm principal}
We have
$$L_{\varphi}=[P'_{\varphi}(-\infty),P'_{\varphi}(+\infty)].$$
If
$\alpha =P'_{\varphi}(s_\alpha)$ for some $s_\alpha \in
\mathbb R\cup\{-\infty,+\infty\}$, then $E(\alpha)\neq \emptyset$ and the
Hausdorff dimension of $E(\alpha)$ is equal to
$$\dim_H E(\alpha)=\frac{P_{\varphi}^*(\alpha)}{q^{\ell-1}\log m}.$$
\end{theorem}
This result was announced for $\ell=2$ in \cite{FSW}.
It is obvious that $L_{\varphi}\subset[\alpha_{\min},\alpha_{\max}]$. In general, this inclusion is strict. In fact, we have the following criterion for $L_{\varphi}=[\alpha_{\min},\alpha_{\max}]$.
\begin{theorem}\label{critere cercle}
We have the equality $$P'_{\varphi}(-\infty)=\alpha_{\min}$$ if and only if there exists an $x=(x_i)_{i=1}^\infty\in \Sigma_m$ such that $$\forall k\geq 1,\ \varphi(x_k,x_{k+1},\cdots,x_{k+\ell-1})=\alpha_{\min}.$$ We have an analogous criterion for
$P'_{\varphi}(+\infty)=\alpha_{\max}.$
\end{theorem}
Let us look at the definition of
$$A_n\varphi(x) =\frac{1}{n}\sum_{k=1}^n
\varphi(x_k, x_{kq}, \cdots, x_{k q^{\ell-1}}).
$$
One of the key points in our study of the problem is the observation that the coordinates $x_1,\cdots , x_n,\cdots $ of $x$ appearing in the definition of $A_n\varphi(x)$ share the following independence. This observation was first exploited in \cite{FLM} in order to compute the Box dimension of some subset of $E(\alpha_{\min})$. Consider the following partition of $\mathbb N^*$:
$$
\mathbb N^*=\bigsqcup_{i\geq 1,q\nmid i}\Lambda_i\ \ {\rm with}\ \Lambda_i=\{iq^j\}_{j\ge 0}.
$$
Observe that if $k=iq^j$ with $q\nmid i$, then $\varphi(x_k,x_{kq},\cdots ,x_{kq^{\ell-1}})$ depends only on $x_{|_{\Lambda_i}}$, the restriction of $x$ on $\Lambda_i$. So the summands in the definition of $A_n\varphi(x)$ can be put into different groups, each of which depends on one restriction $x_{|_{\Lambda_i}}$. For this reason, we decompose $\Sigma_m$ as follows:
$$
\Sigma_m=\prod_{i\geq1,q\nmid i}S^{\Lambda_i}.
$$
Let $\mu$ be a probability measure on $\Sigma_m$. Notice that $S^{\Lambda_i}$ is nothing but a copy of $\Sigma_m$.
We consider $\mu$ as a measure on $S^{\Lambda_i}$ for every $i$ with $q\nmid i$. Then we define the infinite product measure $\mathbb P_\mu$ on $\prod_{i\geq1,q\nmid i}S^{\Lambda_i}$ of the copies of $\mu$. More precisely, for any word $u$ of length $n$ we define
$$
\mathbb P_{\mu}([u])=\prod_{i\leq n,q\nmid i}\mu([u_{|_{\Lambda_i}}]),
$$
where $[u]$ denotes the cylinder of all sequences starting with $u$. Then $\mathbb P_{\mu}$ is a probability measure on $\Sigma_m$ and we call it a {\em telescopic product measure}. Kenyon, Peres and Solomyak \cite{KPS,KPS1} used this kind of measures to compute the Hausdorff dimension of sets like $\{x=(x_n)_{n\ge 1}\in \Sigma_2: \forall k\ge 1, x_kx_{2k}=0\}$
which was proposed in \cite{FLM}.
A class of measures $\mathbb P_\mu$ will play the same role as Gibbs measures played in the study of simple ergodic averages ($\ell=1$).
Concerning the dimension of $\mathbb P_\mu$ (see \cite{Fan1994} for the dimension of a measure), we have the following result
which is one of the main ingredients of the proof of the main result (Theorem \ref{thm principal}) and which has its own interest. A measure $\nu$ on $\Sigma_m$ is said to be exact if there exists an $\alpha\in \mathbb R$ such that
$$ \lim_{n\rightarrow \infty}\frac{-\log_m\nu ([x_{|_n}])}{n}=\alpha,\ \nu{\rm -a. e.}$$ This value $\alpha$ is the dimension of $\nu$.
\begin{theorem}\label{dim-meas} For any given measure $\mu$,
the telescopic product measure $\mathbb{P}_\mu$ is exact and its dimension is equal to
$$\dim_{H}\mathbb P_{\mu}=\frac{(q-1)^2}{\log m}\sum_{k=1}^{\infty}\frac{H_k(\mu)}{q^{k+1}}$$
where
$$H_k(\mu)=-\sum_{a_1,\cdots,a_k\in S}\mu([a_1\cdots a_k])\log \mu([a_1\cdots a_k]).
$$
\end{theorem}
A similar formula for some special $\mathbb P_{\mu}$ has appeared in \cite{KPS1}.
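As a quick consistency check (an illustration, not part of the statement), suppose that $\mu$ is a Bernoulli product measure on $\Sigma_m$. Then $H_k(\mu)=kH_1(\mu)$, and since $\sum_{k\ge 1}kq^{-(k+1)}=(q-1)^{-2}$, the formula gives $\dim_H\mathbb P_\mu=H_1(\mu)/\log m$; this is as expected because in this case $\mathbb P_\mu=\mu$, the coordinates being independent under both measures.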
Another ingredient of the proof of Theorem \ref{thm principal} is a law of large numbers relative to the probability
$\mathbb P_\mu$.
We consider $(\prod_{i\geq1,q\nmid i}S^{\Lambda_i},\mathbb P_\mu)$ as a probability space $(\Omega,\mathbb P_\mu)$. Let $(F_k)_{k\ge 1}$ be a sequence of functions defined on $\Sigma_m$. For each $k$, there exists a unique integer $i(k)$ with $q\nmid i(k)$ such that $k=i(k)q^j$ for some $j\geq 0$. Then
$$
x \mapsto F_k(x_{|_{\Lambda_{i(k)}}})
$$
defines a random variable on $\Omega$. Concerning the sequence of random variables $\left\{F_k(x_{|_{\Lambda_{i(k)}}})\right\}$, we have the following law of large numbers.
\begin{theorem}\label{LLN}
Let $(F_k)_{k\ge 1}$ be a sequence of functions defined on $\Sigma_m$. Suppose that there exist \ $C>0$ and $0<\eta<q^{3/2}$ such that for any $i\geq 1$ with $q\nmid i$, any $j_1,j_2\in \mathbb N$, we have
\begin{equation}\label{condition2}
{\rm cov}_{\mu}\left(F_{iq^{j_1}}(x), F_{iq^{j_2}}(x)\right)\leq C\eta^{\frac{j_1+j_2}{2}}.
\end{equation}
Then for $\mathbb P_{\mu}-$a.e. $x\in \Sigma_{m}$
$$\lim_{n\rightarrow\infty}\frac{1}{n}\sum_{k=1}^{n}\left(F_k(x_{|_{\Lambda_{i(k)}}})-\mathbb E_{\mu}F_k(x)\right)=0. $$
\end{theorem}
We observe that the set $E(\alpha)$ is not invariant. So it is not a standard set studied
from the classical dynamical system point of view. Actually, as we shall see, in general the dimension of the set $E(\alpha)$ cannot be
described by invariant measures supported on it. This is confirmed by the following result.
Let $f_1$ and $f_2$ be two real-valued functions defined on $\Sigma_m$. For $\alpha\in \mathbb{R}$, let $E(\alpha)$ be
the set of all points $x$ such that
$$
\lim_{n \rightarrow \infty}\frac{1}{n}\sum_{k=1}^n f_1(T^k x) f_2(T^{2k}x) = \alpha.
$$
We describe the size of the invariant part of $E(\alpha)$ by
$$
F_{\rm inv}(\alpha) = \sup \left\{\dim \mu: \mu \ \mbox{\rm ergodic}, \mu(E(\alpha))=1\ \right\}.
$$
\begin{theorem}\label{invariant} Let $f_1$ and $f_2$ be two H\"{o}lder continuous functions on $\Sigma_m$.
If $E(\alpha)$ supports an ergodic measure, then
$$
F_{\rm inv}(\alpha)
= \sup \left\{\dim \mu: \mu \ \mbox{\rm ergodic}, \int f_1 d\mu \int f_2 d\mu = \alpha \ \right\}.
$$
\end{theorem}
It is interesting to compare this result with the level sets of $V$-statistics studied in \cite{FSW_V}.
We return to the above theorem. A remarkable corollary is that when $f_1=f_2$, we must have $\alpha\ge 0$
if $E(\alpha)$ supports an ergodic measure, or even an invariant measure (using Jacobs' entropy decomposition).
Therefore, it is possible that for some $\alpha<0$, $E(\alpha)$ has strictly positive Hausdorff dimension but does not carry any invariant measure.
The paper is organized as follows. In Section 2, we first construct a class of measures, called telescopic product measures, part of which will play the same role
as Gibbs measures played in the classical theory. This construction is inspired by Kenyon-Peres-Solomyak \cite{KPS} (also see \cite{KPS1}). Then we establish a law of large numbers relative to such a telescopic product measure.
Telescopic product measures constitute a new object of study. In Section 3, we prove that any telescopic product measure is exact and we obtain
a formula for its dimension. In Section 4, we study a non--linear transfer operator and we prove the existence and the uniqueness of its positive solution.
We also prove the analyticity and the convexity of the solution as a function of its parameter $s$. Each solution defines a Markov measure associated to which is a telescopic product measure. The last measure plays the role of a Gibbs measure in our study of $E(\alpha)$.
Section 5 is devoted to the properties of the pressure function: a Ruelle type formula says that the limit in the law of large numbers is the derivative
of the pressure; the pressure function is an analytic and strictly convex function (except the trivial case); the extreme values of the derivative of the
pressure are studied. In Section 6, we establish the Gibbs property of the telescopic product measures defined by the solution of the non--linear transfer operator. After all these preparations, many of which have their own interests, we prove the main theorem (Theorem~\ref{thm principal}) in Section 7.
In Section 8, we discuss the invariant part of $E(\alpha)$.
Some concrete examples are presented in Section 9.
In the final section, we make some remarks and present some unsolved problems.
{\em Acknowledgement:} The authors would like to thank B. Host for his interest in the work and especially for his remarks, some of which are contained
in Section 8.
\section{Telescopic product measures and LLN \label{section2}}
In this section, we will study telescopic product measures and establish a law of large numbers (LLN).
These measures, which take into account the multiplicative structure of the multiple ergodic averages $A_n\varphi(x)$, will play
the same role as Gibbs measures played in the study of simple ergodic averages. In the next section, we will prove that $\mathbb{P}_\mu$ is
exact and its dimension is equal to
$$\dim_{H}\mathbb P_{\mu}=\frac{(q-1)^2}{\log m}\sum_{k=1}^{\infty}\frac{H_k(\mu)}{q^{k+1}}$$
where
$$H_k(\mu)=-\sum_{a_1,\cdots,a_k\in S}\mu([a_1\cdots a_k])\log \mu([a_1\cdots a_k]).
$$
We could call $H_k$ the $k$-th entropy of $\mu$. But we should point out that $\mu$
is not assumed to be invariant and that $\mathbb{P}_\mu$ is not invariant either.
\subsection{Telescopic product measures}
Let us recall the definition of the telescopic product measure $\mathbb P_\mu$. Consider the following partition of $\mathbb N^*$:
$$
\mathbb N^*=\bigsqcup_{i\geq 1,q\nmid i}\Lambda_i\ \ {\rm with}\ \Lambda_i=\{iq^j\}_{j\ge 0}.
$$
Then we decompose $\Sigma_m$ as follows:
$$
\Sigma_m=\prod_{i\geq1,q\nmid i}S^{\Lambda_i}.
$$
Let $\mu$ be a probability measure on $\Sigma_m$.
We consider $\mu$ as a measure on $S^{\Lambda_i}$, which is identified with $\Sigma_m$, for every $i$ with $q\nmid i$. Then we define the infinite product measure $\mathbb P_\mu$ on $\prod_{i\geq1,q\nmid i}S^{\Lambda_i}$ of the copies of $\mu$. More precisely, for any word $u$ of length $n$ we define
$$
\mathbb P_{\mu}([u])=\prod_{i\leq n,q\nmid i}\mu([u_{|_{\Lambda_i}}]),
$$
where $[u]$ denotes the cylinder of all sequences starting with $u$.
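To fix ideas, here is a small worked instance (an illustration, not part of the construction itself): take $q=2$ and a word $u$ of length $5$. The relevant sets are $\Lambda_1=\{1,2,4,8,\cdots\}$, $\Lambda_3=\{3,6,12,\cdots\}$ and $\Lambda_5=\{5,10,\cdots\}$, so that
$$
\mathbb P_{\mu}([u_1u_2u_3u_4u_5])=\mu([u_1u_2u_4])\,\mu([u_3])\,\mu([u_5]).
$$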
We consider $(\Sigma_{m},\mathbb P_{\mu})$ as a probability space. Let $X_{k}(x)=x_{k}$ be the $k$-th coordinate projection. For each $i$ with $q\nmid i$, consider the process $Y^{(i)}=(X_k)_{k\in \Lambda_i}$. Then, by the definition of $\mathbb P_{\mu}$, the following fact is obvious.
\begin{lemma}\label{prop ind}
The processes $Y^{(i)}=(X_k)_{k\in \Lambda_i}$ for different $i\geq 1 $ with $q\nmid i$ are $\mathbb P_{\mu}$-independent and identically distributed with
$\mu$ as the common probability law.
\end{lemma}
As we shall see, the behaviour of $A_n\varphi(x)$ as $n \rightarrow \infty$ will be described by measures $\mathbb P_{\mu}$ with particular choices of $\mu$. It is natural that $\mathbb P_\mu$ strongly depends on the above partition of $\mathbb N^*$. The following details of the partition will be useful.
Fix $n\in \mathbb N^*$. Let $$\Lambda_i(n)=\Lambda_i\cap \{1,\cdots,n\}. $$
We are going to examine the cardinality $\sharp\Lambda_i(n)$, called the length of $\Lambda_i(n)$, and the number $N(n,q,k)$ of $\Lambda_i(n)$'s of a given length $k$.
\begin{lemma}\label{lem dec}\ Let $k, n \in \mathbb N^*$.\\
\indent {\rm
(1)} $\sharp \Lambda_i(n)=k$ if and only if $\frac{n}{q^k}<i\leq \frac{n}{q^{k-1}}.$ Consequently we have $$\sharp \Lambda_i(n)=\left\lfloor\log_q\frac{n}{i}\right\rfloor+1.$$ \\
\indent {\rm (2)} We have the partition
$$\{1,\cdots,n\}=\bigsqcup_{k=1}^{\lfloor \log_q
n\rfloor+1}\ \ \bigsqcup_{\frac{n}{q^k} <i\leq \frac{n}{q^{k-1}}, q\nmid
i}\Lambda_i(n).$$
\indent {\rm (3)} $N(n,q,k)$ is the number of $i$'s such that $q\nmid i$ and $\frac{n}{q^k} <i\leq \frac{n}{q^{k-1}}$.
We have
$$ \left| \frac{N(n,q,k)}{n}-\frac{(q-1)^2}{q^{k+1}}\right| \le \frac{4}{n}.$$
\end{lemma}
\begin{proof}
(1) It is simply because $\sharp \Lambda_i(n)=k$ means that $$\Lambda_{i}(n)=\{i,iq,\cdots,iq^{k-1}\}\ { \rm with }\ iq^{k-1}\leq n<iq^{k}.$$\\
(2) We have the obvious partition $$\{1,\cdots , n\}=\bigsqcup_{i\leq n,q\nmid i}\Lambda_{i}(n).$$ Then we collect the $\Lambda_i(n)$'s according to their lengths. By (1), we have $1\leq\sharp\Lambda_{i}(n)\leq \left\lfloor\log_{q}n\right\rfloor+1$ and
$$\{1,\cdots , n\}=\bigsqcup_{k=1}^{\left\lfloor\log_{q}n\right\rfloor+1}\bigsqcup_{\substack{i\leq n,q\nmid i, \\
\sharp\Lambda_{i}(n)= k}}\Lambda_{i}(n).$$
(3) By (1), $N(n, q, k)$ is obviously the number of $i$'s such that $\frac{n}{q^k}< i \le \frac{n}{q^{k-1}}$ and $q\nmid i$.
It is the number of $i$'s such that $\frac{n}{q^k}< i \le \frac{n}{q^{k-1}}$ minus the $i$'s such that $\frac{n}{q^k}< i \le \frac{n}{q^{k-1}}$
and $q\mid i$,
i.e.
$$
N(n,q,k) = \left( \left\lfloor \frac{n}{q^{k-1}}\right\rfloor - \left\lfloor \frac{n}{q^{k}}\right\rfloor\right)
- \left( \left\lfloor \frac{n}{q^{k}}\right\rfloor - \left\lfloor \frac{n}{q^{k+1}}\right\rfloor\right).
$$
It follows that
$$
\left| N(n,q,k)- \left(\frac{n}{q^{k-1}}- \frac{2n}{q^k} + \frac{n}{q^{k+1}}\right)\right| \le 4. $$
This is the desired estimate since $\frac{1}{q^{k-1}}-\frac{2}{q^k} +\frac{1}{q^{k+1}} = \frac{(q-1)^2}{q^{k+1}}$.
\end{proof}
Now we consider $(\prod_{i\geq1,q\nmid i}S^{\Lambda_i},\mathbb P_\mu)$ as a probability space $(\Omega,\mathbb P_\mu)$. Let $(F_k)_{k\ge 1}$ be a sequence of functions defined on $\Sigma_m$. For each $k$, there exists a unique integer $i(k)$ such that $k=i(k)q^j$ and $q\nmid i(k)$. Then $x \mapsto F_k(x_{|_{\Lambda_{i(k)}}})$
defines a random variable on $\Omega$. Later,
we will study the law of large numbers for the sequence of variables $\{F_k(x_{|_{\Lambda_{i(k)}}})\}_{k\ge 1}$.
Notice that if $i(k)\not=i(k')$, then the two variables $F_k(x_{|_{\Lambda_{i(k)}}})$ and $F_{k'}(x_{|_{\Lambda_{i(k')}}})$
are independent. But if $i(k)=i(k')$, they are not independent in general.
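For instance, when $q=2$ we have $i(12)=i(6)=3$ and $i(5)=5$; thus $F_{12}(x_{|_{\Lambda_3}})$ and $F_{6}(x_{|_{\Lambda_3}})$ are in general dependent, while $F_{12}(x_{|_{\Lambda_3}})$ and $F_{5}(x_{|_{\Lambda_5}})$ are independent.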
In order to prove the law of large numbers, we will need the following technical lemma which allows us to compute the expectation of the product of $F_k(x_{|_{\Lambda_{i(k)}}})$'s. The proof of the lemma is based on the independence of $x_{|_{\Lambda_i}}$'s.
\begin{lemma} \label{LLN lemma 0}
Let $(F_k)_{k\geq 1}$ be a sequence of functions defined on $\Sigma_m$. Then for any integer $N\geq 1$, we have
$$\mathbb E_{\mathbb P_\mu}\left(\prod_{k=1}^N F_k(x_{|_{\Lambda_{i(k)}}})\right)=\prod_{k=1}^{\lfloor\log_q N\rfloor+1}\prod_{\frac{N}{q^k} <i\leq \frac{N}{q^{k-1}}, q\nmid i}\mathbb E_{\mu}\left(\prod_{h=0}^{k-1}F_{iq^h}(x)\right).$$
In particular,
for any function $G$ defined on $\Sigma_m$, for any $n\geq 1$,
$$\mathbb E_{\mathbb P_\mu}G(x_{|_{\Lambda_{i(n)}}})=\mathbb E_{\mu}G(\cdot).$$
\end{lemma}
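For example, take $q=2$ and $N=6$. The blocks involved are $\Lambda_1(6)=\{1,2,4\}$, $\Lambda_3(6)=\{3,6\}$ and $\Lambda_5(6)=\{5\}$, so the lemma reads
$$\mathbb E_{\mathbb P_\mu}\left(\prod_{k=1}^{6} F_k(x_{|_{\Lambda_{i(k)}}})\right)=\mathbb E_{\mu}\left(F_1F_2F_4\right)\mathbb E_{\mu}\left(F_3F_6\right)\mathbb E_{\mu}\left(F_5\right),$$
where $F_1F_2F_4$ denotes the function $x\mapsto F_1(x)F_2(x)F_4(x)$, etc.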
\begin{proof}
Let $$Q_{N}(x)=\prod_{k=1}^NF_k(x_{|_{\Lambda_{i(k)}}}), \ \ Q_{N,i}(x)=\prod_{k\in \Lambda_{i}(N)}F_k(x_{|_{\Lambda_i}}).$$
Since the variables $x_{|_{\Lambda_{i}}}$ for different $i\geq1 $ with $q\nmid i$ are independent under $\mathbb P_{\mu}$ (by Lemma \ref{prop ind}), we have
\begin{equation}\label{1}
\mathbb E_{\mathbb P_{\mu}}Q_{N}=\prod_{i\leq N,q\nmid i}\mathbb E_{\mathbb P_{\mu}}Q_{N,i}.
\end{equation}
Then, by (2) of Lemma \ref{lem dec}, we can rewrite the right hand side of (\ref{1}) to get
$$\mathbb E_{\mathbb P_{\mu}}Q_{N}=\prod_{k=1}^{\lfloor\log_q N\rfloor+1}\prod_{\frac{N}{q^k} <i\leq \frac{N}{q^{k-1}}, q\nmid i} \mathbb E_{\mathbb P_{\mu}}Q_{N,i}.$$
However, the marginal measure of $\mathbb P_{\mu}$ on $S^{\Lambda_i}$ is equal to $\mu$, and $\Lambda_i(N)=\{i,iq,\cdots,iq^{k-1}\}$ if $\frac{N}{q^k} <i\leq \frac{N}{q^{k-1}}$. So
$$\mathbb E_{\mathbb P_{\mu}}Q_{N,i}=\mathbb E_{\mu}\left(\prod_{h=0}^{k-1}F_{iq^h}(x)\right).$$
Now, for any function $G$ defined on $\Sigma_m$ and any $n\in \mathbb N^*$, if we set $F_n=G$ and $F_k=1$ for $k\neq n$ we have
$$\mathbb E_{\mathbb P_\mu}G(x_{|_{\Lambda_{i(n)}}})=\mathbb E_{\mu}G(x).$$
\end{proof}
\subsection{Law of large numbers}
In order to prove the law of large numbers (LLN), we need the following result.
Recall that the covariance of two bounded functions $f,g$ with respect to $\mu$ is defined by
$${\rm cov}_\mu(f,g)=\mathbb E_{\mu}\left[(f-\mathbb E_\mu f)(g-\mathbb E_\mu g)\right].$$
\begin{proposition}\label{lemma LLN}
Let $(F_k)_{k\ge 1}$ be a sequence of functions defined on $\Sigma_m$ satisfying
\begin{equation}\label{condition1}
{\rm cov}_\mu \left(F_{iq^{j_1}}(x), F_{iq^{j_2}}(x)\right)\leq C\eta^{\frac{j_1+j_2}{2}}
\end{equation}
for some constants $C>0$ and $0<\eta<q^{3/2}$ and for all $i \ge 1$ with $q\nmid i$ and all $j_1, j_2 \in \mathbb{N}$.
Let $p_0, p_1$ and $p_2$ be three maps from $\mathbb N^*$ into $\mathbb N^*$ such that
\begin{equation}\label{condition3}
\forall n\in \mathbb N^*, \ \ 1\le \frac{p_2(n)}{p_1(n)}\leq \alpha; \quad
\sum_{n=1}^\infty \frac{p_2(n)^{\frac{3}{2}-\epsilon}}{p_0(n)^2}<+\infty
\end{equation}
for some $\alpha>1$ and some $0<\epsilon<1/2$ with $q^{3/2-\epsilon}>\eta$.
Then for $\mathbb P_{\mu}-$a.e. $x\in \Sigma_{m}$
$$\lim_{n\rightarrow\infty}\frac{1}{p_0(n)}\sum_{k=p_1(n)}^{p_2(n)}\left(F_k(x_{|_{\Lambda_{i(k)}}})-\mathbb E_{\mu}F_k(x)\right)=0. $$
\end{proposition}
\begin{proof} Without loss of generality, we can assume that $\mathbb E_{\mathbb P_\mu}F_k(x_{|_{\Lambda_{i(k)}}})=0$ for all $k\in \mathbb N^*$. Otherwise, we replace $F_k(x_{|_{\Lambda_{i(k)}}})$ by $F_k(x_{|_{\Lambda_{i(k)}}})-\mathbb E_{\mathbb P_\mu}F_k(x_{|_{\Lambda_{i(k)}}})$.
We denote
$$
Z_n=\frac{1}{p_0(n)} \sum_{k=p_1(n)}^{p_2(n)} Y_k \quad {\rm with}\ \ Y_k=F_k(x_{|_{\Lambda_{i(k)}}}).
$$
We have only to show that
$$
\sum_{n=1}^{\infty}\mathbb E_{\mathbb P_\mu}Z_{n}^2<+\infty.
$$
Indeed, this summability implies, by the monotone convergence theorem, that $\sum_{n}Z_n^2<+\infty$ $\mathbb P_\mu$-a.e., so that $Z_n\rightarrow 0$ $\mathbb P_\mu$-a.e.
Notice that $$
\mathbb E_{\mathbb P_\mu}Z_{n}^2=\frac{1}{p_0^2(n)}
\sum_{p_1(n)\leq
u,v\leq p_2(n)}\mathbb E_{\mathbb P_\mu} Y_uY _v.
$$
Observe that by Lemma \ref{prop ind}, $\mathbb E_{\mathbb P_\mu}Y_uY_v\neq 0$ only if $i(u)=i(v)$, in other words only if
$u$ and $v$ are in the same set $\Lambda_i$. So
\begin{equation}\label{LLN lemma 1}
\mathbb E_{\mathbb P_\mu}Z_{n}^2=\frac{1}{p_0^2(n)} \sum_{\substack{i\geq1,q\nmid i, \\
\Lambda_i\cap[p_1(n),p_2(n)]\neq \emptyset}}\sum_{u,v\in\Lambda_i\cap[p_1(n),p_2(n)]}\mathbb E_{\mathbb P_\mu} Y_uY_v.
\end{equation}
However by the hypothesis (\ref{condition1}) on the sequence $(F_k)_{k\ge 1}$, for any $u,v\in\Lambda_i\cap[p_1(n),p_2(n)]$ we have
$$\left|\mathbb E_{\mathbb P_\mu} Y_uY_v\right|=\left|\mathbb E_{\mu}F_u(x)F_v(x)\right|\leq C\eta^{\log_q \frac{p_2(n)}{i}}.$$
Substituting the last estimate into (\ref{LLN lemma 1}), we get
\begin{equation}\label{LLN lemma 2}
\mathbb E_{\mathbb P_\mu}Z_{n}^2\leq\frac{C}{p_0^2(n)}\sum_{\substack{i\geq1,q\nmid i, \\
\Lambda_i\cap[p_1(n),p_2(n)]\neq \emptyset}}\eta^{\log_q \frac{p_2(n)}{i}}\sharp \left(\Lambda_i\cap[p_1(n),p_2(n)]\right).
\end{equation}
The cardinality $\sharp \left(\Lambda_i\cap[p_1(n),p_2(n)]\right)$ is estimated as follows:
\begin{equation}\label{LLN lemma 3}
\sharp \left(\Lambda_i\cap[p_1(n),p_2(n)]\right)\leq 1+\log_q\alpha.
\end{equation}
In fact, assume that $$\Lambda_i\cap[p_1(n),p_2(n)]=\{a_1,\cdots,a_k\}$$
with $a_1<\cdots <a_k$.
Then by the definition of $\Lambda_i$, we must have $\frac{a_{j+1}}{a_j}\geq q$ for $1\leq j\leq k-1$ so that $$\frac{a_k}{a_1}\geq q^{k-1}.$$ On the other hand,
$$\frac{a_k}{a_1}\leq \frac{p_2(n)}{p_1(n)}\leq \alpha. $$
So $q^{k-1}\leq \alpha$, i.e. $k \le 1+\log_q\alpha$.
Substituting (\ref{LLN lemma 3}) into (\ref{LLN lemma 2}), we get
\begin{equation}\label{LLN lemma 4}
\mathbb E_{\mathbb P_\mu}Z_{n}^2\leq\frac{C(1+\log_q\alpha)^+}{p_0^2(n)}\sum_{\substack{i\geq1,q\nmid i, \\
\Lambda_i\cap[p_1(n),p_2(n)]\neq \emptyset}}\eta^{\log_q \frac{p_2(n)}{i}}.
\end{equation}
There are at most $p_2(n)-p_1(n)+1$ integers $i$ such that $i\geq1,q\nmid i$ and $\Lambda_i\cap[p_1(n),p_2(n)]\neq \emptyset$.
If they are arranged in increasing order, then the $j$-th one is at least $j$. We deduce that
$$\sum_{\substack{i\geq1,q\nmid i, \\
\Lambda_i\cap[p_1(n),p_2(n)]\neq \emptyset}}\eta^{\log_q \frac{p_2(n)}{i}}
\leq \sum_{j=1}^{p_2(n)-p_1(n)+1}\eta^{\log_q \frac{p_2(n)}{j}} \le \sum_{j=1}^{p_2(n)-p_1(n)+1}\left(\frac{p_2(n)}{j}\right)^{\frac{3}{2}-\epsilon},
$$
where the last inequality is due to the fact that $\log_q \eta <3/2 -\epsilon$.
Since $\epsilon<1/2$, we have $\sum_{j=1}^\infty j^{-(3/2-\epsilon)}<\infty$.
Then
$$ \mathbb E_{\mathbb P_\mu}Z_{n}^2\le C' \frac{p_2(n)^{3/2-\epsilon}}{p_0(n)^2}$$
for some constant $C'>0$ independent of $n$.
We conclude by the hypothesis (\ref{condition3}), which says that the right hand side of the above estimate is the general term of a
convergent series.
\end{proof}
The following is the LLN which will be useful for our computation of the dimension of the telescopic
product measure $\mathbb{P}_\mu$.
\begin{theorem}\label{LLN}
Let $(F_k)_{k\ge 1}$ be a sequence of functions defined on $\Sigma_m$. Suppose that there exist $C>0$ and $0<\eta<q^{3/2}$ such that for any $i\geq 1$ with $q\nmid i$, any $j_1,j_2\in \mathbb N$,
\begin{equation}\label{condition2}
{\rm cov}_{\mu}\left(F_{iq^{j_1}}(x), F_{iq^{j_2}}(x)\right)\leq C\eta^{\frac{j_1+j_2}{2}}.
\end{equation}
Then for $\mathbb P_{\mu}-$a.e. $x\in \Sigma_{m}$
$$\lim_{n\rightarrow\infty}\frac{1}{n}\sum_{k=1}^{n}\left(F_k(x_{|_{\Lambda_{i(k)}}})-\mathbb E_{\mu}F_k(x)\right)=0. $$
\end{theorem}
\begin{proof}
Without loss of generality, we can assume that $\mathbb E_{\mathbb P_\mu}F_k(x_{|_{\Lambda_{i(k)}}})=0$ for all $k\in \mathbb N^*$.
Our aim is to prove $\lim_{n\rightarrow\infty}Y_{n}=0$ $\mathbb P_{\mu}$-a.e., where
$$Y_{n}=\frac{1}{n}\sum_{k=1}^n X_k \ \ \ {\rm with} \ \ \ X_k= F_{k}(x|_{\Lambda_{i(k)}}).$$
First we claim that it suffices to show
\begin{equation}\label{LLN 1}
\lim_{n\rightarrow\infty}Y_{n^2}=0, \ \ \mathbb P_{\mu}- {\rm a.e. }
\end{equation}
In fact, for every $n\in \mathbb N $ there exists a unique $k\in \mathbb N$ such that $k^2\leq n<(k+1)^2$. Then we have
$$
\left|Y_{n}\right|\leq \left|Y_{k^2}\right|+\frac{\left(|X_{k^2+1}|+\cdots+|X_{n}|+\cdots+|X_{(k+1)^2}|\right)}{k^2}.
$$
So, since $Y_{k^2} \rightarrow 0$ $\mathbb{P}_\mu$-a.e.,
we have only to show
\begin{equation}\label{LLN 3}
\lim_{k\rightarrow\infty}\frac{\left(|X_{k^2+1}|+\cdots+|X_{n}|+\cdots+|X_{(k+1)^2}|\right)}{k^2}=0, \ \ \mathbb P_{\mu}- {\rm a.e. }
\end{equation}
Let $p_0, p_1$ and $p_2$ be the three maps from $\mathbb N^*$ to $\mathbb N^*$ defined as follows:
$$p_0(k)=p_1(k)=k^2,\ \ p_2(k)=(k+1)^2 \ { \rm for }\ k\in \mathbb N^*.$$
Then observe that $$1\le \frac{p_2(k)}{p_1(k)}= \frac{(k+1)^2}{k^2}\leq 4 \ \ \forall k\in \mathbb N^*$$
and
$$\sum_{k=2}^\infty \frac{p_2(k)^{\frac{3}{2}-\epsilon}}{p_0(k)^2}\leq\sum_{k=2}^\infty \frac{((k+1)^2)^{\frac{3}{2}-\epsilon}}{k^4}<+\infty.$$
Thus we have verified that the maps $p_0, p_1$ and $p_2$ satisfy the hypothesis of Proposition \ref{lemma LLN}. Then (\ref{LLN 3}) is assured by Proposition \ref{lemma LLN}.
Now we are going to show
\begin{equation}\label{LLN 4}
\sum_{n=1}^{\infty}\mathbb E_{\mathbb P_\mu}Y_{n^2}^2<+\infty,
\end{equation}
which will imply (\ref{LLN 1}). Notice that
$$
\mathbb E_{\mathbb P_\mu}Y_{n}^2=\frac{1}{n^2}\sum_{1\leq
u,v\leq n}\mathbb E_{\mathbb P_\mu}X_u X_v.
$$
By Lemma \ref{prop ind}, we have $\mathbb{E}_{\mathbb P_\mu}X_u X_v\neq 0$ only if $i(u)=i(v)$. So
$$
\mathbb E_{\mathbb P_\mu}Y_{n}^2=\frac{1}{n^2}\sum_{i\leq n,q\nmid i}\ \sum_{u,v\in \Lambda_i(n)}\mathbb{E}_{\mathbb P_\mu}X_u X_v.
$$
By (2) of Lemma \ref{lem dec}, we can rewrite the above sum as
\begin{equation}\label{LLN 5}
\mathbb E_{\mathbb P_\mu}Y_{n}^2=\frac{1}{n^2}\sum_{k=1}^{\lfloor\log_q n\rfloor+1}\sum_{\substack{\frac{n}{q^k} <i\leq \frac{n}{q^{k-1}} \\
q\nmid i}} \sum_{u,v\in \Lambda_i(n)}\mathbb E_{\mathbb P_\mu} X_u X_v.
\end{equation}
Recall that $\mathbb E_{\mathbb P_\mu}X_k =\mathbb E_{\mu}F_k$ for all $k\in\mathbb N^*$ (Lemma \ref{LLN lemma 0}).
For $u,v\in \Lambda_i(n)$, we write $u=iq^{j_1}$ and $v=iq^{j_2}$ with $0\leq j_1,j_2\leq \sharp\Lambda_i(n)-1$. By the Cauchy-Schwarz inequality and the hypothesis (\ref{condition2}), we obtain
$$\left|\mathbb E_{\mathbb{P}_\mu} X_uX_v\right|\leq \sqrt{\mathbb E_{\mu} F_u^2} \sqrt{\mathbb E_{\mu} F_v^2}\leq C\eta^{\sharp \Lambda_i(n)}.$$
This estimate holds for all $u,v\in\Lambda_i(n)$. So
$$\sum_{u,v\in \Lambda_i(n)}|\mathbb{E}_{\mathbb P_\mu}X_u X_v |\leq C\left(\sharp\Lambda_i(n)\right)^2\eta^{\sharp \Lambda_i(n)}.$$
Substituting this estimate into (\ref{LLN 5}) and using (1) of Lemma \ref{lem dec}, we get
$$
\left|\mathbb E_{\mathbb P_\mu}Y_n^2\right|\leq \frac{C}{n^2}\sum_{k=1}^{\lfloor\log_q n\rfloor+1}\sum_{\substack{\frac{n}{q^k} <i\leq \frac{n}{q^{k-1}} \\ q\nmid i}}k^2\eta^{k} =\frac{C}{n^2}\sum_{k=1}^{\lfloor\log_q n\rfloor+1}k^2\eta^{k}N(n,q,k),
$$
where $N(n,q,k)$ appeared in Lemma \ref{lem dec}. Then by (3) of Lemma \ref{lem dec}, the last term is equivalent to
$$\frac{C(q-1)^2}{n}\sum_{k=1}^{\lfloor\log_q n\rfloor+1}\frac{k^2 \eta^{k}}{q^{k+1}}= O\left(\frac{1}{n}\left(\frac{\eta}{q}\right)^{\log_q n}\right)
=O\left(n^{-1/2-\epsilon}\right)$$
for some $\epsilon>0$. This implies (\ref{LLN 4}).
\end{proof}
\subsection{A special LLN}
When the functions $(F_k)_{k\ge 1}$ in the LLN (Theorem \ref{LLN}) are all induced by a single function $F$, we have the following special LLN.
\begin{theorem}\label{thm esperence general formula}
Let $\mu$ be any probability measure on $\Sigma_m$ and let
$F\in \mathcal{F}(S^{\ell})$. For $\mathbb P_{\mu}$-a.e. $x\in \Sigma_m$
we have
$$\lim_{n\rightarrow\infty}\frac{1}{n}\sum_{k=1}^nF(x_k,\cdots,x_{kq^{\ell-1}})=(q-1)^2\sum_{k=1}^\infty\frac{1}{q^{k+1}}\sum_{j=0}^{k-1}\mathbb E_\mu F(x_j,\cdots,x_{j+\ell-1}).$$
\end{theorem}
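Since $F$ is bounded (being defined on the finite set $S^{\ell}$), the double sum on the right hand side may be rearranged, which gives the equivalent expression
$$(q-1)^2\sum_{k=1}^\infty\frac{1}{q^{k+1}}\sum_{j=0}^{k-1}\mathbb E_\mu F(x_j,\cdots,x_{j+\ell-1})=(q-1)\sum_{j=0}^{\infty}\frac{1}{q^{j+1}}\,\mathbb E_\mu F(x_j,\cdots,x_{j+\ell-1}).$$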
\begin{proof}
For any integer $k$ we write $k=i(k)q^j$ with $q\nmid i(k)$. Then
we define a function $F_{k}$ by
$$F_{k}(x)=F(x_j,\cdots,x_{j+\ell-1}).$$
Therefore we can re-write
$$
F(x_k,x_{kq},\cdots,x_{kq^{\ell-1}})=F_k(x_{|_{\Lambda_{i(k)}}}).
$$
By the law of large numbers (Theorem \ref{LLN}), for $\mathbb P_{\mu}$-a.e. $x\in\Sigma_m$ we
have
$$
\lim_{n\rightarrow\infty}\frac{1}{n}\sum_{k=1}^nF_k(x_{|_{\Lambda_{i(k)}}})
=\lim_{n\rightarrow\infty}\frac{1}{n}\sum_{k=1}^n\mathbb E_{\mu}F_k(x)
$$
if the limit on the right hand side exists. The limit does exist.
In fact,
by (2) of Lemma \ref{lem dec}, we have
$$\sum_{k=1}^n\mathbb E_{\mu}F_k(x)=\sum_{k=1}^{\lfloor\log_q n\rfloor+1}\sum_{\substack{\frac{n}{q^k} <i\leq \frac{n}{q^{k-1}}\\ q\nmid i}}\sum_{j=0}^{\sharp \Lambda_i(n)-1}\mathbb E_{\mu}F_{iq^j}(x).$$
By the definition of the sequence $(F_k)$, for any $k=iq^j$ with
$q\nmid i$ we have
$$\mathbb E_{\mu}F_{iq^j}(x)=\mathbb E_{\mu}F(x_j,\cdots,x_{j+\ell-1}),$$
which is independent of $i$.
Combining the last two equations, we
get
$$\sum_{k=1}^n\mathbb E_{\mu}F_k(x)=\sum_{k=1}^{\lfloor\log_q n\rfloor+1}N(n,q,k)\sum_{j=0}^{k-1}\mathbb E_{\mu}F(x_j,\cdots,x_{j+\ell-1}),$$
where $N(n,q,k)$ appeared in Lemma \ref{lem dec}. Then, by (3) of Lemma \ref{lem dec}, we get
\begin{eqnarray*}
\lim_{n\rightarrow\infty}\frac{1}{n}\sum_{k=1}^n\mathbb E_{\mu}F_k(x) & = &\lim_{n\rightarrow\infty}\sum_{k=1}^{\lfloor\log_q n\rfloor+1}\frac{N(n,q,k)}{n}\sum_{j=0}^{k-1}\mathbb E_{\mu}F(x_j,\cdots,x_{j+\ell-1})\\
&=& (q-1)^2\sum_{k=1}^\infty\frac{1}{q^{k+1}}\sum_{j=0}^{k-1}\mathbb E_\mu F(x_j,\cdots,x_{j+\ell-1}).
\end{eqnarray*}
\end{proof}
\section{Dimensions of telescopic product measures \label{section2+}}
Let $\nu$ be a measure on $\Sigma_m$. The lower local dimension of $\nu$ at a point $x\in \Sigma_m$ is defined as
$$\underline{D}(\nu,x):=\liminf_{n\rightarrow\infty}\frac{-\log_m\nu([x_1^n])}{n}.$$
Similarly, we can define the upper local dimension $\overline{D}(\nu,x)$. If $\underline{D}(\nu,x)=\overline{D}(\nu,x)$, we write $D(\nu, x)$ for the common value and
we say that $\nu$ admits $D(\nu, x)$ as the exact local dimension at $x$.
See \cite{Fan1994} for the dimensions of measures. Recall that the Hausdorff dimension of a Borel measure $\nu$, denoted by $\dim_{H}\nu$, is the minimal dimension of Borel sets of full measure
and is equal to ${\rm ess \ sup}_\nu \underline{D}(\nu,x)$ (\cite{Fan1994}).
In this section, as a consequence of the LLN,
we will prove that every telescopic product measure $\mathbb{P}_\mu$ admits its exact local dimension for $\mathbb P_\mu$-a.e. point in $\Sigma_m$, which is a constant.
\subsection{Local dimension of telescopic product measures}
For a measure $\mu$ on $\Sigma_m$ and for $k\geq1$, we define $$H_k(\mu)=-\sum_{a_1,\cdots,a_k\in S}\mu([a_1\cdots a_k])\log \mu([a_1\cdots a_k]).$$
We note that for a probability measure $\mu$ we have $0\le H_k(\mu)\le k\log m$.
\begin{theorem}\label{prop loc dim}
For $\mathbb P_\mu$-a.e. $x\in \Sigma_m$, we have
$$D(\mathbb P_\mu,x)=\frac{(q-1)^2}{\log m}\sum_{k=1}^{\infty}\frac{H_k(\mu)}{q^{k+1}}.$$
\end{theorem}
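For example, if $\mu$ is a Bernoulli (i.i.d.) product measure on $\Sigma_m$ with marginal $(p_0,\cdots,p_{m-1})$, then $H_k(\mu)=kH_1(\mu)$ and, using the elementary identity $(q-1)^2\sum_{k=1}^{\infty}k\,q^{-(k+1)}=1$, the formula above gives
$$D(\mathbb P_\mu,x)=\frac{H_1(\mu)}{\log m}=\frac{-\sum_{a\in S}p_a\log p_a}{\log m}\ \ \mathbb P_\mu{\rm -a.e.},$$
which is also the dimension of the Bernoulli measure $\mu$ itself.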
\begin{proof}
By the definition of $\mathbb P_\mu$, we have
\begin{equation}\label{loc dim 0}
\log\mathbb P_\mu([x_1^n]) = \sum_{i\leq n,q\nmid i}\log\mu([x_1^{n}{|_{\Lambda_i(n)}}])
= \sum_{k=1}^{\lfloor\log_q n\rfloor+1}\sum_{\substack{\frac{n}{q^k} <i\leq \frac{n}{q^{k-1}}\\ q\nmid i}} \log\mu([x_1^n{|_{\Lambda_i(n)}}]).
\end{equation}
Recall that $x_1^n{|_{\Lambda_i(n)}}=x_ix_{iq}x_{iq^2}\cdots x_{iq^{\sharp \Lambda_i -1}}$. So
$$
\mu([x_1^n{|_{\Lambda_i(n)}}])=\mu([x_ix_{iq}x_{iq^2}\cdots x_{iq^{\sharp \Lambda_i -1}}]).
$$
Let us write $\mu([x_1^n{|_{\Lambda_i(n)}}])$ in the following way
$$ \mu([x_1^n{|_{\Lambda_i(n)}}])=\mu([x_i])\prod_{j=1}^{\sharp \Lambda_i -1}
\frac{\mu([x_ix_{iq}x_{iq^2}\cdots x_{iq^j}])}{\mu([x_ix_{iq}x_{iq^2}\cdots x_{iq^{j -1}}])}.
$$
Now we define a suitable sequence of functions $(F_k)_{k\ge 1}$ on $\Sigma_m$ in order to express $\mu([x_1^n{|_{\Lambda_i(n)}}])$.
If $k=i$ such that $q\nmid i$, we define $$F_k(x)=F_i(x)=-\log\mu([x_0]).$$ If $k=iq^j$ with $q\nmid i$ and $j\geq 1$, we define $$F_k(x)=F_{iq^j}(x)=-\log\frac{\mu([x_0,x_1,\cdots,x_j])}{\mu([x_0,x_1,\cdots,x_{j-1}])}.$$
Then, we have the following relationship between $F_{k}$ and $\mu$.
$$
-\log\mu([x_{1}^n{|_{\Lambda_{i}}}]) =\sum_{k\in \Lambda_{i}(n)}F_{k}(x_{|_{\Lambda_{i}}}).
$$
Substituting this expression into (\ref{loc dim 0}) we obtain
\begin{equation}\label{loc dim 1}
-\log\mathbb P_\mu([x_1^n])=\sum_{k=1}^nF_k(x_{|_{\Lambda_{i(k)}}}).
\end{equation}
Now we check that the sequence $(F_k)_{k\ge 1}$ verifies the hypothesis (\ref{condition2}) of the law of large numbers (Theorem~\ref{LLN}).
Notice that for any $x\in\Sigma_m$ and any $j\ge 1$, we have
$$|F_{iq^j}(x)|=\left|\log\frac{\mu([x_0,x_1,\cdots,x_j])}{\mu([x_0,x_1,\cdots,x_{j-1}])}\right|\leq \left|\log\mu([x_0,x_1,\cdots,x_j])\right|.$$
This is because $\left|\log \frac{x}{y}\right|\le \log \frac{1}{x}$ when $0< x\le y\le 1$.
So, for any $i\in\mathbb N^*$ with $q\nmid i$ and $j\geq 0$, we have
$$\mathbb E_{\mu}\left(F_{iq^j}(x)\right)^2\leq \sum_{x_0,\cdots,x_j\in S}\mu([x_0,x_1,\cdots,x_j])\left(\log\mu([x_0,x_1,\cdots,x_j])\right)^2.$$
Then by Lemma \ref{lemma loc dim} stated below, we obtain
$$\mathbb E_{\mu}\left(F_{iq^j}(x)\right)^2=O(j^2)$$
which implies through Cauchy-Schwarz inequality
$$\mathbb E_{\mu}\left|F_{iq^{j_1}}(x) F_{iq^{j_2}}(x)\right|= O((j_1+j_2)^2).$$
This quadratic estimate is stronger than the exponential estimate required by the hypothesis (\ref{condition2}), since we may take any $\eta$ with $1<\eta<q^{3/2}$.
By the law of large numbers, we have
\begin{equation}\label{4.2.3}
D(\mathbb{P}_\mu, x) =\frac{1}{\log m}\lim_{n\rightarrow\infty}\frac{1}{n }\sum_{j=1}^nF_j= \frac{1}{\log m}\lim_{n\rightarrow\infty}\frac{1}{n }\sum_{j=1}^n\mathbb E_{\mu}F_j \ \ \mathbb P_\mu{\rm -a.e.}
\end{equation}
if the limit on the right hand side exists.
This limit does exist. We are going to compute it. By (2) of Lemma \ref{lem dec}, we have
\begin{equation}\label{4.2.3-1}
\sum_{k=1}^n\mathbb E_{\mu}F_k = \sum_{k=1}^{\lfloor\log_q n\rfloor+1}\sum_{\substack{\frac{n}{q^k} <i\leq \frac{n}{q^{k-1}}\\ q\nmid i}}\sum_{j=0}^{k-1}\mathbb E_{\mu}F_{iq^j}.\end{equation}
By the definition of the sequence $(F_k)_{k\ge 1}$, we have
$$
\sum_{j=0}^{k-1}F_{iq^j}(x)= -\log\mu([x_0,\cdots ,x_{k-1}])
$$
which implies immediately
$$
\sum_{j=0}^{k-1}\mathbb E_{\mu}F_{iq^j}=-\mathbb E_{\mu}\log\mu([x_0,\cdots ,x_{k-1}])=H_k(\mu).
$$
Then substituting this into (\ref{4.2.3-1}) we get
$$\sum_{k=1}^n\mathbb E_{\mu}F_k=\sum_{k=1}^{\lfloor\log_q n\rfloor+1}\sum_{\substack{\frac{n}{q^k} <i\leq \frac{n}{q^{k-1}}\\ q\nmid i}}H_{k}(\mu)=\sum_{k=1}^{\lfloor\log_q n\rfloor+1}N(n,q,k)H_k(\mu)$$
where $N(n,q,k)$ is the number of $i$'s such that $\frac{n}{q^k} <i\leq \frac{n}{q^{k-1}}$ and $ q\nmid i$. So, by (3) of Lemma \ref{lem dec}, we obtain
$$\lim_{n\rightarrow\infty}\frac{1}{n}\sum_{k=1}^n\mathbb E_{\mu}F_k=\lim_{n\rightarrow\infty}\sum_{k=1}^{\lfloor\log_q n\rfloor+1}\frac{N(n,q,k)}{n}H_k(\mu) = (q-1)^2\sum_{k=1}^{\infty}\frac{H_k(\mu)}{q^{k+1}}<\infty.$$
\end{proof}
\begin{remark}
Even if the measure $\mu$ itself is not exact dimensional, the telescopic measure $\mathbb P_\mu$ is. This is because the $\mathbb P_\mu$-measure of a cylinder of length $N$ is governed by the measure $\mu$ on short pieces $\Lambda_i(N)$, while the non-exactness of $\mu$ can be seen only on long cylinders. These short pieces are independent.
\end{remark}
\subsection{An elementary inequality}
In the last proof we have used the following elementary estimation.
For $n\ge 1$, let $$
\mathcal{P}_n:=\left\{p=(p_1,\cdots,p_n)\in \mathbb R_+^n,\sum_{i=1}^np_i=1\right\}$$
be the set of probability vectors. We define $L_n: \ \mathcal{P}_n\longrightarrow \mathbb R^+$ by $$L_n(p)=\sum_{i=1}^np_i(\log p_i)^2.$$
\begin{lemma}\label{lemma loc dim}
There exists a constant $D>0$ such that $$\max_{p\in \mathcal{P}_n}L_n(p)\leq (\log n)^2+D \log n.$$
\end{lemma}
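As a quick check of the order of this bound, note that for the uniform vector $p=(\frac{1}{n},\cdots,\frac{1}{n})$ we have $L_n(p)=(\log n)^2$, so the leading term $(\log n)^2$ cannot be improved.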
\begin{proof}
The function $x \mapsto x(\log x)^2$ is bounded on $[0,1]$ and attains its maximal value $4e^{-2}$ at $x=e^{-2}$.
Hence the inequality holds for $n=2$ with $D = 8e^{-2}$. Now we prove the inequality by induction on $n$.
Suppose that the inequality holds for $n\leq N$. Let $p\in\mathcal{P}_{N+1}$ be a maximal point of $L_{N+1}$.
If $p$ is on the boundary of $\mathcal{P}_{N+1}$, then there exists at least one component $p_{i_0}$ of $p$ such that $p_{i_0}=0$. So $$L_{N+1}(p)=\sum_{1\leq i\leq N+1, i\neq i_0}p_i(\log p_i)^2=L_N(p')$$ where
$p'=(p_1,\cdots,p_{i_0-1},p_{i_0+1},\cdots,p_{N+1})$ is in $\mathcal{P}_N$. In this case, we can conclude by the hypothesis of
induction.
Now we suppose that $p$ is not on the boundary of $\mathcal{P}_{N+1}$. We use the method of Lagrange multiplier. Differentiating $L_{N+1}(p)$ yields
$$\frac{\partial L_{N+1}}{\partial p_i}(p)=(\log p_i)^2+2\log p_i,\ \ (1\leq i\leq N+1).$$
So we have
\begin{equation}\label{lemma loc dim1}
(\log p_i)^2+2\log p_i=\lambda,\ \ (1\leq i\leq N+1)
\end{equation}
for some real number $\lambda$.
Let $a,b$ be the two solutions of the equation $$(\log x)^2+2\log x=\lambda.$$
The components of the maximal point $p=(p_1,\cdots ,p_{N+1})$ have two choices: $a$ or $b$. So
\begin{equation}\label{lemma loc dim3}
L_{N+1}(p)=ka(\log a)^2+(N+1-k)b(\log b)^2,
\end{equation}
where $k$ ($0\leq k\leq N+1$) is the number of $a$'s taken by the components of $p$. Recall that $ka+(N+1-k)b=1$.
Notice that
\begin{equation}\label{lemma loc dim2}
ka(\log a)^2=ka(\log ka-\log k)^2=ka(\log ka)^2+ka (\log k)^2-2ka (\log ka)\log k.
\end{equation}
Since $\max_{x\in [0,1]}-x\log x=\frac{1}{e}$ and $\max_{x\in [0,1]}x(\log x)^2=\frac{4}{e^2}$,
we get
$$ka(\log a)^2\leq \frac{4}{e^2}+ka (\log k)^2+\frac{2}{e}\log k\le \frac{4}{e^2}+ka (\log (N+1))^2+\frac{2}{e}\log (N+1).$$
A similar estimate holds for $(N+1-k)b(\log b)^2$. Putting these two estimates into (\ref{lemma loc dim3}), we get
$$
L_{N+1}(p) \le \frac{8}{e^2}
+ (\log (N+1))^2 + \frac{4}{e} \log(N+1).$$
We conclude that the inequality holds with $D= \frac{8}{e^2} + \frac{4}{e}$.
\end{proof}
\section{Non-linear transfer equation}\label{section nonlinear equa}
Our study of $A_n\varphi(x)$ will depend upon a class of special telescopic product measures $\mathbb{P}_\mu$
where $\mu$ is an $(\ell-1)$-Markov measure. Our $(\ell-1)$-Markov measures are nothing but Markov measures
with $S^{\ell-1}$ as state space. The transition probability of such an $(\ell-1)$-Markov measure will be determined by the solution of a non-linear
transfer equation. In this section, we will study this non-linear
transfer equation, find its positive solution and construct the $(\ell-1)$-Markov measure and the corresponding telescopic
product measure.
\subsection{Non-linear transfer equation}
Let $\mathcal{F}(S^{\ell-1}, \mathbb{R}^+)$ denote the cone of functions defined on $S^{\ell-1}$ taking non-negative real values.
It is identified with a subset in the Euclidean space $\mathbb{R}^{m^{\ell-1}}$.
Let $A: S^{\ell} \rightarrow \mathbb R^+$ be a given function. We define a non-linear operator $\mathcal{N}: \mathcal{F}(S^{\ell-1}, \mathbb{R}^+) \rightarrow \mathcal{F}(S^{\ell-1}, \mathbb{R}^+)$ by
\begin{equation}\label{non-linear transfer operator}
\mathcal{N}y(a_1, a_2, \cdots, a_{\ell-1})
= \left(\sum_{j \in S} A(a_1,a_2, \cdots, a_{\ell-1}, j)
y(a_2, \cdots,a_{\ell-1}, j)\right)^{\frac{1}{q}}.
\end{equation}
We are interested in positive fixed points of the operator $\mathcal{N}$. That means we are interested in $y\in \mathcal{F}(S^{\ell-1}, \mathbb{R}^+)$
such that $\mathcal{N}y=y$ and $y(a)>0$ for all $a\in S^{\ell-1}$. In general, such fixed points of $\mathcal{N}$ may not exist.
If $\mathcal{N}$ admits a positive fixed point, then for each $(a_1,\cdots,a_{\ell-1})\in S^{\ell-1}$, there exists at least one $j\in S$ such that $A(a_1,\cdots,a_{\ell-1},j)$ is strictly positive. In fact, this is also a sufficient condition.
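For instance, when $\ell=2$ the operator acts on vectors $y=(y(j))_{j\in S}$, and a positive fixed point of $\mathcal{N}$ is a positive solution of the system
$$y(a)^q=\sum_{j\in S}A(a,j)\,y(j),\qquad a\in S.$$
In particular, when $A\equiv 1$ the constant vector $y\equiv m^{\frac{1}{q-1}}$ is such a solution.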
\begin{theorem}\label{existence-unicity trans-equ}
Suppose that $A$ is non-negative and that for every $(a_1,\cdots, a_{\ell-1})\in S^{\ell-1}$ there exists at least one $j\in S$ such that $A(a_1,\cdots, a_{\ell-1}, j)>0$. Then $\mathcal{N}$ has a unique positive fixed point.
\end{theorem}
\begin{proof}
We define a partial order on $\mathcal{F}(S^{\ell-1},\mathbb R^+)$, denoted by $\leq$, as follows:
$$y_1\leq y_2\ \Leftrightarrow\ y_1(a)\leq y_2(a), \ \forall a\in S^{\ell-1}.$$
It is obvious that $\mathcal{N}$ is increasing with respect to this partial order, i.e., $$y_1\leq y_2\ \Rightarrow\ \mathcal{N}(y_1)\leq \mathcal{N}(y_2).$$\\
\indent {\em Uniqueness.}\
We first prove the uniqueness of the positive fixed point by contradiction. Suppose that there are two distinct positive fixed points $y_1$ and $y_2$ for $\mathcal{N}$. Without loss of generality we can suppose that $y_1\nleq y_2$. Let $$\xi=\inf\{\gamma >1,\ y_1\leq \gamma y_2\}.$$
It is clear that $\xi$ is a well defined real number and $y_1\leq \xi y_2$. Since $y_1\nleq y_2$, we must have $\xi>1$.
On the other hand, by the definition of $\mathcal{N}$, the operator $\mathcal{N}$ is homogeneous in the sense that $$\mathcal{N}(cy)=c^{\frac{1}{q}}\mathcal{N}(y),\ \forall y \in \mathcal{F}(S^{\ell-1},\mathbb R^+),\ \forall c\in\mathbb R^+.$$
It follows that $$y_1=\mathcal{N}(y_1)\leq \mathcal{N}(\xi y_2)=\xi^{\frac{1}{q}}\mathcal{N}( y_2)=\xi^{\frac{1}{q}}y_2.$$
Since $\xi^{\frac{1}{q}}<\xi$, this contradicts the minimality of $\xi$.\\
\indent {\em Existence.}\ Now
we prove the existence. Let
$$\theta_1=\left(\min_{a\in S^{\ell}}A(a)\right)^{\frac{1}{q-1}},\ \ \theta_2=\left(m \max_{a\in S^{\ell}}A(a)\right)^{\frac{1}{q-1}}.$$
Consider the restriction of $\mathcal{N}$ on the compact set $\mathcal{F}(S^{\ell-1},[\theta_1,\theta_2])$ consisting of functions on $S^{\ell-1}$ taking values in $[\theta_1,\theta_2]$. By the definitions of $\theta_1$ and $\theta_2$, the compact set $\mathcal{F}(S^{\ell-1},[\theta_1,\theta_2])$
is $\mathcal{N}$-invariant, i.e., $$\mathcal{N}\left(\mathcal{F}(S^{\ell-1},[\theta_1,\theta_2])\right)\subset \mathcal{F}(S^{\ell-1},[\theta_1,\theta_2]).$$
In fact, let $y \in \mathcal{F}(S^{\ell-1},[\theta_1,\theta_2])$ and let $y_{j_0} = \min_j y_j$. Then $y_{j_0}\ge \theta_1$ and $A(a, j_0) \ge \theta_1^{q-1}$
for all $a \in S^{\ell-1}$, so that
$$
\mathcal{N} y (a)\ge (A(a, j_0)y_{j_0})^{1/q} \ge \theta_1.$$
The verification of $\mathcal{N} y (a) \le \theta_2$ is even easier.
Now take any function $y_0$ from the compact set $\mathcal{F}(S^{\ell-1},[\theta_1,\theta_2])$. By the monotonicity of $\mathcal{N}$, we get
an increasing sequence
$$y_0\leq \mathcal{N}(y_0)\leq \mathcal{N}^2(y_0)\leq \cdots .$$
Since $\mathcal{F}(S^{\ell-1},[\theta_1,\theta_2])$ is compact, the limit
$g=\lim_{n\rightarrow\infty}\mathcal{N}^n(y_0)$ exists. It is a fixed point of $\mathcal{N}$.
\end{proof}
From now on, we concentrate on the following special case:
$$ A(a)=e^{s \varphi(a)},\ \ (a\in S^{\ell})$$
where $s\in \mathbb{R}$ is a parameter.
The corresponding operator will be denoted by $\mathcal{N}_{s}$.
By Theorem \ref{existence-unicity trans-equ}, there exists a unique positive fixed point for $\mathcal{N}_s$. We denote this fixed point by $\psi_s$.
In the following, we are going to study the analyticity and the convexity of the functions $s \mapsto \psi_s(a)$.
\subsection{Analyticity of $s \mapsto \psi_s(a)$}
\begin{proposition}\label{analyticity}
For every $a\in S^{\ell-1}$, the function $s\rightarrow \psi_s(a)$
is analytic on $\mathbb R$.
\end{proposition}
\begin{proof}
We consider the map $G: \mathbb R\times (\mathbb R_+^*)^{m^{\ell-1}}\rightarrow \mathbb R^{m^{\ell-1}}$ defined by
$$G(s,(z_a)_{a\in S^{\ell-1}})=\left(G_{b}(s,(z_a)_{a\in S^{\ell-1}})\right)_{b\in S^{\ell-1}},$$
where $$G_{(b_1,\cdots,b_{\ell-1})}(s,(z_a)_{a\in S^{\ell-1}})=z_{(b_1,\cdots,b_{\ell-1})}^q-\sum_{j\in S}e^{s\varphi(b_1,\cdots,b_{\ell-1},j)}z_{(b_2,\cdots,b_{\ell-1},j)}.$$
It is clear that $G$ is analytic. By Theorem \ref{existence-unicity trans-equ}, we have
$$G(s,(\psi_s(a))_{a\in S^{\ell-1}})=0.$$
Moreover the uniqueness in Theorem \ref{existence-unicity trans-equ} implies that for any fixed $s\in \mathbb R$, $(\psi_s(a))_{a\in S^{\ell-1}}$ is the unique positive vector satisfying the above equation. For brevity, in the following we will write $\underline{\psi_s}=(\psi_s(a))_{a\in S^{\ell-1}}$ and $\underline{z}=(z_a)_{a\in S^{\ell-1}}$.
By the implicit function theorem, if the Jacobian matrix
$$D(s)=\left(\frac{\partial G_{a}}{\partial z_b}(s,\underline{\psi_s})\right)_{(a,b)\in S^{\ell-1}\times S^{\ell-1}}$$ is invertible at a point $s_0\in \mathbb R$, then there exist a neighbourhood $(s_0-r_0,s_0+r_0)$ of $s_0$, a neighbourhood $V$ of $\underline{\psi_{s_0}}$ in $\mathbb R^{m^{\ell-1}}$ and an analytic function $f$ on $(s_0-r_0,s_0+r_0)$ taking values in $V$
such that for any $(t,\underline{z})\in (s_0-r_0,s_0+r_0)\times V$, we have
$$G(t,\underline{z})=0\ \Leftrightarrow\ f(t)=\underline{z}.$$ Then by the uniqueness of $\psi_t$ for fixed $t$, we have $\underline{\psi_t}=f(t)$.
So the functions $s\rightarrow \psi_s(a)$ $(a\in S^{\ell-1})$, which are coordinate functions of $f$, are analytic in $(s_0-r_0,s_0+r_0)$.
We now prove that the matrix $D(s)$ is invertible for any $s\in \mathbb R$. To this end, we consider the following matrix
$$\widetilde{D}(s)=\left(\psi_s(b) \frac{\partial G_{a}}{\partial z_b}(s,\underline{\psi_s}) \right)_{(a,b)\in S^{\ell-1}\times S^{\ell-1}},
$$ which is the one obtained by multiplying the $b$-th column of $D(s)$ by $\psi_s(b)$ for each $b\in S^{\ell-1}$.
Then we have the following relation between the determinants of $D(s)$ and $\widetilde{D}(s)$:
$$\det(\widetilde{D}(s))=\left(\prod_{a\in S^{\ell-1}}\psi_s(a)\right)\det(D(s)).$$
So we only need to prove that $\widetilde{D}(s)$ is invertible. We will prove this by showing that $\widetilde{D}(s)$ is strictly diagonally dominant and by
applying the Levy--Desplanques theorem, a consequence of the Gershgorin circle theorem (see e.g. \cite{Varga}). Recall that a matrix is said to be strictly diagonally dominant if, in every row of the matrix, the modulus
of the diagonal entry is strictly larger than the sum of the moduli of all the other (non-diagonal) entries in that row.
Let $a=(a_1,\cdots,a_{\ell-1})$ be fixed. The function $G_a(s, \cdot)$ depends only on $z_a$ and $z_b$'s with $b=(a_2,\cdots,a_{\ell-1},j)$. So
$$\frac{\partial G_a}{\partial z_b}(s,\underline{\psi_s})\neq 0$$ only if $b=a$ or $b=(a_2,\cdots,a_{\ell-1},j)$ for some $j\in S$.
It is possible that $a= (a_2,\cdots,a_{\ell-1},j)$ for some $j\in S$ and it is actually the case if and only if $a = (j, j, \cdots, j)$.
Thus, to prove that $\widetilde{D}(s)$ is strictly diagonally dominant, we only need to show that for any $a=(a_1,\cdots,a_{\ell-1})$, we have
\begin{equation}\label{analyticity 1}
\left|\psi_s(a)\frac{\partial G_{a}}{\partial z_a}(s,\underline{\psi_s})\right|- \sum_{\substack{j\in S, \\
b=(a_2,\cdots,a_{\ell-1},j)\neq a} }\left|\psi_s(b)\frac{\partial G_{a}}{\partial z_b}(s,\underline{\psi_s})\right|>0.
\end{equation}
In fact, we have
$$
\frac{\partial G_{a}}{\partial z_a}(s,\underline{\psi_s}) = \left\{ \begin{array}{ll}
q\psi_s^{q-1}(a)-e^{s\varphi(a,j)} & \textrm{ if } a=(j,\cdots ,j) \textrm{ for some } j\in S,\\
q\psi_s^{q-1}(a) & \textrm{otherwise.}
\end{array} \right.
$$
and for $b=(a_2,\cdots,a_{\ell-1},j)\neq a$, we have
$$\frac{\partial G_{a}}{\partial z_b}(s,\underline{\psi_s})=e^{s\varphi(a,j)}.$$
Then, substituting the last two expressions into (\ref{analyticity 1}), we obtain that the left hand side of (\ref{analyticity 1}) is equal to
$$q\psi_s^{q}(a)-\sum_{j\in S}e^{s\varphi(a,j)}\psi_s(a_2,\cdots,a_{\ell-1},j)=(q-1)\psi_s^{q}(a)>0.$$
For the last equality we have used the fact that $\psi_s$ is the solution of $\mathcal{N}_s \psi_s = \psi_s$.
\end{proof}
Our function $\psi_s$ is defined on $S^{\ell-1}$. We extend it to $S^{k}$ for all $1\le k \le \ell-2$ by descending induction on $k$ as follows
$$\psi_s(a)=\left(\sum_{j\in S}\psi_s(a,j)\right)^{\frac{1}{q}},\quad (\forall a\in S^k).$$
It is clear that all these functions $\psi_s$ are strictly positive for all $s\in \mathbb R$.
\begin{corollary}
For any $a\in \bigcup_{1\leq k\leq \ell-1}S^k$, the function $s\rightarrow \psi_s(a)$ is analytic on $\mathbb R$.
\end{corollary}
\subsection{Convexity of $s \mapsto \psi_s(a)$}
In this subsection, we prove that the functions $s\rightarrow \psi_s(a)$ for
$a\in \bigcup_{1\leq k\leq \ell-1}S^k$ and the pressure function
$P_{\varphi}(s)$ are convex functions on $\mathbb R$.
The following
lemma is nothing but the Cauchy-Schwarz inequality. We will use it in this form several times in the proof
of the convexity.
\begin{lemma}\label{lemma convexity 1}
Let $(a_j)_{j=0}^{m-1}$ and $(b_j)_{j=0}^{m-1}$ be two sequences of non-negative real numbers. Then
$$
\left(\sum_{j=0}^{m-1}a_jb_j\right)^2 \le \left(\sum_{j=0}^{m-1}a_j b_j^2\right)\left( \sum_{j=0}^{m-1}a_j\right).$$
\end{lemma}
\begin{proof}
We write $a_jb_j = \sqrt{a_j}b_j \cdot \sqrt{a_j}$ and then use
the Cauchy-Schwarz inequality.
\end{proof}
Let $\theta_{1}^s=\left(\min_{a\in S^{\ell}}e^{s\varphi(a)}\right)^{\frac{1}{q-1}}$. In the proof of Theorem \ref{existence-unicity trans-equ}, we have shown that
$$\psi_s=\lim_{n\rightarrow \infty}\mathcal{N}^n_s(\underline{\theta_{1}^s}),$$
where $\underline{\theta_{1}^s}$ is the function on $S^{\ell-1}$ which is constantly equal to $\theta_{1}^s$.
By the definition of $\mathcal{N}_s$, it is obvious that
$$\mathcal{N}^n_s(\underline{\theta^s_1})=(\theta^s_1)^{\frac{1}{q^n}}\mathcal{N}^n_s(\underline{1}),$$
where $\underline{1}$ is the function constantly equal to $1$. However,
for any $s\in \mathbb R$, we have
$\lim_{n\rightarrow \infty}(\theta_{1}^s)^{\frac{1}{q^n}}=1$, so that
$$\psi_s=\lim_{n\rightarrow \infty}\mathcal{N}^n_s(\underline{1}).$$
The above convergence is actually uniform for $s$ in any compact set of $\mathbb R$.
Let $$\psi_{s, n}=\mathcal{N}^n_s(\underline{1}).$$
In order to prove convexity of the functions
$$
s \mapsto \psi_s(a), \quad \log \sum_{j \in S} \psi_s(b, j), \quad (a\in S^{\ell-1}, b\in S^{\ell-2})
$$
we have only to show those of $$
s \mapsto \psi_{s, n}(a), \quad \log \sum_{j \in S} \psi_{s, n}(b,j).
$$
Actually we will make a proof by induction on $n$.
Recall that a function $H$ of class $C^2$ is convex if $H''\ge 0$. A function $H$ of class $C^2$ is log-convex if $\log H$ is convex or equivalently
$H'' H \ge (H')^2$.
First we have the following initiation of the induction.
\begin{lemma}\label{lemma convexity initiation} For any $a\in S^{\ell-1}$, the function $s \mapsto \mathcal{L}_s \underline{1} (a)$
is log-convex.
\end{lemma}
\begin{proof}
The log-convexity of $s \mapsto \mathcal{L}_s \underline{1} (a)$ is equivalent to
$$
\left((\mathcal{L}_s \underline{1} (a))'\right)^2 \le (\mathcal{L}_s \underline{1} (a))''\, (\mathcal{L}_s \underline{1} (a)).
$$
Recall the definition of $\mathcal{L}_s \underline{1} (a)$:
$$
\mathcal{L}_s \underline{1} (a) = \sum_{j\in S} e^{s \varphi(a, j)}.
$$
Notice that
$$\left(e^{s\varphi(a,b)}\right)'=e^{s\varphi(a,b)}\varphi(a,b),\quad
\left(e^{s\varphi(a,b)}\right)''=e^{s\varphi(a,b)}\varphi^2(a,b).$$
Then log-convexity of $s \mapsto \mathcal{L}_s \underline{1} (a)$ is equivalent to
$$
\left(\sum_{j\in S} e^{s \varphi(a, j)} \varphi(a, j)\right)^2
\le \left(\sum_{j\in S} e^{s \varphi(a, j)} \varphi(a, j)^2\right)
\left(\sum_{j\in S} e^{s \varphi(a, j)} \right).
$$
This is nothing but the Cauchy-Schwarz inequality (see Lemma \ref{lemma convexity 1}).
\end{proof}
The induction will be based on the following recursive relation
$$
\psi_{s, n+1}(a) = \mathcal{N}_s \psi_{s, n} (a), \quad \mbox{equivalently}\quad
(\psi_{s, n+1}(a))^q = \mathcal{L}_s \psi_{s, n} (a).
$$
We are going to show that if $s\mapsto \mathcal{L}_s\psi_{s, n} (a)$ is log-convex, then so is
$s\mapsto \mathcal{L}_s\psi_{s, n+1} (a)$ and even $s \mapsto \mathcal{N}_s\psi_{s, n} (a)=\psi_{s, n+1}(a)$
is convex and
$$
\sum_{j\in S} \psi_{s, n+1}(b, j)
$$
is log-convex.
\begin{lemma}\label{lemma convexity 2}
Let $(u_s)_{s\in \mathbb R}$ be a family of functions in $\mathcal{F}(S^{\ell-1})$. We suppose that for $a\in S^{\ell-1}$, $s \mapsto u_s(a)$ is twice differentiable with respect to $s\in \mathbb R$. Let
$$
v_s(a)=\mathcal{N}_{s} u_s (a).
$$
Suppose that for any $a\in S^{\ell-1}$, $s \mapsto \mathcal{L}_s u_s(a)$ is log-convex. Then\\
\indent {\rm (1)}\ For all $a\in S^{\ell-1}$, $s\mapsto v_s (a)$ is convex.\\
\indent {\rm (2)}\ For all $b\in S^{\ell-2}$, $s \mapsto \sum_{j \in S}v_s(b, j)$ is log-convex.\\
\indent {\rm (3)}\ For all $a\in S^{\ell-1}$, $s \mapsto \mathcal{L}_s v_s(a)$ is log-convex.
\end{lemma}
\begin{proof} By the hypothesis, for each $a\in S^{\ell-1}$, the function $s \mapsto \mathcal{L}_s u_s(a)$ is log-convex. That is to say, if we let $H_s(a) = \mathcal{L}_s u_s(a)$, we have
\begin{equation}\label{Conv0}
H_s''(a)H_s(a)\geq \left(H'_s(a)\right)^2,
\end{equation}
where, as well as in the following, $'$ and $''$ will refer to the derivatives with respect to $s$.
(1) Since $v_s (a) = (H_s(a))^{1/q}$, we have
$$(v_s (a))'=\frac{1}{q}(H_s(a))^{\frac{1}{q}-1}H'_s(a).$$
In other words,
\begin{equation}\label{Conv1}
(v_s (a))'=v_s (a) R_s(a)
\end{equation}
with
$$
R_s(a) = \frac{1}{q} \frac{H'_s(a)}{ H_s(a)}.
$$
Furthermore we have
\begin{eqnarray*}
(v_s (a))'' & = & \frac{1}{q}\left(\frac{1}{q}-1\right)
(H_s(a))^{\frac{1}{q}-2}
[H'_s(a)]^2+\frac{1}{q}(H_s(a))^{\frac{1}{q}-1} H''_s(a)\\
& = & \frac{1}{q^2}
(H_s(a))^{\frac{1}{q}-2}
[H'_s(a)]^2 + \frac{1}{q}(H_s(a))^{\frac{1}{q}-2} [H_s(a) H''_s(a)
- (H'_s(a))^2].
\end{eqnarray*}
By the hypothesis (\ref{Conv0}), $(v_s (a))''\ge 0$. Thus we have proved (1). The last equality implies
$$ (v_s (a))'' \ge \frac{1}{q^2}
(H_s(a))^{\frac{1}{q}-2}
[H'_s(a)]^2.$$
In other words,
\begin{equation}\label{Conv2}
(v_s (a))''\ge v_s (a) [R_s(a)]^2.
\end{equation}
The relations (\ref{Conv1}) and (\ref{Conv2}) will be useful later.
(2)
By (\ref{Conv2}), we have
$$
\left(\sum_{j \in S} (v_s (b, j))'' \right)
\left(\sum_{j \in S} v_s (b, j) \right)
\geq \left(\sum_{j \in S} v_s (b, j) R_s(b, j)^2 \right)
\left(\sum_{j \in S} v_s (b, j) \right).
$$
Then, by the Cauchy-Schwarz inequality in the form of Lemma \ref{lemma convexity 1}, we have
\begin{eqnarray*}
\left(\sum_{j \in S} (v_s (b, j))'' \right)
\left(\sum_{j \in S} v_s (b, j) \right)
& \geq &
\left(\sum_{j \in S} v_s (b, j) R_s(b, j) \right)^2\\
& = &
\left(\sum_{j \in S} (v_s (b, j))' \right)^2
\end{eqnarray*}
where the last equality is due to (\ref{Conv1}).
Thus we have proved (2).
(3) Recall that
$$
\mathcal{L}_s v_s(a) = \sum_{j \in S} e^{s \varphi(a, j)} v_s (Ta, j).
$$
Notice that
$$\frac{d}{ds}\left( e^{s \varphi(a,j)}v_s(Ta,j)\right)=
e^{s \varphi(a,j)} \left[\varphi(a,j)v_s(Ta,j)+(v_s(Ta,j))'\right],$$
$$\frac{d^2}{ds^2}\left( e^{s \varphi(a,j)}v_s(Ta,j)\right) = e^{s\varphi(a,j)}\left[\varphi^2(a,j) v_s(Ta,j)+ 2\varphi(a,j) (v_s(Ta,j))'+ (v_s(Ta,j))''\right].
$$
By using (\ref{Conv1}), we can write
$$\frac{d}{ds}\left( e^{s \varphi(a,j)}v_s(Ta,j)\right) =
e^{s\varphi(a,j )}v_s(Ta,j)\left[\varphi(a,j)+R_s(Ta,j)\right].$$
By using (\ref{Conv1}) and (\ref{Conv2}), we get
$$
\varphi^2(a,j) v_s(Ta,j)+ 2\varphi(a,j) (v_s(Ta,j))'+ (v_s(Ta,j))'' \ge v_s(Ta,j)\left[\varphi(a,j) +R_s(Ta,j)\right]^2,
$$
so that
\begin{eqnarray*}
\frac{d^2}{ds^2}\left( e^{s \varphi(a,j)}v_s(Ta,j)\right) \ge e^{s\varphi(a,j)} v_s(Ta,j)\left[\varphi(a,j) +R_s(Ta,j)\right]^2.
\end{eqnarray*}
Therefore
$$
(\mathcal{L}_s v_s(a))'' \mathcal{L}_s v_s(a) \geq \left(\sum_{j\in S} C_s(a,j)D_s(a,j)^2\right) \left(\sum_{j\in S}C_s(a,j) \right),
$$
where
$$C_s(a,j)= e^{s\varphi(a,j)} v_s(Ta,j), \quad\ D_s(a,j)= \varphi(a,j) +R_s(Ta,j).$$
Then, by the Cauchy-Schwarz inequality (see Lemma \ref{lemma convexity 1}), we finally get
$$
(\mathcal{L}_s v_s(a))'' \mathcal{L}_s v_s(a) \geq \left(\sum_{j \in S}C_s(a,j)D_s(a,j)\right)^2
=[(\mathcal{L}_s v_s(a))']^2.
$$
That is the log-convexity of $s \mapsto \mathcal{L}_s v_s(a)$.
\end{proof}
\begin{theorem}\label{thm convexity}
For any $a\in \bigcup_{1\leq j\leq \ell} S^{\ell-j}$, the function $s\mapsto \psi_s(a)$ is convex.
The pressure function $P_\varphi(s)$ is also convex.
\end{theorem}
\begin{proof} We prove the convexity of $s\mapsto \psi_s(a)$ for $a\in S^{\ell-1}$
by showing that of $s\mapsto \psi_{s, n}(a)$, by induction on $n$. The induction is based on Lemma~\ref{lemma convexity initiation}
and Lemma~\ref{lemma convexity 2} (only the points (1) and (3) are used).
Now we prove convexity of $s\mapsto \psi_s(a)$ for $a\in S^{\ell-k}$ ($2\le k\le \ell$) by induction on $k$ and by using
what we have just proved above (as the initiation of induction). We can do that because of the following recursive relation:
for $a\in S^{\ell-k}$ ($2\le k\le \ell$), we have
$$
\psi_s(a)^q = \sum_{j\in S}\psi_s(a, j).
$$
The right hand side is the operator $\mathcal{L}_s$ defined by the $\varphi$ which is identically zero.
So the log-convexity of $\psi_s(a, j)$ implies that of $\psi_s(a)$ just as the log-convexity of $\psi_{s,n}$ implies that of $\psi_{s,n+1}(a)$.
Recall that the pressure function is proportional to
$$
s \mapsto \log \sum_{j \in S} \psi_s(j).
$$
The convexity of the pressure is just the log-convexity of $\sum_{j \in S} \psi_s(j)$, which is implied by Lemma~\ref{lemma convexity 2} (3) and the
log-convexity of $\psi_s(j)$.
\end{proof}
\subsection{Construction of the measures $\mu_s$ and $\mathbb{P}_{\mu_s}$}
Below we construct a class of $(\ell-1)$-Markov measures $\mu_s$ whose transition probabilities and initial laws are determined by the fixed point $\psi_s$ of the operator $\mathcal{N}_s$. The corresponding telescopic product measure $\mathbb P_{\mu_s}$ will play the same role as Gibbs measures played in the study of simple ergodic averages.
Fix $s\in \mathbb R$. Let $\psi_s$ be the function mentioned above.
Recall that $\psi_s$ was first defined on $S^{\ell-1}$ as follows
$$
\left(\psi_s (a)\right)^q
= \sum_{b \in S} e^{s \varphi(a, b)}
\psi_s (Ta, b), \quad (a\in S^{\ell-1}).
$$
Then it was extended to $S^k$ for $1\le k \le \ell-2$, by descending induction on $k$, as follows
$$
\psi_s (a)=\left(\sum_{b \in S} \psi_s (a, b)\right)^{\frac{1}{q}}, \quad \ (a\in S^k).
$$
These functions defined on words of length varying from $1$ to $\ell-1$ allow us to define an $(\ell-1)$-step Markov measure on $\Sigma_m$, which will
be denoted by $\mu_s$, with the initial law
\begin{equation}\label{def measure 1}
\pi_s([a_1,\cdots,a_{\ell-1}])=\prod_{j=1}^{\ell-1}\frac{\psi_s(a_1,\cdots,a_j)}{\psi_s^q(a_1,\cdots,a_{j-1})}
\end{equation}
and the transition probability
\begin{equation}\label{def measure 2}
Q_s\left([a_1,\cdots,a_{\ell-1}],[a_2,\cdots,a_{\ell}]\right)=e^{s\varphi(a_{1},\cdots,a_{\ell})} \frac{\psi_s(a_2,\cdots,a_{\ell})}{\psi_s^q(a_{1},\cdots,a_{\ell-1})}.
\end{equation}
Here we have identified $\Sigma_m$ with $(S^{\ell-1})^\mathbb{N}$. Actually, $\pi_s$ is a probability vector because
$$
\sum_{a_j \in S} \frac{\psi_s(a_1,\cdots,a_j)}{\psi_s^q(a_1,\cdots,a_{j-1})}=1
$$
and $Q_s$ is a transition probability because $\mathcal{N}_s \psi_s = \psi_s$.
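Explicitly, the normalization of $Q_s$ follows from the fixed point equation: for every $(a_1,\cdots,a_{\ell-1})\in S^{\ell-1}$,
$$\sum_{a_{\ell}\in S}Q_s\left([a_1,\cdots,a_{\ell-1}],[a_2,\cdots,a_{\ell}]\right)=\frac{\sum_{a_{\ell}\in S}e^{s\varphi(a_1,\cdots,a_{\ell})}\psi_s(a_2,\cdots,a_{\ell})}{\psi_s^q(a_1,\cdots,a_{\ell-1})}=\frac{\left(\mathcal{N}_s\psi_s(a_1,\cdots,a_{\ell-1})\right)^q}{\psi_s^q(a_1,\cdots,a_{\ell-1})}=1.$$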
As usual, $\mathbb{P}_{\mu_s}$ will denote the telescopic product measure associated to $\mu_s$. See \S 2.1 for its definition
and its general properties.
\section{Properties of the pressure function}
We have seen in the previous section that the pressure function is real analytic and convex on $\mathbb R$. In this section we continue to discuss some of its further properties. These properties mainly concern its strict convexity when $\alpha_{\min}<\alpha_{\max}$ and a Ruelle type formula relating the expected limit of the multiple ergodic average with respect to the measure $\mathbb P_{\mu_s}$ and the derivative of $P_{\varphi}$.
\subsection{Ruelle type formula}
We state here the following identity, which can be regarded as an analogue of Ruelle's derivative formula for the classical Gibbs measure and pressure function; its proof will be given in Section \ref{Ruelle formula} (Proposition \ref{prop 1}).
\begin{theorem}\label{Thm Ruelle formula} We have
$$(q-1)^2\sum_{k=1}^\infty\frac{1}{q^{k+1}}\sum_{j=0}^{k-1}\mathbb E_{\mu_s}\varphi(x_j,\cdots,x_{j+\ell-1})=P_\varphi'(s).$$
\end{theorem}
As an application of Theorem \ref{Thm Ruelle formula}, we give the following formula for the value $P'_\varphi(0)$.
\begin{proposition}\label{prop average1}
$$P'_\varphi(0)=\frac{\sum_{a\in S^{\ell}}\varphi(a)}{m^{\ell}}.$$
\end{proposition}
\begin{proof}
By Theorem \ref{Thm Ruelle formula}, we have
\begin{equation}\label{equ prop average}
P'_\varphi(0)=(q-1)^2\sum_{k=1}^\infty\frac{1}{q^{k+1}}\sum_{j=0}^{k-1}\mathbb E_{\mu_0}
\varphi(x_j,\cdots,x_{j+\ell-1}).
\end{equation}
First of all, we need to determine $\mu_0$. It is straightforward to verify that the constant function $\psi_0\equiv m^{\frac{1}{q-1}}$ is a solution of the following equations when $s=0$.
$$
\left(\psi_s (a)\right)^q
= \sum_{b \in S} e^{s \varphi(a, b)}
\psi_s (Ta, b), \quad (a\in S^{\ell-1}).
$$
Actually, the function $\psi_0$ is the only positive solution by uniqueness of the positive solution (Theorem \ref{existence-unicity trans-equ}). The measure $\mu_0$ defined by this solution as in (\ref{def measure 1}) and (\ref{def measure 2}) is the Lebesgue measure. So, for any $j\geq 0$ we have
\begin{eqnarray*}
\mathbb E_{\mu_0}
\varphi(x_j,\cdots,x_{j+\ell-1}) & = & \sum_{x_0,\cdots,x_{j+\ell-1}}\mu_0([x_0^{j+\ell-1}])\varphi(x_j,\cdots,x_{j+\ell-1}) \\
& = & \sum_{x_0,\cdots,x_{j+\ell-1}}m^{-(j+\ell)}\varphi(x_j,\cdots,x_{j+\ell-1}) \\
& = & \sum_{x_0,\cdots,x_{\ell-1}}m^{-\ell}\varphi(x_0,\cdots,x_{\ell-1})\\
&=& \frac{\sum_{a\in S^{\ell}}\varphi(a)}{m^{\ell}}.
\end{eqnarray*}
Now we get the desired result by substituting the above expression into (\ref{equ prop average}) and using the elementary identity $(q-1)^2\sum_{k=1}^\infty k\, q^{-(k+1)}=1$.
\end{proof}
\subsection{Translation via linearity}
\begin{theorem}\label{thm relation pressure}
For any $\beta\in \mathbb R$, we have $$P_\varphi(s)-\beta
s=P_{\varphi-\beta}(s),$$ where $P_{\varphi-\beta}(s)$ is the
pressure function associated to the potential $\varphi-\beta$.
\end{theorem}
\begin{proof}
Let $\mathcal{N}_{\varphi-\beta, s}$ be the operator as defined in (\ref{non-linear transfer operator}) with
$$ A(a)=e^{s(\varphi(a)-\beta) },\ \ (a\in S^{\ell}).$$
By Theorem \ref{existence-unicity trans-equ}, the operator
$\mathcal{N}_{\varphi-\beta, s}$ admits a unique positive fixed
function $g_s\in \mathcal{F}(S^{\ell-1})$. We have seen that $g_s$
is given by
$$g_s=\lim_{n\rightarrow\infty}\mathcal{N}_{\varphi-\beta, s}^n(\underline{1}).$$
By the definitions of $\mathcal{N}_s$ and
$\mathcal{N}_{\varphi-\beta, s}$, it is obvious that
$$\mathcal{N}_{\varphi-\beta, s}=e^{-\frac{s\beta}{q}}\mathcal{N}_s.$$
By induction we get that $$\mathcal{N}_{\varphi-\beta, s}^n=e^{-s\beta(\frac{1}{q}+\cdots+\frac{1}{q^n})}\mathcal{N}_s^n.$$
Thus
$$g_s=\lim_{n\rightarrow\infty}\mathcal{N}_{\varphi-\beta, s}^n(\underline{1})=
e^{-s\beta(\sum_{n=1}^\infty\frac{1}{q^n})}\psi_s=e^{-\frac{s\beta}{q-1}}\psi_s.$$
Since for $u\in\bigcup_{1\leq k\leq \ell-2}S^k$, $g_s(u)$ is defined by
$$g_s(u)=\left(\sum_{j=0}^{m-1}g_s(u,j)\right)^{\frac{1}{q}},
$$
we deduce that for $u\in S^k$ with $1\leq k\leq \ell-2$ we have
$$g_s(u)=e^{-\frac{s\beta}{(q-1)q^{\ell-1-k}}}\psi_s(u).$$
Thus
$$P_{\varphi-\beta}(s)=(q-1)q^{\ell-2}\log \sum_{j=0}^{m-1}g_s(j)=-s\beta+P_{\varphi}(s).$$
\end{proof}
\begin{remark}\label{remark} Note that when $\beta =\alpha_{\min}$ (resp. $\beta=\alpha_{\max}$), the function
$$s\longmapsto \mathcal{N}_{\varphi-\beta, s}$$ is increasing (resp. decreasing).
Then in this case, the function $s\ \mapsto\ g_s $ is also
increasing (resp. decreasing) and so is the pressure function
$s\mapsto P_{\varphi-\beta}(s)$.
\end{remark}
As an application of Theorem \ref{thm relation pressure} and Remark \ref{remark} we have the following consequence.
\begin{proposition}\label{prop cte}
If $s \mapsto P'_\varphi(s)$ is constant on $\mathbb R$, then $\varphi$ is
constant on $S^{\ell}$.
\end{proposition}
\begin{proof}
Suppose that $P'_\varphi$ is constant on $\mathbb R$. Then
$$P'_\varphi(s)\equiv P'_\varphi(0)=\frac{\sum_{a\in S^{\ell}}\varphi(a)}{m^{\ell}}:=\overline{\varphi}.$$
By Theorem \ref{thm relation pressure}, we have
$$P_\varphi(s)=\overline{\varphi} s+P_{\varphi-\overline{\varphi}}(s).$$
The last two equations imply that
$$P'_{\varphi-\overline{\varphi}}(s)\equiv0.$$
This is equivalent to that
\begin{equation}\label{equ prop cte}
\sum_{j=0}^{m-1}g_s'(j)\equiv0,
\end{equation}
where $g_s$ is the positive fixed point of $\mathcal{N}_{{\varphi-\overline{\varphi}},s}$.
By Theorem \ref{thm convexity}, the function $s\mapsto g_s$ is convex, so $g_s'(j)$ is increasing for all $j\in S$. Together with (\ref{equ prop cte}), this implies that $g_s'(j)$ is constant for all $j\in S$. So for every $j$ the function $s\mapsto g_s(j)$ is affine. But these functions are strictly positive on $\mathbb R$, so they are necessarily constant on $\mathbb R$. Hence $$g_s'(j)\equiv0,\ \ \forall j\in S.$$
For $u\in \bigcup_{1\leq k\leq \ell-2}S^k$, $g_s(u)$ is defined by the following inductive relation.
$$g_s(u)^q=\sum_{j=0}^{m-1}g_s(uj), \ \ u\in \bigcup_{1\leq k\leq \ell-2}S^k.$$
Differentiating these equations, we get
$$qg_s^{q-1}(u)g_s'(u)=\sum_{j=0}^{m-1}g_s'(uj), \ \ u\in \bigcup_{1\leq k\leq \ell-2}S^k.$$
For any $i\in S$, since $g_s'(i)\equiv0,$ we get
$$\sum_{j=0}^{m-1}g_s'(ij)\equiv0.$$
With the same argument used for proving that $g_s(j)$ is constant for all $j\in S$, we can also prove that
$g_s(ij)$ is constant for all $(i,j)\in S^2$. By induction, we can show that $g_s(u)$ are constant for all $u\in \bigcup_{1\leq k\leq \ell-1}S^{k}$. By the definition of $g_s$, for $u\in S^{\ell-1}$, we have
\begin{equation}\label{equ prop cte1}
g_s^q(u)=\sum_{j=0}^{m-1}e^{s(\varphi(uj)-\overline{\varphi})}g_s(Tu,j).
\end{equation}
We now suppose that $\varphi$ is not constant on $S^{\ell}$, i.e., $\alpha_{\min}<\alpha_{\max}$. Then there exists
$a\in S^{\ell}$ such that $$\varphi(a)>\overline{\varphi}.$$
Let us write $a=(u,j)$ with $u\in S^{\ell-1}$ and $j\in S$.
By (\ref{equ prop cte1}), we have
$$g_s^q(u)>e^{s(\varphi(u,j)-\overline{\varphi})}g_s(Tu,j), \ \ \forall s\in \mathbb R.$$
As $g_s(u)$ and $g_s(Tu,j)$ are strictly positive constants, this is impossible as $s$ tends to $+\infty$. We conclude that $\varphi$ is constant on $S^{\ell}$.
\end{proof}
\subsection{Strict convexity of the pressure function}
\begin{theorem}\label{thm strict convexity}
Suppose that $\alpha_{\min}<\alpha_{\max}$. Then
{\rm (i)} $P_{\varphi}'(s)$ is
strictly increasing on $\mathbb R$.
{\rm (ii)} $\alpha_{\min}\leq
P'_\varphi(-\infty)<P'_\varphi(+\infty)\leq \alpha_{\max}.$
\end{theorem}
\begin{proof}
(i) {\em $P'_\varphi(s)$ is strictly increasing on $\mathbb R$.} We know
that $P'_\varphi$ is increasing on $\mathbb R$ as $P_\varphi$ is convex on
$\mathbb R$. Suppose that $P'_\varphi$ is not strictly increasing on $\mathbb R$.
Then there exists an interval $[a,b]$ with $a<b$ such that
$P'_\varphi$ is constant on $[a,b]$. On the other hand, we know that
$P_\varphi$ is analytic and so is $P'_\varphi$. Therefore
$P'_\varphi$ must be constant on the whole line $\mathbb{R}$. It is
impossible by Proposition \ref{prop cte} as $\varphi$ is supposed to
be non-constant on $S^\ell$.
(ii)\ {\em $\alpha_{\min }\leq
P'_{\varphi}(-\infty)<P'_{\varphi}(+\infty)\leq \alpha_{\max}$}. The
strict inequality $P'_{\varphi}(-\infty)<P'_{\varphi}(+\infty)$ is
implied by (i). Let us prove the first inequality. The third
inequality can be similarly proved. By Theorem \ref{thm
relation pressure}, we have
$$P_{\varphi}(s)=\alpha_{\min}s+P_{\varphi-\alpha_{\min}}(s).$$
By Remark \ref{remark}, the function $s\mapsto
P_{\varphi-\alpha_{\min}}(s)$ is increasing. Thus we have
$$P'_{\varphi}(s)=\alpha_{\min}+P'_{\varphi-\alpha_{\min}}(s)\geq \alpha_{\min}$$
which holds for all $s\in \mathbb R$. Letting $s\rightarrow -\infty$, we get
$$\alpha_{\min }\leq P'_{\varphi}(-\infty).$$
\end{proof}
To finish this section, we announce the following result concerning the extremal values of $P'_\varphi$ at infinity. Its proof will be given in Section \ref{derivative pressure infinite}.
\begin{theorem}\label{thm extreme1}
We have the equality $$P'_{\varphi}(-\infty)=\alpha_{\min}$$ if and only if there exists an $x=(x_i)_{i=1}^\infty\in \Sigma_m$ such that $$\varphi(x_k,x_{k+1},\cdots,x_{k+\ell-1})=\alpha_{\min},\
\forall k\geq 1.$$ We have an analogous criterion for
$P'_{\varphi}(+\infty)=\alpha_{\max}.$
\end{theorem}
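For instance, if there is a letter $a\in S$ with $\varphi(a,\cdots,a)=\alpha_{\min}$, then the constant sequence $x=(a,a,\cdots)$ satisfies the above condition, so that $P'_{\varphi}(-\infty)=\alpha_{\min}$ in this case.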
\begin{remark}
We have a three-page proof obtained by a combinatorial analysis of
$P_\varphi$. But we would like to give another proof in Section
\ref{derivative pressure infinite} (see Proposition \ref{thm extreme2}),
which is shorter, more intuitive and easier to understand.
\end{remark}
\section{Gibbs property of $\mathbb P_{\mu_s}$}
In the following we are going to establish a relation between the mass $\mathbb P_{\mu_s}([x_1^n])$
and the multiple ergodic sum $\sum_{j=1}^{\lfloor n/q^{\ell-1}\rfloor} \varphi(x_j\cdots x_{jq^{\ell-1}})$. This can be regarded as the Gibbs property of the measure $\mathbb P_{\mu_s}$.
\subsection{Dependence of the local behavior of $\mathbb P_{\mu_s}$ on $\varphi(x_j\cdots x_{jq^{\ell-1}})$}
There is an explicit relation between the mass $\mathbb P_{\mu_s}([x_1^n])$
and the multiple ergodic sum $\sum_{j=1}^{\lfloor\frac{n}{q^{\ell-1}}\rfloor} \varphi(x_j\cdots x_{jq^{\ell-1}})$.
Before stating this relation, we introduce some notation.
Recall that for any integer $k\in \mathbb N^*$ we denote by $i(k)$ the unique integer such that
$$k=i(k)q^j, \ \ \ q\nmid i(k).$$
We associate to $k$ a finite set of integers $\lambda_k$ defined by
$$\lambda_k\ := \left\{ \begin{array}{lcl}
\{i(k),i(k)q,\cdots,i(k)q^j\} & {\rm if } & j<\ell-1 \\
\{i(k)q^{j-(\ell-1)},\cdots,i(k)q^j\} & {\rm if } & j\geq\ell-1.
\end{array}\right.
$$
We define $\lambda_\alpha$ to be the empty set if $\alpha$ is not an integer.
For any sequence $x=(x_i)_{i=1}^\infty \in \Sigma_m$, we denote by $x_{|_{\lambda_k}}$ the restriction of $x$ on $\lambda_k$.
For $x\in \Sigma_m$, we define $$B_n(x)=\sum_{j=1}^n\log\psi_s(x_{|_{\lambda_j}}).$$
The following basic formula is a consequence of the definitions of $\mu_s$ and $\mathbb{P}_{\mu_s}$.
\begin{proposition}\label{prop basic formula loc dim}
We have
$$\log \mathbb P_{\mu_s}([x_1^n])=s\sum_{j=1}^{\left\lfloor\frac{n}{q^{\ell-1}}\right\rfloor}\varphi(x_j\cdots x_{jq^{\ell-1}})-(n-\lfloor n/q\rfloor) q \log \psi_s (\emptyset)- qB_{\frac{n}{q}}(x)+ B_n(x).$$
\end{proposition}
\begin{proof}
By the definition of $\mathbb P_{\mu_s}$, we have
\begin{equation}\label{unper bounds 1}
\log \mathbb P_{\mu_s}([x_1^n])=\sum_{q\nmid i,i\leq n}\log\mu_s([x_1^n|_{\Lambda_i(n)}]).
\end{equation}
However, by the definition of $\mu_s$, if $\sharp\Lambda_i(n)\leq \ell-1$, we have
\begin{equation}\label{unper bounds 1_1}
\log\mu_s([x_1^n|_{\Lambda_i(n)}])=\sum_{j=0}^{\sharp\Lambda_i(n)-1}\log\frac{\psi_s(x_i,\cdots,x_{iq^j})}{\psi_s^q(x_i,\cdots,x_{iq^{j-1}})}=\sum_{k\in \Lambda_i(n)}\log\frac{\psi_s(x_{|_{\lambda_k}})}{\psi_s^q(x_{|_{\lambda_{k/q}}})}.
\end{equation}
If $\sharp\Lambda_i(n)\geq \ell$, $\log\mu_s([x_1^n|_{\Lambda_i(n)}])$ is equal to
$$ \sum_{j=0}^{\ell-2}\log\frac{\psi_s(x_i,\cdots,x_{iq^j})}{\psi_s^q(x_i,\cdots,x_{iq^{j-1}})}+\sum_{j=\ell-1}^{\sharp\Lambda_i(n)-1}\log\frac{\psi_s(x_{iq^{j-\ell+2}},\cdots,x_{iq^j})e^{s\varphi(x_{iq^{j-\ell+1}},\cdots,x_{iq^j})}}{\psi_s^q(x_{iq^{j-\ell+1}},\cdots,x_{iq^{j-1}})}
$$
$$ =\sum_{j=0}^{\sharp\Lambda_i(n)-1}\log\frac{\psi_s(x_i,\cdots,x_{iq^j})}{\psi_s^q(x_i,\cdots,x_{iq^{j-1}})} + s\sum_{j=\ell-1}^{\sharp\Lambda_i(n)-1}\varphi(x_{iq^{j-\ell+1}},\cdots,x_{iq^j}),
$$
in other words,
\begin{equation}\label{unper bounds 1_2}
\log\mu_s([x_1^n|_{\Lambda_i(n)}])
= \sum_{k\in \Lambda_i(n)}\log\frac{\psi_s(x_{|_{\lambda_k}})}{\psi_s^q(x_{|_{\lambda_{\frac{k}{q}}}})}+s\sum_{k\in \Lambda_i(n),k\leq n}\varphi(x_{|_{\lambda_k}}).
\end{equation}
Substituting (\ref{unper bounds 1_1}) and (\ref{unper bounds 1_2}) into (\ref{unper bounds 1}), we get
\begin{equation}\label{upper bounds 2}
\log \mathbb P_{\mu_s}([x_1^n])= S_n' + s S_n''
\end{equation}
where
$$S_n' =
\sum_{q\nmid i,i\leq n} \sum_{k\in \Lambda_i(n)}\log\frac{\psi_s(x_{|_{\lambda_k}})}{\psi_s^q(x_{|_{\lambda_{\frac{k}{q}}}})}
$$
$$
S_n'' = \sum_{q\nmid i,i\leq n} \sum_{k\in \Lambda_i(n),k\leq n}\varphi(x_{|_{\lambda_k}}).
$$
For any fixed $i$ with $q\nmid i$, we write
$$
\sum_{k\in \Lambda_i(n)}\log\frac{\psi_s(x_{|_{\lambda_k}})}{\psi_s^q(x_{|_{\lambda_{\frac{k}{q}}}})}
= \sum_{k\in \Lambda_i(n)}\log \psi_s(x_{|_{\lambda_k}}) - q \sum_{k\in \Lambda_i(n)}\log \psi_s (x_{|_{\lambda_{\frac{k}{q}}}}).
$$
Recall that if $j_0 = \lfloor \log_q \frac{n}{i}\rfloor$ denotes the largest integer such that $iq^{j_0}\le n$, then
$$
\Lambda_i(n)=\{i, i q, iq^2, \cdots, iq^{j_0}\}.$$
If $k= i$, then $k/q$ is not an integer, so $\lambda_{k/q}=\emptyset$ and $x_{|_{\lambda_{k/q}}}=\emptyset$. If $k =iq^j$ with $1\le j\le j_0$, we have $k/q = iq^{j-1}$ which belongs to $\Lambda_i(n)$.
In the following we formally write
$$
\Lambda_i(n/q)=\{i, i q, iq^2, \cdots, iq^{j_0-1}\}.
$$
Then we can write
$$\sum_{k\in \Lambda_i(n)}\log\frac{\psi_s(x_{|_{\lambda_k}})}{\psi_s^q(x_{|_{\lambda_{\frac{k}{q}}}})}=(1-q)\sum_{k\in \Lambda_i(\frac{n}{q})}\log\psi_s(x_{|_{\lambda_k}}) - q \log \psi_s (\emptyset) + \sum_{k\in \Lambda_i(n),kq>n}\log\psi_s(x_{|_{\lambda_k}}).$$
Notice that there is only one term in the last sum, which corresponds to $k = iq^{j_0}$. Now we take sum over $i$ to get
\begin{eqnarray*}
S_n'
= (1-q)\sum_{k\leq \frac{n}{q}}\log\psi_s(x_{|_{\lambda_k}}) - q(n-\lfloor n/q\rfloor)\log \psi_s (\emptyset)+ \sum_{k> \frac{n}{q}}\log\psi_s(x_{|_{\lambda_k}}),
\end{eqnarray*}
because $\sharp\{i\leq n,q\nmid i\} = n-\lfloor n/q\rfloor$ and
\begin{eqnarray*}
\sum_{i\leq n,q\nmid i}\sum_{k\in \Lambda_i(\frac{n}{q})}\log\psi_s(x_{|_{\lambda_k}})
= \sum_{k\leq \frac{n}{q}}\log\psi_s(x_{|_{\lambda_k}}),
\end{eqnarray*}
\begin{eqnarray*}
\sum_{i\leq n,q\nmid i}\sum_{k\in \Lambda_i(n),kq>n}\log\psi_s(x_{|_{\lambda_k}})
=\sum_{k> \frac{n}{q}}\log\psi_s(x_{|_{\lambda_k}}).
\end{eqnarray*}
Recall that $B_n(x)=\sum_{j=1}^n\log\psi_s(x_{|_{\lambda_j}})$.
We can rewrite
\begin{eqnarray*}
(1-q)\sum_{k\leq \frac{n}{q}}\log\psi_s(x_{|_{\lambda_k}}) + \sum_{k> \frac{n}{q}}\log\psi_s(x_{|_{\lambda_k}})
&= & -q\sum_{k\leq \frac{n}{q}}\log\psi_s(x_{|_{\lambda_k}}) + \sum_{k\le n }\log\psi_s(x_{|_{\lambda_k}})
\\
& = &- qB_{\frac{n}{q}}(x) + B_n(x).
\end{eqnarray*}
Thus
\begin{eqnarray*}
S_n ' = -q(n-\lfloor n/q\rfloor)\log \psi_s(\emptyset)- qB_{\frac{n}{q}}(x) + B_n(x).
\end{eqnarray*}
On the other hand, we have
$$S_n''= \sum_{q\nmid i,i\leq n}\ \ \sum_{k\in \Lambda_i(n),k\leq n}\varphi(x_{|_{\lambda_k}})=\sum_{k\leq n}\varphi(x_{|_{\lambda_k}})=\sum_{j=1}^{\left\lfloor\frac{n}{q^{\ell-1}}\right\rfloor}\varphi(x_j\cdots x_{jq^{\ell-1}}). $$
Substituting these expressions of $S_n'$ and $S_n''$ into (\ref{upper bounds 2}), we get the desired result.
\end{proof}
\section{Proof of Theorem \ref{thm principal}: computation of $\dim_H E(\alpha)$}\label{Proof thm principal}
We will use the measure $\mathbb P_{\mu_s}$ to estimate the dimensions of
level sets $E(\alpha)$. Actually, for a given $\alpha$, there is
some $s$ such that $\mathbb P_{\mu_s}$ is a nice Frostman type measure
sitting on $E(\alpha)$.
First of all, let us calculate the
local dimensions of $\mathbb P_{\mu_s}$.
\subsection{Upper bounds of local dimensions of $\mathbb P_{\mu_s}$ on level sets}
We define
$$E^+(\alpha):=\left\{x\in \Sigma_m :
\limsup_{n\rightarrow\infty}\frac{1}{n}\sum_{k=1}^{n}\varphi(x_k,x_{kq},\cdots,x_{kq^{\ell-1}})\leq\alpha\right\},$$
and
$$E^-(\alpha):=\left\{x\in \Sigma_m :
\liminf_{n\rightarrow\infty}\frac{1}{n}\sum_{k=1}^{n}\varphi(x_k,x_{kq},\cdots,x_{kq^{\ell-1}})
\geq\alpha\right\}.$$ It is clear that
$$
E(\alpha) = E^+(\alpha) \cap E^-(\alpha).
$$
In this subsection we
will obtain upper bounds for local dimensions of $\mathbb P_{\mu_s}$ on
the sets $E^+ (\alpha)$ and $E^-(\alpha)$. The following elementary
result will be useful for the estimation of local dimensions
of $\mathbb P_{\mu_s}$.
\begin{lemma}\label{upperbounds lemma}
Let $(a_n)_{n\geq 1}$ be a bounded sequence of non-negative real numbers. Then
\[\liminf_{n\rightarrow\infty}\left(a_{\lfloor n/q \rfloor}-a_n\right)\leq 0.\]
\end{lemma}
\begin{proof}
Let $b_l=a_{q^{l-1}}-a_{q^{l}}=a_{\frac{q^l}{q}}-a_{q^{l}}$ for
$l\in\mathbb N^*$. Then the boundedness implies
$$\lim_{l \rightarrow \infty }\frac{b_1+\cdots+b_l}{l}= \lim_{l \rightarrow \infty } \frac{a_1-a_{q^l}}{l}=0.$$
This in turn implies $\liminf_{l\rightarrow \infty }b_l\leq 0$ so that
$$\liminf_{n\rightarrow \infty }\left(a_{\lfloor n/q \rfloor}-a_n\right)\leq \liminf_{l\rightarrow \infty }b_l\leq 0.$$
\end{proof}
\begin{proposition}\label{prop loc dim upper bound}
For every $x\in E^+(\alpha)$, we have
$$\forall s\leq0, \ \ \ \underline{D}(\mathbb P_{\mu_s},x)\leq
\frac{P(s) -\alpha s}{q^{\ell-1}\log m}.$$
For every $x\in E^-(\alpha)$, we have
$$\forall s\geq0, \ \ \ \underline{D}(\mathbb P_{\mu_s},x)\leq
\frac{P(s) -\alpha s}{q^{\ell-1}\log m}.$$
Consequently, for every $x\in E(\alpha)$, we have
$$\forall s\in \mathbb R, \ \ \ \underline{D}(\mathbb P_{\mu_s},x)\leq
\frac{P(s) -\alpha s}{q^{\ell-1}\log m}.$$
\end{proposition}
\begin{proof} The proof is based on
Proposition \ref{prop basic formula loc dim}, which implies that for any $x\in \Sigma_m$ and any $n\geq 1$ we have
{\setlength\arraycolsep{2pt}
\begin{eqnarray*}
-\frac{\log\mathbb P_{\mu_s}([x_1^n])}{n}& = & -\frac{s}{n}\sum_{j=1}^{\lfloor\frac{n}{q^{\ell-1}}\rfloor}\varphi(x_j\cdots x_{jq^{\ell-1}})+
q \frac{n-\lfloor n/q\rfloor}{n}\log \psi_s(\emptyset)
\nonumber\\
& & +\frac{B_{\frac{n}{q}}(x)}{\frac{n}{q}} -\frac{B_n(x)}{n}.
\end{eqnarray*}}
Since the function $\psi_s$ is bounded, so is the sequence $(B_n(x)/n)_n$. Then, by Lemma \ref{upperbounds lemma}, we have
$$\liminf_{n\rightarrow \infty}\frac{B_{\frac{n}{q}}(x)}{\frac{n}{q}} -\frac{B_n(x)}{n}\leq 0.$$
Therefore
\begin{equation*}\label{UpperEstimateOfD}
\underline{D}(\mathbb P_{\mu_s},x)\le \liminf_{n\rightarrow \infty} -\frac{s}{n
\log
m}\sum_{j=1}^{\lfloor\frac{n}{q^{\ell-1}}\rfloor}\varphi(x_j\cdots
x_{jq^{\ell-1}})+
(q -1) \log_m \psi_s(\emptyset).
\end{equation*}
Now suppose that $x\in E^+(\alpha)$ and $s\leq 0$. Since
$$\liminf_{n\rightarrow \infty}\frac{1}{n}\sum_{j=1}^{\lfloor\frac{n}{q^{\ell-1}}\rfloor}\varphi(x_j\cdots x_{jq^{\ell-1}})\leq \limsup_{n\rightarrow \infty}\frac{1}{n}\sum_{j=1}^{\lfloor\frac{n}{q^{\ell-1}}\rfloor}\varphi(x_j\cdots x_{jq^{\ell-1}})\leq\frac{\alpha}{q^{\ell-1}},$$
we have
\begin{eqnarray*}
\liminf_{n\rightarrow \infty}-\frac{s}{n}\sum_{j=1}^{\lfloor\frac{n}{q^{\ell-1}}\rfloor}\varphi(x_j\cdots x_{jq^{\ell-1}}) &= & -s\liminf_{n\rightarrow \infty}\frac{1}{n}\sum_{j=1}^{\lfloor\frac{n}{q^{\ell-1}}\rfloor}\varphi(x_j\cdots x_{jq^{\ell-1}}) \\
& \leq& \frac{-s\alpha}{q^{\ell-1}},
\end{eqnarray*}
so that
$$\underline{D}(\mathbb P_{\mu_s},x) \leq -\frac{\alpha s}{q^{\ell-1}\log m }+(q-1)\log_m \psi_s(\emptyset)=\frac{P(s) - \alpha s }{q^{\ell-1}\log m}, $$
where the last equation is due to
$$P(s)=(q-1)q^{\ell -2}\log\sum_{j\in S^\ell}\psi_s(j)=(q-1)q^{\ell -2}q\log\psi_s(\emptyset). $$
By an analogous argument, we can prove the same result for $x\in E^-(\alpha)$ and $s\geq 0$.
\end{proof}
\subsection{Range of $L_\varphi$}
Recall that $L_\varphi$ is the set of $\alpha$ such that $E(\alpha)\neq \emptyset$.
\begin{proposition} \label{range}
We have
$L_\varphi\subset [P'_\varphi(-\infty),P'_\varphi(+\infty)].$
\end{proposition}
\begin{proof} We prove it by contradiction.
Suppose that $E(\alpha)\neq \emptyset$ for some
$\alpha<P'_\varphi(-\infty)$. Let $x=(x_i)_{i=1}^\infty\in
E(\alpha)$. Then by Proposition \ref{prop loc dim upper bound}, we
have
\begin{equation}\label{prop borne inf de mesure1}
\liminf_{n\rightarrow\infty}\frac{-\log_m\mathbb P_{\mu_s}([x_1^n])}{n}\leq
\frac{P_{\varphi}(s)-\alpha s}{q^{\ell-1}\log m}, \ \forall s\in \mathbb R.
\end{equation}
On the other hand, by the mean value theorem, we have
\begin{equation}\label{prop borne inferieure de mes2}
P_{\varphi}(s)-\alpha s=P_{\varphi}(s)-P_{\varphi}(0)-\alpha s+P_{\varphi}(0)=P_{\varphi}'(\eta_s)s-\alpha s+P_{\varphi}(0)
\end{equation} for some real number $\eta_s$ between $0$ and $s$. As $P_{\varphi} $ is convex, $P_{\varphi}'$ is increasing on $\mathbb R$. If we assume $s<0$, then we have
$$
P_{\varphi}'(\eta_s) s -\alpha s+P_{\varphi}(0) \leq P_{\varphi}'(-\infty) s-\alpha
s+P_{\varphi}(0)=\left(P_{\varphi}'(-\infty)-\alpha \right) s +P_{\varphi}(0).$$ As
$P_{\varphi}'(-\infty)-\alpha>0$, we deduce from (\ref{prop borne inferieure
de mes2}) that for $s$ small enough (close to $-\infty$), we have
$P_{\varphi}(s)-\alpha s<0$. Then by (\ref{prop borne inf de mesure1}), for
$s$ small enough we obtain
$$\liminf_{n\rightarrow\infty}\frac{-\log_m\mathbb P_{{\mu_s}}([x_1^n])}{n}<0$$
which implies $\mathbb P_{{\mu_s}}([x_1^n])>1$ for infinitely many
$n$. This contradicts the fact that $\mathbb P_{\mu_s}$ is a
probability measure on $\Sigma_m$. Thus we have proved that
$\alpha \ge P'_\varphi(-\infty)$ whenever $E(\alpha)\not=\emptyset$.
Similarly we can prove $\alpha \le P'_\varphi(+\infty)$.
\end{proof}
As we shall show, equality in fact holds: $L_\varphi=
[P'_\varphi(-\infty),P'_\varphi(+\infty)]$.
\subsection{Upper bounds of Hausdorff dimensions of level sets}
An upper bound for the Hausdorff dimensions of the level sets is a direct consequence of the Billingsley lemma
and of Proposition \ref{prop loc dim upper bound}.
The Billingsley lemma is stated as follows.
\begin{lemma}[see Prop.4.9 in \cite{Fal90}]\label{Billingsley}
Let $E$ be a Borel set in $\Sigma_m$ and let $\nu$ be a finite Borel measure on $\Sigma_m$.\\
\indent \ {\rm (i)} We have $\dim_H(E)\geq d$ if $\nu(E) > 0$ and $\underline{D}(\nu,x)\geq d$ for $\nu$-a.e.\ $x$.\\
\indent {\rm (ii)} We have $\dim_H(E) \leq d$ if $\underline{D}(\nu,x)\leq d$ for all $x \in E$.
\end{lemma}
Recall that
$$
P_{\varphi}^*(\alpha) = \inf_{s\in \mathbb{R}} (P_\varphi(s)-\alpha s).
$$
\begin{proposition}\label{prop upper bound}
For any $\alpha\in (P_{\varphi}'(-\infty),P_{\varphi}'(0))$, we have
$$
\dim_HE^+(\alpha) \leq \inf_{s\leq 0}\frac{1}{q^{\ell-1}\log
m}[-\alpha s+P_\varphi(s)]
$$
For any $\alpha\in (P_{\varphi}'(0),P_{\varphi}'(+\infty))$, we have
$$
\dim_HE^-(\alpha)\leq \inf_{s\geq 0}\frac{1}{q^{\ell-1}\log
m}[-\alpha s+P_\varphi(s)]
$$
In particular, we have
$$\dim_H
E(\alpha)\leq\frac{P_{\varphi}^*(\alpha)}{q^{\ell-1}\log m}.
$$
\end{proposition}
\subsection{Ruelle type formula} \label{Ruelle formula}
This subsection is mainly devoted to proving the following identity which was announced in Theorem \ref{Thm Ruelle formula}.
$$(q-1)^2\sum_{k=1}^\infty\frac{1}{q^{k+1}}\sum_{j=0}^{k-1}\mathbb E_{\mu_s}\varphi(x_j,\cdots,x_{j+\ell-1})=P_\varphi'(s).$$
This formula will be
useful for estimating the lower bounds of $\dim_H E(\alpha)$.
We need to do some preparations for proving this result.
First of all, we deduce
some identities concerning the functions
$\psi_s$.
Recall that $\psi_s(a)$ are defined for $a\in \bigcup_{1\leq k\leq
\ell-1}S^k $. They verify the following equations. For $a\in
S^{\ell-1}$, we have
$$\psi_s^q(a)=\sum_{b\in S}e^{s\varphi(a,b)}\psi_s(Ta,b)$$ and
for $a\in S^k$ ($1\le k \le \ell-2$) we have
$$\psi_s^q(a)=\sum_{b\in S}\psi_s(a,b).$$
Differentiating the two sides of each of the above two equations with respect to $s$, we get for all $a\in S^{\ell-1}$
$$q\psi_s^{q-1}(a)\psi_s'(a)=\sum_{b\in S}e^{s\varphi(a,b)}
\varphi(a,b)\psi_s(Ta,b)+\sum_{b\in
S}e^{s\varphi(a,b)}\psi_s'(Ta,b)$$
and for all $a\in
\bigcup_{1\leq k\leq \ell-2}S^k$
$$q\psi_s^{q-1}(a)\psi_s'(a)=\sum_{b\in S}\psi_s'(a,b).$$ Dividing these equations by
$\psi_s^q(a)$ (for different $a$ respectively), we get
\begin{lemma}
For any $a\in S^{\ell-1}$, we have
\begin{equation}\label{identity1}
q\frac{\psi_s'(a)}{\psi_s(a)}=\sum_{b\in
S}\frac{e^{s\varphi(a,b)}\varphi(a,b)\psi_s(Ta,b)}
{\psi_s^{q}(a)}+\sum_{b\in
S}\frac{e^{s\varphi(a,b)}\psi_s'(Ta,b)}{\psi_s^{q}(a)},
\end{equation}
and for any $a\in \bigcup_{1\leq
k\leq \ell-2}S^k$
\begin{equation}\label{identity2}
q\frac{\psi_s'(a)}{\psi_s(a)}=\sum_{b\in
S}\frac{\psi_s'(a,b)}{\psi_s(a,b)}.
\end{equation}
\end{lemma}
We denote
$$w(a)=\frac{\psi_s'(a)}{\psi_s(a)}, \ \ \ \
v(a)=\sum_{b\in
S}\frac{e^{s\varphi(a,b)}\psi_s'(Ta,b)}{\psi_s^{q}(a)}, (\forall
a\in S^{\ell-1}).
$$
Then we have the following identities.
\begin{lemma}\label{identity} For any $n\in\mathbb N$, we have
\begin{eqnarray}\label{E1}
\mathbb E_{\mu_s}\varphi(x_n^{n+\ell-1})&=& q\mathbb E_{\mu_s}
w(x_n^{n+\ell-2})-\mathbb E_{\mu_s} v(x_n^{n+\ell-2}), \ \ (\forall n \ge 0). \\
\label{E2}
\mathbb E_{\mu_s} w(x_n^{n+\ell-2})&=&\mathbb E_{\mu_s} v(x_{n-1}^{n+\ell-3}),\ \ (\forall n \ge 1). \\
\label{E3}
\mathbb E_{\mu_s} w(x_0^{\ell-2})&=&\frac{1}{q(q-1)}P_{\varphi}'(s).
\end{eqnarray}
\end{lemma}
\begin{proof}
The Markov property of $\mu_s$ can be stated as follows (see \eqref{def measure 2})
$$
\mu_s ([x_0^{n+\ell-1}]) = \mu_s ([x_0^{n+\ell-2}]) Q_s(x_n^{n+\ell -1})
$$
where $$ Q_s(x_n^{n+\ell -1}) =
\frac{e^{s\varphi(x_n^{n+\ell-1})}\psi_s(x_{n+1}^{n+\ell-1})}{\psi_s^q(x_{n}^{n+\ell-2})}.
$$
By the Markov property, we have
\begin{eqnarray*}
\mathbb{E}_{\mu_s} \varphi(x_n^{n+\ell-1})
& = & \sum_{x_0,\cdots,x_{n+\ell-1}}\mu_s([x_0^{n+\ell-1}])\varphi(x_n^{n+\ell-1})\\
& = &
\sum_{x_0,\cdots,x_{n+\ell-2}}\mu_s([x_0^{n+\ell-2}])\sum_{x_{n+\ell-1}}
Q_s(x_n^{n+\ell -1})
\varphi(x_n^{n+\ell-1}).
\end{eqnarray*}
However, by the definition of $Q_s$ and using (\ref{identity1}), it is straightforward to check that
$$\sum_{x_{n+\ell-1}}
Q_s(x_n^{n+\ell -1})
\varphi(x_n^{n+\ell-1})=qw(x_n^{n+\ell-2})-v(x_n^{n+\ell-2}).$$
So (\ref{E1}) is a combination of the above two equations.
To obtain (\ref{E2}), we still use the Markov property of $\mu_s$,
to get
$$\mathbb{E}_{\mu_s} w(x_n^{n+\ell -2}) =
\sum_{x_0,\cdots,x_{n+\ell-2}}\mu_s([x_0^{n+\ell-2}])w(x_n^{n+\ell-2}) \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ $$
\begin{eqnarray*}
\ \ \ \ & = & \sum_{x_0,\cdots,x_{n+\ell-3}}\mu_s([x_0^{n+\ell-3}])\sum_{x_{n+\ell-2}}
\frac{e^{s\varphi(x_{n-1}^{n+\ell-2})}\psi_s(x_{n}^{n+\ell-2})}
{\psi_s^q(x_{n-1}^{n+\ell-3})}\frac{\psi_s'(x_n^{n+\ell-2})}{\psi_s(x_n^{n+\ell-2})} \\
\ \ \ \ & = & \sum_{x_0,\cdots,x_{n+\ell-3}}\mu_s([x_0^{n+\ell-3}]) v(x_{n-1}^{n+\ell-3})
= \mathbb{E}_{\mu_s} v(x_{n-1}^{n+\ell -3}) .
\end{eqnarray*}
Now let us treat (\ref{E3}). First of all, by the definition of $w$ and $\mu_s$ we get
$$\psi'_s(x_0^{\ell-3})=\sum_{x_{\ell -2}}\psi'_s(x_0^{\ell -2}) ,$$ hence
\begin{eqnarray*}
\mathbb E_{\mu_s} w(x_0^{\ell-2}) & = & \sum_{x_0,\cdots, x_{\ell-2}}\mu_s([x_0^{\ell-2}])w(x_0^{\ell-2})\\
& = & \sum_{x_0,\cdots, x_{\ell-3}}\mu_s([x_0^{\ell-3}])\sum_{x_{\ell-2}}\frac{\psi_s'(x_0^{\ell-2})}{\psi_s(x_0^{\ell-3})}.
\end{eqnarray*}
By (\ref{identity2}), the last sum is equal to
$q\frac{\psi_s'(x_0^{\ell-3})}{\psi_s(x_0^{\ell-3})}$.
So
$$
\mathbb E_{\mu_s} w(x_0^{\ell-2})
=q\sum_{x_0,\cdots, x_{\ell-3}}\mu_s([x_0^{\ell-3}])\frac{\psi_s'(x_0^{\ell-3})}{\psi_s(x_0^{\ell-3})}.
$$
Repeating the same argument, we obtain by induction on $j$ that
$$\mathbb E_{\mu_s} w(x_0^{\ell-2})
=q^{\ell-2-j}\sum_{x_0,\cdots, x_{j}}\mu_s([x_0^{j}])\frac{\psi_s'(x_0^{j})}{\psi_s(x_0^{j})}.$$
So finally when $j=0$ we get
$$
\mathbb E_{\mu_s} w(x_0^{\ell-2})=q^{\ell-2}\sum_{b\in
S}\mu_s([b])\frac{\psi_s'(b)}{\psi_s(b)} =q^{\ell-2}\frac{\sum_{b\in
S}\psi_s'(b)}{\sum_{b\in S}\psi_s(b)}=\frac{1}{q(q-1)}P_{\varphi}'(s)
$$
where we used the fact that
$$
\mu_s ([b]) = \frac{\psi_s(b)}{\sum_{b\in S} \psi_s(b)}.
$$
\end{proof}
Now, we can prove the Ruelle type formula which was announced in Theorem \ref{Thm Ruelle formula}.
We restate it as the following proposition.
\begin{proposition}\label{prop 1}
For any $s\in\mathbb R$, we have
$$(q-1)^2\sum_{k=1}^\infty\frac{1}{q^{k+1}}\sum_{j=0}^{k-1}\mathbb E_{\mu_s}
\varphi(x_j,\cdots,x_{j+\ell-1})=P'_\varphi(s).$$
\end{proposition}
\begin{proof}
By (\ref{E1}) in Lemma \ref{identity}, for any $k\in\mathbb N^*$, we have
{\setlength\arraycolsep{2pt}
\begin{eqnarray*}
\sum_{j=0}^{k-1}\mathbb E_{\mu_s}\varphi(x_j,\cdots,x_{j+\ell-1}) &=&\sum_{j=0}^{k-1}\left(q\mathbb E_{\mu_s}w(x_j^{j+\ell-2})-\mathbb E_{\mu_s}v(x_j^{j+\ell-2})\right)\\
& = & q\mathbb E_{\mu_s}w(x_0^{\ell-2})+q\sum_{j=1}^{k-1}\mathbb E_{\mu_s}w(x_j^{j+\ell-2})
\nonumber\\
& & -\sum_{j=0}^{k-1}\mathbb E_{\mu_s}v(x_j^{j+\ell-2}).
\end{eqnarray*}}
Let
$$S_k=\sum_{j=0}^{k-1}\mathbb E_{\mu_s}v(x_j^{j+\ell-2}).$$
Then by~\eqref{E2} in Lemma \ref{identity}, we have
$$\sum_{j=1}^{k-1}\mathbb E_{\mu_s}w(x_j^{j+\ell-2})=S_{k-1}.$$
Using the above equality and (\ref{E3}) in Lemma \ref{identity}, we
can write
$$\sum_{j=0}^{k-1}\mathbb E_{\mu_s}\varphi(x_j,\cdots,x_{j+\ell-1})=\frac{P_{\varphi}'(s)}{q-1}+qS_{k-1}-S_k.$$
The facts $S_0=0$ and $S_k =O(k)$ imply $$
\sum_{k=1}^\infty \frac{1}{q^{k+1}} (q S_{k-1} - S_k) =0.
$$
Then
\begin{eqnarray*}
(q-1)^2\sum_{k=1}^\infty\frac{1}{q^{k+1}}\sum_{j=0}^{k-1}\mathbb E_{\mu_s}
\varphi(x_j,\cdots,x_{j+\ell-1})
=(q-1)^2\sum_{k=1}^\infty\frac{1}{q^{k+1}}\frac{P_{\varphi}'(s)}{q-1}.
\end{eqnarray*}
which is equal to $P_{\varphi}'(s)$, because $\sum_{k=1}^\infty 1/q^{k+1}=1/(q-1)$.
\end{proof}
\subsection{When $P'_\varphi(-\infty)=\alpha_{\min}$ and when $P'_\varphi(+\infty)=\alpha_{\max}$}\label{derivative pressure infinite}
We now give the proof of the statement announced in Theorem \ref{thm extreme1} concerning the extremal values of $P'_\varphi$ at infinity.
\begin{theorem}\label{thm extreme2}
We have the equality $$P'_{\varphi}(-\infty)=\alpha_{\min}$$ if and only if there exists an $x=(x_i)_{i=0}^\infty\in \Sigma_m$ such that $$\varphi(x_k,x_{k+1},\cdots,x_{k+\ell-1})=\alpha_{\min},\
\forall k\geq 0.$$ We have an analogous criterion for
$P'_{\varphi}(+\infty)=\alpha_{\max}.$
\end{theorem}
\begin{proof}
We give the proof of the criterion for $P'_{\varphi}(-\infty)=\alpha_{\min}$; the one for $P'_{\varphi}(+\infty)=\alpha_{\max}$ is similar.
(1). {\em Sufficient condition.} Suppose that there exists a $(z_j)_{j=0}^\infty\in \Sigma_m$ such that
$$\varphi(z_j,\cdots,z_{j+\ell-1})=\alpha_{\min},\ \ \forall j\geq 0.$$
We are going to prove that $P'_\varphi(-\infty)=\alpha_{\min}$. By Theorem \ref{thm strict convexity} (ii), we have $P'_\varphi(-\infty)\geq\alpha_{\min}$, thus we only need to show that $P'_\varphi(-\infty)\leq\alpha_{\min}$. Actually we only need to find a $(x_{j})_{j=1}^\infty\in \Sigma_m$ such that
$$\lim_{n\rightarrow \infty}\frac{1}{n}\sum_{j=1}^n\varphi(x_j,\cdots,x_{jq^{\ell-1}})=\alpha_{\min},$$
then by Proposition \ref{range}, $\alpha_{\min}\in [P'_\varphi(-\infty),P'_\varphi(+\infty)]$, so $P'_\varphi(-\infty)\leq\alpha_{\min}$. We can do this by choosing the sequence $(x_j)_{j=1}^\infty=\prod_{i\geq 1,\ q\nmid i}(x_{iq^j})_{j=0}^\infty$ with
$$(x_{iq^j})_{j=0}^\infty\ =\ (z_j)_{j=0}^\infty.$$
(2). {\em Necessary condition.} Suppose that there is no $(x_j)_{j=0}^\infty\in \Sigma_m$ such that
$$\varphi(x_j,\cdots,x_{j+\ell-1})=\alpha_{\min},\ \ \forall j\geq 0.$$
We are going to show that there exists an $\epsilon >0$ such that
$$P'_\varphi(s)\geq \alpha_{\min}+\epsilon,\ \ \forall s\in \mathbb R.$$ And this will imply that $P'_\varphi(-\infty)\geq \alpha_{\min}+\epsilon$.
From the hypothesis, we deduce that there exist no words $x_0^{n+\ell-1}$ with $n\geq m^\ell $ such that
\begin{equation}\label{equ prop extremal}
\varphi(x_j,\cdots,x_{j+\ell-1})=\alpha_{\min},\ \ \forall 0\leq j\leq n.
\end{equation}
Indeed, as $x_j^{j+\ell-1}\in S^{\ell}$ for all $0\leq j\leq n$, there are at most $m^\ell$ choices for $x_j^{j+\ell-1}$. So for any word $x_0^{n+\ell-1}$ with $n\geq m^\ell $, there exist two indices $j_1<j_2$ in $\{0,\cdots,n\}$ such that $$x_{j_1}^{j_1+\ell-1}=x_{j_2}^{j_2+\ell-1}.$$
Then if the word $x_0^{n+\ell-1}$ satisfies (\ref{equ prop extremal}), the infinite sequence
$$(y_j)_{j=0}^\infty\ =\ (x_{j_1},\cdots,x_{j_2-1})^\infty$$ would verify that
$$\varphi(y_j,\cdots,y_{j+\ell-1})=\alpha_{\min},\ \ \forall j\geq 0.$$
This contradicts the hypothesis. We then conclude that for any word $x_0^{m^\ell+\ell-1}\in S^{m^{\ell}+\ell-1}$ there exists at least one $0\leq j\leq m^{\ell}$ such that
$$\varphi(x_j,\cdots,x_{j+\ell-1})\geq \alpha'_{\min}>\alpha_{\min}$$ where $\alpha'_{\min}$ is the second smallest value of $\varphi$ over $S^\ell$, i.e., $\alpha'_{\min}=\min_{a\in S^\ell}\{\varphi(a)\ :\ \varphi(a)>\alpha_{\min} \}$.
We deduce from the above discussions that for any $(x_j)_{j=0}^\infty\in \Sigma_m$ and any $k\geq0$ we have
$$\sum_{j=k}^{k+m^{\ell}}\varphi(x_j,\cdots,x_{j+\ell-1})\geq m^{\ell}\alpha_{\min }+\alpha'_{\min}=(m^{\ell}+1)\alpha_{\min}+\delta,$$ where we denote $\delta=\alpha'_{\min}-\alpha_{\min}$.
This implies that for any $(x_j)_{j=0}^\infty\in \Sigma_m$ and any $n\geq 1$, we have
\begin{equation}\label{equ prop extrema1}
\sum_{j=0}^{n-1}\varphi(x_j,\cdots,x_{j+\ell-1})\geq n\alpha_{\min}+\left\lfloor\frac{n}{m^{\ell}+1}\right\rfloor \delta.
\end{equation}
Now, we will use the above inequality and Proposition \ref{prop 1} to show the existence of an $\epsilon>0$ such that $$P'_\varphi(s)\geq \alpha_{\min }+\epsilon,\ \ \forall s\in \mathbb R.$$
By Proposition \ref{prop 1}, we have
\begin{equation}\label{equ prop extrema2}
P'_\varphi(s)=(q-1)^2\sum_{k=1}^\infty\frac{1}{q^{k+1}}\sum_{j=0}^{k-1}\mathbb E_{\mu_s}
\varphi(x_j,\cdots,x_{j+\ell-1}).
\end{equation}
We can rewrite the term $\sum_{j=0}^{k-1}\mathbb E_{\mu_s}
\varphi(x_j,\cdots,x_{j+\ell-1})$ as
$$\mathbb E_{\mu_s}\sum_{j=0}^{k-1}
\varphi(x_j,\cdots,x_{j+\ell-1}).$$
By (\ref{equ prop extrema1}), we have for any $(x_j)_{j=0}^{\infty}\in \Sigma_m$
$$\sum_{j=0}^{k-1}\varphi(x_j,\cdots,x_{j+\ell-1})\geq k\alpha_{\min}+\left\lfloor\frac{k}{m^{\ell}+1}\right\rfloor \delta.$$
As $\mu_s$ is a probability measure, we have
$$\mathbb E_{\mu_s}\sum_{j=0}^{k-1}
\varphi(x_j,\cdots,x_{j+\ell-1})\geq k
\alpha_{\min}+\left\lfloor\frac{k}{m^{\ell}+1}\right\rfloor \delta.$$
Substituting this in (\ref{equ prop extrema2}), we get
\begin{eqnarray*}
P'_\varphi(s) & \geq & (q-1)^2\sum_{k=1}^\infty\frac{1}{q^{k+1}}\left(k
\alpha_{\min}+\left\lfloor\frac{k}{m^{\ell}+1}\right\rfloor \delta\right)\\
& = & \alpha_{\min}+\delta(q-1)^2\sum_{k=1}^\infty\frac{1}{q^{k+1}}\left(\left\lfloor\frac{k}{m^{\ell}+1}\right\rfloor\right).
\end{eqnarray*}
As
$$\delta(q-1)^2\sum_{k=1}^\infty\frac{1}{q^{k+1}}\left(\left\lfloor\frac{k}{m^{\ell}+1}\right\rfloor\right)=\delta(q-1)^2\sum_{k\geq m^{\ell}+1}\frac{1}{q^{k+1}}\left(\left\lfloor\frac{k}{m^{\ell}+1}\right\rfloor\right)>0$$
we have proved the existence of an $\epsilon >0$ such that
$$P'_{\varphi}(s)\geq \alpha_{\min}+\epsilon,\ \ \forall s\in \mathbb R.$$
\end{proof}
\subsection{Lower bounds of $\dim_H E(\alpha)$}
First, as an easy application of Proposition \ref{prop 1}, we get the following formula for $\dim_H \mathbb P_{\mu_s}$.
\begin{proposition}\label{prop formula dim P_s}
For any $s\in \mathbb R$, we have
$$\dim_H\mathbb P_{\mu_s}=\frac{1}{q^{\ell-1}\log m}[-sP_{\varphi}'(s)+P_{\varphi}(s)].$$
\end{proposition}
\begin{proof}
By Proposition \ref{prop basic formula loc dim}, we have
{\setlength\arraycolsep{2pt}
\begin{eqnarray}
-\frac{\log\mathbb P_{\mu_s}([x_1^n])}{n} & = &-\frac{s}{n}\sum_{j=1}^{\lfloor\frac{n}{q^{\ell-1}}\rfloor}\varphi(x_j\cdots x_{jq^{\ell-1}})+\frac{n-\lfloor n/q\rfloor}{n}\log \psi_s^q(\emptyset)
\nonumber\\
& & +\frac{B_{\frac{n}{q}}(x)}{\frac{n}{q}} -\frac{B_n(x)}{n}
\end{eqnarray}}
Applying the law of large numbers to the function $\psi_s$, we get
the $\mathbb P_{\mu_s}$-a.e. existence of the following limit
$\lim_{n\rightarrow\infty}\frac{B_n(x)}{n}$. So
$$\lim_{n\rightarrow\infty}\frac{B_{\frac{n}{q}}(x)}{\frac{n}{q}}
-\frac{B_n(x)}{n}=0, \ \ \mathbb P_{\mu_s}-{\rm a.e.}$$ On the other hand,
by Proposition \ref{prop 1} and Theorem~\ref{thm esperence general formula},
we have
$$\lim_{n\rightarrow \infty}\frac{1}{n}\sum_{j=1}^{\lfloor\frac{n}{q^{\ell-1}}\rfloor}\varphi(x_j\cdots x_{jq^{\ell-1}})=\frac{1}{q^{\ell-1}}P_{\varphi}'(s).$$
So we obtain that for $\mathbb P_{\mu_s}$-a.e. $x\in\Sigma_m$
$$\lim_{n\rightarrow\infty}-\frac{\log\mathbb P_{\mu_s}([x_1^n])}{n}=\frac{1}{q^{\ell-1}}[-sP_{\varphi}'(s)+P_{\varphi}(s)],$$ where we have used the fact that
$$P(s)=(q-1)q^{\ell -2}\log\sum_{j\in S^\ell}\psi_s(j)=(q-1)q^{\ell -2}q\log\psi_s(\emptyset). $$ Dividing by $\log m$ gives the stated formula for $\dim_H\mathbb P_{\mu_s}$.
\end{proof}
By Proposition \ref{prop 1}, Proposition \ref{prop formula dim P_s} and Billingsley's lemma (Lemma \ref{Billingsley}) we get the following lower bound for $\dim_HE(P'_\varphi(s))$.
\begin{proposition}For any $s\in\mathbb R$, we have
$$\dim_HE(P'_\varphi(s))\geq \frac{1}{q^{\ell-1}\log m}[-s
P'_\varphi(s)+P_\varphi(s)].$$
\end{proposition}
By the above proposition and Proposition \ref{prop upper bound} we obtain the following theorem about the exact Hausdorff dimension of $E(\alpha)$ for $\alpha\in (P_{\varphi}'(-\infty),P_{\varphi}'(+\infty))$.
\begin{theorem}
{\rm (i)} If $\alpha=P'_\varphi(s_\alpha)$ for some $s_\alpha\in\mathbb R$, then
$$\dim_H
E(\alpha)= \frac{1}{q^{\ell-1}\log m}[-P'_\varphi(s_\alpha)
s_\alpha+P_\varphi(s_\alpha)]=\frac{P_{\varphi}^*(\alpha)}{q^{\ell-1}\log m}.$$
{\rm (ii)} For $\alpha\in (P'_\varphi(-\infty),P'_\varphi(0)]$, we have
$$\dim_HE^+(\alpha)=\dim_HE(\alpha).$$
For $\alpha\in [P'_\varphi(0),P'_\varphi(+\infty))$, we have
$$\dim_HE^-(\alpha)=\dim_HE(\alpha).$$
\end{theorem}
\subsection{Dimension of level sets corresponding to the extreme points in $L_\varphi$}
So far, we have calculated $\dim_HE(\alpha)$ for $\alpha$ in
$(P'_\varphi(-\infty),P'_\varphi(+\infty))$. Now we turn to the case
when $\alpha=P'_\varphi(-\infty)$ or $P'_\varphi(+\infty)$. The aim of this subsection is to prove the following result.
\begin{theorem}\label{thm extrema}
If $\alpha=P'_\varphi(-\infty)$ or $P'_\varphi(+\infty)$, then
$E(\alpha)\neq \emptyset$ and
$$\dim_H E(\alpha)=\frac{P_{\varphi}^*(\alpha)}{q^{\ell-1}\log m}.$$
\end{theorem}
We will give the proof of Theorem \ref{thm extrema} for
$\alpha=P'_\varphi(-\infty)$. The proof for
$\alpha=P'_\varphi(+\infty)$ is similar.
\subsubsection{Accumulation points of $\mu_s$ when $s$ tends to $-\infty$ }
We view the vector $\pi_s$ defined by (\ref{def measure 1}) and the matrix $Q_s$ defined by (\ref{def measure 2})
as functions of $s$ taking values in finite dimensional Euclidean
spaces. As all components of $\pi_s$ and $Q_s$ are non-negative
and bounded by 1, the set $\{(\pi_s,Q_s),s\in \mathbb R \}$ is pre-compact in a Euclidean space. So there
exists a sequence $(s_n)_{n\in \mathbb N}$ of real numbers with
$\lim_{n\rightarrow\infty}s_n=-\infty$ such that the limits
$$\lim_{n\rightarrow\infty}\pi_{s_n}, \ \ \ \lim_{n\rightarrow\infty} Q_{s_n}$$
exist. Using these limits as initial law and
transition probability, we construct an $(\ell-1)$-step Markov measure which we
denote by $\mu_{-\infty}$. It is clear that the Markov measure $\mu_{s_n}$
corresponding to $\pi_{s_n}$ and $Q_{s_n}$ converges to
$\mu_{-\infty}$ with respect to the weak-star topology.
\begin{proposition} We have
$$\mathbb P_{\mu_{-\infty}}(E(P'_\varphi(-\infty)))=1.$$
In particular, $E(P'_\varphi(-\infty))\neq \emptyset$.
\end{proposition}
\begin{proof}
First, we introduce a functional on the space of probability measures which is defined by
$$M(\nu)=(q-1)^2\sum_{k=1}^\infty\frac{1}{q^{k+1}}\sum_{j=0}^{k-1}\mathbb E_\nu \varphi(x_j,\cdots,x_{j+\ell-1}).$$
The functional $\nu\mapsto M(\nu)$ is continuous with respect to the weak-star topology, because each map $\nu\mapsto \mathbb E_\nu \varphi(x_j,\cdots,x_{j+\ell-1})$ is continuous and the defining series converges uniformly in $\nu$.
What we have to show is that for $\mathbb P_{\mu_{-\infty}}$-a.e. $x\in \Sigma_m$ we have
$$\lim_{n\rightarrow\infty}\frac{1}{n}\sum_{k=1}^n\varphi(x_k,\cdots,x_{kq^{\ell-1}})=P_{\varphi}'(-\infty).$$
By Theorem \ref{thm esperence general formula}, for $\mathbb P_{\mu_{-\infty}}$-a.e. $x\in \Sigma_m$ the limit on the left hand side of the above equation equals $M(\mu_{-\infty})$. As $M$ is continuous and
$\mu_{s_n}$ converges to $\mu_{-\infty}$ when $n\rightarrow \infty$, we deduce that$$\lim_{n\rightarrow \infty}M(\mu_{s_n})=M(\mu_{-\infty}).$$
By Proposition \ref{prop 1}, we know that $$M(\mu_{s_n})=P_{\varphi}'(s_n).$$
So $$M(\mu_{-\infty})=\lim_{n\rightarrow \infty}P_{\varphi}'(s_n).$$
By Theorem \ref{thm convexity}, the map $s\rightarrow P_{\varphi}'(s)$ is increasing, thus we deduce that the above limit exists and
$$M(\mu_{-\infty})=P_{\varphi}'(-\infty).$$
This implies the desired result.
\end{proof}
We have the following formula for $\dim_H\mathbb P_{\mu_{-\infty}}$.
\begin{proposition} We have
$$\dim_H\mathbb P_{\mu_{-\infty}}=\lim_{s\rightarrow-\infty}\frac{[-sP_{\varphi}'(s)+P_{\varphi}(s)]}{q^{\ell-1}\log m}=\frac{P_{\varphi}^*(P_{\varphi}'(-\infty))}{q^{\ell-1}\log m}.$$
\end{proposition}
\begin{proof}
By Proposition \ref{prop loc dim}, we know for any probability measure $\nu$ we have
$$\dim_H\mathbb P_\nu=\frac{(q-1)^2}{\log m}\sum_{k=1}^{\infty}\frac{H_k(\nu)}{q^{k+1}}.$$
As the series on the right hand side converges uniformly in $\nu$, the map $\nu\mapsto \dim_H\mathbb P_{\nu}$ is continuous.
Since $\mu_{s_n}$ converges to $\mu_{-\infty}$ when $n\rightarrow \infty$, we deduce that
$$\lim_{n\rightarrow\infty}\dim_H\mathbb P_{\mu_{s_n}}=\dim_H\mathbb P_{\mu_{-\infty}}.$$
By Proposition \ref{prop formula dim P_s}, we have
$$\dim_H\mathbb P_{\mu_s}=\frac{[-sP_{\varphi}'(s)+P_{\varphi}(s)]}{q^{\ell-1}\log m}.$$
The derivative of the map $s\rightarrow\dim_H\mathbb P_{\mu_s}$ is
$$\frac{d}{ds}\dim_H\mathbb P_{\mu_s}=\frac{-sP_{\varphi}''(s)}{q^{\ell-1}\log m}.$$
As $P_{\varphi}(s)$ is convex on $\mathbb R$, $P_{\varphi}''(s)$ is non-negative, so for $s\leq 0$ the map $$s\rightarrow\dim_H\mathbb P_{\mu_s}$$ is increasing. Thus
$$\dim_H\mathbb P_{\mu_{-\infty}}=\lim_{n\rightarrow\infty}\dim_H\mathbb P_{\mu_{s_n}}=\lim_{s\rightarrow-\infty}\frac{[-sP_{\varphi}'(s)+P_{\varphi}(s)]}{q^{\ell-1}\log m}.$$
\end{proof}
\begin{proposition}
$$\dim_H E(P_{\varphi}'(-\infty))=\frac{P_{\varphi}^*(P_{\varphi}'(-\infty))}{q^{\ell-1}\log m}.$$
\end{proposition}
\begin{proof}
By the last two propositions and Billingsley's lemma, we get
$$\dim_HE(P_{\varphi}'(-\infty))\geq \frac{P_{\varphi}^*(P_{\varphi}'(-\infty))}{q^{\ell-1}\log m}.$$
We now show the reverse inequality. By the definition of $E^+(\alpha)$, we have
$$E(P_{\varphi}'(-\infty))\subset \bigcap_{\alpha\in (P_{\varphi}'(-\infty),P_{\varphi}'(0)]}E^+(\alpha)= \bigcap_{s\leq 0}E^+(P_{\varphi}'(s)).$$
So $$\dim_HE(P_{\varphi}'(-\infty))\leq \dim_HE^+(P_{\varphi}'(s))=\dim_HE(P_{\varphi}'(s))=\dim_H\mathbb P_{\mu_s},\ \ \forall s\leq0.$$
Now, as $s\mapsto \dim_H\mathbb P_{\mu_s}$ is increasing, we deduce that
$$\dim_HE(P_{\varphi}'(-\infty))\leq\lim_{s\rightarrow-\infty}\dim_H\mathbb P_{\mu_s}=\frac{P_{\varphi}^*(P_{\varphi}'(-\infty))}{q^{\ell-1}\log m}.$$
\end{proof}
\section{The invariant part of $E(\alpha)$}
From the classical dynamical systems point of view, the set $E(\alpha)$ is not invariant and, as we shall see, its dimension cannot be
described by invariant measures supported on it. Let us first examine the largest dimension of ergodic measures supported on the set $E(\alpha)$.
Here we can consider a more general setting. Let $f_1, f_2, \cdots, f_\ell$ be real functions defined on $\Sigma_m$. Let
\begin{equation}\label{MEA}
M_{f_1, \cdots, f_\ell}(x)=
\lim_{n\rightarrow
\infty}\frac{1}{n}\sum_{k=1}^{n}f_{1}(T^{k}x)f_{2}(T^{2k}x)\cdots
f_{\ell}(T^{\ell k}x)
\end{equation}
if the limit exists. In this section, for a real number $\alpha$, we
define $$E(\alpha)
=\{x \in \Sigma_m: M_{f_1, \cdots, f_\ell}(x) = \alpha\}.$$
In order to describe the invariant part of $E(\alpha)$, we introduce the so-called invariant spectrum:
$$
F_{\rm inv}(\alpha) = \sup \left\{\dim \mu: \mu \ \mbox{\rm ergodic}, \mu(E(\alpha))=1\ \right\}.
$$
In general,
$F_{\rm inv}(\alpha)$ is smaller than $\dim E(\alpha)$. It is even possible that no ergodic
measure is supported on $E(\alpha)$.
\begin{theorem}\label{mixing} Let $\ell=2$. Let $f_1$ and $f_2$ be two H\"{o}lder continuous functions on $\Sigma_m$.
If $E(\alpha)$ supports an ergodic measure, then
$$
F_{\rm inv}(\alpha)
= \sup \left\{\dim \mu: \mu \ \mbox{\rm ergodic}, \int f_1 d\mu \int f_2 d\mu = \alpha \ \right\}.
$$
\end{theorem}
\begin{proof} Let $\mu$ be an ergodic measure such that $\mu(E(\alpha))=1$. Then
\begin{eqnarray*}
\alpha
& = & \lim_{n\rightarrow \infty}\frac{1}{n}\sum_{k=1}^n \mathbb{E}_\mu [f_1(T^k x)f_2(T^{2k} x)]\\
& = & \lim_{n\rightarrow \infty}\frac{1}{n} \sum_{k=1}^n \mathbb{E}_\mu [f_1( x)f_2(T^{k} x)]\\
& = & \mathbb{E}_\mu [f_1(x) M_{f_2}(x)]
\end{eqnarray*}
where the first and third equalities are due to Lebesgue convergence theorem and the second one is due to
the invariance of $\mu$. Since $\mu$ is ergodic, $M_{f_2}(x)=\mathbb{E}_\mu f_2$ for $\mu$-a.e. $x$.
So, $\alpha = \mathbb{E}_\mu f_1 \mathbb{E}_\mu f_2$. It follows that
$$
F_{\rm inv}(\alpha) \le \sup \left\{\dim \mu: \mu \ \mbox{\rm ergodic}, \mathbb{E}_\mu f_1 \mathbb{E}_\mu f_2 = \alpha \ \right\}.
$$
To obtain the reverse inequality, it suffices to observe from standard higher-dimensional multifractal analysis for H\"older continuous functions that the above supremum is attained by a
Gibbs measure $\nu$ which is mixing and that the mixing property implies
$M_{f_1,f_2}(x) = \mathbb{E}_\nu f_1 \mathbb{E}_\nu f_2$ $\nu$-a.e..
\end{proof}
\begin{remark}\label{remark mixing}
In the above theorem, the assumption that $\mu$ is ergodic can be relaxed to $\mu$ being invariant. In fact, suppose that $\nu$ is an invariant measure
such that $\nu(E(\alpha)) = 1$. Then, by the ergodic decomposition theorem
and the corresponding decomposition of entropy (a theorem due to
Jacobs), there is an ergodic measure $\mu$ such that $\mu(E(\alpha)) = 1$ and
$h_\nu\le h_\mu$. When $\ell\ge 3$, the result in the above theorem remains true if we replace ``ergodic'' by ``multiple mixing'', i.e.
$$F_{\rm mix}(\alpha)=\sup \left\{\dim \mu: \mu \ \mbox{\rm multiple mixing},\ \mathbb{E}_\mu f_1 \cdots \mathbb{E}_\mu f_\ell= \alpha \ \right\},$$
where
$$
F_{\rm mix}(\alpha) = \sup \left\{\dim \mu: \mu \ \mbox{\rm multiple\ mixing}, \mu(E(\alpha))=1\ \right\}.
$$
\end{remark}
Here is a remarkable corollary of the above theorem. Assume that $f_1=f_2=f$.
If $\mu(E(\alpha))=1$ for some ergodic measure $\mu$, then we must have
$$
\alpha = \left(\int f d \mu\right)^2 \ge 0.
$$
There are examples of $f$ taking negative values such that for some $\alpha <0$ we have $\dim E(\alpha)>0$. However, the theorem together with the remark shows that there is no invariant measure with positive dimension supported by $E(\alpha)$. See Example 2 below.
In the proof of the theorem, the fact that $M_{f_1}$ is almost everywhere constant plays an important role.
This is not the case for $M_{f_1, f_2}$, so we cannot generalize the theorem to $\ell =3$.
For $f_1, f_2 \in L^2(\mu)$ where $\mu$ is an ergodic measure, Bourgain proved that
$M_{f_1, f_2}(x)$ exists for $\mu$-almost all $x$. The limit is in general not constant, but it can be
expressed in terms of the Kronecker factor $(Z, m, S)$, which can be viewed as a rotation on a compact abelian group $Z$.
Let $\pi$ be the factor map. Let
$$
\tilde{f}_i = \mathbb{E}(f_i|Z).
$$
Then $\mu$-almost surely
$$
M_{f_1, f_2}(x) = \int_Z \tilde{f}_1 (\pi(x) +z ) \tilde{f}_2 (\pi(x) + 2z ) dm(z).
$$
Then it is easy to deduce that $M_{f_1, f_2}(x)$ is $\mu$-almost surely constant if and only if
$$
\forall \gamma \in \widehat{Z} \ \mbox{\rm with}\ \gamma \not=1, \widehat{\tilde{f}_1}(\gamma)
\widehat{\tilde{f}_2}(\gamma^2) =0.
$$
This condition is extremely strong if $\mu$ is not weakly mixing. In other words, when $\ell
= 3$, it would be exceptional that
$E(\alpha)$ carries an ergodic measure which is not weakly mixing. When $\mu$ is mixing,
we have $M_{f_1, f_2}(x) = \int f_1 d\mu \int f_2 d\mu$ for $\mu$-almost all $x$.
For three or more functions, the existence of the almost everywhere limit $M_{f_1, f_2, \cdots, f_\ell}$
is not yet proved. But the $L^2$-convergence is proved by Host and Kra \cite{HK}.
The limit can be written as
a similar integral, but the integral is taken over a nilmanifold of order $2$ \cite{BHK}.
Let us also remark that the supremum in the theorem is also equal to the dimension of the $\alpha$-level set of
$$
\lim_{n\rightarrow \infty} \frac{1}{n^2}\sum_{1\le j, k\le n} f_1(T^j x) f_2(T^k x).
$$
See \cite{FLP}. Also see \cite{FSW_V}, where general $V$-statistics are studied.
\section{Examples}
The motivation for the subject initiated in \cite{FLM} is the
following example. The Riesz product method used in \cite{FLM}
does not work in this case; however, Theorem \ref{thm principal} does apply.
\begin{example}\label{11}
Let $q=2$, $m=2$, $\ell=2$ and let $\varphi$ be the potential given by $\varphi(x,y)=x_1y_1$ with $x=(x_i)_{i=1}^\infty ,y=(y_i)_{i=1}^\infty\in \Sigma_2$. So $$\left[\varphi(i,j)\right]_{(i,j)\in \{0,1\}^2}=
\left[ \begin{array}{cc}
0 & 0 \\
0 & 1
\end{array} \right].
$$
\end{example}
The system of equations (\ref{transer_equation}) in this case
becomes \begin{eqnarray*} \psi_s(0)^2 & = & \psi_s(0)+\ \ \psi_s(1),\\
\psi_s(1)^2& = & \psi_s(0)+e^s\psi_s(1).
\end{eqnarray*}
Fix $s\in\mathbb R$. By solving a fourth order algebraic equation, we get
the unique positive solution of the above system:
\begin{eqnarray*}
\psi_s(0) &= &\frac{1}{6}a(s)+\frac{2/3+2e^s}{a(s)}+\frac{2}{3},\\
\psi_s(1) &= &\psi_s(0)^2-\psi_s(0),
\end{eqnarray*}
where
$$ a(s)=\left(100-36
e^s+12\sqrt{69-54e^s-3e^{2s}-12e^{3s}}\right)^\frac{1}{3}.
$$
Recall that the pressure function is equal to
$$P_\varphi(s)=\log(\psi_s(0)+\psi_s(1)).$$
The minimal and maximal values of $\varphi$ are $0$ and $1$, which
are respectively attained by the sequences
$(x_j)_{j=0}^\infty=(0)^\infty$ and $(y_j)_{j=0}^\infty=(1)^\infty$
in the sense of
$$\varphi(x_j,x_{j+1})=0,\ \ \varphi(y_j,y_{j+1})=1,\ \ \forall j\geq 0.$$
Then by Theorem \ref{critere cercle}, we have
$$P'_\varphi(-\infty)=0,\ \ P'_\varphi(+\infty)=1.$$
Therefore, according to Theorem \ref{thm principal}, for any
$\alpha\in [0,1]$ we have
$$\dim_HE(\alpha)=\frac{-\alpha s_\alpha+P_\varphi(s_\alpha)}{2\log 2},$$
where $s_\alpha$ is the unique real such that $P'_\varphi(s_\alpha)=\alpha.$
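The quantities above are also easy to evaluate numerically. The following short Python sketch (purely illustrative and not part of the proofs; the solver, the initial guess and the grid of values of $s$ are ad hoc choices) solves the transfer equations, computes $P_\varphi(s)$, approximates $\alpha=P'_\varphi(s)$ by a difference quotient and evaluates $\dim_HE(\alpha)$.
\begin{verbatim}
# Numerical sketch for Example 1 (illustrative only).
# Solve psi0^2 = psi0 + psi1, psi1^2 = psi0 + e^s psi1, then evaluate
# P(s) = log(psi0 + psi1) and dim_H E(alpha) = (-alpha*s + P(s))/(2 log 2).
import numpy as np
from scipy.optimize import fsolve

def psi(s):
    f = lambda p: [p[0]**2 - p[0] - p[1],
                   p[1]**2 - p[0] - np.exp(s) * p[1]]
    # heuristic initial guess; it is exact at s = 0
    return fsolve(f, [1.0 + np.exp(s / 2), 1.0 + np.exp(s)])

def P(s):
    return np.log(psi(s).sum())

h = 1e-4
for s in np.linspace(-6.0, 6.0, 7):
    alpha = (P(s + h) - P(s - h)) / (2 * h)      # alpha = P'(s)
    dim = (-alpha * s + P(s)) / (2 * np.log(2))  # dim_H E(alpha)
    print(f"s = {s:+.1f}   alpha = {alpha:.4f}   dim = {dim:.4f}")
\end{verbatim}
At $s=0$ the solver returns $\psi_0(0)=\psi_0(1)=2$, so that $P_\varphi(0)=\log 4$ and $\dim_HE(P'_\varphi(0))=1$, in agreement with the formulas above.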
We now consider the invariant spectrum of $E(\alpha)$. As
$\varphi(x,y)=f(x)f(y)$ with $f(x)=x_1$,
by Theorem
~\ref{mixing}, we have
$$F_{\rm inv}(\alpha)=\sup\left\{\frac{h_\mu}{\log 2}\ :\ \mu\in \mathcal{M}_{\rm inv}(\Sigma_2),\
\int x_1 d\mu=\sqrt{\alpha} \right\}.$$ It is well known (see
\cite{FFW}) that the right hand side, which is attained by a
Bernoulli measure, is equal to
$$H(\sqrt{\alpha})=-\sqrt{\alpha}\log_2 \sqrt{\alpha}-(1-\sqrt{\alpha})\log_2 (1-\sqrt{\alpha}).$$
So $$F_{\rm inv }(\alpha)\ =\ H(\sqrt{\alpha}).$$
See Figure \ref{figure 1} for the graphs of the spectra
$\alpha\mapsto \dim_HE(\alpha)$ and $\alpha\mapsto F_{\rm
inv}(\alpha)$. We remark that, except at the extremal points
($\alpha=1/4$ or $1$), we have a strict inequality $F_{\rm
inv}(\alpha)<\dim_HE(\alpha)$. This shows that the invariant part of
$E(\alpha)$ is much smaller than $E(\alpha)$ itself. This is
different from the classical ergodic theory ($\ell=1$), where in
general we have $F_{\rm inv}(\alpha)=\dim_HE(\alpha)$ for all
$\alpha$ and actually $E(\alpha)$ is invariant.
\begin{figure}
\centering
\includegraphics[width=6.8cm]{12comp.eps}
\caption{The graphs of the spectrum $\alpha\mapsto \dim_HE(\alpha)$ and $\alpha\mapsto F_{\rm inv}(\alpha)$ (Example 1).}
\label{figure 1}
\end{figure}
The following example is a special case of a situation studied in \cite{FLM}, so the
result is not new; applying Theorem~\ref{thm principal} only
provides a second way to obtain it. But when we compare its invariant
spectrum with its multifractal spectrum, we discover a new
phenomenon: there is ``no'' invariant part in $E(\alpha)$ for some
$\alpha$.
\begin{example}
Let $q=2$, $m=2$, $\ell =2$ and $\varphi$ be the potential given by
$\varphi(x,y)=(2x_1-1)(2y_1-1)$. So
$$\left[\varphi(i,j)\right]_{(i,j)\in \{0,1\}^2}=
\left[ \begin{array}{rr}
1 & -1 \\
-1 & 1
\end{array} \right].
$$
\end{example}
The system of equations (\ref{transer_equation}) in this case
reduces to
\begin{eqnarray*}
\psi_s(0)^2&=&e^{s}\psi_s(0)+e^{-s}\psi_s(1),\\
\psi_s(1)^2&=&e^{-s}\psi_s(0)+e^{s}\psi_s(1).
\end{eqnarray*}
Because of the symmetry of $\varphi$, it is easy to find the unique
positive solution of the system:
$$\psi_s(0)\ =\ \psi_s(1)\ =\ e^{s}+e^{-s}.$$
Thus we get the pressure function
$$P_\varphi(s)=\log(\psi_s(0)+\psi_s(1))=\log 2+\log (e^{s}+e^{-s}).$$
It is evident that
$$P'_\varphi(s)\ =\ \frac{e^{s}-e^{-s}}{e^{s}+e^{-s}}$$
and
$$P'_\varphi(-\infty)=-1,\ \quad P'_\varphi(+\infty)=1.$$
So, by Theorem \ref{thm principal}, we have $L_{\varphi}=[-1,1]$,
and for any $\alpha\in [-1,1]$ we have
$$\dim_HE(\alpha)=\frac{-\alpha s_\alpha+P_\varphi(s_\alpha)}{2\log 2},$$
where $s_\alpha$ is such that
$$ \frac{e^{s_\alpha}-e^{-s_\alpha}}{e^{s_\alpha}+e^{-s_\alpha}}=\alpha.$$
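For the record, the spectrum admits a closed form in this example; the following elementary computation is only a sketch and is not needed elsewhere. Since $P'_\varphi(s)=\tanh s$, we have $s_\alpha=\frac12\log\frac{1+\alpha}{1-\alpha}$ and $e^{s_\alpha}+e^{-s_\alpha}=2/\sqrt{1-\alpha^2}$, hence
$$\dim_HE(\alpha)=\frac{-\alpha s_\alpha+P_\varphi(s_\alpha)}{2\log 2}
=1-\frac{(1+\alpha)\log_2(1+\alpha)+(1-\alpha)\log_2(1-\alpha)}{4}
=\frac12+\frac12\,H\!\left(\frac{1+\alpha}{2}\right),$$
where $H(x)=-x\log_2 x-(1-x)\log_2(1-x)$ denotes the binary entropy function which also appears below. In particular $\dim_HE(0)=1$ and $\dim_HE(\pm 1)=1/2$.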
We now consider the invariant spectrum of $E(\alpha)$. We have
$\varphi(x,y)=f(x)f(y)$ with $f(x)=2x_1-1$, then by Theorem
~\ref{mixing}, we have
$$F_{\rm inv}(\alpha)=\sup\left\{\frac{h_\mu}{\log 2}\ :\ \mu\in \mathcal{M}_{\rm inv}(\Sigma_2),\
\left(\int (2x_1-1) d\mu\right)^2=\alpha \right\}.$$ We see that we
must assume $\alpha\ge 0$. As $\int (2x_1-1) d\mu=2 \int x_1
d\mu-1$, the condition $\left(\int (2x_1-1) d\mu\right)^2=\alpha$
means $\int x_1 d\mu= \frac{1}{2}(1 \pm \sqrt{\alpha})$. The above
supremum is attained by a Bernoulli measure determined by the
probability vector $((1+\sqrt{\alpha})/2, (1- \sqrt{\alpha})/2)$.
In other words,
$$F_{\rm inv }(\alpha)\ =\ H\left(\frac{1+\sqrt{\alpha}}{2}\right)$$
where $H(x) = -x \log_2 x - (1-x) \log_2 (1-x)$.
See Figure \ref{figure 2} for the graphs of the spectra
$\alpha\mapsto \dim_HE(\alpha)$ and $\alpha\mapsto F_{\rm
inv}(\alpha)$. We see that, except at $\alpha=0$,
we have $F_{\rm inv}(\alpha)<\dim_HE(\alpha)$. Moreover, for $-1
\leq \alpha<0$, we have $F_{\rm inv }(\alpha)=0$. That is to say,
there is no invariant
measure with positive dimension sitting on
$E(\alpha)$ for $-1\leq \alpha<0$.
But $\dim_HE(\alpha)>0$.
\begin{figure}
\centering
\includegraphics[width=6.8cm]{12biscomp.eps}
\caption{The graphs of the spectra $\alpha\mapsto \dim_HE(\alpha)$ and $\alpha\mapsto F_{\rm inv}(\alpha)$ (Example 2).}
\label{figure 2}
\end{figure}
The following example presents a case where $L_\varphi$ is
strictly contained in the interval $[\alpha_{\min}, \alpha_{\max}]$.
\begin{example}
Let $q=2$, $m=2$, $\ell=2$ and $\varphi$ be the potential given by
$\varphi(x,y)=y_1-x_1$. In other words,
$$\left[\varphi(i,j)\right]_{(i,j)\in \{0,1\}^2}= \left[
\begin{array}{cc}
0 & -1 \\
1 & 0
\end{array} \right].
$$
\end{example}
The system of equations (\ref{transer_equation}) in this case
reduces to \begin{eqnarray*} \psi_s(0)^2
&=&\ \ \ \ \psi_s(0)+e^s\psi_s(1),\\
\psi_s(1)^2&=&e^{-s}\psi_s(0)+\ \ \psi_s(1). \end{eqnarray*}
It is easy to find the unique positive solution of the system:
$$\psi_s(0)=1+e^{\frac{s}{2}},\ \quad \psi_s(1)=1+e^{-\frac{s}{2}}.$$
The pressure function is then given by
$$P_\varphi(s)=\log(\psi_s(0)+\psi_s(1))=\log(2+e^{\frac{s}{2}}+e^{-\frac{s}{2}}).$$
So
$$P'_\varphi(s)= \frac{1}{2} \frac{e^{s/2}-e^{-s/2}}{2+e^{s/2}+e^{-s/2}},$$
and
$$P'_\varphi(-\infty)=-\frac{1}{2}, \ \quad P'_\varphi(+\infty)=\frac{1}{2}.$$
Remark that in this case we have
$$\alpha_{\min}<P'_\varphi(-\infty)<
P'_\varphi(+\infty)<\alpha_{\max}.
$$
By Theorem \ref{thm principal}, we have $L_{\varphi}=[-1/2,1/2]$,
and for any $\alpha\in [-1/2,1/2]$ we have
$$\dim_HE(\alpha)=\frac{-\alpha s_\alpha+P_\varphi(s_\alpha)}{2\log 2},$$
where $s_\alpha$ is the solution of
$$
\frac{e^{s_\alpha/2}-e^{-s_\alpha/2}}{2+e^{s_\alpha/2}+e^{-s_\alpha/2}}=2
\alpha.$$
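As in Example 2, the spectrum can be made completely explicit here; the following closed form is only a sketch of the elementary algebra and is not used elsewhere. Writing $u=e^{s_\alpha/2}$, the above equation is quadratic in $u$ with positive root $u=\frac{1+2\alpha}{1-2\alpha}$, so that
$$s_\alpha=2\log\frac{1+2\alpha}{1-2\alpha},\qquad P_\varphi(s_\alpha)=\log\frac{4}{1-4\alpha^2},$$
and therefore
$$\dim_HE(\alpha)=H\!\left(\tfrac12+\alpha\right),\qquad \alpha\in\left[-\tfrac12,\tfrac12\right],$$
with the same binary entropy function $H$ as in Example 2.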
We now consider the invariant spectrum of $E(\alpha)$. We have
$\varphi(x,y)=f(y)-f(x)$ with $f(x)=x_1$. By the Lebesgue convergence theorem, for any $\alpha\in \mathbb R$ such that there exists an invariant measure $\mu$ with $\mu(E(\alpha))=1$ we have
$$\alpha=\lim_{n\rightarrow \infty}\frac{1}{n}\sum_{k=1}^n\mathbb E_\mu(x_{2k}-x_k)=\lim_{n\rightarrow \infty}\frac{1}{n}\sum_{k=1}^n\left(\mathbb E_\mu(x_{2k})-\mathbb E_\mu(x_{k})\right)=0.$$ (The last equality is due to the invariance of $\mu$). This means that the only $\alpha$ such that there is an invariant measure with positive dimension sitting on $E(\alpha)$ is $\alpha =0$. The invariant spectrum then degenerates to one point. We have $F_{\rm inv}(0)=1$.
See Figure \ref{figure 3} for the graph of the spectrum
$\alpha\mapsto \dim_HE(\alpha)$.
\begin{figure}
\centering
\includegraphics[width=6.8cm]{12bis.eps}
\caption{The graph of the spectrum $\alpha\mapsto \dim_HE(\alpha)$
(Example 3).} \label{figure 3}
\end{figure}
We can easily solve the system (\ref{transer_equation}) for a class
of symmetric functions described in the following example;
Example 2 is a special case.
\begin{example}
Let $\ell=2$, $q\geq 2$ and $m\geq2$. Let
$\varphi=\left[\varphi(i,j)\right]_{(i,j)\in \{0,\cdots,m-1\}^2}$ be
a potential considered as a matrix. Suppose that each row of the
matrix is a permutation of the first row.
\end{example}
Recall the system of equations (\ref{transer_equation}):
$$\psi_s(i)^q\ =\ \sum_{j=0}^{m-1}e^{s\varphi(i,j)}\psi_s(j),\ \ i\in \{0,\cdots,m-1\}.$$
It is straightforward to verify that the constant vector
$(a,\cdots,a)$, with
$$a=\left(\sum_{j=0}^{m-1}e^{s\varphi(1,j)}\right)^{\frac{1}{q-1}},$$
is the unique positive solution of the above system (see Theorem
\ref{existence-unicity trans-equ}). The pressure function is then
given by
$$P_\varphi(s)
=\log \sum_{j=0}^{m-1}e^{s\varphi(1,j)}+(q-1)\log m.$$
We have
$$P'_\varphi(s)=\frac{\sum_{j=0}^{m-1}e^{s\varphi(1,j)}\varphi(1,j)}{\sum_{j=0}^{m-1}e^{s\varphi(1,j)}}.$$
Then
$$\lim_{s\rightarrow-\infty}P'_\varphi(s)=\lim_{s\rightarrow-\infty}\frac{\sum_{j=0}^{m-1}e^{s(\varphi(1,j)-\alpha_{\min})}\varphi(1,j)}{\sum_{j=0}^{m-1}e^{s(\varphi(1,j)-\alpha_{\min})}}=\alpha_{\min}=\min_j \varphi(1,j).$$
Similarly, we have
$$\lim_{s\rightarrow+\infty}P'_\varphi(s)=\alpha_{\max}=\max_j \varphi(1,j).$$
By the hypothesis of symmetry on $\varphi$, it is easy to see that
there exist sequences $(x_j)_{j=0}^\infty$ and $(y_j)_{j=0}^\infty
\in \Sigma_m$ such that
$$\varphi(x_j,x_{j+1})=\alpha_{\min},\ \quad
\varphi(y_j,y_{j+1})=\alpha_{\max},\ \forall j\geq 0.$$ Therefore,
by Theorem \ref{thm principal},
$L_{\varphi}=[\alpha_{\min},\alpha_{\max}]$, and for any $\alpha\in
[\alpha_{\min},\alpha_{\max}]$ we have
$$\dim_HE(\alpha)=\frac{-\alpha s_\alpha+P_\varphi(s_\alpha)}{q\log m},$$
where $s_\alpha$ is the solution of
$$\frac{\sum_{j=0}^{m-1}e^{s_\alpha\varphi(1,j)}\varphi(1,j)}{\sum_{j=0}^{m-1}e^{s_\alpha\varphi(1,j)}}=\alpha.$$
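These formulas are straightforward to evaluate in practice. The following Python sketch (illustrative only; the sample row and the grid of values of $s$ are arbitrary choices) computes $P_\varphi(s)$, $\alpha=P'_\varphi(s)$ and $\dim_HE(\alpha)$ directly from the first row of the matrix; with the row $(1,-1)$ and $q=m=2$ one recovers Example 2.
\begin{verbatim}
# Sketch for the symmetric example (illustrative only).
import numpy as np

row = np.array([1.0, -1.0])    # first row of the potential matrix
q, m = 2, len(row)

def P(s):
    return np.log(np.exp(s * row).sum()) + (q - 1) * np.log(m)

def P_prime(s):
    w = np.exp(s * row)
    return (w * row).sum() / w.sum()

for s in np.linspace(-4.0, 4.0, 9):
    alpha = P_prime(s)
    dim = (-alpha * s + P(s)) / (q * np.log(m))
    print(f"alpha = {alpha:+.4f}   dim = {dim:.4f}")
\end{verbatim}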
The invariant spectrum: For $\alpha \in [\alpha_{\min},
\alpha_{\max}]$, the invariant spectrum is attained by a Markov
measure. That is to say
$$
F_{\rm inv}(\alpha) = \sup \left\{ -\sum_{0\le i, j\le m-1} \pi_i p_{i, j} \log_m
p_{i, j}:
\sum_{0\le i, j\le m-1} \varphi(i, j) \pi_i p_{i, j} = \alpha \right\}
$$
where $P=(p_{i, j})$ is a stochastic matrix and $\pi=(\pi_0,\cdots,
\pi_{m-1})$ is an invariant probability vector of $P$, i.e. $\pi
P=\pi$.
In the next example we show that in general the invariant spectrum can be strictly larger than the mixing spectrum for some level set $E(\alpha)$.
\begin{example}
Let $m\geq 2$. Consider two functions $f$ and $h$ on $\Sigma_m$ defined by
\[
f(i)=\begin{cases} 1 & 0\le i <m-1\\ 2 & i=m-1 \end{cases} \qquad h(i)=\begin{cases} -2 & 0\le i <m-1\\ 1 & i=m-1 \end{cases}.
\]
Consider the level set
\[
E(0)=\left\{x\in\Sigma_m\, :\, \lim_{n\rightarrow\infty}\frac1n\sum_{k=1}^nf(x_k)f(x_{2k})h(x_{3k})=0\right\}.
\]
(That means $\phi(x,y,z)=f(x)f(y)h(z)$).
We claim that $F_{\rm mix}(0)<F_{\rm inv}(0)$ for $m\geq 49$.
\end{example}
Let $\delta_j$ denote the Dirac measure at $j\in \{0,1,\cdots,m-1\}$. Let
$$\nu=\frac{1}{m-1}\sum_{j=0}^{m-2}\delta_j.$$ We note that $\nu$ restricted on $\Sigma_{m-1}$ gives rise to the measure of maximal dimension on $\Sigma_{m-1}$. We consider a probability measure on $\Sigma_m$ defined by $$\mu=\frac{1}{2}\mu_1+\frac{1}{2}\mu_2,$$ where
$$\mu_1([x_1x_2\cdots x_n])=\prod_{k=0}^{\lfloor\frac{n-1}2\rfloor}\delta_{m-1}(x_{2k+1})\cdot\prod_{k=1}^{\lfloor\frac{n}2\rfloor}\nu(x_{2k}),$$
and
$$\mu_2([x_1x_2\cdots x_n])=\prod_{k=0}^{\lfloor\frac{n-1}2\rfloor}\nu(x_{2k+1})\cdot\prod_{k=1}^{\lfloor\frac{n}2\rfloor}\delta_{m-1}(x_{2k}).$$
Note that $T^{-1}\circ \mu_1=\mu_2$ and $T^{-1}\circ \mu_2=\mu_1$. So $\mu$ is shift invariant.
The measure $\mu$ sits on the set $A=A_1\bigcup A_2$ where
\[
A_1=\left\{x\in\Sigma_m\, :\, x_{2k+1}=m-1,\, x_{2k}\ne m-1,\, k\in\mathbb N\right\},
\]
\[
A_2=\left\{x\in\Sigma_m\, :\, x_{2k}=m-1,\, x_{2k+1}\ne m-1,\, k\in\mathbb N\right\}.
\]
Actually $\mu_1(A_1)=1$ and $\mu_2(A_2)=1$ and the sets $A_1$ and $A_2$ are disjoint.
We claim that $\mu$ is ergodic but not mixing. To see that $\mu$ is not mixing, we only need to observe that $T^{-1}A_1=A_2$ and $T^{-1}A_2=A_1$. From this and that $A_1$ and $A_2$ are disjoint we deduce that
$$\mu\left(T^{-2k}A_1\cap A_2\right)=0,\ \ \forall k\in \mathbb N. $$
This implies that $\mu$ is not mixing. The ergodicity of $\mu$ with respect to $T$ is due to the fact that $\mu_1$ and $\mu_2$ are ergodic with respect to $T^2=T\circ T$ and that they are supported
by disjoint sets.
For every $x\in A_1$ we have
$$
\lim_{n\rightarrow\infty}\frac1n\sum_{k=1}^nf(x_k)f(x_{2k})h(x_{3k})=\lim_{n\rightarrow\infty}\frac1n\left(\sum_{k-{\rm even}}+\sum_{k-{\rm odd}}\right)=\frac12\left(-2+2\right)=0
$$
and for every $x\in A_2$ we have
$$
\lim_{n\rightarrow\infty}\frac1n\sum_{k=1}^nf(x_k)f(x_{2k})h(x_{3k})=\lim_{n\rightarrow\infty}\frac1n\left(\sum_{k-{\rm even}}+\sum_{k-{\rm odd}}\right)=\frac12\left(4-4\right)=0.
$$
Hence, $\mu(E(0))=1$. We note that
\[
\int_{\Sigma_m}f\, d\mu\cdot\int_{\Sigma_m}f\, d\mu\cdot\int_{\Sigma_m}h\, d\mu=\left(\frac{3}{2}\right)^2\cdot\left(-\frac{1}{2}\right)=-\frac98<0.
\]
Let us compute the dimension of $\mu$ by computing the local entropy at typical points. If $x\in A$ then
\[
\mu([x_1\cdots x_{2n}])=(m-1)^{-n}.
\]
Since $\mu(A)=1$, this implies that $\dim_H\mu=\frac12\log(m-1)$, and hence $F_{\rm inv}(0)\geq \frac{1}{2}\log (m-1)$.
On the other hand, by Theorem \ref{mixing} and Remark \ref{remark mixing}, we have
\[
F_{\rm mix}(0)=\sup\left\{h_\mu \, :\, \mu-\text{multiple\ mixing, }\, \int_{\Sigma_m}h\, d\mu=0\right\}
\]
since $f$ is strictly positive. From standard multifractal analysis we know that the supremum is attained by a Bernoulli measure and
\begin{align*}
F_{\rm mix}(0)&=\max_{p_i\ge 0}\left\{-\sum_{i=0}^{m-1}p_i\log p_i\, :\, p_0+\cdots +p_{m-2}=\frac13, p_{m-1}=\frac23\right\}\\&
=\frac13\log(m-1)+\frac13\log3+\frac23\log\frac32.
\end{align*}
If $m>48$ we conclude $F_{\rm inv}(0)>F_{\rm mix}(0)$.
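The comparison between the two spectra at $\alpha=0$ can also be checked numerically. The short sketch below (illustrative only) evaluates the lower bound $\frac12\log(m-1)$ for $F_{\rm inv}(0)$ obtained from the measure $\mu$ above, and the value of $F_{\rm mix}(0)$ computed above; the output is consistent with the claim that $F_{\rm inv}(0)>F_{\rm mix}(0)$ for $m\geq 49$.
\begin{verbatim}
# Numerical check for the last example (illustrative only).
import numpy as np

def F_inv_lower(m):    # lower bound coming from the measure mu above
    return 0.5 * np.log(m - 1)

def F_mix(m):          # value of F_mix(0) computed above
    return (np.log(m - 1) + np.log(3.0)) / 3 + (2.0 / 3) * np.log(1.5)

for m in (10, 49, 100):
    print(m, F_inv_lower(m) > F_mix(m))   # 10 False, 49 True, 100 True
\end{verbatim}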
\section{Remarks and Problems}
{\em Multiplicatively invariant sets.} The first basic example ({\bf Example 1} above) which motivated our study leads to the set
$$
X_2 = \{(x_k)_{k\ge 1} \in \Sigma_2: \forall k\ge 1, x_k x_{2k} = 0 \}
$$
which was introduced in \cite{FLM}. It is known, by a result of F\"{u}rstenberg \cite{Furstenberg0}, that any shift-invariant closed set
has its Hausdorff dimension equal to its Minkowski (box-counting) dimension. Unfortunately the closed set $X_2$ is not shift-invariant.
Its Minkowski dimension was computed by Fan, Liao and Ma \cite{FLM} and its Hausdorff dimension was computed by Kenyon, Peres and Solomyak \cite{KPS}.
The results show that the Hausdorff dimension is smaller than the Minkowski dimension.
Recall that
$$
\dim_M X_2 = 0.82429..., \quad \dim_H X_2 = 0.81137...
$$
As observed by Kenyon, Peres and Solomyak,
the set $X_2$ is invariant under the action of the semigroup $\mathbb{N}$ in the sense that $T_r X_2 \subset X_2$ for all $r\in \mathbb{N}$
where $T_r$ is defined by
$$
x=(x_k)_{k\ge 1} \mapsto T_r x = (x_{rk})_{k\ge 1}.
$$
As observed by Fan, Liao and Ma, we have the decomposition
$$
\mathbb{N} = \bigsqcup_{i: \rm odd} i\Lambda
$$
where $\Lambda =\{1, 2, 2^2, 2^3, \cdots\}$ is the (multiplicative) sub-semigroup generated by $2$. This is one of the key points
in the present study. A similar decomposition holds for semigroups generated by a finite number of prime numbers. Using this decomposition,
Peres, Schmeling, Solomyak and Seuret \cite{PSSS} computed the Hausdorff dimension and the Minkowski dimension of sets like
$$
X_{2,3} = \{(x_k)_{k\ge 1} \in \Sigma_2: \forall k\ge 1, x_k x_{2k} x_{3k}= 0 \}.
$$
This is an important step.
{\em A generalization.}
Combining the ideas in \cite{PSSS} and those in the present paper, we can study the following limit
$$
\lim_{n \rightarrow \infty} \frac{1}{n} \sum_{k=1}^n \varphi(x_k, x_{2k}, x_{3k}).
$$
See \cite{Wu}. Notice that the computations in this case are more involved. Also notice that, by chance, the Riesz product method
used in \cite{FLM} is well adapted to the study of the special limit
$$
\lim_{n \rightarrow \infty} \frac{1}{n} \sum_{k=1}^n (2x_k-1) (2 x_{2k} -1)\cdots (2 x_{\ell k}-1)
$$
where $\ell \ge 2$ is any integer.
{\em Vector valued potential.} We indicate here how to extend our results to vector-valued potentials. First, let $\varphi, \gamma$ be two real-valued functions defined on $S^\ell$. Instead of considering the transfer operator $\mathcal{L}_s$ as defined in (\ref{transer-operator}), we consider the following one.
$$
\mathcal{L}_s \psi (a)
= \sum_{j \in S} e^{s \varphi(a, j)+\gamma(a, j)}
\psi (Ta, j),\ a\in S^{\ell-1},\ s\in \mathbb R.
$$
Still by Theorem \ref{existence-unicity trans-equ}, there exists a unique solution to the equation
$$(\mathcal{L}_s \psi)^{\frac{1}{q}}=\psi.$$
Then, we can similarly define the pressure function as indicated in (\ref{transer_equation 2}) and (\ref{pressure function}). We denote this pressure function by $P_{\varphi,\gamma}(s)$.
The arguments with which we proved the analyticity and convexity of $s\mapsto P_{\varphi}(s)$ can also be used to prove the same results for $s\mapsto P_{\varphi,\gamma}(s)$.
Let $\underline{\varphi}=(\varphi_1,\cdots,\varphi_d)$ be a function defined on $S^\ell$ taking values in $\mathbb R^d$. For $\underline{s}=(s_1,\cdots,s_d)\in \mathbb R^d$, we consider the following transfer operator.
$$
\mathcal{L}_{\underline{s}} \psi (a)
= \sum_{j \in S} e^{\langle \underline{s},\underline{\varphi}(a, j)\rangle}
\psi (Ta, j),\ a\in S^{\ell-1},
$$
where $\langle \cdot,\cdot\rangle$ denotes the scalar product in $\mathbb R^d$. We denote the associated pressure function by $P(\underline{\varphi})(\underline{s})$.
Then, by the above discussion, for any vectors $u,v\in \mathbb R^d$ the function
$$\mathbb R \ni s\ \longmapsto \ P(\underline{\varphi})(us+v)$$ is analytic and convex. We deduce from this that the function $$\underline{s}\ \longmapsto \ P(\underline{\varphi})(\underline{s})$$ is infinitely differentiable and convex on $\mathbb R^d$. We can prove that $P(\underline{\varphi})(\underline{s})$ is indeed analytic by the same argument used to prove the analyticity of $P_{\varphi}(s)$.
Similarly, we define the level sets $E(\underline{\alpha})$ $(\underline{\alpha}\in \mathbb R^d)$ of $\underline{\varphi}$. A vector version of Theorem \ref{thm principal} is stated by just replacing the derivative of the pressure function by the gradient.
We finish the paper with two problems.
{\em Subshifts of finite type.} Our study is strictly restricted to the full shift dynamics. It is a challenging problem
to study the dynamics of subshifts of finite type.
More general still are dynamics with the Markov property. More effort is needed to deal with $\beta$-shifts, which are not Markovian. New ideas are needed
to deal with these dynamics.
{\em Nonlinear cookie cutter.} The full shift is essentially the doubling dynamics $Tx = 2 x \mod 1$ on the interval $[0,1)$.
Cookie cutters are the first interval maps that come to mind after the doubling map. When the cookie cutter maps are not linear, the problem becomes difficult.
Based on the computation made in \cite{PS}, Liao and Rams \cite{LR} considered a special piecewise linear map of two branches defined on two intervals
$I_0$ and $I_1$ and studied the following limit
$$
\lim_{n \rightarrow \infty} \frac{1}{n} \sum_{k=1}^n 1_{I_1}(T^kx)1_{I_1}(T^{2k}x).
$$
The techniques presented in the present paper can be used to treat the problem
for general piecewise linear cookie cutter dynamics \cite{FLW,Wu}.
\begin{thebibliography}{99}
\bibitem{Assani}
\newblock I. Assani,
\newblock \emph{Multiple recurrence and almost sure convergence for weakly mixing dynamical systems},
\newblock Israel J. Math. \textbf{1-3} (1987), 111--124.
\bibitem{Barreira} L. Barreira,
``Dimension and recurrence in hyperbolic dynamics,''
\newblock Progress in Mathematics \textbf{272}. Birkh\"{a}user Verlag, Basel, 2008.
\bibitem{BSS}
\newblock L. Barreira, B. Saussol, J. Schmeling,
\newblock \emph{Higher-dimensional multifractal analysis},
\newblock J. Math. Pures Appl. \textbf{81} (2002), 67--91.
\bibitem{Bergelson}
\newblock V. Bergelson,
\newblock \emph{Weakly mixing PET},
\newblock Ergodic Theory Dynam. Systems \textbf{3} (1987), 337--349.
\bibitem{BHK} V. Bergelson, B. Host and B. Kra, {\em Multiple recurrence and nilsequences}
(with an appendix by I. Ruzsa), Inventiones Math. \textbf{160}, no. 2 (2005), 261--303.
\bibitem{Bour} J. Bourgain, {\em Double recurrence and almost sure convergence}, J. Reine Angew. Math. \textbf{404} (1990), 140--161.
\bibitem{Fal90}
\newblock K. Falconer,
\newblock ``Fractal geometry. Mathematical foundations and applications,"
\newblock John Wiley \& Sons, Chichester, 1990.
\bibitem{Fan1994} A.H. Fan, {\em Sur les dimensions de mesures}, Studia Math. \textbf{111} (1994), 1--17.
\bibitem{FFW}
\newblock A.H. Fan, D. J. Feng and J. Wu,
\newblock \emph{Recurrence, entropy and dimension},
\newblock J. London Math. Soc. \textbf{64} (2001), 229--244.
\bibitem{FLM} A.H. Fan, L. M. Liao and J. H. Ma, {\em Level sets of multiple ergodic averages},
\newblock Monatsh. Math. \textbf{168} (2012), 17--26.
\bibitem{FLP} A.H. Fan, L. M. Liao and J. Peyri{\`e}re, {\em Generic points in systems of specification and Banach
valued Birkhoff ergodic average}, Discrete Contin. Dyn. Syst. \textbf{21} (2008), 1103--1128.
\bibitem{FLW}
\newblock A.H. Fan, L. M. Liao and M. Wu,
\newblock {\em Multifractal analysis of some multiple ergodic averages in linear Cookie-Cutter dynamical systems},
\newblock preprint.
\bibitem{FSW} A.H. Fan, J. Schmeling and M. Wu, {\em Multifractal analysis of multiple ergodic averages},
Comptes Rendus Math\'ematique
\textbf{349}, no. 17--18 (2011), 961--964.
\bibitem{FSW_V} A.H. Fan, J. Schmeling and M. Wu, {\em Multifractal analysis of V-statistics}, to appear in {\em Further Developments in Fractals and Related Fields}, Eds. J. Barral and S. Seuret, 2013.
\bibitem{Furstenberg0} H. F\"{u}rstenberg, {\em Disjointness in ergodic theory, minimal sets, and a problem in Diophantine approximation},
Math. Systems Theory \textbf{1} (1967), 1--49.
\bibitem{Furstenberg} H. F\"{u}rstenberg, {\em Ergodic behavior of diagonal measures and a theorem of Szemer\'edi on arithmetic
progressions},
J. d'Analyse Math. \textbf{31} (1977), 204--256.
\bibitem{HK} B. Host and B. Kra, {\em Nonconventional ergodic averages and nilmanifolds}, Ann. Math. \textbf{161} (2005), 397--488.
\bibitem{KPS}
\newblock R. Kenyon, Y. Peres and B. Solomyak,
\newblock \emph{ Hausdorff dimension of the multiplicative golden
mean shift},
\newblock Comptes Rendus Math\'ematique \textbf{349}, no. 11--12 (2011), 625--628.
\bibitem{KPS1} R. Kenyon, Y. Peres and B. Solomyak, {\em Hausdorff dimension for fractals invariant under the multiplicative integers}, to appear in Ergodic Theory Dynam. Systems.
\bibitem{Kifer} Yu. Kifer, {\em A nonconventional strong law of large numbers and fractal dimensions of some multiple recurrence sets}, preprint.
\bibitem{LR}
\newblock L. M. Liao and M. Rams,
\newblock {\em Multifractal analysis of some multiple ergodic
averages for the systems with non-constant
Lyapunov exponents},
\newblock preprint.
\bibitem{PS} Y. Peres and B. Solomyak, {\em Dimension spectrum for a nonconventional ergodic average}, to appear in Real Analysis Exchange.
\bibitem{PSSS} Y. Peres, J. Schmeling, B. Solomyak and S. Seuret, {\em Dimensions of some fractals defined via the semigroup generated by 2 and 3},
preprint.
\bibitem{Varga}
\newblock R.S. Varga,
\newblock ``Ger\v{s}gorin and his circles,"
\newblock Springer Series in Computational Mathematics \textbf{36}, Springer-Verlag, Berlin, 2004.
\bibitem{Wu}
\newblock M. Wu,
\newblock Ph.D. thesis,
\newblock in preparation.
\end{thebibliography}
\end{document}
\begin{document}
\title{Points on singular Frobenius nonclassical curves}
\author{\textbf{Herivelto Borges} \\
\small{ICMC, Universidade de S\~ao Paulo, S\~ao Carlos, Brazil} \\
\textbf{Masaaki Homma}\\
\small{Department of Mathematics and Physics, Kanagawa University, Hiratsuka 259-1293, Japan}}
\maketitle
\begin{abstract}
In 1990, Hefez and Voloch proved that the number of $\mathbb{F}_q$-rational points on a nonsingular plane $q$-Frobenius nonclassical curve of degree $d$ is $N=d(q-d+2)$.
We address these curves in the singular setting. In particular, we prove that $d(q-d+2)$ is a lower bound on the number of $\mathbb{F}_q$-rational points on such curves of degree $d$.
\end{abstract}
\smallskip
\noindent \text{Keywords:} Algebraic curve, Frobenius nonclassical curve, Finite Field.
\smallskip
\noindent \text{2010 Mathematics Subject Classification:}
Primary 14H45; Secondary 14Hxx.
\section{Introduction}\label{intro}
Let $p$ be a prime number and $\mathbb{F}_q$ be the field with $q=p^s$ elements, for some integer $s\geq 1$. An irreducible plane curve $\mathcal{C},$ defined over $\mathbb{F}_q$, is called $q$-Frobenius nonclassical if the $q$-Frobenius map takes each simple point $P\in \mathcal{C}$ to the tangent line to $\mathcal{C}$ at $P$.
In this case, there is an exponent $h$ with
$p \leq p^h \leq d$ so that the intersection multiplicity
$i(\mathcal{C}.T_P(\mathcal{C});P)$ of $\mathcal{C}$ and the tangent
line
$T_P(\mathcal{C})$ at a simple point $P \in \mathcal{C}$
is at least $p^h$, and actually $i(\mathcal{C}.T_P(\mathcal{C});P) = p^h$
holds for a general point $P \in \mathcal{C}$.
For convenience,
\begin{equation}\label{eq:cases}
\nu =\begin{cases}
p^h \hspace{0.3cm} & \text{ if $\mathcal{C}$ is $q$-Frobenius
nonclassical}\\
1 & \text{if $\mathcal{C}$ is $q$-Frobenius classical}
\end{cases}
\end{equation}
is called the $q$-Frobenius order of $\mathcal{C}$.
Frobenius nonclassical curves were introduced in the work of St\"ohr and Voloch \cite{SV}, and
one reason for highlighting this special class of curves comes from the following result (see \cite[Theorem 2.3]{SV}).
\begin{thm}[St\"ohr-Voloch]\label{SV} Let $\mathcal{C}$ be an irreducible plane curve of degree $d$ and genus $g$ defined over $\mathbb{F}_q$. If $\mathcal{C}(\mathbb{F}_q)$ denotes the set of $\mathbb{F}_{q}$-rational points on $\mathcal{C}$, then
\begin{equation}\label{SV-bound}
\#\mathcal{C}(\mathbb{F}_q)\le \dfrac{\nu (2g-2) +(q+2)d}{2}.
\end{equation}
\end{thm}
Note that by $\mathbb{F}_{q}$-rational points on $\mathcal{C}$, we mean the $\mathbb{F}_{q}$-rational points
on the nonsingular model of $\mathcal{C}$.
Based on Theorem \ref{SV}, Frobenius nonclassicality can be considered as an obstruction to
using the sharper upper bound given by inequality (\ref{SV-bound}) with $\nu=1$.
That is a clear reason why one should try to understand such curves better. At the same time, investigating Frobenius nonclassical curves is a way of searching for curves with many points.
For instance, the Hermitian curve
$$x^{q+1}+y^{q+1}=1,$$
over $\mathbb{F}_{q^2}$, and the Deligne-Lusztig-Suzuki curve over $\mathbb{F}_q$:
$$y^q-y=x^{q_0}(x^q-x),$$
where $q_0=2^s$, $s\geq 1$, and $q=2q_0^2$,
which are well-known examples of curves with many points, are Frobenius nonclassical.
With regard to the number of rational points, a somewhat surprising fact was proved by Hefez and Voloch in the case of nonsingular curves (see \cite{HV}).
\begin{thm}[Hefez-Voloch] Let $\mathcal{X}$ be a nonsingular $q$-Frobenius nonclassical plane curve of degree $d$ defined over $\mathbb{F}_q$. If $\mathcal{X}(\mathbb{F}_q)$ denotes the set of $\mathbb{F}_{q}$-rational points on $\mathcal{X}$, then
\begin{equation}\label{HV-sharp}
\#\mathcal{X}(\mathbb{F}_q)=d(q-d+2).
\end{equation}
\end{thm}
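As a quick illustration of \eqref{HV-sharp} (a well-known fact, recalled here for convenience): the Hermitian curve above is nonsingular of degree $d=q+1$ and is $q^2$-Frobenius nonclassical over $\mathbb{F}_{q^2}$, so \eqref{HV-sharp} gives
\[
\#\mathcal{X}(\mathbb{F}_{q^2})=(q+1)\big(q^2-(q+1)+2\big)=(q+1)(q^2-q+1)=q^3+1,
\]
the familiar number of $\mathbb{F}_{q^2}$-rational points on the Hermitian curve.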
Let us recall that if $\mathcal{X}$ is a nonsingular $q$-Frobenius nonclassical plane curve of degree $d$, and $\nu>2$ is its $q$-Frobenius order defined in \eqref{eq:cases}, then (see \cite[Theorem 8.77]{HKT})
\begin{equation}\label{d-range}
\sqrt{q}+1\leq d\leq \frac{q-1}{\nu-1}.
\end{equation}
Now note that if $\nu>3$ and $d$ is within the range given by \eqref{d-range}, then
\begin{equation}\label{compare}
d(q-d+2)>\frac{d(q+d-1)}{2},
\end{equation}
where the number on the right hand side of \eqref{compare} is the bound given by Theorem \ref{SV} for the case $\nu=1$.
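For completeness, here is a short verification of \eqref{compare}, using only \eqref{d-range}: since $d>0$, the inequality \eqref{compare} is equivalent to
\[
2(q-d+2)>q+d-1\quad\Longleftrightarrow\quad d<\frac{q+5}{3},
\]
and $\nu>3$ (hence $\nu-1\geq 3$) together with \eqref{d-range} yields $d\leq\frac{q-1}{\nu-1}\leq\frac{q-1}{3}<\frac{q+5}{3}$.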
In other words, \eqref{HV-sharp} tells us that nonsingular Frobenius nonclassical curves of degree $d$ usually have many rational points in comparison with the Frobenius classical ones of the same degree. In this paper, we show that this statement applies more broadly once the restriction to nonsingular curves is dropped. More precisely, we prove the following:
\begin{thm}\label{main0}Let $\mathcal{C}$ be a $q$-Frobenius nonclassical curve of degree $d$ and genus $g$. If $M_q^S$ is the number of simple points of $\mathcal{C}$ in
$PG(2,q)$, then
\begin{equation}\label{bound1}
M_q^S\geq d(q-d+2)+2(g^*-g) +\sum\limits_{P \in Sing(\mathbb{F}_{q})} m_P(m_P-2),
\end{equation}
where $m_P$ are the multiplicities of the singular points $P\in Sing(\mathbb{F}_{q})\subseteq PG(2,q)$ of $\mathcal{C}$,
and $$g^*:=\frac{(d-1)(d-2)}{2}-\sum\limits_{P \in Sing(\mathbb{F}_{q})} \frac{1}{2}m_P(m_P-1)$$ is its $\mathbb{F}_q$-virtual genus. Moreover, equality holds in \eqref{bound1} if and only if all branches of $\mathcal{C}$ are linear.
\end{thm}
Note that the bound \eqref{bound1} does not depend on the Frobenius order $\nu$. A very interesting consequence of Theorem \ref{main0} is the following:
\begin{cor}\label{main1}Let $\mathcal{C}$ be a $q$-Frobenius nonclassical curve of degree $d$. If $M_q$ is the number of points of $\mathcal{C}$ in
$PG(2,q)$, then
\begin{equation}
M_q\geq d(q-d+2),
\end{equation}
and equality holds if and only if $\mathcal{C}$ is nonsingular.
\end{cor}
\section{Preliminaries}\label{prelim}
Let us begin by briefly recalling the notions of
classicality and $q$-Frobenius classicality for plane
curves. For a more general discussion, including the notion and properties of branches, we refer to \cite{HKT} and \cite{HK}.
Let $\mathcal{C}\subset \P^2$ be an irreducible algebraic curve of degree $d$ and genus $g$.
The numbers $0=\epsilon_0<\epsilon_1=1<\epsilon_2$
represent all possible intersection multiplicities of $\mathcal{C}$ with lines
of $\P^2$ at a generic point of $\mathcal{C}$. Such a sequence is called the order
sequence of $\mathcal{C}$, and it can be characterized as the smallest sequence (in
lexicographic order) such that $\det(D_{\zeta}^{\epsilon_i}x_j)\ne0$, where
$D_\zeta^{k}$ denotes the $k$th Hasse derivative with respect to a separating
variable $\zeta$, and $x_0,x_1,x_2$ are the coordinate functions on
$\mathcal{C}\subset \P^2$. The curve $\mathcal{C}$ is called classical if $\epsilon_2=2$.
If $\mathcal{C}$ is defined over a finite field $\mathbb{F}_q$, then there is a smallest
integer $\nu \in\{1,\epsilon_2\}$
such that
\begin{equation}\label{det}
\det\begin{pmatrix}
x_0^q & x_1^q & x_2^q\\
x_0& x_1 & x_2 \\
D_{\zeta}^{\nu} x_0& D_{\zeta}^{\nu}x_1 &
D_{\zeta}^{\nu}x_2
\end{pmatrix}
\neq 0.
\end{equation}
The number $\nu$ is called the $q$-Frobenius order of $\mathcal{C}$, and such a
curve is called $q$-Frobenius classical if $\nu=1$.
Associated to the curve $\mathcal{C}$, there exist two distinguished divisors $R$ and $S$,
which play an important role in estimating the number of $\mathbb{F}_q$-rational points of $\mathcal{C}$. When the curve is Frobenius nonclassical,
some valuable information can be obtained by comparing the multiplicities $v_P(R)$ and $v_P(S)$ for the points $P \in \mathcal{C}$. In general, computing these multiplicities is tantamount to studying some functions in $\overline{\mathbb{F}}_{q}(x,y)$ given by Wronskian determinants such as $\det(D_{\zeta}^{\epsilon_i}x_j)$ and \eqref{det}. This idea was first exploited by Hefez and Voloch, in their investigation of the nonsingular case \cite{HV}. As noted by Hirschfeld and Korchm\'aros in \cite{HK}, this idea can be useful in the singular case as well.
Let $\overline{\mathbb{F}}_{q}(\mathcal{C}):=\overline{\mathbb{F}}_{q}(x,y)$ be the function field of an irreducible curve $\mathcal{C}: f(x,y)=0$. Recall that for any given place $\mathcal{P}$ of $\overline{\mathbb{F}}_{q}(\mathcal{C})$ and a local parameter $t$ at $\mathcal{P}$, one can associate a (primitive) branch $\gamma$ in special affine coordinates:
$$x(t)=a+a_1t^{j_1}+\cdots, \quad y(t)=b+b_1t^{s}+\cdots,$$
where $s\geq j_1$. The point $(a,b) \in \overline{\mathbb{F}}_{q}\times \overline{\mathbb{F}}_{q}$ is called
the center of the branch $\gamma$.
The branch $\gamma$ is called linear if $j_1=1$. If $p\nmid j_1$ (resp. $p\mid j_1$) then the branch is called tame (resp. wild). Obviously, linear branches are tame.
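For a concrete illustration (a standard example, not taken from the references above): the ordinary cusp $y^2=x^3$ has a single branch at the origin, parametrized by
$$x(t)=t^{2},\quad y(t)=t^{3},$$
so $j_1=2$; this branch is wild in characteristic $2$ and tame, though not linear, in any other characteristic. By contrast, each branch of an ordinary node, as well as the branch at any nonsingular point, has $j_1=1$ and is therefore linear.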
When the curve $\mathcal{C}: f(x,y)=0$ is defined over $\mathbb{F}_{q}$, then $\mathcal{C}(\mathbb{F}_q)$ will denote the set of places of degree one in the function field $\mathbb{F}_{q}(\mathcal{C})$. Considering the projective closure $F(x,y,z)=0$ of $\mathcal{C}$, we define the following numbers, which are clearly related to $\#\mathcal{C}(\mathbb{F}_q)$:
\begin{defi}\label{define}
\begin{enumerate}[\rm(i)]
\item $M_q^S=$ number of smooth points of $F(x,y,z)=0$ in $PG(2,q)$.
\item $M_q=$ number of points of $F(x,y,z)=0$ in $PG(2,q)$.
\item $B_q=$ number of branches of $\mathcal{C}$ centered at a point in $PG(2,q)$.
\end{enumerate}
\end{defi}
Note that
\begin{equation}\label{ineq0}
M_q^S\leq M_q\leq B_q \text{ and } M_q^S\leq \#\mathcal{C}(\mathbb{F}_q)\leq B_q.
\end{equation}
Hereafter, $\mathcal{C}$ will denote an irreducible plane curve of degree $d$ and genus $g$
defined over $\mathbb{F}_q$. A relevant step to prove our main result is based on the following:
\begin{thm}[Hirschfeld-Korchm\'aros]\label{thmHK} Assume that $\mathcal{C}$ has only tame branches. If $\mathcal{C}$ is a nonclassical and $q$-Frobenius nonclassical curve, then
$$B_q\geq (q-1)d-(2g-2),$$
and equality holds if and only if every singular branch of $\mathcal{C}$ is centered at a point of $PG(2,q)$.
\end{thm}
The next lemma extends Hirschfeld-Korchm\'aros' result, and our proof is built on theirs. In particular, all the definitions and notations, explained in detail in \cite{HK}, will be borrowed.
\begin{lem}\label{lemaHK}If $\mathcal{C}$ is $q$-Frobenius nonclassical, then there exist at least $(q-1)d-(2g-2)$ tame branches centered at a point of $PG(2,q)$.
In particular,
\begin{equation}\label{ineq1}
B_q\geq (q-1)d-(2g-2).
\end{equation}
Moreover, if every branch centered at a point of $PG(2,q)$ is tame, then \eqref{ineq1} is an equality if and only if all the remaining branches are linear.
\end{lem}
\begin{proof} We closely follow the notation used in \cite{HK}.
\\
Set
\begin{center}
$
\det(D_{\zeta}^{(\epsilon_i)} x_j)=
\begin{vmatrix}
D_{\zeta}^{(1)} x & D_{\zeta}^{(1)} y \\
D_{\zeta}^{(p^m)} x & D_{\zeta}^{(p^m)} y
\end{vmatrix}
$
\quad
and
\quad
$
\det(D_{\zeta}^{(\nu_i)} x_j)=
\begin{vmatrix}
x^q-x & y^q-y \\
D_{\zeta}^{(p^m)} x & D_{\zeta}^{(p^m)} y
\end{vmatrix}
$
\end{center}
The $q$-Frobenius nonclassicality of $\mathcal{C}$ gives
\begin{equation}\label{fnc}
\begin{vmatrix}
x^q-x & y^q-y \\
D_{\zeta}^{(1)} x & D_{\zeta}^{(1)} y
\end{vmatrix}
=0,
\end{equation}
and then establishes the relation
$$\det(D_{\zeta}^{(\nu_i)} x_j)\cdot D_{\zeta}^{(1)} x=\det(D_{\zeta}^{(\epsilon_i)} x_j)\cdot (x^q-x).$$
Therefore, for any place $\mathcal{P}$ of $\overline{\mathbb{F}}_{q}(\mathcal{C})$,
\begin{equation}\label{eq1}
{v}_{\mathcal{P}}(S)-{v}_{\mathcal{P}}(R)=\operatorname{ord}_{\mathcal{P}}(x^q-x)-\operatorname{ord}_{\mathcal{P}}(D_{\zeta}^{(1)} x).
\end{equation}
Let $\gamma$ be the (primitive) branch associated to the place $\mathcal{P}$, represented by
$$x(t)=a+a_1t^{j_1}+\cdots, \quad y(t)=b+b_1t^{s}+\cdots,$$
with $j_1\leq s$. If $\gamma$ is tame, i.e., $p\nmid j_1$, then it follows (see \cite[proof of Theorem 1.4]{HK}) that
\begin{equation}\label{tame}
{v}_{\mathcal{P}}(S)-{v}_{\mathcal{P}}(R)= \begin{cases}
1, \quad \text{if } (a,b)\in \mathbb{F}_q\times \mathbb{F}_q;\\
-(j_1-1), \quad \text{otherwise}.
\end{cases}
\end{equation}
Now let us address the wild case, i.e., the case $p\mid j_1$. Note that if $D_{\zeta}^{(1)} x=0$ then, from \eqref{fnc}, we have $D_{\zeta}^{(1)} y=0$,
which contradicts the primitivity of $\gamma$. Hence, $\operatorname{ord}_{\mathcal{P}}(D_{\zeta}^{(1)} x) = k> j_1-1$ and \eqref{eq1} yield
\begin{equation}\label{wild}
{v}_{\mathcal{P}}(S)-{v}_{\mathcal{P}}(R)= \begin{cases}
-(k-j_1), \quad \text{if } a\in \mathbb{F}_q;\\
-k, \quad \text{otherwise}.
\end{cases}
\end{equation}
Therefore, \eqref{tame} and \eqref{wild} can be reduced to
\begin{equation*}
{v}_{\mathcal{P}}(S)-{v}_{\mathcal{P}}(R)= \begin{cases}
1, \quad \text{if $\gamma$ is tame with center in } PG(2,q);\\
\leq 0, \quad \text{otherwise}.
\end{cases}
\end{equation*}
Hence, since $\deg (S-R)=d(q-1)-(2g-2)$, we arrive at the desired lower bound for the number
of tame branches centered at a point of $PG(2,q)$.
Now let us assume that every branch centered at a point of $PG(2,q)$ is tame. If $B_q=d(q-1)-(2g-2)$, then \eqref{tame} implies that the remaining tame branches are
linear. In addition, \eqref{wild} implies that any wild branch can be considered as
$$x(t)=a+a_1t^{j_1}+\cdots, \quad y(t)=b+b_1t^{s}+\cdots,$$
with $2\leq j_1\leq s$, $\operatorname{ord}_{\mathcal{P}}(D_{\zeta}^{(1)} x)=j_1$ and $a\in \mathbb{F}_{q}$.
However, if this is the case, then from
$$\operatorname{ord}_{\mathcal{P}}\Big((x^q-x)(D_{\zeta}^{(1)} y)\Big)=\operatorname{ord}_{\mathcal{P}}\Big((y^q-y)(D_{\zeta}^{(1)} x)\Big),$$ we obtain
$$\operatorname{ord}_{\mathcal{P}}(y^q-y)=\operatorname{ord}_{\mathcal{P}}(D_{\zeta}^{(1)} y)\geq s-1\geq 1,$$ i.e.,
$b\in \mathbb{F}_{q}$. Thus, by hypothesis, such a branch must be tame, and then the assertion follows.
The converse follows immediately from the fact that linear branches are automatically tame.
\end{proof}
\section{The result}
The aim of this section is to prove Theorem \ref{main0} and some of its relevant corollaries.
\text{}\\
{ \bf Proof of Theorem \ref{main0}.} Note that from Lemma \ref{lemaHK} and the definition of $B_q$, we have
\begin{equation}\label{ineq2}
(q-1)d-(2g-2)\leq B_q \leq \sum\limits_{P \in PG(2,q)} m_P.
\end{equation}
Let $M_q^S$ be the number of smooth $\mathbb{F}_q$-points on $\mathcal{C}$, and set $g^*=\frac{(d-1)(d-2)}{2}-\sum\limits_{P \in Sing(\mathbb{F}_{q})} \frac{1}{2}m_P(m_P-1)$. Then
\begin{eqnarray*}
M_q^S & = &\sum\limits_{P \in PG(2,q)} m_P-\sum\limits_{P \in Sing(\mathbb{F}_{q})} m_P\\
& = &\sum\limits_{P \in PG(2,q)} m_P-\sum\limits_{P \in Sing(\mathbb{F}_{q})} m_P(m_P-1)+\sum\limits_{P \in Sing(\mathbb{F}_{q})} m_P(m_P-2)\\
& = &\sum\limits_{P \in PG(2,q)} m_P+(2g^*-2)-(d^2-3d)+\sum\limits_{P \in Sing(\mathbb{F}_{q})} m_P(m_P-2)\\
& = &\sum\limits_{P \in PG(2,q)} m_P-\Big((q-1)d-(2g-2)\Big)+d(q-d+2)+2(g^*-g)+\sum\limits_{P \in Sing(\mathbb{F}_{q})} m_P(m_P-2).
\end{eqnarray*}
Here the third equality uses the definition of $g^*$, and the last one follows by adding and subtracting $(q-1)d-(2g-2)$.
Since \eqref{ineq2} gives $\sum\limits_{P \in PG(2,q)} m_P-\Big((q-1)d-(2g-2)\Big)\geq 0$,
it follows that
\begin{equation}\label{ineq3}
M_q^S\geq d(q-d+2)+2(g^*-g)+\sum\limits_{P \in Sing(\mathbb{F}_{q})} m_P(m_P-2).
\end{equation}
Now note that equality in \eqref{ineq3} is equivalent to equality on both sides of \eqref{ineq2}.
Let us assume we have equality in \eqref{ineq3}. The condition $B_q = \sum\limits_{P \in PG(2,q)} m_P$ means that all branches centered at a point of $PG(2,q)$ are linear and hence tame. Using the additional equality $B_q=(q-1)d-(2g-2)$, Lemma \ref{lemaHK} implies that all branches of $\mathcal{C}$ are linear. Conversely, the linearity of all branches of $\mathcal{C}$
immediately gives $B_q =\sum\limits_{P \in PG(2,q)} m_P$ and, from Lemma \ref{lemaHK},
$B_q=(q-1)d-(2g-2)$.
\qed
\text{}\\
{ \bf Proof of Corollary \ref{main1}}. From \eqref{ineq0} and \eqref{bound1}, since $g^*\geq g$ and $m_P\geq 2$ for each singular point, we clearly have $M_q\geq d(q-d+2)$.
Let us assume that equality holds. Then \eqref{ineq0} and \eqref{bound1} imply $M_q^S=M_q$ and
$g=g^*$, respectively. The first equality means that all $\mathbb{F}_q$-points of $\mathcal{C}$ are smooth, and thus $g^*=(d-1)(d-2)/2$. The latter equality, in addition, gives $g=(d-1)(d-2)/2$. Therefore, $\mathcal{C}$ is a smooth curve. Conversely, if $\mathcal{C}$ is smooth then $M_q=B_q$, and Lemma \ref{lemaHK} gives $B_q=(q-1)d-(2g-2)$. Since $g=(d-1)(d-2)/2$, the result follows.
\qed
The following additional consequences are also worth mentioning.
\begin{cor}\label{cor0} Let $\mathcal{C}$ be a $q$-Frobenius nonclassical curve of degree $d$ whose singularities are ordinary. If the singular points have their tangent lines defined over $\mathbb{F}_q$, then
$$\#\mathcal{C}(\mathbb{F}_q)= d(q-d+2) +\sum\limits_{P \in \mathcal{C}} m_P(m_P-1).$$
\end{cor}
\begin{proof} Note that all singularities are ordinary and defined over $\mathbb{F}_q$. Thus
$g^*=g$, and equality in \eqref{bound1} holds. That is,
$$M_q^S=d(q-d+2) +\sum\limits_{P \in Sing(\mathbb{F}_{q})} m_P(m_P-2).$$
On the other hand, since the tangent lines of the singular points are defined over $\mathbb{F}_q$, each such point $P$ gives rise to exactly $m_P$ $\mathbb{F}_q$-rational points of
$\mathcal{C}$. Therefore
$$\#\mathcal{C}(\mathbb{F}_q)=M_q^S+\sum\limits_{P \in Sing(\mathbb{F}_{q})} m_P=d(q-d+2) +\sum\limits_{P \in Sing(\mathbb{F}_{q})} m_P(m_P-1),$$ which gives the result.
\end{proof}
\begin{cor}\label{cor1} Let $\mathcal{C}$ be a $q$-Frobenius nonclassical curve of degree $d>1$.
Then $$d\geq \sqrt{q}+1,$$ and equality holds if and only if $\mathcal{C}$ is ($\mathbb{F}_{q}$-isomorphic to) the Hermitian curve.
\end{cor}
\begin{proof}
By Theorem \ref{main0} and Hasse-Weil bound, we have
$$d(q-d+2)\leq M_q^S\leq 1+q+(d-1)(d-2)\sqrt{q}.$$
Since $d(q-d+2)\leq 1+q+(d-1)(d-2)\sqrt{q}$ if and only if
$(d-1)(\sqrt{q}+1)(\sqrt{q}+1-d)\leq 0$, the inequality $d\geq \sqrt{q}+1$ follows.
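The equivalence used above amounts to the elementary identity (a routine expansion, recorded here for the reader's convenience)
\[
1+q+(d-1)(d-2)\sqrt{q}-d(q-d+2)=(d-1)(\sqrt{q}+1)\big(d-(\sqrt{q}+1)\big),
\]
whose right-hand side is nonnegative for $d>1$ precisely when $d\geq\sqrt{q}+1$.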
The additional assertion follows from a well known characterization of the Hermitian curve
(see e.g. \cite[Theorem 10.47]{HKT}).
\end{proof}
\begin{cor} Let $\mathcal{C}$ be a plane curve defined over $\mathbb{F}_q$ of degree $d$, with $1<d\leq \sqrt{q}$, and genus $g$. Then
$$\#\mathcal{C}(\mathbb{F}_q) \leq \frac{(2g-2)+(q+2)d}{2}.$$
\end{cor}
\begin{proof}
This follows directly from Corollary \ref{cor1} and Theorem \ref{SV}.
\end{proof}
\section{Examples}
One can find several examples of Frobenius nonclassical curves that illustrate the previous results
(see \cite{Bo} and \cite{Bo1}). Let us consider the particular curve
\begin{equation}\label{ex}
\mathcal{C}: x^4y^2 + x^2y^4 + x^4yz + xy^4z + x^4z^2 + x^2y^2z^2 + y^4z^2 +x^2z^4 + xyz^4 + y^2z^4=0
\end{equation}
over $\mathbb{F}_4$. This curve has some remarkable properties (see \cite{Bo} and \cite{Homma}). One particular feature of $\mathcal{C}$ is its $4$-Frobenius nonclassicality.
The set of singular points of $\mathcal{C}$ is the whole of $PG(2,2)$, and all such singularities are nodes whose tangent lines are defined over $\mathbb{F}_4$. Therefore, Corollary \ref{cor0} gives
$$\#\mathcal{C}(\mathbb{F}_4)=6(4-6+2)+7\cdot 2\cdot (2-1)=14.$$
The next example illustrates how the choice of singular $q$-Frobenius nonclassical curves of degree $d$, over nonsingular ones of the same degree, can make a significant difference with respect to the number of rational points. Consider the curves
$$\mathcal{C}_1: x^{13}=y^{13}+z^{13}$$
and
$$\mathcal{C}_2: x^{13}=y^{13}+y^9z^4+y^3z^{10}+yz^{12}+2z^{13},$$
over $\mathbb{F}_{27}$. They are both $27$-Frobenius nonclassical, and only $\mathcal{C}_1$ is smooth. One can check that $\#\mathcal{C}_1(\mathbb{F}_{27})=208$, whereas $\#\mathcal{C}_2(\mathbb{F}_{27})=280$, in addition to $\mathcal{C}_2$ being of smaller genus.
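As a consistency check: for the nonsingular curve $\mathcal{C}_1$, the Hefez-Voloch formula \eqref{HV-sharp} indeed gives $13\,(27-13+2)=13\cdot 16=208$.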
\printindex
\end{document}